Diffstat (limited to 'drivers/parport/share.c')
-rw-r--r--  drivers/parport/share.c  1014
1 files changed, 1014 insertions, 0 deletions
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
new file mode 100644
index 000000000000..ae7becf7efa5
--- /dev/null
+++ b/drivers/parport/share.c
@@ -0,0 +1,1014 @@
1 | /* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $ | ||
2 | * Parallel-port resource manager code. | ||
3 | * | ||
4 | * Authors: David Campbell <campbell@tirian.che.curtin.edu.au> | ||
5 | * Tim Waugh <tim@cyberelk.demon.co.uk> | ||
6 | * Jose Renau <renau@acm.org> | ||
7 | * Philip Blundell <philb@gnu.org> | ||
8 | * Andrea Arcangeli | ||
9 | * | ||
10 | * based on work by Grant Guenther <grant@torque.net> | ||
11 | * and Philip Blundell | ||
12 | * | ||
13 | * Any part of this program may be used in documents licensed under | ||
14 | * the GNU Free Documentation License, Version 1.1 or any later version | ||
15 | * published by the Free Software Foundation. | ||
16 | */ | ||
17 | |||
18 | #undef PARPORT_DEBUG_SHARING /* undef for production */ | ||
19 | |||
20 | #include <linux/config.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/threads.h> | ||
24 | #include <linux/parport.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/ioport.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/kmod.h> | ||
33 | |||
34 | #include <linux/spinlock.h> | ||
35 | #include <asm/irq.h> | ||
36 | |||
37 | #undef PARPORT_PARANOID | ||
38 | |||
39 | #define PARPORT_DEFAULT_TIMESLICE (HZ/5) | ||
40 | |||
41 | unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE; | ||
42 | int parport_default_spintime = DEFAULT_SPIN_TIME; | ||
43 | |||
44 | static LIST_HEAD(portlist); | ||
45 | static DEFINE_SPINLOCK(parportlist_lock); | ||
46 | |||
47 | /* list of all allocated ports, sorted by ->number */ | ||
48 | static LIST_HEAD(all_ports); | ||
49 | static DEFINE_SPINLOCK(full_list_lock); | ||
50 | |||
51 | static LIST_HEAD(drivers); | ||
52 | |||
53 | static DECLARE_MUTEX(registration_lock); | ||
54 | |||
55 | /* What you can do to a port that's gone away.. */ | ||
56 | static void dead_write_lines (struct parport *p, unsigned char b){} | ||
57 | static unsigned char dead_read_lines (struct parport *p) { return 0; } | ||
58 | static unsigned char dead_frob_lines (struct parport *p, unsigned char b, | ||
59 | unsigned char c) { return 0; } | ||
60 | static void dead_onearg (struct parport *p){} | ||
61 | static void dead_initstate (struct pardevice *d, struct parport_state *s) { } | ||
62 | static void dead_state (struct parport *p, struct parport_state *s) { } | ||
63 | static size_t dead_write (struct parport *p, const void *b, size_t l, int f) | ||
64 | { return 0; } | ||
65 | static size_t dead_read (struct parport *p, void *b, size_t l, int f) | ||
66 | { return 0; } | ||
67 | static struct parport_operations dead_ops = { | ||
68 | .write_data = dead_write_lines, /* data */ | ||
69 | .read_data = dead_read_lines, | ||
70 | |||
71 | .write_control = dead_write_lines, /* control */ | ||
72 | .read_control = dead_read_lines, | ||
73 | .frob_control = dead_frob_lines, | ||
74 | |||
75 | .read_status = dead_read_lines, /* status */ | ||
76 | |||
77 | .enable_irq = dead_onearg, /* enable_irq */ | ||
78 | .disable_irq = dead_onearg, /* disable_irq */ | ||
79 | |||
80 | .data_forward = dead_onearg, /* data_forward */ | ||
81 | .data_reverse = dead_onearg, /* data_reverse */ | ||
82 | |||
83 | .init_state = dead_initstate, /* init_state */ | ||
84 | .save_state = dead_state, | ||
85 | .restore_state = dead_state, | ||
86 | |||
87 | .epp_write_data = dead_write, /* epp */ | ||
88 | .epp_read_data = dead_read, | ||
89 | .epp_write_addr = dead_write, | ||
90 | .epp_read_addr = dead_read, | ||
91 | |||
92 | .ecp_write_data = dead_write, /* ecp */ | ||
93 | .ecp_read_data = dead_read, | ||
94 | .ecp_write_addr = dead_write, | ||
95 | |||
96 | .compat_write_data = dead_write, /* compat */ | ||
97 | .nibble_read_data = dead_read, /* nibble */ | ||
98 | .byte_read_data = dead_read, /* byte */ | ||
99 | |||
100 | .owner = NULL, | ||
101 | }; | ||
102 | |||
103 | /* Call attach(port) for each registered driver. */ | ||
104 | static void attach_driver_chain(struct parport *port) | ||
105 | { | ||
106 | /* caller has exclusive registration_lock */ | ||
107 | struct parport_driver *drv; | ||
108 | list_for_each_entry(drv, &drivers, list) | ||
109 | drv->attach(port); | ||
110 | } | ||
111 | |||
112 | /* Call detach(port) for each registered driver. */ | ||
113 | static void detach_driver_chain(struct parport *port) | ||
114 | { | ||
115 | struct parport_driver *drv; | ||
116 | /* caller has exclusive registration_lock */ | ||
117 | list_for_each_entry(drv, &drivers, list) | ||
118 | drv->detach (port); | ||
119 | } | ||
120 | |||
121 | /* Ask kmod for some lowlevel drivers. */ | ||
122 | static void get_lowlevel_driver (void) | ||
123 | { | ||
124 | /* There is no actual module called this: you should set | ||
125 | * up an alias for modutils. */ | ||
126 | request_module ("parport_lowlevel"); | ||
127 | } | ||
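For reference, the alias mentioned in the comment above would normally live in the module loader's configuration rather than in the kernel; a hedged example, assuming the PC-style parport_pc driver is the lowlevel driver wanted:

alias parport_lowlevel parport_pc

(in /etc/modules.conf for modutils, or /etc/modprobe.conf for module-init-tools).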
128 | |||
129 | /** | ||
130 | * parport_register_driver - register a parallel port device driver | ||
131 | * @drv: structure describing the driver | ||
132 | * | ||
133 | * This can be called by a parallel port device driver in order | ||
134 | * to receive notifications about ports being found in the | ||
135 | * system, as well as ports no longer available. | ||
136 | * | ||
137 | * The @drv structure is allocated by the caller and must not be | ||
138 | * deallocated until after calling parport_unregister_driver(). | ||
139 | * | ||
140 | * The driver's attach() function may block. The port that | ||
141 | * attach() is given will be valid for the duration of the | ||
142 | * callback, but if the driver wants to take a copy of the | ||
143 | * pointer it must call parport_get_port() to do so. Calling | ||
144 | * parport_register_device() on that port will do this for you. | ||
145 | * | ||
146 | * The driver's detach() function may block. The port that | ||
147 | * detach() is given will be valid for the duration of the | ||
148 | * callback, but if the driver wants to take a copy of the | ||
149 | * pointer it must call parport_get_port() to do so. | ||
150 | * | ||
151 | * Returns 0 on success. Currently it always succeeds. | ||
152 | **/ | ||
153 | |||
154 | int parport_register_driver (struct parport_driver *drv) | ||
155 | { | ||
156 | struct parport *port; | ||
157 | |||
158 | if (list_empty(&portlist)) | ||
159 | get_lowlevel_driver (); | ||
160 | |||
161 | down(®istration_lock); | ||
162 | list_for_each_entry(port, &portlist, list) | ||
163 | drv->attach(port); | ||
164 | list_add(&drv->list, &drivers); | ||
165 | up(®istration_lock); | ||
166 | |||
167 | return 0; | ||
168 | } | ||
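As a rough usage sketch of the call above (the example_* names are hypothetical and not part of this file; struct parport_driver and the attach/detach prototypes come from <linux/parport.h>), a device driver registers from its module init and unregisters on exit:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/parport.h>

static void example_attach(struct parport *port)
{
	printk(KERN_INFO "example: port %s arrived\n", port->name);
	/* usually the driver calls parport_register_device() here */
}

static void example_detach(struct parport *port)
{
	/* undo whatever example_attach() set up for this port */
}

static struct parport_driver example_driver = {
	.name   = "example",
	.attach = example_attach,
	.detach = example_detach,
};

static int __init example_init(void)
{
	return parport_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	parport_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);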
169 | |||
170 | /** | ||
171 | * parport_unregister_driver - deregister a parallel port device driver | ||
172 | * @drv: structure describing the driver that was given to | ||
173 | * parport_register_driver() | ||
174 | * | ||
175 | * This should be called by a parallel port device driver that | ||
176 | * has registered itself using parport_register_driver() when it | ||
177 | * is about to be unloaded. | ||
178 | * | ||
179 | * When it returns, the driver's attach() routine will no longer | ||
180 | * be called, and for each port that attach() was called for, the | ||
181 | * detach() routine will have been called. | ||
182 | * | ||
183 | * All the driver's attach() and detach() calls are guaranteed to have | ||
184 | * finished by the time this function returns. | ||
185 | **/ | ||
186 | |||
187 | void parport_unregister_driver (struct parport_driver *drv) | ||
188 | { | ||
189 | struct parport *port; | ||
190 | |||
191 | down(®istration_lock); | ||
192 | list_del_init(&drv->list); | ||
193 | list_for_each_entry(port, &portlist, list) | ||
194 | drv->detach(port); | ||
195 | up(®istration_lock); | ||
196 | } | ||
197 | |||
198 | static void free_port (struct parport *port) | ||
199 | { | ||
200 | int d; | ||
201 | spin_lock(&full_list_lock); | ||
202 | list_del(&port->full_list); | ||
203 | spin_unlock(&full_list_lock); | ||
204 | for (d = 0; d < 5; d++) { | ||
205 | if (port->probe_info[d].class_name) | ||
206 | kfree (port->probe_info[d].class_name); | ||
207 | if (port->probe_info[d].mfr) | ||
208 | kfree (port->probe_info[d].mfr); | ||
209 | if (port->probe_info[d].model) | ||
210 | kfree (port->probe_info[d].model); | ||
211 | if (port->probe_info[d].cmdset) | ||
212 | kfree (port->probe_info[d].cmdset); | ||
213 | if (port->probe_info[d].description) | ||
214 | kfree (port->probe_info[d].description); | ||
215 | } | ||
216 | |||
217 | kfree(port->name); | ||
218 | kfree(port); | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * parport_get_port - increment a port's reference count | ||
223 | * @port: the port | ||
224 | * | ||
225 | * This ensures that a struct parport pointer remains valid | ||
226 | * until the matching parport_put_port() call. | ||
227 | **/ | ||
228 | |||
229 | struct parport *parport_get_port (struct parport *port) | ||
230 | { | ||
231 | atomic_inc (&port->ref_count); | ||
232 | return port; | ||
233 | } | ||
234 | |||
235 | /** | ||
236 | * parport_put_port - decrement a port's reference count | ||
237 | * @port: the port | ||
238 | * | ||
239 | * This should be called once for each call to parport_get_port(), | ||
240 | * once the port is no longer needed. | ||
241 | **/ | ||
242 | |||
243 | void parport_put_port (struct parport *port) | ||
244 | { | ||
245 | if (atomic_dec_and_test (&port->ref_count)) | ||
246 | /* Can destroy it now. */ | ||
247 | free_port (port); | ||
248 | |||
249 | return; | ||
250 | } | ||
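A minimal sketch of the refcounting contract described above (the cache_* names and the cached pointer are hypothetical): a driver that keeps the port pointer beyond its attach() callback takes its own reference and drops it when the pointer is forgotten.

static struct parport *cached_port;		/* hypothetical saved pointer */

static void cache_attach(struct parport *port)
{
	/* keep the pointer valid after attach() returns */
	cached_port = parport_get_port(port);
}

static void cache_detach(struct parport *port)
{
	if (cached_port == port) {
		parport_put_port(cached_port);	/* matching put for the get above */
		cached_port = NULL;
	}
}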
251 | |||
252 | /** | ||
253 | * parport_register_port - register a parallel port | ||
254 | * @base: base I/O address | ||
255 | * @irq: IRQ line | ||
256 | * @dma: DMA channel | ||
257 | * @ops: pointer to the port driver's port operations structure | ||
258 | * | ||
259 | * When a parallel port (lowlevel) driver finds a port that | ||
260 | * should be made available to parallel port device drivers, it | ||
261 | * should call parport_register_port(). The @base, @irq, and | ||
262 | * @dma parameters are for the convenience of port drivers, and | ||
263 | * for ports where they aren't meaningful needn't be set to | ||
264 | * anything special. They can be altered afterwards by adjusting | ||
265 | * the relevant members of the parport structure that is returned | ||
266 | * and represents the port. They should not be tampered with | ||
267 | * after calling parport_announce_port, however. | ||
268 | * | ||
269 | * If there are parallel port device drivers in the system that | ||
270 | * have registered themselves using parport_register_driver(), | ||
271 | * they are not told about the port at this time; that is done by | ||
272 | * parport_announce_port(). | ||
273 | * | ||
274 | * The @ops structure is allocated by the caller, and must not be | ||
275 | * deallocated before calling parport_remove_port(). | ||
276 | * | ||
277 | * If there is no memory to allocate a new parport structure, | ||
278 | * this function will return %NULL. | ||
279 | **/ | ||
280 | |||
281 | struct parport *parport_register_port(unsigned long base, int irq, int dma, | ||
282 | struct parport_operations *ops) | ||
283 | { | ||
284 | struct list_head *l; | ||
285 | struct parport *tmp; | ||
286 | int num; | ||
287 | int device; | ||
288 | char *name; | ||
289 | |||
290 | tmp = kmalloc(sizeof(struct parport), GFP_KERNEL); | ||
291 | if (!tmp) { | ||
292 | printk(KERN_WARNING "parport: memory squeeze\n"); | ||
293 | return NULL; | ||
294 | } | ||
295 | |||
296 | /* Init our structure */ | ||
297 | memset(tmp, 0, sizeof(struct parport)); | ||
298 | tmp->base = base; | ||
299 | tmp->irq = irq; | ||
300 | tmp->dma = dma; | ||
301 | tmp->muxport = tmp->daisy = tmp->muxsel = -1; | ||
302 | tmp->modes = 0; | ||
303 | INIT_LIST_HEAD(&tmp->list); | ||
304 | tmp->devices = tmp->cad = NULL; | ||
305 | tmp->flags = 0; | ||
306 | tmp->ops = ops; | ||
307 | tmp->physport = tmp; | ||
308 | memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info)); | ||
309 | rwlock_init(&tmp->cad_lock); | ||
310 | spin_lock_init(&tmp->waitlist_lock); | ||
311 | spin_lock_init(&tmp->pardevice_lock); | ||
312 | tmp->ieee1284.mode = IEEE1284_MODE_COMPAT; | ||
313 | tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE; | ||
314 | init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */ | ||
315 | tmp->spintime = parport_default_spintime; | ||
316 | atomic_set (&tmp->ref_count, 1); | ||
317 | INIT_LIST_HEAD(&tmp->full_list); | ||
318 | |||
319 | name = kmalloc(15, GFP_KERNEL); | ||
320 | if (!name) { | ||
321 | printk(KERN_ERR "parport: memory squeeze\n"); | ||
322 | kfree(tmp); | ||
323 | return NULL; | ||
324 | } | ||
325 | /* Search for the lowest free parport number. */ | ||
326 | |||
327 | spin_lock(&full_list_lock); | ||
328 | for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) { | ||
329 | struct parport *p = list_entry(l, struct parport, full_list); | ||
330 | if (p->number != num) | ||
331 | break; | ||
332 | } | ||
333 | tmp->portnum = tmp->number = num; | ||
334 | list_add_tail(&tmp->full_list, l); | ||
335 | spin_unlock(&full_list_lock); | ||
336 | |||
337 | /* | ||
338 | * Now that the portnum is known, finish the initialisation. | ||
339 | */ | ||
340 | sprintf(name, "parport%d", tmp->portnum = tmp->number); | ||
341 | tmp->name = name; | ||
342 | |||
343 | for (device = 0; device < 5; device++) | ||
344 | /* assume the worst */ | ||
345 | tmp->probe_info[device].class = PARPORT_CLASS_LEGACY; | ||
346 | |||
347 | tmp->waithead = tmp->waittail = NULL; | ||
348 | |||
349 | return tmp; | ||
350 | } | ||
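On the port (lowlevel) driver side, a hedged sketch of the lifecycle this function starts; my_ops, the I/O resources, and the PARPORT_MODE_PCSPP setting are illustrative only, and a real driver fills in a complete struct parport_operations:

/* Hypothetical lowlevel-driver fragment. */
static struct parport_operations my_ops;	/* placeholder; must be fully populated */

static struct parport *my_probe_one(void)
{
	struct parport *p;

	p = parport_register_port(0x378, 7, PARPORT_DMA_NONE, &my_ops);
	if (!p)
		return NULL;

	p->modes = PARPORT_MODE_PCSPP;		/* adjust before announcing */
	parport_announce_port(p);		/* device drivers' attach() runs here */
	return p;
}

static void my_remove_one(struct parport *p)
{
	parport_remove_port(p);			/* device drivers' detach() runs here */
	parport_put_port(p);			/* drop the reference from registration */
}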
351 | |||
352 | /** | ||
353 | * parport_announce_port - tell device drivers about a parallel port | ||
354 | * @port: parallel port to announce | ||
355 | * | ||
356 | * After a port driver has registered a parallel port with | ||
357 | * parport_register_port, and performed any necessary | ||
358 | * initialisation or adjustments, it should call | ||
359 | * parport_announce_port() in order to notify all device drivers | ||
360 | * that have called parport_register_driver(). Their attach() | ||
361 | * functions will be called, with @port as the parameter. | ||
362 | **/ | ||
363 | |||
364 | void parport_announce_port (struct parport *port) | ||
365 | { | ||
366 | int i; | ||
367 | |||
368 | #ifdef CONFIG_PARPORT_1284 | ||
369 | /* Analyse the IEEE1284.3 topology of the port. */ | ||
370 | parport_daisy_init(port); | ||
371 | #endif | ||
372 | |||
373 | parport_proc_register(port); | ||
374 | down(®istration_lock); | ||
375 | spin_lock_irq(&parportlist_lock); | ||
376 | list_add_tail(&port->list, &portlist); | ||
377 | for (i = 1; i < 3; i++) { | ||
378 | struct parport *slave = port->slaves[i-1]; | ||
379 | if (slave) | ||
380 | list_add_tail(&slave->list, &portlist); | ||
381 | } | ||
382 | spin_unlock_irq(&parportlist_lock); | ||
383 | |||
384 | /* Let drivers know that new port(s) have arrived. */ | ||
385 | attach_driver_chain (port); | ||
386 | for (i = 1; i < 3; i++) { | ||
387 | struct parport *slave = port->slaves[i-1]; | ||
388 | if (slave) | ||
389 | attach_driver_chain(slave); | ||
390 | } | ||
391 | up(®istration_lock); | ||
392 | } | ||
393 | |||
394 | /** | ||
395 | * parport_remove_port - deregister a parallel port | ||
396 | * @port: parallel port to deregister | ||
397 | * | ||
398 | * When a parallel port driver is forcibly unloaded, or a | ||
399 | * parallel port becomes inaccessible, the port driver must call | ||
400 | * this function in order to deal with device drivers that still | ||
401 | * want to use it. | ||
402 | * | ||
403 | * The parport structure associated with the port has its | ||
404 | * operations structure replaced with one containing 'null' | ||
405 | * operations that return errors or just don't do anything. | ||
406 | * | ||
407 | * Any drivers that have registered themselves using | ||
408 | * parport_register_driver() are notified that the port is no | ||
409 | * longer accessible by having their detach() routines called | ||
410 | * with @port as the parameter. | ||
411 | **/ | ||
412 | |||
413 | void parport_remove_port(struct parport *port) | ||
414 | { | ||
415 | int i; | ||
416 | |||
417 | down(®istration_lock); | ||
418 | |||
419 | /* Spread the word. */ | ||
420 | detach_driver_chain (port); | ||
421 | |||
422 | #ifdef CONFIG_PARPORT_1284 | ||
423 | /* Forget the IEEE1284.3 topology of the port. */ | ||
424 | parport_daisy_fini(port); | ||
425 | for (i = 1; i < 3; i++) { | ||
426 | struct parport *slave = port->slaves[i-1]; | ||
427 | if (!slave) | ||
428 | continue; | ||
429 | detach_driver_chain(slave); | ||
430 | parport_daisy_fini(slave); | ||
431 | } | ||
432 | #endif | ||
433 | |||
434 | port->ops = &dead_ops; | ||
435 | spin_lock(&parportlist_lock); | ||
436 | list_del_init(&port->list); | ||
437 | for (i = 1; i < 3; i++) { | ||
438 | struct parport *slave = port->slaves[i-1]; | ||
439 | if (slave) | ||
440 | list_del_init(&slave->list); | ||
441 | } | ||
442 | spin_unlock(&parportlist_lock); | ||
443 | |||
444 | up(®istration_lock); | ||
445 | |||
446 | parport_proc_unregister(port); | ||
447 | |||
448 | for (i = 1; i < 3; i++) { | ||
449 | struct parport *slave = port->slaves[i-1]; | ||
450 | if (slave) | ||
451 | parport_put_port(slave); | ||
452 | } | ||
453 | } | ||
454 | |||
455 | /** | ||
456 | * parport_register_device - register a device on a parallel port | ||
457 | * @port: port to which the device is attached | ||
458 | * @name: a name to refer to the device | ||
459 | * @pf: preemption callback | ||
460 | * @kf: kick callback (wake-up) | ||
461 | * @irq_func: interrupt handler | ||
462 | * @flags: registration flags | ||
463 | * @handle: data for callback functions | ||
464 | * | ||
465 | * This function, called by parallel port device drivers, | ||
466 | * declares that a device is connected to a port, and tells the | ||
467 | * system all it needs to know. | ||
468 | * | ||
469 | * The @name is allocated by the caller and must not be | ||
470 | * deallocated until the caller calls parport_unregister_device() | ||
471 | * for that device. | ||
472 | * | ||
473 | * The preemption callback function, @pf, is called when this | ||
474 | * device driver has claimed access to the port but another | ||
475 | * device driver wants to use it. It is given @handle as its | ||
476 | * parameter, and should return zero if it is willing for the | ||
477 | * system to release the port to another driver on its behalf. | ||
478 | * If it wants to keep control of the port it should return | ||
479 | * non-zero, and no action will be taken. It is good manners for | ||
480 | * the driver to try to release the port at the earliest | ||
481 | * opportunity after its preemption callback rejects a preemption | ||
482 | * attempt. Note that if a preemption callback is happy for | ||
483 | * preemption to go ahead, there is no need to release the port; | ||
484 | * it is done automatically. This function may not block, as it | ||
485 | * may be called from interrupt context. If the device driver | ||
486 | * does not support preemption, @pf can be %NULL. | ||
487 | * | ||
488 | * The wake-up ("kick") callback function, @kf, is called when | ||
489 | * the port is available to be claimed for exclusive access; that | ||
490 | * is, parport_claim() is guaranteed to succeed when called from | ||
491 | * inside the wake-up callback function. If the driver wants to | ||
492 | * claim the port it should do so; otherwise, it need not take | ||
493 | * any action. This function may not block, as it may be called | ||
494 | * from interrupt context. If the device driver does not want to | ||
495 | * be explicitly invited to claim the port in this way, @kf can | ||
496 | * be %NULL. | ||
497 | * | ||
498 | * The interrupt handler, @irq_func, is called when an interrupt | ||
499 | * arrives from the parallel port. Note that if a device driver | ||
500 | * wants to use interrupts it should use parport_enable_irq(), | ||
501 | * and can also check the irq member of the parport structure | ||
502 | * representing the port. | ||
503 | * | ||
504 | * The parallel port (lowlevel) driver is the one that has called | ||
505 | * request_irq() and whose interrupt handler is called first. | ||
506 | * This handler does whatever needs to be done to the hardware to | ||
507 | * acknowledge the interrupt (for PC-style ports there is nothing | ||
508 | * special to be done). It then tells the IEEE 1284 code about | ||
509 | * the interrupt, which may involve reacting to an IEEE 1284 | ||
510 | * event depending on the current IEEE 1284 phase. After this, | ||
511 | * it calls @irq_func. Needless to say, @irq_func will be called | ||
512 | * from interrupt context, and may not block. | ||
513 | * | ||
514 | * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and | ||
515 | * so should only be used when sharing the port with other device | ||
516 | * drivers is impossible and would lead to incorrect behaviour. | ||
517 | * Use it sparingly! Normally, @flags will be zero. | ||
518 | * | ||
519 | * This function returns a pointer to a structure that represents | ||
520 | * the device on the port, or %NULL if there is not enough memory | ||
521 | * to allocate space for that structure. | ||
522 | **/ | ||
523 | |||
524 | struct pardevice * | ||
525 | parport_register_device(struct parport *port, const char *name, | ||
526 | int (*pf)(void *), void (*kf)(void *), | ||
527 | void (*irq_func)(int, void *, struct pt_regs *), | ||
528 | int flags, void *handle) | ||
529 | { | ||
530 | struct pardevice *tmp; | ||
531 | |||
532 | if (port->physport->flags & PARPORT_FLAG_EXCL) { | ||
533 | /* An exclusive device is registered. */ | ||
534 | printk (KERN_DEBUG "%s: no more devices allowed\n", | ||
535 | port->name); | ||
536 | return NULL; | ||
537 | } | ||
538 | |||
539 | if (flags & PARPORT_DEV_LURK) { | ||
540 | if (!pf || !kf) { | ||
541 | printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name); | ||
542 | return NULL; | ||
543 | } | ||
544 | } | ||
545 | |||
546 | /* We up our own module reference count, and that of the port | ||
547 | on which a device is to be registered, to ensure that | ||
548 | neither of us gets unloaded while we sleep in (e.g.) | ||
549 | kmalloc. | ||
550 | */ | ||
551 | if (!try_module_get(port->ops->owner)) { | ||
552 | return NULL; | ||
553 | } | ||
554 | |||
555 | parport_get_port (port); | ||
556 | |||
557 | tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL); | ||
558 | if (tmp == NULL) { | ||
559 | printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name); | ||
560 | goto out; | ||
561 | } | ||
562 | |||
563 | tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL); | ||
564 | if (tmp->state == NULL) { | ||
565 | printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name); | ||
566 | goto out_free_pardevice; | ||
567 | } | ||
568 | |||
569 | tmp->name = name; | ||
570 | tmp->port = port; | ||
571 | tmp->daisy = -1; | ||
572 | tmp->preempt = pf; | ||
573 | tmp->wakeup = kf; | ||
574 | tmp->private = handle; | ||
575 | tmp->flags = flags; | ||
576 | tmp->irq_func = irq_func; | ||
577 | tmp->waiting = 0; | ||
578 | tmp->timeout = 5 * HZ; | ||
579 | |||
580 | /* Chain this onto the list */ | ||
581 | tmp->prev = NULL; | ||
582 | /* | ||
583 | * This function must not run from an irq handler so we don't need | ||
584 | * to clear irq on the local CPU. -arca | ||
585 | */ | ||
586 | spin_lock(&port->physport->pardevice_lock); | ||
587 | |||
588 | if (flags & PARPORT_DEV_EXCL) { | ||
589 | if (port->physport->devices) { | ||
590 | spin_unlock (&port->physport->pardevice_lock); | ||
591 | printk (KERN_DEBUG | ||
592 | "%s: cannot grant exclusive access for " | ||
593 | "device %s\n", port->name, name); | ||
594 | goto out_free_all; | ||
595 | } | ||
596 | port->flags |= PARPORT_FLAG_EXCL; | ||
597 | } | ||
598 | |||
599 | tmp->next = port->physport->devices; | ||
600 | wmb(); /* Make sure that tmp->next is written before it's | ||
601 | added to the list; see comments marked 'no locking | ||
602 | required' */ | ||
603 | if (port->physport->devices) | ||
604 | port->physport->devices->prev = tmp; | ||
605 | port->physport->devices = tmp; | ||
606 | spin_unlock(&port->physport->pardevice_lock); | ||
607 | |||
608 | init_waitqueue_head(&tmp->wait_q); | ||
609 | tmp->timeslice = parport_default_timeslice; | ||
610 | tmp->waitnext = tmp->waitprev = NULL; | ||
611 | |||
612 | /* | ||
613 | * This has to be run last, since init_state may need other | ||
614 | * pardevice fields. -arca | ||
615 | */ | ||
616 | port->ops->init_state(tmp, tmp->state); | ||
617 | parport_device_proc_register(tmp); | ||
618 | return tmp; | ||
619 | |||
620 | out_free_all: | ||
621 | kfree (tmp->state); | ||
622 | out_free_pardevice: | ||
623 | kfree (tmp); | ||
624 | out: | ||
625 | parport_put_port (port); | ||
626 | module_put(port->ops->owner); | ||
627 | |||
628 | return NULL; | ||
629 | } | ||
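A hedged sketch of how a device driver would call the function above; the my_* names are hypothetical, and the callback prototypes simply mirror the parameter types used by parport_register_device() in this file:

/* Hypothetical device-driver fragment. */
static int my_preempt(void *handle)
{
	/* return non-zero to keep the port; zero lets the preemption proceed */
	return 0;
}

static void my_wakeup(void *handle)
{
	/* the port is free; parport_claim() is guaranteed to succeed here */
}

static void my_interrupt(int irq, void *handle, struct pt_regs *regs)
{
	/* called after the IEEE 1284 code has handled the port interrupt */
}

static struct pardevice *my_attach_device(struct parport *port, void *priv)
{
	/* no PARPORT_DEV_* flags: the port stays shareable */
	return parport_register_device(port, "mydev", my_preempt, my_wakeup,
				       my_interrupt, 0, priv);
}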
630 | |||
631 | /** | ||
632 | * parport_unregister_device - deregister a device on a parallel port | ||
633 | * @dev: pointer to structure representing device | ||
634 | * | ||
635 | * This undoes the effect of parport_register_device(). | ||
636 | **/ | ||
637 | |||
638 | void parport_unregister_device(struct pardevice *dev) | ||
639 | { | ||
640 | struct parport *port; | ||
641 | |||
642 | #ifdef PARPORT_PARANOID | ||
643 | if (dev == NULL) { | ||
644 | printk(KERN_ERR "parport_unregister_device: passed NULL\n"); | ||
645 | return; | ||
646 | } | ||
647 | #endif | ||
648 | |||
649 | parport_device_proc_unregister(dev); | ||
650 | |||
651 | port = dev->port->physport; | ||
652 | |||
653 | if (port->cad == dev) { | ||
654 | printk(KERN_DEBUG "%s: %s forgot to release port\n", | ||
655 | port->name, dev->name); | ||
656 | parport_release (dev); | ||
657 | } | ||
658 | |||
659 | spin_lock(&port->pardevice_lock); | ||
660 | if (dev->next) | ||
661 | dev->next->prev = dev->prev; | ||
662 | if (dev->prev) | ||
663 | dev->prev->next = dev->next; | ||
664 | else | ||
665 | port->devices = dev->next; | ||
666 | |||
667 | if (dev->flags & PARPORT_DEV_EXCL) | ||
668 | port->flags &= ~PARPORT_FLAG_EXCL; | ||
669 | |||
670 | spin_unlock(&port->pardevice_lock); | ||
671 | |||
672 | /* Make sure we haven't left any pointers around in the wait | ||
673 | * list. */ | ||
674 | spin_lock (&port->waitlist_lock); | ||
675 | if (dev->waitprev || dev->waitnext || port->waithead == dev) { | ||
676 | if (dev->waitprev) | ||
677 | dev->waitprev->waitnext = dev->waitnext; | ||
678 | else | ||
679 | port->waithead = dev->waitnext; | ||
680 | if (dev->waitnext) | ||
681 | dev->waitnext->waitprev = dev->waitprev; | ||
682 | else | ||
683 | port->waittail = dev->waitprev; | ||
684 | } | ||
685 | spin_unlock (&port->waitlist_lock); | ||
686 | |||
687 | kfree(dev->state); | ||
688 | kfree(dev); | ||
689 | |||
690 | module_put(port->ops->owner); | ||
691 | parport_put_port (port); | ||
692 | } | ||
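A short teardown sketch for the device-driver side (the names and the still_claimed flag are hypothetical driver state; as noted above, the function will warn and release for you if you forget):

/* Hypothetical unload path for a driver holding "dev". */
static void my_cleanup(struct pardevice *dev, int still_claimed)
{
	if (still_claimed)
		parport_release(dev);		/* give the port back first */
	parport_unregister_device(dev);		/* then drop the registration */
}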
693 | |||
694 | /** | ||
695 | * parport_find_number - find a parallel port by number | ||
696 | * @number: parallel port number | ||
697 | * | ||
698 | * This returns the parallel port with the specified number, or | ||
699 | * %NULL if there is none. | ||
700 | * | ||
701 | * There is an implicit parport_get_port() done already; to throw | ||
702 | * away the reference to the port that parport_find_number() | ||
703 | * gives you, use parport_put_port(). | ||
704 | */ | ||
705 | |||
706 | struct parport *parport_find_number (int number) | ||
707 | { | ||
708 | struct parport *port, *result = NULL; | ||
709 | |||
710 | if (list_empty(&portlist)) | ||
711 | get_lowlevel_driver (); | ||
712 | |||
713 | spin_lock (&parportlist_lock); | ||
714 | list_for_each_entry(port, &portlist, list) { | ||
715 | if (port->number == number) { | ||
716 | result = parport_get_port (port); | ||
717 | break; | ||
718 | } | ||
719 | } | ||
720 | spin_unlock (&parportlist_lock); | ||
721 | return result; | ||
722 | } | ||
723 | |||
724 | /** | ||
725 | * parport_find_base - find a parallel port by base address | ||
726 | * @base: base I/O address | ||
727 | * | ||
728 | * This returns the parallel port with the specified base | ||
729 | * address, or %NULL if there is none. | ||
730 | * | ||
731 | * There is an implicit parport_get_port() done already; to throw | ||
732 | * away the reference to the port that parport_find_base() | ||
733 | * gives you, use parport_put_port(). | ||
734 | */ | ||
735 | |||
736 | struct parport *parport_find_base (unsigned long base) | ||
737 | { | ||
738 | struct parport *port, *result = NULL; | ||
739 | |||
740 | if (list_empty(&portlist)) | ||
741 | get_lowlevel_driver (); | ||
742 | |||
743 | spin_lock (&parportlist_lock); | ||
744 | list_for_each_entry(port, &portlist, list) { | ||
745 | if (port->base == base) { | ||
746 | result = parport_get_port (port); | ||
747 | break; | ||
748 | } | ||
749 | } | ||
750 | spin_unlock (&parportlist_lock); | ||
751 | return result; | ||
752 | } | ||
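A brief usage sketch of the lookup helpers (port number 0 chosen arbitrarily; the helper name is hypothetical):

static void my_inspect_port0(void)
{
	struct parport *p = parport_find_number(0);

	if (!p)
		return;
	printk(KERN_INFO "port 0 is %s at %#lx\n", p->name, p->base);
	parport_put_port(p);	/* balance the implicit get done by the lookup */
}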
753 | |||
754 | /** | ||
755 | * parport_claim - claim access to a parallel port device | ||
756 | * @dev: pointer to structure representing a device on the port | ||
757 | * | ||
758 | * This function will not block and so can be used from interrupt | ||
759 | * context. If parport_claim() succeeds in claiming access to | ||
760 | * the port it returns zero and the port is available to use. It | ||
761 | * may fail (returning non-zero) if the port is in use by another | ||
762 | * driver and that driver is not willing to relinquish control of | ||
763 | * the port. | ||
764 | **/ | ||
765 | |||
766 | int parport_claim(struct pardevice *dev) | ||
767 | { | ||
768 | struct pardevice *oldcad; | ||
769 | struct parport *port = dev->port->physport; | ||
770 | unsigned long flags; | ||
771 | |||
772 | if (port->cad == dev) { | ||
773 | printk(KERN_INFO "%s: %s already owner\n", | ||
774 | dev->port->name,dev->name); | ||
775 | return 0; | ||
776 | } | ||
777 | |||
778 | /* Preempt any current device */ | ||
779 | write_lock_irqsave (&port->cad_lock, flags); | ||
780 | if ((oldcad = port->cad) != NULL) { | ||
781 | if (oldcad->preempt) { | ||
782 | if (oldcad->preempt(oldcad->private)) | ||
783 | goto blocked; | ||
784 | port->ops->save_state(port, dev->state); | ||
785 | } else | ||
786 | goto blocked; | ||
787 | |||
788 | if (port->cad != oldcad) { | ||
789 | /* I think we'll actually deadlock rather than | ||
790 | get here, but just in case.. */ | ||
791 | printk(KERN_WARNING | ||
792 | "%s: %s released port when preempted!\n", | ||
793 | port->name, oldcad->name); | ||
794 | if (port->cad) | ||
795 | goto blocked; | ||
796 | } | ||
797 | } | ||
798 | |||
799 | /* Can't fail from now on, so mark ourselves as no longer waiting. */ | ||
800 | if (dev->waiting & 1) { | ||
801 | dev->waiting = 0; | ||
802 | |||
803 | /* Take ourselves out of the wait list again. */ | ||
804 | spin_lock_irq (&port->waitlist_lock); | ||
805 | if (dev->waitprev) | ||
806 | dev->waitprev->waitnext = dev->waitnext; | ||
807 | else | ||
808 | port->waithead = dev->waitnext; | ||
809 | if (dev->waitnext) | ||
810 | dev->waitnext->waitprev = dev->waitprev; | ||
811 | else | ||
812 | port->waittail = dev->waitprev; | ||
813 | spin_unlock_irq (&port->waitlist_lock); | ||
814 | dev->waitprev = dev->waitnext = NULL; | ||
815 | } | ||
816 | |||
817 | /* Now we do the change of devices */ | ||
818 | port->cad = dev; | ||
819 | |||
820 | #ifdef CONFIG_PARPORT_1284 | ||
821 | /* If it's a mux port, select it. */ | ||
822 | if (dev->port->muxport >= 0) { | ||
823 | /* FIXME */ | ||
824 | port->muxsel = dev->port->muxport; | ||
825 | } | ||
826 | |||
827 | /* If it's a daisy chain device, select it. */ | ||
828 | if (dev->daisy >= 0) { | ||
829 | /* This could be lazier. */ | ||
830 | if (!parport_daisy_select (port, dev->daisy, | ||
831 | IEEE1284_MODE_COMPAT)) | ||
832 | port->daisy = dev->daisy; | ||
833 | } | ||
834 | #endif /* IEEE1284.3 support */ | ||
835 | |||
836 | /* Restore control registers */ | ||
837 | port->ops->restore_state(port, dev->state); | ||
838 | write_unlock_irqrestore(&port->cad_lock, flags); | ||
839 | dev->time = jiffies; | ||
840 | return 0; | ||
841 | |||
842 | blocked: | ||
843 | /* If this is the first time we tried to claim the port, register an | ||
844 | interest. This is only allowed for devices sleeping in | ||
845 | parport_claim_or_block(), or those with a wakeup function. */ | ||
846 | |||
847 | /* The cad_lock is still held for writing here */ | ||
848 | if (dev->waiting & 2 || dev->wakeup) { | ||
849 | spin_lock (&port->waitlist_lock); | ||
850 | if (test_and_set_bit(0, &dev->waiting) == 0) { | ||
851 | /* First add ourselves to the end of the wait list. */ | ||
852 | dev->waitnext = NULL; | ||
853 | dev->waitprev = port->waittail; | ||
854 | if (port->waittail) { | ||
855 | port->waittail->waitnext = dev; | ||
856 | port->waittail = dev; | ||
857 | } else | ||
858 | port->waithead = port->waittail = dev; | ||
859 | } | ||
860 | spin_unlock (&port->waitlist_lock); | ||
861 | } | ||
862 | write_unlock_irqrestore (&port->cad_lock, flags); | ||
863 | return -EAGAIN; | ||
864 | } | ||
865 | |||
866 | /** | ||
867 | * parport_claim_or_block - claim access to a parallel port device | ||
868 | * @dev: pointer to structure representing a device on the port | ||
869 | * | ||
870 | * This behaves like parport_claim(), but will block if necessary | ||
871 | * to wait for the port to be free. A return value of 1 | ||
872 | * indicates that it slept; 0 means that it succeeded without | ||
873 | * needing to sleep. A negative error code indicates failure. | ||
874 | **/ | ||
875 | |||
876 | int parport_claim_or_block(struct pardevice *dev) | ||
877 | { | ||
878 | int r; | ||
879 | |||
880 | /* Signal to parport_claim() that we can wait even without a | ||
881 | wakeup function. */ | ||
882 | dev->waiting = 2; | ||
883 | |||
884 | /* Try to claim the port. If this fails, we need to sleep. */ | ||
885 | r = parport_claim(dev); | ||
886 | if (r == -EAGAIN) { | ||
887 | #ifdef PARPORT_DEBUG_SHARING | ||
888 | printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name); | ||
889 | #endif | ||
890 | /* | ||
891 | * FIXME!!! Use the proper locking for dev->waiting, | ||
892 | * and make this use the "wait_event_interruptible()" | ||
893 | * interfaces. The cli/sti that used to be here | ||
894 | * did nothing. | ||
895 | * | ||
896 | * See also parport_release() | ||
897 | */ | ||
898 | |||
899 | /* If dev->waiting is clear now, an interrupt | ||
900 | gave us the port and we would deadlock if we slept. */ | ||
901 | if (dev->waiting) { | ||
902 | interruptible_sleep_on (&dev->wait_q); | ||
903 | if (signal_pending (current)) { | ||
904 | return -EINTR; | ||
905 | } | ||
906 | r = 1; | ||
907 | } else { | ||
908 | r = 0; | ||
909 | #ifdef PARPORT_DEBUG_SHARING | ||
910 | printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n", | ||
911 | dev->name); | ||
912 | #endif | ||
913 | } | ||
914 | |||
915 | #ifdef PARPORT_DEBUG_SHARING | ||
916 | if (dev->port->physport->cad != dev) | ||
917 | printk(KERN_DEBUG "%s: exiting parport_claim_or_block " | ||
918 | "but %s owns port!\n", dev->name, | ||
919 | dev->port->physport->cad ? | ||
920 | dev->port->physport->cad->name:"nobody"); | ||
921 | #endif | ||
922 | } | ||
923 | dev->waiting = 0; | ||
924 | return r; | ||
925 | } | ||
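A hedged sketch of the normal claim/use/release cycle from process context; dev is assumed to come from parport_register_device(), and parport_write_data() is the usual <linux/parport.h> wrapper around ops->write_data:

/* Hypothetical helper: claim the port, poke the data lines, release. */
static int my_do_transfer(struct pardevice *dev)
{
	int ret = parport_claim_or_block(dev);
	if (ret < 0)
		return ret;			/* -EINTR: a signal arrived while sleeping */

	/* ret is 1 if we had to sleep for the port, 0 if it was free */
	parport_write_data(dev->port, 0x55);	/* example I/O while owning the port */

	parport_release(dev);			/* wake the next waiter, if any */
	return 0;
}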
926 | |||
927 | /** | ||
928 | * parport_release - give up access to a parallel port device | ||
929 | * @dev: pointer to structure representing parallel port device | ||
930 | * | ||
931 | * This function cannot fail, but it should not be called without | ||
932 | * the port claimed. Similarly, if the port is already claimed | ||
933 | * you should not try claiming it again. | ||
934 | **/ | ||
935 | |||
936 | void parport_release(struct pardevice *dev) | ||
937 | { | ||
938 | struct parport *port = dev->port->physport; | ||
939 | struct pardevice *pd; | ||
940 | unsigned long flags; | ||
941 | |||
942 | /* Make sure that dev is the current device */ | ||
943 | write_lock_irqsave(&port->cad_lock, flags); | ||
944 | if (port->cad != dev) { | ||
945 | write_unlock_irqrestore (&port->cad_lock, flags); | ||
946 | printk(KERN_WARNING "%s: %s tried to release parport " | ||
947 | "when not owner\n", port->name, dev->name); | ||
948 | return; | ||
949 | } | ||
950 | |||
951 | #ifdef CONFIG_PARPORT_1284 | ||
952 | /* If this is on a mux port, deselect it. */ | ||
953 | if (dev->port->muxport >= 0) { | ||
954 | /* FIXME */ | ||
955 | port->muxsel = -1; | ||
956 | } | ||
957 | |||
958 | /* If this is a daisy device, deselect it. */ | ||
959 | if (dev->daisy >= 0) { | ||
960 | parport_daisy_deselect_all (port); | ||
961 | port->daisy = -1; | ||
962 | } | ||
963 | #endif | ||
964 | |||
965 | port->cad = NULL; | ||
966 | write_unlock_irqrestore(&port->cad_lock, flags); | ||
967 | |||
968 | /* Save control registers */ | ||
969 | port->ops->save_state(port, dev->state); | ||
970 | |||
971 | /* If anybody is waiting, find out who's been there longest and | ||
972 | then wake them up. (Note: no locking required) */ | ||
973 | /* !!! LOCKING IS NEEDED HERE */ | ||
974 | for (pd = port->waithead; pd; pd = pd->waitnext) { | ||
975 | if (pd->waiting & 2) { /* sleeping in claim_or_block */ | ||
976 | parport_claim(pd); | ||
977 | if (waitqueue_active(&pd->wait_q)) | ||
978 | wake_up_interruptible(&pd->wait_q); | ||
979 | return; | ||
980 | } else if (pd->wakeup) { | ||
981 | pd->wakeup(pd->private); | ||
982 | if (dev->port->cad) /* racy but no matter */ | ||
983 | return; | ||
984 | } else { | ||
985 | printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name); | ||
986 | } | ||
987 | } | ||
988 | |||
989 | /* Nobody was waiting, so walk the list to see if anyone is | ||
990 | interested in being woken up. (Note: no locking required) */ | ||
991 | /* !!! LOCKING IS NEEDED HERE */ | ||
992 | for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) { | ||
993 | if (pd->wakeup && pd != dev) | ||
994 | pd->wakeup(pd->private); | ||
995 | } | ||
996 | } | ||
997 | |||
998 | /* Exported symbols for modules. */ | ||
999 | |||
1000 | EXPORT_SYMBOL(parport_claim); | ||
1001 | EXPORT_SYMBOL(parport_claim_or_block); | ||
1002 | EXPORT_SYMBOL(parport_release); | ||
1003 | EXPORT_SYMBOL(parport_register_port); | ||
1004 | EXPORT_SYMBOL(parport_announce_port); | ||
1005 | EXPORT_SYMBOL(parport_remove_port); | ||
1006 | EXPORT_SYMBOL(parport_register_driver); | ||
1007 | EXPORT_SYMBOL(parport_unregister_driver); | ||
1008 | EXPORT_SYMBOL(parport_register_device); | ||
1009 | EXPORT_SYMBOL(parport_unregister_device); | ||
1010 | EXPORT_SYMBOL(parport_put_port); | ||
1011 | EXPORT_SYMBOL(parport_find_number); | ||
1012 | EXPORT_SYMBOL(parport_find_base); | ||
1013 | |||
1014 | MODULE_LICENSE("GPL"); | ||