author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /net/irda/irqueue.c

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'net/irda/irqueue.c')
-rw-r--r--   net/irda/irqueue.c   915
1 file changed, 915 insertions(+), 0 deletions(-)
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
new file mode 100644
index 000000000000..b0dd3ea35999
--- /dev/null
+++ b/net/irda/irqueue.c
@@ -0,0 +1,915 @@
1 | /********************************************************************* | ||
2 | * | ||
3 | * Filename: irqueue.c | ||
4 | * Version: 0.3 | ||
5 | * Description: General queue implementation | ||
6 | * Status: Experimental. | ||
7 | * Author: Dag Brattli <dagb@cs.uit.no> | ||
8 | * Created at: Tue Jun 9 13:29:31 1998 | ||
9 | * Modified at: Sun Dec 12 13:48:22 1999 | ||
10 | * Modified by: Dag Brattli <dagb@cs.uit.no> | ||
11 | * Modified at: Thu Jan 4 14:29:10 CET 2001 | ||
12 | * Modified by: Marc Zyngier <mzyngier@freesurf.fr> | ||
13 | * | ||
14 | * Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no> | ||
15 | * Copyright (C) 1998, Dag Brattli, | ||
16 | * All Rights Reserved. | ||
17 | * | ||
18 | * This code is taken from the Vortex Operating System written by Aage | ||
19 | * Kvalnes. Aage has agreed that this code can use the GPL licence, | ||
20 | * although he does not use that licence in his own code. | ||
21 | * | ||
22 | * This copyright does however _not_ include the ELF hash() function | ||
23 | * whose licence or copyright I currently don't know. Please | ||
24 | * inform me if you know. | ||
25 | * | ||
26 | * This program is free software; you can redistribute it and/or | ||
27 | * modify it under the terms of the GNU General Public License as | ||
28 | * published by the Free Software Foundation; either version 2 of | ||
29 | * the License, or (at your option) any later version. | ||
30 | * | ||
31 | * Neither Dag Brattli nor the University of Tromsø admits liability | ||
32 | * nor provides warranty for any of this software. This material is | ||
33 | * provided "AS-IS" and at no charge. | ||
34 | * | ||
35 | ********************************************************************/ | ||
36 | |||
37 | /* | ||
38 | * NOTE : | ||
39 | * There are various problems with this package : | ||
40 | * o the hash function for ints is pathetic (but could be changed) | ||
41 | * o locking is sometimes suspicious (especially during enumeration) | ||
42 | * o most users have only a few elements (== overhead) | ||
43 | * o most users never use search, so they don't benefit from hashing | ||
44 | * Problems already fixed : | ||
45 | * o not 64 bit compliant (most users do hashv = (int) self) | ||
46 | * o hashbin_remove() is broken => use hashbin_remove_this() | ||
47 | * I think most users would be better served by a simple linked list | ||
48 | * (like include/linux/list.h) with a global spinlock per list. | ||
49 | * Jean II | ||
50 | */ | ||
51 | |||
52 | /* | ||
53 | * Notes on the concurrent access to hashbin and other SMP issues | ||
54 | * ------------------------------------------------------------- | ||
55 | * Hashbins are very often in the IrDA stack a global repository of | ||
56 | * information, and therefore used in a very asynchronous manner following | ||
57 | * various events (driver calls, timers, user calls...). | ||
58 | * Therefore, very often it is highly important to consider the | ||
59 | * management of concurrent access to the hashbin and how to guarantee the | ||
60 | * consistency of the operations on it. | ||
61 | * | ||
62 | * First, we need to define the objective of locking : | ||
63 | * 1) Protect user data (content pointed by the hashbin) | ||
64 | * 2) Protect hashbin structure itself (linked list in each bin) | ||
65 | * | ||
66 | * OLD LOCKING | ||
67 | * ----------- | ||
68 | * | ||
69 | * The previous locking strategies, HB_LOCAL and HB_GLOBAL, were | ||
70 | * both inadequate in *both* aspects. | ||
71 | * o HB_GLOBAL was using a spinlock for each bin (local locking). | ||
72 | * o HB_LOCAL was disabling irq on *all* CPUs, so it was in effect | ||
73 | * a single global semaphore. | ||
74 | * The problems were : | ||
75 | * A) Global irq disabling is no longer supported by the kernel | ||
76 | * B) No protection for the hashbin struct global data | ||
77 | * o hashbin_delete() | ||
78 | * o hb_current | ||
79 | * C) No protection for user data in some cases | ||
80 | * | ||
81 | * A) HB_LOCAL uses global irq disabling, so it doesn't work on kernel | ||
82 | * 2.5.X. Even when it is supported (kernel 2.4.X and earlier), its | ||
83 | * performance is not satisfactory on SMP setups. Most hashbins were | ||
84 | * HB_LOCAL, so (A) definitely needs fixing. | ||
85 | * B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL | ||
86 | * locks only the individual bins, it will never be able to lock the | ||
87 | * global data, so it can't do (B). | ||
88 | * C) Some functions return pointer to data that is still in the | ||
89 | * hashbin : | ||
90 | * o hashbin_find() | ||
91 | * o hashbin_get_first() | ||
92 | * o hashbin_get_next() | ||
93 | * As the data is still in the hashbin, it may be changed or freed | ||
94 | * while the caller is examining the data. In those cases, locking can't | ||
95 | * be done within the hashbin, but must cover the caller's use of | ||
96 | * the data. | ||
97 | * The caller can easily do this with HB_LOCAL (just disable irqs). | ||
98 | * However, this is impossible with HB_GLOBAL because the caller has no | ||
99 | * way to know the proper bin, so it doesn't know which spinlock to use. | ||
100 | * | ||
101 | * Quick summary : can no longer use HB_LOCAL, and HB_GLOBAL is | ||
102 | * fundamentally broken and will never work. | ||
103 | * | ||
104 | * NEW LOCKING | ||
105 | * ----------- | ||
106 | * | ||
107 | * To fix those problems, I've introduced a few changes in the | ||
108 | * hashbin locking : | ||
109 | * 1) New HB_LOCK scheme | ||
110 | * 2) hashbin->hb_spinlock | ||
111 | * 3) New hashbin usage policy | ||
112 | * | ||
113 | * HB_LOCK : | ||
114 | * ------- | ||
115 | * HB_LOCK is a locking scheme intermediate between the old HB_LOCAL | ||
116 | * and HB_GLOBAL. It uses a single spinlock to protect the whole content | ||
117 | * of the hashbin. As it is a single spinlock, it can protect the global | ||
118 | * data of the hashbin and not only the bins themselves. | ||
119 | * HB_LOCK can only protect some of the hashbin calls, so it only locks | ||
120 | * calls that can be made 100% safe and leaves other calls unprotected. | ||
121 | * HB_LOCK is in theory slower than HB_GLOBAL, but as the hashbin | ||
122 | * content is always small, contention is not high, so it doesn't matter | ||
123 | * much. HB_LOCK is probably faster than HB_LOCAL. | ||
124 | * | ||
125 | * hashbin->hb_spinlock : | ||
126 | * -------------------- | ||
127 | * The spinlock that HB_LOCK uses is available to the caller, so that | ||
128 | * the caller can protect unprotected calls (see below). | ||
129 | * If the caller wants to do all of its own locking (HB_NOLOCK), it | ||
130 | * can do so and may safely use this spinlock. | ||
131 | * Locking is done like this : | ||
132 | * spin_lock_irqsave(&hashbin->hb_spinlock, flags); | ||
133 | * Releasing the lock : | ||
134 | * spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); | ||
135 | * | ||
136 | * Safe & Protected calls : | ||
137 | * ---------------------- | ||
138 | * The following calls are safe or protected via HB_LOCK : | ||
139 | * o hashbin_new() -> safe | ||
140 | * o hashbin_delete() | ||
141 | * o hashbin_insert() | ||
142 | * o hashbin_remove_first() | ||
143 | * o hashbin_remove() | ||
144 | * o hashbin_remove_this() | ||
145 | * o HASHBIN_GET_SIZE() -> atomic | ||
146 | * | ||
147 | * The following calls only protect the hashbin itself : | ||
148 | * o hashbin_lock_find() | ||
149 | * o hashbin_find_next() | ||
150 | * | ||
151 | * Unprotected calls : | ||
152 | * ----------------- | ||
153 | * The following calls need to be protected by the caller : | ||
154 | * o hashbin_find() | ||
155 | * o hashbin_get_first() | ||
156 | * o hashbin_get_next() | ||
157 | * | ||
158 | * Locking Policy : | ||
159 | * -------------- | ||
160 | * If the hashbin is used only in a single thread of execution | ||
161 | * (explicitly or implicitly), you can use HB_NOLOCK. | ||
162 | * If the calling module already provides concurrent access protection, | ||
163 | * you may use HB_NOLOCK. | ||
164 | * | ||
165 | * In all other cases, you need to use HB_LOCK and lock the hashbin | ||
166 | * every time before calling one of the unprotected calls. You also must | ||
167 | * use the pointer returned by the unprotected call within the locked | ||
168 | * region. | ||
169 | * | ||
170 | * Extra care for enumeration : | ||
171 | * -------------------------- | ||
172 | * hashbin_get_first() and hashbin_get_next() use the hashbin to | ||
173 | * store the current position, in hb_current. | ||
174 | * As long as the hashbin remains locked, this is safe. If you unlock | ||
175 | * the hashbin, the current position may change if anybody else modifies | ||
176 | * or enumerates the hashbin. | ||
177 | * Summary : do the full enumeration while locked. | ||
178 | * | ||
179 | * Alternatively, you may use hashbin_find_next(). But, this will | ||
180 | * be slower, is more complex to use and doesn't protect the hashbin | ||
181 | * content. So, care is needed here as well. | ||
182 | * | ||
183 | * Other issues : | ||
184 | * ------------ | ||
185 | * I believe that we are overdoing it by using spin_lock_irqsave() | ||
186 | * and we should use only spin_lock_bh() or similar. But, I don't have | ||
187 | * the balls to try it out. | ||
189 | * Don't assume that because hashbins are now (somewhat) SMP safe, | ||
190 | * the rest of the code is. Higher layers tend to be safest, | ||
190 | * but LAP and LMP would need some serious dedicated love. | ||
191 | * | ||
192 | * Jean II | ||
193 | */ | ||
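To make the policy above concrete, here is a minimal caller-side sketch (the struct, its fields and the function are hypothetical, not part of this file): a full enumeration done while holding hb_spinlock, with the returned pointers dereferenced only inside the locked region, as the "Extra care for enumeration" paragraph requires.

    /* Hypothetical object kept in a hashbin. As elsewhere in the IrDA
     * stack, the irda_queue_t header is assumed to be the first member so
     * that queue pointers and object pointers are interchangeable. */
    struct my_obj {
        irda_queue_t q;          /* must be first */
        int          value;
    };

    static int count_big_objects(hashbin_t *hashbin)
    {
        struct my_obj *obj;
        unsigned long flags;
        int count = 0;

        /* Hold the lock over the whole walk, as required for the
         * unprotected hashbin_get_first()/hashbin_get_next() calls. */
        spin_lock_irqsave(&hashbin->hb_spinlock, flags);

        obj = (struct my_obj *) hashbin_get_first(hashbin);
        while (obj != NULL) {
            if (obj->value > 42)         /* data touched only while locked */
                count++;
            obj = (struct my_obj *) hashbin_get_next(hashbin);
        }

        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

        return count;
    }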
194 | #include <linux/module.h> | ||
195 | |||
196 | #include <net/irda/irda.h> | ||
197 | #include <net/irda/irqueue.h> | ||
198 | |||
199 | /************************ QUEUE SUBROUTINES ************************/ | ||
200 | |||
201 | /* | ||
202 | * Hashbin | ||
203 | */ | ||
204 | #define GET_HASHBIN(x) ( x & HASHBIN_MASK ) | ||
205 | |||
206 | /* | ||
207 | * Function hash (name) | ||
208 | * | ||
209 | * This function hashes the input string 'name' using the ELF hash | ||
210 | * function for strings. | ||
211 | */ | ||
212 | static __u32 hash( const char* name) | ||
213 | { | ||
214 | __u32 h = 0; | ||
215 | __u32 g; | ||
216 | |||
217 | while(*name) { | ||
218 | h = (h<<4) + *name++; | ||
219 | if ((g = (h & 0xf0000000))) | ||
220 | h ^= g >> 24; | ||
221 | h &= ~g; | ||
222 | } | ||
223 | return h; | ||
224 | } | ||
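A hand-worked trace of the function above for the name "irda" (each step follows directly from the code; the final bin additionally assumes the usual eight-bin layout from irqueue.h, i.e. HASHBIN_MASK == 0x7):

    'i' (0x69):  h = (0x0    << 4) + 0x69 = 0x00069,  g = 0
    'r' (0x72):  h = (0x69   << 4) + 0x72 = 0x00702,  g = 0
    'd' (0x64):  h = (0x702  << 4) + 0x64 = 0x07084,  g = 0
    'a' (0x61):  h = (0x7084 << 4) + 0x61 = 0x708a1,  g = 0

    hash("irda") = 0x708a1,  GET_HASHBIN(0x708a1) = 0x708a1 & 0x7 = bin 1

For short names the high nibble never becomes non-zero, so the g correction never fires and the hash reduces to a plain shift-and-add.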
225 | |||
226 | /* | ||
227 | * Function enqueue_first (queue, proc) | ||
228 | * | ||
229 | * Insert item first in queue. | ||
230 | * | ||
231 | */ | ||
232 | static void enqueue_first(irda_queue_t **queue, irda_queue_t* element) | ||
233 | { | ||
234 | |||
235 | IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); | ||
236 | |||
237 | /* | ||
238 | * Check if queue is empty. | ||
239 | */ | ||
240 | if ( *queue == NULL ) { | ||
241 | /* | ||
242 | * Queue is empty. Insert one element into the queue. | ||
243 | */ | ||
244 | element->q_next = element->q_prev = *queue = element; | ||
245 | |||
246 | } else { | ||
247 | /* | ||
248 | * Queue is not empty. Insert element into front of queue. | ||
249 | */ | ||
250 | element->q_next = (*queue); | ||
251 | (*queue)->q_prev->q_next = element; | ||
252 | element->q_prev = (*queue)->q_prev; | ||
253 | (*queue)->q_prev = element; | ||
254 | (*queue) = element; | ||
255 | } | ||
256 | } | ||
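A hand-worked trace of the pointer updates above, for an empty queue receiving A and then B (head stands for the caller's *queue pointer):

    enqueue_first(&head, A):  head = A,  A->q_next = A,  A->q_prev = A
    enqueue_first(&head, B):  head = B,  B->q_next = A,  B->q_prev = A,
                              A->q_next = B,  A->q_prev = B

The list stays circular and doubly linked, with the head always pointing at the most recently enqueued element.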
257 | |||
258 | |||
259 | /* | ||
260 | * Function dequeue (queue) | ||
261 | * | ||
262 | * Remove first entry in queue | ||
263 | * | ||
264 | */ | ||
265 | static irda_queue_t *dequeue_first(irda_queue_t **queue) | ||
266 | { | ||
267 | irda_queue_t *ret; | ||
268 | |||
269 | IRDA_DEBUG( 4, "dequeue_first()\n"); | ||
270 | |||
271 | /* | ||
272 | * Set return value | ||
273 | */ | ||
274 | ret = *queue; | ||
275 | |||
276 | if ( *queue == NULL ) { | ||
277 | /* | ||
278 | * Queue was empty. | ||
279 | */ | ||
280 | } else if ( (*queue)->q_next == *queue ) { | ||
281 | /* | ||
282 | * Queue only contained a single element. It will now be | ||
283 | * empty. | ||
284 | */ | ||
285 | *queue = NULL; | ||
286 | } else { | ||
287 | /* | ||
288 | * Queue contained several elements. Remove the first one. | ||
289 | */ | ||
290 | (*queue)->q_prev->q_next = (*queue)->q_next; | ||
291 | (*queue)->q_next->q_prev = (*queue)->q_prev; | ||
292 | *queue = (*queue)->q_next; | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * Return the removed entry (or NULL if the queue was empty). | ||
297 | */ | ||
298 | return ret; | ||
299 | } | ||
300 | |||
301 | /* | ||
302 | * Function dequeue_general (queue, element) | ||
303 | * | ||
304 | * Remove a specific element from the queue | ||
305 | */ | ||
306 | static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element) | ||
307 | { | ||
308 | irda_queue_t *ret; | ||
309 | |||
310 | IRDA_DEBUG( 4, "dequeue_general()\n"); | ||
311 | |||
312 | /* | ||
313 | * Set return value | ||
314 | */ | ||
315 | ret = *queue; | ||
316 | |||
317 | if ( *queue == NULL ) { | ||
318 | /* | ||
319 | * Queue was empty. | ||
320 | */ | ||
321 | } else if ( (*queue)->q_next == *queue ) { | ||
322 | /* | ||
323 | * Queue only contained a single element. It will now be | ||
324 | * empty. | ||
325 | */ | ||
326 | *queue = NULL; | ||
327 | |||
328 | } else { | ||
329 | /* | ||
330 | * Remove specific element. | ||
331 | */ | ||
332 | element->q_prev->q_next = element->q_next; | ||
333 | element->q_next->q_prev = element->q_prev; | ||
334 | if ( (*queue) == element) | ||
335 | (*queue) = element->q_next; | ||
336 | } | ||
337 | |||
338 | /* | ||
339 | * Return the removed entry (or NULL if the queue was empty). | ||
340 | */ | ||
341 | return ret; | ||
342 | } | ||
343 | |||
344 | /************************ HASHBIN MANAGEMENT ************************/ | ||
345 | |||
346 | /* | ||
347 | * Function hashbin_new ( type ) | ||
348 | * | ||
349 | * Create a new hashbin | ||
350 | * | ||
351 | */ | ||
352 | hashbin_t *hashbin_new(int type) | ||
353 | { | ||
354 | hashbin_t* hashbin; | ||
355 | |||
356 | /* | ||
357 | * Allocate new hashbin | ||
358 | */ | ||
359 | hashbin = kmalloc( sizeof(hashbin_t), GFP_ATOMIC); | ||
360 | if (!hashbin) | ||
361 | return NULL; | ||
362 | |||
363 | /* | ||
364 | * Initialize structure | ||
365 | */ | ||
366 | memset(hashbin, 0, sizeof(hashbin_t)); | ||
367 | hashbin->hb_type = type; | ||
368 | hashbin->magic = HB_MAGIC; | ||
369 | //hashbin->hb_current = NULL; | ||
370 | |||
371 | /* Make sure all spinlock's are unlocked */ | ||
372 | if ( hashbin->hb_type & HB_LOCK ) { | ||
373 | spin_lock_init(&hashbin->hb_spinlock); | ||
374 | } | ||
375 | |||
376 | return hashbin; | ||
377 | } | ||
378 | EXPORT_SYMBOL(hashbin_new); | ||
379 | |||
380 | |||
381 | /* | ||
382 | * Function hashbin_delete (hashbin, free_func) | ||
383 | * | ||
384 | * Destroy the hashbin. The free_func can be a user-supplied routine | ||
385 | * for deallocating each queued entry if it's complex. If not, the user | ||
386 | * can just supply kfree, which should take care of the job. | ||
387 | */ | ||
388 | int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) | ||
389 | { | ||
390 | irda_queue_t* queue; | ||
391 | unsigned long flags = 0; | ||
392 | int i; | ||
393 | |||
394 | IRDA_ASSERT(hashbin != NULL, return -1;); | ||
395 | IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;); | ||
396 | |||
397 | /* Synchronize */ | ||
398 | if ( hashbin->hb_type & HB_LOCK ) { | ||
399 | spin_lock_irqsave(&hashbin->hb_spinlock, flags); | ||
400 | } | ||
401 | |||
402 | /* | ||
403 | * Free the entries in the hashbin, TODO: use hashbin_clear when | ||
404 | * it has been shown to work | ||
405 | */ | ||
406 | for (i = 0; i < HASHBIN_SIZE; i ++ ) { | ||
407 | queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); | ||
408 | while (queue ) { | ||
409 | if (free_func) | ||
410 | (*free_func)(queue); | ||
411 | queue = dequeue_first( | ||
412 | (irda_queue_t**) &hashbin->hb_queue[i]); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | /* Cleanup local data */ | ||
417 | hashbin->hb_current = NULL; | ||
418 | hashbin->magic = ~HB_MAGIC; | ||
419 | |||
420 | /* Release lock */ | ||
421 | if ( hashbin->hb_type & HB_LOCK) { | ||
422 | spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); | ||
423 | } | ||
424 | |||
425 | /* | ||
426 | * Free the hashbin structure | ||
427 | */ | ||
428 | kfree(hashbin); | ||
429 | |||
430 | return 0; | ||
431 | } | ||
432 | EXPORT_SYMBOL(hashbin_delete); | ||
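A minimal lifecycle sketch (the helper is hypothetical, not part of this file), assuming each queued entry is a single kmalloc() allocation headed by its irda_queue_t, so that kfree is a valid free_func as the comment above suggests:

    static int demo_lifecycle(void)
    {
        hashbin_t *hb = hashbin_new(HB_LOCK);   /* one lock for the whole bin */

        if (hb == NULL)
            return -ENOMEM;

        /* ... hashbin_insert()/hashbin_find()/... on hb ... */

        /* Tear down: kfree() is applied to every entry still queued. */
        return hashbin_delete(hb, (FREE_FUNC) kfree);
    }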
433 | |||
434 | /********************* HASHBIN LIST OPERATIONS *********************/ | ||
435 | |||
436 | /* | ||
437 | * Function hashbin_insert (hashbin, entry, name) | ||
438 | * | ||
439 | * Insert an entry into the hashbin | ||
440 | * | ||
441 | */ | ||
442 | void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv, | ||
443 | const char* name) | ||
444 | { | ||
445 | unsigned long flags = 0; | ||
446 | int bin; | ||
447 | |||
448 | IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); | ||
449 | |||
450 | IRDA_ASSERT( hashbin != NULL, return;); | ||
451 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;); | ||
452 | |||
453 | /* | ||
454 | * Locate hashbin | ||
455 | */ | ||
456 | if ( name ) | ||
457 | hashv = hash( name ); | ||
458 | bin = GET_HASHBIN( hashv ); | ||
459 | |||
460 | /* Synchronize */ | ||
461 | if ( hashbin->hb_type & HB_LOCK ) { | ||
462 | spin_lock_irqsave(&hashbin->hb_spinlock, flags); | ||
463 | } /* Default is no-lock */ | ||
464 | |||
465 | /* | ||
466 | * Store name and key | ||
467 | */ | ||
468 | entry->q_hash = hashv; | ||
469 | if ( name ) | ||
470 | strlcpy( entry->q_name, name, sizeof(entry->q_name)); | ||
471 | |||
472 | /* | ||
473 | * Insert new entry first | ||
474 | */ | ||
475 | enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ], | ||
476 | entry); | ||
477 | hashbin->hb_size++; | ||
478 | |||
479 | /* Release lock */ | ||
480 | if ( hashbin->hb_type & HB_LOCK ) { | ||
481 | spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); | ||
482 | } /* Default is no-lock */ | ||
483 | } | ||
484 | EXPORT_SYMBOL(hashbin_insert); | ||
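A sketch of the corresponding insert, reusing the hypothetical struct my_obj from the enumeration sketch near the top of the file (irda_queue_t as first member plus an int value). A NULL name means the caller-chosen hashv is used as the key; the object's own address is the one simple way to keep that key unique:

    static struct my_obj *add_obj(hashbin_t *hb, int value)
    {
        struct my_obj *obj = kmalloc(sizeof(*obj), GFP_ATOMIC);

        if (obj == NULL)
            return NULL;
        memset(obj, 0, sizeof(*obj));
        obj->value = value;

        /* NULL name: hashv itself is the key; the object's address keeps
         * it unique (see the hashbin_remove() comments below). */
        hashbin_insert(hb, (irda_queue_t *) obj, (long) obj, NULL);
        return obj;
    }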
485 | |||
486 | /* | ||
487 | * Function hashbin_remove_first (hashbin) | ||
488 | * | ||
489 | * Remove first entry of the hashbin | ||
490 | * | ||
491 | * Note : this function no longer uses hashbin_remove(), but does things | ||
492 | * similar to hashbin_remove_this(), so can be considered safe. | ||
493 | * Jean II | ||
494 | */ | ||
495 | void *hashbin_remove_first( hashbin_t *hashbin) | ||
496 | { | ||
497 | unsigned long flags = 0; | ||
498 | irda_queue_t *entry = NULL; | ||
499 | |||
500 | /* Synchronize */ | ||
501 | if ( hashbin->hb_type & HB_LOCK ) { | ||
502 | spin_lock_irqsave(&hashbin->hb_spinlock, flags); | ||
503 | } /* Default is no-lock */ | ||
504 | |||
505 | entry = hashbin_get_first( hashbin); | ||
506 | if ( entry != NULL) { | ||
507 | int bin; | ||
508 | long hashv; | ||
509 | /* | ||
510 | * Locate hashbin | ||
511 | */ | ||
512 | hashv = entry->q_hash; | ||
513 | bin = GET_HASHBIN( hashv ); | ||
514 | |||
515 | /* | ||
516 | * Dequeue the entry... | ||
517 | */ | ||
518 | dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], | ||
519 | (irda_queue_t*) entry ); | ||
520 | hashbin->hb_size--; | ||
521 | entry->q_next = NULL; | ||
522 | entry->q_prev = NULL; | ||
523 | |||
524 | /* | ||
525 | * Check if this item is the currently selected item, and in | ||
526 | * that case we must reset hb_current | ||
527 | */ | ||
528 | if ( entry == hashbin->hb_current) | ||
529 | hashbin->hb_current = NULL; | ||
530 | } | ||
531 | |||
532 | /* Release lock */ | ||
533 | if ( hashbin->hb_type & HB_LOCK ) { | ||
534 | spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); | ||
535 | } /* Default is no-lock */ | ||
536 | |||
537 | return entry; | ||
538 | } | ||
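Since hashbin_remove_first() is one of the HB_LOCK-protected calls, it can be used to drain a hashbin safely; a small sketch, assuming kmalloc()-allocated entries:

    irda_queue_t *entry;

    /* Entries come back already unlinked, so freeing them is safe. */
    while ((entry = hashbin_remove_first(hb)) != NULL)
        kfree(entry);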
539 | |||
540 | |||
541 | /* | ||
542 | * Function hashbin_remove (hashbin, hashv, name) | ||
543 | * | ||
544 | * Remove entry with the given name | ||
545 | * | ||
546 | * The use of this function is highly discouraged, because the whole | ||
547 | * concept behind hashbin_remove() is broken. In many cases, it's not | ||
548 | * possible to guarantee the uniqueness of the index (either hashv or name), | ||
549 | * leading to removing the WRONG entry. | ||
550 | * The only simple safe use is : | ||
551 | * hashbin_remove(hashbin, (long) self, NULL); | ||
552 | * In other cases, you must think hard to guarantee uniqueness of the index. | ||
553 | * Jean II | ||
554 | */ | ||
555 | void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name) | ||
556 | { | ||
557 | int bin, found = FALSE; | ||
558 | unsigned long flags = 0; | ||
559 | irda_queue_t* entry; | ||
560 | |||
561 | IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); | ||
562 | |||
563 | IRDA_ASSERT( hashbin != NULL, return NULL;); | ||
564 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); | ||
565 | |||
566 | /* | ||
567 | * Locate hashbin | ||
568 | */ | ||
569 | if ( name ) | ||
570 | hashv = hash( name ); | ||
571 | bin = GET_HASHBIN( hashv ); | ||
572 | |||
573 | /* Synchronize */ | ||
574 | if ( hashbin->hb_type & HB_LOCK ) { | ||
575 | spin_lock_irqsave(&hashbin->hb_spinlock, flags); | ||
576 | } /* Default is no-lock */ | ||
577 | |||
578 | /* | ||
579 | * Search for entry | ||
580 | */ | ||
581 | entry = hashbin->hb_queue[ bin ]; | ||
582 | if ( entry ) { | ||
583 | do { | ||
584 | /* | ||
585 | * Check for key | ||
586 | */ | ||
587 | if ( entry->q_hash == hashv ) { | ||
588 | /* | ||
589 | * Name compare too? | ||
590 | */ | ||
591 | if ( name ) { | ||
592 | if ( strcmp( entry->q_name, name) == 0) | ||
593 | { | ||
594 | found = TRUE; | ||
595 | break; | ||
596 | } | ||
597 | } else { | ||
598 | found = TRUE; | ||
599 | break; | ||
600 | } | ||
601 | } | ||
602 | entry = entry->q_next; | ||
603 | } while ( entry != hashbin->hb_queue[ bin ] ); | ||
604 | } | ||
605 | |||
606 | /* | ||
607 | * If entry was found, dequeue it | ||
608 | */ | ||
609 | if ( found ) { | ||
610 | dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], | ||
611 | (irda_queue_t*) entry ); | ||
612 | hashbin->hb_size--; | ||
613 | |||
614 | /* | ||
615 | * Check if this item is the currently selected item, and in | ||
616 | * that case we must reset hb_current | ||
617 | */ | ||
618 | if ( entry == hashbin->hb_current) | ||
619 | hashbin->hb_current = NULL; | ||
620 | } | ||
621 | |||
622 | /* Release lock */ | ||
623 | if ( hashbin->hb_type & HB_LOCK ) { | ||
624 | spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); | ||
625 | } /* Default is no-lock */ | ||
626 | |||
627 | |||
628 | /* Return */ | ||
629 | if ( found ) | ||
630 | return entry; | ||
631 | else | ||
632 | return NULL; | ||
633 | |||
634 | } | ||
635 | EXPORT_SYMBOL(hashbin_remove); | ||
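The one pattern the comment above calls safe is removal keyed by the same address used at insert time, with no name; a sketch built on the hypothetical add_obj()/struct my_obj helpers from the earlier sketches:

    static void del_obj(hashbin_t *hb, struct my_obj *obj)
    {
        /* The key is the address used by add_obj(), so it is unique and
         * hashbin_remove() cannot pick the wrong entry. */
        if (hashbin_remove(hb, (long) obj, NULL) != NULL)
            kfree(obj);

        /* When a unique key cannot be guaranteed, prefer
         * hashbin_remove_this(hb, &obj->q) instead. */
    }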
636 | |||
637 | /* | ||
638 | * Function hashbin_remove_this (hashbin, entry) | ||
639 | * | ||
640 | * Remove the given entry from the hashbin | ||
641 | * | ||
642 | * In some cases, the user of hashbin can't guarantee the uniqueness | ||
643 | * of either the hashv or name. | ||
644 | * In those cases, using the above function is guaranteed to cause trouble, | ||
645 | * so we use this one instead... | ||
646 | * And by the way, it's also faster, because we skip the search phase ;-) | ||
647 | */ | ||
648 | void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry) | ||
649 | { | ||
650 | unsigned long flags = 0; | ||
651 | int bin; | ||
652 | long hashv; | ||
653 | |||
654 | IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); | ||
655 | |||
656 | IRDA_ASSERT( hashbin != NULL, return NULL;); | ||
657 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); | ||
658 | IRDA_ASSERT( entry != NULL, return NULL;); | ||
659 | |||
660 | /* Synchronize */ | ||
661 | if ( hashbin->hb_type & HB_LOCK ) { | ||
662 | spin_lock_irqsave(&hashbin->hb_spinlock, flags); | ||
663 | } /* Default is no-lock */ | ||
664 | |||
665 | /* Check if valid and not already removed... */ | ||
666 | if((entry->q_next == NULL) || (entry->q_prev == NULL)) { | ||
667 | entry = NULL; | ||
668 | goto out; | ||
669 | } | ||
670 | |||
671 | /* | ||
672 | * Locate hashbin | ||
673 | */ | ||
674 | hashv = entry->q_hash; | ||
675 | bin = GET_HASHBIN( hashv ); | ||
676 | |||
677 | /* | ||
678 | * Dequeue the entry... | ||
679 | */ | ||
680 | dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], | ||
681 | (irda_queue_t*) entry ); | ||
682 | hashbin->hb_size--; | ||
683 | entry->q_next = NULL; | ||
684 | entry->q_prev = NULL; | ||
685 | |||
686 | /* | ||
687 | * Check if this item is the currently selected item, and in | ||
688 | * that case we must reset hb_current | ||
689 | */ | ||
690 | if ( entry == hashbin->hb_current) | ||
691 | hashbin->hb_current = NULL; | ||
692 | out: | ||
693 | /* Release lock */ | ||
694 | if ( hashbin->hb_type & HB_LOCK ) { | ||
695 | spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); | ||
696 | } /* Default is no-lock */ | ||
697 | |||
698 | return entry; | ||
699 | } | ||
700 | EXPORT_SYMBOL(hashbin_remove_this); | ||
701 | |||
702 | /*********************** HASHBIN ENUMERATION ***********************/ | ||
703 | |||
704 | /* | ||
705 | * Function hashbin_find (hashbin, hashv, name) | ||
706 | * | ||
707 | * Find item with the given hashv or name | ||
708 | * | ||
709 | */ | ||
710 | void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name ) | ||
711 | { | ||
712 | int bin; | ||
713 | irda_queue_t* entry; | ||
714 | |||
715 | IRDA_DEBUG( 4, "hashbin_find()\n"); | ||
716 | |||
717 | IRDA_ASSERT( hashbin != NULL, return NULL;); | ||
718 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); | ||
719 | |||
720 | /* | ||
721 | * Locate hashbin | ||
722 | */ | ||
723 | if ( name ) | ||
724 | hashv = hash( name ); | ||
725 | bin = GET_HASHBIN( hashv ); | ||
726 | |||
727 | /* | ||
728 | * Search for entry | ||
729 | */ | ||
730 | entry = hashbin->hb_queue[ bin]; | ||
731 | if ( entry ) { | ||
732 | do { | ||
733 | /* | ||
734 | * Check for key | ||
735 | */ | ||
736 | if ( entry->q_hash == hashv ) { | ||
737 | /* | ||
738 | * Name compare too? | ||
739 | */ | ||
740 | if ( name ) { | ||
741 | if ( strcmp( entry->q_name, name ) == 0 ) { | ||
742 | return entry; | ||
743 | } | ||
744 | } else { | ||
745 | return entry; | ||
746 | } | ||
747 | } | ||
748 | entry = entry->q_next; | ||
749 | } while ( entry != hashbin->hb_queue[ bin ] ); | ||
750 | } | ||
751 | |||
752 | return NULL; | ||
753 | } | ||
754 | EXPORT_SYMBOL(hashbin_find); | ||
755 | |||
756 | /* | ||
757 | * Function hashbin_lock_find (hashbin, hashv, name) | ||
758 | * | ||
759 | * Find item with the given hashv or name | ||
760 | * | ||
761 | * Same, but with spinlock protection... | ||
762 | * I call it safe, but it's only safe with respect to the hashbin, not its | ||
763 | * content. - Jean II | ||
764 | */ | ||
765 | void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name ) | ||
766 | { | ||
767 | unsigned long flags = 0; | ||
768 | irda_queue_t* entry; | ||
769 | |||
770 | /* Synchronize */ | ||
771 | spin_lock_irqsave(&hashbin->hb_spinlock, flags); | ||
772 | |||
773 | /* | ||
774 | * Search for entry | ||
775 | */ | ||
776 | entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name ); | ||
777 | |||
778 | /* Release lock */ | ||
779 | spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); | ||
780 | |||
781 | return entry; | ||
782 | } | ||
783 | EXPORT_SYMBOL(hashbin_lock_find); | ||
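A short sketch of a locked lookup by name (hb and struct my_obj are the hypothetical objects from the earlier sketches, and "irlap-instance" is a made-up key). As the comment warns, only the lookup itself is atomic; dereferencing the result still needs an outside guarantee that the object is not freed underneath the caller:

    struct my_obj *obj = hashbin_lock_find(hb, 0, "irlap-instance");

    if (obj == NULL)
        IRDA_DEBUG(4, "%s(), no such entry\n", __FUNCTION__);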
784 | |||
785 | /* | ||
786 | * Function hashbin_find_next (hashbin, hashv, name, pnext) | ||
787 | * | ||
788 | * Find an item with the given hashv or name, and its successor | ||
789 | * | ||
790 | * This function allows concurrent enumerations without the | ||
791 | * need to lock over the whole session, because the caller keeps the | ||
792 | * context of the search. On the other hand, it might fail and return | ||
793 | * NULL if the entry is removed. - Jean II | ||
794 | */ | ||
795 | void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name, | ||
796 | void ** pnext) | ||
797 | { | ||
798 | unsigned long flags = 0; | ||
799 | irda_queue_t* entry; | ||
800 | |||
801 | /* Synchronize */ | ||
802 | spin_lock_irqsave(&hashbin->hb_spinlock, flags); | ||
803 | |||
804 | /* | ||
805 | * Search for current entry | ||
806 | * This allows us to check whether the current item is still in the | ||
807 | * hashbin or has been removed. | ||
808 | */ | ||
809 | entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name ); | ||
810 | |||
811 | /* | ||
812 | * Trick hashbin_get_next() to return what we want | ||
813 | */ | ||
814 | if(entry) { | ||
815 | hashbin->hb_current = entry; | ||
816 | *pnext = hashbin_get_next( hashbin ); | ||
817 | } else | ||
818 | *pnext = NULL; | ||
819 | |||
820 | /* Release lock */ | ||
821 | spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); | ||
822 | |||
823 | return entry; | ||
824 | } | ||
825 | EXPORT_SYMBOL(hashbin_find_next); | ||
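A sketch of how the successor mechanism might be used to resume an enumeration without holding the lock across the whole session (hashbin and name are assumed to be the caller's hashbin and the key of the entry it last looked at; none of this is part of the file itself):

    void *cur, *next;

    cur = hashbin_find_next(hashbin, 0, name, &next);
    if (cur == NULL) {
        /* The remembered entry has been removed meanwhile; restart
         * from hashbin_get_first() under the hashbin lock. */
    } else {
        /* The entry is still present and next is its successor, or NULL
         * if it was the last one. The content itself is still
         * unprotected, as the header comment warns. */
    }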
826 | |||
827 | /* | ||
828 | * Function hashbin_get_first (hashbin) | ||
829 | * | ||
830 | * Get a pointer to the first element in the hashbin. This function must | ||
831 | * be called before any calls to hashbin_get_next()! | ||
832 | * | ||
833 | */ | ||
834 | irda_queue_t *hashbin_get_first( hashbin_t* hashbin) | ||
835 | { | ||
836 | irda_queue_t *entry; | ||
837 | int i; | ||
838 | |||
839 | IRDA_ASSERT( hashbin != NULL, return NULL;); | ||
840 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); | ||
841 | |||
842 | if ( hashbin == NULL) | ||
843 | return NULL; | ||
844 | |||
845 | for ( i = 0; i < HASHBIN_SIZE; i ++ ) { | ||
846 | entry = hashbin->hb_queue[ i]; | ||
847 | if ( entry) { | ||
848 | hashbin->hb_current = entry; | ||
849 | return entry; | ||
850 | } | ||
851 | } | ||
852 | /* | ||
853 | * Did not find any item in hashbin | ||
854 | */ | ||
855 | return NULL; | ||
856 | } | ||
857 | EXPORT_SYMBOL(hashbin_get_first); | ||
858 | |||
859 | /* | ||
860 | * Function hashbin_get_next (hashbin) | ||
861 | * | ||
862 | * Get next item in hashbin. A series of hashbin_get_next() calls must | ||
863 | * be started by a call to hashbin_get_first(). The function returns | ||
864 | * NULL when all items have been traversed. | ||
865 | * | ||
866 | * The context of the search is stored within the hashbin, so you must | ||
867 | * protect yourself from concurrent enumerations. - Jean II | ||
868 | */ | ||
869 | irda_queue_t *hashbin_get_next( hashbin_t *hashbin) | ||
870 | { | ||
871 | irda_queue_t* entry; | ||
872 | int bin; | ||
873 | int i; | ||
874 | |||
875 | IRDA_ASSERT( hashbin != NULL, return NULL;); | ||
876 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); | ||
877 | |||
878 | if ( hashbin->hb_current == NULL) { | ||
879 | IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;); | ||
880 | return NULL; | ||
881 | } | ||
882 | entry = hashbin->hb_current->q_next; | ||
883 | bin = GET_HASHBIN( entry->q_hash); | ||
884 | |||
885 | /* | ||
886 | * Make sure that we are not back at the beginning of the queue | ||
887 | * again | ||
888 | */ | ||
889 | if ( entry != hashbin->hb_queue[ bin ]) { | ||
890 | hashbin->hb_current = entry; | ||
891 | |||
892 | return entry; | ||
893 | } | ||
894 | |||
895 | /* | ||
896 | * Check that this is not the last queue in hashbin | ||
897 | */ | ||
898 | if ( bin >= HASHBIN_SIZE) | ||
899 | return NULL; | ||
900 | |||
901 | /* | ||
902 | * Move to next queue in hashbin | ||
903 | */ | ||
904 | bin++; | ||
905 | for ( i = bin; i < HASHBIN_SIZE; i++ ) { | ||
906 | entry = hashbin->hb_queue[ i]; | ||
907 | if ( entry) { | ||
908 | hashbin->hb_current = entry; | ||
909 | |||
910 | return entry; | ||
911 | } | ||
912 | } | ||
913 | return NULL; | ||
914 | } | ||
915 | EXPORT_SYMBOL(hashbin_get_next); | ||