/*********************************************************************
 *
 * Filename:      irqueue.c
 * Version:       0.3
 * Description:   General queue implementation
 * Status:        Experimental.
 * Author:        Dag Brattli <[email protected]>
 * Created at:    Tue Jun 9 13:29:31 1998
 * Modified at:   Sun Dec 12 13:48:22 1999
 * Modified by:   Dag Brattli <[email protected]>
 * Modified at:   Thu Jan 4 14:29:10 CET 2001
 * Modified by:   Marc Zyngier <[email protected]>
 *
 *     Copyright (C) 1998-1999, Aage Kvalnes <[email protected]>
 *     Copyright (C) 1998, Dag Brattli,
 *     All Rights Reserved.
 *
 *     This code is taken from the Vortex Operating System written by Aage
 *     Kvalnes. Aage has agreed that this code can use the GPL licence,
 *     although he does not use that licence in his own code.
 *
 *     This copyright does however _not_ include the ELF hash() function
 *     which I currently don't know which licence or copyright it
 *     has. Please inform me if you know.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/

/*
 * NOTE :
 * There are various problems with this package :
 *      o the hash function for ints is pathetic (but could be changed)
 *      o locking is sometimes suspicious (especially during enumeration)
 *      o most users have only a few elements (== overhead)
 *      o most users never use search, so don't benefit from hashing
 * Problems already fixed :
 *      o not 64 bit compliant (most users do hashv = (int) self)
 *      o hashbin_remove() is broken => use hashbin_remove_this()
 * I think most users would be better served by a simple linked list
 * (like include/linux/list.h) with a global spinlock per list.
 * Jean II
 */

/*
 * Notes on the concurrent access to hashbin and other SMP issues
 * -------------------------------------------------------------
 *      In the IrDA stack, hashbins are very often a global repository of
 * information, and therefore used in a very asynchronous manner following
 * various events (driver calls, timers, user calls...).
 *      Therefore, it is very often important to consider how concurrent
 * access to the hashbin is managed and how to guarantee the consistency
 * of the operations on it.
 *
 *      First, we need to define the objective of locking :
 *              1) Protect user data (content pointed to by the hashbin)
 *              2) Protect hashbin structure itself (linked list in each bin)
 *
 *                      OLD LOCKING
 *                      -----------
 *
 *      The previous locking strategies, HB_LOCAL and HB_GLOBAL, were
 * both inadequate in *both* respects.
 *              o HB_GLOBAL was using a spinlock for each bin (local locking).
 *              o HB_LOCAL was disabling irq on *all* CPUs, so effectively
 *                used a single global semaphore.
 *      The problems were :
 *              A) Global irq disabling is no longer supported by the kernel
 *              B) No protection for the hashbin struct global data
 *                      o hashbin_delete()
 *                      o hb_current
 *              C) No protection for user data in some cases
 *
 *      A) HB_LOCAL uses global irq disabling, so it doesn't work on kernel
 * 2.5.X. Even when it is supported (kernel 2.4.X and earlier), its
 * performance is not satisfactory on SMP setups. Most hashbins were
 * HB_LOCAL, so (A) definitely needs fixing.
 *      B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
 * locks only the individual bins, it will never be able to lock the
 * global data, so it can't do (B).
 *      C) Some functions return a pointer to data that is still in the
 * hashbin :
 *              o hashbin_find()
 *              o hashbin_get_first()
 *              o hashbin_get_next()
 * As the data is still in the hashbin, it may be changed or freed
 * while the caller is examining the data. In those cases, locking can't
 * be done within the hashbin, but must cover the use of the data within
 * the caller.
 * The caller can easily do this with HB_LOCAL (just disable irqs).
 * However, this is impossible with HB_GLOBAL because the caller has no
 * way to know the proper bin, so it doesn't know which spinlock to use.
 *
 *      Quick summary : we can no longer use HB_LOCAL, and HB_GLOBAL is
 * fundamentally broken and will never work.
 *
 *                      NEW LOCKING
 *                      -----------
 *
 *      To fix those problems, I've introduced a few changes in the
 * hashbin locking :
 *              1) New HB_LOCK scheme
 *              2) hashbin->hb_spinlock
 *              3) New hashbin usage policy
 *
 * HB_LOCK :
 * -------
 *      HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
 * and HB_GLOBAL. It uses a single spinlock to protect the whole content
 * of the hashbin. As it is a single spinlock, it can protect the global
 * data of the hashbin and not only the bins themselves.
 *      HB_LOCK can only protect some of the hashbin calls, so it only locks
 * the calls that can be made 100% safe and leaves the other calls
 * unprotected.
 *      HB_LOCK is in theory slower than HB_GLOBAL, but as the hashbin
 * content is always small, contention is not high, so it doesn't matter
 * much. HB_LOCK is probably faster than HB_LOCAL.
 *
 * hashbin->hb_spinlock :
 * --------------------
 *      The spinlock that HB_LOCK uses is available to the caller, so that
 * the caller can protect unprotected calls (see below).
 *      If the caller wants to do its own locking entirely (HB_NOLOCK), it
 * can do so and may safely use this spinlock.
 *      Locking is done like this :
 *              spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *      Releasing the lock :
 *              spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *
 * Safe & Protected calls :
 * ----------------------
 *      The following calls are safe or protected via HB_LOCK :
 *              o hashbin_new()         -> safe
 *              o hashbin_delete()
 *              o hashbin_insert()
 *              o hashbin_remove_first()
 *              o hashbin_remove()
 *              o hashbin_remove_this()
 *              o HASHBIN_GET_SIZE()    -> atomic
 *
 *      The following calls only protect the hashbin itself :
 *              o hashbin_lock_find()
 *              o hashbin_find_next()
 *
 * Unprotected calls :
 * -----------------
 *      The following calls need to be protected by the caller :
 *              o hashbin_find()
 *              o hashbin_get_first()
 *              o hashbin_get_next()
 *
 * Locking Policy :
 * --------------
 *      If the hashbin is used only in a single thread of execution
 * (explicitly or implicitly), you can use HB_NOLOCK.
 *      If the calling module already provides concurrent access protection,
 * you may use HB_NOLOCK.
 *
 *      In all other cases, you need to use HB_LOCK and lock the hashbin
 * every time before calling one of the unprotected calls. You also must
 * use the pointer returned by the unprotected call within the locked
 * region (see the usage sketch right after this comment).
 *
 * Extra care for enumeration :
 * --------------------------
 *      hashbin_get_first() and hashbin_get_next() use the hashbin to
 * store the current position, in hb_current.
 *      As long as the hashbin remains locked, this is safe. If you unlock
 * the hashbin, the current position may change if anybody else modifies
 * or enumerates the hashbin.
 *      Summary : do the full enumeration while locked.
 *
 *      Alternatively, you may use hashbin_find_next(). But, this will
 * be slower, is more complex to use and doesn't protect the hashbin
 * content. So, care is needed here as well.
 *
 * Other issues :
 * ------------
 *      I believe that we are overdoing it by using spin_lock_irqsave()
 * and we should use only spin_lock_bh() or similar. But, I don't have
 * the balls to try it out.
 *      Don't assume that because hashbins are now (somewhat) SMP safe,
 * the rest of the code is. Higher layers tend to be safest,
 * but LAP and LMP would need some serious dedicated love.
 *
 *      Jean II
 */
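
/*
 * Usage sketch (editor's illustration, not part of the original code) :
 * wrapping one of the unprotected calls under HB_LOCK, as described in
 * the locking notes above. "my_name" and the my_obj type are
 * hypothetical; the hashbin calls and the hb_spinlock usage are the
 * ones documented in this file.
 *
 *      hashbin_t *hb = hashbin_new(HB_LOCK);
 *      unsigned long flags;
 *      struct my_obj *obj;
 *
 *      spin_lock_irqsave(&hb->hb_spinlock, flags);
 *      obj = (struct my_obj *) hashbin_find(hb, 0, "my_name");
 *      if (obj) {
 *              ... use obj only while the lock is held ...
 *      }
 *      spin_unlock_irqrestore(&hb->hb_spinlock, flags);
 */
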
#include <linux/module.h>
#include <linux/slab.h>

#include <net/irda/irda.h>
#include <net/irda/irqueue.h>

/************************ QUEUE SUBROUTINES ************************/

/*
 * Hashbin
 */
#define GET_HASHBIN(x) ( x & HASHBIN_MASK )

/*
 * Function hash (name)
 *
 *    This function hashes the input string 'name' using the ELF hash
 *    function for strings.
 */
static __u32 hash( const char* name)
{
        __u32 h = 0;
        __u32 g;

        while (*name) {
                h = (h << 4) + *name++;
                if ((g = (h & 0xf0000000)))
                        h ^= g >> 24;
                h &= ~g;
        }
        return h;
}

/*
 * Function enqueue_first (queue, element)
 *
 *    Insert item first in queue.
 *
 */
static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
{

        IRDA_DEBUG( 4, "%s()\n", __func__);

        /*
         * Check if queue is empty.
         */
        if ( *queue == NULL ) {
                /*
                 * Queue is empty.  Insert one element into the queue.
                 */
                element->q_next = element->q_prev = *queue = element;

        } else {
                /*
                 * Queue is not empty.  Insert element into front of queue.
                 */
                element->q_next = (*queue);
                (*queue)->q_prev->q_next = element;
                element->q_prev = (*queue)->q_prev;
                (*queue)->q_prev = element;
                (*queue) = element;
        }
}


/*
 * Function dequeue_first (queue)
 *
 *    Remove first entry in queue
 *
 */
static irda_queue_t *dequeue_first(irda_queue_t **queue)
{
        irda_queue_t *ret;

        IRDA_DEBUG( 4, "dequeue_first()\n");

        /*
         * Set return value
         */
        ret = *queue;

        if ( *queue == NULL ) {
                /*
                 * Queue was empty.
                 */
        } else if ( (*queue)->q_next == *queue ) {
                /*
                 * Queue only contained a single element. It will now be
                 * empty.
                 */
                *queue = NULL;
        } else {
                /*
                 * Queue contained several elements. Remove the first one.
                 */
                (*queue)->q_prev->q_next = (*queue)->q_next;
                (*queue)->q_next->q_prev = (*queue)->q_prev;
                *queue = (*queue)->q_next;
        }

        /*
         * Return the removed entry (or NULL if the queue was empty).
         */
        return ret;
}

/*
 * Function dequeue_general (queue, element)
 *
 *    Remove a specific entry from the queue
 *
 */
static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
{
        irda_queue_t *ret;

        IRDA_DEBUG( 4, "dequeue_general()\n");

        /*
         * Set return value
         */
        ret = *queue;

        if ( *queue == NULL ) {
                /*
                 * Queue was empty.
                 */
        } else if ( (*queue)->q_next == *queue ) {
                /*
                 * Queue only contained a single element. It will now be
                 * empty.
                 */
                *queue = NULL;

        } else {
                /*
                 * Remove specific element.
                 */
                element->q_prev->q_next = element->q_next;
                element->q_next->q_prev = element->q_prev;
                if ( (*queue) == element)
                        (*queue) = element->q_next;
        }

        /*
         * Return the removed entry (or NULL if the queue was empty).
         */
        return ret;
}

/************************ HASHBIN MANAGEMENT ************************/

/*
 * Function hashbin_new (type)
 *
 *    Create hashbin!
 *
 */
hashbin_t *hashbin_new(int type)
{
        hashbin_t* hashbin;

        /*
         * Allocate new hashbin
         */
        hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC);
        if (!hashbin)
                return NULL;

        /*
         * Initialize structure
         */
        hashbin->hb_type = type;
        hashbin->magic = HB_MAGIC;
        //hashbin->hb_current = NULL;

        /* Make sure all spinlocks are unlocked */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_init(&hashbin->hb_spinlock);
        }

        return hashbin;
}
EXPORT_SYMBOL(hashbin_new);


/*
 * Function hashbin_delete (hashbin, free_func)
 *
 *    Destroy hashbin. free_func can be a user-supplied special routine
 *    for deallocating this structure if it's complex. If not, the user
 *    can just supply kfree, which should take care of the job.
 */
#ifdef CONFIG_LOCKDEP
static int hashbin_lock_depth = 0;
#endif
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
        irda_queue_t* queue;
        unsigned long flags = 0;
        int i;

        IRDA_ASSERT(hashbin != NULL, return -1;);
        IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
                                         hashbin_lock_depth++);
        }

        /*
         *  Free the entries in the hashbin, TODO: use hashbin_clear when
         *  it has been shown to work
         */
        for (i = 0; i < HASHBIN_SIZE; i ++ ) {
                queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
                while (queue ) {
                        if (free_func)
                                (*free_func)(queue);
                        queue = dequeue_first(
                                (irda_queue_t**) &hashbin->hb_queue[i]);
                }
        }

        /* Cleanup local data */
        hashbin->hb_current = NULL;
        hashbin->magic = ~HB_MAGIC;

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
#ifdef CONFIG_LOCKDEP
                hashbin_lock_depth--;
#endif
        }

        /*
         *  Free the hashbin structure
         */
        kfree(hashbin);

        return 0;
}
EXPORT_SYMBOL(hashbin_delete);
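
/*
 * Usage sketch (editor's illustration, not part of the original code) :
 * tearing down a hashbin whose entries were allocated with kmalloc()
 * or kzalloc(), so that kfree is a sufficient free_func, as the comment
 * above suggests :
 *
 *      hashbin_delete(hb, (FREE_FUNC) kfree);
 *
 * Entries needing more elaborate cleanup should instead get a dedicated
 * routine that takes the queue element pointer and releases everything
 * it owns.
 */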

/********************* HASHBIN LIST OPERATIONS *********************/

/*
 * Function hashbin_insert (hashbin, entry, hashv, name)
 *
 *    Insert an entry into the hashbin
 *
 */
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
                    const char* name)
{
        unsigned long flags = 0;
        int bin;

        IRDA_DEBUG( 4, "%s()\n", __func__);

        IRDA_ASSERT( hashbin != NULL, return;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /*
         * Store name and key
         */
        entry->q_hash = hashv;
        if ( name )
                strlcpy( entry->q_name, name, sizeof(entry->q_name));

        /*
         * Insert new entry first
         */
        enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                       entry);
        hashbin->hb_size++;

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */
}
EXPORT_SYMBOL(hashbin_insert);
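
/*
 * Usage sketch (editor's illustration, not part of the original code) :
 * inserting an entry. The hashbin stores irda_queue_t pointers, so the
 * usual pattern in the IrDA stack is assumed here : embed the
 * irda_queue_t as the first member of the (hypothetical) user structure
 * so the casts below are valid :
 *
 *      struct my_obj {
 *              irda_queue_t q;         <- must be first
 *              int          foo;
 *      };
 *
 *      struct my_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
 *      if (obj)
 *              hashbin_insert(hb, (irda_queue_t *) obj, 0, "my_name");
 *
 * The entry is keyed on the hash of "my_name" here; pass name == NULL
 * to key it on the hashv argument instead.
 */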

/*
 *  Function hashbin_remove_first (hashbin)
 *
 *    Remove first entry of the hashbin
 *
 * Note : this function no longer uses hashbin_remove(), but does things
 * similar to hashbin_remove_this(), so it can be considered safe.
 * Jean II
 */
void *hashbin_remove_first( hashbin_t *hashbin)
{
        unsigned long flags = 0;
        irda_queue_t *entry = NULL;

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        entry = hashbin_get_first( hashbin);
        if ( entry != NULL) {
                int     bin;
                long    hashv;
                /*
                 * Locate hashbin
                 */
                hashv = entry->q_hash;
                bin = GET_HASHBIN( hashv );

                /*
                 * Dequeue the entry...
                 */
                dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                                 entry);
                hashbin->hb_size--;
                entry->q_next = NULL;
                entry->q_prev = NULL;

                /*
                 * Check if this item is the currently selected item, and in
                 * that case we must reset hb_current
                 */
                if ( entry == hashbin->hb_current)
                        hashbin->hb_current = NULL;
        }

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        return entry;
}


/*
 *  Function hashbin_remove (hashbin, hashv, name)
 *
 *    Remove entry with the given hashv or name
 *
 * The use of this function is highly discouraged, because the whole
 * concept behind hashbin_remove() is broken. In many cases, it's not
 * possible to guarantee the uniqueness of the index (either hashv or name),
 * leading to removing the WRONG entry.
 * The only simple safe use is :
 *              hashbin_remove(hashbin, (int) self, NULL);
 * In other cases, you must think hard to guarantee uniqueness of the index.
 * Jean II
 */
void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
{
        int bin, found = FALSE;
        unsigned long flags = 0;
        irda_queue_t* entry;

        IRDA_DEBUG( 4, "%s()\n", __func__);

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /*
         * Search for entry
         */
        entry = hashbin->hb_queue[ bin ];
        if ( entry ) {
                do {
                        /*
                         * Check for key
                         */
                        if ( entry->q_hash == hashv ) {
                                /*
                                 * Name compare too?
                                 */
                                if ( name ) {
                                        if ( strcmp( entry->q_name, name) == 0)
                                        {
                                                found = TRUE;
                                                break;
                                        }
                                } else {
                                        found = TRUE;
                                        break;
                                }
                        }
                        entry = entry->q_next;
                } while ( entry != hashbin->hb_queue[ bin ] );
        }

        /*
         * If entry was found, dequeue it
         */
        if ( found ) {
                dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                                 entry);
                hashbin->hb_size--;

                /*
                 * Check if this item is the currently selected item, and in
                 * that case we must reset hb_current
                 */
                if ( entry == hashbin->hb_current)
                        hashbin->hb_current = NULL;
        }

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */


        /* Return */
        if ( found )
                return entry;
        else
                return NULL;

}
EXPORT_SYMBOL(hashbin_remove);

/*
 *  Function hashbin_remove_this (hashbin, entry)
 *
 *    Remove the given entry
 *
 * In some cases, the user of hashbin can't guarantee the uniqueness
 * of either the hashv or name.
 * In those cases, using the above function is guaranteed to cause trouble,
 * so we use this one instead...
 * And by the way, it's also faster, because we skip the search phase ;-)
 */
void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
{
        unsigned long flags = 0;
        int     bin;
        long    hashv;

        IRDA_DEBUG( 4, "%s()\n", __func__);

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
        IRDA_ASSERT( entry != NULL, return NULL;);

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /* Check if valid and not already removed... */
        if((entry->q_next == NULL) || (entry->q_prev == NULL)) {
                entry = NULL;
                goto out;
        }

        /*
         * Locate hashbin
         */
        hashv = entry->q_hash;
        bin = GET_HASHBIN( hashv );

        /*
         * Dequeue the entry...
         */
        dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                         entry);
        hashbin->hb_size--;
        entry->q_next = NULL;
        entry->q_prev = NULL;

        /*
         * Check if this item is the currently selected item, and in
         * that case we must reset hb_current
         */
        if ( entry == hashbin->hb_current)
                hashbin->hb_current = NULL;
out:
        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        return entry;
}
EXPORT_SYMBOL(hashbin_remove_this);
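
/*
 * Usage sketch (editor's illustration, not part of the original code) :
 * removing an entry through a pointer the caller already holds (for
 * instance stored in its own control block), which avoids the ambiguous
 * search done by hashbin_remove() :
 *
 *      hashbin_remove_this(hb, (irda_queue_t *) obj);
 *
 * Note that hashbin_remove_this() takes hb_spinlock itself when the
 * hashbin was created with HB_LOCK, so don't call it with that lock
 * already held.
 */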

/*********************** HASHBIN ENUMERATION ***********************/

/*
 * Function hashbin_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 */
void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
{
        int bin;
        irda_queue_t* entry;

        IRDA_DEBUG( 4, "hashbin_find()\n");

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /*
         * Search for entry
         */
        entry = hashbin->hb_queue[ bin];
        if ( entry ) {
                do {
                        /*
                         * Check for key
                         */
                        if ( entry->q_hash == hashv ) {
                                /*
                                 * Name compare too?
                                 */
                                if ( name ) {
                                        if ( strcmp( entry->q_name, name ) == 0 ) {
                                                return entry;
                                        }
                                } else {
                                        return entry;
                                }
                        }
                        entry = entry->q_next;
                } while ( entry != hashbin->hb_queue[ bin ] );
        }

        return NULL;
}
EXPORT_SYMBOL(hashbin_find);

/*
 * Function hashbin_lock_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 * Same as hashbin_find(), but with spinlock protection...
 * I call it safe, but it's only safe with respect to the hashbin, not its
 * content. - Jean II
 */
void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name )
{
        unsigned long flags = 0;
        irda_queue_t* entry;

        /* Synchronize */
        spin_lock_irqsave(&hashbin->hb_spinlock, flags);

        /*
         * Search for entry
         */
        entry = hashbin_find(hashbin, hashv, name);

        /* Release lock */
        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

        return entry;
}
EXPORT_SYMBOL(hashbin_lock_find);

/*
 * Function hashbin_find_next (hashbin, hashv, name, pnext)
 *
 *    Find an item with the given hashv or name, and its successor
 *
 * This function allows doing concurrent enumerations without the
 * need to lock over the whole session, because the caller keeps the
 * context of the search. On the other hand, it might fail and return
 * NULL if the entry is removed. - Jean II
 */
void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
                         void ** pnext)
{
        unsigned long flags = 0;
        irda_queue_t* entry;

        /* Synchronize */
        spin_lock_irqsave(&hashbin->hb_spinlock, flags);

        /*
         * Search for the current entry
         * This allows checking if the current item is still in the
         * hashbin or has been removed.
         */
        entry = hashbin_find(hashbin, hashv, name);

        /*
         * Trick hashbin_get_next() to return what we want
         */
        if(entry) {
                hashbin->hb_current = entry;
                *pnext = hashbin_get_next( hashbin );
        } else
                *pnext = NULL;

        /* Release lock */
        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

        return entry;
}
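
/*
 * Usage sketch (editor's illustration, not part of the original code) :
 * resuming an enumeration without keeping the hashbin locked between
 * iterations. The caller remembers the key of the last entry it
 * processed (last_hashv below is hypothetical) and asks for that entry
 * and its successor; a NULL return means the entry has been removed in
 * the meantime and the enumeration should be restarted.
 *
 *      struct my_obj *curr, *next;
 *
 *      curr = hashbin_find_next(hb, last_hashv, NULL, (void **) &next);
 *      if (curr == NULL) {
 *              ... entry vanished, restart from hashbin_get_first() ...
 *      }
 */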

/*
 * Function hashbin_get_first (hashbin)
 *
 *    Get a pointer to the first element in the hashbin. This function
 *    must be called before any calls to hashbin_get_next()!
 *
 */
irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
{
        irda_queue_t *entry;
        int i;

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        if ( hashbin == NULL)
                return NULL;

        for ( i = 0; i < HASHBIN_SIZE; i ++ ) {
                entry = hashbin->hb_queue[ i];
                if ( entry) {
                        hashbin->hb_current = entry;
                        return entry;
                }
        }
        /*
         *  Did not find any item in hashbin
         */
        return NULL;
}
EXPORT_SYMBOL(hashbin_get_first);

/*
 * Function hashbin_get_next (hashbin)
 *
 *    Get next item in hashbin. A series of hashbin_get_next() calls must
 *    be started by a call to hashbin_get_first(). The function returns
 *    NULL when all items have been traversed
 *
 * The context of the search is stored within the hashbin, so you must
 * protect yourself from concurrent enumerations. - Jean II
 */
irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
{
        irda_queue_t* entry;
        int bin;
        int i;

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        if ( hashbin->hb_current == NULL) {
                IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
                return NULL;
        }
        entry = hashbin->hb_current->q_next;
        bin = GET_HASHBIN( entry->q_hash);

        /*
         * Make sure that we are not back at the beginning of the queue
         * again
         */
        if ( entry != hashbin->hb_queue[ bin ]) {
                hashbin->hb_current = entry;

                return entry;
        }

        /*
         *  Check that this is not the last queue in hashbin
         */
        if ( bin >= HASHBIN_SIZE)
                return NULL;

        /*
         *  Move to next queue in hashbin
         */
        bin++;
        for ( i = bin; i < HASHBIN_SIZE; i++ ) {
                entry = hashbin->hb_queue[ i];
                if ( entry) {
                        hashbin->hb_current = entry;

                        return entry;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(hashbin_get_next);
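
/*
 * Usage sketch (editor's illustration, not part of the original code) :
 * a full enumeration done entirely under the hashbin spinlock, as
 * recommended in the locking notes at the top of this file :
 *
 *      unsigned long flags;
 *      irda_queue_t *entry;
 *
 *      spin_lock_irqsave(&hb->hb_spinlock, flags);
 *      for (entry = hashbin_get_first(hb); entry != NULL;
 *           entry = hashbin_get_next(hb)) {
 *              ... use entry, only while the lock is held ...
 *      }
 *      spin_unlock_irqrestore(&hb->hb_spinlock, flags);
 */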