/*********************************************************************
 *
 * Filename:      irqueue.c
 * Version:       0.3
 * Description:   General queue implementation
 * Status:        Experimental.
 * Author:        Dag Brattli <[email protected]>
 * Created at:    Tue Jun 9 13:29:31 1998
 * Modified at:   Sun Dec 12 13:48:22 1999
 * Modified by:   Dag Brattli <[email protected]>
 * Modified at:   Thu Jan 4 14:29:10 CET 2001
 * Modified by:   Marc Zyngier <[email protected]>
 *
 *     Copyright (C) 1998-1999, Aage Kvalnes <[email protected]>
 *     Copyright (C) 1998, Dag Brattli,
 *     All Rights Reserved.
 *
 *     This code is taken from the Vortex Operating System written by Aage
 *     Kvalnes. Aage has agreed that this code can use the GPL licence,
 *     although he does not use that licence in his own code.
 *
 *     This copyright does however _not_ include the ELF hash() function,
 *     for which I currently don't know the licence or copyright.
 *     Please inform me if you know.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/

/*
 * NOTE :
 * There are various problems with this package :
 *      o the hash function for ints is pathetic (but could be changed)
 *      o locking is sometimes suspicious (especially during enumeration)
 *      o most users have only a few elements (== overhead)
 *      o most users never use search, so don't benefit from hashing
 * Problems already fixed :
 *      o not 64 bit compliant (most users do hashv = (int) self)
 *      o hashbin_remove() is broken => use hashbin_remove_this()
 * I think most users would be better served by a simple linked list
 * (like include/linux/list.h) with a global spinlock per list.
 * Jean II
 */

/*
 * Notes on the concurrent access to hashbin and other SMP issues
 * -------------------------------------------------------------
 *      Hashbins are very often in the IrDA stack a global repository of
 * information, and therefore used in a very asynchronous manner following
 * various events (driver calls, timers, user calls...).
 *      Therefore, very often it is highly important to consider the
 * management of concurrent access to the hashbin and how to guarantee the
 * consistency of the operations on it.
 *
 *      First, we need to define the objective of locking :
 *              1) Protect user data (content pointed by the hashbin)
 *              2) Protect hashbin structure itself (linked list in each bin)
 *
 *                          OLD LOCKING
 *                          -----------
 *
 *      The previous locking strategies, HB_LOCAL and HB_GLOBAL, were
 * both inadequate in *both* aspects.
 *              o HB_GLOBAL was using a spinlock for each bin (local locking).
 *              o HB_LOCAL was disabling irq on *all* CPUs, so it used a
 *                single global semaphore.
 *      The problems were :
 *              A) Global irq disabling is no longer supported by the kernel
 *              B) No protection for the hashbin struct global data
 *                      o hashbin_delete()
 *                      o hb_current
 *              C) No protection for user data in some cases
 *
 *      A) HB_LOCAL uses global irq disabling, so it doesn't work on kernel
 * 2.5.X. Even when it is supported (kernel 2.4.X and earlier), its
 * performance is not satisfactory on SMP setups. Most hashbins were
 * HB_LOCAL, so (A) definitely needs fixing.
 *      B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
 * locks only the individual bins, it will never be able to lock the
 * global data, so it can't do (B).
 *      C) Some functions return a pointer to data that is still in the
 * hashbin :
 *              o hashbin_find()
 *              o hashbin_get_first()
 *              o hashbin_get_next()
 *      As the data is still in the hashbin, it may be changed or freed
 * while the caller is examining the data. In those cases, locking can't
 * be done within the hashbin, but must cover the use of the data by
 * the caller.
 *      The caller can easily do this with HB_LOCAL (just disable irqs).
 * However, this is impossible with HB_GLOBAL because the caller has no
 * way to know the proper bin, so it doesn't know which spinlock to use.
 *
 *      Quick summary : we can no longer use HB_LOCAL, and HB_GLOBAL is
 * fundamentally broken and will never work.
 *
 *                          NEW LOCKING
 *                          -----------
 *
 *      To fix those problems, I've introduced a few changes in the
 * hashbin locking :
 *      1) New HB_LOCK scheme
 *      2) hashbin->hb_spinlock
 *      3) New hashbin usage policy
 *
 * HB_LOCK :
 * -------
 *      HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
 * and HB_GLOBAL. It uses a single spinlock to protect the whole content
 * of the hashbin. As it is a single spinlock, it can protect the global
 * data of the hashbin and not only the bins themselves.
 *      HB_LOCK can only protect some of the hashbin calls, so it only locks
 * calls that can be made 100% safe and leaves the other calls unprotected.
 *      HB_LOCK is in theory slower than HB_GLOBAL, but as the hashbin
 * content is always small, contention is not high, so it doesn't matter
 * much. HB_LOCK is probably faster than HB_LOCAL.
 *
 * hashbin->hb_spinlock :
 * --------------------
 *      The spinlock that HB_LOCK uses is available to the caller, so that
 * the caller can protect unprotected calls (see below).
 *      If the caller wants to do its own locking entirely (HB_NOLOCK), it
 * can do so and may safely use this spinlock.
 *      Locking is done like this :
 *              spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *      Releasing the lock :
 *              spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *
 * Safe & Protected calls :
 * ----------------------
 *      The following calls are safe or protected via HB_LOCK :
 *              o hashbin_new()         -> safe
 *              o hashbin_delete()
 *              o hashbin_insert()
 *              o hashbin_remove_first()
 *              o hashbin_remove()
 *              o hashbin_remove_this()
 *              o HASHBIN_GET_SIZE()    -> atomic
 *
 *      The following calls only protect the hashbin itself :
 *              o hashbin_lock_find()
 *              o hashbin_find_next()
 *
 * Unprotected calls :
 * -----------------
 *      The following calls need to be protected by the caller :
 *              o hashbin_find()
 *              o hashbin_get_first()
 *              o hashbin_get_next()
 *
 * Locking Policy :
 * --------------
 *      If the hashbin is used only in a single thread of execution
 * (explicitly or implicitly), you can use HB_NOLOCK.
 *      If the calling module already provides concurrent access protection,
 * you may use HB_NOLOCK.
 *
 *      In all other cases, you need to use HB_LOCK and lock the hashbin
 * every time before calling one of the unprotected calls. You also must
 * use the pointer returned by the unprotected call within the locked
 * region (see the sketch below).
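 *
 *      A minimal sketch of that policy, where my_entry_t stands for
 * whatever type the caller actually stores in the hashbin :
 *
 *              unsigned long flags;
 *              my_entry_t *entry;
 *
 *              spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *              entry = (my_entry_t *) hashbin_find(hashbin, 0, name);
 *              if (entry != NULL) {
 *                      ... use *entry only while the lock is held ...
 *              }
 *              spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *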
 * Extra care for enumeration :
 * --------------------------
 *      hashbin_get_first() and hashbin_get_next() use the hashbin to
 * store the current position, in hb_current.
 *      As long as the hashbin remains locked, this is safe. If you unlock
 * the hashbin, the current position may change if anybody else modifies
 * or enumerates the hashbin.
 *      Summary : do the full enumeration while locked, as in the sketch
 * below.
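 *
 *      A minimal sketch of a locked enumeration (the entries are cast to
 * whatever type the caller stores) :
 *
 *              unsigned long flags;
 *              irda_queue_t *entry;
 *
 *              spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *              for (entry = hashbin_get_first(hashbin); entry != NULL;
 *                   entry = hashbin_get_next(hashbin)) {
 *                      ... examine the entry, do not unlock here ...
 *              }
 *              spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *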
 *      Alternatively, you may use hashbin_find_next(). But, this will
 * be slower, is more complex to use and doesn't protect the hashbin
 * content. So, care is needed here as well.
 *
 * Other issues :
 * ------------
 *      I believe that we are overdoing it by using spin_lock_irqsave()
 * and we should use only spin_lock_bh() or similar. But, I don't have
 * the balls to try it out.
 *      Don't believe that because hashbins are now (somewhat) SMP safe
 * that the rest of the code is. Higher layers tend to be safest,
 * but LAP and LMP would need some serious dedicated love.
 *
 *      Jean II
 */
#include <linux/module.h>

#include <net/irda/irda.h>
#include <net/irda/irqueue.h>

/************************ QUEUE SUBROUTINES ************************/

/*
 * Hashbin
 */
#define GET_HASHBIN(x) ( x & HASHBIN_MASK )

/*
 * Function hash (name)
 *
 *    This function hashes the input string 'name' using the ELF hash
 *    function for strings.
 */
static __u32 hash( const char* name)
{
        __u32 h = 0;
        __u32 g;

        while(*name) {
                h = (h<<4) + *name++;
                if ((g = (h & 0xf0000000)))
                        h ^= g >> 24;
                h &= ~g;
        }
        return h;
}

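/*
 * Note on bin selection : a name is mapped to a bin by hashing it and then
 * masking the result, so for some hypothetical name "foo" the bin index is
 * simply :
 *
 *      bin = GET_HASHBIN(hash("foo"));
 *
 * The number of bins is small, so collisions are expected; entries landing
 * in the same bin are simply chained together.
 */
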
/*
 * Function enqueue_first (queue, element)
 *
 *    Insert item first in queue.
 *
 */
static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
{

        IRDA_DEBUG( 4, "%s()\n", __func__);

        /*
         * Check if queue is empty.
         */
        if ( *queue == NULL ) {
                /*
                 * Queue is empty.  Insert one element into the queue.
                 */
                element->q_next = element->q_prev = *queue = element;

        } else {
                /*
                 * Queue is not empty.  Insert element into front of queue.
                 */
                element->q_next          = (*queue);
                (*queue)->q_prev->q_next = element;
                element->q_prev          = (*queue)->q_prev;
                (*queue)->q_prev         = element;
                (*queue)                 = element;
        }
}
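
/*
 * Note (a worked example, not tied to any particular user) : each bin is
 * a circular doubly-linked list with *queue pointing at the first element.
 * Inserting A and then B into an empty bin gives :
 *
 *      *queue == B,  B->q_next == A,  A->q_next == B,
 *      A->q_prev == B,  B->q_prev == A
 *
 * so the last element is always reachable as (*queue)->q_prev.
 */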

/*
 * Function dequeue_first (queue)
 *
 *    Remove first entry in queue
 *
 */
static irda_queue_t *dequeue_first(irda_queue_t **queue)
{
        irda_queue_t *ret;

        IRDA_DEBUG( 4, "dequeue_first()\n");

        /*
         * Set return value
         */
        ret =  *queue;

        if ( *queue == NULL ) {
                /*
                 * Queue was empty.
                 */
        } else if ( (*queue)->q_next == *queue ) {
                /*
                 *  Queue only contained a single element. It will now be
                 *  empty.
                 */
                *queue = NULL;
        } else {
                /*
                 * Queue contained several elements.  Remove the first one.
                 */
                (*queue)->q_prev->q_next = (*queue)->q_next;
                (*queue)->q_next->q_prev = (*queue)->q_prev;
                *queue = (*queue)->q_next;
        }

        /*
         * Return the removed entry (or NULL if the queue was empty).
         */
        return ret;
}

/*
 * Function dequeue_general (queue, element)
 *
 *    Remove a specific entry from the queue
 *
 */
static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
{
        irda_queue_t *ret;

        IRDA_DEBUG( 4, "dequeue_general()\n");

        /*
         * Set return value
         */
        ret =  *queue;

        if ( *queue == NULL ) {
                /*
                 * Queue was empty.
                 */
        } else if ( (*queue)->q_next == *queue ) {
                /*
                 *  Queue only contained a single element. It will now be
                 *  empty.
                 */
                *queue = NULL;

        } else {
                /*
                 *  Remove specific element.
                 */
                element->q_prev->q_next = element->q_next;
                element->q_next->q_prev = element->q_prev;
                if ( (*queue) == element)
                        (*queue) = element->q_next;
        }

        /*
         * Return the removed entry (or NULL if the queue was empty).
         */
        return ret;
}

/************************ HASHBIN MANAGEMENT ************************/

/*
 * Function hashbin_new (type)
 *
 *    Create hashbin!
 *
 */
hashbin_t *hashbin_new(int type)
{
        hashbin_t* hashbin;

        /*
         * Allocate new hashbin
         */
        hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC);
        if (!hashbin)
                return NULL;

        /*
         * Initialize structure
         */
        hashbin->hb_type = type;
        hashbin->magic = HB_MAGIC;
        //hashbin->hb_current = NULL;

        /* Make sure all spinlocks are unlocked */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_init(&hashbin->hb_spinlock);
        }

        return hashbin;
}
EXPORT_SYMBOL(hashbin_new);
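
/*
 * Usage sketch (illustrative only) : a caller that wants the protected
 * behaviour described above simply asks for HB_LOCK at creation time :
 *
 *      hashbin_t *cache;
 *
 *      cache = hashbin_new(HB_LOCK);
 *      if (cache == NULL)
 *              return -ENOMEM;
 */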

/*
 * Function hashbin_delete (hashbin, free_func)
 *
 *    Destroy hashbin. The free_func can be a user-supplied special routine
 *    for deallocating this structure if it's complex. If not, the user can
 *    just supply kfree, which should take care of the job.
 */
#ifdef CONFIG_LOCKDEP
static int hashbin_lock_depth = 0;
#endif
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
        irda_queue_t* queue;
        unsigned long flags = 0;
        int i;

        IRDA_ASSERT(hashbin != NULL, return -1;);
        IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
                                         hashbin_lock_depth++);
        }

        /*
         *  Free the entries in the hashbin, TODO: use hashbin_clear when
         *  it has been shown to work
         */
        for (i = 0; i < HASHBIN_SIZE; i ++ ) {
                queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
                while (queue ) {
                        if (free_func)
                                (*free_func)(queue);
                        queue = dequeue_first(
                                (irda_queue_t**) &hashbin->hb_queue[i]);
                }
        }

        /* Cleanup local data */
        hashbin->hb_current = NULL;
        hashbin->magic = ~HB_MAGIC;

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
#ifdef CONFIG_LOCKDEP
                hashbin_lock_depth--;
#endif
        }

        /*
         *  Free the hashbin structure
         */
        kfree(hashbin);

        return 0;
}
EXPORT_SYMBOL(hashbin_delete);
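
/*
 * Usage sketch (illustrative only) : entries that were simply kmalloc'ed
 * can be released by passing kfree as the free_func :
 *
 *      hashbin_delete(cache, (FREE_FUNC) kfree);
 *
 * Entries that own other resources need a dedicated destructor instead.
 */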

/********************* HASHBIN LIST OPERATIONS *********************/

/*
 * Function hashbin_insert (hashbin, entry, hashv, name)
 *
 *    Insert an entry into the hashbin
 *
 */
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
                    const char* name)
{
        unsigned long flags = 0;
        int bin;

        IRDA_DEBUG( 4, "%s()\n", __func__);

        IRDA_ASSERT( hashbin != NULL, return;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /*
         * Store name and key
         */
        entry->q_hash = hashv;
        if ( name )
                strlcpy( entry->q_name, name, sizeof(entry->q_name));

        /*
         * Insert new entry first
         */
        enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                       entry);
        hashbin->hb_size++;

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */
}
EXPORT_SYMBOL(hashbin_insert);
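
/*
 * Usage sketch (illustrative only) : objects stored in a hashbin are
 * expected to start with an irda_queue_t (this is how the existing IrDA
 * users do it), so that the object pointer and the irda_queue_t pointer
 * can be cast back and forth :
 *
 *      struct my_object {
 *              irda_queue_t q;         (must be the first member)
 *              int          value;
 *      };
 *
 *      struct my_object *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
 *      if (obj)
 *              hashbin_insert(cache, (irda_queue_t *) obj, 0, "some_name");
 */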

/*
 *  Function hashbin_remove_first (hashbin)
 *
 *    Remove first entry of the hashbin
 *
 * Note : this function no longer uses hashbin_remove(), but does things
 * similar to hashbin_remove_this(), so it can be considered safe.
 * Jean II
 */
void *hashbin_remove_first( hashbin_t *hashbin)
{
        unsigned long flags = 0;
        irda_queue_t *entry = NULL;

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        entry = hashbin_get_first( hashbin);
        if ( entry != NULL) {
                int     bin;
                long    hashv;
                /*
                 * Locate hashbin
                 */
                hashv = entry->q_hash;
                bin = GET_HASHBIN( hashv );

                /*
                 * Dequeue the entry...
                 */
                dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                                 (irda_queue_t*) entry );
                hashbin->hb_size--;
                entry->q_next = NULL;
                entry->q_prev = NULL;

                /*
                 *  Check if this item is the currently selected item, and in
                 *  that case we must reset hb_current
                 */
                if ( entry == hashbin->hb_current)
                        hashbin->hb_current = NULL;
        }

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        return entry;
}

/*
 *  Function hashbin_remove (hashbin, hashv, name)
 *
 *    Remove entry with the given name
 *
 *  The use of this function is highly discouraged, because the whole
 *  concept behind hashbin_remove() is broken. In many cases, it's not
 *  possible to guarantee the uniqueness of the index (either hashv or name),
 *  leading to removing the WRONG entry.
 *  The only simple safe use is :
 *              hashbin_remove(hashbin, (int) self, NULL);
 *  In other cases, you must think hard to guarantee uniqueness of the index.
 *  Jean II
 */
void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
{
        int bin, found = FALSE;
        unsigned long flags = 0;
        irda_queue_t* entry;

        IRDA_DEBUG( 4, "%s()\n", __func__);

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /*
         * Search for entry
         */
        entry = hashbin->hb_queue[ bin ];
        if ( entry ) {
                do {
                        /*
                         * Check for key
                         */
                        if ( entry->q_hash == hashv ) {
                                /*
                                 * Name compare too?
                                 */
                                if ( name ) {
                                        if ( strcmp( entry->q_name, name) == 0)
                                        {
                                                found = TRUE;
                                                break;
                                        }
                                } else {
                                        found = TRUE;
                                        break;
                                }
                        }
                        entry = entry->q_next;
                } while ( entry != hashbin->hb_queue[ bin ] );
        }

        /*
         * If entry was found, dequeue it
         */
        if ( found ) {
                dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                                 (irda_queue_t*) entry );
                hashbin->hb_size--;

                /*
                 *  Check if this item is the currently selected item, and in
                 *  that case we must reset hb_current
                 */
                if ( entry == hashbin->hb_current)
                        hashbin->hb_current = NULL;
        }

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /* Return */
        if ( found )
                return entry;
        else
                return NULL;

}
EXPORT_SYMBOL(hashbin_remove);

/*
 *  Function hashbin_remove_this (hashbin, entry)
 *
 *    Remove the given entry
 *
 * In some cases, the user of hashbin can't guarantee the uniqueness
 * of either the hashv or name.
 * In those cases, using the above function is guaranteed to cause troubles,
 * so we use this one instead...
 * And by the way, it's also faster, because we skip the search phase ;-)
 */
void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
{
        unsigned long flags = 0;
        int     bin;
        long    hashv;

        IRDA_DEBUG( 4, "%s()\n", __func__);

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
        IRDA_ASSERT( entry != NULL, return NULL;);

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /* Check if valid and not already removed... */
        if((entry->q_next == NULL) || (entry->q_prev == NULL)) {
                entry = NULL;
                goto out;
        }

        /*
         * Locate hashbin
         */
        hashv = entry->q_hash;
        bin = GET_HASHBIN( hashv );

        /*
         * Dequeue the entry...
         */
        dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                         (irda_queue_t*) entry );
        hashbin->hb_size--;
        entry->q_next = NULL;
        entry->q_prev = NULL;

        /*
         *  Check if this item is the currently selected item, and in
         *  that case we must reset hb_current
         */
        if ( entry == hashbin->hb_current)
                hashbin->hb_current = NULL;
out:
        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        return entry;
}
EXPORT_SYMBOL(hashbin_remove_this);
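
/*
 * Usage sketch (illustrative only) : since the caller already holds a
 * pointer to the exact entry, no key is needed :
 *
 *      hashbin_remove_this(cache, (irda_queue_t *) obj);
 */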

/*********************** HASHBIN ENUMERATION ***********************/

/*
 * Function hashbin_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 */
void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
{
        int bin;
        irda_queue_t* entry;

        IRDA_DEBUG( 4, "hashbin_find()\n");

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /*
         * Search for entry
         */
        entry = hashbin->hb_queue[ bin];
        if ( entry ) {
                do {
                        /*
                         * Check for key
                         */
                        if ( entry->q_hash == hashv ) {
                                /*
                                 * Name compare too?
                                 */
                                if ( name ) {
                                        if ( strcmp( entry->q_name, name ) == 0 ) {
                                                return entry;
                                        }
                                } else {
                                        return entry;
                                }
                        }
                        entry = entry->q_next;
                } while ( entry != hashbin->hb_queue[ bin ] );
        }

        return NULL;
}
EXPORT_SYMBOL(hashbin_find);

/*
 * Function hashbin_lock_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 * Same, but with spinlock protection...
 * I call it safe, but it's only safe with respect to the hashbin, not its
 * content. - Jean II
 */
void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name )
{
        unsigned long flags = 0;
        irda_queue_t* entry;

        /* Synchronize */
        spin_lock_irqsave(&hashbin->hb_spinlock, flags);

        /*
         * Search for entry
         */
        entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );

        /* Release lock */
        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

        return entry;
}
EXPORT_SYMBOL(hashbin_lock_find);

/*
 * Function hashbin_find_next (hashbin, hashv, name, pnext)
 *
 *    Find an item with the given hashv or name, and its successor
 *
 * This function allows concurrent enumerations without the need to lock
 * over the whole session, because the caller keeps the context of the
 * search. On the other hand, it might fail and return NULL if the entry
 * is removed. - Jean II
 */
void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
                         void ** pnext)
{
        unsigned long flags = 0;
        irda_queue_t* entry;

        /* Synchronize */
        spin_lock_irqsave(&hashbin->hb_spinlock, flags);

        /*
         * Search for current entry
         * This allows us to check if the current item is still in the
         * hashbin or has been removed.
         */
        entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );

        /*
         * Trick hashbin_get_next() to return what we want
         */
        if(entry) {
                hashbin->hb_current = entry;
                *pnext = hashbin_get_next( hashbin );
        } else
                *pnext = NULL;

        /* Release lock */
        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

        return entry;
}
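
/*
 * Usage sketch (illustrative only) : a caller that cannot hold the lock
 * across a whole enumeration can re-find the current entry and fetch its
 * successor in one protected step :
 *
 *      void *next;
 *
 *      entry = hashbin_find_next(hashbin, 0, cur_name, &next);
 *      if (entry == NULL)
 *              ... the current entry was removed, restart or give up ...
 *      else
 *              ... 'next' is the following entry, or NULL at the end ...
 */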

/*
 * Function hashbin_get_first (hashbin)
 *
 *    Get a pointer to the first element in the hashbin. This function must
 *    be called before any calls to hashbin_get_next()!
 *
 */
irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
{
        irda_queue_t *entry;
        int i;

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        if ( hashbin == NULL)
                return NULL;

        for ( i = 0; i < HASHBIN_SIZE; i ++ ) {
                entry = hashbin->hb_queue[ i];
                if ( entry) {
                        hashbin->hb_current = entry;
                        return entry;
                }
        }
        /*
         * Did not find any item in hashbin
         */
        return NULL;
}
EXPORT_SYMBOL(hashbin_get_first);

/*
 * Function hashbin_get_next (hashbin)
 *
 *    Get next item in hashbin. A series of hashbin_get_next() calls must
 *    be started by a call to hashbin_get_first(). The function returns
 *    NULL when all items have been traversed
 *
 * The context of the search is stored within the hashbin, so you must
 * protect yourself from concurrent enumerations. - Jean II
 */
irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
{
        irda_queue_t* entry;
        int bin;
        int i;

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        if ( hashbin->hb_current == NULL) {
                IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
                return NULL;
        }
        entry = hashbin->hb_current->q_next;
        bin = GET_HASHBIN( entry->q_hash);

        /*
         *  Make sure that we are not back at the beginning of the queue
         *  again
         */
        if ( entry != hashbin->hb_queue[ bin ]) {
                hashbin->hb_current = entry;

                return entry;
        }

        /*
         *  Check that this is not the last queue in hashbin
         */
        if ( bin >= HASHBIN_SIZE)
                return NULL;

        /*
         *  Move to next queue in hashbin
         */
        bin++;
        for ( i = bin; i < HASHBIN_SIZE; i++ ) {
                entry = hashbin->hb_queue[ i];
                if ( entry) {
                        hashbin->hb_current = entry;

                        return entry;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(hashbin_get_next);