2 * 2002-10-18 written by Jim Houston jim.houston@ccur.com
3 * Copyright (C) 2002 by Concurrent Computer Corporation
4 * Distributed under the GNU GPL license version 2.
6 * Modified by George Anzinger to reuse immediately and to use
7 * find bit instructions. Also removed _irq on spinlocks.
9 * Modified by Nadia Derbey to make it RCU safe.
11 * Small id to pointer translation service.
13 * It uses a radix tree like structure as a sparse array indexed
14 * by the id to obtain the pointer. The bitmap makes allocating
17 * You call it to allocate an id (an int) and associate with that id a
18 * pointer or whatever, we treat it as a (void *). You can pass this
19 * id to a user for him to pass back at a later time. You then pass
20 * that id to this code and it returns your pointer.
22 * You can release ids at any time. When all ids are released, most of
23 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
24 * don't need to go to the memory "store" during an id allocate, just
25 * so you don't need to be too concerned about locking and conflicts
26 * with the slab allocator.
29 #ifndef TEST // to test in user space...
30 #include <linux/slab.h>
31 #include <linux/init.h>
32 #include <linux/module.h>
34 #include <linux/err.h>
35 #include <linux/string.h>
36 #include <linux/idr.h>
37 #include <linux/spinlock.h>
/* slab cache from which all struct idr_layer nodes are allocated */
39 static struct kmem_cache *idr_layer_cache;
/* serializes the ida_simple_get()/ida_simple_remove() convenience API */
40 static DEFINE_SPINLOCK(simple_ida_lock);
42 /* the maximum ID which can be allocated given idr->layers */
43 static int idr_max(int layers)
/* clamp to MAX_ID_SHIFT so the shift below stays within the width of int */
45 int bits = min_t(int, layers * IDR_BITS, MAX_ID_SHIFT);
47 return (1 << bits) - 1;
/*
 * Pop one idr_layer off the idr's private freelist, or NULL if empty.
 * Free layers are chained through their ary[0] pointer.
 */
50 static struct idr_layer *get_from_free_list(struct idr *idp)
55 spin_lock_irqsave(&idp->lock, flags);
56 if ((p = idp->id_free)) {
57 idp->id_free = p->ary[0];
61 spin_unlock_irqrestore(&idp->lock, flags);
/* RCU callback: return a layer to the slab cache after the grace period */
65 static void idr_layer_rcu_free(struct rcu_head *head)
67 struct idr_layer *layer;
69 layer = container_of(head, struct idr_layer, rcu_head);
70 kmem_cache_free(idr_layer_cache, layer);
/*
 * Defer freeing of @p until after an RCU grace period, so lockless
 * readers (e.g. idr_find()) can still safely traverse it.
 */
73 static inline void free_layer(struct idr_layer *p)
75 call_rcu(&p->rcu_head, idr_layer_rcu_free);
78 /* only called when idp->lock is held */
79 static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
/* push @p onto the freelist; free layers link through ary[0] */
81 p->ary[0] = idp->id_free;
/* Locked wrapper around __move_to_free_list(). */
86 static void move_to_free_list(struct idr *idp, struct idr_layer *p)
91 * Depends on the return element being zeroed.
93 spin_lock_irqsave(&idp->lock, flags);
94 __move_to_free_list(idp, p);
95 spin_unlock_irqrestore(&idp->lock, flags);
/*
 * Mark id @id allocated in the leaf layer pa[0] and propagate "full"
 * bits upward: when a layer fills completely, set the corresponding
 * bit in its parent, possibly repeating all the way to the root.
 */
98 static void idr_mark_full(struct idr_layer **pa, int id)
100 struct idr_layer *p = pa[0];
103 __set_bit(id & IDR_MASK, &p->bitmap);
105 * If this layer is full mark the bit in the layer above to
106 * show that this part of the radix tree is full. This may
107 * complete the layer above and require walking up the radix
110 while (p->bitmap == IDR_FULL) {
114 __set_bit((id & IDR_MASK), &p->bitmap);
119 * idr_pre_get - reserve resources for idr allocation
121 * @gfp_mask: memory allocation flags
123 * This function should be called prior to calling the idr_get_new* functions.
124 * It preallocates enough memory to satisfy the worst possible allocation. The
125 * caller should pass in GFP_KERNEL if possible. This of course requires that
126 * no spinning locks be held.
128 * If the system is REALLY out of memory this function returns %0,
131 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
/* top up the freelist until it holds IDR_FREE_MAX zeroed layers */
133 while (idp->id_free_cnt < IDR_FREE_MAX) {
134 struct idr_layer *new;
135 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
138 move_to_free_list(idp, new);
142 EXPORT_SYMBOL(idr_pre_get);
/*
 * Walk the radix tree from the top layer down to a leaf, finding the
 * first free slot with id >= *starting_id.  On success *starting_id is
 * updated to the allocated id and pa[] records the path of layers
 * (pa[0] = leaf).  Returns IDR_NEED_TO_GROW when the tree must gain a
 * layer, or IDR_NOMORE_SPACE when the id space is exhausted.
 */
144 static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
147 struct idr_layer *p, *new;
158 * We run around this while until we reach the leaf node...
160 n = (id >> (IDR_BITS*l)) & IDR_MASK;
/* first clear bit at or after slot n in this layer's bitmap */
162 m = find_next_bit(&bm, IDR_SIZE, n);
164 /* no space available go back to previous layer. */
/* round id up past this exhausted subtree before retrying above */
167 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
169 /* if already at the top layer, we need to grow */
170 if (id >= 1 << (idp->layers * IDR_BITS)) {
172 return IDR_NEED_TO_GROW;
177 /* If we need to go up one layer, continue the
178 * loop; otherwise, restart from the top.
180 sh = IDR_BITS * (l + 1);
181 if (oid >> sh == id >> sh)
/* replace the digit at this level: clear old slot n, set new slot m */
188 id = ((id >> sh) ^ n ^ m) << sh;
190 if ((id >= MAX_ID_BIT) || (id < 0))
191 return IDR_NOMORE_SPACE;
195 * Create the layer below if it is missing.
198 new = get_from_free_list(idp);
/* publish the new child so RCU readers see a fully-initialized node */
202 rcu_assign_pointer(p->ary[m], new);
/*
 * Find (or make room for) an empty slot with id >= starting_id.
 * Grows the tree by adding layers on top until idr_max(layers) can
 * cover the requested id, then delegates to sub_alloc(); retries on
 * IDR_NEED_TO_GROW.  Returns the allocated id, with the layer path
 * recorded in pa[].
 */
213 static int idr_get_empty_slot(struct idr *idp, int starting_id,
214 struct idr_layer **pa)
216 struct idr_layer *p, *new;
223 layers = idp->layers;
225 if (!(p = get_from_free_list(idp)))
231 * Add a new layer to the top of the tree if the requested
232 * id is larger than the currently allocated space.
234 while (id > idr_max(layers)) {
237 /* special case: if the tree is currently empty,
238 * then we grow the tree by moving the top node
244 if (!(new = get_from_free_list(idp)) {
246 * The allocation failed. If we built part of
247 * the structure tear it down.
249 spin_lock_irqsave(&idp->lock, flags);
/* unwind the partially-built chain of new top layers */
250 for (new = p; p && p != idp->top; new = p) {
253 new->bitmap = new->count = 0;
254 __move_to_free_list(idp, new);
256 spin_unlock_irqrestore(&idp->lock, flags);
261 new->layer = layers-1;
/* if the old top was full, mark its slot full in the new top */
262 if (p->bitmap == IDR_FULL)
263 __set_bit(0, &new->bitmap);
266 rcu_assign_pointer(idp->top, p);
267 idp->layers = layers;
268 v = sub_alloc(idp, &id, pa);
269 if (v == IDR_NEED_TO_GROW)
274 static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
/*
 * Allocate an empty slot at or above @starting_id, install @ptr in it
 * and mark it full.  Returns the id, or a negative IDR_* code from
 * idr_get_empty_slot() on failure.
 */
274 static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
276 struct idr_layer *pa[MAX_LEVEL + 1];
279 id = idr_get_empty_slot(idp, starting_id, pa);
282 * Successfully found an empty slot. Install the user
283 * pointer and mark the slot full.
/* leaf slots store the user pointer, cast to idr_layer * */
285 rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
286 (struct idr_layer *)ptr);
288 idr_mark_full(pa, id);
295 * idr_get_new_above - allocate new idr entry above or equal to a start id
297 * @ptr: pointer you want associated with the id
298 * @starting_id: id to start search at
299 * @id: pointer to the allocated handle
301 * This is the allocate id function. It should be called with any
304 * If allocation from IDR's private freelist fails, idr_get_new_above() will
305 * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
306 * IDR's preallocation and then retry the idr_get_new_above() call.
308 * If the idr is full idr_get_new_above() will return %-ENOSPC.
310 * @id returns a value in the range @starting_id ... %0x7fffffff
312 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
316 rv = idr_get_new_above_int(idp, ptr, starting_id);
318 * This is a cheap hack until the IDR code can be fixed to
319 * return proper error values.
/* map internal IDR_* codes to -EAGAIN / -ENOSPC */
322 return _idr_rc_to_errno(rv);
326 EXPORT_SYMBOL(idr_get_new_above);
329 * idr_get_new - allocate new idr entry
331 * @ptr: pointer you want associated with the id
332 * @id: pointer to the allocated handle
334 * If allocation from IDR's private freelist fails, idr_get_new_above() will
335 * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
336 * IDR's preallocation and then retry the idr_get_new_above() call.
338 * If the idr is full idr_get_new_above() will return %-ENOSPC.
340 * @id returns a value in the range %0 ... %0x7fffffff
342 int idr_get_new(struct idr *idp, void *ptr, int *id)
/* same as idr_get_new_above() with starting_id == 0 */
346 rv = idr_get_new_above_int(idp, ptr, 0);
348 * This is a cheap hack until the IDR code can be fixed to
349 * return proper error values.
352 return _idr_rc_to_errno(rv);
356 EXPORT_SYMBOL(idr_get_new);
/* Log a diagnostic when idr_remove() is asked for an unallocated id. */
358 static void idr_remove_warning(int id)
361 "idr_remove called for id=%d which is not allocated.\n", id);
/*
 * Remove @id from the tree: walk down clearing "full" bits along the
 * path, clear the leaf slot, then free any layers that become empty
 * (count drops to 0) on the way back up.  Warns if @id was not
 * actually allocated.
 */
365 static void sub_remove(struct idr *idp, int shift, int id)
367 struct idr_layer *p = idp->top;
368 struct idr_layer **pa[MAX_LEVEL + 1];
369 struct idr_layer ***paa = &pa[0];
370 struct idr_layer *to_free;
/* descend, clearing the full-bit for this subtree at each level */
376 while ((shift > 0) && p) {
377 n = (id >> shift) & IDR_MASK;
378 __clear_bit(n, &p->bitmap);
384 if (likely(p != NULL && test_bit(n, &p->bitmap))){
385 __clear_bit(n, &p->bitmap);
386 rcu_assign_pointer(p->ary[n], NULL);
/* walk back up, RCU-freeing layers whose count hits zero */
388 while(*paa && ! --((**paa)->count)){
399 idr_remove_warning(id);
403 * idr_remove - remove the given id and free its slot
407 void idr_remove(struct idr *idp, int id)
410 struct idr_layer *to_free;
412 /* Mask off upper bits we don't use for the search. */
415 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
416 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
419 * Single child at leftmost slot: we can shrink the tree.
420 * This level is not needed anymore since when layers are
421 * inserted, they are inserted at the top of the existing
425 p = idp->top->ary[0];
426 rcu_assign_pointer(idp->top, p);
/* reset before returning the old top to the freelist */
428 to_free->bitmap = to_free->count = 0;
/* trim the freelist back down to IDR_FREE_MAX cached layers */
431 while (idp->id_free_cnt >= IDR_FREE_MAX) {
432 p = get_from_free_list(idp);
434 * Note: we don't call the rcu callback here, since the only
435 * layers that fall into the freelist are those that have been
438 kmem_cache_free(idr_layer_cache, p);
442 EXPORT_SYMBOL(idr_remove);
445 * idr_remove_all - remove all ids from the given idr tree
448 * idr_destroy() only frees up unused, cached idp_layers, but this
449 * function will remove all id mappings and leave all idp_layers
452 * A typical clean-up sequence for objects stored in an idr tree will
453 * use idr_for_each() to free all objects, if necessary, then
454 * idr_remove_all() to remove all ids, and idr_destroy() to free
455 * up the cached idr_layers.
457 void idr_remove_all(struct idr *idp)
462 struct idr_layer *pa[MAX_LEVEL + 1];
463 struct idr_layer **paa = &pa[0];
465 n = idp->layers * IDR_BITS;
/* detach the whole tree first so new lookups see an empty idr */
467 rcu_assign_pointer(idp->top, NULL);
468 max = idr_max(idp->layers);
/* depth-first sweep over every id in [0, max] */
471 while (id >= 0 && id <= max) {
472 while (n > IDR_BITS && p) {
475 p = p->ary[(id >> n) & IDR_MASK];
480 /* Get the highest bit that the above add changed from 0->1. */
481 while (n < fls(id ^ bt_mask)) {
490 EXPORT_SYMBOL(idr_remove_all);
493 * idr_destroy - release all cached layers within an idr tree
496 void idr_destroy(struct idr *idp)
/* drain the freelist, returning every cached layer to the slab */
498 while (idp->id_free_cnt) {
499 struct idr_layer *p = get_from_free_list(idp);
500 kmem_cache_free(idr_layer_cache, p);
503 EXPORT_SYMBOL(idr_destroy);
506 * idr_find - return pointer for given id
510 * Return the pointer given the id it has been registered with. A %NULL
511 * return indicates that @id is not valid or you passed %NULL in
514 * This function can be called under rcu_read_lock(), given that the leaf
515 * pointers lifetimes are correctly managed.
517 void *idr_find(struct idr *idp, int id)
522 p = rcu_dereference_raw(idp->top);
525 n = (p->layer+1) * IDR_BITS;
527 /* Mask off upper bits we don't use for the search. */
530 if (id > idr_max(p->layer + 1))
/* descend one layer per iteration; each level consumes IDR_BITS of id */
536 BUG_ON(n != p->layer*IDR_BITS);
537 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
541 EXPORT_SYMBOL(idr_find);
544 * idr_for_each - iterate through all stored pointers
546 * @fn: function to be called for each pointer
547 * @data: data passed back to callback function
549 * Iterate over the pointers registered with the given idr. The
550 * callback function will be called for each pointer currently
551 * registered, passing the id, the pointer and the data pointer passed
552 * to this function. It is not safe to modify the idr tree while in
553 * the callback, so functions such as idr_get_new and idr_remove are
556 * We check the return of @fn each time. If it returns anything other
557 * than %0, we break out and return that value.
559 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
561 int idr_for_each(struct idr *idp,
562 int (*fn)(int id, void *p, void *data), void *data)
564 int n, id, max, error = 0;
566 struct idr_layer *pa[MAX_LEVEL + 1];
567 struct idr_layer **paa = &pa[0];
569 n = idp->layers * IDR_BITS;
570 p = rcu_dereference_raw(idp->top);
571 max = idr_max(idp->layers);
/* in-order traversal of every populated leaf in [0, max] */
574 while (id >= 0 && id <= max) {
578 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
582 error = fn(id, (void *)p, data);
/* pop back up to the level where the id increment carried */
588 while (n < fls(id)) {
596 EXPORT_SYMBOL(idr_for_each);
599 * idr_get_next - lookup next object of id to given id.
601 * @nextidp: pointer to lookup key
603 * Returns pointer to registered object with id, which is next number to
604 * given id. After being looked up, *@nextidp will be updated for the next
607 * This function can be called under rcu_read_lock(), given that the leaf
608 * pointers lifetimes are correctly managed.
610 void *idr_get_next(struct idr *idp, int *nextidp)
612 struct idr_layer *p, *pa[MAX_LEVEL + 1];
613 struct idr_layer **paa = &pa[0];
618 p = rcu_dereference_raw(idp->top);
621 n = (p->layer + 1) * IDR_BITS;
622 max = idr_max(p->layer + 1);
/* scan forward from *nextidp until a populated leaf is found */
624 while (id >= 0 && id <= max) {
628 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
637 * Proceed to the next layer at the current level. Unlike
638 * idr_for_each(), @id isn't guaranteed to be aligned to
639 * layer boundary at this point and adding 1 << n may
640 * incorrectly skip IDs. Make sure we jump to the
641 * beginning of the next layer using round_up().
643 id = round_up(id + 1, 1 << n);
644 while (n < fls(id)) {
651 EXPORT_SYMBOL(idr_get_next);
655 * idr_replace - replace pointer for given id
657 * @ptr: pointer you want associated with the id
660 * Replace the pointer registered with an id and return the old value.
661 * A %-ENOENT return indicates that @id was not found.
662 * A %-EINVAL return indicates that @id was not within valid constraints.
664 * The caller must serialize with writers.
666 void *idr_replace(struct idr *idp, void *ptr, int id)
669 struct idr_layer *p, *old_p;
673 return ERR_PTR(-EINVAL);
675 n = (p->layer+1) * IDR_BITS;
680 return ERR_PTR(-EINVAL);
/* walk down to the leaf layer holding the slot for @id */
683 while ((n > 0) && p) {
684 p = p->ary[(id >> n) & IDR_MASK];
/* slot must exist and be marked allocated, else nothing to replace */
689 if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
690 return ERR_PTR(-ENOENT);
693 rcu_assign_pointer(p->ary[n], ptr);
697 EXPORT_SYMBOL(idr_replace);
/* Create the idr_layer slab cache at boot; SLAB_PANIC means no error path. */
699 void __init idr_init_cache(void)
701 idr_layer_cache = kmem_cache_create("idr_layer_cache",
702 sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
706 * idr_init - initialize idr handle
709 * This function is use to set up the handle (@idp) that you will pass
710 * to the rest of the functions.
712 void idr_init(struct idr *idp)
/* zero everything (top, layers, freelist) then init the lock */
714 memset(idp, 0, sizeof(struct idr));
715 spin_lock_init(&idp->lock);
717 EXPORT_SYMBOL(idr_init);
721 * DOC: IDA description
722 * IDA - IDR based ID allocator
724 * This is an id allocator without id -> pointer translation. Memory
725 * usage is much lower than full blown idr because each id only
726 * occupies a bit. ida uses a custom leaf node which contains
727 * IDA_BITMAP_BITS slots.
729 * 2007-04-25 written by Tejun Heo <htejun@gmail.com>
/*
 * Cache @bitmap in ida->free_bitmap for reuse if that slot is empty;
 * the double check under the lock keeps at most one cached bitmap.
 */
732 static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
736 if (!ida->free_bitmap) {
737 spin_lock_irqsave(&ida->idr.lock, flags);
738 if (!ida->free_bitmap) {
739 ida->free_bitmap = bitmap;
742 spin_unlock_irqrestore(&ida->idr.lock, flags);
749 * ida_pre_get - reserve resources for ida allocation
751 * @gfp_mask: memory allocation flag
753 * This function should be called prior to locking and calling the
754 * following function. It preallocates enough memory to satisfy the
755 * worst possible allocation.
757 * If the system is REALLY out of memory this function returns %0,
760 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
762 /* allocate idr_layers */
763 if (!idr_pre_get(&ida->idr, gfp_mask))
766 /* allocate free_bitmap */
767 if (!ida->free_bitmap) {
768 struct ida_bitmap *bitmap;
770 bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
/* stash it in ida->free_bitmap (or keep the existing cached one) */
774 free_bitmap(ida, bitmap);
779 EXPORT_SYMBOL(ida_pre_get);
782 * ida_get_new_above - allocate new ID above or equal to a start id
784 * @starting_id: id to start search at
785 * @p_id: pointer to the allocated handle
787 * Allocate new ID above or equal to @starting_id. It should be called
788 * with any required locks.
790 * If memory is required, it will return %-EAGAIN, you should unlock
791 * and go back to the ida_pre_get() call. If the ida is full, it will
794 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
796 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
798 struct idr_layer *pa[MAX_LEVEL + 1];
799 struct ida_bitmap *bitmap;
/* each idr slot covers IDA_BITMAP_BITS ids; split the start id */
801 int idr_id = starting_id / IDA_BITMAP_BITS;
802 int offset = starting_id % IDA_BITMAP_BITS;
806 /* get vacant slot */
807 t = idr_get_empty_slot(&ida->idr, idr_id, pa);
809 return _idr_rc_to_errno(t);
811 if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
818 /* if bitmap isn't there, create a new one */
819 bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
/* take the cached bitmap preallocated by ida_pre_get(), if any */
821 spin_lock_irqsave(&ida->idr.lock, flags);
822 bitmap = ida->free_bitmap;
823 ida->free_bitmap = NULL;
824 spin_unlock_irqrestore(&ida->idr.lock, flags);
829 memset(bitmap, 0, sizeof(struct ida_bitmap));
830 rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
835 /* lookup for empty slot */
836 t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
837 if (t == IDA_BITMAP_BITS) {
838 /* no empty slot after offset, continue to the next chunk */
844 id = idr_id * IDA_BITMAP_BITS + t;
845 if (id >= MAX_ID_BIT)
848 __set_bit(t, bitmap->bitmap);
/* when the leaf bitmap fills up, mark the idr slot full too */
849 if (++bitmap->nr_busy == IDA_BITMAP_BITS)
850 idr_mark_full(pa, idr_id);
854 /* Each leaf node can handle nearly a thousand slots and the
855 * whole idea of ida is to have small memory foot print.
856 * Throw away extra resources one by one after each successful
859 if (ida->idr.id_free_cnt || ida->free_bitmap) {
860 struct idr_layer *p = get_from_free_list(&ida->idr);
862 kmem_cache_free(idr_layer_cache, p);
867 EXPORT_SYMBOL(ida_get_new_above);
870 * ida_get_new - allocate new ID
872 * @p_id: pointer to the allocated handle
874 * Allocate new ID. It should be called with any required locks.
876 * If memory is required, it will return %-EAGAIN, you should unlock
877 * and go back to the idr_pre_get() call. If the idr is full, it will
880 * @p_id returns a value in the range %0 ... %0x7fffffff.
882 int ida_get_new(struct ida *ida, int *p_id)
/* convenience wrapper: search from id 0 */
884 return ida_get_new_above(ida, 0, p_id);
886 EXPORT_SYMBOL(ida_get_new);
889 * ida_remove - remove the given ID
893 void ida_remove(struct ida *ida, int id)
895 struct idr_layer *p = ida->idr.top;
896 int shift = (ida->idr.layers - 1) * IDR_BITS;
/* split id into the idr slot index and the bit offset in its bitmap */
897 int idr_id = id / IDA_BITMAP_BITS;
898 int offset = id % IDA_BITMAP_BITS;
900 struct ida_bitmap *bitmap;
902 /* clear full bits while looking up the leaf idr_layer */
903 while ((shift > 0) && p) {
904 n = (idr_id >> shift) & IDR_MASK;
905 __clear_bit(n, &p->bitmap);
913 n = idr_id & IDR_MASK;
914 __clear_bit(n, &p->bitmap);
916 bitmap = (void *)p->ary[n];
917 if (!test_bit(offset, bitmap->bitmap))
920 /* update bitmap and remove it if empty */
921 __clear_bit(offset, bitmap->bitmap);
922 if (--bitmap->nr_busy == 0) {
923 __set_bit(n, &p->bitmap); /* to please idr_remove() */
924 idr_remove(&ida->idr, idr_id);
925 free_bitmap(ida, bitmap);
932 "ida_remove called for id=%d which is not allocated.\n", id);
934 EXPORT_SYMBOL(ida_remove);
937 * ida_destroy - release all cached layers within an ida tree
940 void ida_destroy(struct ida *ida)
/* free cached idr layers, then the (possibly NULL) cached bitmap */
942 idr_destroy(&ida->idr);
943 kfree(ida->free_bitmap);
945 EXPORT_SYMBOL(ida_destroy);
948 * ida_simple_get - get a new id.
949 * @ida: the (initialized) ida.
950 * @start: the minimum id (inclusive, < 0x8000000)
951 * @end: the maximum id (exclusive, < 0x8000000 or 0)
952 * @gfp_mask: memory allocation flags
954 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
955 * On memory allocation failure, returns -ENOMEM.
957 * Use ida_simple_remove() to get rid of an id.
959 int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
/* ids are ints internally, so the unsigned args must fit in int */
966 BUG_ON((int)start < 0);
967 BUG_ON((int)end < 0);
977 if (!ida_pre_get(ida, gfp_mask))
/* allocation itself runs under the global simple_ida_lock */
980 spin_lock_irqsave(&simple_ida_lock, flags);
981 ret = ida_get_new_above(ida, start, &id);
990 spin_unlock_irqrestore(&simple_ida_lock, flags);
/* -EAGAIN means preallocation was consumed; retry the whole sequence */
992 if (unlikely(ret == -EAGAIN))
997 EXPORT_SYMBOL(ida_simple_get);
1000 * ida_simple_remove - remove an allocated id.
1001 * @ida: the (initialized) ida.
1002 * @id: the id returned by ida_simple_get.
1004 void ida_simple_remove(struct ida *ida, unsigned int id)
1006 unsigned long flags;
/* ids handed out by ida_simple_get() are always non-negative ints */
1008 BUG_ON((int)id < 0);
1009 spin_lock_irqsave(&simple_ida_lock, flags);
1010 ida_remove(ida, id);
1011 spin_unlock_irqrestore(&simple_ida_lock, flags);
1013 EXPORT_SYMBOL(ida_simple_remove);
1016 * ida_init - initialize ida handle
1019 * This function is use to set up the handle (@ida) that you will pass
1020 * to the rest of the functions.
1022 void ida_init(struct ida *ida)
/* zero the wrapper (clears free_bitmap) and init the embedded idr */
1024 memset(ida, 0, sizeof(struct ida));
1025 idr_init(&ida->idr);
1028 EXPORT_SYMBOL(ida_init);