linux-2.6.24/lib/idr.c (or1k_soc_on_altera_embedded_dev_kit, trunk, rev 11)
/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
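
/*
 * Typical usage (a sketch; "my_idr" and "my_obj" are placeholder
 * names).  Callers loop on -EAGAIN as described in the kernel-doc of
 * idr_get_new() below:
 *
 *      struct idr my_idr;
 *      int id, ret;
 *
 *      idr_init(&my_idr);
 *      do {
 *              if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *                      return -ENOMEM;
 *              ret = idr_get_new(&my_idr, my_obj, &id);
 *      } while (ret == -EAGAIN);
 *      if (ret)
 *              return ret;     // -ENOSPC: id space exhausted
 *      ...
 *      my_obj = idr_find(&my_idr, id);         // look the pointer up
 *      ...
 *      idr_remove(&my_idr, id);                // release the id
 *      idr_destroy(&my_idr);                   // drop cached layers
 */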

#ifndef TEST                        /* to test in user space... */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

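/*
 * Free idr_layers are kept on a per-idr free list, threaded through
 * ary[0] and refilled by idr_pre_get().
 */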
static struct idr_layer *alloc_layer(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return p;
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __free_layer(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:        idr handle
 * @gfp_mask:   memory allocation flags
 *
 * This function should be called prior to locking and calling
 * idr_get_new() or idr_get_new_above().  It preallocates enough memory
 * to satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return 0;
                free_layer(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);

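/*
 * Find a vacant slot at or above *starting_id, filling pa[] with the
 * idr_layer visited at each level, leaf first (pa[0]).  Returns the
 * allocated id, or -1 if no preallocated layer is available, -2 if the
 * tree must grow (the caller restarts via idr_get_empty_slot()), or -3
 * if the id space is exhausted.
 */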
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
 restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return -2;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
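                /*
                 * m is the first free slot at or above n.  If it is a
                 * later slot, substitute m for digit l of id and clear
                 * all lower digits, giving the smallest id that uses
                 * slot m at this level.
                 */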
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return -3;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        if (!(new = alloc_layer(idp)))
                                return -1;
                        p->ary[m] = new;
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = alloc_layer(idp)))
                        return -1;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count)
                        continue;
                if (!(new = alloc_layer(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __free_layer(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        idp->top = p;
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == -2)
                goto build_up;
        return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                if (rv == -1)
                        return -EAGAIN;
                else /* Will be -3 */
                        return -ENOSPC;
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                if (rv == -1)
                        return -EAGAIN;
                else /* Will be -3 */
                        return -ENOSPC;
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
               "idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}

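/*
 * Remove id from the tree, collapsing now-empty layers.  pa[] records
 * the address of the slot visited at each level so that layers whose
 * count drops to zero can be unlinked and returned to the free list.
 */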
static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                p->ary[n] = NULL;
                while (*paa && !--((**paa)->count)) {
                        free_layer(idp, **paa);
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /* We can drop a layer */
                p = idp->top->ary[0];
                idp->top->bitmap = idp->top->count = 0;
                free_layer(idp, idp->top);
                idp->top = p;
                --idp->layers;
        }
        /* Keep the free list from growing past IDR_FREE_MAX entries. */
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = alloc_layer(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                id += 1 << n;
                while (n < fls(id)) {
                        if (p) {
                                memset(p, 0, sizeof *p);
                                free_layer(idp, p);
                        }
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->top = NULL;
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
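
/*
 * Example of the clean-up sequence described above (a sketch; free_one,
 * my_idr and the stored object type are placeholders):
 *
 *      static int free_one(int id, void *p, void *data)
 *      {
 *              kfree(p);
 *              return 0;
 *      }
 *
 *      idr_for_each(&my_idr, free_one, NULL);
 *      idr_remove_all(&my_idr);
 *      idr_destroy(&my_idr);
 */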

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = alloc_layer(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;

        while (n > 0 && p) {
                n -= IDR_BITS;
                p = p->ary[(id >> n) & IDR_MASK];
        }
        return (void *)p;
}
EXPORT_SYMBOL(idr_find);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return (as an ERR_PTR) indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        p->ary[n] = ptr;

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
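
/*
 * Example (a sketch; my_idr, new_obj and id are placeholders): since
 * idr_replace() encodes errors with ERR_PTR(), check the result with
 * IS_ERR():
 *
 *      void *old = idr_replace(&my_idr, new_obj, id);
 *
 *      if (IS_ERR(old))
 *              return PTR_ERR(old);
 */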

static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
{
        memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
        if (!idr_layer_cache)
                idr_layer_cache = kmem_cache_create("idr_layer_cache",
                        sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
        return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp:        idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        init_id_cache();
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

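/*
 * At most one spare ida_bitmap is cached per ida; free_bitmap() either
 * parks @bitmap there or, if the cache is already occupied, kfree()s it.
 */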
static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:        ida handle
 * @gfp_mask:   memory allocation flags
 *
 * This function should be called prior to locking and calling
 * ida_get_new() or ida_get_new_above().  It preallocates enough memory
 * to satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

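/*
 * An ida id maps to a leaf slot and a bit within that leaf's bitmap:
 * id = idr_id * IDA_BITMAP_BITS + offset, where the underlying idr
 * stores one struct ida_bitmap per idr_id.
 */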

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:        ida handle
 * @starting_id: id to start search at
 * @p_id:       pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

 restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0) {
                if (t == -1)
                        return -EAGAIN;
                else /* will be -3 */
                        return -ENOSPC;
        }

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
                pa[0]->count++;
        }

        /* look for an empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = alloc_layer(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:        ida handle
 * @p_id:       pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
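
/*
 * Typical ida usage (a sketch; "my_ida" is a placeholder), following
 * the same pre-get/retry pattern as idr:
 *
 *      struct ida my_ida;
 *      int id, ret;
 *
 *      ida_init(&my_ida);
 *      do {
 *              if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *                      return -ENOMEM;
 *              ret = ida_get_new(&my_ida, &id);
 *      } while (ret == -EAGAIN);
 *      ...
 *      ida_remove(&my_ida, id);
 *      ida_destroy(&my_ida);
 */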

/**
 * ida_remove - remove the given ID
 * @ida:        ida handle
 * @id:         ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!bitmap || !test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);       /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

 err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:        ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:        ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
