OpenCores Subversion repository: test_project
URL: https://opencores.org/ocsvn/test_project/test_project/trunk
File: test_project/trunk/linux_sd_driver/include/linux/slub_def.h (rev 62, author marcus.erl)

#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_cpu {
        void **freelist;
        struct page *page;
        int node;
        unsigned int offset;
        unsigned int objsize;
};
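
/*
 * Per-CPU allocation state (the fields above): freelist points at the
 * first free object on the currently active slab, page is the slab being
 * allocated from, node is that page's NUMA node, offset is the free
 * pointer offset within an object (in word units), and objsize caches the
 * object size so the hot path need not dereference struct kmem_cache.
 */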

struct kmem_cache_node {
        spinlock_t list_lock;   /* Protect partial list and nr_partial */
        unsigned long nr_partial;
        atomic_long_t nr_slabs;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        struct list_head full;
#endif
};
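
/*
 * Per-node bookkeeping: partial holds slabs that still contain free
 * objects (nr_partial counts them under list_lock), nr_slabs counts all
 * slabs on the node, and the debug-only full list tracks completely
 * allocated slabs so CONFIG_SLUB_DEBUG can validate them.
 */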

/*
 * Slab cache management.
 */
struct kmem_cache {
        /* Used for retrieving partial slabs etc */
        unsigned long flags;
        int size;               /* The size of an object including meta data */
        int objsize;            /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
        int order;

        /*
         * Avoid an extra cache line for UP, SMP and for the node local to
         * struct kmem_cache.
         */
        struct kmem_cache_node local_node;

        /* Allocation and freeing of slabs */
        int objects;            /* Number of objects in slab */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(struct kmem_cache *, void *);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
        struct kobject kobj;    /* For sysfs */
#endif

#ifdef CONFIG_NUMA
        int defrag_ratio;
        struct kmem_cache_node *node[MAX_NUMNODES];
#endif
#ifdef CONFIG_SMP
        struct kmem_cache_cpu *cpu_slab[NR_CPUS];
#else
        struct kmem_cache_cpu cpu_slab;
#endif
};
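
/*
 * On SMP each CPU reaches its private kmem_cache_cpu through the
 * cpu_slab[] pointer array; on UP a single instance is embedded in the
 * cache itself, which, together with local_node above, avoids the extra
 * cache line mentioned in the comment.
 */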

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
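
/*
 * With the default minimum of 8 bytes, KMALLOC_SHIFT_LOW is ilog2(8) = 3.
 * An architecture defining ARCH_KMALLOC_MINALIGN as, say, 64 would get a
 * 64-byte minimum object size and a low shift of 6.
 */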

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
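
/*
 * With 4k pages PAGE_SHIFT is 12, so indices 0..11 are available. Index i
 * normally serves 2^i-byte allocations, but indices 1 and 2 are repurposed
 * for the 96- and 192-byte caches (see kmalloc_index() below), and index 0
 * is never handed out because a zero size maps to NULL in kmalloc_slab().
 */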

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
        if (!size)
                return 0;

        if (size <= KMALLOC_MIN_SIZE)
                return KMALLOC_SHIFT_LOW;

        if (size > 64 && size <= 96)
                return 1;
        if (size > 128 && size <= 192)
                return 2;
        if (size <=          8) return 3;
        if (size <=         16) return 4;
        if (size <=         32) return 5;
        if (size <=         64) return 6;
        if (size <=        128) return 7;
        if (size <=        256) return 8;
        if (size <=        512) return 9;
        if (size <=       1024) return 10;
        if (size <=   2 * 1024) return 11;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
        if (size <=   4 * 1024) return 12;
        if (size <=   8 * 1024) return 13;
        if (size <=  16 * 1024) return 14;
        if (size <=  32 * 1024) return 15;
        if (size <=  64 * 1024) return 16;
        if (size <= 128 * 1024) return 17;
        if (size <= 256 * 1024) return 18;
        if (size <= 512 * 1024) return 19;
        if (size <= 1024 * 1024) return 20;
        if (size <=  2 * 1024 * 1024) return 21;
        return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *      int i;
 *      for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *              if (size <= (1 << i))
 *                      return i;
 */
}
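
/*
 * Worked examples (with the default KMALLOC_MIN_SIZE of 8):
 *      kmalloc_index(0)   == 0  (no cache; callers return ZERO_SIZE_PTR)
 *      kmalloc_index(6)   == 3  (rounded up to the 8-byte minimum)
 *      kmalloc_index(96)  == 1  (the dedicated 96-byte cache)
 *      kmalloc_index(100) == 7  (the 128-byte cache)
 */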

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
        int index = kmalloc_index(size);

        if (index == 0)
                return NULL;

        return &kmalloc_caches[index];
}
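
/*
 * index == 0 only occurs for a zero-byte request, so the callers below
 * translate the NULL result into ZERO_SIZE_PTR instead of performing a
 * real allocation.
 */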

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif
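
/*
 * Without CONFIG_ZONE_DMA, SLUB_DMA is 0 and the (flags & SLUB_DMA) tests
 * below become constant-false, so the compiler drops the DMA fallback.
 */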

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size)) {
                if (size > PAGE_SIZE / 2)
                        return (void *)__get_free_pages(flags | __GFP_COMP,
                                                        get_order(size));

                if (!(flags & SLUB_DMA)) {
                        struct kmem_cache *s = kmalloc_slab(size);

                        if (!s)
                                return ZERO_SIZE_PTR;

                        return kmem_cache_alloc(s, flags);
                }
        }
        return __kmalloc(size, flags);
}
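
/*
 * Three cases when size is a compile-time constant: anything above
 * PAGE_SIZE/2 bypasses the slab layer and is allocated as compound pages
 * straight from the page allocator; small non-DMA sizes resolve
 * kmalloc_slab() at compile time, collapsing the call to
 * kmem_cache_alloc() on a constant cache pointer; everything else
 * (runtime sizes, DMA requests) takes the generic __kmalloc() path.
 */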

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        if (__builtin_constant_p(size) &&
                size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
                struct kmem_cache *s = kmalloc_slab(size);

                if (!s)
                        return ZERO_SIZE_PTR;

                return kmem_cache_alloc_node(s, flags, node);
        }
        return __kmalloc_node(size, flags, node);
}
#endif
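
/*
 * kmalloc_node() mirrors kmalloc() minus the large-allocation shortcut:
 * constant sizes above PAGE_SIZE/2 simply fall through to
 * __kmalloc_node().
 */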

#endif /* _LINUX_SLUB_DEF_H */
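
As a usage sketch (editorial addition, not part of rev 62): a minimal module
written against this 2.6.24-era API could exercise both the constant-size
kmalloc() fast path and a dedicated cache. The struct foo type, cache name,
and init/exit functions below are hypothetical; kmem_cache_create() is
assumed to take the five-argument form whose ctor matches the signature
declared above.

#include <linux/module.h>
#include <linux/slab.h>

struct foo {
        int id;
        char name[32];
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
        void *buf;
        struct foo *f;

        /* Constant size: kmalloc_index(100) == 7, so this resolves at
         * compile time to the 128-byte entry of kmalloc_caches[]. */
        buf = kmalloc(100, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        kfree(buf);

        /* A dedicated cache; allocations go through kmem_cache_alloc(). */
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                                      0, SLAB_HWCACHE_ALIGN, NULL);
        if (!foo_cache)
                return -ENOMEM;

        f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
        if (f)
                kmem_cache_free(foo_cache, f);
        return 0;
}

static void __exit foo_exit(void)
{
        kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");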
