OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

[/] [test_project/] [trunk/] [linux_sd_driver/] [include/] [linux/] [mempolicy.h] - Blame information for rev 62

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 62 marcus.erl
#ifndef _LINUX_MEMPOLICY_H
2
#define _LINUX_MEMPOLICY_H 1
3
 
4
#include <linux/errno.h>
5
 
6
/*
7
 * NUMA memory policies for Linux.
8
 * Copyright 2003,2004 Andi Kleen SuSE Labs
9
 */
10
 
11
/* Policies: allocation modes selectable per process or per VMA */
#define MPOL_DEFAULT    0       /* no explicit policy; fall back to the enclosing one */
#define MPOL_PREFERRED  1       /* prefer one node (v.preferred_node) */
#define MPOL_BIND       2       /* restrict allocations to a zonelist (v.zonelist) */
#define MPOL_INTERLEAVE 3       /* spread allocations over a nodemask (v.nodes) */

/* Highest valid policy value; update when adding a mode */
#define MPOL_MAX MPOL_INTERLEAVE

/* Flags for get_mem_policy */
#define MPOL_F_NODE     (1<<0)  /* return next IL mode instead of node mask */
#define MPOL_F_ADDR     (1<<1)  /* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */

/* Flags for mbind */
#define MPOL_MF_STRICT  (1<<0)  /* Verify existing pages in the mapping */
#define MPOL_MF_MOVE    (1<<1)  /* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */
29
 
30
#ifdef __KERNEL__
31
 
32
#include <linux/mmzone.h>
33
#include <linux/slab.h>
34
#include <linux/rbtree.h>
35
#include <linux/spinlock.h>
36
#include <linux/nodemask.h>
37
 
38
struct vm_area_struct;
39
struct mm_struct;
40
 
41
#ifdef CONFIG_NUMA
42
 
43
/*
44
 * Describe a memory policy.
45
 *
46
 * A mempolicy can be either associated with a process or with a VMA.
47
 * For VMA related allocations the VMA policy is preferred, otherwise
48
 * the process policy is used. Interrupts ignore the memory policy
49
 * of the current process.
50
 *
51
 * Locking policy for interleave:
52
 * In process context there is no locking because only the process accesses
53
 * its own state. All vma manipulation is somewhat protected by a down_read on
54
 * mmap_sem.
55
 *
56
 * Freeing policy:
57
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
58
 * All other policies don't have any external state. mpol_free() handles this.
59
 *
60
 * Copying policy objects:
61
 * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this.
62
 */
63
struct mempolicy {
        atomic_t refcnt;        /* taken by mpol_get(), dropped via mpol_free() */
        short policy;   /* See MPOL_* above */
        union {
                struct zonelist  *zonelist;     /* bind */
                short            preferred_node; /* preferred */
                nodemask_t       nodes;         /* interleave */
                /* undefined for default */
        } v;
        nodemask_t cpuset_mems_allowed; /* mempolicy relative to these nodes */
};
74
 
75
/*
76
 * Support for managing mempolicy data objects (clone, copy, destroy)
77
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
78
 */
79
 
80
extern void __mpol_free(struct mempolicy *pol);
81
static inline void mpol_free(struct mempolicy *pol)
82
{
83
        if (pol)
84
                __mpol_free(pol);
85
}
86
 
87
extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
88
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
89
{
90
        if (pol)
91
                pol = __mpol_copy(pol);
92
        return pol;
93
}
94
 
95
/* Read / install the per-VMA policy pointer (NULL means default) */
#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
97
 
98
static inline void mpol_get(struct mempolicy *pol)
99
{
100
        if (pol)
101
                atomic_inc(&pol->refcnt);
102
}
103
 
104
extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);

/*
 * Compare two policies. Identical pointers (including NULL/NULL,
 * i.e. both default) are trivially equal; everything else is
 * decided out of line by __mpol_equal().
 */
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return (a == b) ? 1 : __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))
112
 
113
/* Could later add inheritance of the process policy here. */

/* Reset a VMA to the default policy (represented as NULL) */
#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
116
 
117
/*
118
 * Tree of shared policies for a shared memory region.
119
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
120
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
121
 * bytes, so that we can work with shared memory segments bigger than
122
 * unsigned long.
123
 */
124
 
125
/* One range in a shared_policy tree; offsets are in pages (see above) */
struct sp_node {
        struct rb_node nd;              /* linkage in shared_policy.root */
        unsigned long start, end;       /* page-index range this node covers */
        struct mempolicy *policy;       /* policy applied over the range */
};
130
 
131
/* Root of the per-object tree of shared memory policies */
struct shared_policy {
        struct rb_root root;            /* rb-tree of struct sp_node */
        spinlock_t lock;                /* presumably guards root — confirm in mm/mempolicy.c */
};
135
 
136
/* Initialize @info and record an initial @policy over @nodes */
void mpol_shared_policy_init(struct shared_policy *info, int policy,
                                nodemask_t *nodes);
/* Record @new as the policy for the range described by @vma */
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
/* Tear down the tree built under @p */
void mpol_free_shared_policy(struct shared_policy *p);
/* Look up the policy covering page index @idx */
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
/* Rebind a task's / mm's policies to a new node mask (cpuset changes —
 * see cpuset_mems_allowed in struct mempolicy) */
extern void mpol_rebind_task(struct task_struct *tsk,
                                        const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct mempolicy default_policy;
/* Pick the zonelist for a hugepage allocation at @addr under @vma's policy */
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol);
extern unsigned slab_node(struct mempolicy *policy);
156
 
157
extern enum zone_type policy_zone;
158
 
159
static inline void check_highest_zone(enum zone_type k)
160
{
161
        if (k > policy_zone && k != ZONE_MOVABLE)
162
                policy_zone = k;
163
}
164
 
165
/* Migrate @mm's pages between node sets; @flags looks like MPOL_MF_* — confirm at the definition */
int do_migrate_pages(struct mm_struct *mm,
        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
167
 
168
#else
169
 
170
/*
 * !CONFIG_NUMA: empty types and no-op stubs so callers can use the
 * mempolicy API unconditionally without sprinkling #ifdefs.
 */
struct mempolicy {};

/* Only one (default) policy exists, so any two compare equal */
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

/* No per-policy state to release */
static inline void mpol_free(struct mempolicy *p)
{
}

/* No refcount to take */
static inline void mpol_get(struct mempolicy *pol)
{
}

/* The default policy is represented as NULL */
static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
        return NULL;
}

struct shared_policy {};

/* Shared policies cannot be installed without NUMA support */
static inline int mpol_set_shared_policy(struct shared_policy *info,
                                        struct vm_area_struct *vma,
                                        struct mempolicy *new)
{
        return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
                                        int policy, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

/* No policy is ever recorded, so lookups always miss */
static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                        const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

/* Ignore the policy and use node 0's zonelist for the requested gfp zone */
static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
{
        return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
}

/* Nothing to migrate on a single-node system */
static inline int do_migrate_pages(struct mm_struct *mm,
                        const nodemask_t *from_nodes,
                        const nodemask_t *to_nodes, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}
257
#endif /* CONFIG_NUMA */
258
#endif /* __KERNEL__ */
259
 
260
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.