#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/kernel.h>

#ifdef __KERNEL__

#include <linux/string.h>

extern unsigned long high_memory;

#include <asm/page.h>
#include <asm/atomic.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#ifdef DEBUG_VERIFY_AREA
#undef verify_area
extern int verify_area(int, const void *, unsigned long);
extern int verify_area_flf(int, const void *, unsigned long, char *file, int line, char *function);
#define verify_area(a,b,c) verify_area_flf(a,b,c,__FILE__,__LINE__,__FUNCTION__)

#else /* !DEBUG_VERIFY_AREA */
extern int verify_area(int, const void *, unsigned long);
#endif /* !DEBUG_VERIFY_AREA */
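
/*
 * Example (illustrative sketch, hypothetical driver code): a driver
 * read() method would typically check the user buffer before copying
 * data out to it, e.g.
 *
 *	int error = verify_area(VERIFY_WRITE, buf, count);
 *	if (error)
 *		return error;
 */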

#ifdef MAGIC_ROM_PTR
extern int is_in_rom(unsigned long);
#endif /* MAGIC_ROM_PTR */

/*
 * Linux kernel virtual memory manager primitives.
 * The idea is to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

#ifndef NO_MM

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* VM area parameters */
	unsigned long vm_start;
	unsigned long vm_end;
	pgprot_t vm_page_prot;
	unsigned short vm_flags;
	/* AVL tree of VM areas per task, sorted by address */
	short vm_avl_height;
	struct vm_area_struct * vm_avl_left;
	struct vm_area_struct * vm_avl_right;
	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct * vm_next;
	/* for areas with an inode, the circular list inode->i_mmap */
	/* for shm areas, the circular list of attaches */
	/* otherwise unused */
	struct vm_area_struct * vm_next_share;
	struct vm_area_struct * vm_prev_share;
	/* more */
	struct vm_operations_struct * vm_ops;
	unsigned long vm_offset;
	struct inode * vm_inode;
	unsigned long vm_pte;		/* shared mem */
};
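
/*
 * Example (illustrative sketch): a task's VM areas can be walked
 * through the address-sorted vm_next list; the list head, mm->mmap,
 * lives in the mm_struct declared in <linux/sched.h>.
 *
 *	struct vm_area_struct * vma;
 *	for (vma = mm->mmap; vma; vma = vma->vm_next)
 *		printk("%08lx-%08lx\n", vma->vm_start, vma->vm_end);
 */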

#else /* NO_MM */


/* This dummy vm_area_struct does not define a VM area; it is only
   used to convey data between do_mmap and an f_op's mmap function. */

struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned short vm_flags;
	unsigned long vm_offset;
};

#endif /* NO_MM */

/*
 * vm_flags..
 */
#define VM_READ		0x0001	/* currently active flags */
#define VM_WRITE	0x0002
#define VM_EXEC		0x0004
#define VM_SHARED	0x0008

#define VM_MAYREAD	0x0010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x0020
#define VM_MAYEXEC	0x0040
#define VM_MAYSHARE	0x0080

#define VM_GROWSDOWN	0x0100	/* general info on the segment */
#define VM_GROWSUP	0x0200
#define VM_SHM		0x0400	/* shared memory area, don't swap out */
#define VM_DENYWRITE	0x0800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x1000
#define VM_LOCKED	0x2000

#define VM_STACK_FLAGS	0x0177
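/* i.e. VM_READ | VM_WRITE | VM_EXEC | VM_MAYREAD | VM_MAYWRITE |
   VM_MAYEXEC | VM_GROWSDOWN (0x0007 | 0x0070 | 0x0100) */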

#ifndef NO_MM

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

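/*
 * Example (illustrative sketch): the mmap path derives a page
 * protection from the low four vm_flags bits along the lines of
 *
 *	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
 */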

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), and pointers
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
	void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
	int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
	void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
	unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
	unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
		unsigned long page);
	int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
	pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
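
/*
 * Example (illustrative sketch, hypothetical driver code): a mapping
 * that only needs a fault handler can leave every other operation
 * NULL; the initializer order follows the declaration above (open,
 * close, unmap, protect, sync, advise, nopage, wppage, swapout,
 * swapin):
 *
 *	static struct vm_operations_struct my_vm_ops = {
 *		NULL, NULL, NULL, NULL, NULL, NULL,
 *		my_nopage, NULL, NULL, NULL,
 *	};
 */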

#endif /* !NO_MM */

/*
 * Try to keep the most commonly accessed fields in single cache lines
 * here (16 bytes or greater). This ordering should be particularly
 * beneficial on 32-bit processors.
 *
 * The first line is data used in page cache lookup, the second line
 * is used for linear searches (e.g. clock algorithm scans).
 */
typedef struct page {
	/* these must be first (free area handling) */
	struct page *next;
	struct page *prev;
	struct inode *inode;
	unsigned long offset;
	struct page *next_hash;
	atomic_t count;
	unsigned flags;		/* atomic flags, some possibly updated asynchronously */
	unsigned dirty:16,
		 age:8;
	struct wait_queue *wait;
	struct page *prev_hash;
	struct buffer_head * buffers;
	unsigned long swap_unlock_entry;
	unsigned long map_nr;	/* page->map_nr == page - mem_map */
} mem_map_t;

/* Page flag bit values */
#define PG_locked		 0
#define PG_error		 1
#define PG_referenced		 2
#define PG_uptodate		 3
#define PG_free_after		 4
#define PG_decr_after		 5
#define PG_swap_unlock_after	 6
#define PG_DMA			 7
#define PG_dirty		 8	/* needed by PageDirty() below */
#define PG_reserved		31

/* Make it prettier to test the above... */
#define PageLocked(page)	(test_bit(PG_locked, &(page)->flags))
#define PageError(page)		(test_bit(PG_error, &(page)->flags))
#define PageReferenced(page)	(test_bit(PG_referenced, &(page)->flags))
#define PageDirty(page)		(test_bit(PG_dirty, &(page)->flags))
#define PageUptodate(page)	(test_bit(PG_uptodate, &(page)->flags))
#define PageFreeAfter(page)	(test_bit(PG_free_after, &(page)->flags))
#define PageDecrAfter(page)	(test_bit(PG_decr_after, &(page)->flags))
#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags))
#define PageDMA(page)		(test_bit(PG_DMA, &(page)->flags))
#define PageReserved(page)	(test_bit(PG_reserved, &(page)->flags))
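
/*
 * Example (illustrative sketch): page->flags is updated with the
 * atomic bit operations from <asm/bitops.h>, e.g.
 *
 *	set_bit(PG_locked, &page->flags);	(before starting I/O)
 *	clear_bit(PG_locked, &page->flags);	(when I/O completes)
 *	if (PageUptodate(page))
 *		...
 */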

/*
 * page->reserved denotes a page which must never be accessed (which
 * may not even be present).
 *
 * page->dma is set for those pages which lie in the range of
 * physical addresses capable of carrying DMA transfers.
 *
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page->count denotes a reference count.
 * page->count == 0 means the page is free.
 * page->count == 1 means the page is used for exactly one purpose
 * (e.g. a private data page of one process).
 *
 * A page may be used by kmalloc() or anyone else who does a
 * get_free_page(). In this case the page->count is at least 1, and
 * all other fields are unused but should be 0 or NULL. The
 * management of this page is the responsibility of the one who uses
 * it.
 *
 * The other pages (we may call them "process pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A page may belong to an inode's memory mapping. In this case,
 * page->inode is the inode, and page->offset is the file offset
 * of the page (not necessarily a multiple of PAGE_SIZE).
 *
 * A page may have buffers allocated to it. In this case,
 * page->buffers is a circular list of these buffer heads. Otherwise,
 * page->buffers == NULL.
 *
 * For pages belonging to inodes, the page->count is the number of
 * attaches, plus 1 if buffers are allocated to the page.
 *
 * All pages belonging to an inode make up a doubly linked list
 * inode->i_pages, using the fields page->next and page->prev. (These
 * fields are also used for freelist management when page->count == 0.)
 * There is also a hash table mapping (inode,offset) to the page
 * in memory if present. The lists for this hash table use the fields
 * page->next_hash and page->prev_hash.
 *
 * All process pages can do I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written to disk,
 * - private pages which have been modified may need to be swapped out
 *   to swap space and (later) to be read back into memory.
 * During disk I/O, page->locked is true. This bit is set before I/O
 * and reset when I/O completes. page->wait is a wait queue of all
 * tasks waiting for the I/O on this page to complete.
 * page->uptodate tells whether the page's contents are valid.
 * When a read completes, the page becomes uptodate, unless a disk I/O
 * error happened.
 * When a write completes, and page->free_after is true, the page is
 * freed without any further delay.
 *
 * For choosing which pages to swap out, inode pages carry a
 * page->referenced bit, which is set any time the system accesses
 * that page through the (inode,offset) hash table.
 * There is also the page->age counter, which implements a linear
 * decay (why not an exponential decay?), see swapctl.h.
 */

extern mem_map_t * mem_map;
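
/*
 * Example (illustrative sketch): mem_map has one entry per physical
 * page frame, so the entry for a kernel address can be found with
 * the MAP_NR() macro from <asm/page.h>:
 *
 *	struct page * page = mem_map + MAP_NR(addr);
 */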

#ifdef DEBUG_FREE_PAGES

#undef __get_free_pages
#undef get_free_page
#undef free_page
#undef __free_page

extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);
extern void free_pages(unsigned long addr, unsigned long order);
extern unsigned long get_free_page(int priority);
extern void __free_page(struct page * ptr);

/*
 * This is timing-critical - most of the time in getting a new page
 * goes to clearing the page. If you want a page without the clearing
 * overhead, just use __get_free_page() directly..
 */
#define __get_free_page(priority) __get_free_pages((priority),0,0)
#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
extern unsigned long __get_free_pages_flf(int priority, unsigned long gfporder, int dma, char *file, int line, char *function);
extern unsigned long get_free_page_flf(int priority, char *file, int line, char *function);

#define __get_free_pages(priority,order,dma) __get_free_pages_flf(priority,order,dma,__FILE__,__LINE__,__FUNCTION__)
#define get_free_page(priority) get_free_page_flf(priority,__FILE__,__LINE__,__FUNCTION__)

/* memory.c & swap.c */

#define free_page(addr) free_pages((addr),0)
extern void free_pages_flf(unsigned long addr, unsigned long order, char *file, int line, char *function);
extern void __free_page_flf(struct page *, char *file, int line, char *function);

#define free_pages(addr, order) free_pages_flf(addr, order, __FILE__, __LINE__, __FUNCTION__)
#define __free_page(page) __free_page_flf(page, __FILE__, __LINE__, __FUNCTION__)

#else /* !DEBUG_FREE_PAGES */

/*
 * This is timing-critical - most of the time in getting a new page
 * goes to clearing the page. If you want a page without the clearing
 * overhead, just use __get_free_page() directly..
 */
#define __get_free_page(priority) __get_free_pages((priority),0,0)
#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);

extern inline unsigned long get_free_page(int priority)
{
	unsigned long page;

	page = __get_free_page(priority);
	if (page)
		memset((void *) page, 0, PAGE_SIZE);
	return page;
}

/* memory.c & swap.c */

#define free_page(addr) free_pages((addr),0)
extern void free_pages(unsigned long addr, unsigned long order);
extern void __free_page(struct page *);

#endif /* !DEBUG_FREE_PAGES */

extern void show_free_areas(void);
extern unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page,
	unsigned long address);

extern void free_page_tables(struct mm_struct * mm);
extern void clear_page_tables(struct task_struct * tsk);
extern int new_page_tables(struct task_struct * tsk);
extern int copy_page_tables(struct task_struct * to);

#ifndef NO_MM

extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern void vmtruncate(struct inode * inode, unsigned long offset);
extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);

#endif /* !NO_MM */

extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
extern void mem_init(unsigned long start_mem, unsigned long end_mem);
extern void show_mem(void);
extern void oom(struct task_struct * tsk);
extern void si_meminfo(struct sysinfo * val);

/* vmalloc.c */

extern void * vmalloc(unsigned long size);
extern void * vremap(unsigned long offset, unsigned long size);
extern void vfree(void * addr);
extern int vread(char *buf, char *addr, int count);
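
/*
 * Example (illustrative sketch): vmalloc() memory is virtually but
 * not necessarily physically contiguous, and must be released with
 * vfree():
 *
 *	void * buf = vmalloc(64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */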

/* mmap.c */
#ifdef DEBUG_MMAP

#undef do_mmap
#undef do_munmap
extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off);
extern int do_munmap(unsigned long, size_t);

extern unsigned long do_mmap_flf(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off, char *filename, int line, char *function);
extern int do_munmap_flf(unsigned long, size_t, char *file, int line, char *function);

#define do_mmap(file,addr,len,prot,flags,off) do_mmap_flf(file,addr,len,prot,flags,off,__FILE__,__LINE__,__FUNCTION__)
#define do_munmap(addr, size) do_munmap_flf(addr, size,__FILE__,__LINE__,__FUNCTION__)

#else /* !DEBUG_MMAP */
extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off);
extern int do_munmap(unsigned long, size_t);
#endif /* !DEBUG_MMAP */
extern void exit_mmap(struct mm_struct *);
#ifndef NO_MM
extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void remove_shared_vm_struct(struct vm_area_struct *);
extern void build_mmap_avl(struct mm_struct *);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);
#endif /* !NO_MM */

/* filemap.c */
extern unsigned long page_unuse(unsigned long);
extern int shrink_mmap(int, int, int);
extern void truncate_inode_pages(struct inode *, unsigned long);

#define GFP_BUFFER	0x00
#define GFP_ATOMIC	0x01
#define GFP_USER	0x02
#define GFP_KERNEL	0x03
#define GFP_NOBUFFER	0x04
#define GFP_NFS		0x05
#define GFP_IO		0x06

/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		0x80

#define GFP_LEVEL_MASK	0xf
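
/*
 * Example (illustrative sketch): a driver allocating, and later
 * releasing, a two-page DMA-capable buffer:
 *
 *	unsigned long buf = __get_dma_pages(GFP_KERNEL, 1);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages(buf, 1);
 */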

#ifndef NO_MM
/* vma is the first one with address < vma->vm_end,
 * and even address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;

	address &= PAGE_MASK;
	grow = vma->vm_start - address;
	if (vma->vm_end - address
	    > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur ||
	    (vma->vm_mm->total_vm << PAGE_SHIFT) + grow
	    > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
		return -ENOMEM;
	vma->vm_start = address;
	vma->vm_offset -= grow;
	vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
	return 0;
}
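
/*
 * Example (illustrative sketch): a page-fault handler typically grows
 * the stack when a fault lands below a VM_GROWSDOWN area:
 *
 *	if (address < vma->vm_start) {
 *		if (!(vma->vm_flags & VM_GROWSDOWN)
 *		    || expand_stack(vma, address))
 *			goto bad_area;
 *	}
 */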

#define avl_empty	(struct vm_area_struct *) NULL

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
static inline struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
{
	struct vm_area_struct * result = NULL;

	if (mm) {
		struct vm_area_struct * tree = mm->mmap_avl;
		for (;;) {
			if (tree == avl_empty)
				break;
			if (tree->vm_end > addr) {
				result = tree;
				if (tree->vm_start <= addr)
					break;
				tree = tree->vm_avl_left;
			} else
				tree = tree->vm_avl_right;
		}
	}
	return result;
}
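
/*
 * Example (illustrative sketch): find_vma() only guarantees
 * addr < vm_end, so a caller testing whether addr is actually mapped
 * must also check vm_start:
 *
 *	struct vm_area_struct * vma = find_vma(current->mm, addr);
 *	if (!vma || addr < vma->vm_start)
 *		goto bad_area;
 */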

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma;

	vma = find_vma(mm, start_addr);
	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

#endif /* !NO_MM */

#endif /* __KERNEL__ */

#endif /* _LINUX_MM_H */