#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/rbtree.h>

extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern unsigned long num_mappedpages;
extern void * high_memory;
extern int page_cluster;
/* The inactive_clean lists are per zone. */
extern struct list_head active_list;
extern struct list_head inactive_list;

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, listed below. */

	rb_node_t vm_rb;

	/*
	 * For areas with an address space and backing store,
	 * one of the address_space->i_mmap{,shared} lists,
	 * for shm areas, the list of attaches, otherwise unused.
	 */
	struct vm_area_struct *vm_next_share;
	struct vm_area_struct **vm_pprev_share;

	/* Function pointers to deal with this struct. */
	struct vm_operations_struct * vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file * vm_file;		/* File we map to (can be NULL). */
	unsigned long vm_raend;		/* XXX: put full readahead info here. */
	void * vm_private_data;		/* was vm_pte (shared mem) */
};

/*
 * vm_flags..
 */
#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_GROWSUP	0x00000200
#define VM_SHM		0x00000400	/* shared memory area, don't swap out */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x00001000
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000	/* Don't unmap it from swap_out */

#ifndef VM_STACK_FLAGS
#define VM_STACK_FLAGS	0x00000177
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)
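
/*
 * Example (illustrative sketch, not part of this header): the hint
 * macros above are what sys_madvise() sets and the pagein path tests.
 * A MADV_SEQUENTIAL call ends up doing, in effect:
 *
 *	vma->vm_flags = (vma->vm_flags & ~VM_READHINTMASK) | VM_SEQ_READ;
 *
 * and filemap_nopage() later consults VM_SequentialReadHint(vma) /
 * VM_RandomReadHint(vma) to widen or suppress readahead.
 */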

/* read ahead limits */
extern int vm_min_readahead;
extern int vm_max_readahead;

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];
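
/*
 * Example (sketch, mirroring what the mmap path does when setting up
 * a new vma): the low four vm_flags bits index protection_map directly:
 *
 *	vma->vm_page_prot = protection_map[vm_flags & 0x0f];
 */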

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), and the
 * functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused);
};
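
/*
 * Example (sketch): most filesystems supply only ->nopage and leave
 * open/close NULL; generic_file_mmap() wires up the generic page-cache
 * implementation roughly like this:
 *
 *	static struct vm_operations_struct generic_file_vm_ops = {
 *		nopage:		filemap_nopage,
 *	};
 */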

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page.
 *
 * Try to keep the most commonly accessed fields in single cache lines
 * here (16 bytes or greater). This ordering should be particularly
 * beneficial on 32-bit processors.
 *
 * The first line is data used in page cache lookup, the second line
 * is used for linear searches (eg. clock algorithm scans).
 *
 * TODO: make this structure smaller, it could be as small as 32 bytes.
 */
typedef struct page {
	struct list_head list;		/* ->mapping has some page lists. */
	struct address_space *mapping;	/* The inode (or ...) we belong to. */
	unsigned long index;		/* Our offset within mapping. */
	struct page *next_hash;		/* Next page sharing our hash bucket in
					   the pagecache hash table. */
	atomic_t count;			/* Usage count, see below. */
	unsigned long flags;		/* atomic flags, some possibly
					   updated asynchronously */
	struct list_head lru;		/* Pageout list, eg. active_list;
					   protected by pagemap_lru_lock !! */
	struct page **pprev_hash;	/* Complement to *next_hash. */
	struct buffer_head * buffers;	/* Buffer maps us to a disk block. */

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
} mem_map_t;

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping (page->mapping)
 * - disk mapping (page->buffers)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */
#define get_page(p)		atomic_inc(&(p)->count)
#define put_page(p)		__free_page(p)
#define put_page_testzero(p)	atomic_dec_and_test(&(p)->count)
#define page_count(p)		atomic_read(&(p)->count)
#define set_page_count(p,v)	atomic_set(&(p)->count, v)
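
/*
 * Example (sketch): the usual way to pin a page across an operation
 * that may sleep or drop locks - take an extra reference first and
 * drop it afterwards, so the page cannot be freed underneath us:
 *
 *	get_page(page);
 *	... block, drop locks, do I/O ...
 *	put_page(page);
 */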

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped
 * out. Some of them might not even exist (eg empty_bad_page)...
 *
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page->count denotes a reference count.
 *   page->count == 0 means the page is free.
 *   page->count == 1 means the page is used for exactly one purpose
 *   (e.g. a private data page of one process).
 *
 * A page may be used for kmalloc() or anyone else who does a
 * __get_free_page(). In this case the page->count is at least 1, and
 * all other fields are unused but should be 0 or NULL. The
 * management of this page is the responsibility of the one who uses
 * it.
 *
 * The other pages (we may call them "process pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A page may belong to an inode's memory mapping. In this case,
 * page->mapping is the pointer to the inode, and page->index is the
 * file offset of the page, in units of PAGE_CACHE_SIZE.
 *
 * A page may have buffers allocated to it. In this case,
 * page->buffers is a circular list of these buffer heads. Else,
 * page->buffers == NULL.
 *
 * For pages belonging to inodes, the page->count is the number of
 * attaches, plus 1 if buffers are allocated to the page, plus one
 * for the page cache itself.
 *
 * All pages belonging to an inode are in these doubly linked lists:
 * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
 * using the page->list list_head. These fields are also used for
 * freelist management (when page->count==0).
 *
 * There is also a hash table mapping (mapping,index) to the page
 * in memory if present. The lists for this hash table use the fields
 * page->next_hash and page->pprev_hash.
 *
 * All process pages can do I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written to disk,
 * - private pages which have been modified may need to be swapped out
 *   to swap space and (later) to be read back into memory.
 * During disk I/O, PG_locked is used. This bit is set before I/O
 * and reset when I/O completes. page_waitqueue(page) is a wait queue of all
 * tasks waiting for the I/O on this page to complete.
 * PG_uptodate tells whether the page's contents are valid.
 * When a read completes, the page becomes uptodate, unless a disk I/O
 * error happened.
 *
 * For choosing which pages to swap out, inode pages carry a
 * PG_referenced bit, which is set any time the system accesses
 * that page through the (mapping,index) hash table. This referenced
 * bit, together with the referenced bit in the page tables, is used
 * to manipulate page->age and move the page across the active,
 * inactive_dirty and inactive_clean lists.
 *
 * Note that the referenced bit, the page->lru list_head and the
 * active, inactive_dirty and inactive_clean lists are protected by
 * the pagemap_lru_lock, and *NOT* by the usual PG_locked bit!
 *
 * PG_skip is used on sparc/sparc64 architectures to "skip" certain
 * parts of the address space.
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic
 * code guarantees that this bit is cleared for a page when it first
 * is entered into the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual
 * address space, they need to be kmapped separately for doing IO on
 * the pages. The struct page itself (with these information bits) is
 * always mapped into kernel address space...
 */
#define PG_locked		 0	/* Page is locked. Don't touch. */
#define PG_error		 1
#define PG_referenced		 2
#define PG_uptodate		 3
#define PG_dirty		 4
#define PG_unused		 5
#define PG_lru			 6
#define PG_active		 7
#define PG_slab			 8
#define PG_skip			10
#define PG_highmem		11
#define PG_checked		12	/* kill me in 2.5.<early>. */
#define PG_arch_1		13
#define PG_reserved		14
#define PG_launder		15	/* written out by VM pressure.. */
#define PG_fs_1			16	/* Filesystem specific */

#ifndef arch_set_page_uptodate
#define arch_set_page_uptodate(page)
#endif

/* Make it prettier to test the above... */
#define UnlockPage(page)	unlock_page(page)
#define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page)					\
	do {							\
		arch_set_page_uptodate(page);			\
		set_bit(PG_uptodate, &(page)->flags);		\
	} while (0)
#define ClearPageUptodate(page)	clear_bit(PG_uptodate, &(page)->flags)
#define PageDirty(page)		test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page)	set_bit(PG_dirty, &(page)->flags)
#define ClearPageDirty(page)	clear_bit(PG_dirty, &(page)->flags)
#define PageLocked(page)	test_bit(PG_locked, &(page)->flags)
#define LockPage(page)		set_bit(PG_locked, &(page)->flags)
#define TryLockPage(page)	test_and_set_bit(PG_locked, &(page)->flags)
#define PageChecked(page)	test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page)	set_bit(PG_checked, &(page)->flags)
#define ClearPageChecked(page)	clear_bit(PG_checked, &(page)->flags)
#define PageLaunder(page)	test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page)	set_bit(PG_launder, &(page)->flags)
#define ClearPageLaunder(page)	clear_bit(PG_launder, &(page)->flags)
#define ClearPageArch1(page)	clear_bit(PG_arch_1, &(page)->flags)

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */
#define NODE_SHIFT 4
#define ZONE_SHIFT (BITS_PER_LONG - 8)

struct zone_struct;
extern struct zone_struct *zone_table[];

static inline zone_t *page_zone(struct page *page)
{
	return zone_table[page->flags >> ZONE_SHIFT];
}

static inline void set_page_zone(struct page *page, unsigned long zone_num)
{
	page->flags &= ~(~0UL << ZONE_SHIFT);
	page->flags |= zone_num << ZONE_SHIFT;
}
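
/*
 * Example (illustrative, assuming BITS_PER_LONG == 32): ZONE_SHIFT is
 * then 24, so the top 8 bits of page->flags index zone_table[] while
 * the PG_* bits live below. Boot-time zone setup stores the index
 * once per page, roughly:
 *
 *	set_page_zone(page, nid * MAX_NR_ZONES + zone_idx);
 *
 * after which page_zone(page) is a single shift plus table lookup.
 */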

/*
 * In order to avoid #ifdefs within C code itself, we define
 * set_page_address to a noop for non-highmem machines, where
 * the field isn't useful.
 * The same is true for page_address() in arch-dependent code.
 */
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)

#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)

#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
#define set_page_address(page, address)  do { } while(0)
#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */

/*
 * Permanent address of a page. Obviously must never be
 * called on a highmem page.
 */
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)

#define page_address(page) ((page)->virtual)

#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */

#define page_address(page)						\
	__va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT)	\
			+ page_zone(page)->zone_start_paddr)

#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */

extern void FASTCALL(set_page_dirty(struct page *));

/*
 * The first mb is necessary to safely close the critical section opened by the
 * TryLockPage(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page).
 */
#define PageError(page)		test_bit(PG_error, &(page)->flags)
#define SetPageError(page)	set_bit(PG_error, &(page)->flags)
#define ClearPageError(page)	clear_bit(PG_error, &(page)->flags)
#define PageReferenced(page)	test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page)	set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page)	clear_bit(PG_referenced, &(page)->flags)
#define PageTestandClearReferenced(page)	test_and_clear_bit(PG_referenced, &(page)->flags)
#define PageSlab(page)		test_bit(PG_slab, &(page)->flags)
#define PageSetSlab(page)	set_bit(PG_slab, &(page)->flags)
#define PageClearSlab(page)	clear_bit(PG_slab, &(page)->flags)
#define PageReserved(page)	test_bit(PG_reserved, &(page)->flags)

#define PageActive(page)	test_bit(PG_active, &(page)->flags)
#define SetPageActive(page)	set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page)	clear_bit(PG_active, &(page)->flags)

#define PageLRU(page)		test_bit(PG_lru, &(page)->flags)
#define TestSetPageLRU(page)	test_and_set_bit(PG_lru, &(page)->flags)
#define TestClearPageLRU(page)	test_and_clear_bit(PG_lru, &(page)->flags)

#ifdef CONFIG_HIGHMEM
#define PageHighMem(page)	test_bit(PG_highmem, &(page)->flags)
#else
#define PageHighMem(page)	0 /* needed to optimize away at compile time */
#endif

#define SetPageReserved(page)	set_bit(PG_reserved, &(page)->flags)
#define ClearPageReserved(page)	clear_bit(PG_reserved, &(page)->flags)

/*
 * Error return values for the *_nopage functions
 */
#define NOPAGE_SIGBUS	(NULL)
#define NOPAGE_OOM	((struct page *) (-1))
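
/*
 * Example (sketch of a ->nopage method; find_backing_page() is a
 * hypothetical helper standing in for the real lookup): do_no_page()
 * interprets NOPAGE_SIGBUS as "raise SIGBUS" and NOPAGE_OOM as
 * "out of memory"; a successful return hands back a referenced page.
 *
 *	static struct page *example_nopage(struct vm_area_struct *vma,
 *					   unsigned long address, int unused)
 *	{
 *		struct page *page = find_backing_page(vma, address);
 *		if (!page)
 *			return NOPAGE_SIGBUS;
 *		get_page(page);
 *		return page;
 *	}
 */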

/* The array of struct pages */
extern mem_map_t * mem_map;

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */
extern struct page * FASTCALL(_alloc_pages(unsigned int gfp_mask, unsigned int order));
extern struct page * FASTCALL(__alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist));
extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order);

static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	/*
	 * Gets optimized away by the compiler.
	 */
	if (order >= MAX_ORDER)
		return NULL;
	return _alloc_pages(gfp_mask, order);
}

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask),0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA,(order))

/*
 * The old interface name will be removed in 2.5:
 */
#define get_free_page get_zeroed_page

/*
 * There is only one 'core' page-freeing function.
 */
extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
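
/*
 * Example (sketch): allocating and releasing a single zeroed page for
 * a kernel-internal buffer. GFP_KERNEL may sleep, so this is only
 * legal in process context:
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	... use the page through (void *)addr ...
 *	free_page(addr);
 */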

extern void show_free_areas(void);
extern void show_free_areas_node(pg_data_t *pgdat);

extern void clear_page_tables(struct mm_struct *, unsigned long, int);

extern int fail_writepage(struct page *);
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
struct file *shmem_file_setup(char * name, loff_t size);
extern void shmem_lock(struct file * file, int lock);
extern int shmem_zero_setup(struct vm_area_struct *);

extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern int vmtruncate(struct inode * inode, loff_t offset);
extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
extern pte_t *FASTCALL(pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char * src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);

/*
 * On a two-level page table, this ends up being trivial. Thus the
 * inlining and the symmetry break with pte_alloc() that does all
 * of this out-of-line.
 */
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	if (pgd_none(*pgd))
		return __pmd_alloc(mm, pgd, address);
	return pmd_offset(pgd, address);
}
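
/*
 * Example (sketch): the canonical top-down walk that fault and remap
 * paths build from pgd_offset()/pmd_alloc()/pte_alloc(); the caller
 * is expected to hold mm->page_table_lock:
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_alloc(mm, pgd, address);
 *	pte_t *pte = pmd ? pte_alloc(mm, pmd, address) : NULL;
 */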

extern int pgt_cache_water[2];
extern int check_pgt_cache(void);

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
	unsigned long * zones_size, unsigned long zone_start_paddr,
	unsigned long *zholes_size);
extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void swapin_readahead(swp_entry_t);

extern struct address_space swapper_space;
#define PageSwapCache(page) ((page)->mapping == &swapper_space)

static inline int is_page_cache_freeable(struct page * page)
{
	return page_count(page) - !!page->buffers == 1;
}
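
/*
 * Example: for a page-cache page, the cache itself holds one reference
 * and attached buffers hold one more, so count == 2 with buffers gives
 * 2 - 1 == 1 and the test above succeeds ("nobody but the cache and
 * its buffers references this page"). A page pinned elsewhere with
 * get_page() makes the test fail.
 */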

extern int FASTCALL(can_share_swap_page(struct page *));
extern int FASTCALL(remove_exclusive_swap_page(struct page *));

extern void __free_pte(pte_t);

/* mmap.c */
extern void lock_vma_mappings(struct vm_area_struct *);
extern void unlock_vma_mappings(struct vm_area_struct *);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void build_mmap_rb(struct mm_struct *);
extern void exit_mmap(struct mm_struct *);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff);

static inline unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	unsigned long ret = -EINVAL;
	if ((offset + PAGE_ALIGN(len)) < offset)
		goto out;
	if (!(offset & ~PAGE_MASK))
		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
	return ret;
}
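
/*
 * Example (sketch): an in-kernel caller mapping a file region; the
 * mmap semaphore must be held for writing around do_mmap():
 *
 *	down_write(&current->mm->mmap_sem);
 *	addr = do_mmap(file, 0, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, offset);
 *	up_write(&current->mm->mmap_sem);
 */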

extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long do_brk(unsigned long, unsigned long);

static inline void __vma_unlink(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev)
{
	prev->vm_next = vma->vm_next;
	rb_erase(&vma->vm_rb, &mm->mm_rb);
	if (mm->mmap_cache == vma)
		mm->mmap_cache = prev;
}

static inline int can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags)
{
	if (!vma->vm_file && vma->vm_flags == vm_flags)
		return 1;
	else
		return 0;
}

struct zone_t;
/* filemap.c */
extern void remove_inode_page(struct page *);
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_sync(struct vm_area_struct *, unsigned long, size_t, unsigned int);
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);

/*
 * GFP bitmasks..
 */
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low four bits) */
#define __GFP_DMA	0x01
#define __GFP_HIGHMEM	0x02

/* Action modifiers - don't change the zoning */
#define __GFP_WAIT	0x10	/* Can wait and reschedule? */
#define __GFP_HIGH	0x20	/* Should access emergency pools? */
#define __GFP_IO	0x40	/* Can start low memory physical IO? */
#define __GFP_HIGHIO	0x80	/* Can start high mem physical IO? */
#define __GFP_FS	0x100	/* Can call down to low-level FS? */

#define GFP_NOHIGHIO	(__GFP_HIGH | __GFP_WAIT | __GFP_IO)
#define GFP_NOIO	(__GFP_HIGH | __GFP_WAIT)
#define GFP_NOFS	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_USER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_HIGHUSER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
#define GFP_KERNEL	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_KSWAPD	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)

/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

static inline unsigned int pf_gfp_mask(unsigned int gfp_mask)
{
	/* avoid all memory balancing I/O methods if this task cannot block on I/O */
	if (current->flags & PF_NOIO)
		gfp_mask &= ~(__GFP_IO | __GFP_HIGHIO | __GFP_FS);

	return gfp_mask;
}
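
/*
 * Example (sketch): allocation paths that may run in a PF_NOIO task
 * (a thread that must never recurse into I/O, eg. on the loop-device
 * path) filter their mask first:
 *
 *	struct page *page = alloc_page(pf_gfp_mask(GFP_HIGHUSER));
 */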

/* vma is the first one with address < vma->vm_end,
 * and even address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller is required
	 * to hold the mmap_sem in write mode. We need to get the spinlock only
	 * before relocating the vma range ourselves.
	 */
	address &= PAGE_MASK;
	spin_lock(&vma->vm_mm->page_table_lock);
	grow = (vma->vm_start - address) >> PAGE_SHIFT;
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
	    ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) {
		spin_unlock(&vma->vm_mm->page_table_lock);
		return -ENOMEM;
	}
	vma->vm_start = address;
	vma->vm_pgoff -= grow;
	vma->vm_mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow;
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
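
/*
 * Example (sketch, mirroring the page fault path): find_vma() only
 * guarantees addr < vm_end, so a hit must still be range-checked,
 * and a growable stack vma above the address may need extending:
 *
 *	struct vm_area_struct *vma = find_vma(mm, addr);
 *	if (!vma)
 *		goto bad_area;
 *	if (addr < vma->vm_start) {
 *		if (!(vma->vm_flags & VM_GROWSDOWN) ||
 *		    expand_stack(vma, addr))
 *			goto bad_area;
 *	}
 */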

extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);

extern struct page * vmalloc_to_page(void *addr);

#endif /* __KERNEL__ */

#endif