#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO          (__GFP_BITS_SHIFT + 0)  /* IO error on async write */
#define AS_ENOSPC       (__GFP_BITS_SHIFT + 1)  /* ENOSPC on async write */

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (error) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
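
/*
 * Usage sketch (editor's addition, not part of the original header): how
 * the error bits recorded above are typically consumed.  This mirrors the
 * write-and-wait paths in mm/filemap.c; "my_report_mapping_error" is a
 * hypothetical helper name.
 */
static inline int my_report_mapping_error(struct address_space *mapping)
{
        int ret = 0;

        /* If both bits are set, -EIO wins because it is tested last. */
        if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
        if (test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
}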

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                                (__force unsigned long)mask;
}
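
/*
 * Usage sketch (editor's addition): the gfp mask is normally set while a
 * fresh inode is being initialized, before other CPUs can see the mapping,
 * which is why the non-atomic update above is tolerable.
 * "my_fs_init_mapping" is a hypothetical helper name.
 */
static inline void my_fs_init_mapping(struct address_space *mapping)
{
        /* Keep reclaim from recursing into the filesystem. */
        mapping_set_gfp_mask(mapping, GFP_NOFS);
}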

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)  (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
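
/*
 * Worked example (editor's addition): mapping a byte range onto page
 * cache indices with the macros above.  For a file position "pos" and
 * length "len":
 *
 *      pgoff_t first = pos >> PAGE_CACHE_SHIFT;
 *      pgoff_t last  = (pos + len - 1) >> PAGE_CACHE_SHIFT;
 *      unsigned off  = pos & ~PAGE_CACHE_MASK;
 *
 * With 4 KiB pages (PAGE_CACHE_SHIFT == 12), pos == 5000 gives
 * first == 1 and off == 904, and PAGE_CACHE_ALIGN(5000) == 8192.
 */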

#define page_cache_get(page)            get_page(page)
#define page_cache_release(page)        put_page(page)
void release_pages(struct page **pages, int nr, int cold);

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
                                pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
                                pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);

struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
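
/*
 * Usage sketch (editor's addition): the usual pattern around
 * grab_cache_page().  The page is returned locked with an elevated
 * refcount, so the caller must unlock and release it:
 *
 *      struct page *page = grab_cache_page(mapping, index);
 *      if (!page)
 *              return -ENOMEM;
 *      ...fill the page, SetPageUptodate(page), set_page_dirty(page)...
 *      unlock_page(page);
 *      page_cache_release(page);
 */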

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
                                pgoff_t index, filler_t *filler,
                                void *data);
extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler,
                                void *data);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
                                                struct address_space *mapping,
                                                     pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
                                             pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
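
/*
 * Usage sketch (editor's addition): reading one page of a file through
 * its address_space.  read_cache_page() returns an ERR_PTR() on read
 * failure, so the result must be checked with IS_ERR() from linux/err.h:
 *
 *      struct page *page = read_mapping_page(mapping, index, NULL);
 *      if (IS_ERR(page))
 *              return PTR_ERR(page);
 *      ...the page is uptodate; kmap() it to look at the data...
 *      page_cache_release(page);
 */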

int add_to_page_cache(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
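
/*
 * Usage sketch (editor's addition): the readahead-style pattern of
 * allocating a fresh page and inserting it into the cache, similar to
 * what mm/readahead.c does.  On success the page comes back locked:
 *
 *      struct page *page = page_cache_alloc_cold(mapping);
 *      if (!page)
 *              return -ENOMEM;
 *      if (add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
 *              page_cache_release(page);
 *              return -EEXIST;         (index already present, or OOM)
 *      }
 *      ...issue the read, then unlock_page(page) when it completes...
 */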

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
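
/*
 * Worked example (editor's addition): with 4 KiB pages
 * (PAGE_CACHE_SHIFT == 12), a page with index 3 sits at byte offset
 * 3 << 12 == 12288 in the file.  For linear_page_index(), an address
 * 0x2000 bytes past vma->vm_start in a vma with vm_pgoff == 5 yields
 * pgoff (0x2000 >> 12) + 5 == 7 (the final shift is zero here because
 * PAGE_CACHE_SHIFT equals PAGE_SHIFT).
 */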

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(__lock_page_nosync(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (TestSetPageLocked(page))
                __lock_page(page);
}

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
        might_sleep();
        if (TestSetPageLocked(page))
                __lock_page_nosync(page);
}
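
/*
 * Usage sketch (editor's addition): the standard pairing.  Note the fast
 * path above: TestSetPageLocked() takes the lock without sleeping when it
 * is uncontended, and only the __lock_page() slow path sleeps:
 *
 *      lock_page(page);        may sleep, never call from atomic context
 *      ...page contents and page->mapping are now stable...
 *      unlock_page(page);      wakes waiters in wait_on_page_locked()
 */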

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}
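
/*
 * Usage sketch (editor's addition): waiting for IO on a page the caller
 * has pinned, e.g. after kicking off writeback:
 *
 *      page_cache_get(page);           hold a ref so the page stays put
 *      ...start writeback on the page...
 *      wait_on_page_writeback(page);   sleeps until PG_writeback clears
 *      page_cache_release(page);
 */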

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int ret;

        if (unlikely(size == 0))
                return 0;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0) {
                char __user *end = uaddr + size - 1;

                /*
                 * If the page was already mapped, this will get a cache miss
                 * for sure, so try to avoid doing it.
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        if (unlikely(size == 0))
                return 0;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __get_user(c, end);
        }
        return ret;
}
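
/*
 * Usage sketch (editor's addition): the classic write(2) pattern from
 * mm/filemap.c.  The user buffer is faulted in up front because the
 * actual copy runs while a page cache page is locked, where taking a
 * page fault could deadlock on that same page:
 *
 *      fault_in_pages_readable(buf, bytes);
 *      lock_page(page);
 *      ...copy from buf with an atomic (non-faulting) usercopy...
 *      unlock_page(page);
 */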

#endif /* _LINUX_PAGEMAP_H */
