OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk
File: or1k/trunk/linux/linux-2.4/mm/swap_state.c (rev 1765; blame: rev 1275, phoenix)

/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#include <asm/pgtable.h>

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
static int swap_writepage(struct page *page)
{
        if (remove_exclusive_swap_page(page)) {
                UnlockPage(page);
                return 0;
        }
        rw_swap_page(WRITE, page);
        return 0;
}

static struct address_space_operations swap_aops = {
        writepage: swap_writepage,
        sync_page: block_sync_page,
};
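
/*
 * Only writepage and sync_page are supplied; the remaining
 * address_space operations stay NULL because swap cache reads do not
 * go through a readpage method but through rw_swap_page(READ, ...)
 * (see read_swap_cache_async() below).
 */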

struct address_space swapper_space = {
        LIST_HEAD_INIT(swapper_space.clean_pages),
        LIST_HEAD_INIT(swapper_space.dirty_pages),
        LIST_HEAD_INIT(swapper_space.locked_pages),
        0,                              /* nrpages      */
        &swap_aops,
};
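
/*
 * The swap cache is just the page cache of swapper_space: a page is
 * indexed by the value of its swp_entry_t, stored in page->index (see
 * add_to_swap_cache() and delete_from_swap_cache() below).
 *
 * Illustrative sketch, not part of the original file: recovering the
 * swap entry of a swap cache page.
 *
 *      static inline swp_entry_t page_swap_entry(struct page *page)
 *      {
 *              swp_entry_t entry;
 *              entry.val = page->index;
 *              return entry;
 *      }
 */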

#ifdef SWAP_CACHE_INFO
#define INC_CACHE_INFO(x)       (swap_cache_info.x++)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
        unsigned long noent_race;
        unsigned long exist_race;
} swap_cache_info;

void show_swap_cache_info(void)
{
        printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total,
                swap_cache_info.noent_race, swap_cache_info.exist_race);
}
#else
#define INC_CACHE_INFO(x)       do { } while (0)
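/*
 * The empty do { } while (0) above keeps "INC_CACHE_INFO(x);" a valid
 * single statement (e.g. as the body of an unbraced if/else) even
 * when SWAP_CACHE_INFO is not configured.
 */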
#endif
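
/*
 * add_to_swap_cache() binds @page to @entry in the swap cache.
 * Returns 0 with the page locked and PageSwapCache set on success;
 * -ENOENT if the swap entry was freed before we could take a
 * reference on it; -EEXIST if another page already occupies this
 * entry in the swap cache (see the races described in
 * read_swap_cache_async() below).  The caller's reference on @page
 * is left untouched in all cases.
 */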
int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        if (page->mapping)
                BUG();
        if (!swap_duplicate(entry)) {
                INC_CACHE_INFO(noent_race);
                return -ENOENT;
        }
        if (add_to_page_cache_unique(page, &swapper_space, entry.val,
                        page_hash(&swapper_space, entry.val)) != 0) {
                swap_free(entry);
                INC_CACHE_INFO(exist_race);
                return -EEXIST;
        }
        if (!PageLocked(page))
                BUG();
        if (!PageSwapCache(page))
                BUG();
        INC_CACHE_INFO(add_total);
        return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        if (!PageLocked(page))
                BUG();
        if (!PageSwapCache(page))
                BUG();
        ClearPageDirty(page);
        __remove_inode_page(page);
        INC_CACHE_INFO(del_total);
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page onto the free list:
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        if (!PageLocked(page))
                BUG();

        if (unlikely(!block_flushpage(page, 0)))
                BUG();  /* an anonymous page cannot have page->buffers set */

        entry.val = page->index;

        spin_lock(&pagecache_lock);
        __delete_from_swap_cache(page);
        spin_unlock(&pagecache_lock);

        swap_free(entry);
        page_cache_release(page);
}
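
/*
 * Note the symmetry with add_to_swap_cache() above: the add path takes
 * a swap reference (swap_duplicate()) and a page cache reference; the
 * delete path drops both again via swap_free() and
 * page_cache_release().
 */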

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.  Cannot do a lock_page,
 * as we are holding the page_table_lock spinlock.
 */
void free_page_and_swap_cache(struct page *page)
{
        /*
         * If we are the only user, then try to free up the swap cache.
         *
         * It's OK to check for PageSwapCache without the page lock
         * here because we are going to recheck inside
         * remove_exclusive_swap_page() _with_ the lock.
         *                                      - Marcelo
         */
        if (PageSwapCache(page) && !TryLockPage(page)) {
                remove_exclusive_swap_page(page);
                UnlockPage(page);
        }
        page_cache_release(page);
}

/*
 * Look up a swap entry in the swap cache.  A found page is returned
 * unlocked and with its refcount incremented: we rely on the kernel
 * lock to keep page table operations atomic even though we drop the
 * page lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
        struct page *found;

        found = find_get_page(&swapper_space, entry.val);
        /*
         * Unsafe to assert PageSwapCache and mapping on page found:
         * on SMP nothing prevents swapoff from deleting this page from
         * the swap cache at this moment.  find_lock_page would prevent
         * that, but no need to change: we _have_ got the right page.
         */
        INC_CACHE_INFO(find_total);
        if (found)
                INC_CACHE_INFO(find_success);
        return found;
}
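
/*
 * Illustrative sketch, not part of the original file: a swap-in path
 * would pair the two lookups like this, per the comment in
 * read_swap_cache_async() below ("normally called after
 * lookup_swap_cache() failed").  Compiled out; for exposition only.
 */
#if 0
static struct page * swapin_sketch(swp_entry_t entry)
{
        struct page *page;

        page = lookup_swap_cache(entry);        /* hit: ref taken, stats updated */
        if (!page)
                page = read_swap_cache_async(entry);    /* miss: allocate and start I/O */
        return page;    /* NULL if allocation failed or the entry was freed */
}
#endif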

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page * read_swap_cache_async(swp_entry_t entry)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics: use find_get_page()
                 * directly.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page(GFP_HIGHUSER);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Associate the page with the swap entry in the swap
                 * cache.  May fail (-ENOENT) if the swap entry has been
                 * freed since our caller observed it.  May fail (-EEXIST)
                 * if there is already a page associated with this entry
                 * in the swap cache: added by a racing
                 * read_swap_cache_async, or by try_to_swap_out (or
                 * shmem_writepage) re-using the just freed swap entry
                 * for an existing page.
                 */
                err = add_to_swap_cache(new_page, entry);
                if (!err) {
                        /*
                         * Initiate read into locked page and return.
                         */
                        rw_swap_page(READ, new_page);
                        return new_page;
                }
        } while (err != -ENOENT);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}
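
/*
 * Note: as the comment above says, the read into a newly added page is
 * only initiated here; the page is returned locked while the I/O may
 * still be in flight.  A caller that needs the page contents should
 * wait for the page lock to drop and check Page_Uptodate before
 * touching the data (an inference from the code above, not part of
 * the original file).
 */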
