/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/blkdev.h> /* for blk_size */
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/shm.h>

#include <asm/pgtable.h>

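/*
 * swaplock serializes swap_list and the list linkage of the swap_info[]
 * entries; swap_list_lock()/swap_list_unlock() take it.  Each device's
 * swap_map is guarded by its own sdev_lock.
 */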
spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
unsigned int nr_swapfiles;
int total_swap_pages;
static int swap_overflow;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

struct swap_list_t swap_list = {-1, -1};

struct swap_info_struct swap_info[MAX_SWAPFILES];

#define SWAPFILE_CLUSTER 256

static inline int scan_swap_map(struct swap_info_struct *si)
{
        unsigned long offset;
        /*
         * We try to cluster swap pages by allocating them
         * sequentially in swap.  Once we've allocated
         * SWAPFILE_CLUSTER pages this way, however, we resort to
         * first-free allocation, starting a new cluster.  This
         * prevents us from scattering swap pages all over the entire
         * swap partition, so that we reduce overall disk seek times
         * between swap pages.  -- sct */
        if (si->cluster_nr) {
                while (si->cluster_next <= si->highest_bit) {
                        offset = si->cluster_next++;
                        if (si->swap_map[offset])
                                continue;
                        si->cluster_nr--;
                        goto got_page;
                }
        }
        si->cluster_nr = SWAPFILE_CLUSTER;

        /* try to find an empty (even not aligned) cluster. */
        offset = si->lowest_bit;
 check_next_cluster:
        if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
        {
                int nr;
                for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
                        if (si->swap_map[nr])
                        {
                                offset = nr+1;
                                goto check_next_cluster;
                        }
                /* We found a completely empty cluster, so start
                 * using it.
                 */
                goto got_page;
        }
        /* No luck, so now go fine grained as usual. -Andrea */
        for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
                if (si->swap_map[offset])
                        continue;
                si->lowest_bit = offset+1;
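        /* got_page is also entered, via goto, from the cluster
         * scans above, with a free slot in "offset". */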
        got_page:
                if (offset == si->lowest_bit)
                        si->lowest_bit++;
                if (offset == si->highest_bit)
                        si->highest_bit--;
                if (si->lowest_bit > si->highest_bit) {
                        si->lowest_bit = si->max;
                        si->highest_bit = 0;
                }
                si->swap_map[offset] = 1;
                nr_swap_pages--;
                si->cluster_next = offset+1;
                return offset;
        }
        si->lowest_bit = si->max;
        si->highest_bit = 0;
        return 0;
}

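/*
 * Allocate one swap slot from the highest-priority swap area with
 * free space, rotating among areas of equal priority.  Returns the
 * swap entry, or an entry with val == 0 if no swap is available.
 */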
swp_entry_t get_swap_page(void)
{
        struct swap_info_struct * p;
        unsigned long offset;
        swp_entry_t entry;
        int type, wrapped = 0;

        entry.val = 0;   /* Out of memory */
        swap_list_lock();
        type = swap_list.next;
        if (type < 0)
                goto out;
        if (nr_swap_pages <= 0)
                goto out;

        while (1) {
                p = &swap_info[type];
                if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
                        swap_device_lock(p);
                        offset = scan_swap_map(p);
                        swap_device_unlock(p);
                        if (offset) {
                                entry = SWP_ENTRY(type,offset);
                                type = swap_info[type].next;
                                if (type < 0 ||
                                        p->prio != swap_info[type].prio) {
                                                swap_list.next = swap_list.head;
                                } else {
                                        swap_list.next = type;
                                }
                                goto out;
                        }
                }
                type = p->next;
                if (!wrapped) {
                        if (type < 0 || p->prio != swap_info[type].prio) {
                                type = swap_list.head;
                                wrapped = 1;
                        }
                } else
                        if (type < 0)
                                goto out;       /* out of swap space */
        }
out:
        swap_list_unlock();
        return entry;
}

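/*
 * Validate a swap entry and look up its swap area.  On success the
 * swap list and device locks are held and the area is returned; on
 * failure the reason is logged and NULL is returned with no locks held.
 */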
static struct swap_info_struct * swap_info_get(swp_entry_t entry)
{
        struct swap_info_struct * p;
        unsigned long offset, type;

        if (!entry.val)
                goto out;
        type = SWP_TYPE(entry);
        if (type >= nr_swapfiles)
                goto bad_nofile;
        p = &swap_info[type];
        if (!(p->flags & SWP_USED))
                goto bad_device;
        offset = SWP_OFFSET(entry);
        if (offset >= p->max)
                goto bad_offset;
        if (!p->swap_map[offset])
                goto bad_free;
        swap_list_lock();
        if (p->prio > swap_info[swap_list.next].prio)
                swap_list.next = type;
        swap_device_lock(p);
        return p;

bad_free:
        printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
        goto out;
bad_offset:
        printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
        goto out;
bad_device:
        printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
        goto out;
bad_nofile:
        printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
        return NULL;
}

static void swap_info_put(struct swap_info_struct * p)
{
        swap_device_unlock(p);
        swap_list_unlock();
}

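/*
 * Drop one reference on a swap slot.  Called with the device lock
 * held; returns the remaining map count.  Counts pinned at
 * SWAP_MAP_MAX are left alone and reclaimed only by swapoff.
 */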
static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
{
        int count = p->swap_map[offset];

        if (count < SWAP_MAP_MAX) {
                count--;
                p->swap_map[offset] = count;
                if (!count) {
                        if (offset < p->lowest_bit)
                                p->lowest_bit = offset;
                        if (offset > p->highest_bit)
                                p->highest_bit = offset;
                        nr_swap_pages++;
                }
        }
        return count;
}

/*
 * Caller has made sure that the swapdevice corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
        struct swap_info_struct * p;

        p = swap_info_get(entry);
        if (p) {
                swap_entry_free(p, SWP_OFFSET(entry));
                swap_info_put(p);
        }
}

/*
 * Check if we're the only user of a swap page,
 * when the page is locked.
 */
static int exclusive_swap_page(struct page *page)
{
        int retval = 0;
        struct swap_info_struct * p;
        swp_entry_t entry;

        entry.val = page->index;
        p = swap_info_get(entry);
        if (p) {
                /* Is the only swap cache user the cache itself? */
                if (p->swap_map[SWP_OFFSET(entry)] == 1) {
                        /* Recheck the page count with the pagecache lock held.. */
                        spin_lock(&pagecache_lock);
                        if (page_count(page) - !!page->buffers == 2)
                                retval = 1;
                        spin_unlock(&pagecache_lock);
                }
                swap_info_put(p);
        }
        return retval;
}

/*
 * We can use this swap cache entry directly
 * if there are no other references to it.
 *
 * Here "exclusive_swap_page()" does the real
 * work, but we opportunistically check whether
 * we need to get all the locks first..
 */
int can_share_swap_page(struct page *page)
{
        int retval = 0;

        if (!PageLocked(page))
                BUG();
        switch (page_count(page)) {
        case 3:
                if (!page->buffers)
                        break;
                /* Fallthrough */
        case 2:
                if (!PageSwapCache(page))
                        break;
                retval = exclusive_swap_page(page);
                break;
        case 1:
                if (PageReserved(page))
                        break;
                retval = 1;
        }
        return retval;
}

/*
 * Work out if there are any other processes sharing this
 * swap cache page. Free it if you can. Return success.
 */
int remove_exclusive_swap_page(struct page *page)
{
        int retval;
        struct swap_info_struct * p;
        swp_entry_t entry;

        if (!PageLocked(page))
                BUG();
        if (!PageSwapCache(page))
                return 0;
        if (page_count(page) - !!page->buffers != 2)    /* 2: us + cache */
                return 0;

        entry.val = page->index;
        p = swap_info_get(entry);
        if (!p)
                return 0;

        /* Is the only swap cache user the cache itself? */
        retval = 0;
        if (p->swap_map[SWP_OFFSET(entry)] == 1) {
                /* Recheck the page count with the pagecache lock held.. */
                spin_lock(&pagecache_lock);
                if (page_count(page) - !!page->buffers == 2) {
                        __delete_from_swap_cache(page);
                        SetPageDirty(page);
                        retval = 1;
                }
                spin_unlock(&pagecache_lock);
        }
        swap_info_put(p);

        if (retval) {
                block_flushpage(page, 0);
                swap_free(entry);
                page_cache_release(page);
        }

        return retval;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
        struct swap_info_struct * p;
        struct page *page = NULL;

        p = swap_info_get(entry);
        if (p) {
                if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
                        page = find_trylock_page(&swapper_space, entry.val);
                swap_info_put(p);
        }
        if (page) {
                page_cache_get(page);
                /* Only cache user (+us), or swap space full? Free it! */
                if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
                        delete_from_swap_cache(page);
                        SetPageDirty(page);
                }
                UnlockPage(page);
                page_cache_release(page);
        }
}

/*
 * The swap entry has been read in advance, and we return 1 to indicate
 * that the page has been used or is no longer needed.
 *
 * Always set the resulting pte to be nowrite (the same as COW pages
 * after one process has exited).  We don't know just how many PTEs will
 * share this swap entry, so be cautious and let do_wp_page work out
 * what to do if a write is requested later.
 */
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
        pte_t *dir, swp_entry_t entry, struct page* page)
{
        pte_t pte = *dir;

        if (likely(pte_to_swp_entry(pte).val != entry.val))
                return;
        if (unlikely(pte_none(pte) || pte_present(pte)))
                return;
        get_page(page);
        set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
        swap_free(entry);
        ++vma->vm_mm->rss;
}

/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
        unsigned long address, unsigned long size, unsigned long offset,
        swp_entry_t entry, struct page* page)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*dir))
                return;
        if (pmd_bad(*dir)) {
                pmd_ERROR(*dir);
                pmd_clear(dir);
                return;
        }
        pte = pte_offset(dir, address);
        offset += address & PMD_MASK;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page);
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}

/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
        unsigned long address, unsigned long size,
        swp_entry_t entry, struct page* page)
{
        pmd_t * pmd;
        unsigned long offset, end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        offset = address & PGDIR_MASK;
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        if (address >= end)
                BUG();
        do {
                unuse_pmd(vma, pmd, address, end - address, offset, entry,
                          page);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
}

/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static void unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
                        swp_entry_t entry, struct page* page)
{
        unsigned long start = vma->vm_start, end = vma->vm_end;

        if (start >= end)
                BUG();
        do {
                unuse_pgd(vma, pgdir, start, end - start, entry, page);
                start = (start + PGDIR_SIZE) & PGDIR_MASK;
                pgdir++;
        } while (start && (start < end));
}

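/*
 * Walk every vma of one mm, replacing each pte that still refers to
 * the swap entry with a pte mapping the (already read-in) page.
 */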
static void unuse_process(struct mm_struct * mm,
                        swp_entry_t entry, struct page* page)
{
        struct vm_area_struct* vma;

        /*
         * Go through process' page directory.
         */
        spin_lock(&mm->page_table_lock);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                pgd_t * pgd = pgd_offset(mm, vma->vm_start);
                unuse_vma(vma, pgd, entry, page);
        }
        spin_unlock(&mm->page_table_lock);
        return;
}

/*
 * Scan swap_map from current position to next entry still in use.
 * Recycle to start on reaching the end, returning 0 when empty.
 */
static int find_next_to_unuse(struct swap_info_struct *si, int prev)
{
        int max = si->max;
        int i = prev;
        int count;

        /*
         * No need for swap_device_lock(si) here: we're just looking
         * for whether an entry is in use, not modifying it; false
         * hits are okay, and sys_swapoff() has already prevented new
         * allocations from this area (while holding swap_list_lock()).
         */
        for (;;) {
                if (++i >= max) {
                        if (!prev) {
                                i = 0;
                                break;
                        }
                        /*
                         * No entries in use at top of swap_map,
                         * loop back to start and recheck there.
                         */
                        max = prev + 1;
                        prev = 0;
                        i = 1;
                }
                count = si->swap_map[i];
                if (count && count != SWAP_MAP_BAD)
                        break;
        }
        return i;
}

/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
        struct swap_info_struct * si = &swap_info[type];
        struct mm_struct *start_mm;
        unsigned short *swap_map;
        unsigned short swcount;
        struct page *page;
        swp_entry_t entry;
        int i = 0;
        int retval = 0;
        int reset_overflow = 0;
        int shmem;

        /*
         * When searching mms for an entry, a good strategy is to
         * start at the first mm we freed the previous entry from
         * (though actually we don't notice whether we or coincidence
         * freed the entry).  Initialize this start_mm with a hold.
         *
         * A simpler strategy would be to start at the last mm we
         * freed the previous entry from; but that would take less
         * advantage of mmlist ordering (now preserved by swap_out()),
         * which clusters forked address spaces together, most recent
         * child immediately after parent.  If we race with dup_mmap(),
         * we very much want to resolve parent before child, otherwise
         * we may miss some entries: using last mm would invert that.
         */
        start_mm = &init_mm;
        atomic_inc(&init_mm.mm_users);

        /*
         * Keep on scanning until all entries have gone.  Usually,
         * one pass through swap_map is enough, but not necessarily:
         * mmput() removes mm from mmlist before exit_mmap() and its
         * zap_page_range().  That's not too bad, those entries are
         * on their way out, and handled faster there than here.
         * do_munmap() behaves similarly, taking the range out of mm's
         * vma list before zap_page_range().  But unfortunately, when
         * unmapping a part of a vma, it takes the whole out first,
         * then reinserts what's left after (might even reschedule if
         * open() method called) - so swap entries may be invisible
         * to swapoff for a while, then reappear - but that is rare.
         */
        while ((i = find_next_to_unuse(si, i))) {
                /*
                 * Get a page for the entry, using the existing swap
                 * cache page if there is one.  Otherwise, get a clean
                 * page and read the swap into it.
                 */
                swap_map = &si->swap_map[i];
                entry = SWP_ENTRY(type, i);
                page = read_swap_cache_async(entry);
                if (!page) {
                        /*
                         * Either swap_duplicate() failed because entry
                         * has been freed independently, and will not be
                         * reused since sys_swapoff() already disabled
                         * allocation from here, or alloc_page() failed.
                         */
                        if (!*swap_map)
                                continue;
                        retval = -ENOMEM;
                        break;
                }

                /*
                 * Don't hold on to start_mm if it looks like exiting.
                 */
                if (atomic_read(&start_mm->mm_users) == 1) {
                        mmput(start_mm);
                        start_mm = &init_mm;
                        atomic_inc(&init_mm.mm_users);
                }

                /*
                 * Wait for and lock page.  When do_swap_page races with
                 * try_to_unuse, do_swap_page can handle the fault much
                 * faster than try_to_unuse can locate the entry.  This
                 * apparently redundant "wait_on_page" lets try_to_unuse
                 * defer to do_swap_page in such a case - in some tests,
                 * do_swap_page and try_to_unuse repeatedly compete.
                 */
                wait_on_page(page);
                lock_page(page);

                /*
                 * Remove all references to entry, without blocking.
                 * Whenever we reach init_mm, there's no address space
                 * to search, but use it as a reminder to search shmem.
                 */
                shmem = 0;
                swcount = *swap_map;
                if (swcount > 1) {
                        flush_page_to_ram(page);
                        if (start_mm == &init_mm)
                                shmem = shmem_unuse(entry, page);
                        else
                                unuse_process(start_mm, entry, page);
                }
                if (*swap_map > 1) {
                        int set_start_mm = (*swap_map >= swcount);
                        struct list_head *p = &start_mm->mmlist;
                        struct mm_struct *new_start_mm = start_mm;
                        struct mm_struct *mm;

                        spin_lock(&mmlist_lock);
                        while (*swap_map > 1 &&
                                        (p = p->next) != &start_mm->mmlist) {
                                mm = list_entry(p, struct mm_struct, mmlist);
                                swcount = *swap_map;
                                if (mm == &init_mm) {
                                        set_start_mm = 1;
                                        spin_unlock(&mmlist_lock);
                                        shmem = shmem_unuse(entry, page);
                                        spin_lock(&mmlist_lock);
                                } else
                                        unuse_process(mm, entry, page);
                                if (set_start_mm && *swap_map < swcount) {
                                        new_start_mm = mm;
                                        set_start_mm = 0;
                                }
                        }
                        atomic_inc(&new_start_mm->mm_users);
                        spin_unlock(&mmlist_lock);
                        mmput(start_mm);
                        start_mm = new_start_mm;
                }

                /*
                 * How could swap count reach 0x7fff when the maximum
                 * pid is 0x7fff, and there's no way to repeat a swap
                 * page within an mm (except in shmem, where it's the
                 * shared object which takes the reference count)?
                 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
                 *
                 * If that's wrong, then we should worry more about
                 * exit_mmap() and do_munmap() cases described above:
                 * we might be resetting SWAP_MAP_MAX too early here.
                 * We know "Undead"s can happen, they're okay, so don't
                 * report them; but do report if we reset SWAP_MAP_MAX.
                 */
                if (*swap_map == SWAP_MAP_MAX) {
                        swap_list_lock();
                        swap_device_lock(si);
                        nr_swap_pages++;
                        *swap_map = 1;
                        swap_device_unlock(si);
                        swap_list_unlock();
                        reset_overflow = 1;
                }

                /*
                 * If a reference remains (rare), we would like to leave
                 * the page in the swap cache; but try_to_swap_out could
                 * then re-duplicate the entry once we drop page lock,
                 * so we might loop indefinitely; also, that page could
                 * not be swapped out to other storage meanwhile.  So:
                 * delete from cache even if there's another reference,
                 * after ensuring that the data has been saved to disk -
                 * since if the reference remains (rarer), it will be
                 * read from disk into another page.  Splitting into two
                 * pages would be incorrect if swap supported "shared
                 * private" pages, but they are handled by tmpfs files.
                 *
                 * Note shmem_unuse already deleted swappage from cache,
                 * unless corresponding filepage found already in cache:
                 * in which case it left swappage in cache, lowered its
                 * swap count to pass quickly through the loops above,
                 * and now we must reincrement count to try again later.
                 */
                if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
                        rw_swap_page(WRITE, page);
                        lock_page(page);
                }
                if (PageSwapCache(page)) {
                        if (shmem)
                                swap_duplicate(entry);
                        else
                                delete_from_swap_cache(page);
                }

                /*
                 * So we could skip searching mms once swap count went
                 * to 1, we did not mark any present ptes as dirty: must
                 * mark page dirty so try_to_swap_out will preserve it.
                 */
                SetPageDirty(page);
                UnlockPage(page);
                page_cache_release(page);

                /*
                 * Make sure that we aren't completely killing
                 * interactive performance.  Interruptible check on
                 * signal_pending() would be nice, but changes the spec?
                 */
                if (current->need_resched)
                        schedule();
        }

        mmput(start_mm);
        if (reset_overflow) {
                printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
                swap_overflow = 0;
        }
        return retval;
}

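/*
 * The swapoff system call: remove the swap area from the active list,
 * pull all of its entries back into memory via try_to_unuse(), and
 * release the underlying device or file.  On failure the area is
 * reinserted into the list.
 */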
asmlinkage long sys_swapoff(const char * specialfile)
{
        struct swap_info_struct * p = NULL;
        unsigned short *swap_map;
        struct nameidata nd;
        int i, type, prev;
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        err = user_path_walk(specialfile, &nd);
        if (err)
                goto out;

        lock_kernel();
        prev = -1;
        swap_list_lock();
        for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
                p = swap_info + type;
                if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
                        if (p->swap_file == nd.dentry)
                                break;
                }
                prev = type;
        }
        err = -EINVAL;
        if (type < 0) {
                swap_list_unlock();
                goto out_dput;
        }

        if (prev < 0) {
                swap_list.head = p->next;
        } else {
                swap_info[prev].next = p->next;
        }
        if (type == swap_list.next) {
                /* just pick something that's safe... */
                swap_list.next = swap_list.head;
        }
        nr_swap_pages -= p->pages;
        total_swap_pages -= p->pages;
        p->flags = SWP_USED;
        swap_list_unlock();
        unlock_kernel();
        err = try_to_unuse(type);
        lock_kernel();
        if (err) {
                /* re-insert swap space back into swap_list */
                swap_list_lock();
                for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
                        if (p->prio >= swap_info[i].prio)
                                break;
                p->next = i;
                if (prev < 0)
                        swap_list.head = swap_list.next = p - swap_info;
                else
                        swap_info[prev].next = p - swap_info;
                nr_swap_pages += p->pages;
                total_swap_pages += p->pages;
                p->flags = SWP_WRITEOK;
                swap_list_unlock();
                goto out_dput;
        }
        if (p->swap_device)
                blkdev_put(p->swap_file->d_inode->i_bdev, BDEV_SWAP);
        path_release(&nd);

        swap_list_lock();
        swap_device_lock(p);
        nd.mnt = p->swap_vfsmnt;
        nd.dentry = p->swap_file;
        p->swap_vfsmnt = NULL;
        p->swap_file = NULL;
        p->swap_device = 0;
        p->max = 0;
        swap_map = p->swap_map;
        p->swap_map = NULL;
        p->flags = 0;
        swap_device_unlock(p);
        swap_list_unlock();
        vfree(swap_map);
        err = 0;

out_dput:
        unlock_kernel();
        path_release(&nd);
out:
        return err;
}

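/*
 * Report the active swap areas (used for /proc/swaps): filename,
 * type, total and used size in kilobytes, and priority.
 */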
int get_swaparea_info(char *buf)
{
        char * page = (char *) __get_free_page(GFP_KERNEL);
        struct swap_info_struct *ptr = swap_info;
        int i, j, len = 0, usedswap;

        if (!page)
                return -ENOMEM;

        len += sprintf(buf, "Filename\t\t\tType\t\tSize\tUsed\tPriority\n");
        for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
                if ((ptr->flags & SWP_USED) && ptr->swap_map) {
                        char * path = d_path(ptr->swap_file, ptr->swap_vfsmnt,
                                                page, PAGE_SIZE);

                        len += sprintf(buf + len, "%-31s ", path);

                        if (!ptr->swap_device)
                                len += sprintf(buf + len, "file\t\t");
                        else
                                len += sprintf(buf + len, "partition\t");

                        usedswap = 0;
                        for (j = 0; j < ptr->max; ++j)
                                switch (ptr->swap_map[j]) {
                                        case SWAP_MAP_BAD:
                                        case 0:
                                                continue;
                                        default:
                                                usedswap++;
                                }
                        len += sprintf(buf + len, "%d\t%d\t%d\n", ptr->pages << (PAGE_SHIFT - 10),
                                usedswap << (PAGE_SHIFT - 10), ptr->prio);
                }
        }
        free_page((unsigned long) page);
        return len;
}

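/* Is this device already in use as a swap partition? */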
int is_swap_partition(kdev_t dev) {
        struct swap_info_struct *ptr = swap_info;
        int i;

        for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
                if (ptr->flags & SWP_USED)
                        if (ptr->swap_device == dev)
                                return 1;
        }
        return 0;
}

/*
 * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
 *
 * The swapon system call
 */
asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
{
        struct swap_info_struct * p;
        struct nameidata nd;
        struct inode * swap_inode;
        unsigned int type;
        int i, j, prev;
        int error;
        static int least_priority = 0;
        union swap_header *swap_header = 0;
        int swap_header_version;
        int nr_good_pages = 0;
        unsigned long maxpages = 1;
        int swapfilesize;
        struct block_device *bdev = NULL;
        unsigned short *swap_map;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        lock_kernel();
        swap_list_lock();
        p = swap_info;
        for (type = 0 ; type < nr_swapfiles ; type++,p++)
                if (!(p->flags & SWP_USED))
                        break;
        error = -EPERM;
        if (type >= MAX_SWAPFILES) {
                swap_list_unlock();
                goto out;
        }
        if (type >= nr_swapfiles)
                nr_swapfiles = type+1;
        p->flags = SWP_USED;
        p->swap_file = NULL;
        p->swap_vfsmnt = NULL;
        p->swap_device = 0;
        p->swap_map = NULL;
        p->lowest_bit = 0;
        p->highest_bit = 0;
        p->cluster_nr = 0;
        p->sdev_lock = SPIN_LOCK_UNLOCKED;
        p->next = -1;
        if (swap_flags & SWAP_FLAG_PREFER) {
                p->prio =
                  (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
        } else {
                p->prio = --least_priority;
        }
        swap_list_unlock();
        error = user_path_walk(specialfile, &nd);
        if (error)
                goto bad_swap_2;

        p->swap_file = nd.dentry;
        p->swap_vfsmnt = nd.mnt;
        swap_inode = nd.dentry->d_inode;
        error = -EINVAL;

        if (S_ISBLK(swap_inode->i_mode)) {
                kdev_t dev = swap_inode->i_rdev;
                struct block_device_operations *bdops;
                devfs_handle_t de;

                if (is_mounted(dev)) {
                        error = -EBUSY;
                        goto bad_swap_2;
                }

                p->swap_device = dev;
                set_blocksize(dev, PAGE_SIZE);

                bd_acquire(swap_inode);
                bdev = swap_inode->i_bdev;
                de = devfs_get_handle_from_inode(swap_inode);
                bdops = devfs_get_ops(de);  /*  Increments module use count  */
                if (bdops) bdev->bd_op = bdops;

                error = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_SWAP);
                devfs_put_ops(de);  /* decrement module use count now we're safe */
                if (error)
                        goto bad_swap_2;
                set_blocksize(dev, PAGE_SIZE);
                error = -ENODEV;
                if (!dev || (blk_size[MAJOR(dev)] &&
                     !blk_size[MAJOR(dev)][MINOR(dev)]))
                        goto bad_swap;
                swapfilesize = 0;
                if (blk_size[MAJOR(dev)])
                        swapfilesize = blk_size[MAJOR(dev)][MINOR(dev)]
                                >> (PAGE_SHIFT - 10);
        } else if (S_ISREG(swap_inode->i_mode))
                swapfilesize = swap_inode->i_size >> PAGE_SHIFT;
        else
                goto bad_swap;

        error = -EBUSY;
        for (i = 0 ; i < nr_swapfiles ; i++) {
                struct swap_info_struct *q = &swap_info[i];
                if (i == type || !q->swap_file)
                        continue;
                if (swap_inode->i_mapping == q->swap_file->d_inode->i_mapping)
                        goto bad_swap;
        }

        swap_header = (void *) __get_free_page(GFP_USER);
        if (!swap_header) {
                printk("Unable to start swapping: out of memory :-)\n");
                error = -ENOMEM;
                goto bad_swap;
        }

        lock_page(virt_to_page(swap_header));
        rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header);

        if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
                swap_header_version = 1;
        else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
                swap_header_version = 2;
        else {
                printk("Unable to find swap-space signature\n");
                error = -EINVAL;
                goto bad_swap;
        }
 
992
        switch (swap_header_version) {
993
        case 1:
994
                memset(((char *) swap_header)+PAGE_SIZE-10,0,10);
995
                j = 0;
996
                p->lowest_bit = 0;
997
                p->highest_bit = 0;
998
                for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
999
                        if (test_bit(i,(char *) swap_header)) {
1000
                                if (!p->lowest_bit)
1001
                                        p->lowest_bit = i;
1002
                                p->highest_bit = i;
1003
                                maxpages = i+1;
1004
                                j++;
1005
                        }
1006
                }
1007
                nr_good_pages = j;
1008
                p->swap_map = vmalloc(maxpages * sizeof(short));
1009
                if (!p->swap_map) {
1010
                        error = -ENOMEM;
1011
                        goto bad_swap;
1012
                }
1013
                for (i = 1 ; i < maxpages ; i++) {
1014
                        if (test_bit(i,(char *) swap_header))
1015
                                p->swap_map[i] = 0;
1016
                        else
1017
                                p->swap_map[i] = SWAP_MAP_BAD;
1018
                }
1019
                break;
1020
 
1021
        case 2:
1022
                /* Check the swap header's sub-version and the size of
1023
                   the swap file and bad block lists */
1024
                if (swap_header->info.version != 1) {
1025
                        printk(KERN_WARNING
1026
                               "Unable to handle swap header version %d\n",
1027
                               swap_header->info.version);
1028
                        error = -EINVAL;
1029
                        goto bad_swap;
1030
                }
1031
 
1032
                p->lowest_bit  = 1;
1033
                maxpages = SWP_OFFSET(SWP_ENTRY(0,~0UL)) - 1;
1034
                if (maxpages > swap_header->info.last_page)
1035
                        maxpages = swap_header->info.last_page;
1036
                p->highest_bit = maxpages - 1;
1037
 
1038
                error = -EINVAL;
1039
                if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
1040
                        goto bad_swap;
1041
 
1042
                /* OK, set up the swap map and apply the bad block list */
1043
                if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
1044
                        error = -ENOMEM;
1045
                        goto bad_swap;
1046
                }
1047
 
1048
                error = 0;
1049
                memset(p->swap_map, 0, maxpages * sizeof(short));
1050
                for (i=0; i<swap_header->info.nr_badpages; i++) {
1051
                        int page = swap_header->info.badpages[i];
1052
                        if (page <= 0 || page >= swap_header->info.last_page)
1053
                                error = -EINVAL;
1054
                        else
1055
                                p->swap_map[page] = SWAP_MAP_BAD;
1056
                }
1057
                nr_good_pages = swap_header->info.last_page -
1058
                                swap_header->info.nr_badpages -
1059
                                1 /* header page */;
1060
                if (error)
1061
                        goto bad_swap;
1062
        }
1063
 
1064
        if (swapfilesize && maxpages > swapfilesize) {
1065
                printk(KERN_WARNING
1066
                       "Swap area shorter than signature indicates\n");
1067
                error = -EINVAL;
1068
                goto bad_swap;
1069
        }
1070
        if (!nr_good_pages) {
1071
                printk(KERN_WARNING "Empty swap-file\n");
1072
                error = -EINVAL;
1073
                goto bad_swap;
1074
        }
1075
        p->swap_map[0] = SWAP_MAP_BAD;
1076
        swap_list_lock();
1077
        swap_device_lock(p);
1078
        p->max = maxpages;
1079
        p->flags = SWP_WRITEOK;
1080
        p->pages = nr_good_pages;
1081
        nr_swap_pages += nr_good_pages;
1082
        total_swap_pages += nr_good_pages;
1083
        printk(KERN_INFO "Adding Swap: %dk swap-space (priority %d)\n",
1084
               nr_good_pages<<(PAGE_SHIFT-10), p->prio);
1085
 
1086
        /* insert swap space into swap_list: */
1087
        prev = -1;
1088
        for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
1089
                if (p->prio >= swap_info[i].prio) {
1090
                        break;
1091
                }
1092
                prev = i;
1093
        }
1094
        p->next = i;
1095
        if (prev < 0) {
1096
                swap_list.head = swap_list.next = p - swap_info;
1097
        } else {
1098
                swap_info[prev].next = p - swap_info;
1099
        }
1100
        swap_device_unlock(p);
1101
        swap_list_unlock();
1102
        error = 0;
1103
        goto out;
1104
bad_swap:
1105
        if (bdev)
1106
                blkdev_put(bdev, BDEV_SWAP);
1107
bad_swap_2:
1108
        swap_list_lock();
1109
        swap_map = p->swap_map;
1110
        nd.mnt = p->swap_vfsmnt;
1111
        nd.dentry = p->swap_file;
1112
        p->swap_device = 0;
1113
        p->swap_file = NULL;
1114
        p->swap_vfsmnt = NULL;
1115
        p->swap_map = NULL;
1116
        p->flags = 0;
1117
        if (!(swap_flags & SWAP_FLAG_PREFER))
1118
                ++least_priority;
1119
        swap_list_unlock();
1120
        if (swap_map)
1121
                vfree(swap_map);
1122
        path_release(&nd);
1123
out:
1124
        if (swap_header)
1125
                free_page((long) swap_header);
1126
        unlock_kernel();
1127
        return error;
1128
}
1129
 
1130
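/*
 * Fill in the swap fields of a sysinfo structure.  Entries still in
 * use on an area in mid-swapoff (flags exactly SWP_USED) are counted
 * back into both the free and total figures, so the totals stay
 * stable while swapoff runs.
 */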
void si_swapinfo(struct sysinfo *val)
{
        unsigned int i;
        unsigned long nr_to_be_unused = 0;

        swap_list_lock();
        for (i = 0; i < nr_swapfiles; i++) {
                unsigned int j;
                if (swap_info[i].flags != SWP_USED)
                        continue;
                for (j = 0; j < swap_info[i].max; ++j) {
                        switch (swap_info[i].swap_map[j]) {
                                case 0:
                                case SWAP_MAP_BAD:
                                        continue;
                                default:
                                        nr_to_be_unused++;
                        }
                }
        }
        val->freeswap = nr_swap_pages + nr_to_be_unused;
        val->totalswap = total_swap_pages + nr_to_be_unused;
        swap_list_unlock();
}

/*
 * Verify that a swap entry is valid and increment its swap map count.
 *
 * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
 * "permanent", but will be reclaimed by the next swapoff.
 */
int swap_duplicate(swp_entry_t entry)
{
        struct swap_info_struct * p;
        unsigned long offset, type;
        int result = 0;

        type = SWP_TYPE(entry);
        if (type >= nr_swapfiles)
                goto bad_file;
        p = type + swap_info;
        offset = SWP_OFFSET(entry);

        swap_device_lock(p);
        if (offset < p->max && p->swap_map[offset]) {
                if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
                        p->swap_map[offset]++;
                        result = 1;
                } else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
                        if (swap_overflow++ < 5)
                                printk(KERN_WARNING "swap_dup: swap entry overflow\n");
                        p->swap_map[offset] = SWAP_MAP_MAX;
                        result = 1;
                }
        }
        swap_device_unlock(p);
out:
        return result;

bad_file:
        printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
        goto out;
}

/*
 * Prior swap_duplicate protects against swap device deletion.
 */
void get_swaphandle_info(swp_entry_t entry, unsigned long *offset,
                        kdev_t *dev, struct inode **swapf)
{
        unsigned long type;
        struct swap_info_struct *p;

        type = SWP_TYPE(entry);
        if (type >= nr_swapfiles) {
                printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_file, entry.val);
                return;
        }

        p = &swap_info[type];
        *offset = SWP_OFFSET(entry);
        if (*offset >= p->max && *offset != 0) {
                printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_offset, entry.val);
                return;
        }
        if (p->swap_map && !p->swap_map[*offset]) {
                printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_offset, entry.val);
                return;
        }
        if (!(p->flags & SWP_USED)) {
                printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_file, entry.val);
                return;
        }

        if (p->swap_device) {
                *dev = p->swap_device;
        } else if (p->swap_file) {
                *swapf = p->swap_file->d_inode;
        } else {
                printk(KERN_ERR "rw_swap_page: no swap file or device\n");
        }
        return;
}

/*
 * swap_device_lock prevents swap_map being freed. Don't grab an extra
 * reference on the swaphandle, it doesn't matter if it becomes unused.
 */
int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
        int ret = 0, i = 1 << page_cluster;
        unsigned long toff;
        struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;

        if (!page_cluster)      /* no readahead */
                return 0;
        toff = (SWP_OFFSET(entry) >> page_cluster) << page_cluster;
        if (!toff)              /* first page is swap header */
                toff++, i--;
        *offset = toff;

        swap_device_lock(swapdev);
        do {
                /* Don't read-ahead past the end of the swap area */
                if (toff >= swapdev->max)
                        break;
                /* Don't read in free or bad pages */
                if (!swapdev->swap_map[toff])
                        break;
                if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
                        break;
                toff++;
                ret++;
        } while (--i);
        swap_device_unlock(swapdev);
        return ret;
}
