OpenCores
URL https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion repository or1k_soc_on_altera_embedded_dev_kit: trunk/linux-2.6/linux-2.6.24/kernel/power/snapshot.c (blame listing for rev 3, author xianfeng)

/*
2
 * linux/kernel/power/snapshot.c
3
 *
4
 * This file provides system snapshot/restore functionality for swsusp.
5
 *
6
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
7
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
8
 *
9
 * This file is released under the GPLv2.
10
 *
11
 */
12
 
13
#include <linux/version.h>
14
#include <linux/module.h>
15
#include <linux/mm.h>
16
#include <linux/suspend.h>
17
#include <linux/delay.h>
18
#include <linux/bitops.h>
19
#include <linux/spinlock.h>
20
#include <linux/kernel.h>
21
#include <linux/pm.h>
22
#include <linux/device.h>
23
#include <linux/init.h>
24
#include <linux/bootmem.h>
25
#include <linux/syscalls.h>
26
#include <linux/console.h>
27
#include <linux/highmem.h>
28
 
29
#include <asm/uaccess.h>
30
#include <asm/mmu_context.h>
31
#include <asm/pgtable.h>
32
#include <asm/tlbflush.h>
33
#include <asm/io.h>
34
 
35
#include "power.h"
36
 
37
static int swsusp_page_is_free(struct page *);
38
static void swsusp_set_page_forbidden(struct page *);
39
static void swsusp_unset_page_forbidden(struct page *);
40
 
41
/* List of PBEs needed for restoring the pages that were allocated before
42
 * the suspend and included in the suspend image, but have also been
43
 * allocated by the "resume" kernel, so their contents cannot be written
44
 * directly to their "original" page frames.
45
 */
46
struct pbe *restore_pblist;
47
 
48
/* Pointer to an auxiliary buffer (1 page) */
49
static void *buffer;
50
 
51
/**
52
 *      @safe_needed - on resume, for storing the PBE list and the image,
53
 *      we can only use memory pages that do not conflict with the pages
54
 *      used before suspend.  The unsafe pages have PageNosaveFree set
55
 *      and we count them using unsafe_pages.
56
 *
57
 *      Each allocated image page is marked as PageNosave and PageNosaveFree
58
 *      so that swsusp_free() can release it.
59
 */
60
 
61
#define PG_ANY          0
62
#define PG_SAFE         1
63
#define PG_UNSAFE_CLEAR 1
64
#define PG_UNSAFE_KEEP  0
65
 
66
static unsigned int allocated_unsafe_pages;
67
 
68
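/* Allocate a zeroed page for image data.  With @safe_needed set, pages that
 * conflict with the pages used before the suspend are rejected; they stay
 * marked as forbidden so that swsusp_free() can release them later, and
 * they are counted in allocated_unsafe_pages.
 */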
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
69
{
70
        void *res;
71
 
72
        res = (void *)get_zeroed_page(gfp_mask);
73
        if (safe_needed)
74
                while (res && swsusp_page_is_free(virt_to_page(res))) {
75
                        /* The page is unsafe, mark it for swsusp_free() */
76
                        swsusp_set_page_forbidden(virt_to_page(res));
77
                        allocated_unsafe_pages++;
78
                        res = (void *)get_zeroed_page(gfp_mask);
79
                }
80
        if (res) {
81
                swsusp_set_page_forbidden(virt_to_page(res));
82
                swsusp_set_page_free(virt_to_page(res));
83
        }
84
        return res;
85
}
86
 
87
unsigned long get_safe_page(gfp_t gfp_mask)
88
{
89
        return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
90
}
91
 
92
static struct page *alloc_image_page(gfp_t gfp_mask)
93
{
94
        struct page *page;
95
 
96
        page = alloc_page(gfp_mask);
97
        if (page) {
98
                swsusp_set_page_forbidden(page);
99
                swsusp_set_page_free(page);
100
        }
101
        return page;
102
}
103
 
104
/**
105
 *      free_image_page - free page represented by @addr, allocated with
106
 *      get_image_page (page flags set by it must be cleared)
107
 */
108
 
109
static inline void free_image_page(void *addr, int clear_nosave_free)
110
{
111
        struct page *page;
112
 
113
        BUG_ON(!virt_addr_valid(addr));
114
 
115
        page = virt_to_page(addr);
116
 
117
        swsusp_unset_page_forbidden(page);
118
        if (clear_nosave_free)
119
                swsusp_unset_page_free(page);
120
 
121
        __free_page(page);
122
}
123
 
124
/* struct linked_page is used to build chains of pages */
125
 
126
#define LINKED_PAGE_DATA_SIZE   (PAGE_SIZE - sizeof(void *))
127
 
128
struct linked_page {
129
        struct linked_page *next;
130
        char data[LINKED_PAGE_DATA_SIZE];
131
} __attribute__((packed));
132
 
133
static inline void
134
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
135
{
136
        while (list) {
137
                struct linked_page *lp = list->next;
138
 
139
                free_image_page(list, clear_page_nosave);
140
                list = lp;
141
        }
142
}
143
 
144
/**
145
  *     struct chain_allocator is used for allocating small objects out of
146
  *     a linked list of pages called 'the chain'.
147
  *
148
  *     The chain grows whenever there is no room for a new object in
149
  *     the current page.  The allocated objects cannot be freed individually.
150
  *     It is only possible to free them all at once, by freeing the entire
151
  *     chain.
152
  *
153
  *     NOTE: The chain allocator may be inefficient if the allocated objects
154
  *     are not much smaller than PAGE_SIZE.
155
  */
156
 
157
struct chain_allocator {
158
        struct linked_page *chain;      /* the chain */
159
        unsigned int used_space;        /* total size of objects allocated out
160
                                         * of the current page
161
                                         */
162
        gfp_t gfp_mask;         /* mask for allocating pages */
163
        int safe_needed;        /* if set, only "safe" pages are allocated */
164
};
165
 
166
static void
167
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
168
{
169
        ca->chain = NULL;
170
        ca->used_space = LINKED_PAGE_DATA_SIZE;
171
        ca->gfp_mask = gfp_mask;
172
        ca->safe_needed = safe_needed;
173
}
174
 
175
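/* Hand out @size bytes from the current page of the chain.  If the object
 * does not fit into the space left in that page, a fresh page is allocated
 * first and linked at the head of the chain.
 */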
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
176
{
177
        void *ret;
178
 
179
        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
180
                struct linked_page *lp;
181
 
182
                lp = get_image_page(ca->gfp_mask, ca->safe_needed);
183
                if (!lp)
184
                        return NULL;
185
 
186
                lp->next = ca->chain;
187
                ca->chain = lp;
188
                ca->used_space = 0;
189
        }
190
        ret = ca->chain->data + ca->used_space;
191
        ca->used_space += size;
192
        return ret;
193
}
194
 
195
static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
196
{
197
        free_list_of_pages(ca->chain, clear_page_nosave);
198
        memset(ca, 0, sizeof(struct chain_allocator));
199
}
200
 
201
/**
202
 *      Data types related to memory bitmaps.
203
 *
204
 *      A memory bitmap is a structure consisting of many linked lists of
205
 *      objects.  The main list's elements are of type struct zone_bitmap
206
 *      and each of them corresponds to one zone.  For each zone bitmap
207
 *      object there is a list of objects of type struct bm_block that
208
 *      represent the blocks of bit chunks in which information is
209
 *      stored.
210
 *
211
 *      struct memory_bitmap contains a pointer to the main list of zone
212
 *      bitmap objects, a struct bm_position used for browsing the bitmap,
213
 *      and a pointer to the list of pages used for allocating all of the
214
 *      zone bitmap objects and bitmap block objects.
215
 *
216
 *      NOTE: It has to be possible to lay out the bitmap in memory
217
 *      using only allocations of order 0.  Additionally, the bitmap is
218
 *      designed to work with an arbitrary number of zones (this is over the
219
 *      top for now, but let's avoid making unnecessary assumptions ;-).
220
 *
221
 *      struct zone_bitmap contains a pointer to a list of bitmap block
222
 *      objects and a pointer to the bitmap block object that has been
223
 *      most recently used for setting bits.  Additionally, it contains the
224
 *      pfns that correspond to the start and end of the represented zone.
225
 *
226
 *      struct bm_block contains a pointer to the memory page in which
227
 *      information is stored (in the form of a block of bit chunks
228
 *      of type unsigned long each).  It also contains the pfns that
229
 *      correspond to the start and end of the represented memory area and
230
 *      the number of bit chunks in the block.
231
 */
232
 
233
#define BM_END_OF_MAP   (~0UL)
234
 
235
#define BM_CHUNKS_PER_BLOCK     (PAGE_SIZE / sizeof(long))
236
#define BM_BITS_PER_CHUNK       (sizeof(long) << 3)
237
#define BM_BITS_PER_BLOCK       (PAGE_SIZE << 3)
238
 
239
struct bm_block {
240
        struct bm_block *next;          /* next element of the list */
241
        unsigned long start_pfn;        /* pfn represented by the first bit */
242
        unsigned long end_pfn;  /* pfn represented by the last bit plus 1 */
243
        unsigned int size;      /* number of bit chunks */
244
        unsigned long *data;    /* chunks of bits representing pages */
245
};
246
 
247
struct zone_bitmap {
248
        struct zone_bitmap *next;       /* next element of the list */
249
        unsigned long start_pfn;        /* minimal pfn in this zone */
250
        unsigned long end_pfn;          /* maximal pfn in this zone plus 1 */
251
        struct bm_block *bm_blocks;     /* list of bitmap blocks */
252
        struct bm_block *cur_block;     /* recently used bitmap block */
253
};
254
 
255
/* struct bm_position is used for browsing memory bitmaps */
256
 
257
struct bm_position {
258
        struct zone_bitmap *zone_bm;
259
        struct bm_block *block;
260
        int chunk;
261
        int bit;
262
};
263
 
264
struct memory_bitmap {
265
        struct zone_bitmap *zone_bm_list;       /* list of zone bitmaps */
266
        struct linked_page *p_list;     /* list of pages used to store zone
267
                                         * bitmap objects and bitmap block
268
                                         * objects
269
                                         */
270
        struct bm_position cur; /* most recently used bit position */
271
};
272
 
273
/* Functions that operate on memory bitmaps */
274
 
275
static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
276
{
277
        bm->cur.chunk = 0;
278
        bm->cur.bit = -1;
279
}
280
 
281
static void memory_bm_position_reset(struct memory_bitmap *bm)
282
{
283
        struct zone_bitmap *zone_bm;
284
 
285
        zone_bm = bm->zone_bm_list;
286
        bm->cur.zone_bm = zone_bm;
287
        bm->cur.block = zone_bm->bm_blocks;
288
        memory_bm_reset_chunk(bm);
289
}
290
 
291
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
292
 
293
/**
294
 *      create_bm_block_list - create a list of block bitmap objects
295
 */
296
 
297
static inline struct bm_block *
298
create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
299
{
300
        struct bm_block *bblist = NULL;
301
 
302
        while (nr_blocks-- > 0) {
303
                struct bm_block *bb;
304
 
305
                bb = chain_alloc(ca, sizeof(struct bm_block));
306
                if (!bb)
307
                        return NULL;
308
 
309
                bb->next = bblist;
310
                bblist = bb;
311
        }
312
        return bblist;
313
}
314
 
315
/**
316
 *      create_zone_bm_list - create a list of zone bitmap objects
317
 */
318
 
319
static inline struct zone_bitmap *
320
create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
321
{
322
        struct zone_bitmap *zbmlist = NULL;
323
 
324
        while (nr_zones-- > 0) {
325
                struct zone_bitmap *zbm;
326
 
327
                zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
328
                if (!zbm)
329
                        return NULL;
330
 
331
                zbm->next = zbmlist;
332
                zbmlist = zbm;
333
        }
334
        return zbmlist;
335
}
336
 
337
/**
338
  *     memory_bm_create - allocate memory for a memory bitmap
339
  */
340
 
341
static int
342
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
343
{
344
        struct chain_allocator ca;
345
        struct zone *zone;
346
        struct zone_bitmap *zone_bm;
347
        struct bm_block *bb;
348
        unsigned int nr;
349
 
350
        chain_init(&ca, gfp_mask, safe_needed);
351
 
352
        /* Compute the number of zones */
353
        nr = 0;
354
        for_each_zone(zone)
355
                if (populated_zone(zone))
356
                        nr++;
357
 
358
        /* Allocate the list of zones bitmap objects */
359
        zone_bm = create_zone_bm_list(nr, &ca);
360
        bm->zone_bm_list = zone_bm;
361
        if (!zone_bm) {
362
                chain_free(&ca, PG_UNSAFE_CLEAR);
363
                return -ENOMEM;
364
        }
365
 
366
        /* Initialize the zone bitmap objects */
367
        for_each_zone(zone) {
368
                unsigned long pfn;
369
 
370
                if (!populated_zone(zone))
371
                        continue;
372
 
373
                zone_bm->start_pfn = zone->zone_start_pfn;
374
                zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
375
                /* Allocate the list of bitmap block objects */
376
                nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
377
                bb = create_bm_block_list(nr, &ca);
378
                zone_bm->bm_blocks = bb;
379
                zone_bm->cur_block = bb;
380
                if (!bb)
381
                        goto Free;
382
 
383
                nr = zone->spanned_pages;
384
                pfn = zone->zone_start_pfn;
385
                /* Initialize the bitmap block objects */
386
                while (bb) {
387
                        unsigned long *ptr;
388
 
389
                        ptr = get_image_page(gfp_mask, safe_needed);
390
                        bb->data = ptr;
391
                        if (!ptr)
392
                                goto Free;
393
 
394
                        bb->start_pfn = pfn;
395
                        if (nr >= BM_BITS_PER_BLOCK) {
396
                                pfn += BM_BITS_PER_BLOCK;
397
                                bb->size = BM_CHUNKS_PER_BLOCK;
398
                                nr -= BM_BITS_PER_BLOCK;
399
                        } else {
400
                                /* This is executed only once in the loop */
401
                                pfn += nr;
402
                                bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
403
                        }
404
                        bb->end_pfn = pfn;
405
                        bb = bb->next;
406
                }
407
                zone_bm = zone_bm->next;
408
        }
409
        bm->p_list = ca.chain;
410
        memory_bm_position_reset(bm);
411
        return 0;
412
 
413
 Free:
414
        bm->p_list = ca.chain;
415
        memory_bm_free(bm, PG_UNSAFE_CLEAR);
416
        return -ENOMEM;
417
}
418
 
419
/**
420
  *     memory_bm_free - free memory occupied by the memory bitmap @bm
421
  */
422
 
423
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
424
{
425
        struct zone_bitmap *zone_bm;
426
 
427
        /* Free the list of bit blocks for each zone_bitmap object */
428
        zone_bm = bm->zone_bm_list;
429
        while (zone_bm) {
430
                struct bm_block *bb;
431
 
432
                bb = zone_bm->bm_blocks;
433
                while (bb) {
434
                        if (bb->data)
435
                                free_image_page(bb->data, clear_nosave_free);
436
                        bb = bb->next;
437
                }
438
                zone_bm = zone_bm->next;
439
        }
440
        free_list_of_pages(bm->p_list, clear_nosave_free);
441
        bm->zone_bm_list = NULL;
442
}
443
 
444
/**
445
 *      memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
446
 *      to a given pfn.  The cur.zone_bm member of @bm and the cur_block member
447
 *      of the current zone bitmap are updated.
448
 */
449
 
450
static void memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
451
                                void **addr, unsigned int *bit_nr)
452
{
453
        struct zone_bitmap *zone_bm;
454
        struct bm_block *bb;
455
 
456
        /* Check if the pfn is from the current zone */
457
        zone_bm = bm->cur.zone_bm;
458
        if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
459
                zone_bm = bm->zone_bm_list;
460
                /* We don't assume that the zones are sorted by pfns */
461
                while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
462
                        zone_bm = zone_bm->next;
463
 
464
                        BUG_ON(!zone_bm);
465
                }
466
                bm->cur.zone_bm = zone_bm;
467
        }
468
        /* Check if the pfn corresponds to the current bitmap block */
469
        bb = zone_bm->cur_block;
470
        if (pfn < bb->start_pfn)
471
                bb = zone_bm->bm_blocks;
472
 
473
        while (pfn >= bb->end_pfn) {
474
                bb = bb->next;
475
 
476
                BUG_ON(!bb);
477
        }
478
        zone_bm->cur_block = bb;
479
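        /* Convert the pfn into an offset within the block, then split it
         * into a chunk index and a bit number within that chunk.
         */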
        pfn -= bb->start_pfn;
480
        *bit_nr = pfn % BM_BITS_PER_CHUNK;
481
        *addr = bb->data + pfn / BM_BITS_PER_CHUNK;
482
}
483
 
484
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
485
{
486
        void *addr;
487
        unsigned int bit;
488
 
489
        memory_bm_find_bit(bm, pfn, &addr, &bit);
490
        set_bit(bit, addr);
491
}
492
 
493
static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
494
{
495
        void *addr;
496
        unsigned int bit;
497
 
498
        memory_bm_find_bit(bm, pfn, &addr, &bit);
499
        clear_bit(bit, addr);
500
}
501
 
502
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
503
{
504
        void *addr;
505
        unsigned int bit;
506
 
507
        memory_bm_find_bit(bm, pfn, &addr, &bit);
508
        return test_bit(bit, addr);
509
}
510
 
511
/* Two auxiliary functions for memory_bm_next_pfn */
512
 
513
/* Find the first set bit in the given chunk, if there is one */
514
 
515
static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
516
{
517
        bit++;
518
        while (bit < BM_BITS_PER_CHUNK) {
519
                if (test_bit(bit, chunk_p))
520
                        return bit;
521
 
522
                bit++;
523
        }
524
        return -1;
525
}
526
 
527
/* Find a chunk containing some bits set in given block of bits */
528
 
529
static inline int next_chunk_in_block(int n, struct bm_block *bb)
530
{
531
        n++;
532
        while (n < bb->size) {
533
                if (bb->data[n])
534
                        return n;
535
 
536
                n++;
537
        }
538
        return -1;
539
}
540
 
541
/**
542
 *      memory_bm_next_pfn - find the pfn that corresponds to the next set bit
543
 *      in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
544
 *      returned.
545
 *
546
 *      It is required to run memory_bm_position_reset() before the first call to
547
 *      this function.
548
 */
549
 
550
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
551
{
552
        struct zone_bitmap *zone_bm;
553
        struct bm_block *bb;
554
        int chunk;
555
        int bit;
556
 
557
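        /* Scan chunk by chunk within the current block, block by block
         * within the current zone, and then move on to the next zone on
         * the list.
         */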
        do {
558
                bb = bm->cur.block;
559
                do {
560
                        chunk = bm->cur.chunk;
561
                        bit = bm->cur.bit;
562
                        do {
563
                                bit = next_bit_in_chunk(bit, bb->data + chunk);
564
                                if (bit >= 0)
565
                                        goto Return_pfn;
566
 
567
                                chunk = next_chunk_in_block(chunk, bb);
568
                                bit = -1;
569
                        } while (chunk >= 0);
570
                        bb = bb->next;
571
                        bm->cur.block = bb;
572
                        memory_bm_reset_chunk(bm);
573
                } while (bb);
574
                zone_bm = bm->cur.zone_bm->next;
575
                if (zone_bm) {
576
                        bm->cur.zone_bm = zone_bm;
577
                        bm->cur.block = zone_bm->bm_blocks;
578
                        memory_bm_reset_chunk(bm);
579
                }
580
        } while (zone_bm);
581
        memory_bm_position_reset(bm);
582
        return BM_END_OF_MAP;
583
 
584
 Return_pfn:
585
        bm->cur.chunk = chunk;
586
        bm->cur.bit = bit;
587
        return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
588
}
589
 
590
/**
591
 *      This structure represents a range of page frames the contents of which
592
 *      should not be saved during the suspend.
593
 */
594
 
595
struct nosave_region {
596
        struct list_head list;
597
        unsigned long start_pfn;
598
        unsigned long end_pfn;
599
};
600
 
601
static LIST_HEAD(nosave_regions);
602
 
603
/**
604
 *      register_nosave_region - register a range of page frames the contents
605
 *      of which should not be saved during the suspend (to be used in the early
606
 *      initialization code)
607
 */
608
 
609
void __init
610
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
611
                         int use_kmalloc)
612
{
613
        struct nosave_region *region;
614
 
615
        if (start_pfn >= end_pfn)
616
                return;
617
 
618
        if (!list_empty(&nosave_regions)) {
619
                /* Try to extend the previous region (they should be sorted) */
620
                region = list_entry(nosave_regions.prev,
621
                                        struct nosave_region, list);
622
                if (region->end_pfn == start_pfn) {
623
                        region->end_pfn = end_pfn;
624
                        goto Report;
625
                }
626
        }
627
        if (use_kmalloc) {
628
                /* during init, this shouldn't fail */
629
                region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
630
                BUG_ON(!region);
631
        } else
632
                /* This allocation cannot fail */
633
                region = alloc_bootmem_low(sizeof(struct nosave_region));
634
        region->start_pfn = start_pfn;
635
        region->end_pfn = end_pfn;
636
        list_add_tail(&region->list, &nosave_regions);
637
 Report:
638
        printk("swsusp: Registered nosave memory region: %016lx - %016lx\n",
639
                start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
640
}
641
 
642
/*
643
 * Set bits in this map correspond to the page frames the contents of which
644
 * should not be saved during the suspend.
645
 */
646
static struct memory_bitmap *forbidden_pages_map;
647
 
648
/* Set bits in this map correspond to free page frames. */
649
static struct memory_bitmap *free_pages_map;
650
 
651
/*
652
 * Each page frame allocated for creating the image is marked by setting the
653
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
654
 */
655
 
656
void swsusp_set_page_free(struct page *page)
657
{
658
        if (free_pages_map)
659
                memory_bm_set_bit(free_pages_map, page_to_pfn(page));
660
}
661
 
662
static int swsusp_page_is_free(struct page *page)
663
{
664
        return free_pages_map ?
665
                memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
666
}
667
 
668
void swsusp_unset_page_free(struct page *page)
669
{
670
        if (free_pages_map)
671
                memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
672
}
673
 
674
static void swsusp_set_page_forbidden(struct page *page)
675
{
676
        if (forbidden_pages_map)
677
                memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
678
}
679
 
680
int swsusp_page_is_forbidden(struct page *page)
681
{
682
        return forbidden_pages_map ?
683
                memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
684
}
685
 
686
static void swsusp_unset_page_forbidden(struct page *page)
687
{
688
        if (forbidden_pages_map)
689
                memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
690
}
691
 
692
/**
693
 *      mark_nosave_pages - set bits corresponding to the page frames the
694
 *      contents of which should not be saved in a given bitmap.
695
 */
696
 
697
static void mark_nosave_pages(struct memory_bitmap *bm)
698
{
699
        struct nosave_region *region;
700
 
701
        if (list_empty(&nosave_regions))
702
                return;
703
 
704
        list_for_each_entry(region, &nosave_regions, list) {
705
                unsigned long pfn;
706
 
707
                printk("swsusp: Marking nosave pages: %016lx - %016lx\n",
708
                                region->start_pfn << PAGE_SHIFT,
709
                                region->end_pfn << PAGE_SHIFT);
710
 
711
                for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
712
                        if (pfn_valid(pfn))
713
                                memory_bm_set_bit(bm, pfn);
714
        }
715
}
716
 
717
/**
718
 *      create_basic_memory_bitmaps - create bitmaps needed for marking page
719
 *      frames that should not be saved and free page frames.  The pointers
720
 *      forbidden_pages_map and free_pages_map are only modified if everything
721
 *      goes well, because we don't want the bits to be used before both bitmaps
722
 *      are set up.
723
 */
724
 
725
int create_basic_memory_bitmaps(void)
726
{
727
        struct memory_bitmap *bm1, *bm2;
728
        int error = 0;
729
 
730
        BUG_ON(forbidden_pages_map || free_pages_map);
731
 
732
        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
733
        if (!bm1)
734
                return -ENOMEM;
735
 
736
        error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
737
        if (error)
738
                goto Free_first_object;
739
 
740
        bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
741
        if (!bm2)
742
                goto Free_first_bitmap;
743
 
744
        error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
745
        if (error)
746
                goto Free_second_object;
747
 
748
        forbidden_pages_map = bm1;
749
        free_pages_map = bm2;
750
        mark_nosave_pages(forbidden_pages_map);
751
 
752
        printk("swsusp: Basic memory bitmaps created\n");
753
 
754
        return 0;
755
 
756
 Free_second_object:
757
        kfree(bm2);
758
 Free_first_bitmap:
759
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
760
 Free_first_object:
761
        kfree(bm1);
762
        return -ENOMEM;
763
}
764
 
765
/**
766
 *      free_basic_memory_bitmaps - free memory bitmaps allocated by
767
 *      create_basic_memory_bitmaps().  The auxiliary pointers are necessary
768
 *      so that the bitmaps themselves are not referred to while they are being
769
 *      freed.
770
 */
771
 
772
void free_basic_memory_bitmaps(void)
773
{
774
        struct memory_bitmap *bm1, *bm2;
775
 
776
        BUG_ON(!(forbidden_pages_map && free_pages_map));
777
 
778
        bm1 = forbidden_pages_map;
779
        bm2 = free_pages_map;
780
        forbidden_pages_map = NULL;
781
        free_pages_map = NULL;
782
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
783
        kfree(bm1);
784
        memory_bm_free(bm2, PG_UNSAFE_CLEAR);
785
        kfree(bm2);
786
 
787
        printk("swsusp: Basic memory bitmaps freed\n");
788
}
789
 
790
/**
791
 *      snapshot_additional_pages - estimate the number of additional pages
792
 *      that will be needed for setting up the suspend image data structures for a given
793
 *      zone (usually the returned value is greater than the exact number)
794
 */
795
 
796
unsigned int snapshot_additional_pages(struct zone *zone)
797
{
798
        unsigned int res;
799
 
800
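        /* One bitmap page covers BM_BITS_PER_BLOCK page frames; add the
         * pages needed to hold the corresponding bm_block structures and
         * double the estimate.
         */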
        res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
801
        res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
802
        return 2 * res;
803
}
804
 
805
#ifdef CONFIG_HIGHMEM
806
/**
807
 *      count_free_highmem_pages - compute the total number of free highmem
808
 *      pages, system-wide.
809
 */
810
 
811
static unsigned int count_free_highmem_pages(void)
812
{
813
        struct zone *zone;
814
        unsigned int cnt = 0;
815
 
816
        for_each_zone(zone)
817
                if (populated_zone(zone) && is_highmem(zone))
818
                        cnt += zone_page_state(zone, NR_FREE_PAGES);
819
 
820
        return cnt;
821
}
822
 
823
/**
824
 *      saveable_highmem_page - Determine whether a highmem page should be
825
 *      included in the suspend image.
826
 *
827
 *      We should save the page if it isn't Nosave, NosaveFree, or Reserved,
828
 *      and it isn't a part of a free chunk of pages.
829
 */
830
 
831
static struct page *saveable_highmem_page(unsigned long pfn)
832
{
833
        struct page *page;
834
 
835
        if (!pfn_valid(pfn))
836
                return NULL;
837
 
838
        page = pfn_to_page(pfn);
839
 
840
        BUG_ON(!PageHighMem(page));
841
 
842
        if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page) ||
843
            PageReserved(page))
844
                return NULL;
845
 
846
        return page;
847
}
848
 
849
/**
850
 *      count_highmem_pages - compute the total number of saveable highmem
851
 *      pages.
852
 */
853
 
854
unsigned int count_highmem_pages(void)
855
{
856
        struct zone *zone;
857
        unsigned int n = 0;
858
 
859
        for_each_zone(zone) {
860
                unsigned long pfn, max_zone_pfn;
861
 
862
                if (!is_highmem(zone))
863
                        continue;
864
 
865
                mark_free_pages(zone);
866
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
867
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
868
                        if (saveable_highmem_page(pfn))
869
                                n++;
870
        }
871
        return n;
872
}
873
#else
874
static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
875
static inline unsigned int count_highmem_pages(void) { return 0; }
876
#endif /* CONFIG_HIGHMEM */
877
 
878
/**
879
 *      saveable - Determine whether a non-highmem page should be included in
880
 *      the suspend image.
881
 *
882
 *      We should save the page if it isn't Nosave, and is not in the range
883
 *      of pages statically defined as 'unsaveable', and it isn't a part of
884
 *      a free chunk of pages.
885
 */
886
 
887
static struct page *saveable_page(unsigned long pfn)
888
{
889
        struct page *page;
890
 
891
        if (!pfn_valid(pfn))
892
                return NULL;
893
 
894
        page = pfn_to_page(pfn);
895
 
896
        BUG_ON(PageHighMem(page));
897
 
898
        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
899
                return NULL;
900
 
901
        if (PageReserved(page) && pfn_is_nosave(pfn))
902
                return NULL;
903
 
904
        return page;
905
}
906
 
907
/**
908
 *      count_data_pages - compute the total number of saveable non-highmem
909
 *      pages.
910
 */
911
 
912
unsigned int count_data_pages(void)
913
{
914
        struct zone *zone;
915
        unsigned long pfn, max_zone_pfn;
916
        unsigned int n = 0;
917
 
918
        for_each_zone(zone) {
919
                if (is_highmem(zone))
920
                        continue;
921
 
922
                mark_free_pages(zone);
923
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
924
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
925
                        if (saveable_page(pfn))
926
                                n++;
927
        }
928
        return n;
929
}
930
 
931
/* This is needed, because copy_page and memcpy are not usable for copying
932
 * task structs.
933
 */
934
static inline void do_copy_page(long *dst, long *src)
935
{
936
        int n;
937
 
938
        for (n = PAGE_SIZE / sizeof(long); n; n--)
939
                *dst++ = *src++;
940
}
941
 
942
#ifdef CONFIG_HIGHMEM
943
static inline struct page *
944
page_is_saveable(struct zone *zone, unsigned long pfn)
945
{
946
        return is_highmem(zone) ?
947
                        saveable_highmem_page(pfn) : saveable_page(pfn);
948
}
949
 
950
static inline void
951
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
952
{
953
        struct page *s_page, *d_page;
954
        void *src, *dst;
955
 
956
        s_page = pfn_to_page(src_pfn);
957
        d_page = pfn_to_page(dst_pfn);
958
        if (PageHighMem(s_page)) {
959
                src = kmap_atomic(s_page, KM_USER0);
960
                dst = kmap_atomic(d_page, KM_USER1);
961
                do_copy_page(dst, src);
962
                kunmap_atomic(src, KM_USER0);
963
                kunmap_atomic(dst, KM_USER1);
964
        } else {
965
                src = page_address(s_page);
966
                if (PageHighMem(d_page)) {
967
                        /* Page pointed to by src may contain some kernel
968
                         * data modified by kmap_atomic()
969
                         */
970
                        do_copy_page(buffer, src);
971
                        dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
972
                        memcpy(dst, buffer, PAGE_SIZE);
973
                        kunmap_atomic(dst, KM_USER0);
974
                } else {
975
                        dst = page_address(d_page);
976
                        do_copy_page(dst, src);
977
                }
978
        }
979
}
980
#else
981
#define page_is_saveable(zone, pfn)     saveable_page(pfn)
982
 
983
static inline void
984
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
985
{
986
        do_copy_page(page_address(pfn_to_page(dst_pfn)),
987
                        page_address(pfn_to_page(src_pfn)));
988
}
989
#endif /* CONFIG_HIGHMEM */
990
 
991
static void
992
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
993
{
994
        struct zone *zone;
995
        unsigned long pfn;
996
 
997
        for_each_zone(zone) {
998
                unsigned long max_zone_pfn;
999
 
1000
                mark_free_pages(zone);
1001
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1002
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1003
                        if (page_is_saveable(zone, pfn))
1004
                                memory_bm_set_bit(orig_bm, pfn);
1005
        }
1006
        memory_bm_position_reset(orig_bm);
1007
        memory_bm_position_reset(copy_bm);
1008
        for(;;) {
1009
                pfn = memory_bm_next_pfn(orig_bm);
1010
                if (unlikely(pfn == BM_END_OF_MAP))
1011
                        break;
1012
                copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1013
        }
1014
}
1015
 
1016
/* Total number of image pages */
1017
static unsigned int nr_copy_pages;
1018
/* Number of pages needed for saving the original pfns of the image pages */
1019
static unsigned int nr_meta_pages;
1020
 
1021
/**
1022
 *      swsusp_free - free pages allocated for the suspend.
1023
 *
1024
 *      Suspend pages are allocated before the atomic copy is made, so we
1025
 *      need to release them after the resume.
1026
 */
1027
 
1028
void swsusp_free(void)
1029
{
1030
        struct zone *zone;
1031
        unsigned long pfn, max_zone_pfn;
1032
 
1033
        for_each_zone(zone) {
1034
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1035
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1036
                        if (pfn_valid(pfn)) {
1037
                                struct page *page = pfn_to_page(pfn);
1038
 
1039
                                if (swsusp_page_is_forbidden(page) &&
1040
                                    swsusp_page_is_free(page)) {
1041
                                        swsusp_unset_page_forbidden(page);
1042
                                        swsusp_unset_page_free(page);
1043
                                        __free_page(page);
1044
                                }
1045
                        }
1046
        }
1047
        nr_copy_pages = 0;
1048
        nr_meta_pages = 0;
1049
        restore_pblist = NULL;
1050
        buffer = NULL;
1051
}
1052
 
1053
#ifdef CONFIG_HIGHMEM
1054
/**
1055
  *     count_pages_for_highmem - compute the number of non-highmem pages
1056
  *     that will be necessary for creating copies of highmem pages.
1057
  */
1058
 
1059
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1060
{
1061
        unsigned int free_highmem = count_free_highmem_pages();
1062
 
1063
        if (free_highmem >= nr_highmem)
1064
                nr_highmem = 0;
1065
        else
1066
                nr_highmem -= free_highmem;
1067
 
1068
        return nr_highmem;
1069
}
1070
#else
1071
static unsigned int
1072
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1073
#endif /* CONFIG_HIGHMEM */
1074
 
1075
/**
1076
 *      enough_free_mem - Make sure we have enough free memory for the
1077
 *      snapshot image.
1078
 */
1079
 
1080
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1081
{
1082
        struct zone *zone;
1083
        unsigned int free = 0, meta = 0;
1084
 
1085
        for_each_zone(zone) {
1086
                meta += snapshot_additional_pages(zone);
1087
                if (!is_highmem(zone))
1088
                        free += zone_page_state(zone, NR_FREE_PAGES);
1089
        }
1090
 
1091
        nr_pages += count_pages_for_highmem(nr_highmem);
1092
        pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
1093
                nr_pages, PAGES_FOR_IO, meta, free);
1094
 
1095
        return free > nr_pages + PAGES_FOR_IO + meta;
1096
}
1097
 
1098
#ifdef CONFIG_HIGHMEM
1099
/**
1100
 *      get_highmem_buffer - if there are some highmem pages in the suspend
1101
 *      image, we may need the buffer to copy them and/or load their data.
1102
 */
1103
 
1104
static inline int get_highmem_buffer(int safe_needed)
1105
{
1106
        buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1107
        return buffer ? 0 : -ENOMEM;
1108
}
1109
 
1110
/**
1111
 *      alloc_highmem_image_pages - allocate some highmem pages for the image.
1112
 *      Try to allocate as many pages as needed, but if the number of free
1113
 *      highmem pages is smaller than that, allocate them all.
1114
 */
1115
 
1116
static inline unsigned int
1117
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
1118
{
1119
        unsigned int to_alloc = count_free_highmem_pages();
1120
 
1121
        if (to_alloc > nr_highmem)
1122
                to_alloc = nr_highmem;
1123
 
1124
        nr_highmem -= to_alloc;
1125
        while (to_alloc-- > 0) {
1126
                struct page *page;
1127
 
1128
                page = alloc_image_page(__GFP_HIGHMEM);
1129
                memory_bm_set_bit(bm, page_to_pfn(page));
1130
        }
1131
        return nr_highmem;
1132
}
1133
#else
1134
static inline int get_highmem_buffer(int safe_needed) { return 0; }
1135
 
1136
static inline unsigned int
1137
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
1138
#endif /* CONFIG_HIGHMEM */
1139
 
1140
/**
1141
 *      swsusp_alloc - allocate memory for the suspend image
1142
 *
1143
 *      We first try to allocate as many highmem pages as there are
1144
 *      saveable highmem pages in the system.  If that fails, we allocate
1145
 *      non-highmem pages for the copies of the remaining highmem ones.
1146
 *
1147
 *      In this approach it is likely that the copies of highmem pages will
1148
 *      also be located in the high memory, because of the way in which
1149
 *      copy_data_pages() works.
1150
 */
1151
 
1152
static int
1153
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1154
                unsigned int nr_pages, unsigned int nr_highmem)
1155
{
1156
        int error;
1157
 
1158
        error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
1159
        if (error)
1160
                goto Free;
1161
 
1162
        error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
1163
        if (error)
1164
                goto Free;
1165
 
1166
        if (nr_highmem > 0) {
1167
                error = get_highmem_buffer(PG_ANY);
1168
                if (error)
1169
                        goto Free;
1170
 
1171
                nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
1172
        }
1173
        while (nr_pages-- > 0) {
1174
                struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1175
 
1176
                if (!page)
1177
                        goto Free;
1178
 
1179
                memory_bm_set_bit(copy_bm, page_to_pfn(page));
1180
        }
1181
        return 0;
1182
 
1183
 Free:
1184
        swsusp_free();
1185
        return -ENOMEM;
1186
}
1187
 
1188
/* Memory bitmap used for marking saveable pages (during suspend) or the
1189
 * suspend image pages (during resume)
1190
 */
1191
static struct memory_bitmap orig_bm;
1192
/* Memory bitmap used on suspend for marking allocated pages that will contain
1193
 * the copies of saveable pages.  During resume it is initially used for
1194
 * marking the suspend image pages, but then its set bits are duplicated in
1195
 * @orig_bm and it is released.  Next, on systems with high memory, it may be
1196
 * used for marking "safe" highmem pages, but it has to be reinitialized for
1197
 * this purpose.
1198
 */
1199
static struct memory_bitmap copy_bm;
1200
 
1201
asmlinkage int swsusp_save(void)
1202
{
1203
        unsigned int nr_pages, nr_highmem;
1204
 
1205
        printk("swsusp: critical section: \n");
1206
 
1207
        drain_local_pages();
1208
        nr_pages = count_data_pages();
1209
        nr_highmem = count_highmem_pages();
1210
        printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);
1211
 
1212
        if (!enough_free_mem(nr_pages, nr_highmem)) {
1213
                printk(KERN_ERR "swsusp: Not enough free memory\n");
1214
                return -ENOMEM;
1215
        }
1216
 
1217
        if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1218
                printk(KERN_ERR "swsusp: Memory allocation failed\n");
1219
                return -ENOMEM;
1220
        }
1221
 
1222
        /* While allocating the suspend pagedir, new cold pages may appear.
1223
         * Kill them.
1224
         */
1225
        drain_local_pages();
1226
        copy_data_pages(&copy_bm, &orig_bm);
1227
 
1228
        /*
1229
         * End of critical section. From now on, we can write to memory,
1230
         * but we should not touch disk. This especially means we must _not_
1231
         * touch swap space! Except we must write out our image of course.
1232
         */
1233
 
1234
        nr_pages += nr_highmem;
1235
        nr_copy_pages = nr_pages;
1236
        nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1237
 
1238
        printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
1239
 
1240
        return 0;
1241
}
1242
 
1243
#ifndef CONFIG_ARCH_HIBERNATION_HEADER
1244
static int init_header_complete(struct swsusp_info *info)
1245
{
1246
        memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1247
        info->version_code = LINUX_VERSION_CODE;
1248
        return 0;
1249
}
1250
 
1251
static char *check_image_kernel(struct swsusp_info *info)
1252
{
1253
        if (info->version_code != LINUX_VERSION_CODE)
1254
                return "kernel version";
1255
        if (strcmp(info->uts.sysname,init_utsname()->sysname))
1256
                return "system type";
1257
        if (strcmp(info->uts.release,init_utsname()->release))
1258
                return "kernel release";
1259
        if (strcmp(info->uts.version,init_utsname()->version))
1260
                return "version";
1261
        if (strcmp(info->uts.machine,init_utsname()->machine))
1262
                return "machine";
1263
        return NULL;
1264
}
1265
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1266
 
1267
static int init_header(struct swsusp_info *info)
1268
{
1269
        memset(info, 0, sizeof(struct swsusp_info));
1270
        info->num_physpages = num_physpages;
1271
        info->image_pages = nr_copy_pages;
1272
        info->pages = nr_copy_pages + nr_meta_pages + 1;
1273
        info->size = info->pages;
1274
        info->size <<= PAGE_SHIFT;
1275
        return init_header_complete(info);
1276
}
1277
 
1278
/**
1279
 *      pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
1280
 *      are stored in the array @buf[] (1 page at a time)
1281
 */
1282
 
1283
static inline void
1284
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1285
{
1286
        int j;
1287
 
1288
        for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1289
                buf[j] = memory_bm_next_pfn(bm);
1290
                if (unlikely(buf[j] == BM_END_OF_MAP))
1291
                        break;
1292
        }
1293
}
1294
 
1295
/**
1296
 *      snapshot_read_next - used for reading the system memory snapshot.
1297
 *
1298
 *      On the first call to it @handle should point to a zeroed
1299
 *      snapshot_handle structure.  The structure gets updated and a pointer
1300
 *      to it should be passed to this function every next time.
1301
 *
1302
 *      The @count parameter should contain the number of bytes the caller
1303
 *      wants to read from the snapshot.  It must not be zero.
1304
 *
1305
 *      On success the function returns a positive number.  Then, the caller
1306
 *      is allowed to read up to the returned number of bytes from the memory
1307
 *      location computed by the data_of() macro.  The number returned
1308
 *      may be smaller than @count, but this only happens if the read would
1309
 *      cross a page boundary otherwise.
1310
 *
1311
 *      The function returns 0 to indicate the end of data stream condition,
1312
 *      and a negative number is returned on error.  In such cases the
1313
 *      structure pointed to by @handle is not updated and should not be used
1314
 *      any more.
1315
 */
1316
 
1317
int snapshot_read_next(struct snapshot_handle *handle, size_t count)
1318
{
1319
        if (handle->cur > nr_meta_pages + nr_copy_pages)
1320
                return 0;
1321
 
1322
        if (!buffer) {
1323
                /* This makes the buffer be freed by swsusp_free() */
1324
                buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1325
                if (!buffer)
1326
                        return -ENOMEM;
1327
        }
1328
        if (!handle->offset) {
1329
                int error;
1330
 
1331
                error = init_header((struct swsusp_info *)buffer);
1332
                if (error)
1333
                        return error;
1334
                handle->buffer = buffer;
1335
                memory_bm_position_reset(&orig_bm);
1336
                memory_bm_position_reset(&copy_bm);
1337
        }
1338
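        /* Set up the next page of data (a page of pfns or an image page)
         * only if the previous one has been consumed by the caller in full.
         */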
        if (handle->prev < handle->cur) {
1339
                if (handle->cur <= nr_meta_pages) {
1340
                        memset(buffer, 0, PAGE_SIZE);
1341
                        pack_pfns(buffer, &orig_bm);
1342
                } else {
1343
                        struct page *page;
1344
 
1345
                        page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1346
                        if (PageHighMem(page)) {
1347
                                /* Highmem pages are copied to the buffer,
1348
                                 * because we can't return with a kmapped
1349
                                 * highmem page (we may not be called again).
1350
                                 */
1351
                                void *kaddr;
1352
 
1353
                                kaddr = kmap_atomic(page, KM_USER0);
1354
                                memcpy(buffer, kaddr, PAGE_SIZE);
1355
                                kunmap_atomic(kaddr, KM_USER0);
1356
                                handle->buffer = buffer;
1357
                        } else {
1358
                                handle->buffer = page_address(page);
1359
                        }
1360
                }
1361
                handle->prev = handle->cur;
1362
        }
1363
        handle->buf_offset = handle->cur_offset;
1364
        if (handle->cur_offset + count >= PAGE_SIZE) {
1365
                count = PAGE_SIZE - handle->cur_offset;
1366
                handle->cur_offset = 0;
1367
                handle->cur++;
1368
        } else {
1369
                handle->cur_offset += count;
1370
        }
1371
        handle->offset += count;
1372
        return count;
1373
}
1374
 
1375
/**
1376
 *      mark_unsafe_pages - mark the pages that cannot be used for storing
1377
 *      the image during resume, because they conflict with the pages that
1378
 *      had been used before suspend
1379
 */
1380
 
1381
static int mark_unsafe_pages(struct memory_bitmap *bm)
1382
{
1383
        struct zone *zone;
1384
        unsigned long pfn, max_zone_pfn;
1385
 
1386
        /* Clear page flags */
1387
        for_each_zone(zone) {
1388
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1389
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1390
                        if (pfn_valid(pfn))
1391
                                swsusp_unset_page_free(pfn_to_page(pfn));
1392
        }
1393
 
1394
        /* Mark pages that correspond to the "original" pfns as "unsafe" */
1395
        memory_bm_position_reset(bm);
1396
        do {
1397
                pfn = memory_bm_next_pfn(bm);
1398
                if (likely(pfn != BM_END_OF_MAP)) {
1399
                        if (likely(pfn_valid(pfn)))
1400
                                swsusp_set_page_free(pfn_to_page(pfn));
1401
                        else
1402
                                return -EFAULT;
1403
                }
1404
        } while (pfn != BM_END_OF_MAP);
1405
 
1406
        allocated_unsafe_pages = 0;
1407
 
1408
        return 0;
1409
}
1410
 
1411
static void
1412
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
1413
{
1414
        unsigned long pfn;
1415
 
1416
        memory_bm_position_reset(src);
1417
        pfn = memory_bm_next_pfn(src);
1418
        while (pfn != BM_END_OF_MAP) {
1419
                memory_bm_set_bit(dst, pfn);
1420
                pfn = memory_bm_next_pfn(src);
1421
        }
1422
}
1423
 
1424
static int check_header(struct swsusp_info *info)
1425
{
1426
        char *reason;
1427
 
1428
        reason = check_image_kernel(info);
1429
        if (!reason && info->num_physpages != num_physpages)
1430
                reason = "memory size";
1431
        if (reason) {
1432
                printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
1433
                return -EPERM;
1434
        }
1435
        return 0;
1436
}
1437
 
1438
/**
1439
 *      load_header - check the image header and copy data from it
1440
 */
1441
 
1442
static int
1443
load_header(struct swsusp_info *info)
1444
{
1445
        int error;
1446
 
1447
        restore_pblist = NULL;
1448
        error = check_header(info);
1449
        if (!error) {
1450
                nr_copy_pages = info->image_pages;
1451
                nr_meta_pages = info->pages - info->image_pages - 1;
1452
        }
1453
        return error;
1454
}
1455
 
1456
/**
1457
 *      unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
1458
 *      the corresponding bit in the memory bitmap @bm
1459
 */
1460
 
1461
static inline void
1462
unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1463
{
1464
        int j;
1465
 
1466
        for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1467
                if (unlikely(buf[j] == BM_END_OF_MAP))
1468
                        break;
1469
 
1470
                memory_bm_set_bit(bm, buf[j]);
1471
        }
1472
}
1473
 
1474
/* List of "safe" pages that may be used to store data loaded from the suspend
1475
 * image
1476
 */
1477
static struct linked_page *safe_pages_list;
1478
 
1479
#ifdef CONFIG_HIGHMEM
1480
/* struct highmem_pbe is used for creating the list of highmem pages that
1481
 * should be restored atomically during the resume from disk, because the page
1482
 * frames they have occupied before the suspend are in use.
1483
 */
1484
struct highmem_pbe {
1485
        struct page *copy_page; /* data is here now */
1486
        struct page *orig_page; /* data was here before the suspend */
1487
        struct highmem_pbe *next;
1488
};
1489
 
1490
/* List of highmem PBEs needed for restoring the highmem pages that were
1491
 * allocated before the suspend and included in the suspend image, but have
1492
 * also been allocated by the "resume" kernel, so their contents cannot be
1493
 * written directly to their "original" page frames.
1494
 */
1495
static struct highmem_pbe *highmem_pblist;
1496
 
1497
/**
1498
 *      count_highmem_image_pages - compute the number of highmem pages in the
1499
 *      suspend image.  The bits in the memory bitmap @bm that correspond to the
1500
 *      image pages are assumed to be set.
1501
 */
1502
 
1503
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
1504
{
1505
        unsigned long pfn;
1506
        unsigned int cnt = 0;
1507
 
1508
        memory_bm_position_reset(bm);
1509
        pfn = memory_bm_next_pfn(bm);
1510
        while (pfn != BM_END_OF_MAP) {
1511
                if (PageHighMem(pfn_to_page(pfn)))
1512
                        cnt++;
1513
 
1514
                pfn = memory_bm_next_pfn(bm);
1515
        }
1516
        return cnt;
1517
}
1518
 
1519
/**
1520
 *      prepare_highmem_image - try to allocate as many highmem pages as
1521
 *      there are highmem image pages (@nr_highmem_p points to the variable
1522
 *      containing the number of highmem image pages).  The pages that are
1523
 *      "safe" (ie. will not be overwritten when the suspend image is
1524
 *      restored) have the corresponding bits set in @bm (it must be
1525
 *      uninitialized).
1526
 *
1527
 *      NOTE: This function should not be called if there are no highmem
1528
 *      image pages.
1529
 */
1530
 
1531
static unsigned int safe_highmem_pages;
1532
 
1533
static struct memory_bitmap *safe_highmem_bm;
1534
 
1535
static int
1536
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1537
{
1538
        unsigned int to_alloc;
1539
 
1540
        if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
1541
                return -ENOMEM;
1542
 
1543
        if (get_highmem_buffer(PG_SAFE))
1544
                return -ENOMEM;
1545
 
1546
        to_alloc = count_free_highmem_pages();
1547
        if (to_alloc > *nr_highmem_p)
1548
                to_alloc = *nr_highmem_p;
1549
        else
1550
                *nr_highmem_p = to_alloc;
1551
 
1552
        safe_highmem_pages = 0;
1553
        while (to_alloc-- > 0) {
1554
                struct page *page;
1555
 
1556
                page = alloc_page(__GFP_HIGHMEM);
1557
                if (!swsusp_page_is_free(page)) {
1558
                        /* The page is "safe", set its bit in the bitmap */
1559
                        memory_bm_set_bit(bm, page_to_pfn(page));
1560
                        safe_highmem_pages++;
1561
                }
1562
                /* Mark the page as allocated */
1563
                swsusp_set_page_forbidden(page);
1564
                swsusp_set_page_free(page);
1565
        }
1566
        memory_bm_position_reset(bm);
1567
        safe_highmem_bm = bm;
1568
        return 0;
1569
}
1570
 
1571
/**
 *      get_highmem_page_buffer - for a given highmem image page find the buffer
 *      that snapshot_write_next() should set for its caller to write to.
 *
 *      If the page is to be saved to its "original" page frame or a copy of
 *      the page is to be made in the highmem, @buffer is returned.  Otherwise,
 *      the copy of the page is to be made in normal memory, so the address of
 *      the copy is returned.
 *
 *      If @buffer is returned, the caller of snapshot_write_next() will write
 *      the page's contents to @buffer, so they will have to be copied to the
 *      right location on the next call to snapshot_write_next() and it is done
 *      with the help of copy_last_highmem_page().  For this purpose, if
 *      @buffer is returned, @last_highmem_page is set to the page to which
 *      the data will have to be copied from @buffer.
 */

static struct page *last_highmem_page;

static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
        struct highmem_pbe *pbe;
        void *kaddr;

        if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
                /* We have allocated the "original" page frame and we can
                 * use it directly to store the loaded page.
                 */
                last_highmem_page = page;
                return buffer;
        }
        /* The "original" page frame has not been allocated and we have to
         * use a "safe" page frame to store the loaded page.
         */
        pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
        if (!pbe) {
                swsusp_free();
                return NULL;
        }
        pbe->orig_page = page;
        if (safe_highmem_pages > 0) {
                struct page *tmp;

                /* Copy of the page will be stored in high memory */
                kaddr = buffer;
                tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
                safe_highmem_pages--;
                last_highmem_page = tmp;
                pbe->copy_page = tmp;
        } else {
                /* Copy of the page will be stored in normal memory */
                kaddr = safe_pages_list;
                safe_pages_list = safe_pages_list->next;
                pbe->copy_page = virt_to_page(kaddr);
        }
        pbe->next = highmem_pblist;
        highmem_pblist = pbe;
        return kaddr;
}

/**
 *      copy_last_highmem_page - copy the contents of a highmem image page from
 *      @buffer, where the caller of snapshot_write_next() has placed them,
 *      to the right location represented by @last_highmem_page.
 */

static void copy_last_highmem_page(void)
{
        if (last_highmem_page) {
                void *dst;

                dst = kmap_atomic(last_highmem_page, KM_USER0);
                memcpy(dst, buffer, PAGE_SIZE);
                kunmap_atomic(dst, KM_USER0);
                last_highmem_page = NULL;
        }
}

static inline int last_highmem_page_copied(void)
{
        return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
        if (safe_highmem_bm)
                memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

        if (buffer)
                free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
        return 0;
}

static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
        return NULL;
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

/**
 *      prepare_image - use the memory bitmap @bm to mark the pages that will
 *      be overwritten in the process of restoring the system memory state
 *      from the suspend image ("unsafe" pages) and allocate memory for the
 *      image.
 *
 *      The idea is to allocate a new memory bitmap first and then allocate
 *      as many pages as needed for the image data, but not to assign these
 *      pages to specific tasks initially.  Instead, we just mark them as
 *      allocated and create a list of "safe" pages that will be used
 *      later.  On systems with high memory a list of "safe" highmem pages is
 *      also created.
 */

#define PBES_PER_LINKED_PAGE    (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

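For a rough sense of scale, the short stand-alone sketch below works through the arithmetic behind PBES_PER_LINKED_PAGE. It assumes a 4 KiB page, the LINKED_PAGE_DATA_SIZE definition used earlier in this file (PAGE_SIZE minus one pointer reserved for the list link), and a struct pbe made of three pointers; the exact figures depend on the configuration and pointer size, so treat it as illustrative only.

#include <stdio.h>

/* Illustrative stand-in for struct pbe: two data pointers plus the list link
 * (assumed layout, not taken from this file). */
struct pbe_sketch { void *address; void *orig_address; struct pbe_sketch *next; };

int main(void)
{
        unsigned long page_size = 4096;                        /* assumed PAGE_SIZE */
        unsigned long data_size = page_size - sizeof(void *);  /* LINKED_PAGE_DATA_SIZE */

        /* With 4-byte pointers this prints 341 (4092 / 12); with 8-byte
         * pointers it prints 170 (4088 / 24). */
        printf("PBES_PER_LINKED_PAGE ~= %lu\n", data_size / sizeof(struct pbe_sketch));
        return 0;
}

In other words, one reserved linked page holds PBEs for a few hundred image pages, which is why the reservation loop in prepare_image() below only needs nr_pages / PBES_PER_LINKED_PAGE (rounded up) extra pages.
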
static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
        unsigned int nr_pages, nr_highmem;
        struct linked_page *sp_list, *lp;
        int error;

        /* If there is no highmem, the buffer will not be necessary */
        free_image_page(buffer, PG_UNSAFE_CLEAR);
        buffer = NULL;

        nr_highmem = count_highmem_image_pages(bm);
        error = mark_unsafe_pages(bm);
        if (error)
                goto Free;

        error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
        if (error)
                goto Free;

        duplicate_memory_bitmap(new_bm, bm);
        memory_bm_free(bm, PG_UNSAFE_KEEP);
        if (nr_highmem > 0) {
                error = prepare_highmem_image(bm, &nr_highmem);
                if (error)
                        goto Free;
        }
        /* Reserve some safe pages for potential later use.
         *
         * NOTE: This way we make sure there will be enough safe pages for the
         * chain_alloc() in get_buffer().  It is a bit wasteful, but
         * nr_copy_pages cannot be greater than 50% of the memory anyway.
         */
        sp_list = NULL;
        /* nr_copy_pages cannot be less than allocated_unsafe_pages */
        nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
        nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
        while (nr_pages > 0) {
                lp = get_image_page(GFP_ATOMIC, PG_SAFE);
                if (!lp) {
                        error = -ENOMEM;
                        goto Free;
                }
                lp->next = sp_list;
                sp_list = lp;
                nr_pages--;
        }
        /* Preallocate memory for the image */
        safe_pages_list = NULL;
        nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
        while (nr_pages > 0) {
                lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
                if (!lp) {
                        error = -ENOMEM;
                        goto Free;
                }
                if (!swsusp_page_is_free(virt_to_page(lp))) {
                        /* The page is "safe", add it to the list */
                        lp->next = safe_pages_list;
                        safe_pages_list = lp;
                }
                /* Mark the page as allocated */
                swsusp_set_page_forbidden(virt_to_page(lp));
                swsusp_set_page_free(virt_to_page(lp));
                nr_pages--;
        }
        /* Free the reserved safe pages so that chain_alloc() can use them */
        while (sp_list) {
                lp = sp_list->next;
                free_image_page(sp_list, PG_UNSAFE_CLEAR);
                sp_list = lp;
        }
        return 0;

 Free:
        swsusp_free();
        return error;
}

/**
 *      get_buffer - compute the address that snapshot_write_next() should
 *      set for its caller to write to.
 */

static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
        struct pbe *pbe;
        struct page *page = pfn_to_page(memory_bm_next_pfn(bm));

        if (PageHighMem(page))
                return get_highmem_page_buffer(page, ca);

        if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
                /* We have allocated the "original" page frame and we can
                 * use it directly to store the loaded page.
                 */
                return page_address(page);

        /* The "original" page frame has not been allocated and we have to
         * use a "safe" page frame to store the loaded page.
         */
        pbe = chain_alloc(ca, sizeof(struct pbe));
        if (!pbe) {
                swsusp_free();
                return NULL;
        }
        pbe->orig_address = page_address(page);
        pbe->address = safe_pages_list;
        safe_pages_list = safe_pages_list->next;
        pbe->next = restore_pblist;
        restore_pblist = pbe;
        return pbe->address;
}

/**
 *      snapshot_write_next - used for writing the system memory snapshot.
 *
 *      On the first call to it @handle should point to a zeroed
 *      snapshot_handle structure.  The structure gets updated and a pointer
 *      to it should be passed to this function on every subsequent call.
 *
 *      The @count parameter should contain the number of bytes the caller
 *      wants to write to the image.  It must not be zero.
 *
 *      On success the function returns a positive number.  Then, the caller
 *      is allowed to write up to the returned number of bytes to the memory
 *      location computed by the data_of() macro.  The number returned
 *      may be smaller than @count, but this only happens if the write would
 *      cross a page boundary otherwise.
 *
 *      The function returns 0 to indicate the "end of file" condition,
 *      and a negative number is returned on error.  In such cases the
 *      structure pointed to by @handle is not updated and should not be used
 *      any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
        static struct chain_allocator ca;
        int error = 0;

        /* Check if we have already loaded the entire image */
        if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
                return 0;

        if (handle->offset == 0) {
                if (!buffer)
                        /* This makes the buffer be freed by swsusp_free() */
                        buffer = get_image_page(GFP_ATOMIC, PG_ANY);

                if (!buffer)
                        return -ENOMEM;

                handle->buffer = buffer;
        }
        handle->sync_read = 1;
        if (handle->prev < handle->cur) {
                if (handle->prev == 0) {
                        error = load_header(buffer);
                        if (error)
                                return error;

                        error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
                        if (error)
                                return error;

                } else if (handle->prev <= nr_meta_pages) {
                        unpack_orig_pfns(buffer, &copy_bm);
                        if (handle->prev == nr_meta_pages) {
                                error = prepare_image(&orig_bm, &copy_bm);
                                if (error)
                                        return error;

                                chain_init(&ca, GFP_ATOMIC, PG_SAFE);
                                memory_bm_position_reset(&orig_bm);
                                restore_pblist = NULL;
                                handle->buffer = get_buffer(&orig_bm, &ca);
                                handle->sync_read = 0;
                                if (!handle->buffer)
                                        return -ENOMEM;
                        }
                } else {
                        copy_last_highmem_page();
                        handle->buffer = get_buffer(&orig_bm, &ca);
                        if (handle->buffer != buffer)
                                handle->sync_read = 0;
                }
                handle->prev = handle->cur;
        }
        handle->buf_offset = handle->cur_offset;
        if (handle->cur_offset + count >= PAGE_SIZE) {
                count = PAGE_SIZE - handle->cur_offset;
                handle->cur_offset = 0;
                handle->cur++;
        } else {
                handle->cur_offset += count;
        }
        handle->offset += count;
        return count;
}

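The calling convention described in the comment above is easiest to see from the consumer's side. The fragment below is a minimal, hypothetical sketch of a loop that feeds image data into snapshot_write_next(); it assumes the snapshot_handle and data_of() declarations from kernel/power/power.h, and load_chunk() is a placeholder for whatever actually reads the image data (the real callers live in the swap and user-space interfaces, not in this file).

/* Hypothetical caller sketch -- not part of snapshot.c.  load_chunk() is
 * assumed to read exactly "count" bytes of image data into "buf" and to
 * return 0 on success or a negative error code. */
static int example_feed_image(struct snapshot_handle *handle)
{
        int n, error;

        memset(handle, 0, sizeof(*handle));     /* the first call needs a zeroed handle */
        for (;;) {
                n = snapshot_write_next(handle, PAGE_SIZE);
                if (n <= 0)
                        return n;               /* 0: whole image consumed, < 0: error */

                /* Up to "n" bytes may be written at the address data_of() yields. */
                error = load_chunk(data_of(*handle), n);
                if (error)
                        return error;
        }
}

A real caller would follow a successful loop with snapshot_write_finalize() and then check snapshot_image_loaded() before attempting to switch to the restored kernel.
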
/**
 *      snapshot_write_finalize - must be called after the last call to
 *      snapshot_write_next() in case the last page in the image happens
 *      to be a highmem page and its contents should be stored in the
 *      highmem.  Additionally, it releases the memory that will not be
 *      used any more.
 */

void snapshot_write_finalize(struct snapshot_handle *handle)
{
        copy_last_highmem_page();
        /* Free only if we have loaded the image entirely */
        if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
                memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
                free_highmem_data();
        }
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
        return !(!nr_copy_pages || !last_highmem_page_copied() ||
                        handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
        void *kaddr1, *kaddr2;

        kaddr1 = kmap_atomic(p1, KM_USER0);
        kaddr2 = kmap_atomic(p2, KM_USER1);
        memcpy(buf, kaddr1, PAGE_SIZE);
        memcpy(kaddr1, kaddr2, PAGE_SIZE);
        memcpy(kaddr2, buf, PAGE_SIZE);
        kunmap_atomic(kaddr1, KM_USER0);
        kunmap_atomic(kaddr2, KM_USER1);
}

/**
 *      restore_highmem - for each highmem page that was allocated before
 *      the suspend and included in the suspend image, and also has been
 *      allocated by the "resume" kernel, swap its current (ie. "before
 *      resume") contents with the previous (ie. "before suspend") ones.
 *
 *      If the resume eventually fails, we can call this function once
 *      again and restore the "before resume" highmem state.
 */

int restore_highmem(void)
{
        struct highmem_pbe *pbe = highmem_pblist;
        void *buf;

        if (!pbe)
                return 0;

        buf = get_image_page(GFP_ATOMIC, PG_SAFE);
        if (!buf)
                return -ENOMEM;

        while (pbe) {
                swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
                pbe = pbe->next;
        }
        free_image_page(buf, PG_UNSAFE_CLEAR);
        return 0;
}
#endif /* CONFIG_HIGHMEM */
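
Because restore_highmem() swaps page contents in place rather than copying them one way, the same call both applies and reverts the highmem part of the image, as the comment above notes. The fragment below is a purely hypothetical sketch of how a resume path could rely on that property; jump_to_image() is a placeholder for the architecture-specific hand-off and is not a function in this kernel.

/* Hypothetical resume-path sketch -- not part of snapshot.c. */
static int example_resume_with_highmem(void)
{
        int error;

        error = restore_highmem();      /* swap in the "before suspend" contents */
        if (error)
                return error;

        error = jump_to_image();        /* placeholder: hand control to the restored kernel */
        if (error)
                restore_highmem();      /* restore failed: swap the "before resume" data back */

        return error;
}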
