test_project/trunk/linux_sd_driver/include/linux/suspend.h

#ifndef _LINUX_SUSPEND_H
#define _LINUX_SUSPEND_H

#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
#include <asm/suspend.h>
#endif

#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <asm/errno.h>

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
#else
static inline int pm_prepare_console(void) { return 0; }
static inline void pm_restore_console(void) {}
#endif

typedef int __bitwise suspend_state_t;

#define PM_SUSPEND_ON           ((__force suspend_state_t) 0)
#define PM_SUSPEND_STANDBY      ((__force suspend_state_t) 1)
#define PM_SUSPEND_MEM          ((__force suspend_state_t) 3)
#define PM_SUSPEND_MAX          ((__force suspend_state_t) 4)

/**
 * struct platform_suspend_ops - Callbacks for managing platform dependent
 *      system sleep states.
 *
 * @valid: Callback to determine if a given system sleep state is supported by
 *      the platform.
 *      Valid (i.e. supported) states are advertised in /sys/power/state.  Note
 *      that it still may be impossible to enter a given system sleep state if
 *      the conditions aren't right.
 *      The %suspend_valid_only_mem function is available and can be assigned
 *      to this callback if the platform only supports mem sleep.
 *
 * @set_target: Tell the platform which system sleep state is going to be
 *      entered.
 *      @set_target() is executed immediately before suspending devices.  The
 *      information conveyed to the platform code by @set_target() should be
 *      disregarded by the platform as soon as @finish() is executed, and also
 *      if @prepare() fails.  If @set_target() fails (i.e. returns nonzero),
 *      @prepare(), @enter() and @finish() will not be called by the PM core.
 *      This callback is optional.  However, if it is implemented, the argument
 *      passed to @enter() is meaningless and should be ignored.
 *
 * @prepare: Prepare the platform for entering the system sleep state indicated
 *      by @set_target().
 *      @prepare() is called right after devices have been suspended (i.e. the
 *      appropriate .suspend() method has been executed for each device) and
 *      before the nonboot CPUs are disabled (it is executed with IRQs enabled).
 *      This callback is optional.  It returns 0 on success or a negative
 *      error code otherwise, in which case the system cannot enter the desired
 *      sleep state (@enter() and @finish() will not be called in that case).
 *
 * @enter: Enter the system sleep state indicated by @set_target() or
 *      represented by the argument if @set_target() is not implemented.
 *      This callback is mandatory.  It returns 0 on success or a negative
 *      error code otherwise, in which case the system cannot enter the desired
 *      sleep state.
 *
 * @finish: Called when the system has just left a sleep state, right after
 *      the nonboot CPUs have been enabled and before devices are resumed (it is
 *      executed with IRQs enabled).
 *      This callback is optional, but should be implemented by the platforms
 *      that implement @prepare().  If implemented, it is always called after
 *      @enter() (even if @enter() fails).
 */
struct platform_suspend_ops {
        int (*valid)(suspend_state_t state);
        int (*set_target)(suspend_state_t state);
        int (*prepare)(void);
        int (*enter)(suspend_state_t state);
        void (*finish)(void);
};
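
/*
 * Illustrative sketch (not part of the original header): one way a platform
 * might fill in the callbacks documented above.  Every name prefixed with
 * "foo_" is hypothetical.  Only .enter is mandatory; .valid here restricts
 * the advertised states to mem sleep.
 */
static int foo_pm_valid(suspend_state_t state)
{
        /* advertise only "mem" in /sys/power/state */
        return state == PM_SUSPEND_MEM;
}

static int foo_pm_enter(suspend_state_t state)
{
        /* program the platform hardware to enter the requested sleep state */
        return 0;
}

static struct platform_suspend_ops foo_pm_ops = {
        .valid  = foo_pm_valid,
        .enter  = foo_pm_enter,
};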

#ifdef CONFIG_SUSPEND
/**
 * suspend_set_ops - set platform dependent suspend operations
 * @ops: The new suspend operations to set.
 */
extern void suspend_set_ops(struct platform_suspend_ops *ops);
extern int suspend_valid_only_mem(suspend_state_t state);

/**
 * arch_suspend_disable_irqs - disable IRQs for suspend
 *
 * Disables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_disable_irqs(void);

/**
 * arch_suspend_enable_irqs - enable IRQs after suspend
 *
 * Enables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_enable_irqs(void);

extern int pm_suspend(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem  NULL

static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
#endif /* !CONFIG_SUSPEND */
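
/*
 * Illustrative sketch (not part of the original header): registering the
 * hypothetical foo_pm_ops sketched above.  foo_suspend_init is a made-up
 * platform init function; once suspend_set_ops() has run, the advertised
 * states appear in /sys/power/state, and writing "mem" there ends up calling
 * pm_suspend(PM_SUSPEND_MEM).
 */
static int __init foo_suspend_init(void)
{
        suspend_set_ops(&foo_pm_ops);
        return 0;
}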

/* struct pbe is used for creating lists of pages that should be restored
 * atomically during the resume from disk, because the page frames they
 * occupied before the suspend are in use.
 */
struct pbe {
        void *address;          /* address of the copy */
        void *orig_address;     /* original address of a page */
        struct pbe *next;
};
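
/*
 * Illustrative sketch (not part of the original header): how a chain of
 * struct pbe entries might be walked to copy each saved page back to the
 * page frame it originally occupied.  The function name is hypothetical and
 * the real walk lives in kernel/power/snapshot.c; memcpy() and PAGE_SIZE
 * come from <linux/string.h> and <asm/page.h>.
 */
static inline void example_restore_pbe_list(struct pbe *list)
{
        struct pbe *p;

        for (p = list; p; p = p->next)
                memcpy(p->orig_address, p->address, PAGE_SIZE);
}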

/* mm/page_alloc.c */
extern void drain_local_pages(void);
extern void mark_free_pages(struct zone *zone);

/**
 * struct platform_hibernation_ops - hibernation platform support
 *
 * The methods in this structure allow a platform to override the default
 * mechanism of shutting down the machine during a hibernation transition.
 *
 * All of the methods must be assigned.
 *
 * @start: Tell the platform driver that we're starting hibernation.
 *      Called right after shrinking memory and before freezing devices.
 *
 * @pre_snapshot: Prepare the platform for creating the hibernation image.
 *      Called right after devices have been frozen and before the nonboot
 *      CPUs are disabled (runs with IRQs on).
 *
 * @finish: Restore the previous state of the platform after the hibernation
 *      image has been created *or* put the platform into the normal operation
 *      mode after the hibernation (the same method is executed in both cases).
 *      Called right after the nonboot CPUs have been enabled and before
 *      thawing devices (runs with IRQs on).
 *
 * @prepare: Prepare the platform for entering the low power state.
 *      Called right after the hibernation image has been saved and before
 *      devices are prepared for entering the low power state.
 *
 * @enter: Put the system into the low power state after the hibernation image
 *      has been saved to disk.
 *      Called after the nonboot CPUs have been disabled and all of the low
 *      level devices have been shut down (runs with IRQs off).
 *
 * @leave: Perform the first stage of the cleanup after the system sleep state
 *      has been left.
 *      Called right after control has been passed from the boot kernel to
 *      the image kernel, before the nonboot CPUs are enabled and before devices
 *      are resumed.  Executed with interrupts disabled.
 *
 * @pre_restore: Prepare the system for restoration from a hibernation image.
 *      Called right after devices have been frozen and before the nonboot
 *      CPUs are disabled (runs with IRQs on).
 *
 * @restore_cleanup: Clean up after a failed image restoration.
 *      Called right after the nonboot CPUs have been enabled and before
 *      thawing devices (runs with IRQs on).
 */
struct platform_hibernation_ops {
        int (*start)(void);
        int (*pre_snapshot)(void);
        void (*finish)(void);
        int (*prepare)(void);
        int (*enter)(void);
        void (*leave)(void);
        int (*pre_restore)(void);
        void (*restore_cleanup)(void);
};
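
/*
 * Illustrative sketch (not part of the original header): a skeleton of the
 * callbacks a platform would provide.  Every "foo_" name is hypothetical; a
 * real implementation (e.g. an ACPI platform) would talk to the firmware at
 * each step instead of returning immediately.
 */
static int foo_hib_start(void)            { return 0; }
static int foo_hib_pre_snapshot(void)     { return 0; }
static void foo_hib_finish(void)          { }
static int foo_hib_prepare(void)          { return 0; }
static int foo_hib_enter(void)            { return 0; /* e.g. enter the platform sleep state */ }
static void foo_hib_leave(void)           { }
static int foo_hib_pre_restore(void)      { return 0; }
static void foo_hib_restore_cleanup(void) { }

static struct platform_hibernation_ops foo_hibernation_ops = {
        .start           = foo_hib_start,
        .pre_snapshot    = foo_hib_pre_snapshot,
        .finish          = foo_hib_finish,
        .prepare         = foo_hib_prepare,
        .enter           = foo_hib_enter,
        .leave           = foo_hib_leave,
        .pre_restore     = foo_hib_pre_restore,
        .restore_cleanup = foo_hib_restore_cleanup,
};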

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
static inline void register_nosave_region(unsigned long b, unsigned long e)
{
        __register_nosave_region(b, e, 0);
}
static inline void register_nosave_region_late(unsigned long b, unsigned long e)
{
        __register_nosave_region(b, e, 1);
}
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);

extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
extern int hibernate(void);
#else /* CONFIG_HIBERNATION */
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}

static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
#endif /* CONFIG_HIBERNATION */
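
/*
 * Illustrative sketch (not part of the original header): wiring up the
 * hypothetical foo_hibernation_ops sketched above.  foo_hibernation_init and
 * the page frame numbers passed to register_nosave_region() are made up; a
 * real platform would compute them from the memory map of the region its
 * firmware needs preserved across the transition.  Hibernation itself is
 * normally started by writing "disk" to /sys/power/state, which ends up in
 * hibernate().
 */
static int __init foo_hibernation_init(void)
{
        hibernation_set_ops(&foo_hibernation_ops);

        /* exclude a hypothetical firmware area from the hibernation image */
        register_nosave_region(0x0a0, 0x100);

        return 0;
}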

#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);
struct saved_context;
void __save_processor_state(struct saved_context *ctxt);
void __restore_processor_state(struct saved_context *ctxt);

/* kernel/power/main.c */
extern struct blocking_notifier_head pm_chain_head;

static inline int register_pm_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&pm_chain_head, nb);
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}

#define pm_notifier(fn, pri) {                                  \
        static struct notifier_block fn##_nb =                  \
                { .notifier_call = fn, .priority = pri };       \
        register_pm_notifier(&fn##_nb);                         \
}
#else /* !CONFIG_PM_SLEEP */

static inline int register_pm_notifier(struct notifier_block *nb)
{
        return 0;
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
        return 0;
}

#define pm_notifier(fn, pri)    do { (void)(fn); } while (0)
#endif /* !CONFIG_PM_SLEEP */
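
/*
 * Illustrative sketch (not part of the original header): a driver reacting to
 * suspend/resume transitions through the PM notifier chain.  foo_pm_callback,
 * foo_pm_nb and foo_pm_init are hypothetical; the PM_SUSPEND_PREPARE and
 * PM_POST_SUSPEND event values come from <linux/notifier.h>.
 */
static int foo_pm_callback(struct notifier_block *nb, unsigned long event,
                           void *unused)
{
        switch (event) {
        case PM_SUSPEND_PREPARE:
                /* quiesce the device before tasks are frozen */
                return NOTIFY_OK;
        case PM_POST_SUSPEND:
                /* undo the preparation after resume (or a failed suspend) */
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block foo_pm_nb = {
        .notifier_call = foo_pm_callback,
};

static int __init foo_pm_init(void)
{
        /* equivalently, the pm_notifier(foo_pm_callback, 0) helper above */
        return register_pm_notifier(&foo_pm_nb);
}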

#ifndef CONFIG_HIBERNATION
static inline void register_nosave_region(unsigned long b, unsigned long e)
{
}
static inline void register_nosave_region_late(unsigned long b, unsigned long e)
{
}
#endif

#endif /* _LINUX_SUSPEND_H */
