OpenCores Subversion repository or1k - https://opencores.org/ocsvn/or1k/or1k/trunk
or1k/trunk/linux/linux-2.4/kernel/pm.c (rev 1766)
/*
 *  pm.c - Power management interface
 *
 *  Copyright (C) 2000 Andrew Henroid
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/interrupt.h>

int pm_active;

/*
 *      Locking notes:
 *              pm_devs_lock can be a semaphore provided pm ops are not called
 *      from an interrupt handler (already a bad idea so no change here). Each
 *      change must be protected so that an unlink of an entry doesn't clash
 *      with a pm send - which is permitted to sleep in the current architecture.
 *
 *      Module unloads clashing with pm events now work out safely; the module
 *      unload path will block until the event has been sent. It may well block
 *      until a resume, but that will be fine.
 */

static DECLARE_MUTEX(pm_devs_lock);
static LIST_HEAD(pm_devs);

/**
 *      pm_register - register a device with power management
 *      @type: device type
 *      @id: device ID
 *      @callback: callback function
 *
 *      Add a device to the list of devices that wish to be notified about
 *      power management events. A &pm_dev structure is returned on success,
 *      on failure the return is %NULL.
 *
 *      The callback function will be called in process context and
 *      it may sleep.
 */

struct pm_dev *pm_register(pm_dev_t type,
                           unsigned long id,
                           pm_callback callback)
{
        struct pm_dev *dev = kmalloc(sizeof(struct pm_dev), GFP_KERNEL);
        if (dev) {
                memset(dev, 0, sizeof(*dev));
                dev->type = type;
                dev->id = id;
                dev->callback = callback;

                down(&pm_devs_lock);
                list_add(&dev->entry, &pm_devs);
                up(&pm_devs_lock);
        }
        return dev;
}

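/*
 * Usage sketch (editor's illustration, not part of the original file):
 * how a driver of this era might hook into pm_register().  Every
 * "example_*" identifier below is hypothetical; PM_UNKNOWN_DEV is used
 * as the type only because it is known from this file - a real driver
 * would pass its specific pm_dev_t value from <linux/pm.h>.
 */

static int example_suspended;   /* stand-in for real device state */

static int example_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
{
        switch (rqst) {
        case PM_SUSPEND:
                example_suspended = 1;  /* quiesce hardware, save registers */
                break;
        case PM_RESUME:
                example_suspended = 0;  /* reprogram hardware, restart I/O */
                break;
        default:
                break;
        }
        return 0;                       /* non-zero would veto the request */
}

static struct pm_dev *example_pm_dev;

static int example_init(void)
{
        /* Process context only: pm_register() allocates with GFP_KERNEL. */
        example_pm_dev = pm_register(PM_UNKNOWN_DEV, 0, example_pm_callback);
        return 0;       /* a %NULL handle simply means no PM notifications */
}
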
/**
 *      pm_unregister - unregister a device from power management
 *      @dev: device to unregister
 *
 *      Remove a device from the power management notification lists. The
 *      dev passed must be a handle previously returned by pm_register.
 */

void pm_unregister(struct pm_dev *dev)
{
        if (dev) {
                down(&pm_devs_lock);
                list_del(&dev->entry);
                up(&pm_devs_lock);

                kfree(dev);
        }
}

static void __pm_unregister(struct pm_dev *dev)
{
        if (dev) {
                list_del(&dev->entry);
                kfree(dev);
        }
}

/**
 *      pm_unregister_all - unregister all devices with matching callback
 *      @callback: callback function pointer
 *
 *      Unregister every device that would call the callback passed. This
 *      is primarily meant as a helper function for loadable modules. It
 *      enables a module to give up all its managed devices without keeping
 *      its own private list.
 */

void pm_unregister_all(pm_callback callback)
{
        struct list_head *entry;

        if (!callback)
                return;

        down(&pm_devs_lock);
        entry = pm_devs.next;
        while (entry != &pm_devs) {
                struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
                entry = entry->next;
                if (dev->callback == callback)
                        __pm_unregister(dev);
        }
        up(&pm_devs_lock);
}

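/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the matching cleanup for the registration sketch above.  A module can
 * free the single handle it kept with pm_unregister(), or drop every
 * device registered against one callback with pm_unregister_all() and
 * keep no private list at all.
 */

static void example_exit(void)
{
        pm_unregister(example_pm_dev);          /* release one handle, or ... */
        /* pm_unregister_all(example_pm_callback);   ... everything at once */
}
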
/**
 *      pm_send - send request to a single device
 *      @dev: device to send to
 *      @rqst: power management request
 *      @data: data for the callback
 *
 *      Issue a power management request to a given device. The
 *      %PM_SUSPEND and %PM_RESUME events are handled specially. The
 *      data field must hold the intended next state. No call is made
 *      if the state matches.
 *
 *      BUGS: what stops two power management requests occurring in parallel
 *      and conflicting?
 *
 *      WARNING: Calling pm_send directly is not generally recommended; in
 *      particular there is no locking against the pm_dev going away. The
 *      caller must maintain all needed locking or have 'inside knowledge'
 *      on the safety. Also remember that this function is not locked against
 *      pm_unregister. This means that you must handle SMP races on callback
 *      execution and unload yourself.
 */

int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
{
        int status = 0;
        int prev_state, next_state;

        if (in_interrupt())
                BUG();

        switch (rqst) {
        case PM_SUSPEND:
        case PM_RESUME:
                prev_state = dev->state;
                next_state = (unsigned long) data;
                if (prev_state != next_state) {
                        if (dev->callback)
                                status = (*dev->callback)(dev, rqst, data);
                        if (!status) {
                                dev->state = next_state;
                                dev->prev_state = prev_state;
                        }
                } else {
                        dev->prev_state = prev_state;
                }
                break;
        default:
                if (dev->callback)
                        status = (*dev->callback)(dev, rqst, data);
                break;
        }
        return status;
}

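/*
 * Usage sketch (editor's illustration, not part of the original file):
 * driving one device through pm_send().  The next state travels in the
 * data argument as a small integer cast to void *, exactly as pm_send()
 * unpacks it above; the value 3 is an arbitrary non-zero (suspended)
 * level chosen for the example.  The caller owns all the locking
 * described in the WARNING above.
 */

static int example_suspend_one(struct pm_dev *dev)
{
        int error;

        error = pm_send(dev, PM_SUSPEND, (void *) 3);   /* request a suspend state */
        if (error)
                return error;                           /* the device vetoed it */

        return pm_send(dev, PM_RESUME, (void *) 0);     /* back to the running state */
}
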
/*
 * Undo incomplete request
 */
static void pm_undo_all(struct pm_dev *last)
{
        struct list_head *entry = last->entry.prev;
        while (entry != &pm_devs) {
                struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
                if (dev->state != dev->prev_state) {
                        /* previous state was zero (running) resume or
                         * previous state was non-zero (suspended) suspend
                         */
                        pm_request_t undo = (dev->prev_state
                                             ? PM_SUSPEND : PM_RESUME);
                        pm_send(dev, undo, (void *) dev->prev_state);
                }
                entry = entry->prev;
        }
}

/**
 *      pm_send_all - send request to all managed devices
 *      @rqst: power management request
 *      @data: data for the callback
 *
 *      Issue a power management request to all devices. The
 *      %PM_SUSPEND events are handled specially. Any device is
 *      permitted to fail a suspend by returning a non-zero (error)
 *      value from its callback function. If any device vetoes a
 *      suspend request then all other devices that have suspended
 *      during the processing of this request are restored to their
 *      previous state.
 *
 *      WARNING: This function takes the pm_devs_lock. The lock is not
 *      dropped until the callbacks have completed. This prevents races
 *      against the pm locking functions and against the module-unload
 *      pm_unregister code. It does mean, however, that you must not issue
 *      pm_ functions within the callback or you will deadlock and users
 *      will hate you.
 *
 *      Zero is returned on success. If a suspend fails then the status
 *      from the device that vetoes the suspend is returned.
 *
 *      BUGS: what stops two power management requests occurring in parallel
 *      and conflicting?
 */

int pm_send_all(pm_request_t rqst, void *data)
{
        struct list_head *entry;

        down(&pm_devs_lock);
        entry = pm_devs.next;
        while (entry != &pm_devs) {
                struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
                if (dev->callback) {
                        int status = pm_send(dev, rqst, data);
                        if (status) {
                                /* return devices to previous state on
                                 * failed suspend request
                                 */
                                if (rqst == PM_SUSPEND)
                                        pm_undo_all(dev);
                                up(&pm_devs_lock);
                                return status;
                        }
                }
                entry = entry->next;
        }
        up(&pm_devs_lock);
        return 0;
}

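/*
 * Usage sketch (editor's illustration, not part of the original file):
 * roughly the pattern a 2.4-era platform suspend path (the APM driver,
 * for instance) follows around pm_send_all(); this is a hedged sketch,
 * not a copy of that code.  If any callback vetoes the suspend,
 * pm_send_all() has already rolled the earlier devices back through
 * pm_undo_all(), so the caller only reports the error.
 */

static int example_system_suspend(void)
{
        int error;

        error = pm_send_all(PM_SUSPEND, (void *) 3);    /* ask every device to suspend */
        if (error)
                return error;   /* a device said no; the others were resumed already */

        /* ... the platform would enter its low-power state here ... */

        pm_send_all(PM_RESUME, (void *) 0);             /* wake everything back up */
        return 0;
}
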
/**
 *      pm_find - find a device
 *      @type: type of device
 *      @from: where to start looking
 *
 *      Scan the power management list for devices of a specific type. The
 *      return value for a matching device may be passed to further calls
 *      to this function to find further matches. A %NULL indicates the end
 *      of the list.
 *
 *      To search from the beginning pass %NULL as the @from value.
 *
 *      The caller MUST hold the pm_devs_lock lock when calling this
 *      function. The instant that the lock is dropped all pointers returned
 *      may become invalid.
 */

struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from)
{
        struct list_head *entry = from ? from->entry.next : pm_devs.next;
        while (entry != &pm_devs) {
                struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
                if (type == PM_UNKNOWN_DEV || dev->type == type)
                        return dev;
                entry = entry->next;
        }
        return NULL;
}

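/*
 * Usage sketch (editor's illustration, not part of the original file):
 * walking the registered devices with pm_find().  As the comment above
 * insists, pm_devs_lock must be held for the whole walk - the pointers
 * are only valid while it is held.  Counting devices of one type stands
 * in here for real per-device work.
 */

static int example_count_devices(pm_dev_t type)
{
        struct pm_dev *dev = NULL;
        int count = 0;

        down(&pm_devs_lock);
        while ((dev = pm_find(type, dev)) != NULL)
                count++;
        up(&pm_devs_lock);

        return count;
}
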
EXPORT_SYMBOL(pm_register);
EXPORT_SYMBOL(pm_unregister);
EXPORT_SYMBOL(pm_unregister_all);
EXPORT_SYMBOL(pm_send);
EXPORT_SYMBOL(pm_send_all);
EXPORT_SYMBOL(pm_find);
EXPORT_SYMBOL(pm_active);
