/*
 *	$Id: pci.c,v 1.1.1.1 2004-04-15 01:52:19 phoenix Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/kmod.h>		/* for hotplug_path */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/cache.h>

#include <asm/page.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */

#undef DEBUG

#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif

LIST_HEAD(pci_root_buses);
LIST_HEAD(pci_devices);

/**
 * pci_find_slot - locate PCI device from a given PCI slot
 * @bus: number of PCI bus on which desired PCI device resides
 * @devfn: encodes number of PCI slot in which the desired PCI
 * device resides and the logical device number within that slot
 * in case of multi-function devices.
 *
 * Given a PCI bus and slot/function number, the desired PCI device
 * is located in the system global list of PCI devices.  If the device
 * is found, a pointer to its data structure is returned.  If no
 * device is found, %NULL is returned.
 */
struct pci_dev *
pci_find_slot(unsigned int bus, unsigned int devfn)
{
	struct pci_dev *dev;

	pci_for_each_dev(dev) {
		if (dev->bus->number == bus && dev->devfn == devfn)
			return dev;
	}
	return NULL;
}

/**
 * pci_find_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
 * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is
 * found with a matching @vendor, @device, @ss_vendor and @ss_device, a
 * pointer to its device structure is returned.  Otherwise, %NULL is returned.
 * A new search is initiated by passing %NULL as the @from argument.
 * Otherwise if @from is not %NULL, searches continue from the next device
 * on the global list.
 */
struct pci_dev *
pci_find_subsys(unsigned int vendor, unsigned int device,
		unsigned int ss_vendor, unsigned int ss_device,
		const struct pci_dev *from)
{
	struct list_head *n = from ? from->global_list.next : pci_devices.next;

	while (n != &pci_devices) {
		struct pci_dev *dev = pci_dev_g(n);
		if ((vendor == PCI_ANY_ID || dev->vendor == vendor) &&
		    (device == PCI_ANY_ID || dev->device == device) &&
		    (ss_vendor == PCI_ANY_ID || dev->subsystem_vendor == ss_vendor) &&
		    (ss_device == PCI_ANY_ID || dev->subsystem_device == ss_device))
			return dev;
		n = n->next;
	}
	return NULL;
}


/**
 * pci_find_device - begin or continue searching for a PCI device by vendor/device id
 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is
 * found with a matching @vendor and @device, a pointer to its device
 * structure is returned.  Otherwise, %NULL is returned.
 * A new search is initiated by passing %NULL as the @from argument.
 * Otherwise if @from is not %NULL, searches continue from the next device
 * on the global list.
 */
struct pci_dev *
pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from)
{
	return pci_find_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
}
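
/*
 * Usage sketch (not part of this file): a hypothetical driver can walk
 * every matching device by feeding each result back in as @from.  The
 * vendor/device IDs below are placeholders.
 *
 *	struct pci_dev *dev = NULL;
 *
 *	while ((dev = pci_find_device(0x1234, 0x5678, dev)) != NULL) {
 *		... set up each matching device ...
 *	}
 */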


/**
 * pci_find_class - begin or continue searching for a PCI device by class
 * @class: search for a PCI device with this class designation
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is
 * found with a matching @class, a pointer to its device structure is
 * returned.  Otherwise, %NULL is returned.
 * A new search is initiated by passing %NULL as the @from argument.
 * Otherwise if @from is not %NULL, searches continue from the next device
 * on the global list.
 */
struct pci_dev *
pci_find_class(unsigned int class, const struct pci_dev *from)
{
	struct list_head *n = from ? from->global_list.next : pci_devices.next;

	while (n != &pci_devices) {
		struct pci_dev *dev = pci_dev_g(n);
		if (dev->class == class)
			return dev;
		n = n->next;
	}
	return NULL;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM		Power Management
 *
 *  %PCI_CAP_ID_AGP		Accelerated Graphics Port
 *
 *  %PCI_CAP_ID_VPD		Vital Product Data
 *
 *  %PCI_CAP_ID_SLOTID		Slot Identification
 *
 *  %PCI_CAP_ID_MSI		Message Signalled Interrupts
 *
 *  %PCI_CAP_ID_CHSWP		CompactPCI HotSwap
 *
 *  %PCI_CAP_ID_PCIX		PCI-X
 */
int
pci_find_capability(struct pci_dev *dev, int cap)
{
	u16 status;
	u8 pos, id;
	int ttl = 48;

	pci_read_config_word(dev, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;
	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &pos);
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		pci_read_config_byte(dev, PCI_CB_CAPABILITY_LIST, &pos);
		break;
	default:
		return 0;
	}
	while (ttl-- && pos >= 0x40) {
		pos &= ~3;
		pci_read_config_byte(dev, pos + PCI_CAP_LIST_ID, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pci_read_config_byte(dev, pos + PCI_CAP_LIST_NEXT, &pos);
	}
	return 0;
}
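
/*
 * Illustrative pattern (assumptions: a valid, PM-capable dev): callers
 * use the returned offset as a base for further config-space accesses,
 * e.g. locating the PM Control/Status register:
 *
 *	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	u16 pmcsr;
 *
 *	if (pm)
 *		pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
 *
 * This is exactly the pattern pci_set_power_state() below relies on.
 */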


/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For the given resource region of the given device, return the resource
 * region of the parent bus the given region is contained in, or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for(i=0; i<4; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: Power state we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a lower state than we're already in.
 * 0 if we're already in the requested state.
 * -EIO if device does not support PCI PM.
 * 0 if we can successfully change the power state.
 */

int
pci_set_power_state(struct pci_dev *dev, int state)
{
	int pm;
	u16 pmcsr;

	/* bound the state we're entering */
	if (state > 3)
		state = 3;

	/* Validate current state:
	 * Can enter D0 from any state, but we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state > 0 && dev->current_state > state)
		return -EINVAL;
	else if (dev->current_state == state)
		return 0;	/* we're already there */

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -EIO;

	/* check if this device supports the desired state */
	if (state == 1 || state == 2) {
		u16 pmc;
		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
		if (state == 1 && !(pmc & PCI_PM_CAP_D1))
			return -EIO;
		else if (state == 2 && !(pmc & PCI_PM_CAP_D2))
			return -EIO;
	}

	/* If we're in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	if (dev->current_state >= 3)
		pmcsr = 0;
	else {
		pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays;
	 * see PCI PM 1.1 5.6.1 table 18
	 */
	if (state == 3 || dev->current_state == 3) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ/100);
	} else if (state == 2 || dev->current_state == 2)
		udelay(200);
	dev->current_state = state;

	return 0;
}
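
/*
 * Sketch of a typical suspend path (hypothetical driver code): save the
 * config header, then drop into D3hot.  The buffer size follows the
 * pci_save_state() contract below (16 dwords = 64 bytes):
 *
 *	u32 saved_config[16];
 *
 *	pci_save_state(pdev, saved_config);
 *	pci_set_power_state(pdev, 3);
 */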

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 * @buffer: buffer to hold config space context
 *
 * @buffer must be large enough to hold the entire PCI 2.2 config space
 * (>= 64 bytes).
 */
int
pci_save_state(struct pci_dev *dev, u32 *buffer)
{
	int i;
	if (buffer) {
		/* XXX: 100% dword access ok here? */
		for (i = 0; i < 16; i++)
			pci_read_config_dword(dev, i * 4, &buffer[i]);
	}
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 * @buffer: saved PCI config space
 */
int
pci_restore_state(struct pci_dev *dev, u32 *buffer)
{
	int i;

	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_write_config_dword(dev, i * 4, buffer[i]);
	}
	/*
	 * otherwise, write the context information we know from bootup.
	 * This works around a problem where warm-booting from Windows
	 * combined with a D3(hot)->D0 transition causes PCI config
	 * header data to be forgotten.
	 */
	else {
		for (i = 0; i < 6; i ++)
			pci_write_config_dword(dev,
					       PCI_BASE_ADDRESS_0 + (i * 4),
					       dev->resource[i].start);
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
	}
	return 0;
}

/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BARs that must be configured
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable selected I/O and memory resources.  Wake up the device if it
 * was suspended.  Beware, this function can fail.
 */

int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	pci_set_power_state(dev, 0);
	if ((err = pcibios_enable_device(dev, bars)) < 0)
		return err;
	return 0;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable I/O and memory.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int
pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_bars(dev, 0x3F);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 */
void
pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
}

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: PCI device to operate on
 * @state: Current state of device.
 * @enable: Flag to enable or disable generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if device doesn't have PM Capabilities.
 * -EINVAL is returned if device supports it, but can't generate wake events.
 * 0 if operation is successful.
 */
int pci_enable_wake(struct pci_dev *dev, u32 state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If the device doesn't support PM Capabilities, but the request is
	 * to disable wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(value);	/* First bit of mask */

	/* Check if it can generate PME# from the requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}

int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (!pin)
		return -1;
	pin--;
	while (dev->bus->self) {
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}
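
/*
 * Note on the loop above: each traversal of a PCI-to-PCI bridge rotates
 * INTA#..INTD# by the device's slot number (the standard "swizzle").
 * For example, a device in slot 2 asserting INTA (pin 0) appears to its
 * parent bridge as pin (0 + 2) % 4 = INTC.  The walk stops at the root
 * bus, so *bridge ends up as the device closest to the interrupt router.
 */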

/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				   pci_resource_len(pdev, bar));
}

/**
 * pci_request_region - Reserve a PCI I/O or memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %-EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, char *res_name)
{
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
				    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
		pdev->slot_name);
	return -EBUSY;
}


/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}

/**
 * pci_request_regions - Reserve all of a device's PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %-EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if (pci_request_region(pdev, i, res_name))
			goto err_out;
	return 0;

err_out:
	printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
		pci_resource_flags(pdev, i) & IORESOURCE_IO ? "I/O" : "mem",
		i + 1, /* PCI BAR # */
		pci_resource_len(pdev, i), pci_resource_start(pdev, i),
		pdev->slot_name);
	while (--i >= 0)
		pci_release_region(pdev, i);

	return -EBUSY;
}
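
/*
 * Typical probe-time usage (hypothetical driver, illustrative only):
 *
 *	static int mydev_probe(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		if (pci_enable_device(pdev))
 *			return -EIO;
 *		if (pci_request_regions(pdev, "mydev"))
 *			return -EBUSY;
 *		... map BARs, register the device ...
 *		return 0;
 *	}
 *
 * The matching remove() path calls pci_release_regions() and
 * pci_disable_device() in the reverse order.
 */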


/*
 * Registration of PCI drivers and handling of hot-pluggable devices.
 */

static LIST_HEAD(pci_drivers);

/**
 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
 * @ids: array of PCI device id structures to search in
 * @dev: the PCI device structure to match against
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 */
const struct pci_device_id *
pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev)
{
	while (ids->vendor || ids->subvendor || ids->class_mask) {
		if ((ids->vendor == PCI_ANY_ID || ids->vendor == dev->vendor) &&
		    (ids->device == PCI_ANY_ID || ids->device == dev->device) &&
		    (ids->subvendor == PCI_ANY_ID || ids->subvendor == dev->subsystem_vendor) &&
		    (ids->subdevice == PCI_ANY_ID || ids->subdevice == dev->subsystem_device) &&
		    !((ids->class ^ dev->class) & ids->class_mask))
			return ids;
		ids++;
	}
	return NULL;
}
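
/*
 * An id table is a zero-terminated array; the all-zero terminator is what
 * the while-condition above tests for.  A minimal example (fictitious IDs):
 *
 *	static struct pci_device_id mydev_ids[] = {
 *		{ 0x1234, 0x5678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 *		{ 0, }
 *	};
 */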

static int
pci_announce_device(struct pci_driver *drv, struct pci_dev *dev)
{
	const struct pci_device_id *id;
	int ret = 0;

	if (drv->id_table) {
		id = pci_match_device(drv->id_table, dev);
		if (!id) {
			ret = 0;
			goto out;
		}
	} else
		id = NULL;

	dev_probe_lock();
	if (drv->probe(dev, id) >= 0) {
		dev->driver = drv;
		ret = 1;
	}
	dev_probe_unlock();
out:
	return ret;
}

/**
 * pci_register_driver - register a new pci driver
 * @drv: the driver structure to register
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns the number of pci devices which were claimed by the driver
 * during registration.  The driver remains registered even if the
 * return value is zero.
 */
int
pci_register_driver(struct pci_driver *drv)
{
	struct pci_dev *dev;
	int count = 0;

	list_add_tail(&drv->node, &pci_drivers);
	pci_for_each_dev(dev) {
		if (!pci_dev_driver(dev))
			count += pci_announce_device(drv, dev);
	}
	return count;
}
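
/*
 * A registration sketch (hypothetical module, illustrative only), using
 * the same gcc named-field initializer style as pci_compat_driver below.
 * Note that a zero return is not an error; the driver simply claimed no
 * devices at registration time:
 *
 *	static struct pci_driver mydev_driver = {
 *		name:		"mydev",
 *		id_table:	mydev_ids,
 *		probe:		mydev_probe,
 *		remove:		mydev_remove,
 *	};
 *
 *	static int __init mydev_init(void)
 *	{
 *		pci_register_driver(&mydev_driver);
 *		return 0;
 *	}
 */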

/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */

void
pci_unregister_driver(struct pci_driver *drv)
{
	struct pci_dev *dev;

	list_del(&drv->node);
	pci_for_each_dev(dev) {
		if (dev->driver == drv) {
			if (drv->remove)
				drv->remove(dev);
			dev->driver = NULL;
		}
	}
}

#ifdef CONFIG_HOTPLUG

#ifndef FALSE
#define FALSE	(0)
#define TRUE	(!FALSE)
#endif

static void
run_sbin_hotplug(struct pci_dev *pdev, int insert)
{
	int i;
	char *argv[3], *envp[8];
	char id[20], sub_id[24], bus_id[24], class_id[20];

	if (!hotplug_path[0])
		return;

	sprintf(class_id, "PCI_CLASS=%04X", pdev->class);
	sprintf(id, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device);
	sprintf(sub_id, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor, pdev->subsystem_device);
	sprintf(bus_id, "PCI_SLOT_NAME=%s", pdev->slot_name);

	i = 0;
	argv[i++] = hotplug_path;
	argv[i++] = "pci";
	argv[i] = 0;

	i = 0;
	/* minimal command environment */
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";

	/* other stuff we want to pass to /sbin/hotplug */
	envp[i++] = class_id;
	envp[i++] = id;
	envp[i++] = sub_id;
	envp[i++] = bus_id;
	if (insert)
		envp[i++] = "ACTION=add";
	else
		envp[i++] = "ACTION=remove";
	envp[i] = 0;

	call_usermodehelper (argv [0], argv, envp);
}

/**
 * pci_announce_device_to_drivers - tell the drivers a new device has appeared
 * @dev: the device that has shown up
 *
 * Notifies the drivers that a new device has appeared, and also notifies
 * userspace through /sbin/hotplug.
 */
void
pci_announce_device_to_drivers(struct pci_dev *dev)
{
	struct list_head *ln;

	for(ln=pci_drivers.next; ln != &pci_drivers; ln=ln->next) {
		struct pci_driver *drv = list_entry(ln, struct pci_driver, node);
		if (drv->remove && pci_announce_device(drv, dev))
			break;
	}

	/* notify userspace of new hotplug device */
	run_sbin_hotplug(dev, TRUE);
}

/**
 * pci_insert_device - insert a hotplug device
 * @dev: the device to insert
 * @bus: where to insert it
 *
 * Add a new device to the device lists and notify userspace (/sbin/hotplug).
 */
void
pci_insert_device(struct pci_dev *dev, struct pci_bus *bus)
{
	list_add_tail(&dev->bus_list, &bus->devices);
	list_add_tail(&dev->global_list, &pci_devices);
#ifdef CONFIG_PROC_FS
	pci_proc_attach_device(dev);
#endif
	pci_announce_device_to_drivers(dev);
}

static void
pci_free_resources(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = dev->resource + i;
		if (res->parent)
			release_resource(res);
	}
}

/**
 * pci_remove_device - remove a hotplug device
 * @dev: the device to remove
 *
 * Delete the device structure from the device lists and
 * notify userspace (/sbin/hotplug).
 */
void
pci_remove_device(struct pci_dev *dev)
{
	if (dev->driver) {
		if (dev->driver->remove)
			dev->driver->remove(dev);
		dev->driver = NULL;
	}
	list_del(&dev->bus_list);
	list_del(&dev->global_list);
	pci_free_resources(dev);
#ifdef CONFIG_PROC_FS
	pci_proc_detach_device(dev);
#endif

	/* notify userspace of hotplug device removal */
	run_sbin_hotplug(dev, FALSE);
}

#endif

static struct pci_driver pci_compat_driver = {
	name: "compat"
};

/**
 * pci_dev_driver - get the pci_driver of a device
 * @dev: the device to query
 *
 * Returns the appropriate pci_driver structure or %NULL if there is no
 * registered driver for the device.
 */
struct pci_driver *
pci_dev_driver(const struct pci_dev *dev)
{
	if (dev->driver)
		return dev->driver;
	else {
		int i;
		for(i=0; i<=PCI_ROM_RESOURCE; i++)
			if (dev->resource[i].flags & IORESOURCE_BUSY)
				return &pci_compat_driver;
	}
	return NULL;
}


/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

static spinlock_t pci_lock = SPIN_LOCK_UNLOCKED;

/*
 * Wrappers for all PCI configuration access functions.  They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP(rw,size,type) \
int pci_##rw##_config_##size (struct pci_dev *dev, int pos, type value) \
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	spin_lock_irqsave(&pci_lock, flags);				\
	res = dev->bus->ops->rw##_##size(dev, pos, value);		\
	spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

PCI_OP(read, byte, u8 *)
PCI_OP(read, word, u16 *)
PCI_OP(read, dword, u32 *)
PCI_OP(write, byte, u8)
PCI_OP(write, word, u16)
PCI_OP(write, dword, u32)
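
/*
 * Each PCI_OP() line above expands to a complete function.  For example,
 * PCI_OP(read, word, u16 *) generates (roughly):
 *
 *	int pci_read_config_word(struct pci_dev *dev, int pos, u16 *value)
 *	{
 *		int res;
 *		unsigned long flags;
 *		if (pos & 1) return PCIBIOS_BAD_REGISTER_NUMBER;
 *		spin_lock_irqsave(&pci_lock, flags);
 *		res = dev->bus->ops->read_word(dev, pos, value);
 *		spin_unlock_irqrestore(&pci_lock, flags);
 *		return res;
 *	}
 */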

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_MASTER)) {
		DBG("PCI: Enabling bus mastering for device %s\n", dev->slot_name);
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	pcibios_set_master(dev);
}

#ifndef HAVE_ARCH_PCI_MWI
/* This can be overridden by arch code. */
u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;

/**
 * pci_generic_prep_mwi - helper function for pci_set_mwi
 * @dev: the PCI device for which MWI is enabled
 *
 * Helper function for implementing the arch-specific pcibios_set_mwi
 * function.  Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_generic_prep_mwi(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or a multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_WARNING "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, dev->slot_name);

	return -EINVAL;
}
#endif /* !HAVE_ARCH_PCI_MWI */

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * and then calls @pcibios_set_mwi to do the needed arch specific
 * operations or a generic mwi-prep function.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

#ifdef HAVE_ARCH_PCI_MWI
	rc = pcibios_set_mwi(dev);
#else
	rc = pci_generic_prep_mwi(dev);
#endif

	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_INVALIDATE)) {
		DBG("PCI: Enabling Mem-Wr-Inval for device %s\n", dev->slot_name);
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables the PCI Memory-Write-Invalidate transaction on the device.
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}

int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}
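
/*
 * Usage sketch (hypothetical driver): a device that can only address the
 * low 30 bits of memory would declare that before any DMA mapping:
 *
 *	if (pci_set_dma_mask(pdev, 0x3fffffff)) {
 *		printk(KERN_ERR "mydev: no suitable DMA available\n");
 *		return -EIO;
 *	}
 */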

int
pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dac_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

/*
 * Translate the low bits of the PCI base
 * to the resource type
 */
static inline unsigned int pci_calc_resource_flags(unsigned int flags)
{
	if (flags & PCI_BASE_ADDRESS_SPACE_IO)
		return IORESOURCE_IO;

	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		return IORESOURCE_MEM | IORESOURCE_PREFETCH;

	return IORESOURCE_MEM;
}

/*
 * Find the extent of a PCI decode, do sanity checks.
 */
static u32 pci_size(u32 base, u32 maxbase, unsigned long mask)
{
	u32 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;
	size = size & ~(size-1);	/* Get the lowest of them to find the decode size */
	size -= 1;			/* extent = size - 1 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;		/* base == maxbase can be valid only
					   if the BAR has been already
					   programmed with all 1s */
	return size;
}
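
/*
 * Worked example for pci_size(): probing a BAR that decodes 1 MB of
 * memory space.  Writing ~0 and reading back gives maxbase = 0xfff00000
 * (the low 20 address bits don't stick).  With the memory mask:
 *
 *	size = 0xfff00000 & PCI_BASE_ADDRESS_MEM_MASK  -> 0xfff00000
 *	size & ~(size-1)                               -> 0x00100000
 *	size - 1                                       -> 0x000fffff
 *
 * i.e. the returned value is the extent (last offset) of the decode.
 */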

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg, next;
	u32 l, sz;
	struct resource *res;

	for(pos=0; pos<howmany; pos = next) {
		next = pos+1;
		res = &dev->resource[pos];
		res->name = dev->name;
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pci_read_config_dword(dev, reg, &l);
		pci_write_config_dword(dev, reg, ~0);
		pci_read_config_dword(dev, reg, &sz);
		pci_write_config_dword(dev, reg, l);
		if (!sz || sz == 0xffffffff)
			continue;
		if (l == 0xffffffff)
			l = 0;
		if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK);
			if (!sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
		} else {
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
			if (!sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_IO_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
		}
		res->end = res->start + (unsigned long) sz;
		res->flags |= pci_calc_resource_flags(l);
		if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK))
		    == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) {
			pci_read_config_dword(dev, reg+4, &l);
			next++;
#if BITS_PER_LONG == 64
			res->start |= ((unsigned long) l) << 32;
			res->end = res->start + sz;
			pci_write_config_dword(dev, reg+4, ~0);
			pci_read_config_dword(dev, reg+4, &sz);
			pci_write_config_dword(dev, reg+4, l);
			if (~sz)
				res->end = res->start + 0xffffffff +
						(((unsigned long) ~sz) << 32);
#else
			if (l) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit address for device %s\n", dev->slot_name);
				res->start = 0;
				res->flags = 0;
				continue;
			}
#endif
		}
	}
	if (rom) {
		dev->rom_base_reg = rom;
		res = &dev->resource[PCI_ROM_RESOURCE];
		res->name = dev->name;
		pci_read_config_dword(dev, rom, &l);
		pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE);
		pci_read_config_dword(dev, rom, &sz);
		pci_write_config_dword(dev, rom, l);
		if (l == 0xffffffff)
			l = 0;
		if (sz && sz != 0xffffffff) {
			sz = pci_size(l, sz, PCI_ROM_ADDRESS_MASK);
			if (!sz)
				return;
			res->flags = (l & PCI_ROM_ADDRESS_ENABLE) |
			  IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
			res->start = l & PCI_ROM_ADDRESS_MASK;
			res->end = res->start + (unsigned long) sz;
		}
	}
}

void __devinit pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct resource *res;
	int i;

	if (!dev)		/* It's a host bus, nothing to read */
		return;

	if (dev->transparent) {
		printk("Transparent bridge - %s\n", dev->name);
		for(i = 0; i < 4; i++)
			child->resource[i] = child->parent->resource[i];
		return;
	}

	for(i=0; i<3; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	if (base && base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		res->start = base;
		res->end = limit + 0xfff;
	}

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base && base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		res->start = base;
		res->end = limit + 0xfffff;
	}

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
#if BITS_PER_LONG == 64
		base |= ((long) mem_base_hi) << 32;
		limit |= ((long) mem_limit_hi) << 32;
#else
		if (mem_base_hi || mem_limit_hi) {
			printk(KERN_ERR "PCI: Unable to handle 64-bit address space for %s\n", child->name);
			return;
		}
#endif
	}
	if (base && base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
		res->start = base;
		res->end = limit + 0xfffff;
	}
}
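
/*
 * Decoding example for the I/O window above: if PCI_IO_BASE reads 0x21
 * and PCI_IO_LIMIT reads 0x31, then (masking the low type bits and
 * shifting by 8) base = 0x2000 and limit = 0x3000, so the bridge
 * forwards I/O accesses in 0x2000-0x3fff: the limit register is
 * granular to a 4K block, hence the "+ 0xfff".
 */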

static struct pci_bus * __devinit pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kmalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		memset(b, 0, sizeof(*b));
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
	}
	return b;
}

struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;
	int i;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();

	list_add_tail(&child->node, &parent->children);
	child->self = dev;
	dev->subordinate = child;
	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->secondary = busnr;
	child->primary = parent->secondary;
	child->subordinate = 0xff;

	/* Set up default resource pointers and names.. */
	for (i = 0; i < 4; i++) {
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}

	return child;
}

unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus);

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
static int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	unsigned int buses;
	unsigned short cr;
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	DBG("Scanning behind PCI bridge %s, config %06x, pass %d\n", dev->slot_name, buses & 0xffffff, pass);
	if ((buses & 0xffff00) && !pcibios_assign_all_busses()) {
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			return max;
		child = pci_add_new_bus(bus, dev, 0);
		child->primary = buses & 0xFF;
		child->secondary = (buses >> 8) & 0xFF;
		child->subordinate = (buses >> 16) & 0xFF;
		child->number = child->secondary;
		if (!is_cardbus) {
			unsigned int cmax = pci_do_scan_bus(child);
			if (cmax > max) max = cmax;
		} else {
			unsigned int cmax = child->subordinate;
			if (cmax > max) max = cmax;
		}
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.  We also keep all address decoders
		 * on the bridge disabled during scanning.  FIXME: Why?
		 */
		if (!pass)
			return max;
		pci_read_config_word(dev, PCI_COMMAND, &cr);
		pci_write_config_word(dev, PCI_COMMAND, 0x0000);
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		child = pci_add_new_bus(bus, dev, ++max);
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->secondary)   <<  8)
		      | ((unsigned int)(child->subordinate) << 16);
		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
		if (!is_cardbus) {
			/* Now we can scan all subordinate buses... */
			max = pci_do_scan_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			max += 3;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		child->subordinate = max;
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
		pci_write_config_word(dev, PCI_COMMAND, cr);
	}
	sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number);
	return max;
}
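
/*
 * Layout of the PCI_PRIMARY_BUS dword written above (one example, with
 * primary bus 0, secondary 1, subordinate still at the 0xff ceiling):
 *
 *	buses = 0x__ff0100:
 *	    bits  0-7  = 0x00  primary bus number
 *	    bits  8-15 = 0x01  secondary bus number
 *	    bits 16-23 = 0xff  subordinate bus (real value written later)
 *	    bits 24-31 = preserved (secondary latency timer)
 */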
1348 |
|
|
|
1349 |
|
|
/*
|
1350 |
|
|
* Read interrupt line and base address registers.
|
1351 |
|
|
* The architecture-dependent code can tweak these, of course.
|
1352 |
|
|
*/
|
1353 |
|
|
static void pci_read_irq(struct pci_dev *dev)
|
1354 |
|
|
{
|
1355 |
|
|
unsigned char irq;
|
1356 |
|
|
|
1357 |
|
|
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
|
1358 |
|
|
if (irq)
|
1359 |
|
|
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
|
1360 |
|
|
dev->irq = irq;
|
1361 |
|
|
}
|
1362 |
|
|
|
1363 |
|
|
/**
|
1364 |
|
|
* pci_setup_device - fill in class and map information of a device
|
1365 |
|
|
* @dev: the device structure to fill
|
1366 |
|
|
*
|
1367 |
|
|
* Initialize the device structure with information about the device's
|
1368 |
|
|
* vendor,class,memory and IO-space addresses,IRQ lines etc.
|
1369 |
|
|
* Called at initialisation of the PCI subsystem and by CardBus services.
|
1370 |
|
|
* Returns 0 on success and -1 if unknown type of device (not normal, bridge
|
1371 |
|
|
* or CardBus).
|
1372 |
|
|
*/
|
1373 |
|
|
int pci_setup_device(struct pci_dev * dev)
|
1374 |
|
|
{
|
1375 |
|
|
u32 class;
|
1376 |
|
|
|
1377 |
|
|
sprintf(dev->slot_name, "%02x:%02x.%d", dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
|
1378 |
|
|
sprintf(dev->name, "PCI device %04x:%04x", dev->vendor, dev->device);
|
1379 |
|
|
|
1380 |
|
|
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
|
1381 |
|
|
class >>= 8; /* upper 3 bytes */
|
1382 |
|
|
dev->class = class;
|
1383 |
|
|
class >>= 8;
|
1384 |
|
|
|
1385 |
|
|
DBG("Found %02x:%02x [%04x/%04x] %06x %02x\n", dev->bus->number, dev->devfn, dev->vendor, dev->device, class, dev->hdr_type);
|
1386 |
|
|
|
1387 |
|
|
/* "Unknown power state" */
|
1388 |
|
|
dev->current_state = 4;
|
1389 |
|
|
|
1390 |
|
|
switch (dev->hdr_type) { /* header type */
|
1391 |
|
|
case PCI_HEADER_TYPE_NORMAL: /* standard header */
|
1392 |
|
|
if (class == PCI_CLASS_BRIDGE_PCI)
|
1393 |
|
|
goto bad;
|
1394 |
|
|
pci_read_irq(dev);
|
1395 |
|
|
pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
|
1396 |
|
|
pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
|
1397 |
|
|
pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
|
1398 |
|
|
break;
|
1399 |
|
|
|
1400 |
|
|
case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
|
1401 |
|
|
if (class != PCI_CLASS_BRIDGE_PCI)
|
1402 |
|
|
goto bad;
|
1403 |
|
|
/* The PCI-to-PCI bridge spec requires that subtractive
|
1404 |
|
|
decoding (i.e. transparent) bridge must have programming
|
1405 |
|
|
interface code of 0x01. */
|
1406 |
|
|
dev->transparent = ((dev->class & 0xff) == 1);
|
1407 |
|
|
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
|
1408 |
|
|
break;
|
1409 |
|
|
|
1410 |
|
|
case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
|
1411 |
|
|
if (class != PCI_CLASS_BRIDGE_CARDBUS)
|
1412 |
|
|
goto bad;
|
1413 |
|
|
pci_read_irq(dev);
|
1414 |
|
|
pci_read_bases(dev, 1, 0);
|
1415 |
|
|
pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
|
1416 |
|
|
pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
|
1417 |
|
|
break;
|
1418 |
|
|
|
1419 |
|
|
default: /* unknown header */
|
1420 |
|
|
printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
|
1421 |
|
|
dev->slot_name, dev->hdr_type);
|
1422 |
|
|
return -1;
|
1423 |
|
|
|
1424 |
|
|
bad:
|
1425 |
|
|
printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
|
1426 |
|
|
dev->slot_name, class, dev->hdr_type);
|
1427 |
|
|
dev->class = PCI_CLASS_NOT_DEFINED;
|
1428 |
|
|
}
|
1429 |
|
|
|
1430 |
|
|
/* We found a fine healthy device, go go go... */
|
1431 |
|
|
return 0;
|
1432 |
|
|
}
|

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
struct pci_dev * __devinit pci_scan_device(struct pci_dev *temp)
{
	struct pci_dev *dev;
	u32 l;

	if (pci_read_config_dword(temp, PCI_VENDOR_ID, &l))
		return NULL;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
		return NULL;

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	memcpy(dev, temp, sizeof(*dev));
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it. */
	dev->dma_mask = 0xffffffff;
	if (pci_setup_device(dev) < 0) {
		kfree(dev);
		dev = NULL;
	}
	return dev;
}

struct pci_dev * __devinit pci_scan_slot(struct pci_dev *temp)
{
	struct pci_bus *bus = temp->bus;
	struct pci_dev *dev;
	struct pci_dev *first_dev = NULL;
	int func = 0;
	int is_multi = 0;
	u8 hdr_type;

	for (func = 0; func < 8; func++, temp->devfn++) {
		if (pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type))
			continue;
		temp->hdr_type = hdr_type & 0x7f;

		dev = pci_scan_device(temp);
		if (!pcibios_scan_all_fns() && func == 0) {
			if (!dev)
				break;
		} else {
			if (!dev)
				continue;
			is_multi = 1;
		}

		pci_name_device(dev);
		if (!first_dev) {
			is_multi = hdr_type & 0x80;
			first_dev = dev;
		}

		/*
		 * Link the device to both the global PCI device chain and
		 * the per-bus list of devices.
		 */
		list_add_tail(&dev->global_list, &pci_devices);
		list_add_tail(&dev->bus_list, &bus->devices);

		/* Fix up broken headers */
		pci_fixup_device(PCI_FIXUP_HEADER, dev);

		/*
		 * If this is a single function device
		 * don't scan past the first function.
		 */
		if (!is_multi)
			break;
	}
	return first_dev;
}
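
/*
 * A devfn, as stepped through above, encodes slot and function in one
 * byte: bits 7:3 are the device (slot) number, bits 2:0 the function.
 * Sketch using the standard <linux/pci.h> macros; illustrative only:
 */
#if 0
static void example_devfn(void)
{
	unsigned int devfn = PCI_DEVFN(3, 1);	/* slot 3, function 1 */

	printk(KERN_DEBUG "slot %d func %d\n",
	       PCI_SLOT(devfn), PCI_FUNC(devfn));
}
#endif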

unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
{
	unsigned int devfn, max, pass;
	struct list_head *ln;
	struct pci_dev *dev, dev0;

	DBG("Scanning bus %02x\n", bus->number);
	max = bus->secondary;

	/* Create a device template */
	memset(&dev0, 0, sizeof(dev0));
	dev0.bus = bus;
	dev0.sysdata = bus->sysdata;

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8) {
		dev0.devfn = devfn;
		pci_scan_slot(&dev0);
	}

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	DBG("Fixups for bus %02x\n", bus->number);
	pcibios_fixup_bus(bus);
	for (pass = 0; pass < 2; pass++)
		for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
			dev = pci_dev_b(ln);
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	DBG("Bus scan for %02x returning with max=%02x\n", bus->number, max);
	return max;
}
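
/*
 * Note on the two bridge passes above: pci_scan_bridge() is expected to
 * handle bridges that the firmware has already configured on the first
 * pass, and to assign bus numbers to unconfigured ones on the second,
 * so "max" only grows.  (Behaviour of pci_scan_bridge() as implemented
 * elsewhere in this file.)
 */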

int __devinit pci_bus_exists(const struct list_head *list, int nr)
{
	const struct list_head *l;

	for (l = list->next; l != list; l = l->next) {
		const struct pci_bus *b = pci_bus_b(l);
		if (b->number == nr || pci_bus_exists(&b->children, nr))
			return 1;
	}
	return 0;
}

struct pci_bus * __devinit pci_alloc_primary_bus(int bus)
{
	struct pci_bus *b;

	if (pci_bus_exists(&pci_root_buses, bus)) {
		/* If we already got to this bus through a different bridge, ignore it */
		DBG("PCI: Bus %02x already known\n", bus);
		return NULL;
	}

	b = pci_alloc_bus();
	if (!b)			/* don't dereference a failed allocation */
		return NULL;
	list_add_tail(&b->node, &pci_root_buses);

	b->number = b->secondary = bus;
	b->resource[0] = &ioport_resource;
	b->resource[1] = &iomem_resource;
	return b;
}

struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata)
{
	struct pci_bus *b = pci_alloc_primary_bus(bus);
	if (b) {
		b->sysdata = sysdata;
		b->ops = ops;
		b->subordinate = pci_do_scan_bus(b);
	}
	return b;
}
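
/*
 * Typical use: architecture setup code scans the root bus once its
 * config-space accessors are ready.  A minimal sketch, assuming a
 * hypothetical ops table "my_pci_ops" that is not defined in this file:
 */
#if 0
static struct pci_bus * __init example_scan_root(void)
{
	extern struct pci_ops my_pci_ops;	/* hypothetical */

	/* Bus 0, arch-provided config accessors, no private sysdata. */
	return pci_scan_bus(0, &my_pci_ops, NULL);
}
#endif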

#ifdef CONFIG_PM

/*
 * PCI Power management..
 *
 * This needs to be done centrally, so that we power manage PCI
 * devices in the right order: we should not shut down PCI bridges
 * before we've shut down the devices behind them, and we should
 * not wake up devices before we've woken up the bridge to the
 * device.
 *
 * We do not touch devices that don't have a driver that exports
 * a suspend/resume function. That is just too dangerous. If the default
 * PCI suspend/resume functions work for a device, the driver can
 * easily implement them (i.e. just have a suspend function that calls
 * the pci_set_power_state() function).
 */

static int pci_pm_save_state_device(struct pci_dev *dev, u32 state)
{
	int error = 0;
	if (dev) {
		struct pci_driver *driver = dev->driver;
		if (driver && driver->save_state)
			error = driver->save_state(dev, state);
	}
	return error;
}

static int pci_pm_suspend_device(struct pci_dev *dev, u32 state)
{
	int error = 0;
	if (dev) {
		struct pci_driver *driver = dev->driver;
		if (driver && driver->suspend)
			error = driver->suspend(dev, state);
	}
	return error;
}

static int pci_pm_resume_device(struct pci_dev *dev)
{
	int error = 0;
	if (dev) {
		struct pci_driver *driver = dev->driver;
		if (driver && driver->resume)
			error = driver->resume(dev);
	}
	return error;
}
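
/*
 * As the comment above suggests, a driver opts in to PCI power
 * management simply by filling in these hooks.  A minimal sketch;
 * the driver name and functions are hypothetical:
 */
#if 0
static int example_suspend(struct pci_dev *dev, u32 state)
{
	/* Quiesce the device, then power it down. */
	return pci_set_power_state(dev, 3);	/* D3hot */
}

static int example_resume(struct pci_dev *dev)
{
	return pci_set_power_state(dev, 0);	/* back to D0 */
}

static struct pci_driver example_driver = {
	name:		"example",
	suspend:	example_suspend,
	resume:		example_resume,
};
#endif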

static int pci_pm_save_state_bus(struct pci_bus *bus, u32 state)
{
	struct list_head *list;
	int error = 0;

	list_for_each(list, &bus->children) {
		error = pci_pm_save_state_bus(pci_bus_b(list), state);
		if (error)
			return error;
	}
	list_for_each(list, &bus->devices) {
		error = pci_pm_save_state_device(pci_dev_b(list), state);
		if (error)
			return error;
	}
	return 0;
}

static int pci_pm_suspend_bus(struct pci_bus *bus, u32 state)
{
	struct list_head *list;

	/* Walk the bus children list */
	list_for_each(list, &bus->children)
		pci_pm_suspend_bus(pci_bus_b(list), state);

	/* Walk the device children list */
	list_for_each(list, &bus->devices)
		pci_pm_suspend_device(pci_dev_b(list), state);
	return 0;
}

static int pci_pm_resume_bus(struct pci_bus *bus)
{
	struct list_head *list;

	/* Walk the device children list */
	list_for_each(list, &bus->devices)
		pci_pm_resume_device(pci_dev_b(list));

	/* And then walk the bus children */
	list_for_each(list, &bus->children)
		pci_pm_resume_bus(pci_bus_b(list));
	return 0;
}

static int pci_pm_save_state(u32 state)
{
	struct list_head *list;
	struct pci_bus *bus;
	int error = 0;

	list_for_each(list, &pci_root_buses) {
		bus = pci_bus_b(list);
		error = pci_pm_save_state_bus(bus, state);
		if (!error)
			error = pci_pm_save_state_device(bus->self, state);
	}
	return error;
}

static int pci_pm_suspend(u32 state)
{
	struct list_head *list;
	struct pci_bus *bus;

	list_for_each(list, &pci_root_buses) {
		bus = pci_bus_b(list);
		pci_pm_suspend_bus(bus, state);
		pci_pm_suspend_device(bus->self, state);
	}
	return 0;
}

int pci_pm_resume(void)
{
	struct list_head *list;
	struct pci_bus *bus;

	list_for_each(list, &pci_root_buses) {
		bus = pci_bus_b(list);
		pci_pm_resume_device(bus->self);
		pci_pm_resume_bus(bus);
	}
	return 0;
}

static int
pci_pm_callback(struct pm_dev *pm_device, pm_request_t rqst, void *data)
{
	int error = 0;

	switch (rqst) {
	case PM_SAVE_STATE:
		error = pci_pm_save_state((unsigned long)data);
		break;
	case PM_SUSPEND:
		error = pci_pm_suspend((unsigned long)data);
		break;
	case PM_RESUME:
		error = pci_pm_resume();
		break;
	default:
		break;
	}
	return error;
}

#endif

/*
 * Pool allocator ... wraps the pci_alloc_consistent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct pci_pool {	/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	int flags;
	struct pci_dev *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
};

struct pci_page {	/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned long bitmap[0];
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
#define	POOL_POISON_BYTE	0xa7

/* #define CONFIG_PCIPOOL_DEBUG */
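
/*
 * Free-block accounting: page->bitmap holds one bit per block on the
 * page (bit set == free), packed into an array of longs sized from
 * blocks_per_page.  E.g. with 96 blocks and 32-bit longs the bitmap
 * spans three longs, and block index 40 lives at bitmap[1], bit 8.
 */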

/**
 * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @pdev: pci device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * @flags: SLAB_* flags (not all are supported).
 *
 * Returns a pci allocation pool with the requested characteristics, or
 * null if one can't be created. Given one of these pools, pci_pool_alloc()
 * may be used to allocate memory. Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives. The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from pci_pool_alloc() won't
 * cross that size boundary. This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not
 * crossing 4 kbyte boundaries.
 */
struct pci_pool *
pci_pool_create (const char *name, struct pci_dev *pdev,
		size_t size, size_t align, size_t allocation, int flags)
{
	struct pci_pool	*retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		/* round up to the next multiple of align */
		size += align - 1;
		size &= ~(align - 1);
	}

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	if (!(retval = kmalloc (sizeof *retval, flags)))
		return retval;

#ifdef CONFIG_PCIPOOL_DEBUG
	flags |= SLAB_POISON;
#endif

	strncpy (retval->name, name, sizeof retval->name);
	retval->name [sizeof retval->name - 1] = 0;

	retval->dev = pdev;
	INIT_LIST_HEAD (&retval->page_list);
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->flags = flags;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);

#ifdef CONFIG_PCIPOOL_DEBUG
	printk (KERN_DEBUG "pcipool create %s/%s size %d, %d/page (%d alloc)\n",
		pdev ? pdev->slot_name : NULL, retval->name, size,
		retval->blocks_per_page, allocation);
#endif

	return retval;
}
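
/*
 * Worked example of the sizing above: size 300 with align 256 rounds
 * up to 512 (300 + 255 = 555, masked with ~255), and with allocation 0
 * on a 4 kbyte page the pool then packs 4096/512 = 8 blocks per page.
 */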

static struct pci_page *
pool_alloc_page (struct pci_pool *pool, int mem_flags)
{
	struct pci_page	*page;
	int mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = pci_alloc_consistent (pool->dev,
					    pool->allocation,
					    &page->dma);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	/* bit set == free */
		if (pool->flags & SLAB_POISON)
			memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
		list_add (&page->page_list, &pool->page_list);
	} else {
		kfree (page);
		page = NULL;
	}
	return page;
}

static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void
pool_free_page (struct pci_pool *pool, struct pci_page *page)
{
	dma_addr_t dma = page->dma;

	if (pool->flags & SLAB_POISON)
		memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
	pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
	list_del (&page->page_list);
	kfree (page);
}

/**
 * pci_pool_destroy - destroys a pool of pci memory blocks.
 * @pool: pci pool that will be destroyed
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
pci_pool_destroy (struct pci_pool *pool)
{
	unsigned long flags;

#ifdef CONFIG_PCIPOOL_DEBUG
	printk (KERN_DEBUG "pcipool destroy %s/%s\n",
		pool->dev ? pool->dev->slot_name : NULL,
		pool->name);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	while (!list_empty (&pool->page_list)) {
		struct pci_page	*page;
		page = list_entry (pool->page_list.next,
				   struct pci_page, page_list);
		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
			printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
				pool->dev ? pool->dev->slot_name : NULL,
				pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del (&page->page_list);
			kfree (page);
		} else
			pool_free_page (pool, page);
	}
	spin_unlock_irqrestore (&pool->lock, flags);
	kfree (pool);
}

/**
 * pci_pool_alloc - get a block of consistent memory
 * @pool: pci pool that will produce the block
 * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
{
	unsigned long flags;
	struct list_head *entry;
	struct pci_page	*page;
	int map, block;
	size_t offset;
	void *retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each (entry, &pool->page_list) {
		int i;
		page = list_entry (entry, struct pci_page, page_list);
		/* only cacheable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	if (!(page = pool_alloc_page (pool, mem_flags))) {
		if (mem_flags == SLAB_KERNEL) {
			DECLARE_WAITQUEUE (wait, current);

			current->state = TASK_INTERRUPTIBLE;
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			current->state = TASK_RUNNING;
			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}
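
/*
 * The map/block walk above is a two-level bitmap scan: "map" selects a
 * long, "block" a bit within it, and the block index is
 * map * BITS_PER_LONG + block.  E.g. with 32-bit longs, block index 40
 * is bitmap[1] bit 8, at byte offset 40 * pool->size into the page.
 */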

static struct pci_page *
pool_find_page (struct pci_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct list_head *entry;
	struct pci_page	*page;

	spin_lock_irqsave (&pool->lock, flags);
	list_for_each (entry, &pool->page_list) {
		page = list_entry (entry, struct pci_page, page_list);
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return page;
}

/**
 * pci_pool_free - put block back into pci pool
 * @pool: the pci pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct pci_page	*page;
	unsigned long flags;
	int map, block;

	if ((page = pool_find_page (pool, dma)) == NULL) {
		printk (KERN_ERR "pci_pool_free %s/%s, %p/%x (bad dma)\n",
			pool->dev ? pool->dev->slot_name : NULL,
			pool->name, vaddr, (int) (dma & 0xffffffff));
		return;
	}
#ifdef CONFIG_PCIPOOL_DEBUG
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%x\n",
			pool->dev ? pool->dev->slot_name : NULL,
			pool->name, vaddr, (int) (dma & 0xffffffff));
		return;
	}
#endif

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef CONFIG_PCIPOOL_DEBUG
	if (page->bitmap [map] & (1UL << block)) {
		printk (KERN_ERR "pci_pool_free %s/%s, dma %x already free\n",
			pool->dev ? pool->dev->slot_name : NULL,
			pool->name, dma);
		return;
	}
#endif
	if (pool->flags & SLAB_POISON)
		memset (vaddr, POOL_POISON_BYTE, pool->size);

	spin_lock_irqsave (&pool->lock, flags);
	set_bit (block, &page->bitmap [map]);
	if (waitqueue_active (&pool->waitq))
		wake_up (&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * it is not interrupt safe. Better have empty pages hang around.
	 */
	spin_unlock_irqrestore (&pool->lock, flags);
}
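
/*
 * Putting the pool API together: a minimal usage sketch for a driver
 * that needs many small DMA-consistent buffers.  Illustrative only;
 * the names are hypothetical:
 */
#if 0
static int example_use_pool(struct pci_dev *pdev)
{
	struct pci_pool *pool;
	dma_addr_t dma;
	void *buf;

	/* 64-byte blocks, 64-byte aligned, no boundary constraint. */
	pool = pci_pool_create("example", pdev, 64, 64, 0, SLAB_KERNEL);
	if (!pool)
		return -ENOMEM;

	buf = pci_pool_alloc(pool, SLAB_KERNEL, &dma);
	if (buf) {
		/* ... hand "dma" to the device, use "buf" from the CPU ... */
		pci_pool_free(pool, buf, dma);
	}
	pci_pool_destroy(pool);
	return buf ? 0 : -ENOMEM;
}
#endif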

void __devinit pci_init(void)
{
	struct pci_dev *dev;

	pcibios_init();

	pci_for_each_dev(dev) {
		pci_fixup_device(PCI_FIXUP_FINAL, dev);
	}

#ifdef CONFIG_PM
	pm_register(PM_PCI_DEV, 0, pci_pm_callback);
#endif
}

static int __devinit pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			/* PCI layer options should be handled here */
			printk(KERN_ERR "PCI: Unknown option `%s'\n", str);
		}
		str = k;
	}
	return 1;
}

__setup("pci=", pci_setup);
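
/*
 * Example: booting with "pci=nosort,conf1" hands each comma-separated
 * token to pcibios_setup().  The options themselves are architecture
 * defined (those two are i386 examples); anything unrecognised is
 * reported by the printk above.
 */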

EXPORT_SYMBOL(pci_read_config_byte);
EXPORT_SYMBOL(pci_read_config_word);
EXPORT_SYMBOL(pci_read_config_dword);
EXPORT_SYMBOL(pci_write_config_byte);
EXPORT_SYMBOL(pci_write_config_word);
EXPORT_SYMBOL(pci_write_config_dword);
EXPORT_SYMBOL(pci_devices);
EXPORT_SYMBOL(pci_root_buses);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_find_class);
EXPORT_SYMBOL(pci_find_device);
EXPORT_SYMBOL(pci_find_slot);
EXPORT_SYMBOL(pci_find_subsys);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_dac_set_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_register_driver);
EXPORT_SYMBOL(pci_unregister_driver);
EXPORT_SYMBOL(pci_dev_driver);
EXPORT_SYMBOL(pci_match_device);
EXPORT_SYMBOL(pci_find_parent_resource);

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pci_setup_device);
EXPORT_SYMBOL(pci_insert_device);
EXPORT_SYMBOL(pci_remove_device);
EXPORT_SYMBOL(pci_announce_device_to_drivers);
EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_do_scan_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bus);
EXPORT_SYMBOL(pci_scan_device);
EXPORT_SYMBOL(pci_read_bridge_bases);
#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(pci_proc_attach_device);
EXPORT_SYMBOL(pci_proc_detach_device);
EXPORT_SYMBOL(pci_proc_attach_bus);
EXPORT_SYMBOL(pci_proc_detach_bus);
EXPORT_SYMBOL(proc_bus_pci_dir);
#endif
#endif

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);

/* Obsolete functions */

EXPORT_SYMBOL(pcibios_present);
EXPORT_SYMBOL(pcibios_read_config_byte);
EXPORT_SYMBOL(pcibios_read_config_word);
EXPORT_SYMBOL(pcibios_read_config_dword);
EXPORT_SYMBOL(pcibios_write_config_byte);
EXPORT_SYMBOL(pcibios_write_config_word);
EXPORT_SYMBOL(pcibios_write_config_dword);
EXPORT_SYMBOL(pcibios_find_class);
EXPORT_SYMBOL(pcibios_find_device);

/* Quirk info */

EXPORT_SYMBOL(isa_dma_bridge_buggy);
EXPORT_SYMBOL(pci_pci_problems);

/* Pool allocator */

EXPORT_SYMBOL(pci_pool_create);
EXPORT_SYMBOL(pci_pool_destroy);
EXPORT_SYMBOL(pci_pool_alloc);
EXPORT_SYMBOL(pci_pool_free);