1 |
1275 |
phoenix |
/*
|
2 |
|
|
* linux/drivers/ide/ide-dma.c Version 4.13 May 21, 2003
|
3 |
|
|
*
|
4 |
|
|
* Copyright (c) 1999-2000 Andre Hedrick <andre@linux-ide.org>
|
5 |
|
|
* May be copied or modified under the terms of the GNU General Public License
|
6 |
|
|
*
|
7 |
|
|
* Portions Copyright Red Hat 2003
|
8 |
|
|
*/
|
9 |
|
|
|
10 |
|
|
/*
|
11 |
|
|
* Special Thanks to Mark for his Six years of work.
|
12 |
|
|
*
|
13 |
|
|
* Copyright (c) 1995-1998 Mark Lord
|
14 |
|
|
* May be copied or modified under the terms of the GNU General Public License
|
15 |
|
|
*/
|
16 |
|
|
|
17 |
|
|
/*
|
18 |
|
|
* This module provides support for the bus-master IDE DMA functions
|
19 |
|
|
* of various PCI chipsets, including the Intel PIIX (i82371FB for
|
20 |
|
|
* the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and
|
21 |
|
|
* 440 chipsets), and the PIIX4 (i82371AB for the 430 TX chipset)
|
22 |
|
|
* ("PIIX" stands for "PCI ISA IDE Xcellerator").
|
23 |
|
|
*
|
24 |
|
|
* Pretty much the same code works for other IDE PCI bus-mastering chipsets.
|
25 |
|
|
*
|
26 |
|
|
* DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
|
27 |
|
|
*
|
28 |
|
|
* By default, DMA support is prepared for use, but is currently enabled only
|
29 |
|
|
* for drives which already have DMA enabled (UltraDMA or mode 2 multi/single),
|
30 |
|
|
* or which are recognized as "good" (see table below). Drives with only mode0
|
31 |
|
|
* or mode1 (multi/single) DMA should also work with this chipset/driver
|
32 |
|
|
* (eg. MC2112A) but are not enabled by default.
|
33 |
|
|
*
|
34 |
|
|
* Use "hdparm -i" to view modes supported by a given drive.
|
35 |
|
|
*
|
36 |
|
|
* The hdparm-3.5 (or later) utility can be used for manually enabling/disabling
|
37 |
|
|
* DMA support, but must be (re-)compiled against this kernel version or later.
|
38 |
|
|
*
|
39 |
|
|
* To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting.
|
40 |
|
|
* If problems arise, ide.c will disable DMA operation after a few retries.
|
41 |
|
|
* This error recovery mechanism works and has been extremely well exercised.
|
42 |
|
|
*
|
43 |
|
|
* IDE drives, depending on their vintage, may support several different modes
|
44 |
|
|
* of DMA operation. The boot-time modes are indicated with a "*" in
|
45 |
|
|
* the "hdparm -i" listing, and can be changed with *knowledgeable* use of
|
46 |
|
|
* the "hdparm -X" feature. There is seldom a need to do this, as drives
|
47 |
|
|
* normally power-up with their "best" PIO/DMA modes enabled.
|
48 |
|
|
*
|
49 |
|
|
* Testing has been done with a rather extensive number of drives,
|
50 |
|
|
* with Quantum & Western Digital models generally outperforming the pack,
|
51 |
|
|
* and Fujitsu & Conner (and some Seagate which are really Conner) drives
|
52 |
|
|
* showing more lackluster throughput.
|
53 |
|
|
*
|
54 |
|
|
* Keep an eye on /var/adm/messages for "DMA disabled" messages.
|
55 |
|
|
*
|
56 |
|
|
* Some people have reported trouble with Intel Zappa motherboards.
|
57 |
|
|
* This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0,
|
58 |
|
|
* available from ftp://ftp.intel.com/pub/bios/10004bs0.exe
|
59 |
|
|
* (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this).
|
60 |
|
|
*
|
61 |
|
|
* Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
|
62 |
|
|
* fixing the problem with the BIOS on some Acer motherboards.
|
63 |
|
|
*
|
64 |
|
|
* Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
|
65 |
|
|
* "TX" chipset compatibility and for providing patches for the "TX" chipset.
|
66 |
|
|
*
|
67 |
|
|
* Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
|
68 |
|
|
* at generic DMA -- his patches were referred to when preparing this code.
|
69 |
|
|
*
|
70 |
|
|
* Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
|
71 |
|
|
* for supplying a Promise UDMA board & WD UDMA drive for this work!
|
72 |
|
|
*
|
73 |
|
|
* And, yes, Intel Zappa boards really *do* use both PIIX IDE ports.
|
74 |
|
|
*
|
75 |
|
|
* ATA-66/100 and recovery functions, I forgot the rest......
|
76 |
|
|
*
|
77 |
|
|
*/
|
78 |
|
|
|
79 |
|
|
#include <linux/config.h>
|
80 |
|
|
#define __NO_VERSION__
|
81 |
|
|
#include <linux/module.h>
|
82 |
|
|
#include <linux/types.h>
|
83 |
|
|
#include <linux/kernel.h>
|
84 |
|
|
#include <linux/timer.h>
|
85 |
|
|
#include <linux/mm.h>
|
86 |
|
|
#include <linux/interrupt.h>
|
87 |
|
|
#include <linux/pci.h>
|
88 |
|
|
#include <linux/init.h>
|
89 |
|
|
#include <linux/ide.h>
|
90 |
|
|
#include <linux/delay.h>
|
91 |
|
|
|
92 |
|
|
#include <asm/io.h>
|
93 |
|
|
#include <asm/irq.h>
|
94 |
|
|
|
95 |
|
|
#define CONFIG_IDEDMA_NEW_DRIVE_LISTINGS
|
96 |
|
|
|
97 |
|
|
#ifdef CONFIG_IDEDMA_NEW_DRIVE_LISTINGS
|
98 |
|
|
|
99 |
|
|
/*
 * One black/white-list entry.  Both fields point at string literals,
 * so they are const-qualified: writing through them would be UB.
 */
struct drive_list_entry {
	const char *id_model;		/* model string as reported by IDENTIFY */
	const char *id_firmware;	/* firmware revision substring, or "ALL" */
};
|
103 |
|
|
|
104 |
|
|
struct drive_list_entry drive_whitelist [] = {
|
105 |
|
|
|
106 |
|
|
{ "Micropolis 2112A" , "ALL" },
|
107 |
|
|
{ "CONNER CTMA 4000" , "ALL" },
|
108 |
|
|
{ "CONNER CTT8000-A" , "ALL" },
|
109 |
|
|
{ "ST34342A" , "ALL" },
|
110 |
|
|
{ 0 , 0 }
|
111 |
|
|
};
|
112 |
|
|
|
113 |
|
|
struct drive_list_entry drive_blacklist [] = {
|
114 |
|
|
|
115 |
|
|
{ "WDC AC11000H" , "ALL" },
|
116 |
|
|
{ "WDC AC22100H" , "ALL" },
|
117 |
|
|
{ "WDC AC32500H" , "ALL" },
|
118 |
|
|
{ "WDC AC33100H" , "ALL" },
|
119 |
|
|
{ "WDC AC31600H" , "ALL" },
|
120 |
|
|
{ "WDC AC32100H" , "24.09P07" },
|
121 |
|
|
{ "WDC AC23200L" , "21.10N21" },
|
122 |
|
|
{ "Compaq CRD-8241B" , "ALL" },
|
123 |
|
|
{ "CRD-8400B" , "ALL" },
|
124 |
|
|
{ "CRD-8480B", "ALL" },
|
125 |
|
|
{ "CRD-8480C", "ALL" },
|
126 |
|
|
{ "CRD-8482B", "ALL" },
|
127 |
|
|
{ "CRD-84" , "ALL" },
|
128 |
|
|
{ "SanDisk SDP3B" , "ALL" },
|
129 |
|
|
{ "SanDisk SDP3B-64" , "ALL" },
|
130 |
|
|
{ "SANYO CD-ROM CRD" , "ALL" },
|
131 |
|
|
{ "HITACHI CDR-8" , "ALL" },
|
132 |
|
|
{ "HITACHI CDR-8335" , "ALL" },
|
133 |
|
|
{ "HITACHI CDR-8435" , "ALL" },
|
134 |
|
|
{ "Toshiba CD-ROM XM-6202B" , "ALL" },
|
135 |
|
|
{ "CD-532E-A" , "ALL" },
|
136 |
|
|
{ "E-IDE CD-ROM CR-840", "ALL" },
|
137 |
|
|
{ "CD-ROM Drive/F5A", "ALL" },
|
138 |
|
|
{ "RICOH CD-R/RW MP7083A", "ALL" },
|
139 |
|
|
{ "WPI CDD-820", "ALL" },
|
140 |
|
|
{ "SAMSUNG CD-ROM SC-148C", "ALL" },
|
141 |
|
|
{ "SAMSUNG CD-ROM SC-148F", "ALL" },
|
142 |
|
|
{ "SAMSUNG CD-ROM SC", "ALL" },
|
143 |
|
|
{ "SanDisk SDP3B-64" , "ALL" },
|
144 |
|
|
{ "SAMSUNG CD-ROM SN-124", "ALL" },
|
145 |
|
|
{ "PLEXTOR CD-R PX-W8432T", "ALL" },
|
146 |
|
|
{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", "ALL" },
|
147 |
|
|
{ "_NEC DV5800A", "ALL" },
|
148 |
|
|
{ 0 , 0 }
|
149 |
|
|
|
150 |
|
|
};
|
151 |
|
|
|
152 |
|
|
/**
|
153 |
|
|
* in_drive_list - look for drive in black/white list
|
154 |
|
|
* @id: drive identifier
|
155 |
|
|
* @drive_table: list to inspect
|
156 |
|
|
*
|
157 |
|
|
* Look for a drive in the blacklist and the whitelist tables
|
158 |
|
|
* Returns 1 if the drive is found in the table.
|
159 |
|
|
*/
|
160 |
|
|
|
161 |
|
|
static int in_drive_list(struct hd_driveid *id, struct drive_list_entry * drive_table)
|
162 |
|
|
{
|
163 |
|
|
for ( ; drive_table->id_model ; drive_table++)
|
164 |
|
|
if ((!strcmp(drive_table->id_model, id->model)) &&
|
165 |
|
|
((strstr(drive_table->id_firmware, id->fw_rev)) ||
|
166 |
|
|
(!strcmp(drive_table->id_firmware, "ALL"))))
|
167 |
|
|
return 1;
|
168 |
|
|
return 0;
|
169 |
|
|
}
|
170 |
|
|
|
171 |
|
|
#else /* !CONFIG_IDEDMA_NEW_DRIVE_LISTINGS */
|
172 |
|
|
|
173 |
|
|
/*
|
174 |
|
|
* good_dma_drives() lists the model names (from "hdparm -i")
|
175 |
|
|
* of drives which do not support mode2 DMA but which are
|
176 |
|
|
* known to work fine with this interface under Linux.
|
177 |
|
|
*/
|
178 |
|
|
const char *good_dma_drives[] = {"Micropolis 2112A",
|
179 |
|
|
"CONNER CTMA 4000",
|
180 |
|
|
"CONNER CTT8000-A",
|
181 |
|
|
"ST34342A", /* for Sun Ultra */
|
182 |
|
|
NULL};
|
183 |
|
|
|
184 |
|
|
/*
|
185 |
|
|
* bad_dma_drives() lists the model names (from "hdparm -i")
|
186 |
|
|
* of drives which supposedly support (U)DMA but which are
|
187 |
|
|
* known to corrupt data with this interface under Linux.
|
188 |
|
|
*
|
189 |
|
|
* This is an empirical list. Its generated from bug reports. That means
|
190 |
|
|
* while it reflects actual problem distributions it doesn't answer whether
|
191 |
|
|
* the drive or the controller, or cabling, or software, or some combination
|
192 |
|
|
* thereof is the fault. If you don't happen to agree with the kernel's
|
193 |
|
|
* opinion of your drive - use hdparm to turn DMA on.
|
194 |
|
|
*/
|
195 |
|
|
const char *bad_dma_drives[] = {"WDC AC11000H",
|
196 |
|
|
"WDC AC22100H",
|
197 |
|
|
"WDC AC32100H",
|
198 |
|
|
"WDC AC32500H",
|
199 |
|
|
"WDC AC33100H",
|
200 |
|
|
"WDC AC31600H",
|
201 |
|
|
NULL};
|
202 |
|
|
|
203 |
|
|
#endif /* CONFIG_IDEDMA_NEW_DRIVE_LISTINGS */
|
204 |
|
|
|
205 |
|
|
/**
|
206 |
|
|
* ide_dma_intr - IDE DMA interrupt handler
|
207 |
|
|
* @drive: the drive the interrupt is for
|
208 |
|
|
*
|
209 |
|
|
* Handle an interrupt completing a read/write DMA transfer on an
|
210 |
|
|
* IDE device
|
211 |
|
|
*/
|
212 |
|
|
|
213 |
|
|
/*
 * Complete a DMA transfer: stop the engine via ->ide_dma_end(), read
 * the drive status, and on success complete every sector of the
 * current request.  Any failure (bad drive status or non-zero DMA
 * status) is routed to the driver's error handler.
 *
 * Fixes vs. original: removed a dead commented-out assignment and
 * added the missing KERN_ERR level to the failure printk.
 */
ide_startstop_t ide_dma_intr (ide_drive_t *drive)
{
	u8 stat = 0, dma_stat = 0;
	int i;

	/* stop the DMA engine first; non-zero means the transfer failed */
	dma_stat = HWIF(drive)->ide_dma_end(drive);
	stat = HWIF(drive)->INB(IDE_STATUS_REG);	/* get drive status */
	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
		if (!dma_stat) {
			struct request *rq = HWGROUP(drive)->rq;

			/* whole request transferred in one DMA op --
			 * complete it chunk by chunk */
			for (i = rq->nr_sectors; i > 0;) {
				i -= rq->current_nr_sectors;
				DRIVER(drive)->end_request(drive, 1);
			}
			return ide_stopped;
		}
		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
			drive->name, dma_stat);
	}
	return DRIVER(drive)->error(drive, "dma_intr", stat);
}
|
235 |
|
|
|
236 |
|
|
EXPORT_SYMBOL_GPL(ide_dma_intr);
|
237 |
|
|
|
238 |
|
|
/**
|
239 |
|
|
* ide_build_sglist - map IDE scatter gather for DMA I/O
|
240 |
|
|
* @hwif: the interface to build the DMA table for
|
241 |
|
|
* @rq: the request holding the sg list
|
242 |
|
|
* @ddir: data direction
|
243 |
|
|
*
|
244 |
|
|
 * Perform the PCI mapping magic necessary to access the source or
|
245 |
|
|
* target buffers of a request via PCI DMA. The lower layers of the
|
246 |
|
|
 * kernel provide the necessary cache management so that we can
|
247 |
|
|
* operate in a portable fashion
|
248 |
|
|
*/
|
249 |
|
|
|
250 |
|
|
/*
 * Walk the request's buffer_head chain and fill hwif->sg_table,
 * merging physically contiguous buffers into single entries.
 * Returns the entry count from pci_map_sg(), or 0 when more than
 * PRD_ENTRIES segments would be needed (caller falls back to PIO).
 */
static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq, int ddir)
{
	struct buffer_head *bh;
	struct scatterlist *sg = hwif->sg_table;
	/* ~0UL can never equal a real segment end, so the first bh
	 * always opens a new entry */
	unsigned long lastdataend = ~0UL;
	int nents = 0;

	/* only one live mapping per interface is allowed */
	if (hwif->sg_dma_active)
		BUG();

	bh = rq->bh;
	do {
		int contig = 0;

		/* does this buffer start exactly where the last ended? */
		if (bh->b_page) {
			if (bh_phys(bh) == lastdataend)
				contig = 1;
		} else {
			if ((unsigned long) bh->b_data == lastdataend)
				contig = 1;
		}

		if (contig) {
			/* extend previous entry; `continue` still advances
			 * bh via the while condition below */
			sg[nents - 1].length += bh->b_size;
			lastdataend += bh->b_size;
			continue;
		}

		/* table full: signal PIO fallback to the caller */
		if (nents >= PRD_ENTRIES)
			return 0;

		memset(&sg[nents], 0, sizeof(*sg));

		if (bh->b_page) {
			/* highmem-capable page + offset form */
			sg[nents].page = bh->b_page;
			sg[nents].offset = bh_offset(bh);
			lastdataend = bh_phys(bh) + bh->b_size;
		} else {
			/* legacy virtual-address form; an address inside
			 * page zero indicates a corrupt bh */
			if ((unsigned long) bh->b_data < PAGE_SIZE)
				BUG();

			sg[nents].address = bh->b_data;
			lastdataend = (unsigned long) bh->b_data + bh->b_size;
		}

		sg[nents].length = bh->b_size;
		nents++;
	} while ((bh = bh->b_reqnext) != NULL);

	if(nents == 0)
		BUG();

	/* record direction for the matching pci_unmap_sg() later */
	hwif->sg_dma_direction = ddir;
	return pci_map_sg(hwif->pci_dev, sg, nents, ddir);
}
|
305 |
|
|
|
306 |
|
|
/**
|
307 |
|
|
* ide_raw_build_sglist - map IDE scatter gather for DMA
|
308 |
|
|
* @hwif: the interface to build the DMA table for
|
309 |
|
|
* @rq: the request holding the sg list
|
310 |
|
|
*
|
311 |
|
|
 * Perform the PCI mapping magic necessary to access the source or
|
312 |
|
|
* target buffers of a taskfile request via PCI DMA. The lower layers
|
313 |
|
|
 * of the kernel provide the necessary cache management so that we can
|
314 |
|
|
* operate in a portable fashion
|
315 |
|
|
*/
|
316 |
|
|
|
317 |
|
|
/*
 * Build the scatter list for a raw taskfile request: the data lives
 * in one virtually contiguous buffer (rq->buffer), split into at
 * most two entries of up to 128 sectors each.
 *
 * NOTE(review): this assumes rq->nr_sectors <= 256; larger raw
 * requests would silently overflow the second entry -- presumably
 * callers never issue them, but confirm.
 *
 * Fix vs. original: the dead "#if 1 / #else" duplicate of this logic
 * (a while-loop variant that was never compiled) has been removed;
 * the branch that was actually built is kept unchanged.
 */
static int ide_raw_build_sglist (ide_hwif_t *hwif, struct request *rq)
{
	struct scatterlist *sg = hwif->sg_table;
	int nents = 0;
	ide_task_t *args = rq->special;
	u8 *virt_addr = rq->buffer;
	int sector_count = rq->nr_sectors;

	/* direction is encoded in the taskfile command type */
	if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
		hwif->sg_dma_direction = PCI_DMA_TODEVICE;
	else
		hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;

	/* first chunk: at most 128 sectors */
	if (sector_count > 128) {
		memset(&sg[nents], 0, sizeof(*sg));
		sg[nents].address = virt_addr;
		sg[nents].length = 128 * SECTOR_SIZE;
		nents++;
		virt_addr = virt_addr + (128 * SECTOR_SIZE);
		sector_count -= 128;
	}
	/* remainder */
	memset(&sg[nents], 0, sizeof(*sg));
	sg[nents].address = virt_addr;
	sg[nents].length = sector_count * SECTOR_SIZE;
	nents++;

	return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}
|
358 |
|
|
|
359 |
|
|
/**
|
360 |
|
|
* ide_build_dmatable - build IDE DMA table
|
361 |
|
|
*
|
362 |
|
|
* ide_build_dmatable() prepares a dma request. We map the command
|
363 |
|
|
* to get the pci bus addresses of the buffers and then build up
|
364 |
|
|
* the PRD table that the IDE layer wants to be fed. The code
|
365 |
|
|
* knows about the 64K wrap bug in the CS5530.
|
366 |
|
|
*
|
367 |
|
|
* Returns 0 if all went okay, returns 1 otherwise.
|
368 |
|
|
* May also be invoked from trm290.c
|
369 |
|
|
*/
|
370 |
|
|
|
371 |
|
|
/*
 * Build the PRD (physical region descriptor) table from the request's
 * scatter list.  Each PRD entry is an address word plus a count word;
 * entries never cross a 64KB boundary and the final entry carries the
 * end-of-table bit (except on the trm290, which encodes counts
 * differently and needs no EOT bit).  Returns the PRD entry count, or
 * 0 to make the caller fall back to PIO.
 */
int ide_build_dmatable (ide_drive_t *drive, struct request *rq, int ddir)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int *table = hwif->dmatable_cpu;
	/* trm290 uses a shifted "count - 1 in dwords" encoding */
	unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;

	/* taskfile requests carry a single raw buffer; normal requests
	 * carry a buffer_head chain */
	if (rq->cmd == IDE_DRIVE_TASKFILE)
		hwif->sg_nents = i = ide_raw_build_sglist(hwif, rq);
	else
		hwif->sg_nents = i = ide_build_sglist(hwif, rq, ddir);

	if (!i)
		return 0;

	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/*
		 * Fill in the dma table, without crossing any 64kB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */

		while (cur_len) {
			if (count++ >= PRD_ENTRIES) {
				printk("%s: DMA table too small\n", drive->name);
				goto use_pio_instead;
			} else {
				/* bcount: bytes remaining until the next
				 * 64KB boundary from cur_addr */
				u32 xcount, bcount = 0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;
				*table++ = cpu_to_le32(cur_addr);
				/* a count of 0x0000 conventionally means 64KB */
				xcount = bcount & 0xffff;
				if (is_trm290)
					xcount = ((xcount >> 2) - 1) << 16;
				if (xcount == 0x0000) {
					/* 
					 * Most chipsets correctly interpret a length of 0x0000 as 64KB,
					 * but at least one (e.g. CS5530) misinterprets it as zero (!).
					 * So here we break the 64KB entry into two 32KB entries instead.
					 */
					if (count++ >= PRD_ENTRIES) {
						printk("%s: DMA table too small\n", drive->name);
						goto use_pio_instead;
					}
					/* first 32KB half, then start the second */
					*table++ = cpu_to_le32(0x8000);
					*table++ = cpu_to_le32(cur_addr + 0x8000);
					xcount = 0x8000;
				}
				*table++ = cpu_to_le32(xcount);
				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg++;
		i--;
	}

	if (count) {
		/* mark the last PRD entry as end-of-table */
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}
	printk("%s: empty DMA table?\n", drive->name);
use_pio_instead:
	/* undo the mapping made by the sglist builder above */
	pci_unmap_sg(hwif->pci_dev,
		     hwif->sg_table,
		     hwif->sg_nents,
		     hwif->sg_dma_direction);
	hwif->sg_dma_active = 0;
	return 0; /* revert to PIO for this request */
}
|
453 |
|
|
|
454 |
|
|
EXPORT_SYMBOL_GPL(ide_build_dmatable);
|
455 |
|
|
|
456 |
|
|
/**
|
457 |
|
|
* ide_destroy_dmatable - clean up DMA mapping
|
458 |
|
|
* @drive: The drive to unmap
|
459 |
|
|
*
|
460 |
|
|
* Teardown mappings after DMA has completed. This must be called
|
461 |
|
|
* after the completion of each use of ide_build_dmatable and before
|
462 |
|
|
* the next use of ide_build_dmatable. Failure to do so will cause
|
463 |
|
|
* an oops as only one mapping can be live for each target at a given
|
464 |
|
|
* time.
|
465 |
|
|
*/
|
466 |
|
|
|
467 |
|
|
/*
 * Tear down the PCI scatter-gather mapping created by
 * ide_build_dmatable() and mark the interface as free for the
 * next mapping.
 */
void ide_destroy_dmatable (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	pci_unmap_sg(hwif->pci_dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);
	hwif->sg_dma_active = 0;
}
|
476 |
|
|
|
477 |
|
|
EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
|
478 |
|
|
|
479 |
|
|
/**
|
480 |
|
|
* config_drive_for_dma - attempt to activate IDE DMA
|
481 |
|
|
* @drive: the drive to place in DMA mode
|
482 |
|
|
*
|
483 |
|
|
* If the drive supports at least mode 2 DMA or UDMA of any kind
|
484 |
|
|
* then attempt to place it into DMA mode. Drives that are known to
|
485 |
|
|
* support DMA but predate the DMA properties or that are known
|
486 |
|
|
* to have DMA handling bugs are also set up appropriately based
|
487 |
|
|
* on the good/bad drive lists.
|
488 |
|
|
*/
|
489 |
|
|
|
490 |
|
|
/*
 * Decide whether to enable DMA for a drive, in priority order:
 * blacklist -> off; any enabled UltraDMA mode -> on; enabled mode-2
 * multi/single-word DMA -> on; whitelist -> on; otherwise quietly off.
 */
static int config_drive_for_dma (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct hd_driveid *id = drive->id;

	if ((id->capability & 1) && hwif->autodma) {
		/* known-bad drives never get DMA */
		if (hwif->ide_dma_bad_drive(drive))
			return hwif->ide_dma_off(drive);

		/* any UltraDMA mode (0..6) already enabled? */
		if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
			return hwif->ide_dma_on(drive);

		/* mode-2 multi-word or single-word DMA enabled? */
		if (id->field_valid & 2) {
			if ((id->dma_mword & 0x404) == 0x404 ||
			    (id->dma_1word & 0x404) == 0x404)
				return hwif->ide_dma_on(drive);
		}

		/* known-good legacy drives */
		if (hwif->ide_dma_good_drive(drive))
			return hwif->ide_dma_on(drive);
	}
	return hwif->ide_dma_off_quietly(drive);
}
|
522 |
|
|
|
523 |
|
|
/**
|
524 |
|
|
* dma_timer_expiry - handle a DMA timeout
|
525 |
|
|
* @drive: Drive that timed out
|
526 |
|
|
*
|
527 |
|
|
* An IDE DMA transfer timed out. In the event of an error we ask
|
528 |
|
|
* the driver to resolve the problem, if a DMA transfer is still
|
529 |
|
|
* in progress we continue to wait (arguably we need to add a
|
530 |
|
|
* secondary 'I dont care what the drive thinks' timeout here)
|
531 |
|
|
* Finally if we have an interrupt we let it complete the I/O.
|
532 |
|
|
* But only one time - we clear expiry and if it's still not
|
533 |
|
|
* completed after WAIT_CMD, we error and retry in PIO.
|
534 |
|
|
* This can occur if an interrupt is lost or due to hang or bugs.
|
535 |
|
|
*/
|
536 |
|
|
|
537 |
|
|
/*
 * Expiry callback for a timed-out DMA operation.  Returns a positive
 * number of jiffies to keep waiting, -1 to fail immediately, or 0 to
 * have the core reset the bus.  The order of the status checks below
 * is significant.
 */
static int dma_timer_expiry (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 dma_stat = hwif->INB(hwif->dma_status);

	printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
		drive->name, dma_stat);

	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
		return WAIT_CMD;

	/*
	 * Clear the expiry handler in case we decide to wait more,
	 * next time timer expires it is an error
	 */
	HWGROUP(drive)->expiry = NULL;

	/* 1 dmaing, 2 error, 4 intr */

	if (dma_stat & 2)	/* ERROR */
		return -1;

	if (dma_stat & 1)	/* DMAing -- keep waiting, one more time */
		return WAIT_CMD;

	if (dma_stat & 4)	/* Got an Interrupt -- let the ISR finish it */
		return WAIT_CMD;

	return 0;	/* Unknown status -- reset the bus */
}
|
567 |
|
|
|
568 |
|
|
/**
|
569 |
|
|
* __ide_dma_host_off - Generic DMA kill
|
570 |
|
|
* @drive: drive to control
|
571 |
|
|
*
|
572 |
|
|
* Perform the generic IDE controller DMA off operation. This
|
573 |
|
|
* works for most IDE bus mastering controllers
|
574 |
|
|
*/
|
575 |
|
|
|
576 |
|
|
/*
 * Clear this drive's "DMA capable" bit in the bus-master status
 * register (bit 5 for unit 0, bit 6 for unit 1).  Always returns 0.
 */
int __ide_dma_host_off (ide_drive_t *drive)
{
	ide_hwif_t *iface = HWIF(drive);
	u8 drv_bit = 1 << (5 + (drive->select.b.unit & 0x01));
	u8 status = iface->INB(iface->dma_status);

	iface->OUTB(status & ~drv_bit, iface->dma_status);
	return 0;
}
|
585 |
|
|
|
586 |
|
|
EXPORT_SYMBOL(__ide_dma_host_off);
|
587 |
|
|
|
588 |
|
|
/**
|
589 |
|
|
 * __ide_dma_off_quietly - Generic DMA kill
|
590 |
|
|
* @drive: drive to control
|
591 |
|
|
*
|
592 |
|
|
* Turn off the current DMA on this IDE controller.
|
593 |
|
|
*/
|
594 |
|
|
|
595 |
|
|
/*
 * Disable DMA for this drive without logging anything: clear the
 * using_dma flag, drop bounce buffering, then let the interface
 * method turn the controller side off.
 */
int __ide_dma_off_quietly (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	drive->using_dma = 0;
	ide_toggle_bounce(drive, 0);
	return hwif->ide_dma_host_off(drive);
}
|
601 |
|
|
|
602 |
|
|
EXPORT_SYMBOL(__ide_dma_off_quietly);
|
603 |
|
|
|
604 |
|
|
/**
|
605 |
|
|
 * __ide_dma_off - Generic DMA kill
|
606 |
|
|
* @drive: drive to control
|
607 |
|
|
*
|
608 |
|
|
* Turn off the current DMA on this IDE controller. Inform the
|
609 |
|
|
* user that DMA has been disabled.
|
610 |
|
|
*/
|
611 |
|
|
|
612 |
|
|
/*
 * Disable DMA for this drive and tell the user about it, then defer
 * to the quiet variant for the actual work.
 */
int __ide_dma_off (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	return hwif->ide_dma_off_quietly(drive);
}
|
617 |
|
|
|
618 |
|
|
EXPORT_SYMBOL(__ide_dma_off);
|
619 |
|
|
|
620 |
|
|
/**
|
621 |
|
|
* __ide_dma_host_on - Enable DMA on a host
|
622 |
|
|
* @drive: drive to enable for DMA
|
623 |
|
|
*
|
624 |
|
|
* Enable DMA on an IDE controller following generic bus mastering
|
625 |
|
|
* IDE controller behaviour
|
626 |
|
|
*/
|
627 |
|
|
|
628 |
|
|
/*
 * Set this drive's "DMA capable" bit in the bus-master status
 * register, but only if the drive is flagged as using DMA.
 * Returns 0 on success, 1 when the drive is not using DMA.
 */
int __ide_dma_host_on (ide_drive_t *drive)
{
	ide_hwif_t *iface;
	u8 drv_bit, status;

	if (!drive->using_dma)
		return 1;

	iface = HWIF(drive);
	drv_bit = 1 << (5 + (drive->select.b.unit & 0x01));
	status = iface->INB(iface->dma_status);
	iface->OUTB(status | drv_bit, iface->dma_status);
	return 0;
}
|
640 |
|
|
|
641 |
|
|
EXPORT_SYMBOL(__ide_dma_host_on);
|
642 |
|
|
|
643 |
|
|
/**
|
644 |
|
|
* __ide_dma_on - Enable DMA on a device
|
645 |
|
|
* @drive: drive to enable DMA on
|
646 |
|
|
*
|
647 |
|
|
* Enable IDE DMA for a device on this IDE controller.
|
648 |
|
|
*/
|
649 |
|
|
|
650 |
|
|
/*
 * Enable DMA for this drive: set the using_dma flag, enable bounce
 * buffering, then let the interface method flip the controller bit.
 */
int __ide_dma_on (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	drive->using_dma = 1;
	ide_toggle_bounce(drive, 1);
	return hwif->ide_dma_host_on(drive);
}
|
656 |
|
|
|
657 |
|
|
EXPORT_SYMBOL(__ide_dma_on);
|
658 |
|
|
|
659 |
|
|
/**
|
660 |
|
|
* __ide_dma_check - check DMA setup
|
661 |
|
|
* @drive: drive to check
|
662 |
|
|
*
|
663 |
|
|
* Don't use - due for extermination
|
664 |
|
|
*/
|
665 |
|
|
|
666 |
|
|
int __ide_dma_check (ide_drive_t *drive)
{
	/* thin wrapper kept only for the hwif method-pointer interface;
	 * flagged for removal (see comment above) */
	return config_drive_for_dma(drive);
}
|
670 |
|
|
|
671 |
|
|
EXPORT_SYMBOL(__ide_dma_check);
|
672 |
|
|
|
673 |
|
|
/*
 * Set up and start a bus-master DMA read for the current request:
 * build the PRD table, program the BM registers, then issue the
 * READ DMA command.  Returns 1 to ask for PIO fallback, otherwise
 * whatever ->ide_dma_count() returns.  The register write order
 * below is deliberate -- do not reorder.
 */
int __ide_dma_read (ide_drive_t *drive /*, struct request *rq */)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq = HWGROUP(drive)->rq;
//	ide_task_t *args = rq->special;
	unsigned int reading = 1 << 3;	/* BM command reg: bit 3 = read */
	unsigned int count = 0;
	u8 dma_stat = 0, lba48 = (drive->addressing == 1) ? 1 : 0;
	task_ioreg_t command = WIN_NOP;

	if (!(count = ide_build_dmatable(drive, rq, PCI_DMA_FROMDEVICE)))
		/* try PIO instead of DMA */
		return 1;
	/* PRD table */
	hwif->OUTL(hwif->dmatable_dma, hwif->dma_prdtable);
	/* specify r/w */
	hwif->OUTB(reading, hwif->dma_command);
	/* read dma_status for INTR & ERROR flags */
	dma_stat = hwif->INB(hwif->dma_status);
	/* clear INTR & ERROR flags */
	hwif->OUTB(dma_stat|6, hwif->dma_status);
	drive->waiting_for_dma = 1;
	/* non-disk media issue their own command elsewhere */
	if (drive->media != ide_disk)
		return 0;
	/*
	 * FIX ME to use only ACB ide_task_t args Struct
	 */
#if 0
	{
		ide_task_t *args = rq->special;
		command = args->tfRegister[IDE_COMMAND_OFFSET];
	}
#else
	/* default command by addressing mode; taskfile requests carry
	 * their own opcode and override it */
	command = (lba48) ? WIN_READDMA_EXT : WIN_READDMA;
	if (rq->cmd == IDE_DRIVE_TASKFILE) {
		ide_task_t *args = rq->special;
		command = args->tfRegister[IDE_COMMAND_OFFSET];
	}
#endif
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
	return HWIF(drive)->ide_dma_count(drive);
}
|
716 |
|
|
|
717 |
|
|
EXPORT_SYMBOL(__ide_dma_read);
|
718 |
|
|
|
719 |
|
|
/*
 * Set up and start a bus-master DMA write for the current request.
 * Mirror image of __ide_dma_read(): direction bit clear, transfer
 * mapped TODEVICE, WRITE DMA opcodes.  Returns 1 to ask for PIO
 * fallback, otherwise whatever ->ide_dma_count() returns.
 */
int __ide_dma_write (ide_drive_t *drive /*, struct request *rq */)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq = HWGROUP(drive)->rq;
//	ide_task_t *args = rq->special;
	unsigned int reading = 0;	/* BM command reg: bit 3 clear = write */
	unsigned int count = 0;
	u8 dma_stat = 0, lba48 = (drive->addressing == 1) ? 1 : 0;
	task_ioreg_t command = WIN_NOP;

	if (!(count = ide_build_dmatable(drive, rq, PCI_DMA_TODEVICE)))
		/* try PIO instead of DMA */
		return 1;
	/* PRD table */
	hwif->OUTL(hwif->dmatable_dma, hwif->dma_prdtable);
	/* specify r/w */
	hwif->OUTB(reading, hwif->dma_command);
	/* read dma_status for INTR & ERROR flags */
	dma_stat = hwif->INB(hwif->dma_status);
	/* clear INTR & ERROR flags */
	hwif->OUTB(dma_stat|6, hwif->dma_status);
	drive->waiting_for_dma = 1;
	/* non-disk media issue their own command elsewhere */
	if (drive->media != ide_disk)
		return 0;
	/*
	 * FIX ME to use only ACB ide_task_t args Struct
	 */
#if 0
	{
		ide_task_t *args = rq->special;
		command = args->tfRegister[IDE_COMMAND_OFFSET];
	}
#else
	/* default command by addressing mode; taskfile requests carry
	 * their own opcode and override it */
	command = (lba48) ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;
	if (rq->cmd == IDE_DRIVE_TASKFILE) {
		ide_task_t *args = rq->special;
		command = args->tfRegister[IDE_COMMAND_OFFSET];
	}
#endif
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
	return HWIF(drive)->ide_dma_count(drive);
}
|
762 |
|
|
|
763 |
|
|
EXPORT_SYMBOL(__ide_dma_write);
|
764 |
|
|
|
765 |
|
|
/*
 * Kick off the bus-master engine by setting the start bit in the
 * BM command register.  Always returns 0.
 *
 * This runs *after* the command has been issued to the drive, as the
 * BM-IDE spec requires; the Promise Ultra33 misbehaves if the engine
 * is started before the drive command goes out.
 */
int __ide_dma_begin (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 cmd = hwif->INB(hwif->dma_command);

	hwif->OUTB(cmd | 1, hwif->dma_command);	/* start DMA */
	return 0;
}
|
779 |
|
|
|
780 |
|
|
EXPORT_SYMBOL(__ide_dma_begin);
|
781 |
|
|
|
782 |
|
|
/* returns non-zero on error, 0 otherwise */
|
783 |
|
|
/*
 * Stop the bus-master engine and tear down the transfer.  The fixed
 * sequence -- stop engine, read status, ack INTR+ERROR, unmap --
 * is preserved exactly.  Returns 0 on a clean finish (only the
 * interrupt bit set), otherwise 0x10 OR'd with the raw status.
 */
int __ide_dma_end (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 status, cmd;

	drive->waiting_for_dma = 0;
	cmd = hwif->INB(hwif->dma_command);
	hwif->OUTB(cmd & ~1, hwif->dma_command);	/* stop the engine */
	status = hwif->INB(hwif->dma_status);
	hwif->OUTB(status | 6, hwif->dma_status);	/* ack INTR & ERROR */
	ide_destroy_dmatable(drive);			/* purge DMA mappings */
	return (status & 7) != 4 ? (0x10 | status) : 0;
}
|
802 |
|
|
|
803 |
|
|
EXPORT_SYMBOL(__ide_dma_end);
|
804 |
|
|
|
805 |
|
|
/* returns 1 if dma irq issued, 0 otherwise */
|
806 |
|
|
/*
 * Poll the bus-master status register to see whether this interrupt
 * came from our DMA engine.  Returns 1 if the INTR bit is set,
 * 0 otherwise (warning if we were not expecting DMA at all).
 */
int __ide_dma_test_irq (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 dma_stat = hwif->INB(hwif->dma_status);

#if 0  /* do not set unless you know what you are doing */
	if (dma_stat & 4) {
		u8 stat = hwif->INB(IDE_STATUS_REG);
		/* NOTE(review): argument order here looks reversed
		 * compared to every live OUTB(value, port) call in this
		 * file -- dead code, but fix before ever enabling */
		hwif->OUTB(hwif->dma_status, dma_stat & 0xE4);
	}
#endif
	/* return 1 if INTR asserted */
	if ((dma_stat & 4) == 4)
		return 1;
	if (!drive->waiting_for_dma)
		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
			drive->name, __FUNCTION__);
#if 0
	drive->waiting_for_dma++;
#endif
	return 0;
}
|
828 |
|
|
|
829 |
|
|
EXPORT_SYMBOL(__ide_dma_test_irq);
|
830 |
|
|
|
831 |
|
|
/*
 * __ide_dma_bad_drive: check the drive's IDENTIFY model string against
 * the (U)DMA blacklist.  Returns nonzero (and logs) when the drive is
 * known-bad for DMA, 0 otherwise.
 *
 * Fixes vs. original: printk now carries an explicit KERN_WARNING log
 * level, and the non-idiomatic `return(blacklist);` parentheses are gone.
 */
int __ide_dma_bad_drive (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;

#ifdef CONFIG_IDEDMA_NEW_DRIVE_LISTINGS
	int blacklist = in_drive_list(id, drive_blacklist);
	if (blacklist) {
		printk(KERN_WARNING "%s: Disabling (U)DMA for %s\n",
			drive->name, id->model);
		return blacklist;
	}
#else /* !CONFIG_IDEDMA_NEW_DRIVE_LISTINGS */
	const char **list;
	/* Consult the list of known "bad" drives */
	list = bad_dma_drives;
	while (*list) {
		if (!strcmp(*list++, id->model)) {
			printk(KERN_WARNING "%s: Disabling (U)DMA for %s\n",
				drive->name, id->model);
			return 1;
		}
	}
#endif /* CONFIG_IDEDMA_NEW_DRIVE_LISTINGS */
	return 0;
}

EXPORT_SYMBOL(__ide_dma_bad_drive);
|
857 |
|
|
|
858 |
|
|
/*
 * __ide_dma_good_drive: returns nonzero when the drive's IDENTIFY model
 * string appears on the (U)DMA whitelist, 0 otherwise.
 */
int __ide_dma_good_drive (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;

#ifdef CONFIG_IDEDMA_NEW_DRIVE_LISTINGS
	return in_drive_list(id, drive_whitelist);
#else /* !CONFIG_IDEDMA_NEW_DRIVE_LISTINGS */
	const char **list;
	/* Consult the list of known "good" drives */
	for (list = good_dma_drives; *list != NULL; list++) {
		if (strcmp(*list, id->model) == 0)
			return 1;
	}
#endif /* CONFIG_IDEDMA_NEW_DRIVE_LISTINGS */
	return 0;
}

EXPORT_SYMBOL(__ide_dma_good_drive);
|
877 |
|
|
|
878 |
|
|
/*
 * Used for HOST FIFO counters for VDMA
 * PIO over DMA, effective ATA-Bridge operator.
 *
 * Default implementation: simply delegates to the interface's
 * ide_dma_begin hook — chipsets with real FIFO counters override this.
 */
int __ide_dma_count (ide_drive_t *drive)
{
	return HWIF(drive)->ide_dma_begin(drive);
}

EXPORT_SYMBOL(__ide_dma_count);
|
888 |
|
|
|
889 |
|
|
/*
 * __ide_dma_verbose: print the DMA mode the drive reports via IDENTIFY
 * (continuing a log line started by the caller, hence the leading ", ").
 * Decodes id->dma_ultra / dma_mword / dma_1word; if both the "selected
 * mode" bytes of two fields are set at once the data is inconsistent, so
 * DMA is switched off via the hwif hook.  Returns 1 normally, or the
 * ide_dma_off_quietly result on the inconsistency paths.
 */
int __ide_dma_verbose (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;
	ide_hwif_t *hwif = HWIF(drive);

	if (id->field_valid & 4) {
		/* a UDMA mode and an MWDMA mode both marked "selected":
		   firmware bug — turn DMA off */
		if ((id->dma_ultra >> 8) && (id->dma_mword >> 8)) {
			printk(", BUG DMA OFF");
			return hwif->ide_dma_off_quietly(drive);
		}
		if (id->dma_ultra & ((id->dma_ultra >> 8) & hwif->ultra_mask)) {
			/* modes above UDMA2 need an 80-wire cable */
			if (((id->dma_ultra >> 11) & 0x1F) &&
			    eighty_ninty_three(drive)) {
				if ((id->dma_ultra >> 15) & 1) {
					printk(", UDMA(mode 7)");
				} else if ((id->dma_ultra >> 14) & 1) {
					printk(", UDMA(133)");
				} else if ((id->dma_ultra >> 13) & 1) {
					printk(", UDMA(100)");
				} else if ((id->dma_ultra >> 12) & 1) {
					printk(", UDMA(66)");
				} else if ((id->dma_ultra >> 11) & 1) {
					printk(", UDMA(44)");
				} else
					/* none of bits 11-15 matched; fall
					   through to the low-mode decoder.
					   Note: deliberate goto into the
					   else block below — legal C. */
					goto mode_two;
			} else {
mode_two:
				if ((id->dma_ultra >> 10) & 1) {
					printk(", UDMA(33)");
				} else if ((id->dma_ultra >> 9) & 1) {
					printk(", UDMA(25)");
				} else if ((id->dma_ultra >> 8) & 1) {
					printk(", UDMA(16)");
				}
			}
		} else {
			printk(", (U)DMA");	/* Can be BIOS-enabled! */
		}
	} else if (id->field_valid & 2) {
		/* same inconsistency check for MWDMA vs SWDMA */
		if ((id->dma_mword >> 8) && (id->dma_1word >> 8)) {
			printk(", BUG DMA OFF");
			return hwif->ide_dma_off_quietly(drive);
		}
		printk(", DMA");
	} else if (id->field_valid & 1) {
		printk(", BUG");
	}
	return 1;
}

EXPORT_SYMBOL(__ide_dma_verbose);
|
940 |
|
|
|
941 |
|
|
/**
 *	__ide_dma_retune	-	default retune handler
 *	@drive: drive to retune
 *
 *	Default behaviour when we decide to retune the IDE DMA setup.
 *	The default behaviour is "we don't" — retuning is a chipset-
 *	specific operation, so this stub just warns and reports failure.
 */

int __ide_dma_retune (ide_drive_t *drive)
{
	printk(KERN_WARNING "%s: chipset supported call only\n", __FUNCTION__);
	/* 1 = not supported here; chipset drivers override this hook */
	return 1;
}

EXPORT_SYMBOL(__ide_dma_retune);
|
956 |
|
|
|
957 |
|
|
/*
 * __ide_dma_lostirq: default handler invoked when an expected DMA
 * interrupt never arrived.  Logs the event and returns 1 so the core
 * proceeds with its recovery path.
 *
 * Fix vs. original: the printk now carries an explicit KERN_ERR level
 * instead of falling back to the implicit default.
 */
int __ide_dma_lostirq (ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
	return 1;
}

EXPORT_SYMBOL(__ide_dma_lostirq);
|
964 |
|
|
|
965 |
|
|
/*
 * __ide_dma_timeout: default handler for a DMA transfer that timed out.
 * If the interrupt did in fact fire (we just missed it), report success
 * so normal completion handling can run; otherwise stop the engine via
 * the ide_dma_end hook and return its status.
 */
int __ide_dma_timeout (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);

	if (hwif->ide_dma_test_irq(drive))
		return 0;

	return hwif->ide_dma_end(drive);
}

EXPORT_SYMBOL(__ide_dma_timeout);
|
974 |
|
|
|
975 |
|
|
/*
|
976 |
|
|
* Needed for allowing full modular support of ide-driver
|
977 |
|
|
*/
|
978 |
|
|
int ide_release_dma_engine (ide_hwif_t *hwif)
|
979 |
|
|
{
|
980 |
|
|
if (hwif->dmatable_cpu) {
|
981 |
|
|
pci_free_consistent(hwif->pci_dev,
|
982 |
|
|
PRD_ENTRIES * PRD_BYTES,
|
983 |
|
|
hwif->dmatable_cpu,
|
984 |
|
|
hwif->dmatable_dma);
|
985 |
|
|
hwif->dmatable_cpu = NULL;
|
986 |
|
|
}
|
987 |
|
|
if (hwif->sg_table) {
|
988 |
|
|
kfree(hwif->sg_table);
|
989 |
|
|
hwif->sg_table = NULL;
|
990 |
|
|
}
|
991 |
|
|
return 1;
|
992 |
|
|
}
|
993 |
|
|
|
994 |
|
|
/*
 * ide_release_mmio_dma: release the memory-mapped bus-master register
 * regions claimed by ide_mmio_dma().  Always returns 1.
 *
 * BUG FIX vs. original: the secondary-range branch released
 * hwif->dma_base a second time instead of hwif->dma_base2 (copy-paste
 * error), leaving dma_base2 reserved forever and double-releasing
 * dma_base.
 */
int ide_release_mmio_dma (ide_hwif_t *hwif)
{
	if ((hwif->dma_extra) && (hwif->channel == 0))
		release_mem_region((hwif->dma_base + 16), hwif->dma_extra);
	release_mem_region(hwif->dma_base, 8);
	if (hwif->dma_base2)
		release_mem_region(hwif->dma_base2, 8);
	return 1;
}
|
1003 |
|
|
|
1004 |
|
|
/*
 * ide_release_iomio_dma: release the I/O-port bus-master regions
 * claimed by ide_iomio_dma().  Always returns 1.
 *
 * BUG FIX vs. original: the secondary-range branch released
 * hwif->dma_base again instead of hwif->dma_base2 (same copy-paste
 * error as the MMIO variant), double-releasing the primary range and
 * leaking the secondary one.
 */
int ide_release_iomio_dma (ide_hwif_t *hwif)
{
	if ((hwif->dma_extra) && (hwif->channel == 0))
		release_region((hwif->dma_base + 16), hwif->dma_extra);
	release_region(hwif->dma_base, 8);
	if (hwif->dma_base2)
		release_region(hwif->dma_base2, 8);
	return 1;
}
|
1013 |
|
|
|
1014 |
|
|
/*
|
1015 |
|
|
* Needed for allowing full modular support of ide-driver
|
1016 |
|
|
*/
|
1017 |
|
|
int ide_release_dma (ide_hwif_t *hwif)
|
1018 |
|
|
{
|
1019 |
|
|
if (hwif->chipset == ide_etrax100)
|
1020 |
|
|
return 1;
|
1021 |
|
|
|
1022 |
|
|
ide_release_dma_engine(hwif);
|
1023 |
|
|
|
1024 |
|
|
if (hwif->mmio==2)
|
1025 |
|
|
return 1;
|
1026 |
|
|
if (hwif->mmio)
|
1027 |
|
|
return ide_release_mmio_dma(hwif);
|
1028 |
|
|
return ide_release_iomio_dma(hwif);
|
1029 |
|
|
}
|
1030 |
|
|
|
1031 |
|
|
/*
 * ide_allocate_dma_engine: allocate the PRD table (DMA-coherent) and
 * the scatter/gather table for an interface.  Returns 0 on success,
 * 1 on failure (after releasing whichever allocation did succeed).
 */
int ide_allocate_dma_engine (ide_hwif_t *hwif)
{
	hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
						  PRD_ENTRIES * PRD_BYTES,
						  &hwif->dmatable_dma);
	hwif->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES,
				GFP_KERNEL);

	if ((hwif->dmatable_cpu) && (hwif->sg_table))
		return 0;

	/* NOTE(review): the message pieces compose oddly — when only the
	   CPU table failed this prints " CPU DMA", and when only the SG
	   table failed it prints " SG DMA"; presumably intentional
	   shorthand, but worth confirming. */
	printk(KERN_ERR "%s: -- Error, unable to allocate%s%s table(s).\n",
		(hwif->dmatable_cpu == NULL) ? " CPU" : "",
		(hwif->sg_table == NULL) ? " SG DMA" : " DMA",
		hwif->cds->name);

	/* free the half-completed allocation, if any */
	ide_release_dma_engine(hwif);
	return 1;
}
|
1050 |
|
|
|
1051 |
|
|
/*
 * ide_mapped_mmio_dma: record DMA register addresses for an interface
 * whose bus-master registers were already ioremapped by the chipset
 * driver — no resource regions are claimed here.  Always returns 0.
 */
int ide_mapped_mmio_dma (ide_hwif_t *hwif, unsigned long base, unsigned int ports)
{
	printk(KERN_INFO " %s: MMIO-DMA ", hwif->name);

	hwif->dma_base = base;
	if ((hwif->cds->extra) && (hwif->channel == 0))
		hwif->dma_extra = hwif->cds->extra;

	/* There is an issue to watch here.  The master might not be
	   registered because the BIOS disabled it.  Eventually this should
	   be fixed by always registering the mate */
	hwif->dma_master = (hwif->mate && hwif->channel)
			 ? hwif->mate->dma_base : base;
	return 0;
}
|
1068 |
|
|
|
1069 |
|
|
/*
 * ide_mmio_dma: claim the memory-mapped bus-master register range for an
 * interface and record the register addresses.  Returns 0 on success,
 * 1 if the primary range is already in use.
 *
 * Fix vs. original: the check_mem_region()/request_mem_region() pair was
 * a TOCTOU race and ignored the request result; both the primary and the
 * secondary range now use a single checked request_mem_region() call,
 * matching the idiom already used by ide_iomio_dma().  The secondary
 * range remains best-effort (failure is ignored), as before.
 */
int ide_mmio_dma (ide_hwif_t *hwif, unsigned long base, unsigned int ports)
{
	printk(KERN_INFO " %s: MMIO-DMA at 0x%08lx-0x%08lx",
		hwif->name, base, base + ports - 1);
	if (!request_mem_region(base, ports, hwif->name)) {
		printk(" -- Error, MMIO ports already in use.\n");
		return 1;
	}
	hwif->dma_base = base;
	/* extra vendor registers live past the standard block, on the
	   first channel only */
	if ((hwif->cds->extra) && (hwif->channel == 0)) {
		request_region(base+16, hwif->cds->extra, hwif->cds->name);
		hwif->dma_extra = hwif->cds->extra;
	}

	/* There is an issue to watch here.  The master might not be
	   registered because the BIOS disabled it.  Eventually this should
	   be fixed by always registering the mate */
	if (hwif->mate == NULL)
		hwif->dma_master = base;
	else
		hwif->dma_master = (hwif->channel) ? hwif->mate->dma_base : base;

	if (hwif->dma_base2)
		request_mem_region(hwif->dma_base2, ports, hwif->name);
	return 0;
}
|
1098 |
|
|
|
1099 |
|
|
/*
 * ide_iomio_dma: claim the I/O-port bus-master register range for an
 * interface and record the register addresses.  Returns 0 on success,
 * 1 if either the primary or the secondary range is already in use
 * (the primary range is released again on the secondary failure path).
 */
int ide_iomio_dma (ide_hwif_t *hwif, unsigned long base, unsigned int ports)
{
	printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx",
		hwif->name, base, base + ports - 1);

	if (!request_region(base, ports, hwif->name)) {
		printk(" -- Error, ports in use.\n");
		return 1;
	}

	hwif->dma_base = base;
	/* extra vendor registers past the standard block, channel 0 only */
	if ((hwif->cds->extra) && (hwif->channel == 0)) {
		request_region(base+16, hwif->cds->extra, hwif->cds->name);
		hwif->dma_extra = hwif->cds->extra;
	}

	/* There is an issue to watch here.  The master might not be
	   registered because the BIOS disabled it.  Eventually this should
	   be fixed by always registering the mate */
	hwif->dma_master = (hwif->mate && hwif->channel)
			 ? hwif->mate->dma_base : base;

	if (hwif->dma_base2 &&
	    !request_region(hwif->dma_base2, ports, hwif->name)) {
		printk(" -- Error, secondary ports in use.\n");
		release_region(base, ports);
		return 1;
	}
	return 0;
}
|
1130 |
|
|
|
1131 |
|
|
/*
|
1132 |
|
|
*
|
1133 |
|
|
*/
|
1134 |
|
|
int ide_dma_iobase (ide_hwif_t *hwif, unsigned long base, unsigned int ports)
|
1135 |
|
|
{
|
1136 |
|
|
if (hwif->mmio == 2)
|
1137 |
|
|
return ide_mapped_mmio_dma(hwif, base, ports);
|
1138 |
|
|
if (hwif->mmio)
|
1139 |
|
|
return ide_mmio_dma(hwif, base, ports);
|
1140 |
|
|
return ide_iomio_dma(hwif, base, ports);
|
1141 |
|
|
}
|
1142 |
|
|
|
1143 |
|
|
/*
 * This can be called for a dynamically installed interface. Don't __init it
 *
 * ide_setup_dma: claim the bus-master register range, allocate the DMA
 * engine tables, fill in any register addresses and method hooks the
 * chipset driver left unset with the generic defaults above, then log
 * the BIOS's DMA/PIO settings for both drives.
 */
void ide_setup_dma (ide_hwif_t *hwif, unsigned long dma_base, unsigned int num_ports)
{
	/* reserve the register range; bail silently if it is taken */
	if (ide_dma_iobase(hwif, dma_base, num_ports))
		return;

	if (ide_allocate_dma_engine(hwif)) {
		ide_release_dma(hwif);
		return;
	}

	/* default register layout: command, two vendor bytes, status,
	   PRD table pointer at consecutive offsets from dma_base */
	if (!(hwif->dma_command))
		hwif->dma_command = hwif->dma_base;
	if (!(hwif->dma_vendor1))
		hwif->dma_vendor1 = (hwif->dma_base + 1);
	if (!(hwif->dma_status))
		hwif->dma_status = (hwif->dma_base + 2);
	if (!(hwif->dma_vendor3))
		hwif->dma_vendor3 = (hwif->dma_base + 3);
	if (!(hwif->dma_prdtable))
		hwif->dma_prdtable = (hwif->dma_base + 4);

	/* install generic method implementations for every hook the
	   chipset driver did not provide */
	if (!hwif->ide_dma_off)
		hwif->ide_dma_off = &__ide_dma_off;
	if (!hwif->ide_dma_off_quietly)
		hwif->ide_dma_off_quietly = &__ide_dma_off_quietly;
	if (!hwif->ide_dma_host_off)
		hwif->ide_dma_host_off = &__ide_dma_host_off;
	if (!hwif->ide_dma_on)
		hwif->ide_dma_on = &__ide_dma_on;
	if (!hwif->ide_dma_host_on)
		hwif->ide_dma_host_on = &__ide_dma_host_on;
	if (!hwif->ide_dma_check)
		hwif->ide_dma_check = &__ide_dma_check;
	if (!hwif->ide_dma_read)
		hwif->ide_dma_read = &__ide_dma_read;
	if (!hwif->ide_dma_write)
		hwif->ide_dma_write = &__ide_dma_write;
	if (!hwif->ide_dma_count)
		hwif->ide_dma_count = &__ide_dma_count;
	if (!hwif->ide_dma_begin)
		hwif->ide_dma_begin = &__ide_dma_begin;
	if (!hwif->ide_dma_end)
		hwif->ide_dma_end = &__ide_dma_end;
	if (!hwif->ide_dma_test_irq)
		hwif->ide_dma_test_irq = &__ide_dma_test_irq;
	if (!hwif->ide_dma_bad_drive)
		hwif->ide_dma_bad_drive = &__ide_dma_bad_drive;
	if (!hwif->ide_dma_good_drive)
		hwif->ide_dma_good_drive = &__ide_dma_good_drive;
	if (!hwif->ide_dma_verbose)
		hwif->ide_dma_verbose = &__ide_dma_verbose;
	if (!hwif->ide_dma_timeout)
		hwif->ide_dma_timeout = &__ide_dma_timeout;
	if (!hwif->ide_dma_retune)
		hwif->ide_dma_retune = &__ide_dma_retune;
	if (!hwif->ide_dma_lostirq)
		hwif->ide_dma_lostirq = &__ide_dma_lostirq;

	/* report the "drive capable" bits the BIOS left in the status
	   register; the TRM290 is skipped — presumably its status
	   register differs (TODO confirm) */
	if (hwif->chipset != ide_trm290) {
		u8 dma_stat = hwif->INB(hwif->dma_status);
		printk(", BIOS settings: %s:%s, %s:%s",
			hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "pio",
			hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio");
	}
	printk("\n");

	/* every path above should have set dma_master */
	if (!(hwif->dma_master))
		BUG();
}

EXPORT_SYMBOL_GPL(ide_setup_dma);
|