OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [ide/] [pci/] [sgiioc4.c] - Blame information for rev 1275

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * Copyright (c) 2003 Silicon Graphics, Inc.  All Rights Reserved.
3
 *
4
 * This program is free software; you can redistribute it and/or modify it
5
 * under the terms of version 2 of the GNU General Public License
6
 * as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope that it would be useful, but
9
 * WITHOUT ANY WARRANTY; without even the implied warranty of
10
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11
 *
12
 * Further, this software is distributed without any warranty that it is
13
 * free of the rightful claim of any third person regarding infringement
14
 * or the like.  Any license provided herein, whether implied or
15
 * otherwise, applies only to this software file.  Patent licenses, if
16
 * any, provided herein do not apply to combinations of this program with
17
 * other software, or any other product whatsoever.
18
 *
19
 * You should have received a copy of the GNU General Public
20
 * License along with this program; if not, write the Free Software
21
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22
 *
23
 * Contact information:  Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24
 * Mountain View, CA  94043, or:
25
 *
26
 * http://www.sgi.com
27
 *
28
 * For further information regarding this notice, see:
29
 *
30
 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
31
 */
32
 
33
#include <linux/config.h>
34
#include <linux/module.h>
35
#include <linux/types.h>
36
#include <linux/pci.h>
37
#include <linux/delay.h>
38
#include <linux/hdreg.h>
39
#include <linux/init.h>
40
#include <linux/kernel.h>
41
#include <linux/timer.h>
42
#include <linux/mm.h>
43
#include <linux/ioport.h>
44
#include <linux/blkdev.h>
45
#include <asm/io.h>
46
#include "sgiioc4.h"
47
 
48
extern int dma_timer_expiry(ide_drive_t * drive);
49
 
50
#ifdef CONFIG_PROC_FS
51
static u8 sgiioc4_proc;
52
#endif /* CONFIG_PROC_FS */
53
 
54
static int n_sgiioc4_devs ;
55
 
56
static inline void
57
xide_delay(long ticks)
58
{
59
        if (!ticks)
60
                return;
61
 
62
        current->state = TASK_UNINTERRUPTIBLE;
63
        schedule_timeout(ticks);
64
}
65
 
66
static void __init
67
sgiioc4_ide_setup_pci_device(struct pci_dev *dev, const char *name)
68
{
69
        unsigned long base = 0, ctl = 0, dma_base = 0, irqport = 0;
70
        ide_hwif_t *hwif = NULL;
71
        int h = 0;
72
 
73
        /*  Get the CmdBlk and CtrlBlk Base Registers */
74
        base = pci_resource_start(dev, 0) + IOC4_CMD_OFFSET;
75
        ctl = pci_resource_start(dev, 0) + IOC4_CTRL_OFFSET;
76
        irqport = pci_resource_start(dev, 0) + IOC4_INTR_OFFSET;
77
        dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
78
 
79
        for (h = 0; h < MAX_HWIFS; ++h) {
80
                hwif = &ide_hwifs[h];
81
                /* Find an empty HWIF */
82
                if (hwif->chipset == ide_unknown)
83
                        break;
84
        }
85
 
86
        if (hwif->io_ports[IDE_DATA_OFFSET] != base) {
87
                /* Initialize the IO registers */
88
                sgiioc4_init_hwif_ports(&hwif->hw, base, ctl, irqport);
89
                memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof (hwif->io_ports));
90
                hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
91
        }
92
 
93
        hwif->chipset = ide_pci;
94
        hwif->pci_dev = dev;
95
        hwif->channel = 0;       /* Single Channel chip */
96
        hwif->hw.ack_intr = &sgiioc4_checkirq;  /* MultiFunction Chip */
97
 
98
        /* Initializing chipset IRQ Registers */
99
        hwif->OUTL(0x03, irqport + IOC4_INTR_SET * 4);
100
 
101
        (void) ide_init_sgiioc4(hwif);
102
 
103
        if (dma_base)
104
                ide_dma_sgiioc4(hwif, dma_base);
105
        else
106
                printk(KERN_INFO "%s: %s Bus-Master DMA disabled \n", hwif->name, name);
107
}
108
 
109
/* XXX Hack to ensure we can build this for generic kernels without
110
 * having all the SN2 code sync'd and merged.  For now this is
111
 * acceptable but this should be resolved ASAP. PV#: 896401 */
112
 
113
pciio_endian_t __attribute__((weak)) snia_pciio_endian_set(struct pci_dev *pci_dev, pciio_endian_t device_end, pciio_endian_t desired_end);
114
 
115
static unsigned int __init
116
pci_init_sgiioc4(struct pci_dev *dev, const char *name)
117
{
118
 
119
        if (pci_enable_device(dev)) {
120
                printk(KERN_INFO "Failed to enable device %s at slot %s \n",name,dev->slot_name);
121
                return 1;
122
        }
123
        pci_set_master(dev);
124
 
125
        /* Enable Byte Swapping in the PIC... */
126
        if (snia_pciio_endian_set) {
127
                /* ... if the symbol exists (hack to get this to build
128
                 * for SuSE before we merge the SN2 code */
129
                snia_pciio_endian_set(dev, PCIDMA_ENDIAN_LITTLE, PCIDMA_ENDIAN_BIG);
130
        } else {
131
                printk(KERN_INFO "Failed to set endianness for device %s at slot %s \n", name, dev->slot_name);
132
                return 1;
133
        }
134
 
135
#ifdef CONFIG_PROC_FS
136
        sgiioc4_devs[n_sgiioc4_devs++] = dev;
137
        if (!sgiioc4_proc) {
138
                sgiioc4_proc = 1;
139
                ide_pci_register_host_proc(&sgiioc4_procs[0]);
140
        }
141
#endif
142
        sgiioc4_ide_setup_pci_device(dev, name);
143
        return 0;
144
}
145
 
146
static void
147
sgiioc4_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t data_port,
148
                        ide_ioreg_t ctrl_port, ide_ioreg_t irq_port)
149
{
150
        ide_ioreg_t reg = data_port;
151
        int i;
152
 
153
        for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
154
                hw->io_ports[i] = reg + i * 4;  /* Registers are word (32 bit) aligned */
155
 
156
        if (ctrl_port)
157
                hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
158
 
159
        if (irq_port)
160
                hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
161
}
162
 
163
/*
 * HBA-specific reset hook: stop the IOC4 DMA engine first, then clear
 * any pending IOC4 interrupt/error bits.
 */
static void
sgiioc4_resetproc(ide_drive_t * drive)
{
        sgiioc4_ide_dma_end(drive);
        sgiioc4_clearirq(drive);
}
169
 
170
static void
171
sgiioc4_maskproc(ide_drive_t * drive, int mask)
172
{
173
        ide_hwif_t *hwif = HWIF(drive);
174
        hwif->OUTB(mask ? (drive->ctl | 2) : (drive->ctl & ~2), IDE_CONTROL_REG);
175
}
176
 
177
/*
 * Hook the IOC4-specific routines and capabilities into the hwif.
 * NULL hooks fall back to the generic IDE layer behavior.  Note the
 * chip does Multiword DMA mode 2 only: UDMA is explicitly disabled.
 */
static void __init
ide_init_sgiioc4(ide_hwif_t * hwif)
{
        hwif->autodma = 1;
        hwif->index = 0; /* Channel 0 */
        hwif->channel = 0;
        hwif->atapi_dma = 1;
        hwif->ultra_mask = 0x0; /* Disable Ultra DMA */
        hwif->mwdma_mask = 0x2; /* Multimode-2 DMA  */
        hwif->swdma_mask = 0x2;
        hwif->identify = NULL;
        hwif->tuneproc = NULL;  /* Sets timing for PIO mode */
        hwif->speedproc = NULL; /* Sets timing for DMA &/or PIO modes */
        hwif->selectproc = NULL;        /* Use the default selection routine to select drive */
        hwif->reset_poll = NULL;        /* No HBA specific reset_poll needed */
        hwif->pre_reset = NULL; /* No HBA specific pre_set needed */
        hwif->resetproc = &sgiioc4_resetproc;   /* Reset the IOC4 DMA engine, clear interrupts etc */
        hwif->intrproc = NULL;  /* Enable or Disable interrupt from drive */
        hwif->maskproc = &sgiioc4_maskproc;     /* Mask on/off NIEN register */
        hwif->quirkproc = NULL;
        hwif->busproc = NULL;

        /* DMA method table: all IOC4-specific except the generic
         * bad/good-drive checks and retune. */
        hwif->ide_dma_read = &sgiioc4_ide_dma_read;
        hwif->ide_dma_write = &sgiioc4_ide_dma_write;
        hwif->ide_dma_begin = &sgiioc4_ide_dma_begin;
        hwif->ide_dma_end = &sgiioc4_ide_dma_end;
        hwif->ide_dma_check = &sgiioc4_ide_dma_check;
        hwif->ide_dma_on = &sgiioc4_ide_dma_on;
        hwif->ide_dma_off = &sgiioc4_ide_dma_off;
        hwif->ide_dma_off_quietly = &sgiioc4_ide_dma_off_quietly;
        hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
        hwif->ide_dma_host_on = &sgiioc4_ide_dma_host_on;
        hwif->ide_dma_host_off = &sgiioc4_ide_dma_host_off;
        hwif->ide_dma_bad_drive = &__ide_dma_bad_drive;
        hwif->ide_dma_good_drive = &__ide_dma_good_drive;
        hwif->ide_dma_count = &sgiioc4_ide_dma_count;
        hwif->ide_dma_verbose = &sgiioc4_ide_dma_verbose;
        hwif->ide_dma_retune = &__ide_dma_retune;
        hwif->ide_dma_lostirq = &sgiioc4_ide_dma_lostirq;
        hwif->ide_dma_timeout = &sgiioc4_ide_dma_timeout;
        /* Custom status-register read that also acks IOC4 interrupts */
        hwif->INB = &sgiioc4_INB;
}
219
 
220
static int
221
sgiioc4_ide_dma_read(ide_drive_t * drive)
222
{
223
        struct request *rq = HWGROUP(drive)->rq;
224
        unsigned int count = 0;
225
 
226
        if (!(count = sgiioc4_build_dma_table(drive, rq, PCI_DMA_FROMDEVICE))) {
227
                /* try PIO instead of DMA */
228
                return 1;
229
        }
230
        /* Writes FROM the IOC4 TO Main Memory */
231
        sgiioc4_configure_for_dma(IOC4_DMA_WRITE, drive);
232
 
233
        return 0;
234
}
235
 
236
static int
237
sgiioc4_ide_dma_write(ide_drive_t * drive)
238
{
239
        struct request *rq = HWGROUP(drive)->rq;
240
        unsigned int count = 0;
241
 
242
        if (!(count = sgiioc4_build_dma_table(drive, rq, PCI_DMA_TODEVICE))) {
243
                /* try PIO instead of DMA */
244
                return 1;
245
        }
246
 
247
        sgiioc4_configure_for_dma(IOC4_DMA_READ, drive);
248
        /* Writes TO the IOC4 FROM Main Memory */
249
 
250
        return 0;
251
}
252
 
253
static int
254
sgiioc4_ide_dma_begin(ide_drive_t * drive)
255
{
256
        ide_hwif_t *hwif = HWIF(drive);
257
        unsigned int reg = hwif->INL(hwif->dma_base + IOC4_DMA_CTRL * 4);
258
        unsigned int temp_reg = reg | IOC4_S_DMA_START;
259
 
260
        hwif->OUTL(temp_reg, hwif->dma_base + IOC4_DMA_CTRL * 4);
261
 
262
        return 0;
263
}
264
 
265
/* Stops the IOC4 DMA Engine */
266
static int
sgiioc4_ide_dma_end(ide_drive_t * drive)
{
        u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
        ide_hwif_t *hwif = HWIF(drive);
        uint64_t dma_base = hwif->dma_base;
        int dma_stat = 0, count;
        unsigned long *ending_dma = (unsigned long *) hwif->dma_base2;

        /* Request the engine to stop */
        hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4);

        /* Poll (with growing sleeps: 0, 10, 20, ... jiffies, up to 10
         * rounds) until the STOP bit clears */
        count = 0;
        do {
                xide_delay(count);
                ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);
                count += 10;
        } while ((ioc4_dma & IOC4_S_DMA_STOP) && (count < 100));

        if (ioc4_dma & IOC4_S_DMA_STOP) {
                printk(KERN_ERR "sgiioc4_stopdma(%s): IOC4 DMA STOP bit is still 1 : ioc4_dma_reg 0x%x\n", drive->name, ioc4_dma);
                dma_stat = 1;
        }

        /* Wait for the "ending DMA" cacheline (zeroed when the transfer
         * was configured) to go non-zero, signalling the final data has
         * actually landed in memory.
         * NOTE(review): if dma_base2 was never allocated, 'valid' stays
         * 0 and the stale-data warning below fires spuriously — confirm
         * intended. */
        if (ending_dma) {
                do {
                        for (num = 0; num < 16; num++) {
                                /* '& ~0ul' is just a non-zero test */
                                if (ending_dma[num] & (~0ul)) {
                                        valid = 1;
                                        break;
                                }
                        }
                        xide_delay(cnt);
                } while ((cnt++ < 100) && (!valid));
        }

        if (!valid)
                printk(KERN_INFO "sgiioc4_ide_dma_end(%s) : Stale DMA Data in Memory\n", drive->name);

        /* Compare the device-side and memory-side byte counters; a
         * device count well ahead of memory means a truncated transfer */
        bc_dev = hwif->INL(dma_base + IOC4_BC_DEV * 4);
        bc_mem = hwif->INL(dma_base + IOC4_BC_MEM * 4);

        if ((bc_dev & 0x01FF) || (bc_mem & 0x1FF)) {
                if (bc_dev > bc_mem + 8) {
                        printk(KERN_ERR "sgiioc4_ide_dma_end(%s) : WARNING!!! byte_count_at_dev %d != byte_count_at_mem %d\n",
                               drive->name, bc_dev, bc_mem);
                }
        }

        drive->waiting_for_dma = 0;
        ide_destroy_dmatable(drive);

        return dma_stat;
}
319
 
320
static int
321
sgiioc4_ide_dma_check(ide_drive_t * drive)
322
{
323
        if (ide_config_drive_speed(drive,XFER_MW_DMA_2)!=0) {
324
                printk(KERN_INFO "Couldnot set %s in Multimode-2 DMA mode | Drive %s using PIO instead\n",
325
                                drive->name, drive->name);
326
                drive->using_dma = 0;
327
        } else
328
                drive->using_dma = 1;
329
 
330
        return 0;
331
}
332
 
333
static int
sgiioc4_ide_dma_on(ide_drive_t * drive)
{
        /* Mark the drive as using DMA, then enable it host-side. */
        drive->using_dma = 1;

        return HWIF(drive)->ide_dma_host_on(drive);
}
340
 
341
static int
sgiioc4_ide_dma_off(ide_drive_t * drive)
{
        /* Announce the change, then delegate to the quiet variant. */
        printk(KERN_INFO "%s: DMA disabled\n", drive->name);

        return HWIF(drive)->ide_dma_off_quietly(drive);
}
348
 
349
static int
sgiioc4_ide_dma_off_quietly(ide_drive_t * drive)
{
        /* Clear the drive's DMA flag and disable DMA host-side. */
        drive->using_dma = 0;

        return HWIF(drive)->ide_dma_host_off(drive);
}
356
 
357
/* returns 1 if dma irq issued, 0 otherwise */
358
static int
sgiioc4_ide_dma_test_irq(ide_drive_t * drive)
{
        /* 1 when the IOC4 interrupt/error bits are raised, else 0. */
        return sgiioc4_checkirq(HWIF(drive));
}
363
 
364
static int
365
sgiioc4_ide_dma_host_on(ide_drive_t * drive)
366
{
367
        if (drive->using_dma)
368
                return 0;
369
 
370
        return 1;
371
}
372
 
373
static int
sgiioc4_ide_dma_host_off(ide_drive_t * drive)
{
        /* Host-side DMA off: just clear any pending IOC4 interrupt. */
        sgiioc4_clearirq(drive);

        return 0;
}
380
 
381
static int
sgiioc4_ide_dma_count(ide_drive_t * drive)
{
        /* NOTE(review): no byte-count bookkeeping here — this simply
         * starts the transfer via ide_dma_begin.  Confirm callers of
         * the count hook expect that on this chipset. */
        return HWIF(drive)->ide_dma_begin(drive);
}
386
 
387
static int
388
sgiioc4_ide_dma_verbose(ide_drive_t * drive)
389
{
390
        if (drive->using_dma == 1)
391
                printk(", UDMA(16)");
392
        else
393
                printk(", PIO");
394
 
395
        return 1;
396
}
397
 
398
static int
sgiioc4_ide_dma_lostirq(ide_drive_t * drive)
{
        /* Reset the IOC4 DMA engine/interrupts, then let the generic
         * lost-IRQ handler do its bookkeeping. */
        HWIF(drive)->resetproc(drive);

        return __ide_dma_lostirq(drive);
}
405
 
406
static int
sgiioc4_ide_dma_timeout(ide_drive_t * drive)
{
        printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
        /* If an interrupt is in fact pending, treat the timeout as a
         * race and do nothing; otherwise stop the DMA engine. */
        if (HWIF(drive)->ide_dma_test_irq(drive))
                return 0;

        return HWIF(drive)->ide_dma_end(drive);
}
415
 
416
/*
 * Byte-read replacement for the hwif: when reading the IOC4 ATA
 * status register and the drive is not busy, also check and clear the
 * chip-level interrupt/error bits so a stale interrupt cannot stick.
 */
static u8
sgiioc4_INB(unsigned long port)
{
        u8 reg = (u8) inb(port);

        if ((port & 0xFFF) == 0x11C) {  /* Status register of IOC4 */
                if (reg & 0x51) {       /* Not busy...check for interrupt */
                        /* Interrupt register sits 0x110 below the
                         * status register in the IOC4 register map */
                        unsigned long other_ir = port - 0x110;
                        unsigned int intr_reg = (u32) inl(other_ir);

                        if (intr_reg & 0x03) {
                                /* Clear the Interrupt, Error bits on the IOC4 */
                                outl(0x03, other_ir);
                                /* re-read — presumably to flush the
                                 * posted write; confirm */
                                intr_reg = (u32) inl(other_ir);
                        }
                }
        }

        return reg;
}
436
 
437
/* Creates a dma map for the scatter-gather list entries */
438
static void __init
439
ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
440
{
441
        int num_ports = sizeof (ioc4_dma_regs_t);
442
 
443
        printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, dma_base, dma_base + num_ports - 1);
444
 
445
        if (!request_region(dma_base, num_ports, hwif->name)) {
446
                printk(KERN_ERR "ide_dma_sgiioc4(%s) -- Error, Port Addresses 0x%p to 0x%p ALREADY in use\n",
447
                       hwif->name, (void *)dma_base, (void *)dma_base + num_ports - 1);
448
                return;
449
        }
450
 
451
        hwif->dma_base = dma_base;
452
        hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
453
                                                  IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,    /* 1 Page */
454
                                                  &hwif->dmatable_dma);
455
 
456
        if (!hwif->dmatable_cpu)
457
                goto dma_alloc_failure;
458
 
459
        hwif->sg_table = kmalloc(sizeof (struct scatterlist) * IOC4_PRD_ENTRIES, GFP_KERNEL);
460
 
461
        if (!hwif->sg_table) {
462
                pci_free_consistent(hwif->pci_dev, IOC4_PRD_ENTRIES * IOC4_PRD_BYTES, hwif->dmatable_cpu, hwif->dmatable_dma);
463
                goto dma_alloc_failure;
464
        }
465
 
466
        hwif->dma_base2 = (unsigned long) pci_alloc_consistent(hwif->pci_dev, IOC4_IDE_CACHELINE_SIZE,
467
                                                               (dma_addr_t*)&(hwif->dma_status));
468
 
469
        if (!hwif->dma_base2) {
470
                pci_free_consistent(hwif->pci_dev, IOC4_PRD_ENTRIES * IOC4_PRD_BYTES, hwif->dmatable_cpu, hwif->dmatable_dma);
471
                kfree(hwif->sg_table);
472
                goto dma_alloc_failure;
473
        }
474
 
475
        return;
476
 
477
 dma_alloc_failure:
478
        printk(KERN_INFO "ide_dma_sgiioc4() -- Error! Unable to allocate DMA Maps for drive %s\n", hwif->name);
479
        printk(KERN_INFO "Changing from DMA to PIO mode for Drive %s \n", hwif->name);
480
 
481
        /* Disable DMA because we couldnot allocate any DMA maps */
482
        hwif->autodma = 0;
483
        hwif->atapi_dma = 0;
484
}
485
 
486
/* Initializes the IOC4 DMA Engine */
487
static void
sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
{
        u32 ioc4_dma;
        int count;
        ide_hwif_t *hwif = HWIF(drive);
        uint64_t dma_base = hwif->dma_base;
        uint32_t dma_addr, ending_dma_addr;

        ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);

        /* A previous transfer left the engine active: force a stop and
         * poll (sleeps of 0, 10, 20, ... jiffies) until the STOP bit
         * clears or we give up. */
        if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
                printk(KERN_WARNING "sgiioc4_configure_for_dma(%s):Warning!! IOC4 DMA from previous transfer was still active\n",
                        drive->name);
                hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4);
                count = 0;
                do {
                        xide_delay(count);
                        ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);
                        count += 10;
                } while ((ioc4_dma & IOC4_S_DMA_STOP) && (count < 100));

                if (ioc4_dma & IOC4_S_DMA_STOP)
                        printk(KERN_ERR "sgiioc4_configure_for__dma(%s) : IOC4 Dma STOP bit is still 1\n", drive->name);
        }

        /* Same stop-and-poll dance if the engine latched an error. */
        ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);
        if (ioc4_dma & IOC4_S_DMA_ERROR) {
                printk(KERN_WARNING "sgiioc4_configure_for__dma(%s) : Warning!! - DMA Error during Previous transfer | status 0x%x \n",
                       drive->name, ioc4_dma);
                hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4);
                count = 0;
                do {
                        ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);
                        xide_delay(count);
                        count += 10;
                } while ((ioc4_dma & IOC4_S_DMA_STOP) && (count < 100));

                if (ioc4_dma & IOC4_S_DMA_STOP)
                        printk(KERN_ERR "sgiioc4_configure_for__dma(%s) : IOC4 DMA STOP bit is still 1\n", drive->name);
        }

        /* Address of the Scatter Gather List */
        dma_addr = cpu_to_le32(hwif->dmatable_dma);
        hwif->OUTL(dma_addr, dma_base + IOC4_DMA_PTR_L * 4);

        /* Zero the "ending DMA" cacheline and tell the chip where it
         * lives; completion is later detected by this memory going
         * non-zero (see sgiioc4_ide_dma_end). */
        memset((unsigned int *) hwif->dma_base2, 0,IOC4_IDE_CACHELINE_SIZE);
        ending_dma_addr = cpu_to_le32(hwif->dma_status);
        hwif->OUTL(ending_dma_addr,dma_base + IOC4_DMA_END_ADDR * 4);

        /* Program the direction; the transfer itself is started by
         * sgiioc4_ide_dma_begin. */
        hwif->OUTL(dma_direction, dma_base + IOC4_DMA_CTRL * 4);
        drive->waiting_for_dma = 1;
}
541
 
542
/* IOC4 Scatter Gather list Format                                              */
543
/* 128 Bit entries to support 64 bit addresses in the future                    */
544
/* The Scatter Gather list Entry should be in the BIG-ENDIAN Format             */
545
/* ---------------------------------------------------------------------------  */
546
/* | Upper 32 bits - Zero               |       Lower 32 bits- address       |  */
547
/* ---------------------------------------------------------------------------  */
548
/* | Upper 32 bits - Zero               |EOL|    16 Bit Data Length          |  */
549
/* ---------------------------------------------------------------------------  */
550
 
551
/* Creates the scatter gather list, DMA Table */
552
/*
 * Build the IOC4 DMA (PRD) table for a request.  Each table entry is
 * 128 bits, big-endian: a 64-bit address (upper 32 bits zero) followed
 * by a 64-bit length word whose top bit of the low half marks end-of-
 * list.  Returns the number of entries written, or 0 to fall back to
 * PIO.
 */
static unsigned int
sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
{
        ide_hwif_t *hwif = HWIF(drive);
        unsigned int *table = hwif->dmatable_cpu;
        unsigned int count = 0, i = 1;
        struct scatterlist *sg;

        /* Taskfile (raw) requests map rq->buffer; normal requests map
         * the buffer-head chain. */
        if (rq->cmd == IDE_DRIVE_TASKFILE)
                hwif->sg_nents = i = sgiioc4_ide_raw_build_sglist(hwif, rq);
        else
                hwif->sg_nents = i = sgiioc4_ide_build_sglist(hwif, rq, ddir);

        if (!i)
                return 0;        /* sglist of length Zero */

        sg = hwif->sg_table;
        while (i && sg_dma_len(sg)) {
                dma_addr_t cur_addr;
                int cur_len;
                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                /* Split each sg segment so that no table entry crosses
                 * a 64 KB boundary. */
                while (cur_len) {
                        if (count++ >= IOC4_PRD_ENTRIES) {
                                printk(KERN_WARNING "%s: DMA table too small\n", drive->name);
                                goto use_pio_instead;
                        } else {
                                uint32_t xcount, bcount = 0x10000 - (cur_addr & 0xffff);

                                if (bcount > cur_len)
                                        bcount = cur_len;

                                /* put the addr, length in the IOC4 dma-table format */
                                *table = 0x0;
                                table++;
                                *table = cpu_to_be32(cur_addr);
                                table++;
                                *table = 0x0;
                                table++;

                                xcount = bcount & 0xffff;
                                *table = cpu_to_be32(xcount);
                                table++;

                                cur_addr += bcount;
                                cur_len -= bcount;
                        }
                }

                sg++;
                i--;
        }

        if (count) {
                /* Set the end-of-list bit in the last length word */
                table--;
                *table |= cpu_to_be32(0x80000000);
                return count;
        }

      use_pio_instead:
        pci_unmap_sg(hwif->pci_dev, hwif->sg_table, hwif->sg_nents, hwif->sg_dma_direction);
        hwif->sg_dma_active = 0;

        return 0;                /* revert to PIO for this request */
}
618
 
619
static int
620
sgiioc4_checkirq(ide_hwif_t * hwif)
621
{
622
        uint8_t intr_reg = hwif->INL(hwif->io_ports[IDE_IRQ_OFFSET] + IOC4_INTR_REG * 4);
623
 
624
        if (intr_reg & 0x03)
625
                return 1;
626
 
627
        return 0;
628
}
629
 
630
/*
 * Acknowledge a pending IOC4 IDE interrupt: drain the drive's status
 * register, report/clear any PCI bus error, then clear the chip-level
 * interrupt/error bits.  Returns the interrupt register value that was
 * seen on entry (0 if nothing was pending).
 */
static int
sgiioc4_clearirq(ide_drive_t * drive)
{
        u32 intr_reg;
        ide_hwif_t *hwif = HWIF(drive);
        ide_ioreg_t other_ir = hwif->io_ports[IDE_IRQ_OFFSET] + (IOC4_INTR_REG << 2);

        /* Code to check for PCI error conditions */
        intr_reg = hwif->INL(other_ir);
        if (intr_reg & 0x03) {
                /* Valid IOC4-IDE interrupt */
                u8 stat = hwif->INB(IDE_STATUS_REG);
                int count = 0;
                /* Poll while BUSY (bit 7) is set, up to 1024 rounds */
                do {
                        xide_delay(count);
                        stat = hwif->INB(IDE_STATUS_REG);       /* Removes Interrupt from IDE Device */
                } while ((stat & 0x80) && (count++ < 1024));

                if (intr_reg & 0x02) {
                        /* Error when transferring DMA data on PCI bus */
                        uint32_t pci_err_addr_low, pci_err_addr_high, pci_stat_cmd_reg;

                        pci_err_addr_low = hwif->INL(hwif->io_ports[IDE_IRQ_OFFSET]);
                        pci_err_addr_high = hwif->INL(hwif->io_ports[IDE_IRQ_OFFSET] + 4);
                        pci_read_config_dword(hwif->pci_dev, PCI_COMMAND, &pci_stat_cmd_reg);
                        printk(KERN_ERR "sgiioc4_clearirq(%s) : PCI Bus Error when doing DMA : status-cmd reg is 0x%x \n", drive->name, pci_stat_cmd_reg);
                        printk(KERN_ERR "sgiioc4_clearirq(%s) : PCI Error Address is 0x%x%x \n", drive->name, pci_err_addr_high, pci_err_addr_low);
                        /* Clear the PCI Error indicator */
                        /* NOTE(review): 0x00000146 presumably rewrites
                         * the expected COMMAND/STATUS value to clear
                         * the error bits — confirm against IOC4 docs */
                        pci_write_config_dword(hwif->pci_dev, PCI_COMMAND, 0x00000146);
                }

                hwif->OUTL(0x03, other_ir);     /* Clear the Interrupt, Error bits on the IOC4 */

                intr_reg = hwif->INL(other_ir);
        }

        return intr_reg;
}
668
 
669
/* XXX: duplicated code. See PV#: 896400 */
670
 
671
/**
672
 *      "Copied from drivers/ide/ide-dma.c"
673
 *      sgiioc4_ide_build_sglist - map IDE scatter gather for DMA I/O
674
 *      @hwif: the interface to build the DMA table for
675
 *      @rq: the request holding the sg list
676
 *      @ddir: data direction
677
 *
678
 *      Perform the PCI mapping magic neccessary to access the source
679
 *      or target buffers of a request via PCI DMA. The lower layers
680
 *      of the kernel provide the neccessary cache management so that
681
 *      we can operate in a portable fashion.
682
 *
683
 *      This code is identical to ide_build_sglist in ide-dma.c
684
 *      however that it not exported and even if it were would create
685
 *      dependancy problems for modular drivers.
686
 */
687
static int
sgiioc4_ide_build_sglist(ide_hwif_t * hwif, struct request *rq, int ddir)
{
        struct buffer_head *bh;
        struct scatterlist *sg = hwif->sg_table;
        /* ~0UL sentinel guarantees the first bh never looks contiguous */
        unsigned long lastdataend = ~0UL;
        int nents = 0;

        if (hwif->sg_dma_active)
                BUG();

        /* Walk the buffer-head chain, merging physically contiguous
         * buffers into a single scatterlist entry. */
        bh = rq->bh;
        do {
                int contig = 0;

                if (bh->b_page) {
                        if (bh_phys(bh) == lastdataend)
                                contig = 1;
                } else {
                        if ((unsigned long) bh->b_data == lastdataend)
                                contig = 1;
                }

                if (contig) {
                        /* Extend the previous entry instead of adding one */
                        sg[nents - 1].length += bh->b_size;
                        lastdataend += bh->b_size;
                        continue;
                }

                /* NOTE(review): bound checked against PRD_ENTRIES here
                 * but the table is sized with IOC4_PRD_ENTRIES in
                 * ide_dma_sgiioc4 — confirm the two agree. */
                if (nents >= PRD_ENTRIES)
                        return 0;

                memset(&sg[nents], 0, sizeof (*sg));

                if (bh->b_page) {
                        /* Highmem-capable page/offset form */
                        sg[nents].page = bh->b_page;
                        sg[nents].offset = bh_offset(bh);
                        lastdataend = bh_phys(bh) + bh->b_size;
                } else {
                        if ((unsigned long) bh->b_data < PAGE_SIZE)
                                BUG();

                        sg[nents].address = bh->b_data;
                        lastdataend = (unsigned long) bh->b_data + bh->b_size;
                }

                sg[nents].length = bh->b_size;
                nents++;
        } while ((bh = bh->b_reqnext) != NULL);

        if (nents == 0)
                BUG();

        hwif->sg_dma_direction = ddir;
        return pci_map_sg(hwif->pci_dev, sg, nents, ddir);
}
743
 
744
/* XXX: duplicated code. See PV#: 896400 */
745
 
746
/**
747
 *      Copied from drivers/ide/ide-dma.c
748
 *      sgiioc4_ide_raw_build_sglist    -       map IDE scatter gather for DMA
749
 *      @hwif: the interface to build the DMA table for
750
 *      @rq: the request holding the sg list
751
 *
752
 *      Perform the PCI mapping magic neccessary to access the source or
753
 *      target buffers of a taskfile request via PCI DMA. The lower layers
754
 *      of the  kernel provide the neccessary cache management so that we can
755
 *      operate in a portable fashion
756
 *
757
 *      This code is identical to ide_raw_build_sglist in ide-dma.c
758
 *      however that it not exported and even if it were would create
759
 *      dependancy problems for modular drivers.
760
 */
761
static int
sgiioc4_ide_raw_build_sglist(ide_hwif_t * hwif, struct request *rq)
{
        struct scatterlist *sg = hwif->sg_table;
        int nents = 0;
        ide_task_t *args = rq->special;
        u8 *virt_addr = rq->buffer;
        int sector_count = rq->nr_sectors;

        /* Direction follows the taskfile command type */
        if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
                hwif->sg_dma_direction = PCI_DMA_TODEVICE;
        else
                hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
        /* NOTE(review): the live (#if 1) branch emits at most two sg
         * entries — one 128-sector chunk plus the remainder — whereas
         * the disabled #else variant splits every 128 sectors.  For
         * requests over 256 sectors the live branch produces one large
         * trailing entry; confirm that is intended. */
#if 1
        if (sector_count > 128) {
                memset(&sg[nents], 0, sizeof (*sg));
                sg[nents].address = virt_addr;
                sg[nents].length = 128 * SECTOR_SIZE;
                nents++;
                virt_addr = virt_addr + (128 * SECTOR_SIZE);
                sector_count -= 128;
        }
        memset(&sg[nents], 0, sizeof (*sg));
        sg[nents].address = virt_addr;
        sg[nents].length = sector_count * SECTOR_SIZE;
        nents++;
#else
        while (sector_count > 128) {
                memset(&sg[nents], 0, sizeof (*sg));
                sg[nents].address = virt_addr;
                sg[nents].length = 128 * SECTOR_SIZE;
                nents++;
                virt_addr = virt_addr + (128 * SECTOR_SIZE);
                sector_count -= 128;
        };
        memset(&sg[nents], 0, sizeof (*sg));
        sg[nents].address = virt_addr;
        sg[nents].length = sector_count * SECTOR_SIZE;
        nents++;
#endif
        return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}
803
 
804
#ifdef CONFIG_PROC_FS
805
 
806
static int
807
sgiioc4_get_info(char *buffer, char **addr, off_t offset, int count)
808
{
809
        char *p = buffer;
810
        unsigned int class_rev;
811
        int i = 0;
812
 
813
        while (i < n_sgiioc4_devs) {
814
                pci_read_config_dword(sgiioc4_devs[i], PCI_CLASS_REVISION,
815
                                      &class_rev);
816
                class_rev &= 0xff;
817
 
818
                if (sgiioc4_devs[i]->device == PCI_DEVICE_ID_SGI_IOC4) {
819
                        p += sprintf(p, "\n     SGI IOC4 Chipset rev %d. ", class_rev);
820
                        p += sprintf(p, "\n     Chipset has 1 IDE channel and supports 2 devices on that channel.");
821
                        p += sprintf(p, "\n     Chipset supports DMA in MultiMode-2 data transfer protocol.\n");
822
                        /* Do we need more info. here? */
823
                }
824
                i++;
825
        }
826
 
827
        return p - buffer;
828
}
829
 
830
#endif /* CONFIG_PROC_FS */
831
 
832
static int __devinit
833
sgiioc4_init_one(struct pci_dev *dev, const struct pci_device_id *id)
834
{
835
        unsigned int class_rev;
836
        ide_pci_device_t *d = &sgiioc4_chipsets[id->driver_data];
837
        if (dev->device != d->device) {
838
                printk(KERN_ERR "Error in sgiioc4_init_one(dev 0x%p | id 0x%p )\n", (void *) dev, (void *) id);
839
                BUG();
840
        }
841
 
842
        pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
843
        class_rev &= 0xff;
844
 
845
        if (class_rev < IOC4_SUPPORTED_FIRMWARE_REV) {
846
                printk(KERN_INFO "Disabling the IOC4 IDE Part due to unsupported Firmware Rev (%d). \n",class_rev);
847
                printk(KERN_INFO "Please upgrade to Firmware Rev 46 or higher \n");
848
                return 0;
849
        }
850
 
851
        printk(KERN_INFO "%s: IDE controller at PCI slot %s\n", d->name, dev->slot_name);
852
 
853
        if (pci_init_sgiioc4(dev, d->name))
854
                return 0;
855
 
856
        MOD_INC_USE_COUNT;
857
 
858
        return 0;
859
}
860
 
861
/* Match the SGI IOC4, additionally filtered by its PCI class code
 * (0x0b4000, fully masked). */
static struct pci_device_id sgiioc4_pci_tbl[] __devinitdata = {
        { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID, PCI_ANY_ID, 0x0b4000, 0xFFFFFF, 0 },
        { 0 }
};
865
 
866
/* PCI glue for the IDE layer.  Note: no .remove hook — teardown
 * happens only via module exit (ide_pci_unregister_driver). */
static struct pci_driver driver = {
        .name = "SGI-IOC4 IDE",
        .id_table = sgiioc4_pci_tbl,
        .probe = sgiioc4_init_one,
};
871
 
872
static int
sgiioc4_ide_init(void)
{
        /* Module entry: register the PCI driver with the IDE layer. */
        return ide_pci_register_driver(&driver);
}
877
 
878
static void
sgiioc4_ide_exit(void)
{
        /* Module exit: unregister the PCI driver. */
        ide_pci_unregister_driver(&driver);
}
883
 
884
/* Module entry/exit points and metadata. */
module_init(sgiioc4_ide_init);
module_exit(sgiioc4_ide_exit);

MODULE_AUTHOR("Aniket Malatpure - Silicon Graphics Inc. (SGI)");
MODULE_DESCRIPTION("PCI driver module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");

/* This driver exports no symbols to other modules (2.4 idiom). */
EXPORT_NO_SYMBOLS;

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.