/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.5"

#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL

enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = 0x1f,
        NV_MWDMA_MASK                   = 0x07,
        NV_UDMA_MASK                    = 0x7f,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),

        /* MCP55 reg offset */
        NV_CTL_MCP55                    = 0x400,
        NV_INT_STATUS_MCP55             = 0x440,
        NV_INT_ENABLE_MCP55             = 0x444,
        NV_NCQ_REG_MCP55                = 0x448,

        /* MCP55 */
        NV_INT_ALL_MCP55                = 0xffff,
        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ ENABLE BITS */
        NV_CTL_PRI_SWNCQ                = 0x02,
        NV_CTL_SEC_SWNCQ                = 0x04,

        /* SW NCQ status bits */
        NV_SWNCQ_IRQ_DEV                = (1 << 0),
        NV_SWNCQ_IRQ_PM                 = (1 << 1),
        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
                                          NV_SWNCQ_IRQ_REMOVED,
};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};

enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};
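
/*
 * Each entry in a CPB's taskfile area (tf[] in struct nv_adma_cpb below)
 * is a 16-bit word: the high byte selects the taskfile register and
 * carries the control bits above, while the low byte holds the data.
 * nv_adma_tf_to_cpb() builds these words, e.g.
 * (ATA_REG_CMD << 8) | tf->command | CMDEND for the final entry.
 */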

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8                      len;           /* 3 */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

struct defer_queue {
        u32             defer_bits;
        unsigned int    head;
        unsigned int    tail;
        unsigned int    tag[ATA_MAX_QUEUE];
};
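
/*
 * defer_queue is used by the SWNCQ code as a simple FIFO ring: tag[]
 * holds the deferred command tags, head and tail index into it, and
 * defer_bits tracks the same tags as a bitmap (one bit per tag).
 */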

enum ncq_saw_flag_list {
        ncq_saw_d2h     = (1U << 0),
        ncq_saw_dmas    = (1U << 1),
        ncq_saw_sdb     = (1U << 2),
        ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
        struct ata_prd  *prd;    /* our SG list */
        dma_addr_t      prd_dma; /* and its DMA mapping */
        void __iomem    *sactive_block;
        void __iomem    *irq_block;
        void __iomem    *tag_block;
        u32             qc_active;

        unsigned int    last_issue_tag;

        /* FIFO circular queue to store deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32             dhfis_bits;
        u32             dmafis_bits;
        u32             sdbfis_bits;

        unsigned int    ncq_flags;
};
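
/*
 * The three *_bits words above appear to act as per-tag bitmaps: bit N
 * records that a D2H Register FIS, a DMA Setup FIS, or a Set Device Bits
 * FIS, respectively, has been seen for the NCQ command with tag N.
 */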


#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
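/* The ADMA general control/status word packs each port's interrupt flag at
   bit (19 + 12 * port): bit 19 for port 0, bit 31 for port 1. */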

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
#ifdef CONFIG_PM
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
#endif
        .remove                 = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = NV_ADMA_MAX_CPBS,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_swncq_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = ATA_MAX_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = ata_bmdma_freeze,
        .thaw                   = ata_bmdma_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
        .host_stop              = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = nv_adma_tf_read,
        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = nv_adma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static const struct ata_port_operations nv_swncq_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,
        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .irq_handler    = nv_generic_interrupt,
        },
        /* nforce2/3 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .irq_handler    = nv_nf2_interrupt,
        },
        /* ck804 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .irq_handler    = nv_ck804_interrupt,
        },
        /* ADMA */
        {
                .sht            = &nv_adma_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .irq_handler    = nv_adma_interrupt,
        },
        /* SWNCQ */
        {
                .sht            = &nv_swncq_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .irq_handler    = nv_swncq_interrupt,
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
static int swncq_enabled;

static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                         "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                         status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
              !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u64 bounce_limit;
        unsigned long segment_boundary;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                bounce_limit = ATA_DMA_MASK;
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                bounce_limit = *ap->dev->dma_mask;
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Other than when internal or pass-through commands are executed,
           the only time this function will be called in ADMA mode will be
           if a command fails. In the failure case we don't care about going
           into register mode with ADMA commands pending, as the commands will
           all shortly be aborted anyway. We assume that NCQ commands are not
           issued via passthrough, which is the only way that switching into
           ADMA mode could abort outstanding commands. */
        nv_adma_register_mode(ap);

        ata_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}
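
/*
 * Worked example (illustrative only): for a non-LBA48 command with
 * ATA_TFLAG_ISADDR and ATA_TFLAG_DEVICE set, the function above emits
 * seven words - feature (tagged WNB), nsect, lbal, lbam, lbah, device,
 * then the command word tagged CMDEND - and pads the remaining five of
 * the twelve CPB taskfile slots with IGN.
 */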

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return 1;
        }

        if (likely(flags & NV_CPB_RESP_DONE)) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
                VPRINTK("CPB flags done, flags=0x%x\n", flags);
                if (likely(qc)) {
                        DPRINTK("Completing qc from tag %d\n", cpb_num);
                        ata_qc_complete(qc);
                } else {
                        struct ata_eh_info *ehi = &ap->link.eh_info;
                        /* Notifier bits set without a command may indicate the drive
                           is misbehaving. Raise host state machine violation on this
                           condition. */
                        ata_port_printk(ap, KERN_ERR,
                                        "notifier for tag %d with no cmd?\n",
                                        cpb_num);
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_SOFTRESET;
                        ata_port_freeze(ap);
                        return 1;
                }
        }
        return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        u16 status;
                        u32 gen_ctl;
                        u32 notifier, notifier_error;

                        /* if ADMA is disabled, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                handled += nv_host_intr(ap, irq_stat);
                                continue;
                        }

                        /* if in ATA register mode, check for standard interrupts */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->link.active_tag))
                                        /** NV_INT_DEV indication seems unreliable at times
                                            at least in ADMA mode. Force it on always when a
                                            command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
                        rmb();

                        handled++; /* irq handled if we got here */

                        /* freeze if hotplugged or controller error */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                               NV_ADMA_STAT_HOTUNPLUG |
                                               NV_ADMA_STAT_TIMEOUT |
                                               NV_ADMA_STAT_SERROR))) {
                                struct ata_eh_info *ehi = &ap->link.eh_info;

                                ata_ehi_clear_desc(ehi);
                                __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                                if (status & NV_ADMA_STAT_TIMEOUT) {
                                        ehi->err_mask |= AC_ERR_SYSTEM;
                                        ata_ehi_push_desc(ehi, "timeout");
                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hotplug");
                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hot unplug");
                                } else if (status & NV_ADMA_STAT_SERROR) {
                                        /* let libata analyze SError and figure out the cause */
                                        ata_ehi_push_desc(ehi, "SError");
                                } else
                                        ata_ehi_push_desc(ehi, "unknown");
                                ata_port_freeze(ap);
                                continue;
                        }

                        if (status & (NV_ADMA_STAT_DONE |
                                      NV_ADMA_STAT_CPBERR)) {
                                u32 check_commands;
                                int pos, error = 0;

                                if (ata_tag_valid(ap->link.active_tag))
                                        check_commands = 1 << ap->link.active_tag;
                                else
                                        check_commands = ap->link.sactive;

                                /** Check CPBs for completed commands */
                                while ((pos = ffs(check_commands)) && !error) {
                                        pos--;
                                        error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                        check_commands &= ~(1 << pos);
                                }
                        }
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        u16 tmp;

        VPRINTK("ENTER\n");

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
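
/*
 * Per the sizes defined at the top of the file, the per-port DMA area laid
 * out above is NV_ADMA_MAX_CPBS (32) CPBs of NV_ADMA_CPB_SZ (128) bytes,
 * followed by one NV_ADMA_SGTBL_SZ scatter/gather table per command tag -
 * NV_ADMA_PORT_PRIV_DMA_SZ in total.
 */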
1207
 
1208
static void nv_adma_port_stop(struct ata_port *ap)
1209
{
1210
        struct nv_adma_port_priv *pp = ap->private_data;
1211
        void __iomem *mmio = pp->ctl_block;
1212
 
1213
        VPRINTK("ENTER\n");
1214
        writew(0, mmio + NV_ADMA_CTL);
1215
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
#endif
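
/*
 * nv_adma_port_resume() deliberately repeats the tail of the port-start
 * sequence: controller register state is assumed lost across suspend, so
 * the CPB base, interrupt enables and channel reset are all reprogrammed
 * from the values cached in the port's private data.
 */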

static void nv_adma_setup_port(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &ap->ioaddr;

        VPRINTK("ENTER\n");

        mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr        = mmio;
        ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr      =
        ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr     =
        ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr  =
        ioport->ctl_addr        = mmio + 0x20;
}
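
/*
 * Each ADMA port carves its legacy taskfile shadow out of BAR5: the usual
 * byte-wide ATA registers are spread out on 32-bit strides (hence the
 * "* 4" in the offsets above), with altstatus/device-control at a fixed
 * 0x20.  This is the register file that register mode falls back on when
 * a command bypasses the ADMA engine.
 */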

static int nv_adma_host_init(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        unsigned int i;
        u32 tmp32;

        VPRINTK("ENTER\n");

        /* enable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                 NV_MCP_SATA_CFG_20_PORT1_EN |
                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        for (i = 0; i < host->n_ports; i++)
                nv_adma_setup_port(host->ports[i]);

        return 0;
}
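
/*
 * The PORTx_EN bits switch the respective port into ADMA operation.  The
 * PWB_EN bits, judging only from their names (there is no public hardware
 * documentation for these parts), appear to enable posted-write buffering
 * for the port; the PCI resume path toggles the same four bits per port.
 */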

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
                              struct scatterlist *sg,
                              int idx,
                              struct nv_adma_prd *aprd)
{
        u8 flags = 0;
        if (qc->tf.flags & ATA_TFLAG_WRITE)
                flags |= NV_APRD_WRITE;
        if (idx == qc->n_elem - 1)
                flags |= NV_APRD_END;
        else if (idx != 4)
                flags |= NV_APRD_CONT;

        aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
        aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
        aprd->flags = flags;
        aprd->packet_len = 0;
}
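
/*
 * Flag logic above: the final s/g element gets NV_APRD_END, and every
 * other element gets NV_APRD_CONT except index 4.  Index 4 is the last of
 * the five APRDs embedded in the CPB itself; instead of CONT, the chain
 * to the external per-tag table is expressed through cpb->next_aprd (see
 * nv_adma_fill_sg() below).
 */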

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        unsigned int idx;
        struct nv_adma_prd *aprd;
        struct scatterlist *sg;

        VPRINTK("ENTER\n");

        idx = 0;

        ata_for_each_sg(sg, qc) {
                aprd = (idx < 5) ? &cpb->aprd[idx] :
                               &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
                nv_adma_fill_aprd(qc, sg, idx, aprd);
                idx++;
        }
        if (idx > 5)
                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
        else
                cpb->next_aprd = cpu_to_le64(0);
}
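
/*
 * Layout example: a seven-element s/g list places entries 0-4 in the
 * CPB's inline APRD array and entries 5-6 at
 * pp->aprd[NV_ADMA_SGTBL_LEN * tag], with next_aprd holding the DMA
 * address of that per-tag overflow table.  Commands with five or fewer
 * elements leave next_aprd at zero.
 */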

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        /* ADMA engine can only be used for non-ATAPI DMA commands,
           or interrupt-driven no-data commands. */
        if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
           (qc->tf.flags & ATA_TFLAG_POLLING))
                return 1;

        if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
           (qc->tf.protocol == ATA_PROT_NODATA))
                return 0;

        return 1;
}
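
/*
 * Decision summary: ports set up for ATAPI and any polled command are
 * forced into register mode (return 1); DMA-mapped commands and
 * interrupt-driven NODATA commands may use the ADMA engine (return 0);
 * everything else, e.g. PIO data transfers, falls back to register mode.
 */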

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
                       NV_CPB_CTL_IEN;

        if (nv_adma_use_reg_mode(qc)) {
                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
                        (qc->flags & ATA_QCFLAG_DMAMAP));
                nv_adma_register_mode(qc->ap);
                ata_qc_prep(qc);
                return;
        }

        cpb->resp_flags = NV_CPB_RESP_DONE;
        wmb();
        cpb->ctl_flags = 0;
        wmb();

        cpb->len                = 3;
        cpb->tag                = qc->tag;
        cpb->next_cpb_idx       = 0;

        /* turn on NCQ flags for NCQ commands */
        if (qc->tf.protocol == ATA_PROT_NCQ)
                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

        VPRINTK("qc->flags = 0x%lx\n", qc->flags);

        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

        if (qc->flags & ATA_QCFLAG_DMAMAP) {
                nv_adma_fill_sg(qc, cpb);
                ctl_flags |= NV_CPB_CTL_APRD_VALID;
        } else
                memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
           until we are finished filling in all of the contents */
        wmb();
        cpb->ctl_flags = ctl_flags;
        wmb();
        cpb->resp_flags = 0;
}
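
/*
 * The write barriers order the CPB updates as seen by the controller:
 * first mark the response as DONE and drop CPB_VALID so the engine will
 * not fetch a half-built block, then fill in the body, and only after a
 * final wmb() publish ctl_flags with CPB_VALID set and rearm resp_flags.
 */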

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

        VPRINTK("ENTER\n");

        /* We can't handle result taskfile with NCQ commands, since
           retrieving the taskfile switches us out of ADMA mode and would abort
           existing commands. */
        if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
                     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
                ata_dev_printk(qc->dev, KERN_ERR,
                        "NCQ w/ RESULT_TF not allowed\n");
                return AC_ERR_SYSTEM;
        }

        if (nv_adma_use_reg_mode(qc)) {
                /* use ATA register mode */
                VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
                        (qc->flags & ATA_QCFLAG_DMAMAP));
                nv_adma_register_mode(qc->ap);
                return ata_qc_issue_prot(qc);
        } else
                nv_adma_mode(qc->ap);

        /* write append register, command tag in lower 8 bits
           and (number of cpbs to append -1) in top 8 bits */
        wmb();

        if (curr_ncq != pp->last_issue_ncq) {
                /* Seems to need some delay before switching between NCQ and
                   non-NCQ commands, else we get command timeouts and such. */
                udelay(20);
                pp->last_issue_ncq = curr_ncq;
        }

        writew(qc->tag, mmio + NV_ADMA_APPEND);

        DPRINTK("Issued tag %u\n", qc->tag);

        return 0;
}
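
/*
 * The APPEND register encodes the starting tag in its low byte and the
 * number of CPBs to append minus one in its high byte, so writew(qc->tag)
 * hands exactly one CPB to the engine.  The udelay(20) when alternating
 * between NCQ and non-NCQ issue is purely empirical, per the comment
 * above.
 */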

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i;
        unsigned int handled = 0;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap;

                ap = host->ports[i];
                if (ap &&
                    !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct ata_queued_cmd *qc;

                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
                        if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                                handled += ata_host_intr(ap, qc);
                        else
                                // No request pending?  Clear interrupt status
                                // anyway, in case there's one pending.
                                ap->ops->check_status(ap);
                }
        }

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
        int i, handled = 0;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                if (ap && !(ap->flags & ATA_FLAG_DISABLED))
                        handled += nv_host_intr(ap, irq_stat);

                irq_stat >>= NV_INT_PORT_SHIFT;
        }

        return IRQ_RETVAL(handled);
}
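
/*
 * The shared status byte packs one NV_INT_PORT_SHIFT-wide field per port,
 * so each loop iteration consumes the low bits for the current port and
 * shifts the next port's field down; e.g. port 1's NV_INT_DEV shows up as
 * bit 4 of the raw register.
 */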

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        u8 irq_stat;
        irqreturn_t ret;

        spin_lock(&host->lock);
        irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

        return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        u8 irq_stat;
        irqreturn_t ret;

        spin_lock(&host->lock);
        irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

        return ret;
}

static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;

        *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
        return 0;
}

static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;

        iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
        return 0;
}
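
/*
 * SCRs sit at 4-byte strides from scr_addr: SCR_STATUS at +0x0, SCR_ERROR
 * at +0x4 and SCR_CONTROL at +0x8.  Indexes beyond SCR_CONTROL are
 * rejected, so only those three registers are exposed here.
 */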

static void nv_nf2_freeze(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask &= ~(NV_INT_ALL << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask |= (NV_INT_MASK << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask &= ~(NV_INT_ALL << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask |= (NV_INT_MASK << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
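
/*
 * The freeze/thaw pairs differ only in where the registers live (behind
 * scr_addr on nForce2/3, in BAR5 on CK804).  Freeze masks the port's
 * whole NV_INT_ALL field; thaw first acks everything pending in the
 * status register, then re-enables only the bits in NV_INT_MASK rather
 * than NV_INT_ALL.
 */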

static void nv_mcp55_freeze(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
        u32 mask;

        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
        mask &= ~(NV_INT_ALL_MCP55 << shift);
        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
        ata_bmdma_freeze(ap);
}

static void nv_mcp55_thaw(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
        u32 mask;

        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
        mask |= (NV_INT_MASK_MCP55 << shift);
        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
        ata_bmdma_thaw(ap);
}

static int nv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline)
{
        unsigned int dummy;

        /* SATA hardreset fails to retrieve proper device signature on
         * some controllers.  Don't classify on hardreset.  For more
         * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
         */
        return sata_std_hardreset(link, &dummy, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
                void __iomem *mmio = pp->ctl_block;
                int i;
                u16 tmp;

                if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
                        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
                        u32 status = readw(mmio + NV_ADMA_STAT);
                        u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
                        u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

                        ata_port_printk(ap, KERN_ERR,
                                "EH in ADMA mode, notifier 0x%X "
                                "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
                                "next cpb count 0x%X next cpb idx 0x%x\n",
                                notifier, notifier_error, gen_ctl, status,
                                cpb_count, next_cpb_idx);

                        for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
                                struct nv_adma_cpb *cpb = &pp->cpb[i];
                                if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
                                    ap->link.sactive & (1 << i))
                                        ata_port_printk(ap, KERN_ERR,
                                                "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
                                                i, cpb->ctl_flags, cpb->resp_flags);
                        }
                }

                /* Push us back into port register mode for error handling. */
                nv_adma_register_mode(ap);

                /* Mark all of the CPBs as invalid to prevent them from
                   being executed */
                for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

                /* clear CPB fetch count */
                writew(0, mmio + NV_ADMA_CPB_COUNT);

                /* Reset channel */
                tmp = readw(mmio + NV_ADMA_CTL);
                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
                udelay(1);
                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        }

        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}
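
/*
 * Recovery sequence when EH fires while the port is still in ADMA mode:
 * dump the engine and per-CPB state for the active tags, fall back to
 * register mode, invalidate every CPB so nothing half-issued can run,
 * zero the fetch count and pulse CHANNEL_RESET, and only then hand off to
 * the standard BMDMA error-handling path.
 */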

static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct defer_queue *dq = &pp->defer_queue;

        /* queue is full */
        WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
        dq->defer_bits |= (1 << qc->tag);
        dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}

static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct defer_queue *dq = &pp->defer_queue;
        unsigned int tag;

        if (dq->head == dq->tail)       /* empty queue */
                return NULL;

        tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
        dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
        WARN_ON(!(dq->defer_bits & (1 << tag)));
        dq->defer_bits &= ~(1 << tag);

        return ata_qc_from_tag(ap, tag);
}
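
/*
 * The defer queue is a small FIFO ring: head and tail are free-running
 * counters masked with ATA_MAX_QUEUE - 1 on access, so the full condition
 * is exactly tail - head == ATA_MAX_QUEUE.  defer_bits mirrors the ring's
 * occupancy as a tag bitmap so the interrupt path can test for deferred
 * work without walking the ring.
 */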

static void nv_swncq_fis_reinit(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;

        pp->dhfis_bits = 0;
        pp->dmafis_bits = 0;
        pp->sdbfis_bits = 0;
        pp->ncq_flags = 0;
}

static void nv_swncq_pp_reinit(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct defer_queue *dq = &pp->defer_queue;

        dq->head = 0;
        dq->tail = 0;
        dq->defer_bits = 0;
        pp->qc_active = 0;
        pp->last_issue_tag = ATA_TAG_POISON;
        nv_swncq_fis_reinit(ap);
}

static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
        struct nv_swncq_port_priv *pp = ap->private_data;

        writew(fis, pp->irq_block);
}

static void __ata_bmdma_stop(struct ata_port *ap)
{
        struct ata_queued_cmd qc;

        qc.ap = ap;
        ata_bmdma_stop(&qc);
}
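
/*
 * __ata_bmdma_stop() fakes up a queued command on the stack purely so the
 * port's BMDMA engine can be stopped through ata_bmdma_stop(), which only
 * needs qc->ap here; no real command is involved at the call sites.
 */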

static void nv_swncq_ncq_stop(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        unsigned int i;
        u32 sactive;
        u32 done_mask;

        ata_port_printk(ap, KERN_ERR,
                        "EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
                        ap->qc_active, ap->link.sactive);
        ata_port_printk(ap, KERN_ERR,
                "SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
                "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
                pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
                pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

        ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
                        ap->ops->check_status(ap),
                        ioread8(ap->ioaddr.error_addr));

        sactive = readl(pp->sactive_block);
        done_mask = pp->qc_active ^ sactive;

        ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                u8 err = 0;
                if (pp->qc_active & (1 << i))
                        err = 0;
                else if (done_mask & (1 << i))
                        err = 1;
                else
                        continue;

                ata_port_printk(ap, KERN_ERR,
                                "tag 0x%x: %01x %01x %01x %01x %s\n", i,
                                (pp->dhfis_bits >> i) & 0x1,
                                (pp->dmafis_bits >> i) & 0x1,
                                (pp->sdbfis_bits >> i) & 0x1,
                                (sactive >> i) & 0x1,
                                (err ? "error! tag doesn't exist" : " "));
        }

        nv_swncq_pp_reinit(ap);
        ap->ops->irq_clear(ap);
        __ata_bmdma_stop(ap);
        nv_swncq_irq_clear(ap, 0xffff);
}

static void nv_swncq_error_handler(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->link.eh_context;

        if (ap->link.sactive) {
                nv_swncq_ncq_stop(ap);
                ehc->i.action |= ATA_EH_HARDRESET;
        }

        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}

#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        u32 tmp;

        /* clear irq */
        writel(~0, mmio + NV_INT_STATUS_MCP55);

        /* disable irq */
        writel(0, mmio + NV_INT_ENABLE_MCP55);

        /* disable swncq */
        tmp = readl(mmio + NV_CTL_MCP55);
        tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
        writel(tmp, mmio + NV_CTL_MCP55);

        return 0;
}

static int nv_swncq_port_resume(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        u32 tmp;

        /* clear irq */
        writel(~0, mmio + NV_INT_STATUS_MCP55);

        /* enable irq */
        writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

        /* enable swncq */
        tmp = readl(mmio + NV_CTL_MCP55);
        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

        return 0;
}
#endif
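
/*
 * The 0x00fd00fd enable value programs the identical bit pattern into
 * both ports' 16-bit interrupt-enable fields; which source the one
 * cleared bit per port corresponds to is not knowable without NVIDIA
 * documentation.  nv_swncq_host_init() below ORs in the same constant.
 */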

static void nv_swncq_host_init(struct ata_host *host)
{
        u32 tmp;
        void __iomem *mmio = host->iomap[NV_MMIO_BAR];
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u8 regval;

        /* disable ECO 398 */
        pci_read_config_byte(pdev, 0x7f, &regval);
        regval &= ~(1 << 7);
        pci_write_config_byte(pdev, 0x7f, regval);

        /* enable swncq */
        tmp = readl(mmio + NV_CTL_MCP55);
        VPRINTK("HOST_CTL:0x%X\n", tmp);
        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

        /* enable irq intr */
        tmp = readl(mmio + NV_INT_ENABLE_MCP55);
        VPRINTK("HOST_ENABLE:0x%X\n", tmp);
        writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

        /* clear port irq */
        writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}

static int nv_swncq_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        struct ata_device *dev;
        int rc;
        u8 rev;
        u8 check_maxtor = 0;
        unsigned char model_num[ATA_ID_PROD_LEN + 1];

        rc = ata_scsi_slave_config(sdev);
        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        dev = &ap->link.device[sdev->id];
        if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
                return rc;

        /* if MCP51 and Maxtor, then disable ncq */
        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
                pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
                check_maxtor = 1;

        /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
                pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
                pci_read_config_byte(pdev, 0x8, &rev);
                if (rev <= 0xa2)
                        check_maxtor = 1;
        }

        if (!check_maxtor)
                return rc;

        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

        if (strncmp(model_num, "Maxtor", 6) == 0) {
                ata_scsi_change_queue_depth(sdev, 1);
                ata_dev_printk(dev, KERN_NOTICE,
                        "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
        }

        return rc;
}
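
/*
 * Blacklist summary: on MCP51, and on MCP55 up to silicon revision 0xa2
 * (read from PCI config offset 0x08, the standard revision-ID register),
 * Maxtor drives are dropped to a queue depth of 1, which effectively
 * disables SWNCQ for that device while leaving the port in SWNCQ mode.
 */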

static int nv_swncq_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct nv_swncq_port_priv *pp;
        int rc;

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
                                      &pp->prd_dma, GFP_KERNEL);
        if (!pp->prd)
                return -ENOMEM;
        memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

        ap->private_data = pp;
        pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
        pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
        pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

        return 0;
}

static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
        if (qc->tf.protocol != ATA_PROT_NCQ) {
                ata_qc_prep(qc);
                return;
        }

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;

        nv_swncq_fill_sg(qc);
}

static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg;
        unsigned int idx;
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_prd *prd;

        WARN_ON(qc->__sg == NULL);
        WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

        prd = pp->prd + ATA_MAX_PRD * qc->tag;

        idx = 0;
        ata_for_each_sg(sg, qc) {
                u32 addr, offset;
                u32 sg_len, len;

                addr = (u32)sg_dma_address(sg);
                sg_len = sg_dma_len(sg);

                while (sg_len) {
                        offset = addr & 0xffff;
                        len = sg_len;
                        if ((offset + sg_len) > 0x10000)
                                len = 0x10000 - offset;

                        prd[idx].addr = cpu_to_le32(addr);
                        prd[idx].flags_len = cpu_to_le32(len & 0xffff);

                        idx++;
                        sg_len -= len;
                        addr += len;
                }
        }

        if (idx)
                prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
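
/*
 * The inner loop splits segments so no PRD entry crosses a 64 KiB
 * boundary, as BMDMA requires.  Worked example: an 8 KiB segment starting
 * at 0x1F000 becomes two entries, 0x1F000 for 0x1000 bytes (up to the
 * 0x20000 boundary) and 0x20000 for the remaining 0x1000.  Each tag owns
 * its own PRD table at pp->prd + ATA_MAX_PRD * tag.
 */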

static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
                                          struct ata_queued_cmd *qc)
{
        struct nv_swncq_port_priv *pp = ap->private_data;

        if (qc == NULL)
                return 0;

        DPRINTK("Enter\n");

        writel((1 << qc->tag), pp->sactive_block);
        pp->last_issue_tag = qc->tag;
        pp->dhfis_bits &= ~(1 << qc->tag);
        pp->dmafis_bits &= ~(1 << qc->tag);
        pp->qc_active |= (0x1 << qc->tag);

        ap->ops->tf_load(ap, &qc->tf);   /* load tf registers */
        ap->ops->exec_command(ap, &qc->tf);

        DPRINTK("Issued tag %u\n", qc->tag);

        return 0;
}

static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct nv_swncq_port_priv *pp = ap->private_data;

        if (qc->tf.protocol != ATA_PROT_NCQ)
                return ata_qc_issue_prot(qc);

        DPRINTK("Enter\n");

        if (!pp->qc_active)
                nv_swncq_issue_atacmd(ap, qc);
        else
                nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */

        return 0;
}

static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
        u32 serror;
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ata_ehi_clear_desc(ehi);

        /* SError needs clearing; otherwise the controller might lock up */
        sata_scr_read(&ap->link, SCR_ERROR, &serror);
        sata_scr_write(&ap->link, SCR_ERROR, serror);

        /* analyze @fis */
        if (fis & NV_SWNCQ_IRQ_ADDED)
                ata_ehi_push_desc(ehi, "hot plug");
        else if (fis & NV_SWNCQ_IRQ_REMOVED)
                ata_ehi_push_desc(ehi, "hot unplug");

        ata_ehi_hotplugged(ehi);

        /* okay, let's hand over to EH */
        ehi->serror |= serror;

        ata_port_freeze(ap);
}

static int nv_swncq_sdbfis(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_eh_info *ehi = &ap->link.eh_info;
        u32 sactive;
        int nr_done = 0;
        u32 done_mask;
        int i;
        u8 host_stat;
        u8 lack_dhfis = 0;

        host_stat = ap->ops->bmdma_status(ap);
        if (unlikely(host_stat & ATA_DMA_ERR)) {
                /* error when transferring data to/from memory */
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
                ehi->err_mask |= AC_ERR_HOST_BUS;
                ehi->action |= ATA_EH_SOFTRESET;
                return -EINVAL;
        }

        ap->ops->irq_clear(ap);
        __ata_bmdma_stop(ap);

        sactive = readl(pp->sactive_block);
        done_mask = pp->qc_active ^ sactive;

        if (unlikely(done_mask & sactive)) {
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
                                  "(%08x->%08x)", pp->qc_active, sactive);
                ehi->err_mask |= AC_ERR_HSM;
                ehi->action |= ATA_EH_HARDRESET;
                return -EINVAL;
        }
        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                if (!(done_mask & (1 << i)))
                        continue;

                qc = ata_qc_from_tag(ap, i);
                if (qc) {
                        ata_qc_complete(qc);
                        pp->qc_active &= ~(1 << i);
                        pp->dhfis_bits &= ~(1 << i);
                        pp->dmafis_bits &= ~(1 << i);
                        pp->sdbfis_bits |= (1 << i);
                        nr_done++;
                }
        }

        if (!ap->qc_active) {
                DPRINTK("over\n");
                nv_swncq_pp_reinit(ap);
                return nr_done;
        }

        if (pp->qc_active & pp->dhfis_bits)
                return nr_done;

        if ((pp->ncq_flags & ncq_saw_backout) ||
            (pp->qc_active ^ pp->dhfis_bits))
                /* if the controller can't get a Device-to-Host Register FIS,
                 * the driver needs to reissue the command.
                 */
                lack_dhfis = 1;

        DPRINTK("id 0x%x QC: qc_active 0x%x,"
                "SWNCQ:qc_active 0x%X defer_bits %X "
                "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
                ap->print_id, ap->qc_active, pp->qc_active,
                pp->defer_queue.defer_bits, pp->dhfis_bits,
                pp->dmafis_bits, pp->last_issue_tag);

        nv_swncq_fis_reinit(ap);

        if (lack_dhfis) {
                qc = ata_qc_from_tag(ap, pp->last_issue_tag);
                nv_swncq_issue_atacmd(ap, qc);
                return nr_done;
        }

        if (pp->defer_queue.defer_bits) {
                /* send the next deferred command */
                qc = nv_swncq_qc_from_dq(ap);
                WARN_ON(qc == NULL);
                nv_swncq_issue_atacmd(ap, qc);
        }

        return nr_done;
}
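
/*
 * done_mask = pp->qc_active ^ sactive isolates tags whose state changed:
 * a bit that was issued (qc_active) and has dropped out of SActive is a
 * completed command, while a bit set in SActive that we never issued is
 * impossible in normal operation and trips the "illegal transition" check
 * above.
 */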

static inline u32 nv_swncq_tag(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        u32 tag;

        tag = readb(pp->tag_block) >> 2;
        return (tag & 0x1f);
}
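
/*
 * The tag of the DMA Setup FIS the drive wants serviced is published in
 * bits 6:2 of the tag block register, hence the shift and the 0x1f mask.
 */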

static int nv_swncq_dmafis(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        unsigned int rw;
        u8 dmactl;
        u32 tag;
        struct nv_swncq_port_priv *pp = ap->private_data;

        __ata_bmdma_stop(ap);
        tag = nv_swncq_tag(ap);

        DPRINTK("dma setup tag 0x%x\n", tag);
        qc = ata_qc_from_tag(ap, tag);

        if (unlikely(!qc))
                return 0;

        rw = qc->tf.flags & ATA_TFLAG_WRITE;

        /* load PRD table addr. */
        iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
                  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

        /* specify data direction, triple-check start bit is clear */
        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        dmactl &= ~ATA_DMA_WR;
        if (!rw)
                dmactl |= ATA_DMA_WR;

        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

        return 1;
}
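
/*
 * Direction note: in the BMDMA command register, ATA_DMA_WR means the DMA
 * engine writes to memory, i.e. a device read, which is why it is set for
 * !rw and cleared for writes.  The PRD base is pointed at the table of
 * whichever tag the drive selected via its DMA Setup FIS.
 */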

static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_queued_cmd *qc;
        struct ata_eh_info *ehi = &ap->link.eh_info;
        u32 serror;
        u8 ata_stat;
        int rc = 0;

        ata_stat = ap->ops->check_status(ap);
        nv_swncq_irq_clear(ap, fis);
        if (!fis)
                return;

        if (ap->pflags & ATA_PFLAG_FROZEN)
                return;

        if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
                nv_swncq_hotplug(ap, fis);
                return;
        }

        if (!pp->qc_active)
                return;

        if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
                return;
        ap->ops->scr_write(ap, SCR_ERROR, serror);

        if (ata_stat & ATA_ERR) {
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
                ehi->err_mask |= AC_ERR_DEV;
                ehi->serror |= serror;
                ehi->action |= ATA_EH_SOFTRESET;
                ata_port_freeze(ap);
                return;
        }

        if (fis & NV_SWNCQ_IRQ_BACKOUT) {
                /* If the IRQ signals a backout, the driver must
                 * reissue the command again some time later.
                 */
                pp->ncq_flags |= ncq_saw_backout;
        }

        if (fis & NV_SWNCQ_IRQ_SDBFIS) {
                pp->ncq_flags |= ncq_saw_sdb;
                DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
                        "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
                        ap->print_id, pp->qc_active, pp->dhfis_bits,
                        pp->dmafis_bits, readl(pp->sactive_block));
                rc = nv_swncq_sdbfis(ap);
                if (rc < 0)
                        goto irq_error;
        }

        if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
                /* The interrupt indicates the new command
                 * was transmitted correctly to the drive.
                 */
                pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
                pp->ncq_flags |= ncq_saw_d2h;
                if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
                        ata_ehi_push_desc(ehi, "illegal fis transaction");
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_HARDRESET;
                        goto irq_error;
                }

                if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
                    !(pp->ncq_flags & ncq_saw_dmas)) {
                        ata_stat = ap->ops->check_status(ap);
                        if (ata_stat & ATA_BUSY)
                                goto irq_exit;

                        if (pp->defer_queue.defer_bits) {
                                DPRINTK("send next command\n");
                                qc = nv_swncq_qc_from_dq(ap);
                                nv_swncq_issue_atacmd(ap, qc);
                        }
                }
        }

        if (fis & NV_SWNCQ_IRQ_DMASETUP) {
                /* program the dma controller with appropriate PRD buffers
                 * and start the DMA transfer for the requested command.
                 */
                pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
                pp->ncq_flags |= ncq_saw_dmas;
                rc = nv_swncq_dmafis(ap);
        }

irq_exit:
        return;
irq_error:
        ata_ehi_push_desc(ehi, "fis:0x%x", fis);
        ata_port_freeze(ap);
        return;
}
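
/*
 * Per-command flow through this handler: a Device-to-Host Register FIS
 * acknowledges the issue (dhfis_bits), a DMA Setup FIS names the tag the
 * drive wants to transfer next (dmafis_bits, nv_swncq_dmafis() arms
 * BMDMA), and a Set Device Bits FIS retires finished tags via
 * nv_swncq_sdbfis().  Deferred commands are only released once the drive
 * is not busy and no DMA setup is pending.
 */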

static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i;
        unsigned int handled = 0;
        unsigned long flags;
        u32 irq_stat;

        spin_lock_irqsave(&host->lock, flags);

        irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        if (ap->link.sactive) {
                                nv_swncq_host_interrupt(ap, (u16)irq_stat);
                                handled = 1;
                        } else {
                                /* ack everything but the hotplug bits */
                                if (irq_stat)
                                        nv_swncq_irq_clear(ap, 0xfff0);

                                handled += nv_host_intr(ap, (u8)irq_stat);
                        }
                }
                irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
        }

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_RETVAL(handled);
}

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        const struct ata_port_info *ppi[] = { NULL, NULL };
        struct ata_host *host;
        struct nv_host_priv *hpriv;
        int rc;
        u32 bar;
        void __iomem *base;
        unsigned long type = ent->driver_data;

        // Make sure this is a SATA controller by counting the number of bars
        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
        // it's an IDE controller and we ignore it.
        for (bar = 0; bar < 6; bar++)
                if (pci_resource_start(pdev, bar) == 0)
                        return -ENODEV;

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        /* determine type and allocate host */
        if (type == CK804 && adma_enabled) {
                dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
                type = ADMA;
        }

        if (type == SWNCQ) {
                if (swncq_enabled)
                        dev_printk(KERN_NOTICE, &pdev->dev,
                                   "Using SWNCQ mode\n");
                else
                        type = GENERIC;
        }

        ppi[0] = &nv_port_info[type];
        rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
        if (rc)
                return rc;

        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv)
                return -ENOMEM;
        hpriv->type = type;
        host->private_data = hpriv;

        /* set 64bit dma masks, may fail */
        if (type == ADMA) {
                if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
                        pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
        }

        /* request and iomap NV_MMIO_BAR */
        rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
        if (rc)
                return rc;

        /* configure SCR access */
        base = host->iomap[NV_MMIO_BAR];
        host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
        host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

        /* enable SATA space for CK804 */
        if (type >= CK804) {
                u8 regval;

                pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
        }

        /* init ADMA */
        if (type == ADMA) {
                rc = nv_adma_host_init(host);
                if (rc)
                        return rc;
        } else if (type == SWNCQ)
                nv_swncq_host_init(host);

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
                                 IRQF_SHARED, ppi[0]->sht);
}
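
/*
 * Two quirks worth noting in the probe path: the BAR scan rejects any
 * function with an unimplemented BAR, since the SATA variants of these
 * chips expose all six while the IDE variants do not; and the 64-bit DMA
 * mask is attempted only for ADMA, where failure is harmless because the
 * device simply keeps the default 32-bit mask.
 */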

#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct nv_host_priv *hpriv = host->private_data;
        int rc;

        rc = ata_pci_device_do_resume(pdev);
        if (rc)
                return rc;

        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
                if (hpriv->type >= CK804) {
                        u8 regval;

                        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                        regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
                }
                if (hpriv->type == ADMA) {
                        u32 tmp32;
                        struct nv_adma_port_priv *pp;
                        /* enable/disable ADMA on the ports appropriately */
                        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

                        pp = host->ports[0]->private_data;
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        else
                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        pp = host->ports[1]->private_data;
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
                        else
                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

                        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
                }
        }

        ata_host_resume(host);

        return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u8 regval;

        /* disable SATA space for CK804 */
        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
        regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u32 tmp32;

        /* disable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                   NV_MCP_SATA_CFG_20_PORT1_EN |
                   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
        return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
        pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
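
/*
 * Both knobs are load-time only (permissions 0444): for example, booting
 * with "sata_nv.adma=0" keeps CK804/MCP04 parts on the legacy interface,
 * and "sata_nv.swncq=1" enables software NCQ on the chips this driver
 * probes with the SWNCQ type.
 */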