/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember that a couple of workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME        "sata_mv"
#define DRV_VERSION     "1.01"

enum {
        /* BARs are enumerated in pci_resource_start() terms */
        MV_PRIMARY_BAR          = 0,    /* offset 0x10: memory space */
        MV_IO_BAR               = 2,    /* offset 0x18: IO space */
        MV_MISC_BAR             = 3,    /* offset 0x1c: FLASH, NVRAM, SRAM */

        MV_MAJOR_REG_AREA_SZ    = 0x10000,      /* 64KB */
        MV_MINOR_REG_AREA_SZ    = 0x2000,       /* 8KB */

        MV_PCI_REG_BASE         = 0,
        MV_IRQ_COAL_REG_BASE    = 0x18000,      /* 6xxx part only */
        MV_IRQ_COAL_CAUSE               = (MV_IRQ_COAL_REG_BASE + 0x08),
        MV_IRQ_COAL_CAUSE_LO            = (MV_IRQ_COAL_REG_BASE + 0x88),
        MV_IRQ_COAL_CAUSE_HI            = (MV_IRQ_COAL_REG_BASE + 0x8c),
        MV_IRQ_COAL_THRESHOLD           = (MV_IRQ_COAL_REG_BASE + 0xcc),
        MV_IRQ_COAL_TIME_THRESHOLD      = (MV_IRQ_COAL_REG_BASE + 0xd0),

        MV_SATAHC0_REG_BASE     = 0x20000,
        MV_FLASH_CTL            = 0x1046c,
        MV_GPIO_PORT_CTL        = 0x104f0,
        MV_RESET_CFG            = 0x180d8,

        MV_PCI_REG_SZ           = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_REG_SZ        = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_ARBTR_REG_SZ  = MV_MINOR_REG_AREA_SZ,         /* arbiter */
        MV_PORT_REG_SZ          = MV_MINOR_REG_AREA_SZ,

        MV_MAX_Q_DEPTH          = 32,
        MV_MAX_Q_DEPTH_MASK     = MV_MAX_Q_DEPTH - 1,

        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
         * CRPB needs alignment on a 256B boundary. Size == 256B
         * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
         */
        MV_CRQB_Q_SZ            = (32 * MV_MAX_Q_DEPTH),
        MV_CRPB_Q_SZ            = (8 * MV_MAX_Q_DEPTH),
        MV_MAX_SG_CT            = 176,
        MV_SG_TBL_SZ            = (16 * MV_MAX_SG_CT),
        MV_PORT_PRIV_DMA_SZ     = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

        MV_PORTS_PER_HC         = 4,
        /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
        MV_PORT_HC_SHIFT        = 2,
        /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
        MV_PORT_MASK            = 3,

        /* Host Flags */
        MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */
        MV_FLAG_IRQ_COALESCE    = (1 << 29),  /* IRQ coalescing capability */
        MV_COMMON_FLAGS         = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
                                  ATA_FLAG_PIO_POLLING,
        MV_6XXX_FLAGS           = MV_FLAG_IRQ_COALESCE,

        CRQB_FLAG_READ          = (1 << 0),
        CRQB_TAG_SHIFT          = 1,
        CRQB_IOID_SHIFT         = 6,    /* CRQB Gen-II/IIE IO Id shift */
        CRQB_HOSTQ_SHIFT        = 17,   /* CRQB Gen-II/IIE HostQueTag shift */
        CRQB_CMD_ADDR_SHIFT     = 8,
        CRQB_CMD_CS             = (0x2 << 11),
        CRQB_CMD_LAST           = (1 << 15),

        CRPB_FLAG_STATUS_SHIFT  = 8,
        CRPB_IOID_SHIFT_6       = 5,    /* CRPB Gen-II IO Id shift */
        CRPB_IOID_SHIFT_7       = 7,    /* CRPB Gen-IIE IO Id shift */

        EPRD_FLAG_END_OF_TBL    = (1 << 31),

        /* PCI interface registers */

        PCI_COMMAND_OFS         = 0xc00,

        PCI_MAIN_CMD_STS_OFS    = 0xd30,
        STOP_PCI_MASTER         = (1 << 2),
        PCI_MASTER_EMPTY        = (1 << 3),
        GLOB_SFT_RST            = (1 << 4),

        MV_PCI_MODE             = 0xd00,
        MV_PCI_EXP_ROM_BAR_CTL  = 0xd2c,
        MV_PCI_DISC_TIMER       = 0xd04,
        MV_PCI_MSI_TRIGGER      = 0xc38,
        MV_PCI_SERR_MASK        = 0xc28,
        MV_PCI_XBAR_TMOUT       = 0x1d04,
        MV_PCI_ERR_LOW_ADDRESS  = 0x1d40,
        MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
        MV_PCI_ERR_ATTRIBUTE    = 0x1d48,
        MV_PCI_ERR_COMMAND      = 0x1d50,

        PCI_IRQ_CAUSE_OFS       = 0x1d58,
        PCI_IRQ_MASK_OFS        = 0x1d5c,
        PCI_UNMASK_ALL_IRQS     = 0x7fffff,     /* bits 22-0 */

        PCIE_IRQ_CAUSE_OFS      = 0x1900,
        PCIE_IRQ_MASK_OFS       = 0x1910,
        PCIE_UNMASK_ALL_IRQS    = 0x70a,        /* assorted bits */

        HC_MAIN_IRQ_CAUSE_OFS   = 0x1d60,
        HC_MAIN_IRQ_MASK_OFS    = 0x1d64,
        PORT0_ERR               = (1 << 0),     /* shift by port # */
        PORT0_DONE              = (1 << 1),     /* shift by port # */
        HC0_IRQ_PEND            = 0x1ff,        /* bits 0-8 = HC0's ports */
        HC_SHIFT                = 9,            /* bits 9-17 = HC1's ports */
        PCI_ERR                 = (1 << 18),
        TRAN_LO_DONE            = (1 << 19),    /* 6xxx: IRQ coalescing */
        TRAN_HI_DONE            = (1 << 20),    /* 6xxx: IRQ coalescing */
        PORTS_0_3_COAL_DONE     = (1 << 8),
        PORTS_4_7_COAL_DONE     = (1 << 17),
        PORTS_0_7_COAL_DONE     = (1 << 21),    /* 6xxx: IRQ coalescing */
        GPIO_INT                = (1 << 22),
        SELF_INT                = (1 << 23),
        TWSI_INT                = (1 << 24),
        HC_MAIN_RSVD            = (0x7f << 25), /* bits 31-25 */
        HC_MAIN_RSVD_5          = (0x1fff << 19), /* bits 31-19 */
        HC_MAIN_MASKED_IRQS     = (TRAN_LO_DONE | TRAN_HI_DONE |
                                   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
                                   HC_MAIN_RSVD),
        HC_MAIN_MASKED_IRQS_5   = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
                                   HC_MAIN_RSVD_5),

        /* SATAHC registers */
        HC_CFG_OFS              = 0,

        HC_IRQ_CAUSE_OFS        = 0x14,
        CRPB_DMA_DONE           = (1 << 0),     /* shift by port # */
        HC_IRQ_COAL             = (1 << 4),     /* IRQ coalescing */
        DEV_IRQ                 = (1 << 8),     /* shift by port # */

        /* Shadow block registers */
        SHD_BLK_OFS             = 0x100,
        SHD_CTL_AST_OFS         = 0x20,         /* ofs from SHD_BLK_OFS */

        /* SATA registers */
        SATA_STATUS_OFS         = 0x300,  /* ctrl, err regs follow status */
        SATA_ACTIVE_OFS         = 0x350,
        PHY_MODE3               = 0x310,
        PHY_MODE4               = 0x314,
        PHY_MODE2               = 0x330,
        MV5_PHY_MODE            = 0x74,
        MV5_LT_MODE             = 0x30,
        MV5_PHY_CTL             = 0x0C,
        SATA_INTERFACE_CTL      = 0x050,

        MV_M2_PREAMP_MASK       = 0x7e0,

        /* Port registers */
        EDMA_CFG_OFS            = 0,
        EDMA_CFG_Q_DEPTH        = 0,                    /* queueing disabled */
        EDMA_CFG_NCQ            = (1 << 5),
        EDMA_CFG_NCQ_GO_ON_ERR  = (1 << 14),            /* continue on error */
        EDMA_CFG_RD_BRST_EXT    = (1 << 11),            /* read burst 512B */
        EDMA_CFG_WR_BUFF_LEN    = (1 << 13),            /* write buffer 512B */

        EDMA_ERR_IRQ_CAUSE_OFS  = 0x8,
        EDMA_ERR_IRQ_MASK_OFS   = 0xc,
        EDMA_ERR_D_PAR          = (1 << 0),     /* UDMA data parity err */
        EDMA_ERR_PRD_PAR        = (1 << 1),     /* UDMA PRD parity err */
        EDMA_ERR_DEV            = (1 << 2),     /* device error */
        EDMA_ERR_DEV_DCON       = (1 << 3),     /* device disconnect */
        EDMA_ERR_DEV_CON        = (1 << 4),     /* device connected */
        EDMA_ERR_SERR           = (1 << 5),     /* SError bits [WBDST] raised */
        EDMA_ERR_SELF_DIS       = (1 << 7),     /* Gen II/IIE self-disable */
        EDMA_ERR_SELF_DIS_5     = (1 << 8),     /* Gen I self-disable */
        EDMA_ERR_BIST_ASYNC     = (1 << 8),     /* BIST FIS or Async Notify */
        EDMA_ERR_TRANS_IRQ_7    = (1 << 8),     /* Gen IIE transport layer irq */
        EDMA_ERR_CRQB_PAR       = (1 << 9),     /* CRQB parity error */
        EDMA_ERR_CRPB_PAR       = (1 << 10),    /* CRPB parity error */
        EDMA_ERR_INTRL_PAR      = (1 << 11),    /* internal parity error */
        EDMA_ERR_IORDY          = (1 << 12),    /* IORdy timeout */
        EDMA_ERR_LNK_CTRL_RX    = (0xf << 13),  /* link ctrl rx error */
        EDMA_ERR_LNK_CTRL_RX_2  = (1 << 15),
        EDMA_ERR_LNK_DATA_RX    = (0xf << 17),  /* link data rx error */
        EDMA_ERR_LNK_CTRL_TX    = (0x1f << 21), /* link ctrl tx error */
        EDMA_ERR_LNK_DATA_TX    = (0x1f << 26), /* link data tx error */
        EDMA_ERR_TRANS_PROTO    = (1 << 31),    /* transport protocol error */
        EDMA_ERR_OVERRUN_5      = (1 << 5),
        EDMA_ERR_UNDERRUN_5     = (1 << 6),
        EDMA_EH_FREEZE          = EDMA_ERR_D_PAR |
                                  EDMA_ERR_PRD_PAR |
                                  EDMA_ERR_DEV_DCON |
                                  EDMA_ERR_DEV_CON |
                                  EDMA_ERR_SERR |
                                  EDMA_ERR_SELF_DIS |
                                  EDMA_ERR_CRQB_PAR |
                                  EDMA_ERR_CRPB_PAR |
                                  EDMA_ERR_INTRL_PAR |
                                  EDMA_ERR_IORDY |
                                  EDMA_ERR_LNK_CTRL_RX_2 |
                                  EDMA_ERR_LNK_DATA_RX |
                                  EDMA_ERR_LNK_DATA_TX |
                                  EDMA_ERR_TRANS_PROTO,
        EDMA_EH_FREEZE_5        = EDMA_ERR_D_PAR |
                                  EDMA_ERR_PRD_PAR |
                                  EDMA_ERR_DEV_DCON |
                                  EDMA_ERR_DEV_CON |
                                  EDMA_ERR_OVERRUN_5 |
                                  EDMA_ERR_UNDERRUN_5 |
                                  EDMA_ERR_SELF_DIS_5 |
                                  EDMA_ERR_CRQB_PAR |
                                  EDMA_ERR_CRPB_PAR |
                                  EDMA_ERR_INTRL_PAR |
                                  EDMA_ERR_IORDY,

        EDMA_REQ_Q_BASE_HI_OFS  = 0x10,
        EDMA_REQ_Q_IN_PTR_OFS   = 0x14,         /* also contains BASE_LO */

        EDMA_REQ_Q_OUT_PTR_OFS  = 0x18,
        EDMA_REQ_Q_PTR_SHIFT    = 5,

        EDMA_RSP_Q_BASE_HI_OFS  = 0x1c,
        EDMA_RSP_Q_IN_PTR_OFS   = 0x20,
        EDMA_RSP_Q_OUT_PTR_OFS  = 0x24,         /* also contains BASE_LO */
        EDMA_RSP_Q_PTR_SHIFT    = 3,

        EDMA_CMD_OFS            = 0x28,         /* EDMA command register */
        EDMA_EN                 = (1 << 0),     /* enable EDMA */
        EDMA_DS                 = (1 << 1),     /* disable EDMA; self-negated */
        ATA_RST                 = (1 << 2),     /* reset trans/link/phy */

        EDMA_IORDY_TMOUT        = 0x34,
        EDMA_ARB_CFG            = 0x38,

        /* Host private flags (hp_flags) */
        MV_HP_FLAG_MSI          = (1 << 0),
        MV_HP_ERRATA_50XXB0     = (1 << 1),
        MV_HP_ERRATA_50XXB2     = (1 << 2),
        MV_HP_ERRATA_60X1B2     = (1 << 3),
        MV_HP_ERRATA_60X1C0     = (1 << 4),
        MV_HP_ERRATA_XX42A0     = (1 << 5),
        MV_HP_GEN_I             = (1 << 6),     /* Generation I: 50xx */
        MV_HP_GEN_II            = (1 << 7),     /* Generation II: 60xx */
        MV_HP_GEN_IIE           = (1 << 8),     /* Generation IIE: 6042/7042 */
        MV_HP_PCIE              = (1 << 9),     /* PCIe bus/regs: 7042 */

        /* Port private flags (pp_flags) */
        MV_PP_FLAG_EDMA_EN      = (1 << 0),     /* is EDMA engine enabled? */
        MV_PP_FLAG_HAD_A_RESET  = (1 << 2),     /* 1st hard reset complete? */
};
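
/*
 * Worked example of the port numbering above: with MV_PORTS_PER_HC == 4,
 * host port 6 lives on SATAHC 1 (6 >> MV_PORT_HC_SHIFT) as hard port 2
 * (6 & MV_PORT_MASK); see mv_hc_from_port() and mv_hardport_from_port()
 * below.
 */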

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
        /* DMA boundary 0xffff is required by the s/g splitting
         * we need on /length/ in mv_fill_sg().
         */
        MV_DMA_BOUNDARY         = 0xffffU,

        /* mask of register bits containing lower 32 bits
         * of EDMA request queue DMA address
         */
        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

        /* ditto, for response queue */
        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
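
/*
 * Note: these masks follow from the alignment rules above: the CRQB ring
 * is 1KB aligned, so only bits 31:10 of its bus address are kept in the
 * queue pointer registers; the CRPB ring is 256B aligned, bits 31:8.
 */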

enum chip_type {
        chip_504x,
        chip_508x,
        chip_5080,
        chip_604x,
        chip_608x,
        chip_6042,
        chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
        __le32                  sg_addr;
        __le32                  sg_addr_hi;
        __le16                  ctrl_flags;
        __le16                  ata_cmd[11];
};

struct mv_crqb_iie {
        __le32                  addr;
        __le32                  addr_hi;
        __le32                  flags;
        __le32                  len;
        __le32                  ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
        __le16                  id;
        __le16                  flags;
        __le32                  tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
        __le32                  addr;
        __le32                  flags_size;
        __le32                  addr_hi;
        __le32                  reserved;
};
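
/*
 * Per-port private data: CPU/DMA addresses of the CRQB and CRPB rings and
 * the ePRD (SG) table, the software copies of the request/response queue
 * indices, and the MV_PP_FLAG_* state bits.
 */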
struct mv_port_priv {
        struct mv_crqb          *crqb;
        dma_addr_t              crqb_dma;
        struct mv_crpb          *crpb;
        dma_addr_t              crpb_dma;
        struct mv_sg            *sg_tbl;
        dma_addr_t              sg_tbl_dma;

        unsigned int            req_idx;
        unsigned int            resp_idx;

        u32                     pp_flags;
};

struct mv_port_signal {
        u32                     amps;
        u32                     pre;
};

struct mv_host_priv {
        u32                     hp_flags;
        struct mv_port_signal   signal[8];
        const struct mv_hw_ops  *ops;
        u32                     irq_cause_ofs;
        u32                     irq_mask_ofs;
        u32                     unmask_all_irqs;
};

struct mv_hw_ops {
        void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
        void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
        int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
        void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no);

static struct scsi_host_template mv5_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = MV_MAX_SG_CT / 2,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = 1,
        .proc_name              = DRV_NAME,
        .dma_boundary           = MV_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = MV_MAX_SG_CT / 2,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = 1,
        .proc_name              = DRV_NAME,
        .dma_boundary           = MV_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,

        .error_handler          = mv_error_handler,
        .post_internal_cmd      = mv_post_int_cmd,
        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,

        .scr_read               = mv5_scr_read,
        .scr_write              = mv5_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,

        .error_handler          = mv_error_handler,
        .post_internal_cmd      = mv_post_int_cmd,
        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,

        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep_iie,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,

        .error_handler          = mv_error_handler,
        .post_internal_cmd      = mv_post_int_cmd,
        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,

        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
        {  /* chip_504x */
                .flags          = MV_COMMON_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_508x */
                .flags          = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_5080 */
                .flags          = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_604x */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv6_ops,
        },
        {  /* chip_608x */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                                  MV_FLAG_DUAL_HC,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv6_ops,
        },
        {  /* chip_6042 */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
        {  /* chip_7042 */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
};

static const struct pci_device_id mv_pci_tbl[] = {
        { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
        { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
        /* RocketRAID 1740/174x have different identifiers */
        { PCI_VDEVICE(TTI, 0x1740), chip_508x },
        { PCI_VDEVICE(TTI, 0x1742), chip_508x },

        { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
        { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
        { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

        { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

        /* Adaptec 1430SA */
        { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

        /* Marvell 7042 support */
        { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

        /* Highpoint RocketRAID PCIe series */
        { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
        { PCI_VDEVICE(TTI, 0x2310), chip_7042 },

        { }                     /* terminate list */
};

static struct pci_driver mv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = mv_pci_tbl,
        .probe                  = mv_init_one,
        .remove                 = ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
        .phy_errata             = mv5_phy_errata,
        .enable_leds            = mv5_enable_leds,
        .read_preamp            = mv5_read_preamp,
        .reset_hc               = mv5_reset_hc,
        .reset_flash            = mv5_reset_flash,
        .reset_bus              = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
        .phy_errata             = mv6_phy_errata,
        .enable_leds            = mv6_enable_leds,
        .read_preamp            = mv6_read_preamp,
        .reset_hc               = mv6_reset_hc,
        .reset_flash            = mv6_reset_flash,
        .reset_bus              = mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;       /* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
        int rc;

        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (rc) {
                        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (rc) {
                                dev_printk(KERN_ERR, &pdev->dev,
                                           "64-bit DMA enable failed\n");
                                return rc;
                        }
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit consistent DMA enable failed\n");
                        return rc;
                }
        }

        return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
        writel(data, addr);
        (void) readl(addr);     /* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
        return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
        return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
        return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
                                                 unsigned int port)
{
        return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
        return  mv_hc_base_from_port(base, port) +
                MV_SATAHC_ARBTR_REG_SZ +
                (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
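
/*
 * Example of the register layout implied above: port 5 decodes to
 * HC base 0x20000 + 1 * 0x10000, plus the 0x2000 arbiter window, plus
 * hard port 1 * 0x2000, i.e. offset 0x34000 from the BAR 0 mapping.
 */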

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
        return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
        return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
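
/**
 *      mv_set_edma_ptrs - Program EDMA queue base addresses and indices
 *      @port_mmio: port base address
 *      @hpriv: host private data
 *      @pp: port private data
 *
 *      Write the CRQB/CRPB ring bus addresses and the cached request and
 *      response indices into the EDMA queue pointer registers.
 *
 *      LOCKING:
 *      Inherited from caller.
 */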
static void mv_set_edma_ptrs(void __iomem *port_mmio,
                             struct mv_host_priv *hpriv,
                             struct mv_port_priv *pp)
{
        u32 index;

        /*
         * initialize request queue
         */
        index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

        WARN_ON(pp->crqb_dma & 0x3ff);
        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl((pp->crqb_dma & 0xffffffff) | index,
                         port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
        else
                writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

        /*
         * initialize response queue
         */
        index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

        WARN_ON(pp->crpb_dma & 0xff);
        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl((pp->crpb_dma & 0xffffffff) | index,
                         port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
        else
                writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

        writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
                         struct mv_port_priv *pp)
{
        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
                /* clear EDMA event indicators, if any */
                writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

                mv_set_edma_ptrs(base, hpriv, pp);

                writelfl(EDMA_EN, base + EDMA_CMD_OFS);
                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
        }
        WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        u32 reg;
        int i, err = 0;

        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                /* Disable EDMA if active.  The disable bit auto clears. */
                writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
                pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        } else {
                WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
        }

        /* now properly wait for the eDMA to stop */
        for (i = 1000; i > 0; i--) {
                reg = readl(port_mmio + EDMA_CMD_OFS);
                if (!(reg & EDMA_EN))
                        break;

                udelay(100);
        }

        if (reg & EDMA_EN) {
                ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
                err = -EIO;
        }

        return err;
}
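
/* Locked wrapper: takes the host lock around __mv_stop_dma(). */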
static int mv_stop_dma(struct ata_port *ap)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&ap->host->lock, flags);
        rc = __mv_stop_dma(ap);
        spin_unlock_irqrestore(&ap->host->lock, flags);

        return rc;
}

#ifdef ATA_DEBUG
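/* Debug-only helper: dump a register range, four 32-bit words per line. */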
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
        int b, w;
        for (b = 0; b < bytes; ) {
                DPRINTK("%p: ", start + b);
                for (w = 0; b < bytes && w < 4; w++) {
                        printk("%08x ", readl(start + b));
                        b += sizeof(u32);
                }
                printk("\n");
        }
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
        int b, w;
        u32 dw;
        for (b = 0; b < bytes; ) {
                DPRINTK("%02x: ", b);
                for (w = 0; b < bytes && w < 4; w++) {
                        (void) pci_read_config_dword(pdev, b, &dw);
                        printk("%08x ", dw);
                        b += sizeof(u32);
                }
                printk("\n");
        }
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
        void __iomem *hc_base = mv_hc_base(mmio_base,
                                           port >> MV_PORT_HC_SHIFT);
        void __iomem *port_base;
        int start_port, num_ports, p, start_hc, num_hcs, hc;

        if (0 > port) {
                start_hc = start_port = 0;
                num_ports = 8;          /* shld be benign for 4 port devs */
                num_hcs = 2;
        } else {
                start_hc = port >> MV_PORT_HC_SHIFT;
                start_port = port;
                num_ports = num_hcs = 1;
        }
        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
                num_ports > 1 ? num_ports - 1 : start_port);

        if (NULL != pdev) {
                DPRINTK("PCI config space regs:\n");
                mv_dump_pci_cfg(pdev, 0x68);
        }
        DPRINTK("PCI regs:\n");
        mv_dump_mem(mmio_base+0xc00, 0x3c);
        mv_dump_mem(mmio_base+0xd00, 0x34);
        mv_dump_mem(mmio_base+0xf00, 0x4);
        mv_dump_mem(mmio_base+0x1d00, 0x6c);
        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
                hc_base = mv_hc_base(mmio_base, hc);
                DPRINTK("HC regs (HC %i):\n", hc);
                mv_dump_mem(hc_base, 0x1c);
        }
        for (p = start_port; p < start_port + num_ports; p++) {
                port_base = mv_port_base(mmio_base, p);
                DPRINTK("EDMA regs (port %i):\n", p);
                mv_dump_mem(port_base, 0x54);
                DPRINTK("SATA regs (port %i):\n", p);
                mv_dump_mem(port_base+0x300, 0x60);
        }
#endif
}
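
/*
 * Map an SCR register index to its offset within the port's SATA register
 * block; returns 0xffffffffU for registers this controller does not expose.
 */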
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
        unsigned int ofs;

        switch (sc_reg_in) {
        case SCR_STATUS:
        case SCR_CONTROL:
        case SCR_ERROR:
                ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
                break;
        case SCR_ACTIVE:
                ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
                break;
        default:
                ofs = 0xffffffffU;
                break;
        }
        return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                *val = readl(mv_ap_base(ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                writelfl(val, mv_ap_base(ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}
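
/*
 * Set up the per-port EDMA configuration register for non-NCQ operation,
 * applying the Gen I / Gen II / Gen IIE specific bits.
 */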
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
                        void __iomem *port_mmio)
{
        u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

        /* set up non-NCQ EDMA configuration */
        cfg &= ~(1 << 9);       /* disable eQue */

        if (IS_GEN_I(hpriv)) {
                cfg &= ~0x1f;           /* clear queue depth */
                cfg |= (1 << 8);        /* enab config burst size mask */
        }

        else if (IS_GEN_II(hpriv)) {
                cfg &= ~0x1f;           /* clear queue depth */
                cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
                cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
        }

        else if (IS_GEN_IIE(hpriv)) {
                cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
                cfg |= (1 << 22);       /* enab 4-entry host queue cache */
                cfg &= ~(1 << 19);      /* dis 128-entry queue (for now?) */
                cfg |= (1 << 18);       /* enab early completion */
                cfg |= (1 << 17);       /* enab cut-through (dis stor&forwrd) */
                cfg &= ~(1 << 16);      /* dis FIS-based switching (for now) */
                cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */
        }

        writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct mv_host_priv *hpriv = ap->host->private_data;
        struct mv_port_priv *pp;
        void __iomem *port_mmio = mv_ap_base(ap);
        void *mem;
        dma_addr_t mem_dma;
        unsigned long flags;
        int rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
                                  GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

        rc = ata_pad_alloc(ap, dev);
        if (rc)
                return rc;

        /* First item in chunk of DMA memory:
         * 32-slot command request table (CRQB), 32 bytes each in size
         */
        pp->crqb = mem;
        pp->crqb_dma = mem_dma;
        mem += MV_CRQB_Q_SZ;
        mem_dma += MV_CRQB_Q_SZ;

        /* Second item:
         * 32-slot command response table (CRPB), 8 bytes each in size
         */
        pp->crpb = mem;
        pp->crpb_dma = mem_dma;
        mem += MV_CRPB_Q_SZ;
        mem_dma += MV_CRPB_Q_SZ;

        /* Third item:
         * Table of scatter-gather descriptors (ePRD), 16 bytes each
         */
        pp->sg_tbl = mem;
        pp->sg_tbl_dma = mem_dma;

        spin_lock_irqsave(&ap->host->lock, flags);

        mv_edma_cfg(ap, hpriv, port_mmio);

        mv_set_edma_ptrs(port_mmio, hpriv, pp);

        spin_unlock_irqrestore(&ap->host->lock, flags);

        /* Don't turn on EDMA here...do it before DMA commands only.  Else
         * we'll be unable to send non-data, PIO, etc due to restricted access
         * to shadow regs.
         */
        ap->private_data = pp;
        return 0;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
        mv_stop_dma(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
        struct mv_port_priv *pp = qc->ap->private_data;
        struct scatterlist *sg;
        struct mv_sg *mv_sg, *last_sg = NULL;

        mv_sg = pp->sg_tbl;
        ata_for_each_sg(sg, qc) {
                dma_addr_t addr = sg_dma_address(sg);
                u32 sg_len = sg_dma_len(sg);

                while (sg_len) {
                        u32 offset = addr & 0xffff;
                        u32 len = sg_len;

                        if ((offset + sg_len > 0x10000))
                                len = 0x10000 - offset;

                        mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
                        mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
                        mv_sg->flags_size = cpu_to_le32(len & 0xffff);

                        sg_len -= len;
                        addr += len;

                        last_sg = mv_sg;
                        mv_sg++;
                }
        }

        if (likely(last_sg))
                last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
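
/*
 * Example of the 64KB split above: a 12KB segment whose bus address ends
 * in 0xf000 is emitted as a 4KB ePRD entry followed by an 8KB entry, so
 * no single entry crosses a 64KB boundary.
 */

/*
 * Pack one CRQB ATA command word: the register value and shadow register
 * address, plus the fixed CRQB_CMD_CS field and, for the final word in
 * the sequence, the CRQB_CMD_LAST flag.
 */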
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
        u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
                (last ? CRQB_CMD_LAST : 0);
        *cmdw = cpu_to_le16(tmp);
}

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        __le16 *cw;
        struct ata_taskfile *tf;
        u16 flags = 0;
        unsigned in_index;

        if (qc->tf.protocol != ATA_PROT_DMA)
                return;

        /* Fill in command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
                flags |= CRQB_FLAG_READ;
        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;
        flags |= qc->tag << CRQB_IOID_SHIFT;    /* 50xx appears to ignore this */

        /* get current queue index from software */
        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        pp->crqb[in_index].sg_addr =
                cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        pp->crqb[in_index].sg_addr_hi =
                cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

        cw = &pp->crqb[in_index].ata_cmd[0];
        tf = &qc->tf;

        /* Sadly, the CRQB cannot accommodate all registers--there are
         * only 11 bytes...so we must pick and choose required
         * registers based on the command.  So, we drop feature and
         * hob_feature for [RW] DMA commands, but they are needed for
         * NCQ.  NCQ will drop hob_nsect.
         */
        switch (tf->command) {
        case ATA_CMD_READ:
        case ATA_CMD_READ_EXT:
        case ATA_CMD_WRITE:
        case ATA_CMD_WRITE_EXT:
        case ATA_CMD_WRITE_FUA_EXT:
                mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
                break;
#ifdef LIBATA_NCQ               /* FIXME: remove this line when NCQ added */
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_FPDMA_WRITE:
                mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
                mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
                break;
#endif                          /* FIXME: remove this line when NCQ added */
        default:
                /* The only other commands EDMA supports in non-queued and
                 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
                 * of which are defined/used by Linux.  If we get here, this
                 * driver needs work.
                 *
                 * FIXME: modify libata to give qc_prep a return value and
                 * return error here.
                 */
                BUG_ON(tf->command);
                break;
        }
        mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
        mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);    /* last */

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;
        mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        struct mv_crqb_iie *crqb;
        struct ata_taskfile *tf;
        unsigned in_index;
        u32 flags = 0;

        if (qc->tf.protocol != ATA_PROT_DMA)
                return;

        /* Fill in Gen IIE command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
                flags |= CRQB_FLAG_READ;

        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;
        flags |= qc->tag << CRQB_IOID_SHIFT;    /* "I/O Id" is -really-
                                                   what we use as our tag */

        /* get current queue index from software */
        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
        crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        crqb->flags = cpu_to_le32(flags);

        tf = &qc->tf;
        crqb->ata_cmd[0] = cpu_to_le32(
                        (tf->command << 16) |
                        (tf->feature << 24)
                );
        crqb->ata_cmd[1] = cpu_to_le32(
                        (tf->lbal << 0) |
                        (tf->lbam << 8) |
                        (tf->lbah << 16) |
                        (tf->device << 24)
                );
        crqb->ata_cmd[2] = cpu_to_le32(
                        (tf->hob_lbal << 0) |
                        (tf->hob_lbam << 8) |
                        (tf->hob_lbah << 16) |
                        (tf->hob_feature << 24)
                );
        crqb->ata_cmd[3] = cpu_to_le32(
                        (tf->nsect << 0) |
                        (tf->hob_nsect << 8)
                );

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;
        mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        struct mv_host_priv *hpriv = ap->host->private_data;
        u32 in_index;

        if (qc->tf.protocol != ATA_PROT_DMA) {
                /* We're about to send a non-EDMA capable command to the
                 * port.  Turn off EDMA so there won't be problems accessing
                 * shadow block, etc registers.
                 */
                __mv_stop_dma(ap);
                return ata_qc_issue_prot(qc);
        }

        mv_start_dma(port_mmio, hpriv, pp);

        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        /* until we do queuing, the queue should be empty at this point */
        WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
                >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

        pp->req_idx++;

        in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

        /* and write the request in pointer to kick the EDMA to life */
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        return 0;
}
1383
 
1384
/**
1385
 *      mv_err_intr - Handle error interrupts on the port
1386
 *      @ap: ATA channel to manipulate
1387
 *      @qc: affected queued command, or NULL if none
1388
 *
1389
 *      In most cases, just clear the interrupt and move on.  However,
1390
 *      some cases require an eDMA reset, which is done right before
1391
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
1392
 *      clear of pending errors in the SATA SERROR register.  Finally,
1393
 *      if the port disabled DMA, update our cached copy to match.
1394
 *
1395
 *      LOCKING:
1396
 *      Inherited from caller.
1397
 */
1398
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1399
{
1400
        void __iomem *port_mmio = mv_ap_base(ap);
1401
        u32 edma_err_cause, eh_freeze_mask, serr = 0;
1402
        struct mv_port_priv *pp = ap->private_data;
1403
        struct mv_host_priv *hpriv = ap->host->private_data;
1404
        unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1405
        unsigned int action = 0, err_mask = 0;
1406
        struct ata_eh_info *ehi = &ap->link.eh_info;
1407
 
1408
        ata_ehi_clear_desc(ehi);
1409
 
1410
        if (!edma_enabled) {
1411
                /* just a guess: do we need to do this? should we
1412
                 * expand this, and do it in all cases?
1413
                 */
1414
                sata_scr_read(&ap->link, SCR_ERROR, &serr);
1415
                sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1416
        }
1417
 
1418
        edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1419
 
1420
        ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1421
 
1422
        /*
1423
         * all generations share these EDMA error cause bits
1424
         */
1425
 
1426
        if (edma_err_cause & EDMA_ERR_DEV)
1427
                err_mask |= AC_ERR_DEV;
1428
        if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1429
                        EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1430
                        EDMA_ERR_INTRL_PAR)) {
1431
                err_mask |= AC_ERR_ATA_BUS;
1432
                action |= ATA_EH_HARDRESET;
1433
                ata_ehi_push_desc(ehi, "parity error");
1434
        }
1435
        if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1436
                ata_ehi_hotplugged(ehi);
1437
                ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1438
                        "dev disconnect" : "dev connect");
1439
        }
1440
 
1441
        if (IS_GEN_I(hpriv)) {
1442
                eh_freeze_mask = EDMA_EH_FREEZE_5;
1443
 
1444
                if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1445
                        struct mv_port_priv *pp = ap->private_data;
1446
                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1447
                        ata_ehi_push_desc(ehi, "EDMA self-disable");
1448
                }
1449
        } else {
1450
                eh_freeze_mask = EDMA_EH_FREEZE;
1451
 
1452
                if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1453
                        struct mv_port_priv *pp = ap->private_data;
1454
                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1455
                        ata_ehi_push_desc(ehi, "EDMA self-disable");
1456
                }
1457
 
1458
                if (edma_err_cause & EDMA_ERR_SERR) {
1459
                        sata_scr_read(&ap->link, SCR_ERROR, &serr);
1460
                        sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1461
                        err_mask = AC_ERR_ATA_BUS;
1462
                        action |= ATA_EH_HARDRESET;
1463
                }
1464
        }
1465
 
1466
        /* Clear EDMA now that SERR cleanup done */
1467
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1468
 
1469
        if (!err_mask) {
1470
                err_mask = AC_ERR_OTHER;
1471
                action |= ATA_EH_HARDRESET;
1472
        }
1473
 
1474
        ehi->serror |= serr;
1475
        ehi->action |= action;
1476
 
1477
        if (qc)
1478
                qc->err_mask |= err_mask;
1479
        else
1480
                ehi->err_mask |= err_mask;
1481
 
1482
        if (edma_err_cause & eh_freeze_mask)
1483
                ata_port_freeze(ap);
1484
        else
1485
                ata_port_abort(ap);
1486
}
1487
 
1488
static void mv_intr_pio(struct ata_port *ap)
1489
{
1490
        struct ata_queued_cmd *qc;
1491
        u8 ata_status;
1492
 
1493
        /* ignore spurious intr if drive still BUSY */
1494
        ata_status = readb(ap->ioaddr.status_addr);
1495
        if (unlikely(ata_status & ATA_BUSY))
1496
                return;
1497
 
1498
        /* get active ATA command */
1499
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
1500
        if (unlikely(!qc))                      /* no active tag */
1501
                return;
1502
        if (qc->tf.flags & ATA_TFLAG_POLLING)   /* polling; we don't own qc */
1503
                return;
1504
 
1505
        /* and finally, complete the ATA command */
1506
        qc->err_mask |= ac_err_mask(ata_status);
1507
        ata_qc_complete(qc);
1508
}
1509
 
1510
static void mv_intr_edma(struct ata_port *ap)
1511
{
1512
        void __iomem *port_mmio = mv_ap_base(ap);
1513
        struct mv_host_priv *hpriv = ap->host->private_data;
1514
        struct mv_port_priv *pp = ap->private_data;
1515
        struct ata_queued_cmd *qc;
1516
        u32 out_index, in_index;
1517
        bool work_done = false;
1518
 
1519
        /* get h/w response queue pointer */
1520
        in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1521
                        >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1522
 
1523
        while (1) {
1524
                u16 status;
1525
                unsigned int tag;
1526
 
1527
                /* get s/w response queue last-read pointer, and compare */
1528
                out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1529
                if (in_index == out_index)
1530
                        break;
1531
 
1532
                /* 50xx: get active ATA command */
1533
                if (IS_GEN_I(hpriv))
1534
                        tag = ap->link.active_tag;
1535
 
1536
                /* Gen II/IIE: get active ATA command via tag, to enable
1537
                 * support for queueing.  this works transparently for
1538
                 * queued and non-queued modes.
1539
                 */
1540
                else if (IS_GEN_II(hpriv))
1541
                        tag = (le16_to_cpu(pp->crpb[out_index].id)
1542
                                >> CRPB_IOID_SHIFT_6) & 0x3f;
1543
 
1544
                else /* IS_GEN_IIE */
1545
                        tag = (le16_to_cpu(pp->crpb[out_index].id)
1546
                                >> CRPB_IOID_SHIFT_7) & 0x3f;
1547
 
1548
                qc = ata_qc_from_tag(ap, tag);
1549
 
1550
                /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1551
                 * bits (WARNING: might not necessarily be associated
1552
                 * with this command), which -should- be clear
1553
                 * if all is well
1554
                 */
1555
                status = le16_to_cpu(pp->crpb[out_index].flags);
1556
                if (unlikely(status & 0xff)) {
1557
                        mv_err_intr(ap, qc);
1558
                        return;
1559
                }
1560
 
1561
                /* and finally, complete the ATA command */
1562
                if (qc) {
1563
                        qc->err_mask |=
1564
                                ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1565
                        ata_qc_complete(qc);
1566
                }
1567
 
1568
                /* advance software response queue pointer, to
1569
                 * indicate (after the loop completes) to hardware
1570
                 * that we have consumed a response queue entry.
1571
                 */
1572
                work_done = true;
1573
                pp->resp_idx++;
1574
        }
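        /* loop done: if any responses were consumed, publish the new
         * OUT pointer to the hardware below
         */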
1575
 
1576
        if (work_done)
1577
                writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1578
                         (out_index << EDMA_RSP_Q_PTR_SHIFT),
1579
                         port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1580
}
1581
 
1582
/**
1583
 *      mv_host_intr - Handle all interrupts on the given host controller
1584
 *      @host: host specific structure
1585
 *      @relevant: port error bits relevant to this host controller
1586
 *      @hc: which host controller we're to look at
1587
 *
1588
 *      Read then write-clear the HC interrupt status, then walk each
1589
 *      port connected to the HC and see if it needs servicing.  Port
1590
 *      success ints are reported in the HC interrupt status reg, the
1591
 *      port error ints are reported in the higher level main
1592
 *      interrupt status register and thus are passed in via the
1593
 *      'relevant' argument.
1594
 *
1595
 *      LOCKING:
1596
 *      Inherited from caller.
1597
 */
1598
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1599
{
1600
        void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1601
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1602
        u32 hc_irq_cause;
1603
        int port, port0;
1604
 
1605
        if (hc == 0)
1606
                port0 = 0;
1607
        else
1608
                port0 = MV_PORTS_PER_HC;
1609
 
1610
        /* we'll need the HC success int register in most cases */
1611
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1612
        if (!hc_irq_cause)
1613
                return;
1614
 
1615
        writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1616
 
1617
        VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1618
                hc, relevant, hc_irq_cause);
1619
 
1620
        for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1621
                struct ata_port *ap = host->ports[port];
1622
                struct mv_port_priv *pp = ap->private_data;
1623
                int have_err_bits, hard_port, shift;
1624
 
1625
                if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1626
                        continue;
1627
 
1628
                shift = port << 1;              /* (port * 2) */
1629
                if (port >= MV_PORTS_PER_HC) {
1630
                        shift++;        /* skip bit 8 in the HC Main IRQ reg */
1631
                }
1632
                have_err_bits = ((PORT0_ERR << shift) & relevant);
1633
 
1634
                if (unlikely(have_err_bits)) {
1635
                        struct ata_queued_cmd *qc;
1636
 
1637
                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
1638
                        if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1639
                                continue;
1640
 
1641
                        mv_err_intr(ap, qc);
1642
                        continue;
1643
                }
1644
 
1645
                hard_port = mv_hardport_from_port(port); /* range 0..3 */
1646
 
1647
                if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1648
                        if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1649
                                mv_intr_edma(ap);
1650
                } else {
1651
                        if ((DEV_IRQ << hard_port) & hc_irq_cause)
1652
                                mv_intr_pio(ap);
1653
                }
1654
        }
1655
        VPRINTK("EXIT\n");
1656
}
1657
 
1658
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1659
{
1660
        struct mv_host_priv *hpriv = host->private_data;
1661
        struct ata_port *ap;
1662
        struct ata_queued_cmd *qc;
1663
        struct ata_eh_info *ehi;
1664
        unsigned int i, err_mask, printed = 0;
1665
        u32 err_cause;
1666
 
1667
        err_cause = readl(mmio + hpriv->irq_cause_ofs);
1668
 
1669
        dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1670
                   err_cause);
1671
 
1672
        DPRINTK("All regs @ PCI error\n");
1673
        mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1674
 
1675
        writelfl(0, mmio + hpriv->irq_cause_ofs);
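        /* writing zero acknowledges the latched PCI error cause bits */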
1676
 
1677
        for (i = 0; i < host->n_ports; i++) {
1678
                ap = host->ports[i];
1679
                if (!ata_link_offline(&ap->link)) {
1680
                        ehi = &ap->link.eh_info;
1681
                        ata_ehi_clear_desc(ehi);
1682
                        if (!printed++)
1683
                                ata_ehi_push_desc(ehi,
1684
                                        "PCI err cause 0x%08x", err_cause);
1685
                        err_mask = AC_ERR_HOST_BUS;
1686
                        ehi->action = ATA_EH_HARDRESET;
1687
                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
1688
                        if (qc)
1689
                                qc->err_mask |= err_mask;
1690
                        else
1691
                                ehi->err_mask |= err_mask;
1692
 
1693
                        ata_port_freeze(ap);
1694
                }
1695
        }
1696
}
1697
 
1698
/**
1699
 *      mv_interrupt - Main interrupt event handler
1700
 *      @irq: unused
1701
 *      @dev_instance: private data; in this case the host structure
1702
 *
1703
 *      Read the read-only register to determine if any host
1704
 *      controllers have pending interrupts.  If so, call lower level
1705
 *      routine to handle.  Also check for PCI errors which are only
1706
 *      reported here.
1707
 *
1708
 *      LOCKING:
1709
 *      This routine holds the host lock while processing pending
1710
 *      interrupts.
1711
 */
1712
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1713
{
1714
        struct ata_host *host = dev_instance;
1715
        unsigned int hc, handled = 0, n_hcs;
1716
        void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1717
        u32 irq_stat;
1718
 
1719
        irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1720
 
1721
        /* check the cases where we either have nothing pending or have read
1722
         * a bogus register value which can indicate HW removal or PCI fault
1723
         */
1724
        if (!irq_stat || (0xffffffffU == irq_stat))
1725
                return IRQ_NONE;
1726
 
1727
        n_hcs = mv_get_hc_count(host->ports[0]->flags);
1728
        spin_lock(&host->lock);
1729
 
1730
        if (unlikely(irq_stat & PCI_ERR)) {
1731
                mv_pci_error(host, mmio);
1732
                handled = 1;
1733
                goto out_unlock;        /* skip all other HC irq handling */
1734
        }
1735
 
1736
        for (hc = 0; hc < n_hcs; hc++) {
1737
                u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1738
                if (relevant) {
1739
                        mv_host_intr(host, relevant, hc);
1740
                        handled = 1;
1741
                }
1742
        }
1743
 
1744
out_unlock:
1745
        spin_unlock(&host->lock);
1746
 
1747
        return IRQ_RETVAL(handled);
1748
}
1749
 
1750
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1751
{
1752
        void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1753
        unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1754
 
1755
        return hc_mmio + ofs;
1756
}
1757
 
1758
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1759
{
1760
        unsigned int ofs;
1761
 
1762
        switch (sc_reg_in) {
1763
        case SCR_STATUS:
1764
        case SCR_ERROR:
1765
        case SCR_CONTROL:
1766
                ofs = sc_reg_in * sizeof(u32);
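                /* SCR_STATUS, SCR_ERROR and SCR_CONTROL occupy
                 * consecutive 32-bit slots in the 5xxx PHY block
                 */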
1767
                break;
1768
        default:
1769
                ofs = 0xffffffffU;
1770
                break;
1771
        }
1772
        return ofs;
1773
}
1774
 
1775
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1776
{
1777
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1778
        void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1779
        unsigned int ofs = mv5_scr_offset(sc_reg_in);
1780
 
1781
        if (ofs != 0xffffffffU) {
1782
                *val = readl(addr + ofs);
1783
                return 0;
1784
        } else
1785
                return -EINVAL;
1786
}
1787
 
1788
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1789
{
1790
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1791
        void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1792
        unsigned int ofs = mv5_scr_offset(sc_reg_in);
1793
 
1794
        if (ofs != 0xffffffffU) {
1795
                writelfl(val, addr + ofs);
1796
                return 0;
1797
        } else
1798
                return -EINVAL;
1799
}
1800
 
1801
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1802
{
1803
        int early_5080;
1804
 
1805
        early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1806
 
1807
        if (!early_5080) {
1808
                u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1809
                tmp |= (1 << 0);
1810
                writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1811
        }
1812
 
1813
        mv_reset_pci_bus(pdev, mmio);
1814
}
1815
 
1816
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1817
{
1818
        writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1819
}
1820
 
1821
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1822
                           void __iomem *mmio)
1823
{
1824
        void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1825
        u32 tmp;
1826
 
1827
        tmp = readl(phy_mmio + MV5_PHY_MODE);
1828
 
1829
        hpriv->signal[idx].pre = tmp & 0x1800;  /* bits 12:11 */
1830
        hpriv->signal[idx].amps = tmp & 0xe0;   /* bits 7:5 */
1831
}
1832
 
1833
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1834
{
1835
        u32 tmp;
1836
 
1837
        writel(0, mmio + MV_GPIO_PORT_CTL);
1838
 
1839
        /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1840
 
1841
        tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1842
        tmp |= ~(1 << 0);
1843
        writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1844
}
1845
 
1846
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1847
                           unsigned int port)
1848
{
1849
        void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1850
        const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1851
        u32 tmp;
1852
        int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1853
 
1854
        if (fix_apm_sq) {
1855
                tmp = readl(phy_mmio + MV5_LT_MODE);
1856
                tmp |= (1 << 19);
1857
                writel(tmp, phy_mmio + MV5_LT_MODE);
1858
 
1859
                tmp = readl(phy_mmio + MV5_PHY_CTL);
1860
                tmp &= ~0x3;
1861
                tmp |= 0x1;
1862
                writel(tmp, phy_mmio + MV5_PHY_CTL);
1863
        }
1864
 
1865
        tmp = readl(phy_mmio + MV5_PHY_MODE);
1866
        tmp &= ~mask;
1867
        tmp |= hpriv->signal[port].pre;
1868
        tmp |= hpriv->signal[port].amps;
1869
        writel(tmp, phy_mmio + MV5_PHY_MODE);
1870
}
1871
 
1872
 
1873
#undef ZERO
1874
#define ZERO(reg) writel(0, port_mmio + (reg))
1875
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1876
                             unsigned int port)
1877
{
1878
        void __iomem *port_mmio = mv_port_base(mmio, port);
1879
 
1880
        writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1881
 
1882
        mv_channel_reset(hpriv, mmio, port);
1883
 
1884
        ZERO(0x028);    /* command */
1885
        writel(0x11f, port_mmio + EDMA_CFG_OFS);
1886
        ZERO(0x004);    /* timer */
1887
        ZERO(0x008);    /* irq err cause */
1888
        ZERO(0x00c);    /* irq err mask */
1889
        ZERO(0x010);    /* rq bah */
1890
        ZERO(0x014);    /* rq inp */
1891
        ZERO(0x018);    /* rq outp */
1892
        ZERO(0x01c);    /* respq bah */
1893
        ZERO(0x024);    /* respq outp */
1894
        ZERO(0x020);    /* respq inp */
1895
        ZERO(0x02c);    /* test control */
1896
        writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1897
}
1898
#undef ZERO
1899
 
1900
#define ZERO(reg) writel(0, hc_mmio + (reg))
1901
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1902
                        unsigned int hc)
1903
{
1904
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1905
        u32 tmp;
1906
 
1907
        ZERO(0x00c);
1908
        ZERO(0x010);
1909
        ZERO(0x014);
1910
        ZERO(0x018);
1911
 
1912
        tmp = readl(hc_mmio + 0x20);
1913
        tmp &= 0x1c1c1c1c;
1914
        tmp |= 0x03030303;
1915
        writel(tmp, hc_mmio + 0x20);
1916
}
1917
#undef ZERO
1918
 
1919
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1920
                        unsigned int n_hc)
1921
{
1922
        unsigned int hc, port;
1923
 
1924
        for (hc = 0; hc < n_hc; hc++) {
1925
                for (port = 0; port < MV_PORTS_PER_HC; port++)
1926
                        mv5_reset_hc_port(hpriv, mmio,
1927
                                          (hc * MV_PORTS_PER_HC) + port);
1928
 
1929
                mv5_reset_one_hc(hpriv, mmio, hc);
1930
        }
1931
 
1932
        return 0;
1933
}
1934
 
1935
#undef ZERO
1936
#define ZERO(reg) writel(0, mmio + (reg))
1937
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1938
{
1939
        struct ata_host     *host = dev_get_drvdata(&pdev->dev);
1940
        struct mv_host_priv *hpriv = host->private_data;
1941
        u32 tmp;
1942
 
1943
        tmp = readl(mmio + MV_PCI_MODE);
1944
        tmp &= 0xff00ffff;
1945
        writel(tmp, mmio + MV_PCI_MODE);
1946
 
1947
        ZERO(MV_PCI_DISC_TIMER);
1948
        ZERO(MV_PCI_MSI_TRIGGER);
1949
        writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1950
        ZERO(HC_MAIN_IRQ_MASK_OFS);
1951
        ZERO(MV_PCI_SERR_MASK);
1952
        ZERO(hpriv->irq_cause_ofs);
1953
        ZERO(hpriv->irq_mask_ofs);
1954
        ZERO(MV_PCI_ERR_LOW_ADDRESS);
1955
        ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1956
        ZERO(MV_PCI_ERR_ATTRIBUTE);
1957
        ZERO(MV_PCI_ERR_COMMAND);
1958
}
1959
#undef ZERO
1960
 
1961
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1962
{
1963
        u32 tmp;
1964
 
1965
        mv5_reset_flash(hpriv, mmio);
1966
 
1967
        tmp = readl(mmio + MV_GPIO_PORT_CTL);
1968
        tmp &= 0x3;
1969
        tmp |= (1 << 5) | (1 << 6);
1970
        writel(tmp, mmio + MV_GPIO_PORT_CTL);
1971
}
1972
 
1973
/**
1974
 *      mv6_reset_hc - Perform the 6xxx global soft reset
1975
 *      @mmio: base address of the HBA
1976
 *
1977
 *      This routine only applies to 6xxx parts.
1978
 *
1979
 *      LOCKING:
1980
 *      Inherited from caller.
1981
 */
1982
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1983
                        unsigned int n_hc)
1984
{
1985
        void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1986
        int i, rc = 0;
1987
        u32 t;
1988
 
1989
        /* Following procedure defined in PCI "main command and status
1990
         * register" table.
1991
         */
1992
        t = readl(reg);
1993
        writel(t | STOP_PCI_MASTER, reg);
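        /* request the PCI master to stop; the loop below polls up to
         * ~1ms for PCI_MASTER_EMPTY
         */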
1994
 
1995
        for (i = 0; i < 1000; i++) {
1996
                udelay(1);
1997
                t = readl(reg);
1998
                if (PCI_MASTER_EMPTY & t)
1999
                        break;
2000
        }
2001
        if (!(PCI_MASTER_EMPTY & t)) {
2002
                printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2003
                rc = 1;
2004
                goto done;
2005
        }
2006
 
2007
        /* set reset */
2008
        i = 5;
2009
        do {
2010
                writel(t | GLOB_SFT_RST, reg);
2011
                t = readl(reg);
2012
                udelay(1);
2013
        } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2014
 
2015
        if (!(GLOB_SFT_RST & t)) {
2016
                printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2017
                rc = 1;
2018
                goto done;
2019
        }
2020
 
2021
        /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2022
        i = 5;
2023
        do {
2024
                writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2025
                t = readl(reg);
2026
                udelay(1);
2027
        } while ((GLOB_SFT_RST & t) && (i-- > 0));
2028
 
2029
        if (GLOB_SFT_RST & t) {
2030
                printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2031
                rc = 1;
2032
        }
2033
done:
2034
        return rc;
2035
}
2036
 
2037
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2038
                           void __iomem *mmio)
2039
{
2040
        void __iomem *port_mmio;
2041
        u32 tmp;
2042
 
2043
        tmp = readl(mmio + MV_RESET_CFG);
2044
        if ((tmp & (1 << 0)) == 0) {
2045
                hpriv->signal[idx].amps = 0x7 << 8;
2046
                hpriv->signal[idx].pre = 0x1 << 5;
2047
                return;
2048
        }
2049
 
2050
        port_mmio = mv_port_base(mmio, idx);
2051
        tmp = readl(port_mmio + PHY_MODE2);
2052
 
2053
        hpriv->signal[idx].amps = tmp & 0x700;  /* bits 10:8 */
2054
        hpriv->signal[idx].pre = tmp & 0xe0;    /* bits 7:5 */
2055
}
2056
 
2057
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2058
{
2059
        writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2060
}
2061
 
2062
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2063
                           unsigned int port)
2064
{
2065
        void __iomem *port_mmio = mv_port_base(mmio, port);
2066
 
2067
        u32 hp_flags = hpriv->hp_flags;
2068
        int fix_phy_mode2 =
2069
                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2070
        int fix_phy_mode4 =
2071
                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2072
        u32 m2, tmp;
2073
 
2074
        if (fix_phy_mode2) {
2075
                m2 = readl(port_mmio + PHY_MODE2);
2076
                m2 &= ~(1 << 16);
2077
                m2 |= (1 << 31);
2078
                writel(m2, port_mmio + PHY_MODE2);
2079
 
2080
                udelay(200);
2081
 
2082
                m2 = readl(port_mmio + PHY_MODE2);
2083
                m2 &= ~((1 << 16) | (1 << 31));
2084
                writel(m2, port_mmio + PHY_MODE2);
2085
 
2086
                udelay(200);
2087
        }
2088
 
2089
        /* who knows what this magic does */
2090
        tmp = readl(port_mmio + PHY_MODE3);
2091
        tmp &= ~0x7F800000;
2092
        tmp |= 0x2A800000;
2093
        writel(tmp, port_mmio + PHY_MODE3);
2094
 
2095
        if (fix_phy_mode4) {
2096
                u32 m4;
2097
 
2098
                m4 = readl(port_mmio + PHY_MODE4);
2099
 
2100
                if (hp_flags & MV_HP_ERRATA_60X1B2)
2101
                        tmp = readl(port_mmio + 0x310);
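                /* for 60X1B2 parts, register 0x310 is saved here and
                 * restored after the PHY_MODE4 write below
                 */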
2102
 
2103
                m4 = (m4 & ~(1 << 1)) | (1 << 0);
2104
 
2105
                writel(m4, port_mmio + PHY_MODE4);
2106
 
2107
                if (hp_flags & MV_HP_ERRATA_60X1B2)
2108
                        writel(tmp, port_mmio + 0x310);
2109
        }
2110
 
2111
        /* Revert values of pre-emphasis and signal amps to the saved ones */
2112
        m2 = readl(port_mmio + PHY_MODE2);
2113
 
2114
        m2 &= ~MV_M2_PREAMP_MASK;
2115
        m2 |= hpriv->signal[port].amps;
2116
        m2 |= hpriv->signal[port].pre;
2117
        m2 &= ~(1 << 16);
2118
 
2119
        /* according to mvSata 3.6.1, some IIE values are fixed */
2120
        if (IS_GEN_IIE(hpriv)) {
2121
                m2 &= ~0xC30FF01F;
2122
                m2 |= 0x0000900F;
2123
        }
2124
 
2125
        writel(m2, port_mmio + PHY_MODE2);
2126
}
2127
 
2128
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2129
                             unsigned int port_no)
2130
{
2131
        void __iomem *port_mmio = mv_port_base(mmio, port_no);
2132
 
2133
        writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2134
 
2135
        if (IS_GEN_II(hpriv)) {
2136
                u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2137
                ifctl |= (1 << 7);              /* enable gen2i speed */
2138
                ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2139
                writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2140
        }
2141
 
2142
        udelay(25);             /* allow reset propagation */
2143
 
2144
        /* Spec never mentions clearing the bit.  Marvell's driver does
2145
         * clear the bit, however.
2146
         */
2147
        writelfl(0, port_mmio + EDMA_CMD_OFS);
2148
 
2149
        hpriv->ops->phy_errata(hpriv, mmio, port_no);
2150
 
2151
        if (IS_GEN_I(hpriv))
2152
                mdelay(1);
2153
}
2154
 
2155
/**
2156
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
2157
 *      @ap: ATA channel to manipulate
2158
 *
2159
 *      Part of this is taken from __sata_phy_reset; note that this
2159
 *      routine sleeps (msleep) and is called from the EH reset path.
2161
 *
2162
 *      LOCKING:
2163
 *      Inherited from caller.  This routine calls msleep(), so it
2164
 *      must not be used from contexts that cannot sleep.
2165
 */
2166
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2167
                         unsigned long deadline)
2168
{
2169
        struct mv_port_priv *pp = ap->private_data;
2170
        struct mv_host_priv *hpriv = ap->host->private_data;
2171
        void __iomem *port_mmio = mv_ap_base(ap);
2172
        int retry = 5;
2173
        u32 sstatus;
2174
 
2175
        VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2176
 
2177
#ifdef DEBUG
2178
        {
2179
                u32 sstatus, serror, scontrol;
2180
 
2181
                mv_scr_read(ap, SCR_STATUS, &sstatus);
2182
                mv_scr_read(ap, SCR_ERROR, &serror);
2183
                mv_scr_read(ap, SCR_CONTROL, &scontrol);
2184
                DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2185
                        "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2186
        }
2187
#endif
2188
 
2189
        /* Issue COMRESET via SControl */
2190
comreset_retry:
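        /* assert COMRESET (SControl DET=1), wait, then release it (DET=0) below */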
2191
        sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2192
        msleep(1);
2193
 
2194
        sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2195
        msleep(20);
2196
 
2197
        do {
2198
                sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2199
                if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2200
                        break;
2201
 
2202
                msleep(1);
2203
        } while (time_before(jiffies, deadline));
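        /* the loop above exits early once DET reads 3 (device detected,
         * PHY online) or 0 (no device attached)
         */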
2204
 
2205
        /* work around errata */
2206
        if (IS_GEN_II(hpriv) &&
2207
            (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2208
            (retry-- > 0))
2209
                goto comreset_retry;
2210
 
2211
#ifdef DEBUG
2212
        {
2213
                u32 sstatus, serror, scontrol;
2214
 
2215
                mv_scr_read(ap, SCR_STATUS, &sstatus);
2216
                mv_scr_read(ap, SCR_ERROR, &serror);
2217
                mv_scr_read(ap, SCR_CONTROL, &scontrol);
2218
                DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2219
                        "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2220
        }
2221
#endif
2222
 
2223
        if (ata_link_offline(&ap->link)) {
2224
                *class = ATA_DEV_NONE;
2225
                return;
2226
        }
2227
 
2228
        /* even after SStatus reflects that device is ready,
2229
         * it seems to take a while for link to be fully
2230
         * established (and thus Status no longer 0x80/0x7F),
2231
         * so we poll a bit for that, here.
2232
         */
2233
        retry = 20;
2234
        while (1) {
2235
                u8 drv_stat = ata_check_status(ap);
2236
                if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2237
                        break;
2238
                msleep(500);
2239
                if (retry-- <= 0)
2240
                        break;
2241
                if (time_after(jiffies, deadline))
2242
                        break;
2243
        }
2244
 
2245
        /* FIXME: if we passed the deadline, the following
2246
         * code probably produces an invalid result
2247
         */
2248
 
2249
        /* finally, read device signature from TF registers */
2250
        *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2251
 
2252
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2253
 
2254
        WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2255
 
2256
        VPRINTK("EXIT\n");
2257
}
2258
 
2259
static int mv_prereset(struct ata_link *link, unsigned long deadline)
2260
{
2261
        struct ata_port *ap = link->ap;
2262
        struct mv_port_priv *pp = ap->private_data;
2263
        struct ata_eh_context *ehc = &link->eh_context;
2264
        int rc;
2265
 
2266
        rc = mv_stop_dma(ap);
2267
        if (rc)
2268
                ehc->i.action |= ATA_EH_HARDRESET;
2269
 
2270
        if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2271
                pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2272
                ehc->i.action |= ATA_EH_HARDRESET;
2273
        }
2274
 
2275
        /* if we're about to do hardreset, nothing more to do */
2276
        if (ehc->i.action & ATA_EH_HARDRESET)
2277
                return 0;
2278
 
2279
        if (ata_link_online(link))
2280
                rc = ata_wait_ready(ap, deadline);
2281
        else
2282
                rc = -ENODEV;
2283
 
2284
        return rc;
2285
}
2286
 
2287
static int mv_hardreset(struct ata_link *link, unsigned int *class,
2288
                        unsigned long deadline)
2289
{
2290
        struct ata_port *ap = link->ap;
2291
        struct mv_host_priv *hpriv = ap->host->private_data;
2292
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2293
 
2294
        mv_stop_dma(ap);
2295
 
2296
        mv_channel_reset(hpriv, mmio, ap->port_no);
2297
 
2298
        mv_phy_reset(ap, class, deadline);
2299
 
2300
        return 0;
2301
}
2302
 
2303
static void mv_postreset(struct ata_link *link, unsigned int *classes)
2304
{
2305
        struct ata_port *ap = link->ap;
2306
        u32 serr;
2307
 
2308
        /* print link status */
2309
        sata_print_link_status(link);
2310
 
2311
        /* clear SError */
2312
        sata_scr_read(link, SCR_ERROR, &serr);
2313
        sata_scr_write_flush(link, SCR_ERROR, serr);
2314
 
2315
        /* bail out if no device is present */
2316
        if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2317
                DPRINTK("EXIT, no device\n");
2318
                return;
2319
        }
2320
 
2321
        /* set up device control */
2322
        iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2323
}
2324
 
2325
static void mv_error_handler(struct ata_port *ap)
2326
{
2327
        ata_do_eh(ap, mv_prereset, ata_std_softreset,
2328
                  mv_hardreset, mv_postreset);
2329
}
2330
 
2331
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2332
{
2333
        mv_stop_dma(qc->ap);
2334
}
2335
 
2336
static void mv_eh_freeze(struct ata_port *ap)
2337
{
2338
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2339
        unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2340
        u32 tmp, mask;
2341
        unsigned int shift;
2342
 
2343
        /* FIXME: handle coalescing completion events properly */
2344
 
2345
        shift = ap->port_no * 2;
2346
        if (hc > 0)
2347
                shift++;
2348
 
2349
        mask = 0x3 << shift;
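        /* each port owns two adjacent bits (error, done) in the main IRQ mask */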
2350
 
2351
        /* disable assertion of portN err, done events */
2352
        tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2353
        writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2354
}
2355
 
2356
static void mv_eh_thaw(struct ata_port *ap)
2357
{
2358
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2359
        unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2360
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2361
        void __iomem *port_mmio = mv_ap_base(ap);
2362
        u32 tmp, mask, hc_irq_cause;
2363
        unsigned int shift, hc_port_no = ap->port_no;
2364
 
2365
        /* FIXME: handle coalescing completion events properly */
2366
 
2367
        shift = ap->port_no * 2;
2368
        if (hc > 0) {
2369
                shift++;
2370
                hc_port_no -= 4;
2371
        }
2372
 
2373
        mask = 0x3 << shift;
2374
 
2375
        /* clear EDMA errors on this port */
2376
        writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2377
 
2378
        /* clear pending irq events */
2379
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2380
        hc_irq_cause &= ~(1 << hc_port_no);     /* clear CRPB-done */
2381
        hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2382
        writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2383
 
2384
        /* enable assertion of portN err, done events */
2385
        tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2386
        writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2387
}
2388
 
2389
/**
2390
 *      mv_port_init - Perform some early initialization on a single port.
2391
 *      @port: libata data structure storing shadow register addresses
2392
 *      @port_mmio: base address of the port
2393
 *
2394
 *      Initialize shadow register mmio addresses, clear outstanding
2395
 *      interrupts on the port, and unmask interrupts for the future
2396
 *      start of the port.
2397
 *
2398
 *      LOCKING:
2399
 *      Inherited from caller.
2400
 */
2401
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2402
{
2403
        void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2404
        unsigned serr_ofs;
2405
 
2406
        /* PIO related setup
2407
         */
2408
        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2409
        port->error_addr =
2410
                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2411
        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2412
        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2413
        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2414
        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2415
        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2416
        port->status_addr =
2417
                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2418
        /* special case: control/altstatus doesn't have ATA_REG_ address */
2419
        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2420
 
2421
        /* unused: */
2422
        port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2423
 
2424
        /* Clear any currently outstanding port interrupt conditions */
2425
        serr_ofs = mv_scr_offset(SCR_ERROR);
2426
        writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
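        /* SError is write-one-to-clear, so writing back the value just
         * read clears all latched error bits
         */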
2427
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2428
 
2429
        /* unmask all EDMA error interrupts */
2430
        writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2431
 
2432
        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2433
                readl(port_mmio + EDMA_CFG_OFS),
2434
                readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2435
                readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2436
}
2437
 
2438
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2439
{
2440
        struct pci_dev *pdev = to_pci_dev(host->dev);
2441
        struct mv_host_priv *hpriv = host->private_data;
2442
        u32 hp_flags = hpriv->hp_flags;
2443
 
2444
        switch (board_idx) {
2445
        case chip_5080:
2446
                hpriv->ops = &mv5xxx_ops;
2447
                hp_flags |= MV_HP_GEN_I;
2448
 
2449
                switch (pdev->revision) {
2450
                case 0x1:
2451
                        hp_flags |= MV_HP_ERRATA_50XXB0;
2452
                        break;
2453
                case 0x3:
2454
                        hp_flags |= MV_HP_ERRATA_50XXB2;
2455
                        break;
2456
                default:
2457
                        dev_printk(KERN_WARNING, &pdev->dev,
2458
                           "Applying 50XXB2 workarounds to unknown rev\n");
2459
                        hp_flags |= MV_HP_ERRATA_50XXB2;
2460
                        break;
2461
                }
2462
                break;
2463
 
2464
        case chip_504x:
2465
        case chip_508x:
2466
                hpriv->ops = &mv5xxx_ops;
2467
                hp_flags |= MV_HP_GEN_I;
2468
 
2469
                switch (pdev->revision) {
2470
                case 0x0:
2471
                        hp_flags |= MV_HP_ERRATA_50XXB0;
2472
                        break;
2473
                case 0x3:
2474
                        hp_flags |= MV_HP_ERRATA_50XXB2;
2475
                        break;
2476
                default:
2477
                        dev_printk(KERN_WARNING, &pdev->dev,
2478
                           "Applying B2 workarounds to unknown rev\n");
2479
                        hp_flags |= MV_HP_ERRATA_50XXB2;
2480
                        break;
2481
                }
2482
                break;
2483
 
2484
        case chip_604x:
2485
        case chip_608x:
2486
                hpriv->ops = &mv6xxx_ops;
2487
                hp_flags |= MV_HP_GEN_II;
2488
 
2489
                switch (pdev->revision) {
2490
                case 0x7:
2491
                        hp_flags |= MV_HP_ERRATA_60X1B2;
2492
                        break;
2493
                case 0x9:
2494
                        hp_flags |= MV_HP_ERRATA_60X1C0;
2495
                        break;
2496
                default:
2497
                        dev_printk(KERN_WARNING, &pdev->dev,
2498
                                   "Applying B2 workarounds to unknown rev\n");
2499
                        hp_flags |= MV_HP_ERRATA_60X1B2;
2500
                        break;
2501
                }
2502
                break;
2503
 
2504
        case chip_7042:
2505
                hp_flags |= MV_HP_PCIE;
2506
                if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2507
                    (pdev->device == 0x2300 || pdev->device == 0x2310))
2508
                {
2509
                        /*
2510
                         * Highpoint RocketRAID PCIe 23xx series cards:
2511
                         *
2512
                         * Unconfigured drives are treated as "Legacy"
2513
                         * by the BIOS, and it overwrites sector 8 with
2514
                         * a "Lgcy" metadata block prior to Linux boot.
2515
                         *
2516
                         * Configured drives (RAID or JBOD) leave sector 8
2517
                         * alone, but instead overwrite a high numbered
2518
                         * sector for the RAID metadata.  This sector can
2519
                         * be determined exactly, by truncating the physical
2520
                         * drive capacity to a nice even GB value.
2521
                         *
2522
                         * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2523
                         *
2524
                         * Warn the user, lest they think we're just buggy.
2525
                         */
2526
                        printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2527
                                " BIOS CORRUPTS DATA on all attached drives,"
2528
                                " regardless of if/how they are configured."
2529
                                " BEWARE!\n");
2530
                        printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2531
                                " use sectors 8-9 on \"Legacy\" drives,"
2532
                                " and avoid the final two gigabytes on"
2533
                                " all RocketRAID BIOS initialized drives.\n");
2534
                }
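                /* fall through: the 7042 is the PCIe variant of the 6042
                 * and uses the same Gen IIE setup
                 */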
2535
        case chip_6042:
2536
                hpriv->ops = &mv6xxx_ops;
2537
                hp_flags |= MV_HP_GEN_IIE;
2538
 
2539
                switch (pdev->revision) {
2540
                case 0x0:
2541
                        hp_flags |= MV_HP_ERRATA_XX42A0;
2542
                        break;
2543
                case 0x1:
2544
                        hp_flags |= MV_HP_ERRATA_60X1C0;
2545
                        break;
2546
                default:
2547
                        dev_printk(KERN_WARNING, &pdev->dev,
2548
                           "Applying 60X1C0 workarounds to unknown rev\n");
2549
                        hp_flags |= MV_HP_ERRATA_60X1C0;
2550
                        break;
2551
                }
2552
                break;
2553
 
2554
        default:
2555
                dev_printk(KERN_ERR, &pdev->dev,
2556
                           "BUG: invalid board index %u\n", board_idx);
2557
                return 1;
2558
        }
2559
 
2560
        hpriv->hp_flags = hp_flags;
2561
        if (hp_flags & MV_HP_PCIE) {
2562
                hpriv->irq_cause_ofs    = PCIE_IRQ_CAUSE_OFS;
2563
                hpriv->irq_mask_ofs     = PCIE_IRQ_MASK_OFS;
2564
                hpriv->unmask_all_irqs  = PCIE_UNMASK_ALL_IRQS;
2565
        } else {
2566
                hpriv->irq_cause_ofs    = PCI_IRQ_CAUSE_OFS;
2567
                hpriv->irq_mask_ofs     = PCI_IRQ_MASK_OFS;
2568
                hpriv->unmask_all_irqs  = PCI_UNMASK_ALL_IRQS;
2569
        }
2570
 
2571
        return 0;
2572
}
2573
 
2574
/**
2575
 *      mv_init_host - Perform some early initialization of the host.
2576
 *      @host: ATA host to initialize
2577
 *      @board_idx: controller index
2578
 *
2579
 *      If possible, do an early global reset of the host.  Then do
2580
 *      our port init and clear/unmask all/relevant host interrupts.
2581
 *
2582
 *      LOCKING:
2583
 *      Inherited from caller.
2584
 */
2585
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2586
{
2587
        int rc = 0, n_hc, port, hc;
2588
        struct pci_dev *pdev = to_pci_dev(host->dev);
2589
        void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2590
        struct mv_host_priv *hpriv = host->private_data;
2591
 
2592
        /* global interrupt mask */
2593
        writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2594
 
2595
        rc = mv_chip_id(host, board_idx);
2596
        if (rc)
2597
                goto done;
2598
 
2599
        n_hc = mv_get_hc_count(host->ports[0]->flags);
2600
 
2601
        for (port = 0; port < host->n_ports; port++)
2602
                hpriv->ops->read_preamp(hpriv, port, mmio);
2603
 
2604
        rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2605
        if (rc)
2606
                goto done;
2607
 
2608
        hpriv->ops->reset_flash(hpriv, mmio);
2609
        hpriv->ops->reset_bus(pdev, mmio);
2610
        hpriv->ops->enable_leds(hpriv, mmio);
2611
 
2612
        for (port = 0; port < host->n_ports; port++) {
2613
                if (IS_GEN_II(hpriv)) {
2614
                        void __iomem *port_mmio = mv_port_base(mmio, port);
2615
 
2616
                        u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2617
                        ifctl |= (1 << 7);              /* enable gen2i speed */
2618
                        ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2619
                        writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2620
                }
2621
 
2622
                hpriv->ops->phy_errata(hpriv, mmio, port);
2623
        }
2624
 
2625
        for (port = 0; port < host->n_ports; port++) {
2626
                struct ata_port *ap = host->ports[port];
2627
                void __iomem *port_mmio = mv_port_base(mmio, port);
2628
                unsigned int offset = port_mmio - mmio;
2629
 
2630
                mv_port_init(&ap->ioaddr, port_mmio);
2631
 
2632
                ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2633
                ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2634
        }
2635
 
2636
        for (hc = 0; hc < n_hc; hc++) {
2637
                void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2638
 
2639
                VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2640
                        "(before clear)=0x%08x\n", hc,
2641
                        readl(hc_mmio + HC_CFG_OFS),
2642
                        readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2643
 
2644
                /* Clear any currently outstanding hc interrupt conditions */
2645
                writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2646
        }
2647
 
2648
        /* Clear any currently outstanding host interrupt conditions */
2649
        writelfl(0, mmio + hpriv->irq_cause_ofs);
2650
 
2651
        /* and unmask interrupt generation for host regs */
2652
        writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2653
 
2654
        if (IS_GEN_I(hpriv))
2655
                writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2656
        else
2657
                writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2658
 
2659
        VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2660
                "PCI int cause/mask=0x%08x/0x%08x\n",
2661
                readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2662
                readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2663
                readl(mmio + hpriv->irq_cause_ofs),
2664
                readl(mmio + hpriv->irq_mask_ofs));
2665
 
2666
done:
2667
        return rc;
2668
}
2669
 
2670
/**
2671
 *      mv_print_info - Dump key info to kernel log for perusal.
2672
 *      @host: ATA host to print info about
2673
 *
2674
 *      FIXME: complete this.
2675
 *
2676
 *      LOCKING:
2677
 *      Inherited from caller.
2678
 */
2679
static void mv_print_info(struct ata_host *host)
2680
{
2681
        struct pci_dev *pdev = to_pci_dev(host->dev);
2682
        struct mv_host_priv *hpriv = host->private_data;
2683
        u8 scc;
2684
        const char *scc_s, *gen;
2685
 
2686
        /* Read the PCI class code so we can report whether the chip
2687
         * presents itself as a SCSI or RAID mode controller.
2688
         */
2689
        pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2690
        if (scc == 0)
2691
                scc_s = "SCSI";
2692
        else if (scc == 0x01)
2693
                scc_s = "RAID";
2694
        else
2695
                scc_s = "?";
2696
 
2697
        if (IS_GEN_I(hpriv))
2698
                gen = "I";
2699
        else if (IS_GEN_II(hpriv))
2700
                gen = "II";
2701
        else if (IS_GEN_IIE(hpriv))
2702
                gen = "IIE";
2703
        else
2704
                gen = "?";
2705
 
2706
        dev_printk(KERN_INFO, &pdev->dev,
2707
               "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2708
               gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2709
               scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2710
}
2711
 
2712
/**
2713
 *      mv_init_one - handle a positive probe of a Marvell host
2714
 *      @pdev: PCI device found
2715
 *      @ent: PCI device ID entry for the matched host
2716
 *
2717
 *      LOCKING:
2718
 *      Inherited from caller.
2719
 */
2720
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2721
{
2722
        static int printed_version;
2723
        unsigned int board_idx = (unsigned int)ent->driver_data;
2724
        const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2725
        struct ata_host *host;
2726
        struct mv_host_priv *hpriv;
2727
        int n_ports, rc;
2728
 
2729
        if (!printed_version++)
2730
                dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2731
 
2732
        /* allocate host */
2733
        n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2734
 
2735
        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2736
        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2737
        if (!host || !hpriv)
2738
                return -ENOMEM;
2739
        host->private_data = hpriv;
2740
 
2741
        /* acquire resources */
2742
        rc = pcim_enable_device(pdev);
2743
        if (rc)
2744
                return rc;
2745
 
2746
        rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2747
        if (rc == -EBUSY)
2748
                pcim_pin_device(pdev);
2749
        if (rc)
2750
                return rc;
2751
        host->iomap = pcim_iomap_table(pdev);
2752
 
2753
        rc = pci_go_64(pdev);
2754
        if (rc)
2755
                return rc;
2756
 
2757
        /* initialize adapter */
2758
        rc = mv_init_host(host, board_idx);
2759
        if (rc)
2760
                return rc;
2761
 
2762
        /* Enable interrupts */
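        /* if MSI is requested but cannot be enabled, fall back to legacy INTx */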
2763
        if (msi && pci_enable_msi(pdev))
2764
                pci_intx(pdev, 1);
2765
 
2766
        mv_dump_pci_cfg(pdev, 0x68);
2767
        mv_print_info(host);
2768
 
2769
        pci_set_master(pdev);
2770
        pci_try_set_mwi(pdev);
2771
        return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2772
                                 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2773
}
2774
 
2775
static int __init mv_init(void)
2776
{
2777
        return pci_register_driver(&mv_pci_driver);
2778
}
2779
 
2780
static void __exit mv_exit(void)
2781
{
2782
        pci_unregister_driver(&mv_pci_driver);
2783
}
2784
 
2785
MODULE_AUTHOR("Brett Russ");
2786
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2787
MODULE_LICENSE("GPL");
2788
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2789
MODULE_VERSION(DRV_VERSION);
2790
 
2791
module_param(msi, int, 0444);
2792
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2793
 
2794
module_init(mv_init);
2795
module_exit(mv_exit);
