/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Especially with PCI Message Signalled Interrupts (MSI), the overhead
  reduced by interrupt mitigation is often not worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME        "sata_mv"
#define DRV_VERSION     "1.01"

enum {
        /* BAR's are enumerated in terms of pci_resource_start() terms */
        MV_PRIMARY_BAR = 0,             /* offset 0x10: memory space */
        MV_IO_BAR = 2,                  /* offset 0x18: IO space */
        MV_MISC_BAR = 3,                /* offset 0x1c: FLASH, NVRAM, SRAM */

        MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
        MV_MINOR_REG_AREA_SZ = 0x2000,  /* 8KB */

        MV_PCI_REG_BASE = 0,
        MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
        MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
        MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
        MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
        MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
        MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

        MV_SATAHC0_REG_BASE = 0x20000,
        MV_FLASH_CTL = 0x1046c,
        MV_GPIO_PORT_CTL = 0x104f0,
        MV_RESET_CFG = 0x180d8,

        MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,  /* arbiter */
        MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

        MV_MAX_Q_DEPTH = 32,
        MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
         * CRPB needs alignment on a 256B boundary. Size == 256B
         * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
         */
        MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
        MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
        MV_MAX_SG_CT = 176,
        MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
        MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
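
        /* Worked arithmetic for the sizes above (added for clarity, not in
         * the original source): 32 CRQBs * 32 B = 1024 B, 32 CRPBs * 8 B =
         * 256 B, and 176 ePRDs * 16 B = 2816 B, so MV_PORT_PRIV_DMA_SZ is
         * 1024 + 256 + 2816 = 4096 B, i.e. the 4KB quoted in the comment.
         */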

        MV_PORTS_PER_HC = 4,
        /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
        MV_PORT_HC_SHIFT = 2,
        /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
        MV_PORT_MASK = 3,

        /* Host Flags */
        MV_FLAG_DUAL_HC = (1 << 30),            /* two SATA Host Controllers */
        MV_FLAG_IRQ_COALESCE = (1 << 29),       /* IRQ coalescing capability */
        MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                          ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
                          ATA_FLAG_PIO_POLLING,
        MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

        CRQB_FLAG_READ = (1 << 0),
        CRQB_TAG_SHIFT = 1,
        CRQB_IOID_SHIFT = 6,            /* CRQB Gen-II/IIE IO Id shift */
        CRQB_HOSTQ_SHIFT = 17,          /* CRQB Gen-II/IIE HostQueTag shift */
        CRQB_CMD_ADDR_SHIFT = 8,
        CRQB_CMD_CS = (0x2 << 11),
        CRQB_CMD_LAST = (1 << 15),

        CRPB_FLAG_STATUS_SHIFT = 8,
        CRPB_IOID_SHIFT_6 = 5,          /* CRPB Gen-II IO Id shift */
        CRPB_IOID_SHIFT_7 = 7,          /* CRPB Gen-IIE IO Id shift */

        EPRD_FLAG_END_OF_TBL = (1 << 31),

        /* PCI interface registers */

        PCI_COMMAND_OFS = 0xc00,

        PCI_MAIN_CMD_STS_OFS = 0xd30,
        STOP_PCI_MASTER = (1 << 2),
        PCI_MASTER_EMPTY = (1 << 3),
        GLOB_SFT_RST = (1 << 4),

        MV_PCI_MODE = 0xd00,
        MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
        MV_PCI_DISC_TIMER = 0xd04,
        MV_PCI_MSI_TRIGGER = 0xc38,
        MV_PCI_SERR_MASK = 0xc28,
        MV_PCI_XBAR_TMOUT = 0x1d04,
        MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
        MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
        MV_PCI_ERR_ATTRIBUTE = 0x1d48,
        MV_PCI_ERR_COMMAND = 0x1d50,

        PCI_IRQ_CAUSE_OFS = 0x1d58,
        PCI_IRQ_MASK_OFS = 0x1d5c,
        PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */

        PCIE_IRQ_CAUSE_OFS = 0x1900,
        PCIE_IRQ_MASK_OFS = 0x1910,
        PCIE_UNMASK_ALL_IRQS = 0x40a,   /* assorted bits */

        HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
        HC_MAIN_IRQ_MASK_OFS = 0x1d64,
        PORT0_ERR = (1 << 0),           /* shift by port # */
        PORT0_DONE = (1 << 1),          /* shift by port # */
        HC0_IRQ_PEND = 0x1ff,           /* bits 0-8 = HC0's ports */
        HC_SHIFT = 9,                   /* bits 9-17 = HC1's ports */
        PCI_ERR = (1 << 18),
        TRAN_LO_DONE = (1 << 19),       /* 6xxx: IRQ coalescing */
        TRAN_HI_DONE = (1 << 20),       /* 6xxx: IRQ coalescing */
        PORTS_0_3_COAL_DONE = (1 << 8),
        PORTS_4_7_COAL_DONE = (1 << 17),
        PORTS_0_7_COAL_DONE = (1 << 21),        /* 6xxx: IRQ coalescing */
        GPIO_INT = (1 << 22),
        SELF_INT = (1 << 23),
        TWSI_INT = (1 << 24),
        HC_MAIN_RSVD = (0x7f << 25),    /* bits 31-25 */
        HC_MAIN_RSVD_5 = (0x1fff << 19),        /* bits 31-19 */
        HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
                               PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
                               HC_MAIN_RSVD),
        HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
                                 HC_MAIN_RSVD_5),

        /* SATAHC registers */
        HC_CFG_OFS = 0,

        HC_IRQ_CAUSE_OFS = 0x14,
        CRPB_DMA_DONE = (1 << 0),       /* shift by port # */
        HC_IRQ_COAL = (1 << 4),         /* IRQ coalescing */
        DEV_IRQ = (1 << 8),             /* shift by port # */

        /* Shadow block registers */
        SHD_BLK_OFS = 0x100,
        SHD_CTL_AST_OFS = 0x20,         /* ofs from SHD_BLK_OFS */

        /* SATA registers */
        SATA_STATUS_OFS = 0x300,        /* ctrl, err regs follow status */
        SATA_ACTIVE_OFS = 0x350,
        SATA_FIS_IRQ_CAUSE_OFS = 0x364,
        PHY_MODE3 = 0x310,
        PHY_MODE4 = 0x314,
        PHY_MODE2 = 0x330,
        MV5_PHY_MODE = 0x74,
        MV5_LT_MODE = 0x30,
        MV5_PHY_CTL = 0x0C,
        SATA_INTERFACE_CTL = 0x050,

        MV_M2_PREAMP_MASK = 0x7e0,

        /* Port registers */
        EDMA_CFG_OFS = 0,
        EDMA_CFG_Q_DEPTH = 0x1f,        /* max device queue depth */
        EDMA_CFG_NCQ = (1 << 5),        /* for R/W FPDMA queued */
        EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),     /* continue on error */
        EDMA_CFG_RD_BRST_EXT = (1 << 11),       /* read burst 512B */
        EDMA_CFG_WR_BUFF_LEN = (1 << 13),       /* write buffer 512B */

        EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
        EDMA_ERR_IRQ_MASK_OFS = 0xc,
        EDMA_ERR_D_PAR = (1 << 0),      /* UDMA data parity err */
        EDMA_ERR_PRD_PAR = (1 << 1),    /* UDMA PRD parity err */
        EDMA_ERR_DEV = (1 << 2),        /* device error */
        EDMA_ERR_DEV_DCON = (1 << 3),   /* device disconnect */
        EDMA_ERR_DEV_CON = (1 << 4),    /* device connected */
        EDMA_ERR_SERR = (1 << 5),       /* SError bits [WBDST] raised */
        EDMA_ERR_SELF_DIS = (1 << 7),   /* Gen II/IIE self-disable */
        EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
        EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
        EDMA_ERR_TRANS_IRQ_7 = (1 << 8),        /* Gen IIE transprt layer irq */
        EDMA_ERR_CRQB_PAR = (1 << 9),   /* CRQB parity error */
        EDMA_ERR_CRPB_PAR = (1 << 10),  /* CRPB parity error */
        EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
        EDMA_ERR_IORDY = (1 << 12),     /* IORdy timeout */

        EDMA_ERR_LNK_CTRL_RX = (0xf << 13),     /* link ctrl rx error */
        EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),     /* transient: CRC err */
        EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),     /* transient: FIFO err */
        EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),     /* fatal: caught SYNC */
        EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),     /* transient: FIS rx err */

        EDMA_ERR_LNK_DATA_RX = (0xf << 17),     /* link data rx error */

        EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),    /* link ctrl tx error */
        EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),     /* transient: CRC err */
        EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),     /* transient: FIFO err */
        EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),     /* transient: caught SYNC */
        EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),     /* transient: caught DMAT */
        EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),     /* transient: FIS collision */

        EDMA_ERR_LNK_DATA_TX = (0x1f << 26),    /* link data tx error */

        EDMA_ERR_TRANS_PROTO = (1 << 31),       /* transport protocol error */
        EDMA_ERR_OVERRUN_5 = (1 << 5),
        EDMA_ERR_UNDERRUN_5 = (1 << 6),

        EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
                                 EDMA_ERR_LNK_CTRL_RX_1 |
                                 EDMA_ERR_LNK_CTRL_RX_3 |
                                 EDMA_ERR_LNK_CTRL_TX,

        EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
                         EDMA_ERR_PRD_PAR |
                         EDMA_ERR_DEV_DCON |
                         EDMA_ERR_DEV_CON |
                         EDMA_ERR_SERR |
                         EDMA_ERR_SELF_DIS |
                         EDMA_ERR_CRQB_PAR |
                         EDMA_ERR_CRPB_PAR |
                         EDMA_ERR_INTRL_PAR |
                         EDMA_ERR_IORDY |
                         EDMA_ERR_LNK_CTRL_RX_2 |
                         EDMA_ERR_LNK_DATA_RX |
                         EDMA_ERR_LNK_DATA_TX |
                         EDMA_ERR_TRANS_PROTO,
        EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
                           EDMA_ERR_PRD_PAR |
                           EDMA_ERR_DEV_DCON |
                           EDMA_ERR_DEV_CON |
                           EDMA_ERR_OVERRUN_5 |
                           EDMA_ERR_UNDERRUN_5 |
                           EDMA_ERR_SELF_DIS_5 |
                           EDMA_ERR_CRQB_PAR |
                           EDMA_ERR_CRPB_PAR |
                           EDMA_ERR_INTRL_PAR |
                           EDMA_ERR_IORDY,

        EDMA_REQ_Q_BASE_HI_OFS = 0x10,
        EDMA_REQ_Q_IN_PTR_OFS = 0x14,   /* also contains BASE_LO */

        EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
        EDMA_REQ_Q_PTR_SHIFT = 5,

        EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
        EDMA_RSP_Q_IN_PTR_OFS = 0x20,
        EDMA_RSP_Q_OUT_PTR_OFS = 0x24,  /* also contains BASE_LO */
        EDMA_RSP_Q_PTR_SHIFT = 3,

        EDMA_CMD_OFS = 0x28,            /* EDMA command register */
        EDMA_EN = (1 << 0),             /* enable EDMA */
        EDMA_DS = (1 << 1),             /* disable EDMA; self-negated */
        ATA_RST = (1 << 2),             /* reset trans/link/phy */

        EDMA_IORDY_TMOUT = 0x34,
        EDMA_ARB_CFG = 0x38,

        /* Host private flags (hp_flags) */
        MV_HP_FLAG_MSI = (1 << 0),
        MV_HP_ERRATA_50XXB0 = (1 << 1),
        MV_HP_ERRATA_50XXB2 = (1 << 2),
        MV_HP_ERRATA_60X1B2 = (1 << 3),
        MV_HP_ERRATA_60X1C0 = (1 << 4),
        MV_HP_ERRATA_XX42A0 = (1 << 5),
        MV_HP_GEN_I = (1 << 6),         /* Generation I: 50xx */
        MV_HP_GEN_II = (1 << 7),        /* Generation II: 60xx */
        MV_HP_GEN_IIE = (1 << 8),       /* Generation IIE: 6042/7042 */
        MV_HP_PCIE = (1 << 9),          /* PCIe bus/regs: 7042 */

        /* Port private flags (pp_flags) */
        MV_PP_FLAG_EDMA_EN = (1 << 0),  /* is EDMA engine enabled? */
        MV_PP_FLAG_HAD_A_RESET = (1 << 2),      /* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv)         ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)        ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)       ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
        /* DMA boundary 0xffff is required by the s/g splitting
         * we need on /length/ in mv_fill_sg().
         */
        MV_DMA_BOUNDARY = 0xffffU,

        /* mask of register bits containing lower 32 bits
         * of EDMA request queue DMA address
         */
        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

        /* ditto, for response queue */
        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

enum chip_type {
        chip_504x,
        chip_508x,
        chip_5080,
        chip_604x,
        chip_608x,
        chip_6042,
        chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
        __le32 sg_addr;
        __le32 sg_addr_hi;
        __le16 ctrl_flags;
        __le16 ata_cmd[11];
};

struct mv_crqb_iie {
        __le32 addr;
        __le32 addr_hi;
        __le32 flags;
        __le32 len;
        __le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
        __le16 id;
        __le16 flags;
        __le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
        __le32 addr;
        __le32 flags_size;
        __le32 addr_hi;
        __le32 reserved;
};

struct mv_port_priv {
        struct mv_crqb *crqb;
        dma_addr_t crqb_dma;
        struct mv_crpb *crpb;
        dma_addr_t crpb_dma;
        struct mv_sg *sg_tbl;
        dma_addr_t sg_tbl_dma;

        unsigned int req_idx;
        unsigned int resp_idx;

        u32 pp_flags;
};

struct mv_port_signal {
        u32 amps;
        u32 pre;
};

struct mv_host_priv {
        u32 hp_flags;
        struct mv_port_signal signal[8];
        const struct mv_hw_ops *ops;
        u32 irq_cause_ofs;
        u32 irq_mask_ofs;
        u32 unmask_all_irqs;
};

struct mv_hw_ops {
        void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
        void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
        int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
        void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no);
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
                        void __iomem *port_mmio);

static struct scsi_host_template mv5_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = MV_MAX_SG_CT / 2,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = 1,
        .proc_name              = DRV_NAME,
        .dma_boundary           = MV_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = MV_MAX_SG_CT / 2,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = 1,
        .proc_name              = DRV_NAME,
        .dma_boundary           = MV_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,

        .error_handler          = mv_error_handler,
        .post_internal_cmd      = mv_post_int_cmd,
        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,

        .scr_read               = mv5_scr_read,
        .scr_write              = mv5_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,

        .error_handler          = mv_error_handler,
        .post_internal_cmd      = mv_post_int_cmd,
        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,

        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep_iie,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,

        .error_handler          = mv_error_handler,
        .post_internal_cmd      = mv_post_int_cmd,
        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,

        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
        {  /* chip_504x */
                .flags          = MV_COMMON_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_508x */
                .flags          = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_5080 */
                .flags          = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_604x */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv6_ops,
        },
        {  /* chip_608x */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                                  MV_FLAG_DUAL_HC,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv6_ops,
        },
        {  /* chip_6042 */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
        {  /* chip_7042 */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
};

static const struct pci_device_id mv_pci_tbl[] = {
        { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
        { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
        /* RocketRAID 1740/174x have different identifiers */
        { PCI_VDEVICE(TTI, 0x1740), chip_508x },
        { PCI_VDEVICE(TTI, 0x1742), chip_508x },

        { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
        { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
        { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

        { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

        /* Adaptec 1430SA */
        { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

        /* Marvell 7042 support */
        { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

        /* Highpoint RocketRAID PCIe series */
        { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
        { PCI_VDEVICE(TTI, 0x2310), chip_7042 },

        { }                     /* terminate list */
};

static struct pci_driver mv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = mv_pci_tbl,
        .probe                  = mv_init_one,
        .remove                 = ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
        .phy_errata             = mv5_phy_errata,
        .enable_leds            = mv5_enable_leds,
        .read_preamp            = mv5_read_preamp,
        .reset_hc               = mv5_reset_hc,
        .reset_flash            = mv5_reset_flash,
        .reset_bus              = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
        .phy_errata             = mv6_phy_errata,
        .enable_leds            = mv6_enable_leds,
        .read_preamp            = mv6_read_preamp,
        .reset_hc               = mv6_reset_hc,
        .reset_flash            = mv6_reset_flash,
        .reset_bus              = mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;         /* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
        int rc;

        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (rc) {
                        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (rc) {
                                dev_printk(KERN_ERR, &pdev->dev,
                                           "64-bit DMA enable failed\n");
                                return rc;
                        }
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit consistent DMA enable failed\n");
                        return rc;
                }
        }

        return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
        writel(data, addr);
        (void) readl(addr);     /* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
        return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
        return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
        return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
                                                 unsigned int port)
{
        return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
        return mv_hc_base_from_port(base, port) +
               MV_SATAHC_ARBTR_REG_SZ +
               (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
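
/*
 * Worked example of the register-window decomposition above (added for
 * clarity, not in the original source): for port 5, mv_hc_from_port()
 * gives hc = 5 >> 2 = 1 and mv_hardport_from_port() gives 5 & 3 = 1, so
 * mv_port_base() returns base + 0x20000 + 1*0x10000 + 0x2000 + 1*0x2000
 * = base + 0x34000.
 */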

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
        return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
        return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
                             struct mv_host_priv *hpriv,
                             struct mv_port_priv *pp)
{
        u32 index;

        /*
         * initialize request queue
         */
        index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

        WARN_ON(pp->crqb_dma & 0x3ff);
        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl((pp->crqb_dma & 0xffffffff) | index,
                         port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
        else
                writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

        /*
         * initialize response queue
         */
        index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

        WARN_ON(pp->crpb_dma & 0xff);
        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl((pp->crpb_dma & 0xffffffff) | index,
                         port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
        else
                writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

        writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
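
/*
 * Illustrative pointer-register arithmetic for mv_set_edma_ptrs() (added
 * comment, not in the original source): with req_idx == 5 the request
 * index field is (5 & 0x1f) << 5 = 0xa0, which lies entirely below
 * EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00), so the base-low bits and the
 * index can be OR'd into the same IN-pointer register without colliding.
 */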

/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
                         struct mv_port_priv *pp)
{
        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
                struct mv_host_priv *hpriv = ap->host->private_data;
                int hard_port = mv_hardport_from_port(ap->port_no);
                void __iomem *hc_mmio = mv_hc_base_from_port(
                                ap->host->iomap[MV_PRIMARY_BAR], hard_port);
                u32 hc_irq_cause, ipending;

                /* clear EDMA event indicators, if any */
                writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

                /* clear EDMA interrupt indicator, if any */
                hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
                ipending = (DEV_IRQ << hard_port) |
                        (CRPB_DMA_DONE << hard_port);
                if (hc_irq_cause & ipending) {
                        writelfl(hc_irq_cause & ~ipending,
                                 hc_mmio + HC_IRQ_CAUSE_OFS);
                }

                mv_edma_cfg(ap, hpriv, port_mmio);

                /* clear FIS IRQ Cause */
                writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

                mv_set_edma_ptrs(port_mmio, hpriv, pp);

                writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
        }
        WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        u32 reg;
        int i, err = 0;

        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                /* Disable EDMA if active.  The disable bit auto clears.
                 */
                writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
                pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        } else {
                WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
        }

        /* now properly wait for the eDMA to stop */
        for (i = 1000; i > 0; i--) {
                reg = readl(port_mmio + EDMA_CMD_OFS);
                if (!(reg & EDMA_EN))
                        break;

                udelay(100);
        }

        if (reg & EDMA_EN) {
                ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
                err = -EIO;
        }

        return err;
}
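
/*
 * Added note (not in the original source): the polling loop above waits
 * at most 1000 iterations * 100 us = 100 ms for EDMA_EN to clear before
 * giving up and returning -EIO.
 */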

static int mv_stop_dma(struct ata_port *ap)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&ap->host->lock, flags);
        rc = __mv_stop_dma(ap);
        spin_unlock_irqrestore(&ap->host->lock, flags);

        return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
        int b, w;
        for (b = 0; b < bytes; ) {
                DPRINTK("%p: ", start + b);
                for (w = 0; b < bytes && w < 4; w++) {
                        printk("%08x ", readl(start + b));
                        b += sizeof(u32);
                }
                printk("\n");
        }
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
        int b, w;
        u32 dw;
        for (b = 0; b < bytes; ) {
                DPRINTK("%02x: ", b);
                for (w = 0; b < bytes && w < 4; w++) {
                        (void) pci_read_config_dword(pdev, b, &dw);
                        printk("%08x ", dw);
                        b += sizeof(u32);
                }
                printk("\n");
        }
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
        void __iomem *hc_base = mv_hc_base(mmio_base,
                                           port >> MV_PORT_HC_SHIFT);
        void __iomem *port_base;
        int start_port, num_ports, p, start_hc, num_hcs, hc;

        if (0 > port) {
                start_hc = start_port = 0;
                num_ports = 8;          /* shld be benign for 4 port devs */
                num_hcs = 2;
        } else {
                start_hc = port >> MV_PORT_HC_SHIFT;
                start_port = port;
                num_ports = num_hcs = 1;
        }
        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
                num_ports > 1 ? num_ports - 1 : start_port);

        if (NULL != pdev) {
                DPRINTK("PCI config space regs:\n");
                mv_dump_pci_cfg(pdev, 0x68);
        }
        DPRINTK("PCI regs:\n");
        mv_dump_mem(mmio_base+0xc00, 0x3c);
        mv_dump_mem(mmio_base+0xd00, 0x34);
        mv_dump_mem(mmio_base+0xf00, 0x4);
        mv_dump_mem(mmio_base+0x1d00, 0x6c);
        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
                hc_base = mv_hc_base(mmio_base, hc);
                DPRINTK("HC regs (HC %i):\n", hc);
                mv_dump_mem(hc_base, 0x1c);
        }
        for (p = start_port; p < start_port + num_ports; p++) {
                port_base = mv_port_base(mmio_base, p);
                DPRINTK("EDMA regs (port %i):\n", p);
                mv_dump_mem(port_base, 0x54);
                DPRINTK("SATA regs (port %i):\n", p);
                mv_dump_mem(port_base+0x300, 0x60);
        }
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
        unsigned int ofs;

        switch (sc_reg_in) {
        case SCR_STATUS:
        case SCR_CONTROL:
        case SCR_ERROR:
                ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
                break;
        case SCR_ACTIVE:
                ofs = SATA_ACTIVE_OFS;  /* active is not with the others */
                break;
        default:
                ofs = 0xffffffffU;
                break;
        }
        return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                *val = readl(mv_ap_base(ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                writelfl(val, mv_ap_base(ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
                        void __iomem *port_mmio)
{
        u32 cfg;

        /* set up non-NCQ EDMA configuration */
        cfg = EDMA_CFG_Q_DEPTH;         /* always 0x1f for *all* chips */

        if (IS_GEN_I(hpriv))
                cfg |= (1 << 8);        /* enab config burst size mask */

        else if (IS_GEN_II(hpriv))
                cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

        else if (IS_GEN_IIE(hpriv)) {
                cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
                cfg |= (1 << 22);       /* enab 4-entry host queue cache */
                cfg |= (1 << 18);       /* enab early completion */
                cfg |= (1 << 17);       /* enab cut-through (dis stor&forwrd) */
        }

        writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct mv_host_priv *hpriv = ap->host->private_data;
        struct mv_port_priv *pp;
        void __iomem *port_mmio = mv_ap_base(ap);
        void *mem;
        dma_addr_t mem_dma;
        unsigned long flags;
        int rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
                                  GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

        rc = ata_pad_alloc(ap, dev);
        if (rc)
                return rc;

        /* First item in chunk of DMA memory:
         * 32-slot command request table (CRQB), 32 bytes each in size
         */
        pp->crqb = mem;
        pp->crqb_dma = mem_dma;
        mem += MV_CRQB_Q_SZ;
        mem_dma += MV_CRQB_Q_SZ;

        /* Second item:
         * 32-slot command response table (CRPB), 8 bytes each in size
         */
        pp->crpb = mem;
        pp->crpb_dma = mem_dma;
        mem += MV_CRPB_Q_SZ;
        mem_dma += MV_CRPB_Q_SZ;

        /* Third item:
         * Table of scatter-gather descriptors (ePRD), 16 bytes each
         */
        pp->sg_tbl = mem;
        pp->sg_tbl_dma = mem_dma;

        spin_lock_irqsave(&ap->host->lock, flags);

        mv_edma_cfg(ap, hpriv, port_mmio);

        mv_set_edma_ptrs(port_mmio, hpriv, pp);

        spin_unlock_irqrestore(&ap->host->lock, flags);

        /* Don't turn on EDMA here...do it before DMA commands only.  Else
         * we'll be unable to send non-data, PIO, etc due to restricted access
         * to shadow regs.
         */
        ap->private_data = pp;
        return 0;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
        mv_stop_dma(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
        struct mv_port_priv *pp = qc->ap->private_data;
        struct scatterlist *sg;
        struct mv_sg *mv_sg, *last_sg = NULL;
        unsigned int si;

        mv_sg = pp->sg_tbl;
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                dma_addr_t addr = sg_dma_address(sg);
                u32 sg_len = sg_dma_len(sg);

                while (sg_len) {
                        u32 offset = addr & 0xffff;
                        u32 len = sg_len;

                        if ((offset + sg_len > 0x10000))
                                len = 0x10000 - offset;

                        mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
                        mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
                        mv_sg->flags_size = cpu_to_le32(len & 0xffff);

                        sg_len -= len;
                        addr += len;

                        last_sg = mv_sg;
                        mv_sg++;
                }
        }

        if (likely(last_sg))
                last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
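
/*
 * Worked example of the 64KB splitting above (added for clarity, not in
 * the original source): a segment whose bus address sits at offset 0xf000
 * within its 64KB window and whose length is 0x3000 crosses the boundary,
 * so it is emitted as two ePRDs: one of 0x1000 bytes up to the boundary,
 * and one of 0x2000 bytes starting at the next 64KB-aligned address.
 */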

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
        u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
                (last ? CRQB_CMD_LAST : 0);
        *cmdw = cpu_to_le16(tmp);
}
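
/*
 * Illustrative packing (added comment, not in the original source): with
 * data = 0x25, addr = 0x07 and last = 1, the packed CRQB command word is
 * 0x25 | (0x07 << 8) | (0x2 << 11) | (1 << 15) = 0x9725.
 */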

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        __le16 *cw;
        struct ata_taskfile *tf;
        u16 flags = 0;
        unsigned in_index;

        if (qc->tf.protocol != ATA_PROT_DMA)
                return;

        /* Fill in command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
                flags |= CRQB_FLAG_READ;
        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;
        flags |= qc->tag << CRQB_IOID_SHIFT;    /* 50xx appears to ignore this */

        /* get current queue index from software */
        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        pp->crqb[in_index].sg_addr =
                cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        pp->crqb[in_index].sg_addr_hi =
                cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

        cw = &pp->crqb[in_index].ata_cmd[0];
        tf = &qc->tf;

        /* Sadly, the CRQB cannot accommodate all registers--there are
         * only 11 bytes...so we must pick and choose required
         * registers based on the command.  So, we drop feature and
         * hob_feature for [RW] DMA commands, but they are needed for
         * NCQ.  NCQ will drop hob_nsect.
         */
        switch (tf->command) {
        case ATA_CMD_READ:
        case ATA_CMD_READ_EXT:
        case ATA_CMD_WRITE:
        case ATA_CMD_WRITE_EXT:
        case ATA_CMD_WRITE_FUA_EXT:
                mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
                break;
#ifdef LIBATA_NCQ               /* FIXME: remove this line when NCQ added */
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_FPDMA_WRITE:
                mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
                mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
                break;
#endif                          /* FIXME: remove this line when NCQ added */
        default:
                /* The only other commands EDMA supports in non-queued and
                 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
                 * of which are defined/used by Linux.  If we get here, this
                 * driver needs work.
                 *
                 * FIXME: modify libata to give qc_prep a return value and
                 * return error here.
                 */
                BUG_ON(tf->command);
                break;
        }
        mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
        mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);   /* last */

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;
        mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        struct mv_crqb_iie *crqb;
        struct ata_taskfile *tf;
        unsigned in_index;
        u32 flags = 0;

        if (qc->tf.protocol != ATA_PROT_DMA)
                return;

        /* Fill in Gen IIE command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
                flags |= CRQB_FLAG_READ;

        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;
        flags |= qc->tag << CRQB_IOID_SHIFT;    /* "I/O Id" is -really-
                                                   what we use as our tag */

        /* get current queue index from software */
        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
        crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        crqb->flags = cpu_to_le32(flags);

        tf = &qc->tf;
        crqb->ata_cmd[0] = cpu_to_le32(
                        (tf->command << 16) |
                        (tf->feature << 24)
                );
        crqb->ata_cmd[1] = cpu_to_le32(
                        (tf->lbal << 0) |
                        (tf->lbam << 8) |
                        (tf->lbah << 16) |
                        (tf->device << 24)
                );
        crqb->ata_cmd[2] = cpu_to_le32(
                        (tf->hob_lbal << 0) |
                        (tf->hob_lbam << 8) |
                        (tf->hob_lbah << 16) |
                        (tf->hob_feature << 24)
                );
        crqb->ata_cmd[3] = cpu_to_le32(
                        (tf->nsect << 0) |
                        (tf->hob_nsect << 8)
                );

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;
        mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        u32 in_index;

        if (qc->tf.protocol != ATA_PROT_DMA) {
                /* We're about to send a non-EDMA capable command to the
                 * port.  Turn off EDMA so there won't be problems accessing
                 * shadow block, etc registers.
                 */
                __mv_stop_dma(ap);
                return ata_qc_issue_prot(qc);
        }

        mv_start_dma(ap, port_mmio, pp);

        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        /* until we do queuing, the queue should be empty at this point */
        WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
                >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

        pp->req_idx++;

        in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

        /* and write the request in pointer to kick the EDMA to life */
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        return 0;
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected queued command, or NULL if none is active
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 edma_err_cause, eh_freeze_mask, serr = 0;
        struct mv_port_priv *pp = ap->private_data;
        struct mv_host_priv *hpriv = ap->host->private_data;
        unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
        unsigned int action = 0, err_mask = 0;
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ata_ehi_clear_desc(ehi);

        if (!edma_enabled) {
                /* just a guess: do we need to do this? should we
                 * expand this, and do it in all cases?
                 */
                sata_scr_read(&ap->link, SCR_ERROR, &serr);
                sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
        }

        edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

        /*
         * all generations share these EDMA error cause bits
         */

        if (edma_err_cause & EDMA_ERR_DEV)
                err_mask |= AC_ERR_DEV;
        if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
                        EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
                        EDMA_ERR_INTRL_PAR)) {
                err_mask |= AC_ERR_ATA_BUS;
                action |= ATA_EH_HARDRESET;
                ata_ehi_push_desc(ehi, "parity error");
        }
        if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
                ata_ehi_hotplugged(ehi);
                ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
                        "dev disconnect" : "dev connect");
                action |= ATA_EH_HARDRESET;
        }

        if (IS_GEN_I(hpriv)) {
                eh_freeze_mask = EDMA_EH_FREEZE_5;

                if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
                        struct mv_port_priv *pp = ap->private_data;
                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
                        ata_ehi_push_desc(ehi, "EDMA self-disable");
                }
        } else {
                eh_freeze_mask = EDMA_EH_FREEZE;

                if (edma_err_cause & EDMA_ERR_SELF_DIS) {
                        struct mv_port_priv *pp = ap->private_data;
                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
                        ata_ehi_push_desc(ehi, "EDMA self-disable");
                }

                if (edma_err_cause & EDMA_ERR_SERR) {
                        sata_scr_read(&ap->link, SCR_ERROR, &serr);
                        sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
                        err_mask = AC_ERR_ATA_BUS;
                        action |= ATA_EH_HARDRESET;
                }
        }

        /* Clear EDMA now that SERR cleanup done */
        writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        if (!err_mask) {
                err_mask = AC_ERR_OTHER;
                action |= ATA_EH_HARDRESET;
        }

        ehi->serror |= serr;
        ehi->action |= action;

        if (qc)
                qc->err_mask |= err_mask;
        else
                ehi->err_mask |= err_mask;

        if (edma_err_cause & eh_freeze_mask)
                ata_port_freeze(ap);
        else
                ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        u8 ata_status;

        /* ignore spurious intr if drive still BUSY */
        ata_status = readb(ap->ioaddr.status_addr);
        if (unlikely(ata_status & ATA_BUSY))
                return;

        /* get active ATA command */
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
        if (unlikely(!qc))                      /* no active tag */
                return;
        if (qc->tf.flags & ATA_TFLAG_POLLING)   /* polling; we don't own qc */
                return;

        /* and finally, complete the ATA command */
        qc->err_mask |= ac_err_mask(ata_status);
        ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_host_priv *hpriv = ap->host->private_data;
        struct mv_port_priv *pp = ap->private_data;
        struct ata_queued_cmd *qc;
        u32 out_index, in_index;
        bool work_done = false;

        /* get h/w response queue pointer */
        in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
                        >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

        while (1) {
                u16 status;
                unsigned int tag;

                /* get s/w response queue last-read pointer, and compare */
                out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
                if (in_index == out_index)
                        break;

                /* 50xx: get active ATA command */
                if (IS_GEN_I(hpriv))
                        tag = ap->link.active_tag;

                /* Gen II/IIE: get active ATA command via tag, to enable
                 * support for queueing.  this works transparently for
                 * queued and non-queued modes.
                 */
                else if (IS_GEN_II(hpriv))
                        tag = (le16_to_cpu(pp->crpb[out_index].id)
                                >> CRPB_IOID_SHIFT_6) & 0x3f;

                else /* IS_GEN_IIE */
                        tag = (le16_to_cpu(pp->crpb[out_index].id)
                                >> CRPB_IOID_SHIFT_7) & 0x3f;

                qc = ata_qc_from_tag(ap, tag);

                /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
                 * bits (WARNING: might not necessarily be associated
                 * with this command), which -should- be clear
                 * if all is well
                 */
                status = le16_to_cpu(pp->crpb[out_index].flags);
                if (unlikely(status & 0xff)) {
                        mv_err_intr(ap, qc);
                        return;
                }

                /* and finally, complete the ATA command */
                if (qc) {
                        qc->err_mask |=
                                ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
                        ata_qc_complete(qc);
                }

                /* advance software response queue pointer, to
                 * indicate (after the loop completes) to hardware
                 * that we have consumed a response queue entry.
                 */
                work_done = true;
                pp->resp_idx++;
        }

        if (work_done)
                writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
                         (out_index << EDMA_RSP_Q_PTR_SHIFT),
                         port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
        void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
        u32 hc_irq_cause;
        int port, port0;

        if (hc == 0)
                port0 = 0;
        else
                port0 = MV_PORTS_PER_HC;

        /* we'll need the HC success int register in most cases */
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
        if (!hc_irq_cause)
                return;

        writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

        VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
                hc, relevant, hc_irq_cause);

        for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
                struct ata_port *ap = host->ports[port];
                struct mv_port_priv *pp = ap->private_data;
                int have_err_bits, hard_port, shift;

                if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
                        continue;

                shift = port << 1;              /* (port * 2) */
                if (port >= MV_PORTS_PER_HC) {
                        shift++;        /* skip bit 8 in the HC Main IRQ reg */
                }
bdd4ddde
JG
1667 have_err_bits = ((PORT0_ERR << shift) & relevant);
1668
1669 if (unlikely(have_err_bits)) {
1670 struct ata_queued_cmd *qc;
8b260248 1671
9af5c9c9 1672 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1673 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1674 continue;
1675
1676 mv_err_intr(ap, qc);
1677 continue;
1678 }
1679
1680 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1681
1682 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1683 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1684 mv_intr_edma(ap);
1685 } else {
1686 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1687 mv_intr_pio(ap);
20f733e7
BR
1688 }
1689 }
1690 VPRINTK("EXIT\n");
1691}
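
/*
 * Worked example for the per-port shift used above (illustrative only,
 * assuming MV_PORTS_PER_HC == 4 and PORT0_ERR occupying bit 0): the
 * main cause register packs two bits per port and skips bit 8 between
 * the two host controllers.  For port 5:
 *
 *	shift = 5 << 1 = 10, plus 1 to skip bit 8  ->  11
 *	have_err_bits = (PORT0_ERR << 11) & relevant
 *
 * which isolates that port's error bit from the single 'relevant'
 * word handed down by mv_interrupt().
 */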
1692
bdd4ddde
JG
1693static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1694{
02a121da 1695 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
1696 struct ata_port *ap;
1697 struct ata_queued_cmd *qc;
1698 struct ata_eh_info *ehi;
1699 unsigned int i, err_mask, printed = 0;
1700 u32 err_cause;
1701
02a121da 1702 err_cause = readl(mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1703
1704 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1705 err_cause);
1706
1707 DPRINTK("All regs @ PCI error\n");
1708 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1709
02a121da 1710 writelfl(0, mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1711
1712 for (i = 0; i < host->n_ports; i++) {
1713 ap = host->ports[i];
936fd732 1714 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1715 ehi = &ap->link.eh_info;
bdd4ddde
JG
1716 ata_ehi_clear_desc(ehi);
1717 if (!printed++)
1718 ata_ehi_push_desc(ehi,
1719 "PCI err cause 0x%08x", err_cause);
1720 err_mask = AC_ERR_HOST_BUS;
1721 ehi->action = ATA_EH_HARDRESET;
9af5c9c9 1722 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1723 if (qc)
1724 qc->err_mask |= err_mask;
1725 else
1726 ehi->err_mask |= err_mask;
1727
1728 ata_port_freeze(ap);
1729 }
1730 }
1731}
1732
05b308e1 1733/**
c5d3e45a 1734 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1735 * @irq: unused
1736 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1737 *
1738 * Read the read only register to determine if any host
1739 * controllers have pending interrupts. If so, call lower level
1740 * routine to handle. Also check for PCI errors which are only
1741 * reported here.
1742 *
8b260248 1743 * LOCKING:
cca3974e 1744 * This routine holds the host lock while processing pending
05b308e1
BR
1745 * interrupts.
1746 */
7d12e780 1747static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1748{
cca3974e 1749 struct ata_host *host = dev_instance;
20f733e7 1750 unsigned int hc, handled = 0, n_hcs;
0d5ff566 1751 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
646a4da5 1752 u32 irq_stat, irq_mask;
20f733e7 1753
646a4da5 1754 spin_lock(&host->lock);
20f733e7 1755 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
646a4da5 1756 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
20f733e7
BR
1757
1758 /* check the cases where we either have nothing pending or have read
1759 * a bogus register value which can indicate HW removal or PCI fault
1760 */
646a4da5
ML
1761 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1762 goto out_unlock;
20f733e7 1763
cca3974e 1764 n_hcs = mv_get_hc_count(host->ports[0]->flags);
20f733e7 1765
bdd4ddde
JG
1766 if (unlikely(irq_stat & PCI_ERR)) {
1767 mv_pci_error(host, mmio);
1768 handled = 1;
1769 goto out_unlock; /* skip all other HC irq handling */
1770 }
1771
20f733e7
BR
1772 for (hc = 0; hc < n_hcs; hc++) {
1773 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1774 if (relevant) {
cca3974e 1775 mv_host_intr(host, relevant, hc);
bdd4ddde 1776 handled = 1;
20f733e7
BR
1777 }
1778 }
615ab953 1779
bdd4ddde 1780out_unlock:
cca3974e 1781 spin_unlock(&host->lock);
20f733e7
BR
1782
1783 return IRQ_RETVAL(handled);
1784}
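
/*
 * Sketch of the per-HC slicing done above (assumes HC_SHIFT == 9 and
 * HC0_IRQ_PEND == 0x1ff, the values this driver uses elsewhere): each
 * host controller owns a nine-bit slice of the main IRQ cause word,
 *
 *	hc 0:  relevant = irq_stat & 0x000001ff
 *	hc 1:  relevant = irq_stat & 0x0003fe00
 *
 * and mv_host_intr() is only invoked for a controller whose slice is
 * non-zero.
 */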
1785
c9d39130
JG
1786static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1787{
1788 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1789 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1790
1791 return hc_mmio + ofs;
1792}
1793
1794static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1795{
1796 unsigned int ofs;
1797
1798 switch (sc_reg_in) {
1799 case SCR_STATUS:
1800 case SCR_ERROR:
1801 case SCR_CONTROL:
1802 ofs = sc_reg_in * sizeof(u32);
1803 break;
1804 default:
1805 ofs = 0xffffffffU;
1806 break;
1807 }
1808 return ofs;
1809}
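
/*
 * Example mapping (assuming the usual libata numbering SCR_STATUS=0,
 * SCR_ERROR=1, SCR_CONTROL=2): the 50xx parts keep these registers
 * contiguous, so the offset is simply reg * sizeof(u32):
 *
 *	SCR_STATUS  -> 0x0
 *	SCR_ERROR   -> 0x4
 *	SCR_CONTROL -> 0x8
 *
 * Any other register yields 0xffffffff, which the read/write wrappers
 * below translate into -EINVAL.
 */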
1810
da3dbb17 1811static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1812{
0d5ff566
TH
1813 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1814 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1815 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1816
da3dbb17
TH
1817 if (ofs != 0xffffffffU) {
1818 *val = readl(addr + ofs);
1819 return 0;
1820 } else
1821 return -EINVAL;
c9d39130
JG
1822}
1823
da3dbb17 1824static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1825{
0d5ff566
TH
1826 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1827 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1828 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1829
da3dbb17 1830 if (ofs != 0xffffffffU) {
0d5ff566 1831 writelfl(val, addr + ofs);
da3dbb17
TH
1832 return 0;
1833 } else
1834 return -EINVAL;
c9d39130
JG
1835}
1836
522479fb
JG
1837static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1838{
522479fb
JG
1839 int early_5080;
1840
44c10138 1841 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1842
1843 if (!early_5080) {
1844 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1845 tmp |= (1 << 0);
1846 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1847 }
1848
1849 mv_reset_pci_bus(pdev, mmio);
1850}
1851
1852static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1853{
1854 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1855}
1856
47c2b677 1857static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1858 void __iomem *mmio)
1859{
c9d39130
JG
1860 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1861 u32 tmp;
1862
1863 tmp = readl(phy_mmio + MV5_PHY_MODE);
1864
1865 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1866 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1867}
1868
47c2b677 1869static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1870{
522479fb
JG
1871 u32 tmp;
1872
1873 writel(0, mmio + MV_GPIO_PORT_CTL);
1874
1875 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1876
1877 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1878 tmp |= ~(1 << 0);
1879 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1880}
1881
2a47ce06
JG
1882static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1883 unsigned int port)
bca1c4eb 1884{
c9d39130
JG
1885 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1886 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1887 u32 tmp;
1888 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1889
1890 if (fix_apm_sq) {
1891 tmp = readl(phy_mmio + MV5_LT_MODE);
1892 tmp |= (1 << 19);
1893 writel(tmp, phy_mmio + MV5_LT_MODE);
1894
1895 tmp = readl(phy_mmio + MV5_PHY_CTL);
1896 tmp &= ~0x3;
1897 tmp |= 0x1;
1898 writel(tmp, phy_mmio + MV5_PHY_CTL);
1899 }
1900
1901 tmp = readl(phy_mmio + MV5_PHY_MODE);
1902 tmp &= ~mask;
1903 tmp |= hpriv->signal[port].pre;
1904 tmp |= hpriv->signal[port].amps;
1905 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1906}
1907
c9d39130
JG
1908
1909#undef ZERO
1910#define ZERO(reg) writel(0, port_mmio + (reg))
1911static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1912 unsigned int port)
1913{
1914 void __iomem *port_mmio = mv_port_base(mmio, port);
1915
1916 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1917
1918 mv_channel_reset(hpriv, mmio, port);
1919
1920 ZERO(0x028); /* command */
1921 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1922 ZERO(0x004); /* timer */
1923 ZERO(0x008); /* irq err cause */
1924 ZERO(0x00c); /* irq err mask */
1925 ZERO(0x010); /* rq bah */
1926 ZERO(0x014); /* rq inp */
1927 ZERO(0x018); /* rq outp */
1928 ZERO(0x01c); /* respq bah */
1929 ZERO(0x024); /* respq outp */
1930 ZERO(0x020); /* respq inp */
1931 ZERO(0x02c); /* test control */
1932 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1933}
1934#undef ZERO
1935
1936#define ZERO(reg) writel(0, hc_mmio + (reg))
1937static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1938 unsigned int hc)
47c2b677 1939{
c9d39130
JG
1940 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1941 u32 tmp;
1942
1943 ZERO(0x00c);
1944 ZERO(0x010);
1945 ZERO(0x014);
1946 ZERO(0x018);
1947
1948 tmp = readl(hc_mmio + 0x20);
1949 tmp &= 0x1c1c1c1c;
1950 tmp |= 0x03030303;
1951 writel(tmp, hc_mmio + 0x20);
1952}
1953#undef ZERO
1954
1955static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1956 unsigned int n_hc)
1957{
1958 unsigned int hc, port;
1959
1960 for (hc = 0; hc < n_hc; hc++) {
1961 for (port = 0; port < MV_PORTS_PER_HC; port++)
1962 mv5_reset_hc_port(hpriv, mmio,
1963 (hc * MV_PORTS_PER_HC) + port);
1964
1965 mv5_reset_one_hc(hpriv, mmio, hc);
1966 }
1967
1968 return 0;
47c2b677
JG
1969}
1970
101ffae2
JG
1971#undef ZERO
1972#define ZERO(reg) writel(0, mmio + (reg))
1973static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1974{
02a121da
ML
1975 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1976 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
1977 u32 tmp;
1978
1979 tmp = readl(mmio + MV_PCI_MODE);
1980 tmp &= 0xff00ffff;
1981 writel(tmp, mmio + MV_PCI_MODE);
1982
1983 ZERO(MV_PCI_DISC_TIMER);
1984 ZERO(MV_PCI_MSI_TRIGGER);
1985 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1986 ZERO(HC_MAIN_IRQ_MASK_OFS);
1987 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
1988 ZERO(hpriv->irq_cause_ofs);
1989 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
1990 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1991 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1992 ZERO(MV_PCI_ERR_ATTRIBUTE);
1993 ZERO(MV_PCI_ERR_COMMAND);
1994}
1995#undef ZERO
1996
1997static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1998{
1999 u32 tmp;
2000
2001 mv5_reset_flash(hpriv, mmio);
2002
2003 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2004 tmp &= 0x3;
2005 tmp |= (1 << 5) | (1 << 6);
2006 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2007}
2008
2009/**
2010 * mv6_reset_hc - Perform the 6xxx global soft reset
2011 * @mmio: base address of the HBA
2012 *
2013 * This routine only applies to 6xxx parts.
2014 *
2015 * LOCKING:
2016 * Inherited from caller.
2017 */
c9d39130
JG
2018static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2019 unsigned int n_hc)
101ffae2
JG
2020{
2021 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2022 int i, rc = 0;
2023 u32 t;
2024
2025 	/* Follow the procedure defined in the PCI "main command and status
2026 * register" table.
2027 */
2028 t = readl(reg);
2029 writel(t | STOP_PCI_MASTER, reg);
2030
2031 for (i = 0; i < 1000; i++) {
2032 udelay(1);
2033 t = readl(reg);
2dcb407e 2034 if (PCI_MASTER_EMPTY & t)
101ffae2 2035 break;
101ffae2
JG
2036 }
2037 if (!(PCI_MASTER_EMPTY & t)) {
2038 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2039 rc = 1;
2040 goto done;
2041 }
2042
2043 /* set reset */
2044 i = 5;
2045 do {
2046 writel(t | GLOB_SFT_RST, reg);
2047 t = readl(reg);
2048 udelay(1);
2049 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2050
2051 if (!(GLOB_SFT_RST & t)) {
2052 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2053 rc = 1;
2054 goto done;
2055 }
2056
2057 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2058 i = 5;
2059 do {
2060 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2061 t = readl(reg);
2062 udelay(1);
2063 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2064
2065 if (GLOB_SFT_RST & t) {
2066 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2067 rc = 1;
2068 }
2069done:
2070 return rc;
2071}
2072
47c2b677 2073static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2074 void __iomem *mmio)
2075{
2076 void __iomem *port_mmio;
2077 u32 tmp;
2078
ba3fe8fb
JG
2079 tmp = readl(mmio + MV_RESET_CFG);
2080 if ((tmp & (1 << 0)) == 0) {
47c2b677 2081 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2082 hpriv->signal[idx].pre = 0x1 << 5;
2083 return;
2084 }
2085
2086 port_mmio = mv_port_base(mmio, idx);
2087 tmp = readl(port_mmio + PHY_MODE2);
2088
2089 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2090 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2091}
2092
47c2b677 2093static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2094{
47c2b677 2095 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2096}
2097
c9d39130 2098static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2099 unsigned int port)
bca1c4eb 2100{
c9d39130
JG
2101 void __iomem *port_mmio = mv_port_base(mmio, port);
2102
bca1c4eb 2103 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2104 int fix_phy_mode2 =
2105 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2106 int fix_phy_mode4 =
47c2b677
JG
2107 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2108 u32 m2, tmp;
2109
2110 if (fix_phy_mode2) {
2111 m2 = readl(port_mmio + PHY_MODE2);
2112 m2 &= ~(1 << 16);
2113 m2 |= (1 << 31);
2114 writel(m2, port_mmio + PHY_MODE2);
2115
2116 udelay(200);
2117
2118 m2 = readl(port_mmio + PHY_MODE2);
2119 m2 &= ~((1 << 16) | (1 << 31));
2120 writel(m2, port_mmio + PHY_MODE2);
2121
2122 udelay(200);
2123 }
2124
2125 /* who knows what this magic does */
2126 tmp = readl(port_mmio + PHY_MODE3);
2127 tmp &= ~0x7F800000;
2128 tmp |= 0x2A800000;
2129 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2130
2131 if (fix_phy_mode4) {
47c2b677 2132 u32 m4;
bca1c4eb
JG
2133
2134 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2135
2136 if (hp_flags & MV_HP_ERRATA_60X1B2)
2137 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2138
2139 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2140
2141 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2142
2143 if (hp_flags & MV_HP_ERRATA_60X1B2)
2144 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2145 }
2146
2147 /* Revert values of pre-emphasis and signal amps to the saved ones */
2148 m2 = readl(port_mmio + PHY_MODE2);
2149
2150 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2151 m2 |= hpriv->signal[port].amps;
2152 m2 |= hpriv->signal[port].pre;
47c2b677 2153 m2 &= ~(1 << 16);
bca1c4eb 2154
e4e7b892
JG
2155 /* according to mvSata 3.6.1, some IIE values are fixed */
2156 if (IS_GEN_IIE(hpriv)) {
2157 m2 &= ~0xC30FF01F;
2158 m2 |= 0x0000900F;
2159 }
2160
bca1c4eb
JG
2161 writel(m2, port_mmio + PHY_MODE2);
2162}
2163
c9d39130
JG
2164static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2165 unsigned int port_no)
2166{
2167 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2168
2169 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2170
ee9ccdf7 2171 if (IS_GEN_II(hpriv)) {
c9d39130 2172 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2173 ifctl |= (1 << 7); /* enable gen2i speed */
2174 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2175 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2176 }
2177
2178 udelay(25); /* allow reset propagation */
2179
2180 /* Spec never mentions clearing the bit. Marvell's driver does
2181 * clear the bit, however.
2182 */
2183 writelfl(0, port_mmio + EDMA_CMD_OFS);
2184
2185 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2186
ee9ccdf7 2187 if (IS_GEN_I(hpriv))
c9d39130
JG
2188 mdelay(1);
2189}
2190
05b308e1 2191/**
bdd4ddde 2192 * mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2193 * @ap: ATA channel to manipulate
2194 *
2195 * Part of this is taken from __sata_phy_reset and modified to
2196 * not sleep since this routine gets called from interrupt level.
2197 *
2198 * LOCKING:
2199  * Inherited from caller.  This is coded to be safe to call at
2200 * interrupt level, i.e. it does not sleep.
31961943 2201 */
bdd4ddde
JG
2202static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2203 unsigned long deadline)
20f733e7 2204{
095fec88 2205 struct mv_port_priv *pp = ap->private_data;
cca3974e 2206 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2207 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2208 int retry = 5;
2209 u32 sstatus;
20f733e7
BR
2210
2211 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2212
da3dbb17
TH
2213#ifdef DEBUG
2214 {
2215 u32 sstatus, serror, scontrol;
2216
2217 mv_scr_read(ap, SCR_STATUS, &sstatus);
2218 mv_scr_read(ap, SCR_ERROR, &serror);
2219 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2220 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2d79ab8f 2221 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
da3dbb17
TH
2222 }
2223#endif
20f733e7 2224
22374677
JG
2225 /* Issue COMRESET via SControl */
2226comreset_retry:
936fd732 2227 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
bdd4ddde 2228 msleep(1);
22374677 2229
936fd732 2230 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
bdd4ddde 2231 msleep(20);
22374677 2232
31961943 2233 do {
936fd732 2234 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
62f1d0e6 2235 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2236 break;
22374677 2237
bdd4ddde 2238 msleep(1);
c5d3e45a 2239 } while (time_before(jiffies, deadline));
20f733e7 2240
22374677 2241 /* work around errata */
ee9ccdf7 2242 if (IS_GEN_II(hpriv) &&
22374677
JG
2243 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2244 (retry-- > 0))
2245 goto comreset_retry;
095fec88 2246
da3dbb17
TH
2247#ifdef DEBUG
2248 {
2249 u32 sstatus, serror, scontrol;
2250
2251 mv_scr_read(ap, SCR_STATUS, &sstatus);
2252 mv_scr_read(ap, SCR_ERROR, &serror);
2253 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2254 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2255 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2256 }
2257#endif
31961943 2258
936fd732 2259 if (ata_link_offline(&ap->link)) {
bdd4ddde 2260 *class = ATA_DEV_NONE;
20f733e7
BR
2261 return;
2262 }
2263
22374677
JG
2264 	/* even after SStatus reflects that the device is ready,
2265 	 * it seems to take a while for the link to be fully
2266 * established (and thus Status no longer 0x80/0x7F),
2267 * so we poll a bit for that, here.
2268 */
2269 retry = 20;
2270 while (1) {
2271 u8 drv_stat = ata_check_status(ap);
2272 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2273 break;
bdd4ddde 2274 msleep(500);
22374677
JG
2275 if (retry-- <= 0)
2276 break;
bdd4ddde
JG
2277 if (time_after(jiffies, deadline))
2278 break;
22374677
JG
2279 }
2280
bdd4ddde
JG
2281 /* FIXME: if we passed the deadline, the following
2282 * code probably produces an invalid result
2283 */
20f733e7 2284
bdd4ddde 2285 /* finally, read device signature from TF registers */
3f19859e 2286 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
095fec88
JG
2287
2288 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2289
bdd4ddde 2290 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2291
bca1c4eb 2292 VPRINTK("EXIT\n");
20f733e7
BR
2293}
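
/*
 * For reference, the sstatus polling above masks the low bits of the
 * standard SATA SStatus.DET field:
 *
 *	0x0 - no device detected
 *	0x3 - device present and PHY communication established
 *
 * Intermediate values (e.g. 0x1, device detected but no PHY comms
 * yet) keep the loop spinning until the deadline expires.
 */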
2294
cc0680a5 2295static int mv_prereset(struct ata_link *link, unsigned long deadline)
22374677 2296{
cc0680a5 2297 struct ata_port *ap = link->ap;
bdd4ddde 2298 struct mv_port_priv *pp = ap->private_data;
cc0680a5 2299 struct ata_eh_context *ehc = &link->eh_context;
bdd4ddde 2300 int rc;
0ea9e179 2301
bdd4ddde
JG
2302 rc = mv_stop_dma(ap);
2303 if (rc)
2304 ehc->i.action |= ATA_EH_HARDRESET;
2305
2306 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2307 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2308 ehc->i.action |= ATA_EH_HARDRESET;
2309 }
2310
2311 /* if we're about to do hardreset, nothing more to do */
2312 if (ehc->i.action & ATA_EH_HARDRESET)
2313 return 0;
2314
cc0680a5 2315 if (ata_link_online(link))
bdd4ddde
JG
2316 rc = ata_wait_ready(ap, deadline);
2317 else
2318 rc = -ENODEV;
2319
2320 return rc;
22374677
JG
2321}
2322
cc0680a5 2323static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2324 unsigned long deadline)
31961943 2325{
cc0680a5 2326 struct ata_port *ap = link->ap;
bdd4ddde 2327 struct mv_host_priv *hpriv = ap->host->private_data;
0d5ff566 2328 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
31961943 2329
bdd4ddde 2330 mv_stop_dma(ap);
31961943 2331
bdd4ddde 2332 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2333
bdd4ddde
JG
2334 mv_phy_reset(ap, class, deadline);
2335
2336 return 0;
2337}
2338
cc0680a5 2339static void mv_postreset(struct ata_link *link, unsigned int *classes)
bdd4ddde 2340{
cc0680a5 2341 struct ata_port *ap = link->ap;
bdd4ddde
JG
2342 u32 serr;
2343
2344 /* print link status */
cc0680a5 2345 sata_print_link_status(link);
31961943 2346
bdd4ddde 2347 /* clear SError */
cc0680a5
TH
2348 sata_scr_read(link, SCR_ERROR, &serr);
2349 sata_scr_write_flush(link, SCR_ERROR, serr);
bdd4ddde
JG
2350
2351 /* bail out if no device is present */
2352 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2353 DPRINTK("EXIT, no device\n");
2354 return;
9b358e30 2355 }
bdd4ddde
JG
2356
2357 /* set up device control */
2358 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2359}
2360
2361static void mv_error_handler(struct ata_port *ap)
2362{
2363 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2364 mv_hardreset, mv_postreset);
2365}
2366
2367static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2368{
2369 mv_stop_dma(qc->ap);
2370}
2371
2372static void mv_eh_freeze(struct ata_port *ap)
2373{
2374 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2375 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2376 u32 tmp, mask;
2377 unsigned int shift;
2378
2379 /* FIXME: handle coalescing completion events properly */
2380
2381 shift = ap->port_no * 2;
2382 if (hc > 0)
2383 shift++;
2384
2385 mask = 0x3 << shift;
2386
2387 /* disable assertion of portN err, done events */
2388 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2389 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2390}
2391
2392static void mv_eh_thaw(struct ata_port *ap)
2393{
2394 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2395 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2396 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2397 void __iomem *port_mmio = mv_ap_base(ap);
2398 u32 tmp, mask, hc_irq_cause;
2399 unsigned int shift, hc_port_no = ap->port_no;
2400
2401 /* FIXME: handle coalescing completion events properly */
2402
2403 shift = ap->port_no * 2;
2404 if (hc > 0) {
2405 shift++;
2406 hc_port_no -= 4;
2407 }
2408
2409 mask = 0x3 << shift;
2410
2411 /* clear EDMA errors on this port */
2412 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2413
2414 /* clear pending irq events */
2415 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2416 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2417 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2418 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2419
2420 /* enable assertion of portN err, done events */
2421 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2422 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
31961943
BR
2423}
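
/*
 * Illustration of the thaw path above (not from the original source):
 * for port 6 the numbers work out as
 *
 *	hc = 1, shift = 6 * 2 + 1 = 13, mask = 0x3 << 13
 *	hc_port_no = 6 - 4 = 2
 *
 * so bit 2 (CRPB done) and bit 10 (device interrupt) are cleared in
 * that HC's IRQ cause register before the port's err/done bits are
 * re-enabled in the main mask.
 */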
2424
05b308e1
BR
2425/**
2426 * mv_port_init - Perform some early initialization on a single port.
2427 * @port: libata data structure storing shadow register addresses
2428 * @port_mmio: base address of the port
2429 *
2430 * Initialize shadow register mmio addresses, clear outstanding
2431 * interrupts on the port, and unmask interrupts for the future
2432 * start of the port.
2433 *
2434 * LOCKING:
2435 * Inherited from caller.
2436 */
31961943 2437static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2438{
0d5ff566 2439 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2440 unsigned serr_ofs;
2441
8b260248 2442 /* PIO related setup
31961943
BR
2443 */
2444 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2445 port->error_addr =
31961943
BR
2446 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2447 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2448 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2449 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2450 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2451 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2452 port->status_addr =
31961943
BR
2453 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2454 /* special case: control/altstatus doesn't have ATA_REG_ address */
2455 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2456
2457 /* unused: */
8d9db2d2 2458 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2459
31961943
BR
2460 /* Clear any currently outstanding port interrupt conditions */
2461 serr_ofs = mv_scr_offset(SCR_ERROR);
2462 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2463 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2464
646a4da5
ML
2465 /* unmask all non-transient EDMA error interrupts */
2466 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2467
8b260248 2468 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2469 readl(port_mmio + EDMA_CFG_OFS),
2470 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2471 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2472}
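
/*
 * Shadow register layout sketch (assumes the standard ATA_REG_*
 * numbering, DATA=0 through STATUS=7): the taskfile registers sit in
 * one block at SHD_BLK_OFS, spaced sizeof(u32) apart, e.g.
 *
 *	data   = shd_base + 0x00
 *	nsect  = shd_base + 0x08
 *	status = shd_base + 0x1c
 *
 * with control/altstatus at its separate SHD_CTL_AST_OFS offset.
 */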
2473
4447d351 2474static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2475{
4447d351
TH
2476 struct pci_dev *pdev = to_pci_dev(host->dev);
2477 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2478 u32 hp_flags = hpriv->hp_flags;
2479
5796d1c4 2480 switch (board_idx) {
47c2b677
JG
2481 case chip_5080:
2482 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2483 hp_flags |= MV_HP_GEN_I;
47c2b677 2484
44c10138 2485 switch (pdev->revision) {
47c2b677
JG
2486 case 0x1:
2487 hp_flags |= MV_HP_ERRATA_50XXB0;
2488 break;
2489 case 0x3:
2490 hp_flags |= MV_HP_ERRATA_50XXB2;
2491 break;
2492 default:
2493 dev_printk(KERN_WARNING, &pdev->dev,
2494 "Applying 50XXB2 workarounds to unknown rev\n");
2495 hp_flags |= MV_HP_ERRATA_50XXB2;
2496 break;
2497 }
2498 break;
2499
bca1c4eb
JG
2500 case chip_504x:
2501 case chip_508x:
47c2b677 2502 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2503 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2504
44c10138 2505 switch (pdev->revision) {
47c2b677
JG
2506 case 0x0:
2507 hp_flags |= MV_HP_ERRATA_50XXB0;
2508 break;
2509 case 0x3:
2510 hp_flags |= MV_HP_ERRATA_50XXB2;
2511 break;
2512 default:
2513 dev_printk(KERN_WARNING, &pdev->dev,
2514 "Applying B2 workarounds to unknown rev\n");
2515 hp_flags |= MV_HP_ERRATA_50XXB2;
2516 break;
bca1c4eb
JG
2517 }
2518 break;
2519
2520 case chip_604x:
2521 case chip_608x:
47c2b677 2522 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2523 hp_flags |= MV_HP_GEN_II;
47c2b677 2524
44c10138 2525 switch (pdev->revision) {
47c2b677
JG
2526 case 0x7:
2527 hp_flags |= MV_HP_ERRATA_60X1B2;
2528 break;
2529 case 0x9:
2530 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2531 break;
2532 default:
2533 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2534 "Applying B2 workarounds to unknown rev\n");
2535 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2536 break;
2537 }
2538 break;
2539
e4e7b892 2540 case chip_7042:
02a121da 2541 hp_flags |= MV_HP_PCIE;
306b30f7
ML
2542 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2543 (pdev->device == 0x2300 || pdev->device == 0x2310))
2544 {
4e520033
ML
2545 /*
2546 * Highpoint RocketRAID PCIe 23xx series cards:
2547 *
2548 * Unconfigured drives are treated as "Legacy"
2549 * by the BIOS, and it overwrites sector 8 with
2550 * a "Lgcy" metadata block prior to Linux boot.
2551 *
2552 * Configured drives (RAID or JBOD) leave sector 8
2553 * alone, but instead overwrite a high numbered
2554 * sector for the RAID metadata. This sector can
2555 * be determined exactly, by truncating the physical
2556 * drive capacity to a nice even GB value.
2557 *
2558 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2559 *
2560 * Warn the user, lest they think we're just buggy.
2561 */
2562 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2563 " BIOS CORRUPTS DATA on all attached drives,"
2564 " regardless of if/how they are configured."
2565 " BEWARE!\n");
2566 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2567 " use sectors 8-9 on \"Legacy\" drives,"
2568 " and avoid the final two gigabytes on"
2569 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2570 }
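		/* note: chip_7042 deliberately falls through to the
		 * Gen-IIE setup shared with chip_6042 below
		 */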
e4e7b892
JG
2571 case chip_6042:
2572 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2573 hp_flags |= MV_HP_GEN_IIE;
2574
44c10138 2575 switch (pdev->revision) {
e4e7b892
JG
2576 case 0x0:
2577 hp_flags |= MV_HP_ERRATA_XX42A0;
2578 break;
2579 case 0x1:
2580 hp_flags |= MV_HP_ERRATA_60X1C0;
2581 break;
2582 default:
2583 dev_printk(KERN_WARNING, &pdev->dev,
2584 "Applying 60X1C0 workarounds to unknown rev\n");
2585 hp_flags |= MV_HP_ERRATA_60X1C0;
2586 break;
2587 }
2588 break;
2589
bca1c4eb 2590 default:
5796d1c4
JG
2591 dev_printk(KERN_ERR, &pdev->dev,
2592 "BUG: invalid board index %u\n", board_idx);
bca1c4eb
JG
2593 return 1;
2594 }
2595
2596 hpriv->hp_flags = hp_flags;
02a121da
ML
2597 if (hp_flags & MV_HP_PCIE) {
2598 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2599 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2600 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2601 } else {
2602 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2603 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2604 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2605 }
bca1c4eb
JG
2606
2607 return 0;
2608}
2609
05b308e1 2610/**
47c2b677 2611 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2612 * @host: ATA host to initialize
2613 * @board_idx: controller index
05b308e1
BR
2614 *
2615 * If possible, do an early global reset of the host. Then do
2616 * our port init and clear/unmask all/relevant host interrupts.
2617 *
2618 * LOCKING:
2619 * Inherited from caller.
2620 */
4447d351 2621static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2622{
2623 int rc = 0, n_hc, port, hc;
4447d351
TH
2624 struct pci_dev *pdev = to_pci_dev(host->dev);
2625 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2626 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb 2627
47c2b677
JG
2628 /* global interrupt mask */
2629 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2630
4447d351 2631 rc = mv_chip_id(host, board_idx);
bca1c4eb
JG
2632 if (rc)
2633 goto done;
2634
4447d351 2635 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2636
4447d351 2637 for (port = 0; port < host->n_ports; port++)
47c2b677 2638 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2639
c9d39130 2640 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2641 if (rc)
20f733e7 2642 goto done;
20f733e7 2643
522479fb
JG
2644 hpriv->ops->reset_flash(hpriv, mmio);
2645 hpriv->ops->reset_bus(pdev, mmio);
47c2b677 2646 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2647
4447d351 2648 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2649 if (IS_GEN_II(hpriv)) {
c9d39130
JG
2650 void __iomem *port_mmio = mv_port_base(mmio, port);
2651
2a47ce06 2652 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2653 ifctl |= (1 << 7); /* enable gen2i speed */
2654 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2655 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2656 }
2657
c9d39130 2658 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2659 }
2660
4447d351 2661 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2662 struct ata_port *ap = host->ports[port];
2a47ce06 2663 void __iomem *port_mmio = mv_port_base(mmio, port);
cbcdd875
TH
2664 unsigned int offset = port_mmio - mmio;
2665
2666 mv_port_init(&ap->ioaddr, port_mmio);
2667
2668 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2669 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
20f733e7
BR
2670 }
2671
2672 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2673 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2674
2675 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2676 "(before clear)=0x%08x\n", hc,
2677 readl(hc_mmio + HC_CFG_OFS),
2678 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2679
2680 /* Clear any currently outstanding hc interrupt conditions */
2681 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2682 }
2683
31961943 2684 /* Clear any currently outstanding host interrupt conditions */
02a121da 2685 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943
BR
2686
2687 /* and unmask interrupt generation for host regs */
02a121da 2688 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
fb621e2f 2689
ee9ccdf7 2690 if (IS_GEN_I(hpriv))
fb621e2f
JG
2691 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2692 else
2693 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
20f733e7
BR
2694
2695 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
8b260248 2696 "PCI int cause/mask=0x%08x/0x%08x\n",
20f733e7
BR
2697 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2698 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
02a121da
ML
2699 readl(mmio + hpriv->irq_cause_ofs),
2700 readl(mmio + hpriv->irq_mask_ofs));
bca1c4eb 2701
31961943 2702done:
20f733e7
BR
2703 return rc;
2704}
2705
05b308e1
BR
2706/**
2707 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2708 * @host: ATA host to print info about
05b308e1
BR
2709 *
2710 * FIXME: complete this.
2711 *
2712 * LOCKING:
2713 * Inherited from caller.
2714 */
4447d351 2715static void mv_print_info(struct ata_host *host)
31961943 2716{
4447d351
TH
2717 struct pci_dev *pdev = to_pci_dev(host->dev);
2718 struct mv_host_priv *hpriv = host->private_data;
44c10138 2719 u8 scc;
c1e4fe71 2720 const char *scc_s, *gen;
31961943
BR
2721
2722 /* Use this to determine the HW stepping of the chip so we know
2723 	 * what errata to work around
2724 */
31961943
BR
2725 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2726 if (scc == 0)
2727 scc_s = "SCSI";
2728 else if (scc == 0x01)
2729 scc_s = "RAID";
2730 else
c1e4fe71
JG
2731 scc_s = "?";
2732
2733 if (IS_GEN_I(hpriv))
2734 gen = "I";
2735 else if (IS_GEN_II(hpriv))
2736 gen = "II";
2737 else if (IS_GEN_IIE(hpriv))
2738 gen = "IIE";
2739 else
2740 gen = "?";
31961943 2741
a9524a76 2742 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
2743 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2744 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
2745 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2746}
2747
05b308e1
BR
2748/**
2749 * mv_init_one - handle a positive probe of a Marvell host
2750 * @pdev: PCI device found
2751 * @ent: PCI device ID entry for the matched host
2752 *
2753 * LOCKING:
2754 * Inherited from caller.
2755 */
20f733e7
BR
2756static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2757{
2dcb407e 2758 static int printed_version;
20f733e7 2759 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
2760 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2761 struct ata_host *host;
2762 struct mv_host_priv *hpriv;
2763 int n_ports, rc;
20f733e7 2764
a9524a76
JG
2765 if (!printed_version++)
2766 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 2767
4447d351
TH
2768 /* allocate host */
2769 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2770
2771 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2772 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2773 if (!host || !hpriv)
2774 return -ENOMEM;
2775 host->private_data = hpriv;
2776
2777 /* acquire resources */
24dc5f33
TH
2778 rc = pcim_enable_device(pdev);
2779 if (rc)
20f733e7 2780 return rc;
20f733e7 2781
0d5ff566
TH
2782 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2783 if (rc == -EBUSY)
24dc5f33 2784 pcim_pin_device(pdev);
0d5ff566 2785 if (rc)
24dc5f33 2786 return rc;
4447d351 2787 host->iomap = pcim_iomap_table(pdev);
20f733e7 2788
d88184fb
JG
2789 rc = pci_go_64(pdev);
2790 if (rc)
2791 return rc;
2792
20f733e7 2793 /* initialize adapter */
4447d351 2794 rc = mv_init_host(host, board_idx);
24dc5f33
TH
2795 if (rc)
2796 return rc;
20f733e7 2797
31961943 2798 /* Enable interrupts */
6a59dcf8 2799 if (msi && pci_enable_msi(pdev))
31961943 2800 pci_intx(pdev, 1);
20f733e7 2801
31961943 2802 mv_dump_pci_cfg(pdev, 0x68);
4447d351 2803 mv_print_info(host);
20f733e7 2804
4447d351 2805 pci_set_master(pdev);
ea8b4db9 2806 pci_try_set_mwi(pdev);
4447d351 2807 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 2808 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7
BR
2809}
2810
2811static int __init mv_init(void)
2812{
b7887196 2813 return pci_register_driver(&mv_pci_driver);
20f733e7
BR
2814}
2815
2816static void __exit mv_exit(void)
2817{
2818 pci_unregister_driver(&mv_pci_driver);
2819}
2820
2821MODULE_AUTHOR("Brett Russ");
2822MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2823MODULE_LICENSE("GPL");
2824MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2825MODULE_VERSION(DRV_VERSION);
2826
ddef9bb3
JG
2827module_param(msi, int, 0444);
2828MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2829
20f733e7
BR
2830module_init(mv_init);
2831module_exit(mv_exit);