sata_mv ncq Fix EDMA configuration
drivers/ata/sata_mv.c
1/*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
 29 I distinctly remember that a couple of workarounds (one related to PCI-X)
30 are still needed.
31
32 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
38 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
 46 the overhead reduced by interrupt mitigation is not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58*/
59
60
61#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
68#include <linux/dma-mapping.h>
69#include <linux/device.h>
70#include <scsi/scsi_host.h>
71#include <scsi/scsi_cmnd.h>
72#include <scsi/scsi_device.h>
73#include <linux/libata.h>
74
75#define DRV_NAME "sata_mv"
76#define DRV_VERSION "1.01"
77
78enum {
 80 /* BARs are enumerated in pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
89 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
95 MV_SATAHC0_REG_BASE = 0x20000,
96 MV_FLASH_CTL = 0x1046c,
97 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
99
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118
119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
123 MV_PORT_MASK = 3,
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
132
133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
147 /* PCI interface registers */
148
149 PCI_COMMAND_OFS = 0xc00,
150
151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
171 PCIE_IRQ_CAUSE_OFS = 0x1900,
172 PCIE_IRQ_MASK_OFS = 0x1910,
173 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
174
175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
176 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
177 PORT0_ERR = (1 << 0), /* shift by port # */
178 PORT0_DONE = (1 << 1), /* shift by port # */
179 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
180 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
181 PCI_ERR = (1 << 18),
182 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
183 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
184 PORTS_0_3_COAL_DONE = (1 << 8),
185 PORTS_4_7_COAL_DONE = (1 << 17),
186 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
187 GPIO_INT = (1 << 22),
188 SELF_INT = (1 << 23),
189 TWSI_INT = (1 << 24),
190 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
191 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
192 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
193 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
194 HC_MAIN_RSVD),
195 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
196 HC_MAIN_RSVD_5),
197
198 /* SATAHC registers */
199 HC_CFG_OFS = 0,
200
201 HC_IRQ_CAUSE_OFS = 0x14,
202 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
203 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
204 DEV_IRQ = (1 << 8), /* shift by port # */
205
206 /* Shadow block registers */
207 SHD_BLK_OFS = 0x100,
208 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
209
210 /* SATA registers */
211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
212 SATA_ACTIVE_OFS = 0x350,
213 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
214 PHY_MODE3 = 0x310,
215 PHY_MODE4 = 0x314,
216 PHY_MODE2 = 0x330,
217 MV5_PHY_MODE = 0x74,
218 MV5_LT_MODE = 0x30,
219 MV5_PHY_CTL = 0x0C,
220 SATA_INTERFACE_CTL = 0x050,
221
222 MV_M2_PREAMP_MASK = 0x7e0,
223
224 /* Port registers */
225 EDMA_CFG_OFS = 0,
226 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
227 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
228 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
229 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
230 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
231
232 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
233 EDMA_ERR_IRQ_MASK_OFS = 0xc,
234 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
235 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
236 EDMA_ERR_DEV = (1 << 2), /* device error */
237 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
238 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
239 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
240 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
241 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
242 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
 243 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
244 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
245 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
246 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
247 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
248
249 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
250 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
251 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
252 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
253 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
254
255 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
256
257 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
258 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
259 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
260 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
261 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
262 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
263
264 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
265
266 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
267 EDMA_ERR_OVERRUN_5 = (1 << 5),
268 EDMA_ERR_UNDERRUN_5 = (1 << 6),
269
270 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
271 EDMA_ERR_LNK_CTRL_RX_1 |
272 EDMA_ERR_LNK_CTRL_RX_3 |
273 EDMA_ERR_LNK_CTRL_TX,
274
275 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
276 EDMA_ERR_PRD_PAR |
277 EDMA_ERR_DEV_DCON |
278 EDMA_ERR_DEV_CON |
279 EDMA_ERR_SERR |
280 EDMA_ERR_SELF_DIS |
281 EDMA_ERR_CRQB_PAR |
282 EDMA_ERR_CRPB_PAR |
283 EDMA_ERR_INTRL_PAR |
284 EDMA_ERR_IORDY |
285 EDMA_ERR_LNK_CTRL_RX_2 |
286 EDMA_ERR_LNK_DATA_RX |
287 EDMA_ERR_LNK_DATA_TX |
288 EDMA_ERR_TRANS_PROTO,
289 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
290 EDMA_ERR_PRD_PAR |
291 EDMA_ERR_DEV_DCON |
292 EDMA_ERR_DEV_CON |
293 EDMA_ERR_OVERRUN_5 |
294 EDMA_ERR_UNDERRUN_5 |
295 EDMA_ERR_SELF_DIS_5 |
296 EDMA_ERR_CRQB_PAR |
297 EDMA_ERR_CRPB_PAR |
298 EDMA_ERR_INTRL_PAR |
299 EDMA_ERR_IORDY,
300
301 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
302 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
303
304 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
305 EDMA_REQ_Q_PTR_SHIFT = 5,
306
307 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
308 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
309 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
310 EDMA_RSP_Q_PTR_SHIFT = 3,
311
312 EDMA_CMD_OFS = 0x28, /* EDMA command register */
313 EDMA_EN = (1 << 0), /* enable EDMA */
314 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
315 ATA_RST = (1 << 2), /* reset trans/link/phy */
316
317 EDMA_IORDY_TMOUT = 0x34,
318 EDMA_ARB_CFG = 0x38,
319
320 /* Host private flags (hp_flags) */
321 MV_HP_FLAG_MSI = (1 << 0),
322 MV_HP_ERRATA_50XXB0 = (1 << 1),
323 MV_HP_ERRATA_50XXB2 = (1 << 2),
324 MV_HP_ERRATA_60X1B2 = (1 << 3),
325 MV_HP_ERRATA_60X1C0 = (1 << 4),
326 MV_HP_ERRATA_XX42A0 = (1 << 5),
327 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
328 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
329 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
330 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
331
332 /* Port private flags (pp_flags) */
333 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
334 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
335};
336
337#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
338#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
339#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
340
341enum {
342 /* DMA boundary 0xffff is required by the s/g splitting
 343 * we need on each entry's length in mv_fill_sg().
344 */
345 MV_DMA_BOUNDARY = 0xffffU,
346
347 /* mask of register bits containing lower 32 bits
348 * of EDMA request queue DMA address
349 */
350 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
351
352 /* ditto, for response queue */
353 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
354};
355
356enum chip_type {
357 chip_504x,
358 chip_508x,
359 chip_5080,
360 chip_604x,
361 chip_608x,
362 chip_6042,
363 chip_7042,
364};
365
366/* Command ReQuest Block: 32B */
367struct mv_crqb {
368 __le32 sg_addr;
369 __le32 sg_addr_hi;
370 __le16 ctrl_flags;
371 __le16 ata_cmd[11];
372};
373
374struct mv_crqb_iie {
375 __le32 addr;
376 __le32 addr_hi;
377 __le32 flags;
378 __le32 len;
379 __le32 ata_cmd[4];
380};
381
382/* Command ResPonse Block: 8B */
383struct mv_crpb {
384 __le16 id;
385 __le16 flags;
386 __le32 tmstmp;
387};
388
389/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
390struct mv_sg {
391 __le32 addr;
392 __le32 flags_size;
393 __le32 addr_hi;
394 __le32 reserved;
395};
396
397struct mv_port_priv {
398 struct mv_crqb *crqb;
399 dma_addr_t crqb_dma;
400 struct mv_crpb *crpb;
401 dma_addr_t crpb_dma;
402 struct mv_sg *sg_tbl;
403 dma_addr_t sg_tbl_dma;
404
405 unsigned int req_idx;
406 unsigned int resp_idx;
407
408 u32 pp_flags;
409};
410
411struct mv_port_signal {
412 u32 amps;
413 u32 pre;
414};
415
416struct mv_host_priv {
417 u32 hp_flags;
418 struct mv_port_signal signal[8];
419 const struct mv_hw_ops *ops;
420 u32 irq_cause_ofs;
421 u32 irq_mask_ofs;
422 u32 unmask_all_irqs;
423};
424
425struct mv_hw_ops {
426 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
427 unsigned int port);
428 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
429 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
430 void __iomem *mmio);
431 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
432 unsigned int n_hc);
433 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
434 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
435};
436
437static void mv_irq_clear(struct ata_port *ap);
438static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
439static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
440static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
441static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
442static int mv_port_start(struct ata_port *ap);
443static void mv_port_stop(struct ata_port *ap);
444static void mv_qc_prep(struct ata_queued_cmd *qc);
445static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
446static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
447static void mv_error_handler(struct ata_port *ap);
448static void mv_post_int_cmd(struct ata_queued_cmd *qc);
449static void mv_eh_freeze(struct ata_port *ap);
450static void mv_eh_thaw(struct ata_port *ap);
451static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
452
453static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
455static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
456static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
458static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
460static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
461static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
462
463static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
464 unsigned int port);
465static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
466static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
467 void __iomem *mmio);
468static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
469 unsigned int n_hc);
470static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
471static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
472static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
473 unsigned int port_no);
474static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
475 void __iomem *port_mmio);
476
477static struct scsi_host_template mv5_sht = {
478 .module = THIS_MODULE,
479 .name = DRV_NAME,
480 .ioctl = ata_scsi_ioctl,
481 .queuecommand = ata_scsi_queuecmd,
482 .can_queue = ATA_DEF_QUEUE,
483 .this_id = ATA_SHT_THIS_ID,
484 .sg_tablesize = MV_MAX_SG_CT / 2,
485 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
486 .emulated = ATA_SHT_EMULATED,
487 .use_clustering = 1,
488 .proc_name = DRV_NAME,
489 .dma_boundary = MV_DMA_BOUNDARY,
490 .slave_configure = ata_scsi_slave_config,
491 .slave_destroy = ata_scsi_slave_destroy,
492 .bios_param = ata_std_bios_param,
493};
494
495static struct scsi_host_template mv6_sht = {
496 .module = THIS_MODULE,
497 .name = DRV_NAME,
498 .ioctl = ata_scsi_ioctl,
499 .queuecommand = ata_scsi_queuecmd,
500 .can_queue = ATA_DEF_QUEUE,
501 .this_id = ATA_SHT_THIS_ID,
502 .sg_tablesize = MV_MAX_SG_CT / 2,
503 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
504 .emulated = ATA_SHT_EMULATED,
505 .use_clustering = 1,
506 .proc_name = DRV_NAME,
507 .dma_boundary = MV_DMA_BOUNDARY,
508 .slave_configure = ata_scsi_slave_config,
509 .slave_destroy = ata_scsi_slave_destroy,
510 .bios_param = ata_std_bios_param,
511};
512
513static const struct ata_port_operations mv5_ops = {
514 .tf_load = ata_tf_load,
515 .tf_read = ata_tf_read,
516 .check_status = ata_check_status,
517 .exec_command = ata_exec_command,
518 .dev_select = ata_std_dev_select,
519
520 .cable_detect = ata_cable_sata,
521
522 .qc_prep = mv_qc_prep,
523 .qc_issue = mv_qc_issue,
524 .data_xfer = ata_data_xfer,
525
526 .irq_clear = mv_irq_clear,
527 .irq_on = ata_irq_on,
528
529 .error_handler = mv_error_handler,
530 .post_internal_cmd = mv_post_int_cmd,
531 .freeze = mv_eh_freeze,
532 .thaw = mv_eh_thaw,
533
534 .scr_read = mv5_scr_read,
535 .scr_write = mv5_scr_write,
536
537 .port_start = mv_port_start,
538 .port_stop = mv_port_stop,
539};
540
541static const struct ata_port_operations mv6_ops = {
542 .tf_load = ata_tf_load,
543 .tf_read = ata_tf_read,
544 .check_status = ata_check_status,
545 .exec_command = ata_exec_command,
546 .dev_select = ata_std_dev_select,
547
548 .cable_detect = ata_cable_sata,
549
550 .qc_prep = mv_qc_prep,
551 .qc_issue = mv_qc_issue,
552 .data_xfer = ata_data_xfer,
553
554 .irq_clear = mv_irq_clear,
555 .irq_on = ata_irq_on,
556
557 .error_handler = mv_error_handler,
558 .post_internal_cmd = mv_post_int_cmd,
559 .freeze = mv_eh_freeze,
560 .thaw = mv_eh_thaw,
561
562 .scr_read = mv_scr_read,
563 .scr_write = mv_scr_write,
564
565 .port_start = mv_port_start,
566 .port_stop = mv_port_stop,
567};
568
569static const struct ata_port_operations mv_iie_ops = {
570 .tf_load = ata_tf_load,
571 .tf_read = ata_tf_read,
572 .check_status = ata_check_status,
573 .exec_command = ata_exec_command,
574 .dev_select = ata_std_dev_select,
575
576 .cable_detect = ata_cable_sata,
577
578 .qc_prep = mv_qc_prep_iie,
579 .qc_issue = mv_qc_issue,
580 .data_xfer = ata_data_xfer,
581
582 .irq_clear = mv_irq_clear,
583 .irq_on = ata_irq_on,
584
585 .error_handler = mv_error_handler,
586 .post_internal_cmd = mv_post_int_cmd,
587 .freeze = mv_eh_freeze,
588 .thaw = mv_eh_thaw,
589
590 .scr_read = mv_scr_read,
591 .scr_write = mv_scr_write,
592
593 .port_start = mv_port_start,
594 .port_stop = mv_port_stop,
595};
596
597static const struct ata_port_info mv_port_info[] = {
598 { /* chip_504x */
599 .flags = MV_COMMON_FLAGS,
600 .pio_mask = 0x1f, /* pio0-4 */
601 .udma_mask = ATA_UDMA6,
602 .port_ops = &mv5_ops,
603 },
604 { /* chip_508x */
605 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
606 .pio_mask = 0x1f, /* pio0-4 */
607 .udma_mask = ATA_UDMA6,
608 .port_ops = &mv5_ops,
609 },
610 { /* chip_5080 */
611 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
612 .pio_mask = 0x1f, /* pio0-4 */
613 .udma_mask = ATA_UDMA6,
614 .port_ops = &mv5_ops,
615 },
616 { /* chip_604x */
617 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
618 .pio_mask = 0x1f, /* pio0-4 */
619 .udma_mask = ATA_UDMA6,
620 .port_ops = &mv6_ops,
621 },
622 { /* chip_608x */
623 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
624 MV_FLAG_DUAL_HC,
625 .pio_mask = 0x1f, /* pio0-4 */
626 .udma_mask = ATA_UDMA6,
627 .port_ops = &mv6_ops,
628 },
629 { /* chip_6042 */
630 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
631 .pio_mask = 0x1f, /* pio0-4 */
632 .udma_mask = ATA_UDMA6,
633 .port_ops = &mv_iie_ops,
634 },
635 { /* chip_7042 */
636 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
637 .pio_mask = 0x1f, /* pio0-4 */
638 .udma_mask = ATA_UDMA6,
639 .port_ops = &mv_iie_ops,
640 },
641};
642
643static const struct pci_device_id mv_pci_tbl[] = {
644 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
645 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
646 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
647 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
648 /* RocketRAID 1740/174x have different identifiers */
649 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
650 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
651
652 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
653 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
654 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
655 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
656 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
657
658 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
659
660 /* Adaptec 1430SA */
661 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
662
663 /* Marvell 7042 support */
664 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
665
666 /* Highpoint RocketRAID PCIe series */
667 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
668 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
669
670 { } /* terminate list */
671};
672
673static struct pci_driver mv_pci_driver = {
674 .name = DRV_NAME,
675 .id_table = mv_pci_tbl,
676 .probe = mv_init_one,
677 .remove = ata_pci_remove_one,
678};
679
680static const struct mv_hw_ops mv5xxx_ops = {
681 .phy_errata = mv5_phy_errata,
682 .enable_leds = mv5_enable_leds,
683 .read_preamp = mv5_read_preamp,
684 .reset_hc = mv5_reset_hc,
685 .reset_flash = mv5_reset_flash,
686 .reset_bus = mv5_reset_bus,
687};
688
689static const struct mv_hw_ops mv6xxx_ops = {
690 .phy_errata = mv6_phy_errata,
691 .enable_leds = mv6_enable_leds,
692 .read_preamp = mv6_read_preamp,
693 .reset_hc = mv6_reset_hc,
694 .reset_flash = mv6_reset_flash,
695 .reset_bus = mv_reset_pci_bus,
696};
697
698/*
699 * module options
700 */
701static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
702
703
704/* move to PCI layer or libata core? */
705static int pci_go_64(struct pci_dev *pdev)
706{
707 int rc;
708
709 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
710 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
711 if (rc) {
712 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
713 if (rc) {
714 dev_printk(KERN_ERR, &pdev->dev,
715 "64-bit DMA enable failed\n");
716 return rc;
717 }
718 }
719 } else {
720 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
721 if (rc) {
722 dev_printk(KERN_ERR, &pdev->dev,
723 "32-bit DMA enable failed\n");
724 return rc;
725 }
726 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
727 if (rc) {
728 dev_printk(KERN_ERR, &pdev->dev,
729 "32-bit consistent DMA enable failed\n");
730 return rc;
731 }
732 }
733
734 return rc;
735}
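
/*
 * Illustrative usage sketch (hypothetical caller, not from the original
 * driver): a PCI probe routine would typically call this helper right
 * after enabling the device, bailing out if neither a 64-bit nor a
 * 32-bit DMA mask could be set:
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	rc = pci_go_64(pdev);
 *	if (rc)
 *		return rc;
 */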
736
737/*
738 * Functions
739 */
740
741static inline void writelfl(unsigned long data, void __iomem *addr)
742{
743 writel(data, addr);
744 (void) readl(addr); /* flush to avoid PCI posted write */
745}
746
747static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
748{
749 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
750}
751
752static inline unsigned int mv_hc_from_port(unsigned int port)
753{
754 return port >> MV_PORT_HC_SHIFT;
755}
756
757static inline unsigned int mv_hardport_from_port(unsigned int port)
758{
759 return port & MV_PORT_MASK;
760}
761
762static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
763 unsigned int port)
764{
765 return mv_hc_base(base, mv_hc_from_port(port));
766}
767
768static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
769{
770 return mv_hc_base_from_port(base, port) +
771 MV_SATAHC_ARBTR_REG_SZ +
772 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
773}
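
/*
 * Illustrative sketch (not part of the original driver): how the helpers
 * above decompose a 0-7 port number.  For port 5 on an 8-port (dual-HC)
 * chip:
 *
 *   mv_hc_from_port(5)       = 5 >> 2 = 1   (second SATAHC)
 *   mv_hardport_from_port(5) = 5 & 3  = 1   (second port within that HC)
 *   mv_hc_base(base, 1)      = base + 0x20000 + 1 * 0x10000 = base + 0x30000
 *   mv_port_base(base, 5)    = base + 0x30000 + 0x2000 (arbiter)
 *                                   + 1 * 0x2000         = base + 0x34000
 */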
774
775static inline void __iomem *mv_ap_base(struct ata_port *ap)
776{
777 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
778}
779
780static inline int mv_get_hc_count(unsigned long port_flags)
781{
782 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
783}
784
785static void mv_irq_clear(struct ata_port *ap)
786{
787}
788
789static void mv_set_edma_ptrs(void __iomem *port_mmio,
790 struct mv_host_priv *hpriv,
791 struct mv_port_priv *pp)
792{
793 u32 index;
794
795 /*
796 * initialize request queue
797 */
798 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
799
800 WARN_ON(pp->crqb_dma & 0x3ff);
801 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
802 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
803 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
804
805 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
806 writelfl((pp->crqb_dma & 0xffffffff) | index,
807 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
808 else
809 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
810
811 /*
812 * initialize response queue
813 */
814 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
815
816 WARN_ON(pp->crpb_dma & 0xff);
817 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
818
819 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
820 writelfl((pp->crpb_dma & 0xffffffff) | index,
821 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
822 else
823 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
824
825 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
826 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
827}
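
/*
 * Illustrative sketch (hypothetical values, not from the original driver):
 * the request-queue "in" register mixes the queue base address with the
 * producer index.  With a 1KB-aligned crqb_dma of 0x1f340000 and
 * req_idx == 3:
 *
 *   index = (3 & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT = 3 << 5 = 0x60
 *   value = (0x1f340000 & EDMA_REQ_Q_BASE_LO_MASK) | 0x60     = 0x1f340060
 *
 * i.e. bits 31:10 carry the queue base, bits 9:5 carry the 5-bit ring index.
 */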
828
829/**
830 * mv_start_dma - Enable eDMA engine
 831 * @port_mmio: port base address
832 * @pp: port private data
833 *
834 * Verify the local cache of the eDMA state is accurate with a
835 * WARN_ON.
836 *
837 * LOCKING:
838 * Inherited from caller.
839 */
840static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
841 struct mv_port_priv *pp)
842{
843 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
844 struct mv_host_priv *hpriv = ap->host->private_data;
845 int hard_port = mv_hardport_from_port(ap->port_no);
846 void __iomem *hc_mmio = mv_hc_base_from_port(
847 ap->host->iomap[MV_PRIMARY_BAR], hard_port);
848 u32 hc_irq_cause, ipending;
849
850 /* clear EDMA event indicators, if any */
851 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
852
853 /* clear EDMA interrupt indicator, if any */
854 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
855 ipending = (DEV_IRQ << hard_port) |
856 (CRPB_DMA_DONE << hard_port);
857 if (hc_irq_cause & ipending) {
858 writelfl(hc_irq_cause & ~ipending,
859 hc_mmio + HC_IRQ_CAUSE_OFS);
860 }
861
862 mv_edma_cfg(ap, hpriv, port_mmio);
863
864 /* clear FIS IRQ Cause */
865 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
866
867 mv_set_edma_ptrs(port_mmio, hpriv, pp);
868
869 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
870 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
871 }
872 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
873}
874
875/**
876 * __mv_stop_dma - Disable eDMA engine
877 * @ap: ATA channel to manipulate
878 *
879 * Verify the local cache of the eDMA state is accurate with a
880 * WARN_ON.
881 *
882 * LOCKING:
883 * Inherited from caller.
884 */
885static int __mv_stop_dma(struct ata_port *ap)
886{
887 void __iomem *port_mmio = mv_ap_base(ap);
888 struct mv_port_priv *pp = ap->private_data;
889 u32 reg;
890 int i, err = 0;
891
892 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
893 /* Disable EDMA if active. The disable bit auto clears.
894 */
895 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
896 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
897 } else {
898 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
899 }
900
901 /* now properly wait for the eDMA to stop */
902 for (i = 1000; i > 0; i--) {
903 reg = readl(port_mmio + EDMA_CMD_OFS);
904 if (!(reg & EDMA_EN))
905 break;
906
907 udelay(100);
908 }
909
910 if (reg & EDMA_EN) {
911 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
912 err = -EIO;
913 }
914
915 return err;
916}
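
/*
 * Illustrative note (not part of the original driver): the polling loop
 * above allows the engine up to 1000 iterations * 100 us = 100 ms to drop
 * EDMA_EN before __mv_stop_dma() gives up and returns -EIO.
 */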
917
918static int mv_stop_dma(struct ata_port *ap)
919{
920 unsigned long flags;
921 int rc;
922
923 spin_lock_irqsave(&ap->host->lock, flags);
924 rc = __mv_stop_dma(ap);
925 spin_unlock_irqrestore(&ap->host->lock, flags);
926
927 return rc;
928}
929
930#ifdef ATA_DEBUG
931static void mv_dump_mem(void __iomem *start, unsigned bytes)
932{
933 int b, w;
934 for (b = 0; b < bytes; ) {
935 DPRINTK("%p: ", start + b);
936 for (w = 0; b < bytes && w < 4; w++) {
937 printk("%08x ", readl(start + b));
938 b += sizeof(u32);
939 }
940 printk("\n");
941 }
942}
943#endif
944
945static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
946{
947#ifdef ATA_DEBUG
948 int b, w;
949 u32 dw;
950 for (b = 0; b < bytes; ) {
951 DPRINTK("%02x: ", b);
952 for (w = 0; b < bytes && w < 4; w++) {
953 (void) pci_read_config_dword(pdev, b, &dw);
954 printk("%08x ", dw);
955 b += sizeof(u32);
956 }
957 printk("\n");
958 }
959#endif
960}
961static void mv_dump_all_regs(void __iomem *mmio_base, int port,
962 struct pci_dev *pdev)
963{
964#ifdef ATA_DEBUG
965 void __iomem *hc_base = mv_hc_base(mmio_base,
966 port >> MV_PORT_HC_SHIFT);
967 void __iomem *port_base;
968 int start_port, num_ports, p, start_hc, num_hcs, hc;
969
970 if (0 > port) {
971 start_hc = start_port = 0;
 972 num_ports = 8; /* should be benign for 4-port devices */
973 num_hcs = 2;
974 } else {
975 start_hc = port >> MV_PORT_HC_SHIFT;
976 start_port = port;
977 num_ports = num_hcs = 1;
978 }
979 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
980 num_ports > 1 ? num_ports - 1 : start_port);
981
982 if (NULL != pdev) {
983 DPRINTK("PCI config space regs:\n");
984 mv_dump_pci_cfg(pdev, 0x68);
985 }
986 DPRINTK("PCI regs:\n");
987 mv_dump_mem(mmio_base+0xc00, 0x3c);
988 mv_dump_mem(mmio_base+0xd00, 0x34);
989 mv_dump_mem(mmio_base+0xf00, 0x4);
990 mv_dump_mem(mmio_base+0x1d00, 0x6c);
991 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
992 hc_base = mv_hc_base(mmio_base, hc);
993 DPRINTK("HC regs (HC %i):\n", hc);
994 mv_dump_mem(hc_base, 0x1c);
995 }
996 for (p = start_port; p < start_port + num_ports; p++) {
997 port_base = mv_port_base(mmio_base, p);
998 DPRINTK("EDMA regs (port %i):\n", p);
999 mv_dump_mem(port_base, 0x54);
1000 DPRINTK("SATA regs (port %i):\n", p);
1001 mv_dump_mem(port_base+0x300, 0x60);
1002 }
1003#endif
1004}
1005
1006static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1007{
1008 unsigned int ofs;
1009
1010 switch (sc_reg_in) {
1011 case SCR_STATUS:
1012 case SCR_CONTROL:
1013 case SCR_ERROR:
1014 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1015 break;
1016 case SCR_ACTIVE:
1017 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1018 break;
1019 default:
1020 ofs = 0xffffffffU;
1021 break;
1022 }
1023 return ofs;
1024}
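
/*
 * Illustrative sketch (assumes libata's SCR_STATUS/SCR_ERROR/SCR_CONTROL
 * enum values of 0/1/2): with ofs = SATA_STATUS_OFS + 4 * sc_reg_in, the
 * generic SCR indices map onto this chip's per-port register block as:
 *
 *   SCR_STATUS  -> 0x300       SCR_ERROR  -> 0x304
 *   SCR_CONTROL -> 0x308       SCR_ACTIVE -> 0x350 (separate register)
 *
 * Anything else yields 0xffffffff, which mv_scr_read()/mv_scr_write()
 * translate into -EINVAL.
 */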
1025
1026static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1027{
1028 unsigned int ofs = mv_scr_offset(sc_reg_in);
1029
1030 if (ofs != 0xffffffffU) {
1031 *val = readl(mv_ap_base(ap) + ofs);
1032 return 0;
1033 } else
1034 return -EINVAL;
1035}
1036
1037static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1038{
1039 unsigned int ofs = mv_scr_offset(sc_reg_in);
1040
1041 if (ofs != 0xffffffffU) {
1042 writelfl(val, mv_ap_base(ap) + ofs);
1043 return 0;
1044 } else
1045 return -EINVAL;
1046}
1047
1048static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
1049 void __iomem *port_mmio)
1050{
1051 u32 cfg;
1052
1053 /* set up non-NCQ EDMA configuration */
1054 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1055
1056 if (IS_GEN_I(hpriv))
1057 cfg |= (1 << 8); /* enab config burst size mask */
1058
1059 else if (IS_GEN_II(hpriv))
1060 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1061
1062 else if (IS_GEN_IIE(hpriv)) {
1063 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1064 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1065 cfg |= (1 << 18); /* enab early completion */
1066 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1067 }
1068
1069 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1070}
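
/*
 * Illustrative sketch (not part of the original driver): the non-NCQ
 * EDMA_CFG values the function above produces, per generation:
 *
 *   Gen I   (50xx):      0x1f | (1 << 8)              = 0x0000011f
 *   Gen II  (60xx):      0x1f | (1 << 11) | (1 << 13) = 0x0000281f
 *   Gen IIE (6042/7042): 0x1f | bits 23,22,18,17      = 0x00c6001f
 *
 * Note that 0x11f is the same value mv5_reset_hc_port() later writes
 * directly into EDMA_CFG_OFS when re-initializing a Gen I port.
 */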
1071
1072/**
1073 * mv_port_start - Port specific init/start routine.
1074 * @ap: ATA channel to manipulate
1075 *
1076 * Allocate and point to DMA memory, init port private memory,
1077 * zero indices.
1078 *
1079 * LOCKING:
1080 * Inherited from caller.
1081 */
1082static int mv_port_start(struct ata_port *ap)
1083{
1084 struct device *dev = ap->host->dev;
1085 struct mv_host_priv *hpriv = ap->host->private_data;
1086 struct mv_port_priv *pp;
1087 void __iomem *port_mmio = mv_ap_base(ap);
1088 void *mem;
1089 dma_addr_t mem_dma;
1090 unsigned long flags;
1091 int rc;
1092
1093 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1094 if (!pp)
1095 return -ENOMEM;
1096
1097 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1098 GFP_KERNEL);
1099 if (!mem)
1100 return -ENOMEM;
1101 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1102
1103 rc = ata_pad_alloc(ap, dev);
1104 if (rc)
1105 return rc;
1106
1107 /* First item in chunk of DMA memory:
1108 * 32-slot command request table (CRQB), 32 bytes each in size
1109 */
1110 pp->crqb = mem;
1111 pp->crqb_dma = mem_dma;
1112 mem += MV_CRQB_Q_SZ;
1113 mem_dma += MV_CRQB_Q_SZ;
1114
1115 /* Second item:
1116 * 32-slot command response table (CRPB), 8 bytes each in size
1117 */
1118 pp->crpb = mem;
1119 pp->crpb_dma = mem_dma;
1120 mem += MV_CRPB_Q_SZ;
1121 mem_dma += MV_CRPB_Q_SZ;
1122
1123 /* Third item:
1124 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1125 */
1126 pp->sg_tbl = mem;
1127 pp->sg_tbl_dma = mem_dma;
1128
1129 spin_lock_irqsave(&ap->host->lock, flags);
1130
1131 mv_edma_cfg(ap, hpriv, port_mmio);
1132
1133 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1134
1135 spin_unlock_irqrestore(&ap->host->lock, flags);
1136
1137 /* Don't turn on EDMA here...do it before DMA commands only. Else
1138 * we'll be unable to send non-data, PIO, etc due to restricted access
1139 * to shadow regs.
1140 */
1141 ap->private_data = pp;
1142 return 0;
1143}
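
/*
 * Illustrative sketch (not part of the original driver): how the single
 * dmam_alloc_coherent() chunk carved up above adds up, using the enum
 * values defined near MV_CRQB_Q_SZ:
 *
 *   CRQB ring:   32 slots * 32 bytes   = 1024 bytes (1KB-aligned)
 *   CRPB ring:   32 slots *  8 bytes   =  256 bytes (256B-aligned)
 *   ePRD table: 176 entries * 16 bytes = 2816 bytes (16B entries)
 *   ------------------------------------------------------------
 *   MV_PORT_PRIV_DMA_SZ                = 4096 bytes per port
 */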
1144
1145/**
1146 * mv_port_stop - Port specific cleanup/stop routine.
1147 * @ap: ATA channel to manipulate
1148 *
1149 * Stop DMA, cleanup port memory.
1150 *
1151 * LOCKING:
1152 * This routine uses the host lock to protect the DMA stop.
1153 */
1154static void mv_port_stop(struct ata_port *ap)
1155{
1156 mv_stop_dma(ap);
1157}
1158
1159/**
1160 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1161 * @qc: queued command whose SG list to source from
1162 *
1163 * Populate the SG list and mark the last entry.
1164 *
1165 * LOCKING:
1166 * Inherited from caller.
1167 */
1168static void mv_fill_sg(struct ata_queued_cmd *qc)
1169{
1170 struct mv_port_priv *pp = qc->ap->private_data;
1171 struct scatterlist *sg;
1172 struct mv_sg *mv_sg, *last_sg = NULL;
1173 unsigned int si;
1174
1175 mv_sg = pp->sg_tbl;
1176 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1177 dma_addr_t addr = sg_dma_address(sg);
1178 u32 sg_len = sg_dma_len(sg);
1179
1180 while (sg_len) {
1181 u32 offset = addr & 0xffff;
1182 u32 len = sg_len;
1183
 1184 if (offset + sg_len > 0x10000)
1185 len = 0x10000 - offset;
1186
1187 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1188 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1189 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1190
1191 sg_len -= len;
1192 addr += len;
1193
1194 last_sg = mv_sg;
1195 mv_sg++;
1196 }
1197 }
1198
1199 if (likely(last_sg))
1200 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1201}
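
/*
 * Illustrative sketch (hypothetical addresses, not from the original
 * driver): because MV_DMA_BOUNDARY is 0xffff, mv_fill_sg() never lets a
 * single ePRD length span a 64KB boundary.  A 0x400-byte segment at DMA
 * address 0x1234ff00 is emitted as two ePRD entries:
 *
 *   entry 0: addr 0x1234ff00, len 0x100   (up to the 64KB boundary)
 *   entry 1: addr 0x12350000, len 0x300   (the remainder)
 *
 * and the final entry built for the command gets EPRD_FLAG_END_OF_TBL.
 */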
1202
1203static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1204{
1205 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1206 (last ? CRQB_CMD_LAST : 0);
1207 *cmdw = cpu_to_le16(tmp);
1208}
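
/*
 * Illustrative sketch (assumes libata's ATA_REG_CMD == 7): packing the
 * final command word of a READ DMA EXT (0x25) CRQB with "last" set:
 *
 *   tmp = 0x25 | (7 << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | CRQB_CMD_LAST
 *       = 0x25 | 0x0700 | 0x1000 | 0x8000
 *       = 0x9725
 */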
1209
1210/**
1211 * mv_qc_prep - Host specific command preparation.
1212 * @qc: queued command to prepare
1213 *
1214 * This routine simply redirects to the general purpose routine
1215 * if command is not DMA. Else, it handles prep of the CRQB
1216 * (command request block), does some sanity checking, and calls
1217 * the SG load routine.
1218 *
1219 * LOCKING:
1220 * Inherited from caller.
1221 */
1222static void mv_qc_prep(struct ata_queued_cmd *qc)
1223{
1224 struct ata_port *ap = qc->ap;
1225 struct mv_port_priv *pp = ap->private_data;
1226 __le16 *cw;
1227 struct ata_taskfile *tf;
1228 u16 flags = 0;
1229 unsigned in_index;
1230
1231 if (qc->tf.protocol != ATA_PROT_DMA)
1232 return;
1233
1234 /* Fill in command request block
1235 */
1236 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1237 flags |= CRQB_FLAG_READ;
1238 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1239 flags |= qc->tag << CRQB_TAG_SHIFT;
1240 flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
1241
1242 /* get current queue index from software */
1243 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1244
1245 pp->crqb[in_index].sg_addr =
1246 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1247 pp->crqb[in_index].sg_addr_hi =
1248 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1249 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1250
1251 cw = &pp->crqb[in_index].ata_cmd[0];
1252 tf = &qc->tf;
1253
 1254 /* Sadly, the CRQB cannot accommodate all registers--there are
1255 * only 11 bytes...so we must pick and choose required
1256 * registers based on the command. So, we drop feature and
1257 * hob_feature for [RW] DMA commands, but they are needed for
1258 * NCQ. NCQ will drop hob_nsect.
1259 */
1260 switch (tf->command) {
1261 case ATA_CMD_READ:
1262 case ATA_CMD_READ_EXT:
1263 case ATA_CMD_WRITE:
1264 case ATA_CMD_WRITE_EXT:
1265 case ATA_CMD_WRITE_FUA_EXT:
1266 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1267 break;
1268#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1269 case ATA_CMD_FPDMA_READ:
1270 case ATA_CMD_FPDMA_WRITE:
1271 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1272 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1273 break;
1274#endif /* FIXME: remove this line when NCQ added */
1275 default:
1276 /* The only other commands EDMA supports in non-queued and
1277 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1278 * of which are defined/used by Linux. If we get here, this
1279 * driver needs work.
1280 *
1281 * FIXME: modify libata to give qc_prep a return value and
1282 * return error here.
1283 */
1284 BUG_ON(tf->command);
1285 break;
1286 }
1287 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1288 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1289 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1290 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1291 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1292 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1293 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1294 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1295 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1296
1297 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1298 return;
1299 mv_fill_sg(qc);
1300}
1301
1302/**
1303 * mv_qc_prep_iie - Host specific command preparation.
1304 * @qc: queued command to prepare
1305 *
1306 * This routine simply redirects to the general purpose routine
1307 * if command is not DMA. Else, it handles prep of the CRQB
1308 * (command request block), does some sanity checking, and calls
1309 * the SG load routine.
1310 *
1311 * LOCKING:
1312 * Inherited from caller.
1313 */
1314static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1315{
1316 struct ata_port *ap = qc->ap;
1317 struct mv_port_priv *pp = ap->private_data;
1318 struct mv_crqb_iie *crqb;
1319 struct ata_taskfile *tf;
1320 unsigned in_index;
1321 u32 flags = 0;
1322
1323 if (qc->tf.protocol != ATA_PROT_DMA)
1324 return;
1325
1326 /* Fill in Gen IIE command request block
1327 */
1328 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1329 flags |= CRQB_FLAG_READ;
1330
1331 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1332 flags |= qc->tag << CRQB_TAG_SHIFT;
1333 flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
1334 what we use as our tag */
1335
1336 /* get current queue index from software */
1337 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1338
1339 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1340 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1341 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1342 crqb->flags = cpu_to_le32(flags);
1343
1344 tf = &qc->tf;
1345 crqb->ata_cmd[0] = cpu_to_le32(
1346 (tf->command << 16) |
1347 (tf->feature << 24)
1348 );
1349 crqb->ata_cmd[1] = cpu_to_le32(
1350 (tf->lbal << 0) |
1351 (tf->lbam << 8) |
1352 (tf->lbah << 16) |
1353 (tf->device << 24)
1354 );
1355 crqb->ata_cmd[2] = cpu_to_le32(
1356 (tf->hob_lbal << 0) |
1357 (tf->hob_lbam << 8) |
1358 (tf->hob_lbah << 16) |
1359 (tf->hob_feature << 24)
1360 );
1361 crqb->ata_cmd[3] = cpu_to_le32(
1362 (tf->nsect << 0) |
1363 (tf->hob_nsect << 8)
1364 );
1365
1366 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1367 return;
1368 mv_fill_sg(qc);
1369}
1370
1371/**
1372 * mv_qc_issue - Initiate a command to the host
1373 * @qc: queued command to start
1374 *
1375 * This routine simply redirects to the general purpose routine
1376 * if command is not DMA. Else, it sanity checks our local
1377 * caches of the request producer/consumer indices then enables
1378 * DMA and bumps the request producer index.
1379 *
1380 * LOCKING:
1381 * Inherited from caller.
1382 */
1383static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1384{
1385 struct ata_port *ap = qc->ap;
1386 void __iomem *port_mmio = mv_ap_base(ap);
1387 struct mv_port_priv *pp = ap->private_data;
1388 u32 in_index;
1389
1390 if (qc->tf.protocol != ATA_PROT_DMA) {
1391 /* We're about to send a non-EDMA capable command to the
1392 * port. Turn off EDMA so there won't be problems accessing
1393 * shadow block, etc registers.
1394 */
1395 __mv_stop_dma(ap);
1396 return ata_qc_issue_prot(qc);
1397 }
1398
1399 mv_start_dma(ap, port_mmio, pp);
1400
1401 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1402
1403 /* until we do queuing, the queue should be empty at this point */
1404 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1405 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1406
1407 pp->req_idx++;
1408
1409 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1410
1411 /* and write the request in pointer to kick the EDMA to life */
1412 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1413 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1414
1415 return 0;
1416}
1417
1418/**
1419 * mv_err_intr - Handle error interrupts on the port
1420 * @ap: ATA channel to manipulate
 1421 * @qc: affected queued command (may be NULL)
1422 *
1423 * In most cases, just clear the interrupt and move on. However,
1424 * some cases require an eDMA reset, which is done right before
1425 * the COMRESET in mv_phy_reset(). The SERR case requires a
1426 * clear of pending errors in the SATA SERROR register. Finally,
1427 * if the port disabled DMA, update our cached copy to match.
1428 *
1429 * LOCKING:
1430 * Inherited from caller.
1431 */
1432static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1433{
1434 void __iomem *port_mmio = mv_ap_base(ap);
1435 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1436 struct mv_port_priv *pp = ap->private_data;
1437 struct mv_host_priv *hpriv = ap->host->private_data;
1438 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1439 unsigned int action = 0, err_mask = 0;
1440 struct ata_eh_info *ehi = &ap->link.eh_info;
1441
1442 ata_ehi_clear_desc(ehi);
1443
1444 if (!edma_enabled) {
1445 /* just a guess: do we need to do this? should we
1446 * expand this, and do it in all cases?
1447 */
1448 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1449 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1450 }
1451
1452 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1453
1454 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1455
1456 /*
1457 * all generations share these EDMA error cause bits
1458 */
1459
1460 if (edma_err_cause & EDMA_ERR_DEV)
1461 err_mask |= AC_ERR_DEV;
1462 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1463 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1464 EDMA_ERR_INTRL_PAR)) {
1465 err_mask |= AC_ERR_ATA_BUS;
1466 action |= ATA_EH_HARDRESET;
1467 ata_ehi_push_desc(ehi, "parity error");
1468 }
1469 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1470 ata_ehi_hotplugged(ehi);
1471 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1472 "dev disconnect" : "dev connect");
1473 action |= ATA_EH_HARDRESET;
1474 }
1475
1476 if (IS_GEN_I(hpriv)) {
1477 eh_freeze_mask = EDMA_EH_FREEZE_5;
1478
1479 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1480 struct mv_port_priv *pp = ap->private_data;
1481 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1482 ata_ehi_push_desc(ehi, "EDMA self-disable");
1483 }
1484 } else {
1485 eh_freeze_mask = EDMA_EH_FREEZE;
1486
1487 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1488 struct mv_port_priv *pp = ap->private_data;
1489 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1490 ata_ehi_push_desc(ehi, "EDMA self-disable");
1491 }
1492
1493 if (edma_err_cause & EDMA_ERR_SERR) {
1494 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1495 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1496 err_mask = AC_ERR_ATA_BUS;
1497 action |= ATA_EH_HARDRESET;
1498 }
1499 }
1500
1501 /* Clear EDMA now that SERR cleanup done */
1502 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1503
1504 if (!err_mask) {
1505 err_mask = AC_ERR_OTHER;
1506 action |= ATA_EH_HARDRESET;
1507 }
1508
1509 ehi->serror |= serr;
1510 ehi->action |= action;
1511
1512 if (qc)
1513 qc->err_mask |= err_mask;
1514 else
1515 ehi->err_mask |= err_mask;
1516
1517 if (edma_err_cause & eh_freeze_mask)
1518 ata_port_freeze(ap);
1519 else
1520 ata_port_abort(ap);
1521}
1522
1523static void mv_intr_pio(struct ata_port *ap)
1524{
1525 struct ata_queued_cmd *qc;
1526 u8 ata_status;
1527
1528 /* ignore spurious intr if drive still BUSY */
1529 ata_status = readb(ap->ioaddr.status_addr);
1530 if (unlikely(ata_status & ATA_BUSY))
1531 return;
1532
1533 /* get active ATA command */
1534 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1535 if (unlikely(!qc)) /* no active tag */
1536 return;
1537 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1538 return;
1539
1540 /* and finally, complete the ATA command */
1541 qc->err_mask |= ac_err_mask(ata_status);
1542 ata_qc_complete(qc);
1543}
1544
1545static void mv_intr_edma(struct ata_port *ap)
1546{
1547 void __iomem *port_mmio = mv_ap_base(ap);
1548 struct mv_host_priv *hpriv = ap->host->private_data;
1549 struct mv_port_priv *pp = ap->private_data;
1550 struct ata_queued_cmd *qc;
1551 u32 out_index, in_index;
1552 bool work_done = false;
1553
1554 /* get h/w response queue pointer */
1555 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1556 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1557
1558 while (1) {
1559 u16 status;
1560 unsigned int tag;
1561
1562 /* get s/w response queue last-read pointer, and compare */
1563 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1564 if (in_index == out_index)
1565 break;
1566
1567 /* 50xx: get active ATA command */
1568 if (IS_GEN_I(hpriv))
1569 tag = ap->link.active_tag;
1570
1571 /* Gen II/IIE: get active ATA command via tag, to enable
1572 * support for queueing. this works transparently for
1573 * queued and non-queued modes.
1574 */
1575 else if (IS_GEN_II(hpriv))
1576 tag = (le16_to_cpu(pp->crpb[out_index].id)
1577 >> CRPB_IOID_SHIFT_6) & 0x3f;
1578
1579 else /* IS_GEN_IIE */
1580 tag = (le16_to_cpu(pp->crpb[out_index].id)
1581 >> CRPB_IOID_SHIFT_7) & 0x3f;
1582
1583 qc = ata_qc_from_tag(ap, tag);
1584
1585 /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1586 * bits (WARNING: might not necessarily be associated
1587 * with this command), which -should- be clear
1588 * if all is well
1589 */
1590 status = le16_to_cpu(pp->crpb[out_index].flags);
1591 if (unlikely(status & 0xff)) {
1592 mv_err_intr(ap, qc);
1593 return;
1594 }
1595
1596 /* and finally, complete the ATA command */
1597 if (qc) {
1598 qc->err_mask |=
1599 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1600 ata_qc_complete(qc);
1601 }
1602
1603 /* advance software response queue pointer, to
1604 * indicate (after the loop completes) to hardware
1605 * that we have consumed a response queue entry.
1606 */
1607 work_done = true;
1608 pp->resp_idx++;
1609 }
1610
1611 if (work_done)
1612 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1613 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1614 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1615}
1616
1617/**
1618 * mv_host_intr - Handle all interrupts on the given host controller
1619 * @host: host specific structure
1620 * @relevant: port error bits relevant to this host controller
1621 * @hc: which host controller we're to look at
1622 *
1623 * Read then write clear the HC interrupt status then walk each
1624 * port connected to the HC and see if it needs servicing. Port
1625 * success ints are reported in the HC interrupt status reg, the
1626 * port error ints are reported in the higher level main
1627 * interrupt status register and thus are passed in via the
1628 * 'relevant' argument.
1629 *
1630 * LOCKING:
1631 * Inherited from caller.
1632 */
1633static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1634{
1635 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1636 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1637 u32 hc_irq_cause;
1638 int port, port0;
1639
1640 if (hc == 0)
1641 port0 = 0;
1642 else
1643 port0 = MV_PORTS_PER_HC;
1644
1645 /* we'll need the HC success int register in most cases */
1646 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1647 if (!hc_irq_cause)
1648 return;
1649
1650 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1651
1652 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1653 hc, relevant, hc_irq_cause);
1654
1655 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1656 struct ata_port *ap = host->ports[port];
1657 struct mv_port_priv *pp = ap->private_data;
1658 int have_err_bits, hard_port, shift;
1659
1660 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1661 continue;
1662
1663 shift = port << 1; /* (port * 2) */
1664 if (port >= MV_PORTS_PER_HC) {
1665 shift++; /* skip bit 8 in the HC Main IRQ reg */
1666 }
1667 have_err_bits = ((PORT0_ERR << shift) & relevant);
1668
1669 if (unlikely(have_err_bits)) {
1670 struct ata_queued_cmd *qc;
1671
1672 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1673 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1674 continue;
1675
1676 mv_err_intr(ap, qc);
1677 continue;
1678 }
1679
1680 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1681
1682 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1683 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1684 mv_intr_edma(ap);
1685 } else {
1686 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1687 mv_intr_pio(ap);
1688 }
1689 }
1690 VPRINTK("EXIT\n");
1691}
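
/*
 * Illustrative sketch (not part of the original driver): where the
 * per-port bits land in the main cause register given the shift logic
 * above.
 *
 *   port 0: shift = 0        -> PORT0_ERR at bit 0,  PORT0_DONE at bit 1
 *   port 3: shift = 6        -> error at bit 6,      done at bit 7
 *   port 4: shift = 8+1 = 9  -> error at bit 9,      done at bit 10
 *   port 7: shift = 14+1 = 15 -> error at bit 15,    done at bit 16
 *
 * The "+1" for ports 4-7 skips bit 8 (PORTS_0_3_COAL_DONE), which is why
 * HC0_IRQ_PEND covers bits 0-8 and HC1 starts at HC_SHIFT == 9.
 */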
1692
1693static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1694{
1695 struct mv_host_priv *hpriv = host->private_data;
1696 struct ata_port *ap;
1697 struct ata_queued_cmd *qc;
1698 struct ata_eh_info *ehi;
1699 unsigned int i, err_mask, printed = 0;
1700 u32 err_cause;
1701
1702 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1703
1704 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1705 err_cause);
1706
1707 DPRINTK("All regs @ PCI error\n");
1708 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1709
1710 writelfl(0, mmio + hpriv->irq_cause_ofs);
1711
1712 for (i = 0; i < host->n_ports; i++) {
1713 ap = host->ports[i];
1714 if (!ata_link_offline(&ap->link)) {
1715 ehi = &ap->link.eh_info;
1716 ata_ehi_clear_desc(ehi);
1717 if (!printed++)
1718 ata_ehi_push_desc(ehi,
1719 "PCI err cause 0x%08x", err_cause);
1720 err_mask = AC_ERR_HOST_BUS;
1721 ehi->action = ATA_EH_HARDRESET;
1722 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1723 if (qc)
1724 qc->err_mask |= err_mask;
1725 else
1726 ehi->err_mask |= err_mask;
1727
1728 ata_port_freeze(ap);
1729 }
1730 }
1731}
1732
1733/**
1734 * mv_interrupt - Main interrupt event handler
1735 * @irq: unused
1736 * @dev_instance: private data; in this case the host structure
1737 *
1738 * Read the read only register to determine if any host
1739 * controllers have pending interrupts. If so, call lower level
1740 * routine to handle. Also check for PCI errors which are only
1741 * reported here.
1742 *
1743 * LOCKING:
1744 * This routine holds the host lock while processing pending
1745 * interrupts.
1746 */
1747static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1748{
1749 struct ata_host *host = dev_instance;
1750 unsigned int hc, handled = 0, n_hcs;
1751 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1752 u32 irq_stat, irq_mask;
1753
1754 spin_lock(&host->lock);
1755 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1756 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
1757
1758 /* check the cases where we either have nothing pending or have read
1759 * a bogus register value which can indicate HW removal or PCI fault
1760 */
1761 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1762 goto out_unlock;
1763
1764 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1765
1766 if (unlikely(irq_stat & PCI_ERR)) {
1767 mv_pci_error(host, mmio);
1768 handled = 1;
1769 goto out_unlock; /* skip all other HC irq handling */
1770 }
1771
1772 for (hc = 0; hc < n_hcs; hc++) {
1773 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1774 if (relevant) {
1775 mv_host_intr(host, relevant, hc);
1776 handled = 1;
1777 }
1778 }
1779
1780out_unlock:
1781 spin_unlock(&host->lock);
1782
1783 return IRQ_RETVAL(handled);
1784}
1785
1786static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1787{
1788 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1789 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1790
1791 return hc_mmio + ofs;
1792}
1793
1794static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1795{
1796 unsigned int ofs;
1797
1798 switch (sc_reg_in) {
1799 case SCR_STATUS:
1800 case SCR_ERROR:
1801 case SCR_CONTROL:
1802 ofs = sc_reg_in * sizeof(u32);
1803 break;
1804 default:
1805 ofs = 0xffffffffU;
1806 break;
1807 }
1808 return ofs;
1809}
1810
1811static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1812{
1813 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1814 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1815 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1816
1817 if (ofs != 0xffffffffU) {
1818 *val = readl(addr + ofs);
1819 return 0;
1820 } else
1821 return -EINVAL;
1822}
1823
1824static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1825{
1826 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1827 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1828 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1829
1830 if (ofs != 0xffffffffU) {
1831 writelfl(val, addr + ofs);
1832 return 0;
1833 } else
1834 return -EINVAL;
1835}
1836
1837static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1838{
1839 int early_5080;
1840
1841 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1842
1843 if (!early_5080) {
1844 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1845 tmp |= (1 << 0);
1846 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1847 }
1848
1849 mv_reset_pci_bus(pdev, mmio);
1850}
1851
1852static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1853{
1854 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1855}
1856
1857static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1858 void __iomem *mmio)
1859{
1860 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1861 u32 tmp;
1862
1863 tmp = readl(phy_mmio + MV5_PHY_MODE);
1864
1865 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1866 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1867}
1868
1869static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1870{
1871 u32 tmp;
1872
1873 writel(0, mmio + MV_GPIO_PORT_CTL);
1874
1875 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1876
1877 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
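	/* note: ORing with ~(1 << 0) sets bits 31:1 and leaves bit 0 as read */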
1878 tmp |= ~(1 << 0);
1879 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1880}
1881
1882static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1883 unsigned int port)
1884{
1885 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1886 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1887 u32 tmp;
1888 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1889
1890 if (fix_apm_sq) {
1891 tmp = readl(phy_mmio + MV5_LT_MODE);
1892 tmp |= (1 << 19);
1893 writel(tmp, phy_mmio + MV5_LT_MODE);
1894
1895 tmp = readl(phy_mmio + MV5_PHY_CTL);
1896 tmp &= ~0x3;
1897 tmp |= 0x1;
1898 writel(tmp, phy_mmio + MV5_PHY_CTL);
1899 }
1900
1901 tmp = readl(phy_mmio + MV5_PHY_MODE);
1902 tmp &= ~mask;
1903 tmp |= hpriv->signal[port].pre;
1904 tmp |= hpriv->signal[port].amps;
1905 writel(tmp, phy_mmio + MV5_PHY_MODE);
1906}
1907
1908
1909#undef ZERO
1910#define ZERO(reg) writel(0, port_mmio + (reg))
1911static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1912 unsigned int port)
1913{
1914 void __iomem *port_mmio = mv_port_base(mmio, port);
1915
1916 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1917
1918 mv_channel_reset(hpriv, mmio, port);
1919
1920 ZERO(0x028); /* command */
1921 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1922 ZERO(0x004); /* timer */
1923 ZERO(0x008); /* irq err cause */
1924 ZERO(0x00c); /* irq err mask */
1925 ZERO(0x010); /* rq bah */
1926 ZERO(0x014); /* rq inp */
1927 ZERO(0x018); /* rq outp */
1928 ZERO(0x01c); /* respq bah */
1929 ZERO(0x024); /* respq outp */
1930 ZERO(0x020); /* respq inp */
1931 ZERO(0x02c); /* test control */
1932 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1933}
1934#undef ZERO
1935
1936#define ZERO(reg) writel(0, hc_mmio + (reg))
1937static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1938 unsigned int hc)
1939{
1940 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1941 u32 tmp;
1942
1943 ZERO(0x00c);
1944 ZERO(0x010);
1945 ZERO(0x014);
1946 ZERO(0x018);
1947
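	/*
	 * For each of the four per-port bytes in this HC register, keep
	 * bits [4:2] of the current value and force bits [1:0] to 0b11
	 * (0x1c preserves, 0x03 sets).
	 */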
1948 tmp = readl(hc_mmio + 0x20);
1949 tmp &= 0x1c1c1c1c;
1950 tmp |= 0x03030303;
1951 writel(tmp, hc_mmio + 0x20);
1952}
1953#undef ZERO
1954
1955static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1956 unsigned int n_hc)
1957{
1958 unsigned int hc, port;
1959
1960 for (hc = 0; hc < n_hc; hc++) {
1961 for (port = 0; port < MV_PORTS_PER_HC; port++)
1962 mv5_reset_hc_port(hpriv, mmio,
1963 (hc * MV_PORTS_PER_HC) + port);
1964
1965 mv5_reset_one_hc(hpriv, mmio, hc);
1966 }
1967
1968 return 0;
1969}
1970
1971#undef ZERO
1972#define ZERO(reg) writel(0, mmio + (reg))
1973static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1974{
1975 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1976 struct mv_host_priv *hpriv = host->private_data;
1977 u32 tmp;
1978
1979 tmp = readl(mmio + MV_PCI_MODE);
1980 tmp &= 0xff00ffff;
1981 writel(tmp, mmio + MV_PCI_MODE);
1982
1983 ZERO(MV_PCI_DISC_TIMER);
1984 ZERO(MV_PCI_MSI_TRIGGER);
1985 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1986 ZERO(HC_MAIN_IRQ_MASK_OFS);
1987 ZERO(MV_PCI_SERR_MASK);
1988 ZERO(hpriv->irq_cause_ofs);
1989 ZERO(hpriv->irq_mask_ofs);
1990 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1991 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1992 ZERO(MV_PCI_ERR_ATTRIBUTE);
1993 ZERO(MV_PCI_ERR_COMMAND);
1994}
1995#undef ZERO
1996
1997static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1998{
1999 u32 tmp;
2000
2001 mv5_reset_flash(hpriv, mmio);
2002
2003 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2004 tmp &= 0x3;
2005 tmp |= (1 << 5) | (1 << 6);
2006 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2007}
2008
2009/**
2010 * mv6_reset_hc - Perform the 6xxx global soft reset
2011 * @mmio: base address of the HBA
2012 *
2013 * This routine only applies to 6xxx parts.
2014 *
2015 * LOCKING:
2016 * Inherited from caller.
2017 */
2018static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2019 unsigned int n_hc)
2020{
2021 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2022 int i, rc = 0;
2023 u32 t;
2024
2025	/* Follow the procedure defined in the PCI "main command and
2026	 * status register" table.
2027	 */
2028 t = readl(reg);
2029 writel(t | STOP_PCI_MASTER, reg);
2030
2031 for (i = 0; i < 1000; i++) {
2032 udelay(1);
2033 t = readl(reg);
2034 if (PCI_MASTER_EMPTY & t)
2035 break;
2036 }
2037 if (!(PCI_MASTER_EMPTY & t)) {
2038 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2039 rc = 1;
2040 goto done;
2041 }
2042
2043 /* set reset */
2044 i = 5;
2045 do {
2046 writel(t | GLOB_SFT_RST, reg);
2047 t = readl(reg);
2048 udelay(1);
2049 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2050
2051 if (!(GLOB_SFT_RST & t)) {
2052 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2053 rc = 1;
2054 goto done;
2055 }
2056
2057 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2058 i = 5;
2059 do {
2060 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2061 t = readl(reg);
2062 udelay(1);
2063 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2064
2065 if (GLOB_SFT_RST & t) {
2066 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2067 rc = 1;
2068 }
2069done:
2070 return rc;
2071}
2072
2073static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2074 void __iomem *mmio)
2075{
2076 void __iomem *port_mmio;
2077 u32 tmp;
2078
2079 tmp = readl(mmio + MV_RESET_CFG);
2080 if ((tmp & (1 << 0)) == 0) {
2081 hpriv->signal[idx].amps = 0x7 << 8;
2082 hpriv->signal[idx].pre = 0x1 << 5;
2083 return;
2084 }
2085
2086 port_mmio = mv_port_base(mmio, idx);
2087 tmp = readl(port_mmio + PHY_MODE2);
2088
2089 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2090 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2091}
2092
2093static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2094{
2095 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2096}
2097
2098static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2099 unsigned int port)
2100{
2101 void __iomem *port_mmio = mv_port_base(mmio, port);
2102
2103 u32 hp_flags = hpriv->hp_flags;
2104 int fix_phy_mode2 =
2105 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2106 int fix_phy_mode4 =
2107 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2108 u32 m2, tmp;
2109
2110 if (fix_phy_mode2) {
2111 m2 = readl(port_mmio + PHY_MODE2);
2112 m2 &= ~(1 << 16);
2113 m2 |= (1 << 31);
2114 writel(m2, port_mmio + PHY_MODE2);
2115
2116 udelay(200);
2117
2118 m2 = readl(port_mmio + PHY_MODE2);
2119 m2 &= ~((1 << 16) | (1 << 31));
2120 writel(m2, port_mmio + PHY_MODE2);
2121
2122 udelay(200);
2123 }
2124
2125 /* who knows what this magic does */
2126 tmp = readl(port_mmio + PHY_MODE3);
2127 tmp &= ~0x7F800000;
2128 tmp |= 0x2A800000;
2129 writel(tmp, port_mmio + PHY_MODE3);
2130
2131 if (fix_phy_mode4) {
2132 u32 m4;
2133
2134 m4 = readl(port_mmio + PHY_MODE4);
2135
2136 if (hp_flags & MV_HP_ERRATA_60X1B2)
2137 tmp = readl(port_mmio + 0x310);
2138
2139 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2140
2141 writel(m4, port_mmio + PHY_MODE4);
2142
2143 if (hp_flags & MV_HP_ERRATA_60X1B2)
2144 writel(tmp, port_mmio + 0x310);
2145 }
2146
2147 /* Revert values of pre-emphasis and signal amps to the saved ones */
2148 m2 = readl(port_mmio + PHY_MODE2);
2149
2150 m2 &= ~MV_M2_PREAMP_MASK;
2151 m2 |= hpriv->signal[port].amps;
2152 m2 |= hpriv->signal[port].pre;
2153 m2 &= ~(1 << 16);
2154
2155 /* according to mvSata 3.6.1, some IIE values are fixed */
2156 if (IS_GEN_IIE(hpriv)) {
2157 m2 &= ~0xC30FF01F;
2158 m2 |= 0x0000900F;
2159 }
2160
2161 writel(m2, port_mmio + PHY_MODE2);
2162}
2163
2164static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2165 unsigned int port_no)
2166{
2167 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2168
2169 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2170
2171 if (IS_GEN_II(hpriv)) {
2172 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2173 ifctl |= (1 << 7); /* enable gen2i speed */
2174 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2175 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2176 }
2177
2178 udelay(25); /* allow reset propagation */
2179
2180 /* Spec never mentions clearing the bit. Marvell's driver does
2181 * clear the bit, however.
2182 */
2183 writelfl(0, port_mmio + EDMA_CMD_OFS);
2184
2185 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2186
2187 if (IS_GEN_I(hpriv))
2188 mdelay(1);
2189}
2190
2191/**
2192 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2193 * @ap: ATA channel to manipulate
2194 *
2195 * Part of this is taken from __sata_phy_reset.  Note that this
2196 * routine calls msleep() while polling, so it must only be used
2197 * from a context that may sleep (libata EH).
2198 *
2199 * LOCKING:
2200 * Inherited from caller.
2201 */
2202static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2203 unsigned long deadline)
2204{
2205 struct mv_port_priv *pp = ap->private_data;
2206 struct mv_host_priv *hpriv = ap->host->private_data;
2207 void __iomem *port_mmio = mv_ap_base(ap);
2208 int retry = 5;
2209 u32 sstatus;
2210
2211 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2212
2213#ifdef DEBUG
2214 {
2215 u32 sstatus, serror, scontrol;
2216
2217 mv_scr_read(ap, SCR_STATUS, &sstatus);
2218 mv_scr_read(ap, SCR_ERROR, &serror);
2219 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2220 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2221 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2222 }
2223#endif
2224
2225 /* Issue COMRESET via SControl */
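	/*
	 * In SControl, bits [3:0] are DET and bits [11:8] are IPM: writing
	 * 0x301 asserts DET=1 (start interface re-initialization, i.e.
	 * COMRESET) with partial/slumber power states disabled, and the
	 * follow-up 0x300 releases DET while keeping IPM=3.
	 */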
2226comreset_retry:
2227 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2228 msleep(1);
2229
2230 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2231 msleep(20);
2232
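	/*
	 * Poll the low bits of the SStatus DET field until they settle:
	 * 3 indicates device presence with PHY communication established,
	 * 0 indicates no attached device.
	 */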
2233 do {
2234 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2235 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2236 break;
2237
2238 msleep(1);
2239 } while (time_before(jiffies, deadline));
2240
2241 /* work around errata */
2242 if (IS_GEN_II(hpriv) &&
2243 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2244 (retry-- > 0))
2245 goto comreset_retry;
2246
2247#ifdef DEBUG
2248 {
2249 u32 sstatus, serror, scontrol;
2250
2251 mv_scr_read(ap, SCR_STATUS, &sstatus);
2252 mv_scr_read(ap, SCR_ERROR, &serror);
2253 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2254 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2255 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2256 }
2257#endif
2258
2259 if (ata_link_offline(&ap->link)) {
2260 *class = ATA_DEV_NONE;
2261 return;
2262 }
2263
2264	/* Even after SStatus reflects that the device is ready, it
2265	 * seems to take a while for the link to become fully established
2266	 * (and thus for Status to stop reading 0x80/0x7F), so poll for
2267	 * that here.
2268	 */
2269 retry = 20;
2270 while (1) {
2271 u8 drv_stat = ata_check_status(ap);
2272 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2273 break;
2274 msleep(500);
2275 if (retry-- <= 0)
2276 break;
2277 if (time_after(jiffies, deadline))
2278 break;
2279 }
2280
2281 /* FIXME: if we passed the deadline, the following
2282 * code probably produces an invalid result
2283 */
2284
2285 /* finally, read device signature from TF registers */
2286 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2287
2288 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2289
2290 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2291
2292 VPRINTK("EXIT\n");
2293}
2294
2295static int mv_prereset(struct ata_link *link, unsigned long deadline)
2296{
2297 struct ata_port *ap = link->ap;
2298 struct mv_port_priv *pp = ap->private_data;
2299 struct ata_eh_context *ehc = &link->eh_context;
2300 int rc;
2301
2302 rc = mv_stop_dma(ap);
2303 if (rc)
2304 ehc->i.action |= ATA_EH_HARDRESET;
2305
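	/* force a hardreset on the first reset attempt for this port */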
2306 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2307 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2308 ehc->i.action |= ATA_EH_HARDRESET;
2309 }
2310
2311 /* if we're about to do hardreset, nothing more to do */
2312 if (ehc->i.action & ATA_EH_HARDRESET)
2313 return 0;
2314
2315 if (ata_link_online(link))
2316 rc = ata_wait_ready(ap, deadline);
2317 else
2318 rc = -ENODEV;
2319
2320 return rc;
2321}
2322
2323static int mv_hardreset(struct ata_link *link, unsigned int *class,
2324 unsigned long deadline)
2325{
2326 struct ata_port *ap = link->ap;
2327 struct mv_host_priv *hpriv = ap->host->private_data;
2328 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2329
2330 mv_stop_dma(ap);
2331
2332 mv_channel_reset(hpriv, mmio, ap->port_no);
2333
2334 mv_phy_reset(ap, class, deadline);
2335
2336 return 0;
2337}
2338
2339static void mv_postreset(struct ata_link *link, unsigned int *classes)
2340{
2341 struct ata_port *ap = link->ap;
2342 u32 serr;
2343
2344 /* print link status */
2345 sata_print_link_status(link);
2346
2347 /* clear SError */
2348 sata_scr_read(link, SCR_ERROR, &serr);
2349 sata_scr_write_flush(link, SCR_ERROR, serr);
2350
2351 /* bail out if no device is present */
2352 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2353 DPRINTK("EXIT, no device\n");
2354 return;
2355 }
2356
2357 /* set up device control */
2358 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2359}
2360
2361static void mv_error_handler(struct ata_port *ap)
2362{
2363 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2364 mv_hardreset, mv_postreset);
2365}
2366
2367static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2368{
2369 mv_stop_dma(qc->ap);
2370}
2371
2372static void mv_eh_freeze(struct ata_port *ap)
2373{
2374 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2375 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2376 u32 tmp, mask;
2377 unsigned int shift;
2378
2379 /* FIXME: handle coalescing completion events properly */
2380
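	/*
	 * Each port owns two bits (err, done) in the main IRQ mask; ports on
	 * the second HC sit one extra bit higher.  Example: port 5 gives
	 * shift = 5 * 2 + 1 = 11, so mask = 0x3 << 11.
	 */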
2381 shift = ap->port_no * 2;
2382 if (hc > 0)
2383 shift++;
2384
2385 mask = 0x3 << shift;
2386
2387 /* disable assertion of portN err, done events */
2388 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2389 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2390}
2391
2392static void mv_eh_thaw(struct ata_port *ap)
2393{
2394 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2395 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2396 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2397 void __iomem *port_mmio = mv_ap_base(ap);
2398 u32 tmp, mask, hc_irq_cause;
2399 unsigned int shift, hc_port_no = ap->port_no;
2400
2401 /* FIXME: handle coalescing completion events properly */
2402
2403 shift = ap->port_no * 2;
2404 if (hc > 0) {
2405 shift++;
2406 hc_port_no -= 4;
2407 }
2408
2409 mask = 0x3 << shift;
2410
2411 /* clear EDMA errors on this port */
2412 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2413
2414 /* clear pending irq events */
2415 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2416 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2417 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2418 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2419
2420 /* enable assertion of portN err, done events */
2421 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2422 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2423}
2424
2425/**
2426 * mv_port_init - Perform some early initialization on a single port.
2427 * @port: libata data structure storing shadow register addresses
2428 * @port_mmio: base address of the port
2429 *
2430 * Initialize shadow register mmio addresses, clear outstanding
2431 * interrupts on the port, and unmask interrupts for the future
2432 * start of the port.
2433 *
2434 * LOCKING:
2435 * Inherited from caller.
2436 */
2437static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2438{
2439 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2440 unsigned serr_ofs;
2441
2442 /* PIO related setup
2443 */
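	/*
	 * The taskfile shadow registers are spaced one u32 apart, so each
	 * ATA_REG_* index maps to shd_base + 4 * index (e.g. ATA_REG_NSECT
	 * lands at shd_base + 0x08 with the standard libata numbering).
	 */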
2444 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2445 port->error_addr =
2446 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2447 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2448 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2449 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2450 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2451 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2452 port->status_addr =
2453 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2454 /* special case: control/altstatus doesn't have ATA_REG_ address */
2455 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2456
2457 /* unused: */
2458 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2459
2460 /* Clear any currently outstanding port interrupt conditions */
2461 serr_ofs = mv_scr_offset(SCR_ERROR);
2462 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2463 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2464
2465 /* unmask all non-transient EDMA error interrupts */
2466 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2467
2468 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2469 readl(port_mmio + EDMA_CFG_OFS),
2470 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2471 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2472}
2473
2474static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2475{
2476 struct pci_dev *pdev = to_pci_dev(host->dev);
2477 struct mv_host_priv *hpriv = host->private_data;
2478 u32 hp_flags = hpriv->hp_flags;
2479
2480 switch (board_idx) {
2481 case chip_5080:
2482 hpriv->ops = &mv5xxx_ops;
2483 hp_flags |= MV_HP_GEN_I;
2484
2485 switch (pdev->revision) {
2486 case 0x1:
2487 hp_flags |= MV_HP_ERRATA_50XXB0;
2488 break;
2489 case 0x3:
2490 hp_flags |= MV_HP_ERRATA_50XXB2;
2491 break;
2492 default:
2493 dev_printk(KERN_WARNING, &pdev->dev,
2494 "Applying 50XXB2 workarounds to unknown rev\n");
2495 hp_flags |= MV_HP_ERRATA_50XXB2;
2496 break;
2497 }
2498 break;
2499
2500 case chip_504x:
2501 case chip_508x:
2502 hpriv->ops = &mv5xxx_ops;
2503 hp_flags |= MV_HP_GEN_I;
2504
2505 switch (pdev->revision) {
2506 case 0x0:
2507 hp_flags |= MV_HP_ERRATA_50XXB0;
2508 break;
2509 case 0x3:
2510 hp_flags |= MV_HP_ERRATA_50XXB2;
2511 break;
2512 default:
2513 dev_printk(KERN_WARNING, &pdev->dev,
2514 "Applying B2 workarounds to unknown rev\n");
2515 hp_flags |= MV_HP_ERRATA_50XXB2;
2516 break;
2517 }
2518 break;
2519
2520 case chip_604x:
2521 case chip_608x:
2522 hpriv->ops = &mv6xxx_ops;
2523 hp_flags |= MV_HP_GEN_II;
2524
2525 switch (pdev->revision) {
2526 case 0x7:
2527 hp_flags |= MV_HP_ERRATA_60X1B2;
2528 break;
2529 case 0x9:
2530 hp_flags |= MV_HP_ERRATA_60X1C0;
2531 break;
2532 default:
2533 dev_printk(KERN_WARNING, &pdev->dev,
2534 "Applying B2 workarounds to unknown rev\n");
2535 hp_flags |= MV_HP_ERRATA_60X1B2;
2536 break;
2537 }
2538 break;
2539
2540 case chip_7042:
2541 hp_flags |= MV_HP_PCIE;
2542 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2543 (pdev->device == 0x2300 || pdev->device == 0x2310))
2544 {
2545 /*
2546 * Highpoint RocketRAID PCIe 23xx series cards:
2547 *
2548 * Unconfigured drives are treated as "Legacy"
2549 * by the BIOS, and it overwrites sector 8 with
2550 * a "Lgcy" metadata block prior to Linux boot.
2551 *
2552 * Configured drives (RAID or JBOD) leave sector 8
2553 * alone, but instead overwrite a high numbered
2554 * sector for the RAID metadata. This sector can
2555 * be determined exactly, by truncating the physical
2556 * drive capacity to a nice even GB value.
2557 *
2558 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2559 *
2560 * Warn the user, lest they think we're just buggy.
2561 */
2562 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2563 " BIOS CORRUPTS DATA on all attached drives,"
2564 " regardless of if/how they are configured."
2565 " BEWARE!\n");
2566 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2567 " use sectors 8-9 on \"Legacy\" drives,"
2568 " and avoid the final two gigabytes on"
2569 " all RocketRAID BIOS initialized drives.\n");
2570 }
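		/* drop through: a 7042 is otherwise handled like a 6042 */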
2571 case chip_6042:
2572 hpriv->ops = &mv6xxx_ops;
2573 hp_flags |= MV_HP_GEN_IIE;
2574
2575 switch (pdev->revision) {
2576 case 0x0:
2577 hp_flags |= MV_HP_ERRATA_XX42A0;
2578 break;
2579 case 0x1:
2580 hp_flags |= MV_HP_ERRATA_60X1C0;
2581 break;
2582 default:
2583 dev_printk(KERN_WARNING, &pdev->dev,
2584 "Applying 60X1C0 workarounds to unknown rev\n");
2585 hp_flags |= MV_HP_ERRATA_60X1C0;
2586 break;
2587 }
2588 break;
2589
2590 default:
2591 dev_printk(KERN_ERR, &pdev->dev,
2592 "BUG: invalid board index %u\n", board_idx);
2593 return 1;
2594 }
2595
2596 hpriv->hp_flags = hp_flags;
2597 if (hp_flags & MV_HP_PCIE) {
2598 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2599 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2600 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2601 } else {
2602 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2603 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2604 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2605 }
2606
2607 return 0;
2608}
2609
2610/**
2611 * mv_init_host - Perform some early initialization of the host.
2612 * @host: ATA host to initialize
2613 * @board_idx: controller index
2614 *
2615 * If possible, do an early global reset of the host.  Then initialize
2616 * each port and clear/unmask the relevant host interrupts.
2617 *
2618 * LOCKING:
2619 * Inherited from caller.
2620 */
2621static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2622{
2623 int rc = 0, n_hc, port, hc;
2624 struct pci_dev *pdev = to_pci_dev(host->dev);
2625 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2626 struct mv_host_priv *hpriv = host->private_data;
2627
2628 /* global interrupt mask */
2629 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2630
2631 rc = mv_chip_id(host, board_idx);
2632 if (rc)
2633 goto done;
2634
2635 n_hc = mv_get_hc_count(host->ports[0]->flags);
2636
2637 for (port = 0; port < host->n_ports; port++)
2638 hpriv->ops->read_preamp(hpriv, port, mmio);
2639
2640 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2641 if (rc)
2642 goto done;
2643
2644 hpriv->ops->reset_flash(hpriv, mmio);
2645 hpriv->ops->reset_bus(pdev, mmio);
2646 hpriv->ops->enable_leds(hpriv, mmio);
2647
2648 for (port = 0; port < host->n_ports; port++) {
2649 if (IS_GEN_II(hpriv)) {
2650 void __iomem *port_mmio = mv_port_base(mmio, port);
2651
2652 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2653 ifctl |= (1 << 7); /* enable gen2i speed */
2654 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2655 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2656 }
2657
2658 hpriv->ops->phy_errata(hpriv, mmio, port);
2659 }
2660
2661 for (port = 0; port < host->n_ports; port++) {
2662 struct ata_port *ap = host->ports[port];
2663 void __iomem *port_mmio = mv_port_base(mmio, port);
2664 unsigned int offset = port_mmio - mmio;
2665
2666 mv_port_init(&ap->ioaddr, port_mmio);
2667
2668 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2669 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2670 }
2671
2672 for (hc = 0; hc < n_hc; hc++) {
2673 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2674
2675 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2676 "(before clear)=0x%08x\n", hc,
2677 readl(hc_mmio + HC_CFG_OFS),
2678 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2679
2680 /* Clear any currently outstanding hc interrupt conditions */
2681 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2682 }
2683
2684 /* Clear any currently outstanding host interrupt conditions */
2685 writelfl(0, mmio + hpriv->irq_cause_ofs);
2686
2687 /* and unmask interrupt generation for host regs */
2688 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2689
2690 if (IS_GEN_I(hpriv))
2691 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2692 else
2693 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2694
2695 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2696 "PCI int cause/mask=0x%08x/0x%08x\n",
2697 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2698 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2699 readl(mmio + hpriv->irq_cause_ofs),
2700 readl(mmio + hpriv->irq_mask_ofs));
2701
2702done:
2703 return rc;
2704}
2705
2706/**
2707 * mv_print_info - Dump key info to kernel log for perusal.
2708 * @host: ATA host to print info about
2709 *
2710 * FIXME: complete this.
2711 *
2712 * LOCKING:
2713 * Inherited from caller.
2714 */
2715static void mv_print_info(struct ata_host *host)
2716{
2717 struct pci_dev *pdev = to_pci_dev(host->dev);
2718 struct mv_host_priv *hpriv = host->private_data;
2719 u8 scc;
2720 const char *scc_s, *gen;
2721
2722	/* Read the PCI class code to report whether the chip presents
2723	 * itself as a SCSI or a RAID controller.
2724	 */
2725 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2726 if (scc == 0)
2727 scc_s = "SCSI";
2728 else if (scc == 0x01)
2729 scc_s = "RAID";
2730 else
2731 scc_s = "?";
2732
2733 if (IS_GEN_I(hpriv))
2734 gen = "I";
2735 else if (IS_GEN_II(hpriv))
2736 gen = "II";
2737 else if (IS_GEN_IIE(hpriv))
2738 gen = "IIE";
2739 else
2740 gen = "?";
2741
2742 dev_printk(KERN_INFO, &pdev->dev,
2743 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2744 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2745 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2746}
2747
2748/**
2749 * mv_init_one - handle a positive probe of a Marvell host
2750 * @pdev: PCI device found
2751 * @ent: PCI device ID entry for the matched host
2752 *
2753 * LOCKING:
2754 * Inherited from caller.
2755 */
2756static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2757{
2758 static int printed_version;
2759 unsigned int board_idx = (unsigned int)ent->driver_data;
2760 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2761 struct ata_host *host;
2762 struct mv_host_priv *hpriv;
2763 int n_ports, rc;
2764
2765 if (!printed_version++)
2766 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2767
2768 /* allocate host */
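	/* each host controller (HC) drives MV_PORTS_PER_HC ports, so e.g. an
	 * 8-port 608x shows up here as two HCs' worth of ports
	 */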
2769 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2770
2771 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2772 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2773 if (!host || !hpriv)
2774 return -ENOMEM;
2775 host->private_data = hpriv;
2776
2777 /* acquire resources */
2778 rc = pcim_enable_device(pdev);
2779 if (rc)
2780 return rc;
2781
2782 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2783 if (rc == -EBUSY)
2784 pcim_pin_device(pdev);
2785 if (rc)
2786 return rc;
2787 host->iomap = pcim_iomap_table(pdev);
2788
2789 rc = pci_go_64(pdev);
2790 if (rc)
2791 return rc;
2792
2793 /* initialize adapter */
2794 rc = mv_init_host(host, board_idx);
2795 if (rc)
2796 return rc;
2797
2798 /* Enable interrupts */
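	/* if the requested MSI setup fails, re-enable legacy INTx */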
2799 if (msi && pci_enable_msi(pdev))
2800 pci_intx(pdev, 1);
2801
2802 mv_dump_pci_cfg(pdev, 0x68);
2803 mv_print_info(host);
2804
2805 pci_set_master(pdev);
2806 pci_try_set_mwi(pdev);
2807 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2808 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2809}
2810
2811static int __init mv_init(void)
2812{
2813 return pci_register_driver(&mv_pci_driver);
2814}
2815
2816static void __exit mv_exit(void)
2817{
2818 pci_unregister_driver(&mv_pci_driver);
2819}
2820
2821MODULE_AUTHOR("Brett Russ");
2822MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2823MODULE_LICENSE("GPL");
2824MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2825MODULE_VERSION(DRV_VERSION);
2826
2827module_param(msi, int, 0444);
2828MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2829
2830module_init(mv_init);
2831module_exit(mv_exit);