1 /*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
5 * Copyright 2005: EMC Corporation, all rights reserved.
6 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 *
8 * Originally written by Brett Russ.
9 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
10 *
11 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; version 2 of the License.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
28 /*
29 * sata_mv TODO list:
30 *
31 * --> Develop a low-power-consumption strategy, and implement it.
32 *
33 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
34 *
35 * --> [Experiment, Marvell value added] Is it possible to use target
36 * mode to cross-connect two Linux boxes with Marvell cards? If so,
37 * creating LibATA target mode support would be very interesting.
38 *
39 * Target mode, for those without docs, is the ability to directly
40 * connect two SATA ports.
41 */
42
43 /*
44 * 80x1-B2 errata PCI#11:
45 *
46 * Users of the 6041/6081 Rev.B2 chips (current is C0)
47 * should be careful to insert those cards only onto PCI-X bus #0,
48 * and only in device slots 0..7, not higher. The chips may not
49 * work correctly otherwise (note: this is a pretty rare condition).
50 */
51
52 #include <linux/kernel.h>
53 #include <linux/module.h>
54 #include <linux/pci.h>
55 #include <linux/init.h>
56 #include <linux/blkdev.h>
57 #include <linux/delay.h>
58 #include <linux/interrupt.h>
59 #include <linux/dmapool.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/device.h>
62 #include <linux/clk.h>
63 #include <linux/phy/phy.h>
64 #include <linux/platform_device.h>
65 #include <linux/ata_platform.h>
66 #include <linux/mbus.h>
67 #include <linux/bitops.h>
68 #include <linux/gfp.h>
69 #include <linux/of.h>
70 #include <linux/of_irq.h>
71 #include <scsi/scsi_host.h>
72 #include <scsi/scsi_cmnd.h>
73 #include <scsi/scsi_device.h>
74 #include <linux/libata.h>
75
76 #define DRV_NAME "sata_mv"
77 #define DRV_VERSION "1.28"
78
79 /*
80 * module options
81 */
82
83 #ifdef CONFIG_PCI
84 static int msi;
85 module_param(msi, int, S_IRUGO);
86 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
87 #endif
88
89 static int irq_coalescing_io_count;
90 module_param(irq_coalescing_io_count, int, S_IRUGO);
91 MODULE_PARM_DESC(irq_coalescing_io_count,
92 "IRQ coalescing I/O count threshold (0..255)");
93
94 static int irq_coalescing_usecs;
95 module_param(irq_coalescing_usecs, int, S_IRUGO);
96 MODULE_PARM_DESC(irq_coalescing_usecs,
97 "IRQ coalescing time threshold in usecs");
98
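/*
 * Illustrative usage (hypothetical values, not from the original
 * source): defer "done" interrupts until either 4 I/Os complete or
 * 100 usecs elapse, whichever comes first:
 *
 *	modprobe sata_mv irq_coalescing_io_count=4 irq_coalescing_usecs=100
 *
 * mv_set_irq_coalescing() converts usecs to internal clocks at
 * COAL_CLOCKS_PER_USEC (150), so 100 usecs -> 15000 clocks.
 */
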
99 enum {
100 /* BAR's are enumerated in terms of pci_resource_start() terms */
101 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
102 MV_IO_BAR = 2, /* offset 0x18: IO space */
103 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
104
105 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
106 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
107
108 /* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
109 COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
110 MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
111 MAX_COAL_IO_COUNT = 255, /* completed I/O count */
112
113 MV_PCI_REG_BASE = 0,
114
115 /*
116 * Per-chip ("all ports") interrupt coalescing feature.
117 * This is only for GEN_II / GEN_IIE hardware.
118 *
119 * Coalescing defers the interrupt until either the IO_THRESHOLD
120 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
121 */
122 COAL_REG_BASE = 0x18000,
123 IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
124 ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
125
126 IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
127 IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
128
129 /*
130 * Registers for the (unused here) transaction coalescing feature:
131 */
132 TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
133 TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
134
135 SATAHC0_REG_BASE = 0x20000,
136 FLASH_CTL = 0x1046c,
137 GPIO_PORT_CTL = 0x104f0,
138 RESET_CFG = 0x180d8,
139
140 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
141 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
142 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
143 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
144
145 MV_MAX_Q_DEPTH = 32,
146 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
147
148 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
149 * CRPB needs alignment on a 256B boundary. Size == 256B
150 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
151 */
152 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
153 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
154 MV_MAX_SG_CT = 256,
155 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
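
/* Worked sizes, for illustration: struct mv_crqb is 32 B, so
 * MV_CRQB_Q_SZ = 32 * 32 = 1 KB (matching the CRQB alignment rule
 * above); struct mv_crpb is 8 B, so MV_CRPB_Q_SZ = 8 * 32 = 256 B;
 * each ePRD is 16 B, so MV_SG_TBL_SZ = 16 * 256 = 4 KB per tag.
 */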
156
157 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
158 MV_PORT_HC_SHIFT = 2,
159 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
160 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
161 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
162
163 /* Host Flags */
164 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
165
166 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
167
168 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
169
170 MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
171 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
172
173 MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
174
175 CRQB_FLAG_READ = (1 << 0),
176 CRQB_TAG_SHIFT = 1,
177 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
178 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
179 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
180 CRQB_CMD_ADDR_SHIFT = 8,
181 CRQB_CMD_CS = (0x2 << 11),
182 CRQB_CMD_LAST = (1 << 15),
183
184 CRPB_FLAG_STATUS_SHIFT = 8,
185 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
186 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
187
188 EPRD_FLAG_END_OF_TBL = (1 << 31),
189
190 /* PCI interface registers */
191
192 MV_PCI_COMMAND = 0xc00,
193 MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */
194 MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
195
196 PCI_MAIN_CMD_STS = 0xd30,
197 STOP_PCI_MASTER = (1 << 2),
198 PCI_MASTER_EMPTY = (1 << 3),
199 GLOB_SFT_RST = (1 << 4),
200
201 MV_PCI_MODE = 0xd00,
202 MV_PCI_MODE_MASK = 0x30,
203
204 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
205 MV_PCI_DISC_TIMER = 0xd04,
206 MV_PCI_MSI_TRIGGER = 0xc38,
207 MV_PCI_SERR_MASK = 0xc28,
208 MV_PCI_XBAR_TMOUT = 0x1d04,
209 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
210 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
211 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
212 MV_PCI_ERR_COMMAND = 0x1d50,
213
214 PCI_IRQ_CAUSE = 0x1d58,
215 PCI_IRQ_MASK = 0x1d5c,
216 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
217
218 PCIE_IRQ_CAUSE = 0x1900,
219 PCIE_IRQ_MASK = 0x1910,
220 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
221
222 /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
223 PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
224 PCI_HC_MAIN_IRQ_MASK = 0x1d64,
225 SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
226 SOC_HC_MAIN_IRQ_MASK = 0x20024,
227 ERR_IRQ = (1 << 0), /* shift by (2 * port #) */
228 DONE_IRQ = (1 << 1), /* shift by (2 * port #) */
229 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
230 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
231 DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
232 DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */
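/* Derivation, for illustration: DONE_IRQ is bit 1, shifted by
 * (2 * port), so ports 0..3 land on bits 1,3,5,7 = 0x000000aa;
 * the same pattern shifted by HC_SHIFT (9) covers ports 4..7.
 */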
233 PCI_ERR = (1 << 18),
234 TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */
235 TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */
236 PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
237 PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
238 ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
239 GPIO_INT = (1 << 22),
240 SELF_INT = (1 << 23),
241 TWSI_INT = (1 << 24),
242 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
243 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
244 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
245
246 /* SATAHC registers */
247 HC_CFG = 0x00,
248
249 HC_IRQ_CAUSE = 0x14,
250 DMA_IRQ = (1 << 0), /* shift by port # */
251 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
252 DEV_IRQ = (1 << 8), /* shift by port # */
253
254 /*
255 * Per-HC (Host-Controller) interrupt coalescing feature.
256 * This is present on all chip generations.
257 *
258 * Coalescing defers the interrupt until either the IO_THRESHOLD
259 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
260 */
261 HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
262 HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
263
264 SOC_LED_CTRL = 0x2c,
265 SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
266 SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
267 /* with dev activity LED */
268
269 /* Shadow block registers */
270 SHD_BLK = 0x100,
271 SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */
272
273 /* SATA registers */
274 SATA_STATUS = 0x300, /* ctrl, err regs follow status */
275 SATA_ACTIVE = 0x350,
276 FIS_IRQ_CAUSE = 0x364,
277 FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */
278
279 LTMODE = 0x30c, /* requires read-after-write */
280 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
281
282 PHY_MODE2 = 0x330,
283 PHY_MODE3 = 0x310,
284
285 PHY_MODE4 = 0x314, /* requires read-after-write */
286 PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
287 PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
288 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
289 PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
290
291 SATA_IFCTL = 0x344,
292 SATA_TESTCTL = 0x348,
293 SATA_IFSTAT = 0x34c,
294 VENDOR_UNIQUE_FIS = 0x35c,
295
296 FISCFG = 0x360,
297 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
298 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
299
300 PHY_MODE9_GEN2 = 0x398,
301 PHY_MODE9_GEN1 = 0x39c,
302 PHYCFG_OFS = 0x3a0, /* only in 65n devices */
303
304 MV5_PHY_MODE = 0x74,
305 MV5_LTMODE = 0x30,
306 MV5_PHY_CTL = 0x0C,
307 SATA_IFCFG = 0x050,
308 LP_PHY_CTL = 0x058,
309 LP_PHY_CTL_PIN_PU_PLL = (1 << 0),
310 LP_PHY_CTL_PIN_PU_RX = (1 << 1),
311 LP_PHY_CTL_PIN_PU_TX = (1 << 2),
312 LP_PHY_CTL_GEN_TX_3G = (1 << 5),
313 LP_PHY_CTL_GEN_RX_3G = (1 << 9),
314
315 MV_M2_PREAMP_MASK = 0x7e0,
316
317 /* Port registers */
318 EDMA_CFG = 0,
319 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
320 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
321 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
322 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
323 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
324 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
325 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
326
327 EDMA_ERR_IRQ_CAUSE = 0x8,
328 EDMA_ERR_IRQ_MASK = 0xc,
329 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
330 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
331 EDMA_ERR_DEV = (1 << 2), /* device error */
332 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
333 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
334 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
335 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
336 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
337 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
338 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
339 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
340 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
341 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
342 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
343
344 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
345 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
346 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
347 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
348 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
349
350 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
351
352 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
353 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
354 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
355 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
356 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
357 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
358
359 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
360
361 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
362 EDMA_ERR_OVERRUN_5 = (1 << 5),
363 EDMA_ERR_UNDERRUN_5 = (1 << 6),
364
365 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
366 EDMA_ERR_LNK_CTRL_RX_1 |
367 EDMA_ERR_LNK_CTRL_RX_3 |
368 EDMA_ERR_LNK_CTRL_TX,
369
370 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
371 EDMA_ERR_PRD_PAR |
372 EDMA_ERR_DEV_DCON |
373 EDMA_ERR_DEV_CON |
374 EDMA_ERR_SERR |
375 EDMA_ERR_SELF_DIS |
376 EDMA_ERR_CRQB_PAR |
377 EDMA_ERR_CRPB_PAR |
378 EDMA_ERR_INTRL_PAR |
379 EDMA_ERR_IORDY |
380 EDMA_ERR_LNK_CTRL_RX_2 |
381 EDMA_ERR_LNK_DATA_RX |
382 EDMA_ERR_LNK_DATA_TX |
383 EDMA_ERR_TRANS_PROTO,
384
385 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
386 EDMA_ERR_PRD_PAR |
387 EDMA_ERR_DEV_DCON |
388 EDMA_ERR_DEV_CON |
389 EDMA_ERR_OVERRUN_5 |
390 EDMA_ERR_UNDERRUN_5 |
391 EDMA_ERR_SELF_DIS_5 |
392 EDMA_ERR_CRQB_PAR |
393 EDMA_ERR_CRPB_PAR |
394 EDMA_ERR_INTRL_PAR |
395 EDMA_ERR_IORDY,
396
397 EDMA_REQ_Q_BASE_HI = 0x10,
398 EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */
399
400 EDMA_REQ_Q_OUT_PTR = 0x18,
401 EDMA_REQ_Q_PTR_SHIFT = 5,
402
403 EDMA_RSP_Q_BASE_HI = 0x1c,
404 EDMA_RSP_Q_IN_PTR = 0x20,
405 EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */
406 EDMA_RSP_Q_PTR_SHIFT = 3,
407
408 EDMA_CMD = 0x28, /* EDMA command register */
409 EDMA_EN = (1 << 0), /* enable EDMA */
410 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
411 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
412
413 EDMA_STATUS = 0x30, /* EDMA engine status */
414 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
415 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
416
417 EDMA_IORDY_TMOUT = 0x34,
418 EDMA_ARB_CFG = 0x38,
419
420 EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */
421 EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */
422
423 BMDMA_CMD = 0x224, /* bmdma command register */
424 BMDMA_STATUS = 0x228, /* bmdma status register */
425 BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */
426 BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */
427
428 /* Host private flags (hp_flags) */
429 MV_HP_FLAG_MSI = (1 << 0),
430 MV_HP_ERRATA_50XXB0 = (1 << 1),
431 MV_HP_ERRATA_50XXB2 = (1 << 2),
432 MV_HP_ERRATA_60X1B2 = (1 << 3),
433 MV_HP_ERRATA_60X1C0 = (1 << 4),
434 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
435 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
436 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
437 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
438 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
439 MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
440 MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
441 MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */
442
443 /* Port private flags (pp_flags) */
444 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
445 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
446 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
447 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
448 MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
449 };
450
451 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
452 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
453 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
454 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
455 #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
456
457 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
458 #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
459
460 enum {
461 /* DMA boundary 0xffff is required by the s/g splitting
462 * we need on /length/ in mv_fill_sg().
463 */
464 MV_DMA_BOUNDARY = 0xffffU,
465
466 /* mask of register bits containing lower 32 bits
467 * of EDMA request queue DMA address
468 */
469 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
470
471 /* ditto, for response queue */
472 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
473 };
474
475 enum chip_type {
476 chip_504x,
477 chip_508x,
478 chip_5080,
479 chip_604x,
480 chip_608x,
481 chip_6042,
482 chip_7042,
483 chip_soc,
484 };
485
486 /* Command ReQuest Block: 32B */
487 struct mv_crqb {
488 __le32 sg_addr;
489 __le32 sg_addr_hi;
490 __le16 ctrl_flags;
491 __le16 ata_cmd[11];
492 };
493
494 struct mv_crqb_iie {
495 __le32 addr;
496 __le32 addr_hi;
497 __le32 flags;
498 __le32 len;
499 __le32 ata_cmd[4];
500 };
501
502 /* Command ResPonse Block: 8B */
503 struct mv_crpb {
504 __le16 id;
505 __le16 flags;
506 __le32 tmstmp;
507 };
508
509 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
510 struct mv_sg {
511 __le32 addr;
512 __le32 flags_size;
513 __le32 addr_hi;
514 __le32 reserved;
515 };
516
517 /*
518 * We keep a local cache of a few frequently accessed port
519 * registers here, to avoid having to read them (very slow)
520 * when switching between EDMA and non-EDMA modes.
521 */
522 struct mv_cached_regs {
523 u32 fiscfg;
524 u32 ltmode;
525 u32 haltcond;
526 u32 unknown_rsvd;
527 };
528
529 struct mv_port_priv {
530 struct mv_crqb *crqb;
531 dma_addr_t crqb_dma;
532 struct mv_crpb *crpb;
533 dma_addr_t crpb_dma;
534 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
535 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
536
537 unsigned int req_idx;
538 unsigned int resp_idx;
539
540 u32 pp_flags;
541 struct mv_cached_regs cached;
542 unsigned int delayed_eh_pmp_map;
543 };
544
545 struct mv_port_signal {
546 u32 amps;
547 u32 pre;
548 };
549
550 struct mv_host_priv {
551 u32 hp_flags;
552 unsigned int board_idx;
553 u32 main_irq_mask;
554 struct mv_port_signal signal[8];
555 const struct mv_hw_ops *ops;
556 int n_ports;
557 void __iomem *base;
558 void __iomem *main_irq_cause_addr;
559 void __iomem *main_irq_mask_addr;
560 u32 irq_cause_offset;
561 u32 irq_mask_offset;
562 u32 unmask_all_irqs;
563
564 /*
565 * Needed on some devices that require their clocks to be enabled.
566 * These are optional: if the platform device does not have any
567 * clocks, they won't be used. Also, if the underlying hardware
568 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
569 * all the clock operations become no-ops (see clk.h).
570 */
571 struct clk *clk;
572 struct clk **port_clks;
573 /*
574 * Some devices have a SATA PHY which can be enabled/disabled
575 * in order to save power. These are optional: if the platform
576 * device does not have any PHYs, they won't be used.
577 */
578 struct phy **port_phys;
579 /*
580 * These consistent DMA memory pools give us guaranteed
581 * alignment for hardware-accessed data structures,
582 * and less memory waste in accomplishing the alignment.
583 */
584 struct dma_pool *crqb_pool;
585 struct dma_pool *crpb_pool;
586 struct dma_pool *sg_tbl_pool;
587 };
588
589 struct mv_hw_ops {
590 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
591 unsigned int port);
592 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
593 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
594 void __iomem *mmio);
595 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
596 unsigned int n_hc);
597 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
598 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
599 };
600
601 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
602 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
603 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
604 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
605 static int mv_port_start(struct ata_port *ap);
606 static void mv_port_stop(struct ata_port *ap);
607 static int mv_qc_defer(struct ata_queued_cmd *qc);
608 static void mv_qc_prep(struct ata_queued_cmd *qc);
609 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
610 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
611 static int mv_hardreset(struct ata_link *link, unsigned int *class,
612 unsigned long deadline);
613 static void mv_eh_freeze(struct ata_port *ap);
614 static void mv_eh_thaw(struct ata_port *ap);
615 static void mv6_dev_config(struct ata_device *dev);
616
617 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
618 unsigned int port);
619 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
620 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
621 void __iomem *mmio);
622 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
623 unsigned int n_hc);
624 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
625 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
626
627 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
628 unsigned int port);
629 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
630 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
631 void __iomem *mmio);
632 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
633 unsigned int n_hc);
634 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
635 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
636 void __iomem *mmio);
637 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
638 void __iomem *mmio);
639 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
640 void __iomem *mmio, unsigned int n_hc);
641 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
642 void __iomem *mmio);
643 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
644 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
645 void __iomem *mmio, unsigned int port);
646 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
647 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
648 unsigned int port_no);
649 static int mv_stop_edma(struct ata_port *ap);
650 static int mv_stop_edma_engine(void __iomem *port_mmio);
651 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
652
653 static void mv_pmp_select(struct ata_port *ap, int pmp);
654 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
655 unsigned long deadline);
656 static int mv_softreset(struct ata_link *link, unsigned int *class,
657 unsigned long deadline);
658 static void mv_pmp_error_handler(struct ata_port *ap);
659 static void mv_process_crpb_entries(struct ata_port *ap,
660 struct mv_port_priv *pp);
661
662 static void mv_sff_irq_clear(struct ata_port *ap);
663 static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
664 static void mv_bmdma_setup(struct ata_queued_cmd *qc);
665 static void mv_bmdma_start(struct ata_queued_cmd *qc);
666 static void mv_bmdma_stop(struct ata_queued_cmd *qc);
667 static u8 mv_bmdma_status(struct ata_port *ap);
668 static u8 mv_sff_check_status(struct ata_port *ap);
669
670 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
671 * because we have to allow room for worst case splitting of
672 * PRDs for 64K boundaries in mv_fill_sg().
673 */
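/*
 * Worked example of that worst case (illustrative): each of the
 * MV_MAX_SG_CT / 2 = 128 segments may straddle a 64K boundary and
 * be split in two by mv_fill_sg(), yielding at most 256 ePRDs,
 * which is exactly MV_MAX_SG_CT.
 */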
674 #ifdef CONFIG_PCI
675 static struct scsi_host_template mv5_sht = {
676 ATA_BASE_SHT(DRV_NAME),
677 .sg_tablesize = MV_MAX_SG_CT / 2,
678 .dma_boundary = MV_DMA_BOUNDARY,
679 };
680 #endif
681 static struct scsi_host_template mv6_sht = {
682 ATA_NCQ_SHT(DRV_NAME),
683 .can_queue = MV_MAX_Q_DEPTH - 1,
684 .sg_tablesize = MV_MAX_SG_CT / 2,
685 .dma_boundary = MV_DMA_BOUNDARY,
686 };
687
688 static struct ata_port_operations mv5_ops = {
689 .inherits = &ata_sff_port_ops,
690
691 .lost_interrupt = ATA_OP_NULL,
692
693 .qc_defer = mv_qc_defer,
694 .qc_prep = mv_qc_prep,
695 .qc_issue = mv_qc_issue,
696
697 .freeze = mv_eh_freeze,
698 .thaw = mv_eh_thaw,
699 .hardreset = mv_hardreset,
700
701 .scr_read = mv5_scr_read,
702 .scr_write = mv5_scr_write,
703
704 .port_start = mv_port_start,
705 .port_stop = mv_port_stop,
706 };
707
708 static struct ata_port_operations mv6_ops = {
709 .inherits = &ata_bmdma_port_ops,
710
711 .lost_interrupt = ATA_OP_NULL,
712
713 .qc_defer = mv_qc_defer,
714 .qc_prep = mv_qc_prep,
715 .qc_issue = mv_qc_issue,
716
717 .dev_config = mv6_dev_config,
718
719 .freeze = mv_eh_freeze,
720 .thaw = mv_eh_thaw,
721 .hardreset = mv_hardreset,
722 .softreset = mv_softreset,
723 .pmp_hardreset = mv_pmp_hardreset,
724 .pmp_softreset = mv_softreset,
725 .error_handler = mv_pmp_error_handler,
726
727 .scr_read = mv_scr_read,
728 .scr_write = mv_scr_write,
729
730 .sff_check_status = mv_sff_check_status,
731 .sff_irq_clear = mv_sff_irq_clear,
732 .check_atapi_dma = mv_check_atapi_dma,
733 .bmdma_setup = mv_bmdma_setup,
734 .bmdma_start = mv_bmdma_start,
735 .bmdma_stop = mv_bmdma_stop,
736 .bmdma_status = mv_bmdma_status,
737
738 .port_start = mv_port_start,
739 .port_stop = mv_port_stop,
740 };
741
742 static struct ata_port_operations mv_iie_ops = {
743 .inherits = &mv6_ops,
744 .dev_config = ATA_OP_NULL,
745 .qc_prep = mv_qc_prep_iie,
746 };
747
748 static const struct ata_port_info mv_port_info[] = {
749 { /* chip_504x */
750 .flags = MV_GEN_I_FLAGS,
751 .pio_mask = ATA_PIO4,
752 .udma_mask = ATA_UDMA6,
753 .port_ops = &mv5_ops,
754 },
755 { /* chip_508x */
756 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
757 .pio_mask = ATA_PIO4,
758 .udma_mask = ATA_UDMA6,
759 .port_ops = &mv5_ops,
760 },
761 { /* chip_5080 */
762 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
763 .pio_mask = ATA_PIO4,
764 .udma_mask = ATA_UDMA6,
765 .port_ops = &mv5_ops,
766 },
767 { /* chip_604x */
768 .flags = MV_GEN_II_FLAGS,
769 .pio_mask = ATA_PIO4,
770 .udma_mask = ATA_UDMA6,
771 .port_ops = &mv6_ops,
772 },
773 { /* chip_608x */
774 .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
775 .pio_mask = ATA_PIO4,
776 .udma_mask = ATA_UDMA6,
777 .port_ops = &mv6_ops,
778 },
779 { /* chip_6042 */
780 .flags = MV_GEN_IIE_FLAGS,
781 .pio_mask = ATA_PIO4,
782 .udma_mask = ATA_UDMA6,
783 .port_ops = &mv_iie_ops,
784 },
785 { /* chip_7042 */
786 .flags = MV_GEN_IIE_FLAGS,
787 .pio_mask = ATA_PIO4,
788 .udma_mask = ATA_UDMA6,
789 .port_ops = &mv_iie_ops,
790 },
791 { /* chip_soc */
792 .flags = MV_GEN_IIE_FLAGS,
793 .pio_mask = ATA_PIO4,
794 .udma_mask = ATA_UDMA6,
795 .port_ops = &mv_iie_ops,
796 },
797 };
798
799 static const struct pci_device_id mv_pci_tbl[] = {
800 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
801 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
802 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
803 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
804 /* RocketRAID 1720/174x have different identifiers */
805 { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
806 { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
807 { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
808
809 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
810 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
811 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
812 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
813 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
814
815 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
816
817 /* Adaptec 1430SA */
818 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
819
820 /* Marvell 7042 support */
821 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
822
823 /* Highpoint RocketRAID PCIe series */
824 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
825 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
826
827 { } /* terminate list */
828 };
829
830 static const struct mv_hw_ops mv5xxx_ops = {
831 .phy_errata = mv5_phy_errata,
832 .enable_leds = mv5_enable_leds,
833 .read_preamp = mv5_read_preamp,
834 .reset_hc = mv5_reset_hc,
835 .reset_flash = mv5_reset_flash,
836 .reset_bus = mv5_reset_bus,
837 };
838
839 static const struct mv_hw_ops mv6xxx_ops = {
840 .phy_errata = mv6_phy_errata,
841 .enable_leds = mv6_enable_leds,
842 .read_preamp = mv6_read_preamp,
843 .reset_hc = mv6_reset_hc,
844 .reset_flash = mv6_reset_flash,
845 .reset_bus = mv_reset_pci_bus,
846 };
847
848 static const struct mv_hw_ops mv_soc_ops = {
849 .phy_errata = mv6_phy_errata,
850 .enable_leds = mv_soc_enable_leds,
851 .read_preamp = mv_soc_read_preamp,
852 .reset_hc = mv_soc_reset_hc,
853 .reset_flash = mv_soc_reset_flash,
854 .reset_bus = mv_soc_reset_bus,
855 };
856
857 static const struct mv_hw_ops mv_soc_65n_ops = {
858 .phy_errata = mv_soc_65n_phy_errata,
859 .enable_leds = mv_soc_enable_leds,
860 .reset_hc = mv_soc_reset_hc,
861 .reset_flash = mv_soc_reset_flash,
862 .reset_bus = mv_soc_reset_bus,
863 };
864
865 /*
866 * Functions
867 */
868
869 static inline void writelfl(unsigned long data, void __iomem *addr)
870 {
871 writel(data, addr);
872 (void) readl(addr); /* flush to avoid PCI posted write */
873 }
874
875 static inline unsigned int mv_hc_from_port(unsigned int port)
876 {
877 return port >> MV_PORT_HC_SHIFT;
878 }
879
880 static inline unsigned int mv_hardport_from_port(unsigned int port)
881 {
882 return port & MV_PORT_MASK;
883 }
884
885 /*
886 * Consolidate some rather tricky bit shift calculations.
887 * This is hot-path stuff, so not a function.
888 * Simple code, with two return values, so macro rather than inline.
889 *
890 * port is the sole input, in range 0..7.
891 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
892 * hardport is the other output, in range 0..3.
893 *
894 * Note that port and hardport may be the same variable in some cases.
895 */
896 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
897 { \
898 shift = mv_hc_from_port(port) * HC_SHIFT; \
899 hardport = mv_hardport_from_port(port); \
900 shift += hardport * 2; \
901 }
902
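/*
 * Worked example (illustrative): for port = 5,
 * mv_hc_from_port(5) = 1, so shift starts at 1 * HC_SHIFT = 9;
 * hardport = 5 & MV_PORT_MASK = 1, so shift becomes 9 + 2 = 11.
 * In the main cause/mask registers, port 5's ERR_IRQ is then
 * bit 11 and its DONE_IRQ is bit 12.
 */
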
903 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
904 {
905 return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
906 }
907
908 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
909 unsigned int port)
910 {
911 return mv_hc_base(base, mv_hc_from_port(port));
912 }
913
914 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
915 {
916 return mv_hc_base_from_port(base, port) +
917 MV_SATAHC_ARBTR_REG_SZ +
918 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
919 }
920
921 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
922 {
923 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
924 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
925
926 return hc_mmio + ofs;
927 }
928
929 static inline void __iomem *mv_host_base(struct ata_host *host)
930 {
931 struct mv_host_priv *hpriv = host->private_data;
932 return hpriv->base;
933 }
934
935 static inline void __iomem *mv_ap_base(struct ata_port *ap)
936 {
937 return mv_port_base(mv_host_base(ap->host), ap->port_no);
938 }
939
940 static inline int mv_get_hc_count(unsigned long port_flags)
941 {
942 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
943 }
944
945 /**
946 * mv_save_cached_regs - (re-)initialize cached port registers
947 * @ap: the port whose registers we are caching
948 *
949 * Initialize the local cache of port registers,
950 * so that reading them over and over again can
951 * be avoided on the hotter paths of this driver.
952 * This saves a few microseconds each time we switch
953 * to/from EDMA mode to perform (eg.) a drive cache flush.
954 */
955 static void mv_save_cached_regs(struct ata_port *ap)
956 {
957 void __iomem *port_mmio = mv_ap_base(ap);
958 struct mv_port_priv *pp = ap->private_data;
959
960 pp->cached.fiscfg = readl(port_mmio + FISCFG);
961 pp->cached.ltmode = readl(port_mmio + LTMODE);
962 pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
963 pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
964 }
965
966 /**
967 * mv_write_cached_reg - write to a cached port register
968 * @addr: hardware address of the register
969 * @old: pointer to cached value of the register
970 * @new: new value for the register
971 *
972 * Write a new value to a cached register,
973 * but only if the value is different from before.
974 */
975 static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
976 {
977 if (new != *old) {
978 unsigned long laddr;
979 *old = new;
980 /*
981 * Workaround for 88SX60x1-B2 FEr SATA#13:
982 * Read-after-write is needed to prevent generating 64-bit
983 * write cycles on the PCI bus for SATA interface registers
984 * at offsets ending in 0x4 or 0xc.
985 *
986 * Looks like a lot of fuss, but it avoids an unnecessary
987 * +1 usec read-after-write delay for unaffected registers.
988 */
989 laddr = (unsigned long)addr & 0xffff;
990 if (laddr >= 0x300 && laddr <= 0x33c) {
991 laddr &= 0x000f;
992 if (laddr == 0x4 || laddr == 0xc) {
993 writelfl(new, addr); /* read after write */
994 return;
995 }
996 }
997 writel(new, addr); /* unaffected by the errata */
998 }
999 }
1000
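/*
 * Example of the errata check above (illustrative): LTMODE (0x30c)
 * and PHY_MODE4 (0x314) lie in 0x300..0x33c with low nibbles 0xc/0x4,
 * so they take the flushed writelfl() path; FISCFG (0x360) is outside
 * that range and gets a plain writel().
 */
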
1001 static void mv_set_edma_ptrs(void __iomem *port_mmio,
1002 struct mv_host_priv *hpriv,
1003 struct mv_port_priv *pp)
1004 {
1005 u32 index;
1006
1007 /*
1008 * initialize request queue
1009 */
1010 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
1011 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
1012
1013 WARN_ON(pp->crqb_dma & 0x3ff);
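/*
 * The double ">> 16" (rather than a single ">> 32") is safe when
 * dma_addr_t is only 32 bits wide, where a 32-bit shift would be
 * undefined behaviour; the same idiom is used for crpb_dma below.
 */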
1014 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
1015 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
1016 port_mmio + EDMA_REQ_Q_IN_PTR);
1017 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
1018
1019 /*
1020 * initialize response queue
1021 */
1022 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
1023 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
1024
1025 WARN_ON(pp->crpb_dma & 0xff);
1026 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
1027 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
1028 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
1029 port_mmio + EDMA_RSP_Q_OUT_PTR);
1030 }
1031
1032 static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
1033 {
1034 /*
1035 * When writing to the main_irq_mask in hardware,
1036 * we must ensure exclusivity between the interrupt coalescing bits
1037 * and the corresponding individual port DONE_IRQ bits.
1038 *
1039 * Note that this register is really an "IRQ enable" register,
1040 * not an "IRQ mask" register as Marvell's naming might suggest.
1041 */
1042 if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
1043 mask &= ~DONE_IRQ_0_3;
1044 if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
1045 mask &= ~DONE_IRQ_4_7;
1046 writelfl(mask, hpriv->main_irq_mask_addr);
1047 }
1048
1049 static void mv_set_main_irq_mask(struct ata_host *host,
1050 u32 disable_bits, u32 enable_bits)
1051 {
1052 struct mv_host_priv *hpriv = host->private_data;
1053 u32 old_mask, new_mask;
1054
1055 old_mask = hpriv->main_irq_mask;
1056 new_mask = (old_mask & ~disable_bits) | enable_bits;
1057 if (new_mask != old_mask) {
1058 hpriv->main_irq_mask = new_mask;
1059 mv_write_main_irq_mask(new_mask, hpriv);
1060 }
1061 }
1062
1063 static void mv_enable_port_irqs(struct ata_port *ap,
1064 unsigned int port_bits)
1065 {
1066 unsigned int shift, hardport, port = ap->port_no;
1067 u32 disable_bits, enable_bits;
1068
1069 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1070
1071 disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1072 enable_bits = port_bits << shift;
1073 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1074 }
1075
1076 static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1077 void __iomem *port_mmio,
1078 unsigned int port_irqs)
1079 {
1080 struct mv_host_priv *hpriv = ap->host->private_data;
1081 int hardport = mv_hardport_from_port(ap->port_no);
1082 void __iomem *hc_mmio = mv_hc_base_from_port(
1083 mv_host_base(ap->host), ap->port_no);
1084 u32 hc_irq_cause;
1085
1086 /* clear EDMA event indicators, if any */
1087 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1088
1089 /* clear pending irq events */
1090 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
1091 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1092
1093 /* clear FIS IRQ Cause */
1094 if (IS_GEN_IIE(hpriv))
1095 writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1096
1097 mv_enable_port_irqs(ap, port_irqs);
1098 }
1099
1100 static void mv_set_irq_coalescing(struct ata_host *host,
1101 unsigned int count, unsigned int usecs)
1102 {
1103 struct mv_host_priv *hpriv = host->private_data;
1104 void __iomem *mmio = hpriv->base, *hc_mmio;
1105 u32 coal_enable = 0;
1106 unsigned long flags;
1107 unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1108 const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1109 ALL_PORTS_COAL_DONE;
1110
1111 /* Disable IRQ coalescing if either threshold is zero */
1112 if (!usecs || !count) {
1113 clks = count = 0;
1114 } else {
1115 /* Respect maximum limits of the hardware */
1116 clks = usecs * COAL_CLOCKS_PER_USEC;
1117 if (clks > MAX_COAL_TIME_THRESHOLD)
1118 clks = MAX_COAL_TIME_THRESHOLD;
1119 if (count > MAX_COAL_IO_COUNT)
1120 count = MAX_COAL_IO_COUNT;
1121 }
1122
1123 spin_lock_irqsave(&host->lock, flags);
1124 mv_set_main_irq_mask(host, coal_disable, 0);
1125
1126 if (is_dual_hc && !IS_GEN_I(hpriv)) {
1127 /*
1128 * GEN_II/GEN_IIE with dual host controllers:
1129 * one set of global thresholds for the entire chip.
1130 */
1131 writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
1132 writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
1133 /* clear leftover coal IRQ bit */
1134 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1135 if (count)
1136 coal_enable = ALL_PORTS_COAL_DONE;
1137 clks = count = 0; /* force clearing of regular regs below */
1138 }
1139
1140 /*
1141 * All chips: independent thresholds for each HC on the chip.
1142 */
1143 hc_mmio = mv_hc_base_from_port(mmio, 0);
1144 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1145 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1146 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1147 if (count)
1148 coal_enable |= PORTS_0_3_COAL_DONE;
1149 if (is_dual_hc) {
1150 hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
1151 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1152 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1153 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1154 if (count)
1155 coal_enable |= PORTS_4_7_COAL_DONE;
1156 }
1157
1158 mv_set_main_irq_mask(host, 0, coal_enable);
1159 spin_unlock_irqrestore(&host->lock, flags);
1160 }
1161
1162 /**
1163 * mv_start_edma - Enable eDMA engine
1164 * @base: port base address
1165 * @pp: port private data
1166 *
1167 * Verify the local cache of the eDMA state is accurate with a
1168 * WARN_ON.
1169 *
1170 * LOCKING:
1171 * Inherited from caller.
1172 */
1173 static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
1174 struct mv_port_priv *pp, u8 protocol)
1175 {
1176 int want_ncq = (protocol == ATA_PROT_NCQ);
1177
1178 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1179 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1180 if (want_ncq != using_ncq)
1181 mv_stop_edma(ap);
1182 }
1183 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1184 struct mv_host_priv *hpriv = ap->host->private_data;
1185
1186 mv_edma_cfg(ap, want_ncq, 1);
1187
1188 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1189 mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1190
1191 writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1192 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1193 }
1194 }
1195
1196 static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1197 {
1198 void __iomem *port_mmio = mv_ap_base(ap);
1199 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1200 const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1201 int i;
1202
1203 /*
1204 * Wait for the EDMA engine to finish transactions in progress.
1205 * No idea what a good "timeout" value might be, but measurements
1206 * indicate that it often requires hundreds of microseconds
1207 * with two drives in use. So we use the 15msec value above
1208 * as a rough guess at what even more drives might require.
1209 */
1210 for (i = 0; i < timeout; ++i) {
1211 u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1212 if ((edma_stat & empty_idle) == empty_idle)
1213 break;
1214 udelay(per_loop);
1215 }
1216 /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
1217 }
1218
1219 /**
1220 * mv_stop_edma_engine - Disable eDMA engine
1221 * @port_mmio: io base address
1222 *
1223 * LOCKING:
1224 * Inherited from caller.
1225 */
1226 static int mv_stop_edma_engine(void __iomem *port_mmio)
1227 {
1228 int i;
1229
1230 /* Disable eDMA. The disable bit auto clears. */
1231 writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1232
1233 /* Wait for the chip to confirm eDMA is off. */
1234 for (i = 10000; i > 0; i--) {
1235 u32 reg = readl(port_mmio + EDMA_CMD);
1236 if (!(reg & EDMA_EN))
1237 return 0;
1238 udelay(10);
1239 }
1240 return -EIO;
1241 }
1242
1243 static int mv_stop_edma(struct ata_port *ap)
1244 {
1245 void __iomem *port_mmio = mv_ap_base(ap);
1246 struct mv_port_priv *pp = ap->private_data;
1247 int err = 0;
1248
1249 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1250 return 0;
1251 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1252 mv_wait_for_edma_empty_idle(ap);
1253 if (mv_stop_edma_engine(port_mmio)) {
1254 ata_port_err(ap, "Unable to stop eDMA\n");
1255 err = -EIO;
1256 }
1257 mv_edma_cfg(ap, 0, 0);
1258 return err;
1259 }
1260
1261 #ifdef ATA_DEBUG
1262 static void mv_dump_mem(void __iomem *start, unsigned bytes)
1263 {
1264 int b, w;
1265 for (b = 0; b < bytes; ) {
1266 DPRINTK("%p: ", start + b);
1267 for (w = 0; b < bytes && w < 4; w++) {
1268 printk("%08x ", readl(start + b));
1269 b += sizeof(u32);
1270 }
1271 printk("\n");
1272 }
1273 }
1274 #endif
1275 #if defined(ATA_DEBUG) || defined(CONFIG_PCI)
1276 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1277 {
1278 #ifdef ATA_DEBUG
1279 int b, w;
1280 u32 dw;
1281 for (b = 0; b < bytes; ) {
1282 DPRINTK("%02x: ", b);
1283 for (w = 0; b < bytes && w < 4; w++) {
1284 (void) pci_read_config_dword(pdev, b, &dw);
1285 printk("%08x ", dw);
1286 b += sizeof(u32);
1287 }
1288 printk("\n");
1289 }
1290 #endif
1291 }
1292 #endif
1293 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1294 struct pci_dev *pdev)
1295 {
1296 #ifdef ATA_DEBUG
1297 void __iomem *hc_base = mv_hc_base(mmio_base,
1298 port >> MV_PORT_HC_SHIFT);
1299 void __iomem *port_base;
1300 int start_port, num_ports, p, start_hc, num_hcs, hc;
1301
1302 if (0 > port) {
1303 start_hc = start_port = 0;
1304 num_ports = 8; /* should be benign for 4-port devs */
1305 num_hcs = 2;
1306 } else {
1307 start_hc = port >> MV_PORT_HC_SHIFT;
1308 start_port = port;
1309 num_ports = num_hcs = 1;
1310 }
1311 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1312 num_ports > 1 ? num_ports - 1 : start_port);
1313
1314 if (NULL != pdev) {
1315 DPRINTK("PCI config space regs:\n");
1316 mv_dump_pci_cfg(pdev, 0x68);
1317 }
1318 DPRINTK("PCI regs:\n");
1319 mv_dump_mem(mmio_base+0xc00, 0x3c);
1320 mv_dump_mem(mmio_base+0xd00, 0x34);
1321 mv_dump_mem(mmio_base+0xf00, 0x4);
1322 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1323 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1324 hc_base = mv_hc_base(mmio_base, hc);
1325 DPRINTK("HC regs (HC %i):\n", hc);
1326 mv_dump_mem(hc_base, 0x1c);
1327 }
1328 for (p = start_port; p < start_port + num_ports; p++) {
1329 port_base = mv_port_base(mmio_base, p);
1330 DPRINTK("EDMA regs (port %i):\n", p);
1331 mv_dump_mem(port_base, 0x54);
1332 DPRINTK("SATA regs (port %i):\n", p);
1333 mv_dump_mem(port_base+0x300, 0x60);
1334 }
1335 #endif
1336 }
1337
1338 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1339 {
1340 unsigned int ofs;
1341
1342 switch (sc_reg_in) {
1343 case SCR_STATUS:
1344 case SCR_CONTROL:
1345 case SCR_ERROR:
1346 ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1347 break;
1348 case SCR_ACTIVE:
1349 ofs = SATA_ACTIVE; /* active is not with the others */
1350 break;
1351 default:
1352 ofs = 0xffffffffU;
1353 break;
1354 }
1355 return ofs;
1356 }
1357
1358 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1359 {
1360 unsigned int ofs = mv_scr_offset(sc_reg_in);
1361
1362 if (ofs != 0xffffffffU) {
1363 *val = readl(mv_ap_base(link->ap) + ofs);
1364 return 0;
1365 } else
1366 return -EINVAL;
1367 }
1368
1369 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1370 {
1371 unsigned int ofs = mv_scr_offset(sc_reg_in);
1372
1373 if (ofs != 0xffffffffU) {
1374 void __iomem *addr = mv_ap_base(link->ap) + ofs;
1375 struct mv_host_priv *hpriv = link->ap->host->private_data;
1376 if (sc_reg_in == SCR_CONTROL) {
1377 /*
1378 * Workaround for 88SX60x1 FEr SATA#26:
1379 *
1380 * COMRESETs have to take care not to accidentally
1381 * put the drive to sleep when writing SCR_CONTROL.
1382 * Setting bits 12..15 prevents this problem.
1383 *
1384 * So if we see an outbound COMRESET, set those bits.
1385 * Ditto for the followup write that clears the reset.
1386 *
1387 * The proprietary driver does this for
1388 * all chip versions, and so do we.
1389 */
1390 if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1391 val |= 0xf000;
1392
1393 if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
1394 void __iomem *lp_phy_addr =
1395 mv_ap_base(link->ap) + LP_PHY_CTL;
1396 /*
1397 * Set PHY speed according to SControl speed.
1398 */
1399 u32 lp_phy_val =
1400 LP_PHY_CTL_PIN_PU_PLL |
1401 LP_PHY_CTL_PIN_PU_RX |
1402 LP_PHY_CTL_PIN_PU_TX;
1403
1404 if ((val & 0xf0) != 0x10)
1405 lp_phy_val |=
1406 LP_PHY_CTL_GEN_TX_3G |
1407 LP_PHY_CTL_GEN_RX_3G;
1408
1409 writelfl(lp_phy_val, lp_phy_addr);
1410 }
1411 }
1412 writelfl(val, addr);
1413 return 0;
1414 } else
1415 return -EINVAL;
1416 }
1417
1418 static void mv6_dev_config(struct ata_device *adev)
1419 {
1420 /*
1421 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1422 *
1423 * Gen-II does not support NCQ over a port multiplier
1424 * (no FIS-based switching).
1425 */
1426 if (adev->flags & ATA_DFLAG_NCQ) {
1427 if (sata_pmp_attached(adev->link->ap)) {
1428 adev->flags &= ~ATA_DFLAG_NCQ;
1429 ata_dev_info(adev,
1430 "NCQ disabled for command-based switching\n");
1431 }
1432 }
1433 }
1434
1435 static int mv_qc_defer(struct ata_queued_cmd *qc)
1436 {
1437 struct ata_link *link = qc->dev->link;
1438 struct ata_port *ap = link->ap;
1439 struct mv_port_priv *pp = ap->private_data;
1440
1441 /*
1442 * Don't allow new commands if we're in a delayed EH state
1443 * for NCQ and/or FIS-based switching.
1444 */
1445 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1446 return ATA_DEFER_PORT;
1447
1448 /* PIO commands need an exclusive link: no other commands [DMA or PIO]
1449 * can run concurrently.
1450 * Set excl_link when we want to send a PIO command in DMA mode
1451 * or a non-NCQ command in NCQ mode.
1452 * When we receive a command from that link, and there are no
1453 * outstanding commands, mark a flag to clear excl_link and let
1454 * the command go through.
1455 */
1456 if (unlikely(ap->excl_link)) {
1457 if (link == ap->excl_link) {
1458 if (ap->nr_active_links)
1459 return ATA_DEFER_PORT;
1460 qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1461 return 0;
1462 } else
1463 return ATA_DEFER_PORT;
1464 }
1465
1466 /*
1467 * If the port is completely idle, then allow the new qc.
1468 */
1469 if (ap->nr_active_links == 0)
1470 return 0;
1471
1472 /*
1473 * The port is operating in host queuing mode (EDMA) with NCQ
1474 * enabled, so allow multiple NCQ commands. EDMA also allows
1475 * queueing multiple DMA commands, but the libata core currently
1476 * doesn't allow it.
1477 */
1478 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1479 (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1480 if (ata_is_ncq(qc->tf.protocol))
1481 return 0;
1482 else {
1483 ap->excl_link = link;
1484 return ATA_DEFER_PORT;
1485 }
1486 }
1487
1488 return ATA_DEFER_PORT;
1489 }
1490
1491 static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1492 {
1493 struct mv_port_priv *pp = ap->private_data;
1494 void __iomem *port_mmio;
1495
1496 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
1497 u32 ltmode, *old_ltmode = &pp->cached.ltmode;
1498 u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1499
1500 ltmode = *old_ltmode & ~LTMODE_BIT8;
1501 haltcond = *old_haltcond | EDMA_ERR_DEV;
1502
1503 if (want_fbs) {
1504 fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1505 ltmode = *old_ltmode | LTMODE_BIT8;
1506 if (want_ncq)
1507 haltcond &= ~EDMA_ERR_DEV;
1508 else
1509 fiscfg |= FISCFG_WAIT_DEV_ERR;
1510 } else {
1511 fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1512 }
1513
1514 port_mmio = mv_ap_base(ap);
1515 mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1516 mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1517 mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1518 }
1519
1520 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1521 {
1522 struct mv_host_priv *hpriv = ap->host->private_data;
1523 u32 old, new;
1524
1525 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1526 old = readl(hpriv->base + GPIO_PORT_CTL);
1527 if (want_ncq)
1528 new = old | (1 << 22);
1529 else
1530 new = old & ~(1 << 22);
1531 if (new != old)
1532 writel(new, hpriv->base + GPIO_PORT_CTL);
1533 }
1534
1535 /**
1536 * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
1537 * @ap: Port being initialized
1538 *
1539 * There are two DMA modes on these chips: basic DMA, and EDMA.
1540 *
1541 * Bit-0 of the "EDMA RESERVED" register enables/disables use
1542 * of basic DMA on the GEN_IIE versions of the chips.
1543 *
1544 * This bit survives EDMA resets, and must be set for basic DMA
1545 * to function, and should be cleared when EDMA is active.
1546 */
1547 static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1548 {
1549 struct mv_port_priv *pp = ap->private_data;
1550 u32 new, *old = &pp->cached.unknown_rsvd;
1551
1552 if (enable_bmdma)
1553 new = *old | 1;
1554 else
1555 new = *old & ~1;
1556 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1557 }
1558
1559 /*
1560 * SOC chips have an issue whereby the HDD LEDs don't always blink
1561 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1562 * of the SOC takes care of it, generating a steady blink rate when
1563 * any drive on the chip is active.
1564 *
1565 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1566 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1567 *
1568 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1569 * LED operation works then, and provides better (more accurate) feedback.
1570 *
1571 * Note that this code assumes that an SOC never has more than one HC onboard.
1572 */
1573 static void mv_soc_led_blink_enable(struct ata_port *ap)
1574 {
1575 struct ata_host *host = ap->host;
1576 struct mv_host_priv *hpriv = host->private_data;
1577 void __iomem *hc_mmio;
1578 u32 led_ctrl;
1579
1580 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1581 return;
1582 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1583 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1584 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1585 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1586 }
1587
1588 static void mv_soc_led_blink_disable(struct ata_port *ap)
1589 {
1590 struct ata_host *host = ap->host;
1591 struct mv_host_priv *hpriv = host->private_data;
1592 void __iomem *hc_mmio;
1593 u32 led_ctrl;
1594 unsigned int port;
1595
1596 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1597 return;
1598
1599 /* disable led-blink only if no ports are using NCQ */
1600 for (port = 0; port < hpriv->n_ports; port++) {
1601 struct ata_port *this_ap = host->ports[port];
1602 struct mv_port_priv *pp = this_ap->private_data;
1603
1604 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1605 return;
1606 }
1607
1608 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1609 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1610 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1611 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1612 }
1613
1614 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1615 {
1616 u32 cfg;
1617 struct mv_port_priv *pp = ap->private_data;
1618 struct mv_host_priv *hpriv = ap->host->private_data;
1619 void __iomem *port_mmio = mv_ap_base(ap);
1620
1621 /* set up non-NCQ EDMA configuration */
1622 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1623 pp->pp_flags &=
1624 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1625
1626 if (IS_GEN_I(hpriv))
1627 cfg |= (1 << 8); /* enab config burst size mask */
1628
1629 else if (IS_GEN_II(hpriv)) {
1630 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1631 mv_60x1_errata_sata25(ap, want_ncq);
1632
1633 } else if (IS_GEN_IIE(hpriv)) {
1634 int want_fbs = sata_pmp_attached(ap);
1635 /*
1636 * Possible future enhancement:
1637 *
1638 * The chip can use FBS with non-NCQ, if we allow it.
1639 * But first we need to have the error handling in place
1640 * for this mode (datasheet section 7.3.15.4.2.3).
1641 * So disallow non-NCQ FBS for now.
1642 */
1643 want_fbs &= want_ncq;
1644
1645 mv_config_fbs(ap, want_ncq, want_fbs);
1646
1647 if (want_fbs) {
1648 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1649 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1650 }
1651
1652 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1653 if (want_edma) {
1654 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1655 if (!IS_SOC(hpriv))
1656 cfg |= (1 << 18); /* enab early completion */
1657 }
1658 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1659 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1660 mv_bmdma_enable_iie(ap, !want_edma);
1661
1662 if (IS_SOC(hpriv)) {
1663 if (want_ncq)
1664 mv_soc_led_blink_enable(ap);
1665 else
1666 mv_soc_led_blink_disable(ap);
1667 }
1668 }
1669
1670 if (want_ncq) {
1671 cfg |= EDMA_CFG_NCQ;
1672 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1673 }
1674
1675 writelfl(cfg, port_mmio + EDMA_CFG);
1676 }
1677
1678 static void mv_port_free_dma_mem(struct ata_port *ap)
1679 {
1680 struct mv_host_priv *hpriv = ap->host->private_data;
1681 struct mv_port_priv *pp = ap->private_data;
1682 int tag;
1683
1684 if (pp->crqb) {
1685 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1686 pp->crqb = NULL;
1687 }
1688 if (pp->crpb) {
1689 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1690 pp->crpb = NULL;
1691 }
1692 /*
1693 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1694 * For later hardware, we have one unique sg_tbl per NCQ tag.
1695 */
1696 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1697 if (pp->sg_tbl[tag]) {
1698 if (tag == 0 || !IS_GEN_I(hpriv))
1699 dma_pool_free(hpriv->sg_tbl_pool,
1700 pp->sg_tbl[tag],
1701 pp->sg_tbl_dma[tag]);
1702 pp->sg_tbl[tag] = NULL;
1703 }
1704 }
1705 }
1706
1707 /**
1708 * mv_port_start - Port specific init/start routine.
1709 * @ap: ATA channel to manipulate
1710 *
1711 * Allocate and point to DMA memory, init port private memory,
1712 * zero indices.
1713 *
1714 * LOCKING:
1715 * Inherited from caller.
1716 */
1717 static int mv_port_start(struct ata_port *ap)
1718 {
1719 struct device *dev = ap->host->dev;
1720 struct mv_host_priv *hpriv = ap->host->private_data;
1721 struct mv_port_priv *pp;
1722 unsigned long flags;
1723 int tag;
1724
1725 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1726 if (!pp)
1727 return -ENOMEM;
1728 ap->private_data = pp;
1729
1730 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1731 if (!pp->crqb)
1732 return -ENOMEM;
1733 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1734
1735 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1736 if (!pp->crpb)
1737 goto out_port_free_dma_mem;
1738 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1739
1740 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1741 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1742 ap->flags |= ATA_FLAG_AN;
1743 /*
1744 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1745 * For later hardware, we need one unique sg_tbl per NCQ tag.
1746 */
1747 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1748 if (tag == 0 || !IS_GEN_I(hpriv)) {
1749 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1750 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1751 if (!pp->sg_tbl[tag])
1752 goto out_port_free_dma_mem;
1753 } else {
1754 pp->sg_tbl[tag] = pp->sg_tbl[0];
1755 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1756 }
1757 }
1758
1759 spin_lock_irqsave(ap->lock, flags);
1760 mv_save_cached_regs(ap);
1761 mv_edma_cfg(ap, 0, 0);
1762 spin_unlock_irqrestore(ap->lock, flags);
1763
1764 return 0;
1765
1766 out_port_free_dma_mem:
1767 mv_port_free_dma_mem(ap);
1768 return -ENOMEM;
1769 }
1770
1771 /**
1772 * mv_port_stop - Port specific cleanup/stop routine.
1773 * @ap: ATA channel to manipulate
1774 *
1775 * Stop DMA, cleanup port memory.
1776 *
1777 * LOCKING:
1778 * This routine uses the host lock to protect the DMA stop.
1779 */
1780 static void mv_port_stop(struct ata_port *ap)
1781 {
1782 unsigned long flags;
1783
1784 spin_lock_irqsave(ap->lock, flags);
1785 mv_stop_edma(ap);
1786 mv_enable_port_irqs(ap, 0);
1787 spin_unlock_irqrestore(ap->lock, flags);
1788 mv_port_free_dma_mem(ap);
1789 }
1790
1791 /**
1792 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1793 * @qc: queued command whose SG list to source from
1794 *
1795 * Populate the SG list and mark the last entry.
1796 *
1797 * LOCKING:
1798 * Inherited from caller.
1799 */
1800 static void mv_fill_sg(struct ata_queued_cmd *qc)
1801 {
1802 struct mv_port_priv *pp = qc->ap->private_data;
1803 struct scatterlist *sg;
1804 struct mv_sg *mv_sg, *last_sg = NULL;
1805 unsigned int si;
1806
1807 mv_sg = pp->sg_tbl[qc->tag];
1808 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1809 dma_addr_t addr = sg_dma_address(sg);
1810 u32 sg_len = sg_dma_len(sg);
1811
1812 while (sg_len) {
1813 u32 offset = addr & 0xffff;
1814 u32 len = sg_len;
1815
1816 if (offset + len > 0x10000)
1817 len = 0x10000 - offset;
1818
1819 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1820 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1821 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1822 mv_sg->reserved = 0;
1823
1824 sg_len -= len;
1825 addr += len;
1826
1827 last_sg = mv_sg;
1828 mv_sg++;
1829 }
1830 }
1831
1832 if (likely(last_sg))
1833 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1834 mb(); /* ensure data structure is visible to the chipset */
1835 }
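
/*
 * Worked example of the splitting above (illustrative only): a segment
 * with addr = 0x12348000 and sg_len = 0x18000 yields two ePRDs.
 * Pass 1: offset = 0x8000, so len is clipped to 0x10000 - 0x8000 = 0x8000.
 * Pass 2: addr = 0x12350000, offset = 0, len = 0x10000, and the stored
 * size 0x10000 & 0xffff == 0 -- presumably the hardware's encoding for a
 * full 64 KiB, the maximum a single ePRD can describe.
 */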
1836
1837 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1838 {
1839 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1840 (last ? CRQB_CMD_LAST : 0);
1841 *cmdw = cpu_to_le16(tmp);
1842 }
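
/*
 * Illustrative example: packing nsect = 0x08 for ATA_REG_NSECT yields
 * 0x08 | (ATA_REG_NSECT << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS; only the
 * final word of a request (the command itself, see mv_qc_prep() below)
 * additionally carries CRQB_CMD_LAST.
 */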
1843
1844 /**
1845 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
1846 * @ap: Port associated with this ATA transaction.
1847 *
1848 * We need this only for ATAPI bmdma transactions,
1849 * as otherwise we experience spurious interrupts
1850 * after libata-sff handles the bmdma interrupts.
1851 */
1852 static void mv_sff_irq_clear(struct ata_port *ap)
1853 {
1854 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1855 }
1856
1857 /**
1858 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1859 * @qc: queued command to check for chipset/DMA compatibility.
1860 *
1861 * The bmdma engines cannot handle speculative data sizes
1862 * (bytecount under/over flow). So only allow DMA for
1863 * data transfer commands with known data sizes.
1864 *
1865 * LOCKING:
1866 * Inherited from caller.
1867 */
1868 static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1869 {
1870 struct scsi_cmnd *scmd = qc->scsicmd;
1871
1872 if (scmd) {
1873 switch (scmd->cmnd[0]) {
1874 case READ_6:
1875 case READ_10:
1876 case READ_12:
1877 case WRITE_6:
1878 case WRITE_10:
1879 case WRITE_12:
1880 case GPCMD_READ_CD:
1881 case GPCMD_SEND_DVD_STRUCTURE:
1882 case GPCMD_SEND_CUE_SHEET:
1883 return 0; /* DMA is safe */
1884 }
1885 }
1886 return -EOPNOTSUPP; /* use PIO instead */
1887 }
1888
1889 /**
1890 * mv_bmdma_setup - Set up BMDMA transaction
1891 * @qc: queued command to prepare DMA for.
1892 *
1893 * LOCKING:
1894 * Inherited from caller.
1895 */
1896 static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1897 {
1898 struct ata_port *ap = qc->ap;
1899 void __iomem *port_mmio = mv_ap_base(ap);
1900 struct mv_port_priv *pp = ap->private_data;
1901
1902 mv_fill_sg(qc);
1903
1904 /* clear all DMA cmd bits */
1905 writel(0, port_mmio + BMDMA_CMD);
1906
1907 /* load PRD table addr. */
1908 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
1909 port_mmio + BMDMA_PRD_HIGH);
1910 writelfl(pp->sg_tbl_dma[qc->tag],
1911 port_mmio + BMDMA_PRD_LOW);
1912
1913 /* issue r/w command */
1914 ap->ops->sff_exec_command(ap, &qc->tf);
1915 }
1916
1917 /**
1918 * mv_bmdma_start - Start a BMDMA transaction
1919 * @qc: queued command to start DMA on.
1920 *
1921 * LOCKING:
1922 * Inherited from caller.
1923 */
1924 static void mv_bmdma_start(struct ata_queued_cmd *qc)
1925 {
1926 struct ata_port *ap = qc->ap;
1927 void __iomem *port_mmio = mv_ap_base(ap);
1928 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1929 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1930
1931 /* start host DMA transaction */
1932 writelfl(cmd, port_mmio + BMDMA_CMD);
1933 }
1934
1935 /**
1936 * mv_bmdma_stop - Stop BMDMA transfer
1937 * @qc: queued command to stop DMA on.
1938 *
1939 * Clears the ATA_DMA_START flag in the bmdma control register
1940 *
1941 * LOCKING:
1942 * Inherited from caller.
1943 */
1944 static void mv_bmdma_stop_ap(struct ata_port *ap)
1945 {
1946 void __iomem *port_mmio = mv_ap_base(ap);
1947 u32 cmd;
1948
1949 /* clear start/stop bit */
1950 cmd = readl(port_mmio + BMDMA_CMD);
1951 if (cmd & ATA_DMA_START) {
1952 cmd &= ~ATA_DMA_START;
1953 writelfl(cmd, port_mmio + BMDMA_CMD);
1954
1955 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1956 ata_sff_dma_pause(ap);
1957 }
1958 }
1959
1960 static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1961 {
1962 mv_bmdma_stop_ap(qc->ap);
1963 }
1964
1965 /**
1966 * mv_bmdma_status - Read BMDMA status
1967 * @ap: port for which to retrieve DMA status.
1968 *
1969 * Read and return equivalent of the sff BMDMA status register.
1970 *
1971 * LOCKING:
1972 * Inherited from caller.
1973 */
1974 static u8 mv_bmdma_status(struct ata_port *ap)
1975 {
1976 void __iomem *port_mmio = mv_ap_base(ap);
1977 u32 reg, status;
1978
1979 /*
1980 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1981 * and the ATA_DMA_INTR bit doesn't exist.
1982 */
1983 reg = readl(port_mmio + BMDMA_STATUS);
1984 if (reg & ATA_DMA_ACTIVE)
1985 status = ATA_DMA_ACTIVE;
1986 else if (reg & ATA_DMA_ERR)
1987 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1988 else {
1989 /*
1990 * Just because DMA_ACTIVE is 0 (DMA completed),
1991 * this does _not_ mean the device is "done".
1992 * So we should not yet be signalling ATA_DMA_INTR
1993 * in some cases. Eg. DSM/TRIM, and perhaps others.
1994 */
1995 mv_bmdma_stop_ap(ap);
1996 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1997 status = 0;
1998 else
1999 status = ATA_DMA_INTR;
2000 }
2001 return status;
2002 }
2003
2004 static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
2005 {
2006 struct ata_taskfile *tf = &qc->tf;
2007 /*
2008 * Workaround for 88SX60x1 FEr SATA#24.
2009 *
2010 * Chip may corrupt WRITEs if multi_count >= 4kB.
2011 * Note that READs are unaffected.
2012 *
2013 * It's not clear if this errata really means "4K bytes",
2014 * or if it always happens for multi_count > 7
2015 * regardless of device sector_size.
2016 *
2017 * So, for safety, any write with multi_count > 7
2018 * gets converted here into a regular PIO write instead:
2019 */
2020 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
2021 if (qc->dev->multi_count > 7) {
2022 switch (tf->command) {
2023 case ATA_CMD_WRITE_MULTI:
2024 tf->command = ATA_CMD_PIO_WRITE;
2025 break;
2026 case ATA_CMD_WRITE_MULTI_FUA_EXT:
2027 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
2028 /* fall through */
2029 case ATA_CMD_WRITE_MULTI_EXT:
2030 tf->command = ATA_CMD_PIO_WRITE_EXT;
2031 break;
2032 }
2033 }
2034 }
2035 }
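
/*
 * Illustrative example of the conversion above: a WRITE MULTIPLE EXT to a
 * device configured with multi_count = 16 (8 KiB per DRQ block at
 * 512-byte sectors) exceeds the "> 7" limit, so tf->command is rewritten
 * to ATA_CMD_PIO_WRITE_EXT and the data goes out as an ordinary
 * sector-at-a-time PIO write.  READs, and writes with multi_count <= 7,
 * pass through unmodified.
 */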
2036
2037 /**
2038 * mv_qc_prep - Host specific command preparation.
2039 * @qc: queued command to prepare
2040 *
2041 * This routine simply redirects to the general purpose routine
2042 * if command is not DMA. Else, it handles prep of the CRQB
2043 * (command request block), does some sanity checking, and calls
2044 * the SG load routine.
2045 *
2046 * LOCKING:
2047 * Inherited from caller.
2048 */
2049 static void mv_qc_prep(struct ata_queued_cmd *qc)
2050 {
2051 struct ata_port *ap = qc->ap;
2052 struct mv_port_priv *pp = ap->private_data;
2053 __le16 *cw;
2054 struct ata_taskfile *tf = &qc->tf;
2055 u16 flags = 0;
2056 unsigned in_index;
2057
2058 switch (tf->protocol) {
2059 case ATA_PROT_DMA:
2060 if (tf->command == ATA_CMD_DSM)
2061 return;
2062 /* fall through */
2063 case ATA_PROT_NCQ:
2064 break; /* continue below */
2065 case ATA_PROT_PIO:
2066 mv_rw_multi_errata_sata24(qc);
2067 return;
2068 default:
2069 return;
2070 }
2071
2072 /* Fill in command request block
2073 */
2074 if (!(tf->flags & ATA_TFLAG_WRITE))
2075 flags |= CRQB_FLAG_READ;
2076 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2077 flags |= qc->tag << CRQB_TAG_SHIFT;
2078 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2079
2080 /* get current queue index from software */
2081 in_index = pp->req_idx;
2082
2083 pp->crqb[in_index].sg_addr =
2084 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2085 pp->crqb[in_index].sg_addr_hi =
2086 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2087 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2088
2089 cw = &pp->crqb[in_index].ata_cmd[0];
2090
2091 /* Sadly, the CRQB cannot accommodate all registers--there are
2092 * only 11 bytes...so we must pick and choose required
2093 * registers based on the command. So, we drop feature and
2094 * hob_feature for [RW] DMA commands, but they are needed for
2095 * NCQ. NCQ will drop hob_nsect, which is not needed there
2096 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2097 */
2098 switch (tf->command) {
2099 case ATA_CMD_READ:
2100 case ATA_CMD_READ_EXT:
2101 case ATA_CMD_WRITE:
2102 case ATA_CMD_WRITE_EXT:
2103 case ATA_CMD_WRITE_FUA_EXT:
2104 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2105 break;
2106 case ATA_CMD_FPDMA_READ:
2107 case ATA_CMD_FPDMA_WRITE:
2108 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2109 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2110 break;
2111 default:
2112 /* The only other commands EDMA supports in non-queued and
2113 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2114 * of which are defined/used by Linux. If we get here, this
2115 * driver needs work.
2116 *
2117 * FIXME: modify libata to give qc_prep a return value and
2118 * return error here.
2119 */
2120 BUG_ON(tf->command);
2121 break;
2122 }
2123 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2124 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2125 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2126 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2127 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2128 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2129 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2130 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2131 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
2132
2133 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2134 return;
2135 mv_fill_sg(qc);
2136 }
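
/*
 * Illustrative example: for ATA_CMD_READ_EXT the CRQB word stream built
 * above is hob_nsect, nsect, hob_lbal, lbal, hob_lbam, lbam, hob_lbah,
 * lbah, device, command(last) -- ten words, which is why feature and
 * hob_feature had to be dropped for [RW] DMA, while the NCQ case keeps
 * both of those and drops hob_nsect instead.
 */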
2137
2138 /**
2139 * mv_qc_prep_iie - Host specific command preparation.
2140 * @qc: queued command to prepare
2141 *
2142 * This routine simply redirects to the general purpose routine
2143 * if command is not DMA. Else, it handles prep of the CRQB
2144 * (command request block), does some sanity checking, and calls
2145 * the SG load routine.
2146 *
2147 * LOCKING:
2148 * Inherited from caller.
2149 */
2150 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2151 {
2152 struct ata_port *ap = qc->ap;
2153 struct mv_port_priv *pp = ap->private_data;
2154 struct mv_crqb_iie *crqb;
2155 struct ata_taskfile *tf = &qc->tf;
2156 unsigned in_index;
2157 u32 flags = 0;
2158
2159 if ((tf->protocol != ATA_PROT_DMA) &&
2160 (tf->protocol != ATA_PROT_NCQ))
2161 return;
2162 if (tf->command == ATA_CMD_DSM)
2163 return; /* use bmdma for this */
2164
2165 /* Fill in Gen IIE command request block */
2166 if (!(tf->flags & ATA_TFLAG_WRITE))
2167 flags |= CRQB_FLAG_READ;
2168
2169 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2170 flags |= qc->tag << CRQB_TAG_SHIFT;
2171 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
2172 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2173
2174 /* get current queue index from software */
2175 in_index = pp->req_idx;
2176
2177 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2178 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2179 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2180 crqb->flags = cpu_to_le32(flags);
2181
2182 crqb->ata_cmd[0] = cpu_to_le32(
2183 (tf->command << 16) |
2184 (tf->feature << 24)
2185 );
2186 crqb->ata_cmd[1] = cpu_to_le32(
2187 (tf->lbal << 0) |
2188 (tf->lbam << 8) |
2189 (tf->lbah << 16) |
2190 (tf->device << 24)
2191 );
2192 crqb->ata_cmd[2] = cpu_to_le32(
2193 (tf->hob_lbal << 0) |
2194 (tf->hob_lbam << 8) |
2195 (tf->hob_lbah << 16) |
2196 (tf->hob_feature << 24)
2197 );
2198 crqb->ata_cmd[3] = cpu_to_le32(
2199 (tf->nsect << 0) |
2200 (tf->hob_nsect << 8)
2201 );
2202
2203 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2204 return;
2205 mv_fill_sg(qc);
2206 }
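
/*
 * Note (illustrative): unlike mv_qc_prep() above, which emits a
 * variable-length stream of register-write words, the Gen IIE CRQB packs
 * the whole taskfile into four fixed 32-bit words: command + feature, the
 * low LBA bytes + device, the hob bytes + hob_feature, and the two sector
 * counts.  No per-command picking and choosing of registers is needed.
 */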
2207
2208 /**
2209 * mv_sff_check_status - fetch device status, if valid
2210 * @ap: ATA port to fetch status from
2211 *
2212 * When using command issue via mv_qc_issue_fis(),
2213 * the initial ATA_BUSY state does not show up in the
2214 * ATA status (shadow) register. This can confuse libata!
2215 *
2216 * So we have a hook here to fake ATA_BUSY for that situation,
2217 * until the first time a BUSY, DRQ, or ERR bit is seen.
2218 *
2219 * The rest of the time, it simply returns the ATA status register.
2220 */
2221 static u8 mv_sff_check_status(struct ata_port *ap)
2222 {
2223 u8 stat = ioread8(ap->ioaddr.status_addr);
2224 struct mv_port_priv *pp = ap->private_data;
2225
2226 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2227 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2228 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2229 else
2230 stat = ATA_BUSY;
2231 }
2232 return stat;
2233 }
2234
2235 /**
2236 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2237 * @fis: fis to be sent
2238 * @nwords: number of 32-bit words in the fis
2239 */
2240 static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2241 {
2242 void __iomem *port_mmio = mv_ap_base(ap);
2243 u32 ifctl, old_ifctl, ifstat;
2244 int i, timeout = 200, final_word = nwords - 1;
2245
2246 /* Initiate FIS transmission mode */
2247 old_ifctl = readl(port_mmio + SATA_IFCTL);
2248 ifctl = 0x100 | (old_ifctl & 0xf);
2249 writelfl(ifctl, port_mmio + SATA_IFCTL);
2250
2251 /* Send all words of the FIS except for the final word */
2252 for (i = 0; i < final_word; ++i)
2253 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2254
2255 /* Flag end-of-transmission, and then send the final word */
2256 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2257 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2258
2259 /*
2260 * Wait for FIS transmission to complete.
2261 * This typically takes just a single iteration.
2262 */
2263 do {
2264 ifstat = readl(port_mmio + SATA_IFSTAT);
2265 } while (!(ifstat & 0x1000) && --timeout);
2266
2267 /* Restore original port configuration */
2268 writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2269
2270 /* See if it worked */
2271 if ((ifstat & 0x3000) != 0x1000) {
2272 ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
2273 __func__, ifstat);
2274 return AC_ERR_OTHER;
2275 }
2276 return 0;
2277 }
2278
2279 /**
2280 * mv_qc_issue_fis - Issue a command directly as a FIS
2281 * @qc: queued command to start
2282 *
2283 * Note that the ATA shadow registers are not updated
2284 * after command issue, so the device will appear "READY"
2285 * if polled, even while it is BUSY processing the command.
2286 *
2287 * So we use a status hook to fake ATA_BUSY until the drive changes state.
2288 *
2289 * Note: we don't get updated shadow regs on *completion*
2290 * of non-data commands. So avoid sending them via this function,
2291 * as they will appear to have completed immediately.
2292 *
2293 * GEN_IIE has special registers that we could get the result tf from,
2294 * but earlier chipsets do not. For now, we ignore those registers.
2295 */
2296 static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2297 {
2298 struct ata_port *ap = qc->ap;
2299 struct mv_port_priv *pp = ap->private_data;
2300 struct ata_link *link = qc->dev->link;
2301 u32 fis[5];
2302 int err = 0;
2303
2304 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2305 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2306 if (err)
2307 return err;
2308
2309 switch (qc->tf.protocol) {
2310 case ATAPI_PROT_PIO:
2311 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2312 /* fall through */
2313 case ATAPI_PROT_NODATA:
2314 ap->hsm_task_state = HSM_ST_FIRST;
2315 break;
2316 case ATA_PROT_PIO:
2317 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2318 if (qc->tf.flags & ATA_TFLAG_WRITE)
2319 ap->hsm_task_state = HSM_ST_FIRST;
2320 else
2321 ap->hsm_task_state = HSM_ST;
2322 break;
2323 default:
2324 ap->hsm_task_state = HSM_ST_LAST;
2325 break;
2326 }
2327
2328 if (qc->tf.flags & ATA_TFLAG_POLLING)
2329 ata_sff_queue_pio_task(link, 0);
2330 return 0;
2331 }
2332
2333 /**
2334 * mv_qc_issue - Initiate a command to the host
2335 * @qc: queued command to start
2336 *
2337 * This routine simply redirects to the general purpose routine
2338 * if command is not DMA. Else, it sanity checks our local
2339 * caches of the request producer/consumer indices then enables
2340 * DMA and bumps the request producer index.
2341 *
2342 * LOCKING:
2343 * Inherited from caller.
2344 */
2345 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2346 {
2347 static int limit_warnings = 10;
2348 struct ata_port *ap = qc->ap;
2349 void __iomem *port_mmio = mv_ap_base(ap);
2350 struct mv_port_priv *pp = ap->private_data;
2351 u32 in_index;
2352 unsigned int port_irqs;
2353
2354 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2355
2356 switch (qc->tf.protocol) {
2357 case ATA_PROT_DMA:
2358 if (qc->tf.command == ATA_CMD_DSM) {
2359 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
2360 return AC_ERR_OTHER;
2361 break; /* use bmdma for this */
2362 }
2363 /* fall through */
2364 case ATA_PROT_NCQ:
2365 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2366 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2367 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2368
2369 /* Write the request in pointer to kick the EDMA to life */
2370 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2371 port_mmio + EDMA_REQ_Q_IN_PTR);
2372 return 0;
2373
2374 case ATA_PROT_PIO:
2375 /*
2376 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2377 *
2378 * Someday, we might implement special polling workarounds
2379 * for these, but it all seems rather unnecessary since we
2380 * normally use only DMA for commands which transfer more
2381 * than a single block of data.
2382 *
2383 * Much of the time, this could just work regardless.
2384 * So for now, just log the incident, and allow the attempt.
2385 */
2386 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2387 --limit_warnings;
2388 ata_link_warn(qc->dev->link, DRV_NAME
2389 ": attempting PIO w/multiple DRQ: "
2390 "this may fail due to h/w errata\n");
2391 }
2392 /* fall through */
2393 case ATA_PROT_NODATA:
2394 case ATAPI_PROT_PIO:
2395 case ATAPI_PROT_NODATA:
2396 if (ap->flags & ATA_FLAG_PIO_POLLING)
2397 qc->tf.flags |= ATA_TFLAG_POLLING;
2398 break;
2399 }
2400
2401 if (qc->tf.flags & ATA_TFLAG_POLLING)
2402 port_irqs = ERR_IRQ; /* mask device interrupt when polling */
2403 else
2404 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2405
2406 /*
2407 * We're about to send a non-EDMA capable command to the
2408 * port. Turn off EDMA so there won't be problems accessing
2409 * the shadow block and other registers.
2410 */
2411 mv_stop_edma(ap);
2412 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2413 mv_pmp_select(ap, qc->dev->link->pmp);
2414
2415 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2416 struct mv_host_priv *hpriv = ap->host->private_data;
2417 /*
2418 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2419 *
2420 * After any NCQ error, the READ_LOG_EXT command
2421 * from libata-eh *must* use mv_qc_issue_fis().
2422 * Otherwise it might fail, due to chip errata.
2423 *
2424 * Rather than special-case it, we'll just *always*
2425 * use this method here for READ_LOG_EXT, making for
2426 * easier testing.
2427 */
2428 if (IS_GEN_II(hpriv))
2429 return mv_qc_issue_fis(qc);
2430 }
2431 return ata_bmdma_qc_issue(qc);
2432 }
2433
2434 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2435 {
2436 struct mv_port_priv *pp = ap->private_data;
2437 struct ata_queued_cmd *qc;
2438
2439 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2440 return NULL;
2441 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2442 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2443 return qc;
2444 return NULL;
2445 }
2446
2447 static void mv_pmp_error_handler(struct ata_port *ap)
2448 {
2449 unsigned int pmp, pmp_map;
2450 struct mv_port_priv *pp = ap->private_data;
2451
2452 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2453 /*
2454 * Perform NCQ error analysis on failed PMPs
2455 * before we freeze the port entirely.
2456 *
2457 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2458 */
2459 pmp_map = pp->delayed_eh_pmp_map;
2460 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2461 for (pmp = 0; pmp_map != 0; pmp++) {
2462 unsigned int this_pmp = (1 << pmp);
2463 if (pmp_map & this_pmp) {
2464 struct ata_link *link = &ap->pmp_link[pmp];
2465 pmp_map &= ~this_pmp;
2466 ata_eh_analyze_ncq_error(link);
2467 }
2468 }
2469 ata_port_freeze(ap);
2470 }
2471 sata_pmp_error_handler(ap);
2472 }
2473
2474 static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2475 {
2476 void __iomem *port_mmio = mv_ap_base(ap);
2477
2478 return readl(port_mmio + SATA_TESTCTL) >> 16;
2479 }
2480
2481 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2482 {
2483 struct ata_eh_info *ehi;
2484 unsigned int pmp;
2485
2486 /*
2487 * Initialize EH info for PMPs which saw device errors
2488 */
2489 ehi = &ap->link.eh_info;
2490 for (pmp = 0; pmp_map != 0; pmp++) {
2491 unsigned int this_pmp = (1 << pmp);
2492 if (pmp_map & this_pmp) {
2493 struct ata_link *link = &ap->pmp_link[pmp];
2494
2495 pmp_map &= ~this_pmp;
2496 ehi = &link->eh_info;
2497 ata_ehi_clear_desc(ehi);
2498 ata_ehi_push_desc(ehi, "dev err");
2499 ehi->err_mask |= AC_ERR_DEV;
2500 ehi->action |= ATA_EH_RESET;
2501 ata_link_abort(link);
2502 }
2503 }
2504 }
2505
2506 static int mv_req_q_empty(struct ata_port *ap)
2507 {
2508 void __iomem *port_mmio = mv_ap_base(ap);
2509 u32 in_ptr, out_ptr;
2510
2511 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2512 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2513 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2514 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2515 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2516 }
2517
2518 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2519 {
2520 struct mv_port_priv *pp = ap->private_data;
2521 int failed_links;
2522 unsigned int old_map, new_map;
2523
2524 /*
2525 * Device error during FBS+NCQ operation:
2526 *
2527 * Set a port flag to prevent further I/O being enqueued.
2528 * Leave the EDMA running to drain outstanding commands from this port.
2529 * Perform the post-mortem/EH only when all responses are complete.
2530 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2531 */
2532 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2533 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2534 pp->delayed_eh_pmp_map = 0;
2535 }
2536 old_map = pp->delayed_eh_pmp_map;
2537 new_map = old_map | mv_get_err_pmp_map(ap);
2538
2539 if (old_map != new_map) {
2540 pp->delayed_eh_pmp_map = new_map;
2541 mv_pmp_eh_prep(ap, new_map & ~old_map);
2542 }
2543 failed_links = hweight16(new_map);
2544
2545 ata_port_info(ap,
2546 "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
2547 __func__, pp->delayed_eh_pmp_map,
2548 ap->qc_active, failed_links,
2549 ap->nr_active_links);
2550
2551 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2552 mv_process_crpb_entries(ap, pp);
2553 mv_stop_edma(ap);
2554 mv_eh_freeze(ap);
2555 ata_port_info(ap, "%s: done\n", __func__);
2556 return 1; /* handled */
2557 }
2558 ata_port_info(ap, "%s: waiting\n", __func__);
2559 return 1; /* handled */
2560 }
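
/*
 * Note on the delayed-EH flow (a summary of behaviour elsewhere in this
 * file): once MV_PP_FLAG_DELAYED_EH is set here, mv_port_intr() calls
 * back into this function as further CRPB responses drain, and
 * mv_pmp_error_handler() later walks delayed_eh_pmp_map, running
 * ata_eh_analyze_ncq_error() on each failed PMP link before the port is
 * frozen and normal PMP error handling takes over.
 */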
2561
2562 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2563 {
2564 /*
2565 * Possible future enhancement:
2566 *
2567 * FBS+non-NCQ operation is not yet implemented.
2568 * See related notes in mv_edma_cfg().
2569 *
2570 * Device error during FBS+non-NCQ operation:
2571 *
2572 * We need to snapshot the shadow registers for each failed command.
2573 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2574 */
2575 return 0; /* not handled */
2576 }
2577
2578 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2579 {
2580 struct mv_port_priv *pp = ap->private_data;
2581
2582 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2583 return 0; /* EDMA was not active: not handled */
2584 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2585 return 0; /* FBS was not active: not handled */
2586
2587 if (!(edma_err_cause & EDMA_ERR_DEV))
2588 return 0; /* non DEV error: not handled */
2589 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2590 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2591 return 0; /* other problems: not handled */
2592
2593 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2594 /*
2595 * EDMA should NOT have self-disabled for this case.
2596 * If it did, then something is wrong elsewhere,
2597 * and we cannot handle it here.
2598 */
2599 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2600 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2601 __func__, edma_err_cause, pp->pp_flags);
2602 return 0; /* not handled */
2603 }
2604 return mv_handle_fbs_ncq_dev_err(ap);
2605 } else {
2606 /*
2607 * EDMA should have self-disabled for this case.
2608 * If it did not, then something is wrong elsewhere,
2609 * and we cannot handle it here.
2610 */
2611 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2612 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2613 __func__, edma_err_cause, pp->pp_flags);
2614 return 0; /* not handled */
2615 }
2616 return mv_handle_fbs_non_ncq_dev_err(ap);
2617 }
2618 return 0; /* not handled */
2619 }
2620
2621 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2622 {
2623 struct ata_eh_info *ehi = &ap->link.eh_info;
2624 char *when = "idle";
2625
2626 ata_ehi_clear_desc(ehi);
2627 if (edma_was_enabled) {
2628 when = "EDMA enabled";
2629 } else {
2630 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2631 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2632 when = "polling";
2633 }
2634 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2635 ehi->err_mask |= AC_ERR_OTHER;
2636 ehi->action |= ATA_EH_RESET;
2637 ata_port_freeze(ap);
2638 }
2639
2640 /**
2641 * mv_err_intr - Handle error interrupts on the port
2642 * @ap: ATA channel to manipulate
2643 *
2644 * Most cases require a full reset of the chip's state machine,
2645 * which also performs a COMRESET.
2646 * Also, if the port disabled DMA, update our cached copy to match.
2647 *
2648 * LOCKING:
2649 * Inherited from caller.
2650 */
2651 static void mv_err_intr(struct ata_port *ap)
2652 {
2653 void __iomem *port_mmio = mv_ap_base(ap);
2654 u32 edma_err_cause, eh_freeze_mask, serr = 0;
2655 u32 fis_cause = 0;
2656 struct mv_port_priv *pp = ap->private_data;
2657 struct mv_host_priv *hpriv = ap->host->private_data;
2658 unsigned int action = 0, err_mask = 0;
2659 struct ata_eh_info *ehi = &ap->link.eh_info;
2660 struct ata_queued_cmd *qc;
2661 int abort = 0;
2662
2663 /*
2664 * Read and clear the SError and err_cause bits.
2665 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2666 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2667 */
2668 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2669 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2670
2671 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2672 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2673 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2674 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2675 }
2676 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2677
2678 if (edma_err_cause & EDMA_ERR_DEV) {
2679 /*
2680 * Device errors during FIS-based switching operation
2681 * require special handling.
2682 */
2683 if (mv_handle_dev_err(ap, edma_err_cause))
2684 return;
2685 }
2686
2687 qc = mv_get_active_qc(ap);
2688 ata_ehi_clear_desc(ehi);
2689 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2690 edma_err_cause, pp->pp_flags);
2691
2692 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2693 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2694 if (fis_cause & FIS_IRQ_CAUSE_AN) {
2695 u32 ec = edma_err_cause &
2696 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2697 sata_async_notification(ap);
2698 if (!ec)
2699 return; /* Just an AN; no need for the nukes */
2700 ata_ehi_push_desc(ehi, "SDB notify");
2701 }
2702 }
2703 /*
2704 * All generations share these EDMA error cause bits:
2705 */
2706 if (edma_err_cause & EDMA_ERR_DEV) {
2707 err_mask |= AC_ERR_DEV;
2708 action |= ATA_EH_RESET;
2709 ata_ehi_push_desc(ehi, "dev error");
2710 }
2711 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2712 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2713 EDMA_ERR_INTRL_PAR)) {
2714 err_mask |= AC_ERR_ATA_BUS;
2715 action |= ATA_EH_RESET;
2716 ata_ehi_push_desc(ehi, "parity error");
2717 }
2718 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2719 ata_ehi_hotplugged(ehi);
2720 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2721 "dev disconnect" : "dev connect");
2722 action |= ATA_EH_RESET;
2723 }
2724
2725 /*
2726 * Gen-I has a different SELF_DIS bit,
2727 * different FREEZE bits, and no SERR bit:
2728 */
2729 if (IS_GEN_I(hpriv)) {
2730 eh_freeze_mask = EDMA_EH_FREEZE_5;
2731 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2732 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2733 ata_ehi_push_desc(ehi, "EDMA self-disable");
2734 }
2735 } else {
2736 eh_freeze_mask = EDMA_EH_FREEZE;
2737 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2738 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2739 ata_ehi_push_desc(ehi, "EDMA self-disable");
2740 }
2741 if (edma_err_cause & EDMA_ERR_SERR) {
2742 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2743 err_mask |= AC_ERR_ATA_BUS;
2744 action |= ATA_EH_RESET;
2745 }
2746 }
2747
2748 if (!err_mask) {
2749 err_mask = AC_ERR_OTHER;
2750 action |= ATA_EH_RESET;
2751 }
2752
2753 ehi->serror |= serr;
2754 ehi->action |= action;
2755
2756 if (qc)
2757 qc->err_mask |= err_mask;
2758 else
2759 ehi->err_mask |= err_mask;
2760
2761 if (err_mask == AC_ERR_DEV) {
2762 /*
2763 * Cannot do ata_port_freeze() here,
2764 * because it would kill PIO access,
2765 * which is needed for further diagnosis.
2766 */
2767 mv_eh_freeze(ap);
2768 abort = 1;
2769 } else if (edma_err_cause & eh_freeze_mask) {
2770 /*
2771 * Note to self: ata_port_freeze() calls ata_port_abort()
2772 */
2773 ata_port_freeze(ap);
2774 } else {
2775 abort = 1;
2776 }
2777
2778 if (abort) {
2779 if (qc)
2780 ata_link_abort(qc->dev->link);
2781 else
2782 ata_port_abort(ap);
2783 }
2784 }
2785
2786 static bool mv_process_crpb_response(struct ata_port *ap,
2787 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2788 {
2789 u8 ata_status;
2790 u16 edma_status = le16_to_cpu(response->flags);
2791
2792 /*
2793 * edma_status from a response queue entry:
2794 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2795 * MSB is saved ATA status from command completion.
2796 */
2797 if (!ncq_enabled) {
2798 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2799 if (err_cause) {
2800 /*
2801 * Error will be seen/handled by
2802 * mv_err_intr(). So do nothing at all here.
2803 */
2804 return false;
2805 }
2806 }
2807 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2808 if (!ac_err_mask(ata_status))
2809 return true;
2810 /* else: leave it for mv_err_intr() */
2811 return false;
2812 }
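
/*
 * Worked example (illustrative): a clean non-NCQ completion might arrive
 * as edma_status = 0x5000 -- ATA status 0x50 in the MSB with BUSY/DRQ/ERR
 * clear, and no error-cause bits in the LSB -- and is completed here.
 * A response of 0x5100 instead carries ERR in the ATA status (0x51), so
 * ac_err_mask() is non-zero and the entry is left for mv_err_intr() to
 * sort out.
 */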
2813
2814 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2815 {
2816 void __iomem *port_mmio = mv_ap_base(ap);
2817 struct mv_host_priv *hpriv = ap->host->private_data;
2818 u32 in_index;
2819 bool work_done = false;
2820 u32 done_mask = 0;
2821 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2822
2823 /* Get the hardware queue position index */
2824 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2825 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2826
2827 /* Process new responses received since the last time we looked */
2828 while (in_index != pp->resp_idx) {
2829 unsigned int tag;
2830 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2831
2832 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2833
2834 if (IS_GEN_I(hpriv)) {
2835 /* 50xx: no NCQ, only one command active at a time */
2836 tag = ap->link.active_tag;
2837 } else {
2838 /* Gen II/IIE: get command tag from CRPB entry */
2839 tag = le16_to_cpu(response->id) & 0x1f;
2840 }
2841 if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2842 done_mask |= 1 << tag;
2843 work_done = true;
2844 }
2845
2846 if (work_done) {
2847 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2848
2849 /* Update the software queue position index in hardware */
2850 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2851 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2852 port_mmio + EDMA_RSP_Q_OUT_PTR);
2853 }
2854 }
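
/*
 * Worked example (illustrative): with NCQ tags 0, 1 and 3 outstanding
 * (ap->qc_active == 0x0b) and responses for tags 0 and 3 in the ring,
 * done_mask becomes 0x09, and ata_qc_complete_multiple() receives
 * 0x0b ^ 0x09 == 0x02: the mask of commands still in flight.
 */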
2855
2856 static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2857 {
2858 struct mv_port_priv *pp;
2859 int edma_was_enabled;
2860
2861 /*
2862 * Grab a snapshot of the EDMA_EN flag setting,
2863 * so that we have a consistent view for this port,
2864 * even if one of the routines we call changes it.
2865 */
2866 pp = ap->private_data;
2867 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2868 /*
2869 * Process completed CRPB response(s) before other events.
2870 */
2871 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2872 mv_process_crpb_entries(ap, pp);
2873 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2874 mv_handle_fbs_ncq_dev_err(ap);
2875 }
2876 /*
2877 * Handle chip-reported errors, or continue on to handle PIO.
2878 */
2879 if (unlikely(port_cause & ERR_IRQ)) {
2880 mv_err_intr(ap);
2881 } else if (!edma_was_enabled) {
2882 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2883 if (qc)
2884 ata_bmdma_port_intr(ap, qc);
2885 else
2886 mv_unexpected_intr(ap, edma_was_enabled);
2887 }
2888 }
2889
2890 /**
2891 * mv_host_intr - Handle all interrupts on the given host controller
2892 * @host: host specific structure
2893 * @main_irq_cause: Main interrupt cause register for the chip.
2894 *
2895 * LOCKING:
2896 * Inherited from caller.
2897 */
2898 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2899 {
2900 struct mv_host_priv *hpriv = host->private_data;
2901 void __iomem *mmio = hpriv->base, *hc_mmio;
2902 unsigned int handled = 0, port;
2903
2904 /* If asserted, clear the "all ports" IRQ coalescing bit */
2905 if (main_irq_cause & ALL_PORTS_COAL_DONE)
2906 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2907
2908 for (port = 0; port < hpriv->n_ports; port++) {
2909 struct ata_port *ap = host->ports[port];
2910 unsigned int p, shift, hardport, port_cause;
2911
2912 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2913 /*
2914 * Each hc within the host has its own hc_irq_cause register,
2915 * where the interrupting ports' bits get ack'd.
2916 */
2917 if (hardport == 0) { /* first port on this hc ? */
2918 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2919 u32 port_mask, ack_irqs;
2920 /*
2921 * Skip this entire hc if nothing pending for any ports
2922 */
2923 if (!hc_cause) {
2924 port += MV_PORTS_PER_HC - 1;
2925 continue;
2926 }
2927 /*
2928 * We don't need/want to read the hc_irq_cause register,
2929 * because doing so hurts performance, and
2930 * main_irq_cause already gives us everything we need.
2931 *
2932 * But we do have to *write* to the hc_irq_cause to ack
2933 * the ports that we are handling this time through.
2934 *
2935 * This requires that we create a bitmap for those
2936 * ports which interrupted us, and use that bitmap
2937 * to ack (only) those ports via hc_irq_cause.
2938 */
2939 ack_irqs = 0;
2940 if (hc_cause & PORTS_0_3_COAL_DONE)
2941 ack_irqs = HC_COAL_IRQ;
2942 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2943 if ((port + p) >= hpriv->n_ports)
2944 break;
2945 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2946 if (hc_cause & port_mask)
2947 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2948 }
2949 hc_mmio = mv_hc_base_from_port(mmio, port);
2950 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2951 handled = 1;
2952 }
2953 /*
2954 * Handle interrupts signalled for this port:
2955 */
2956 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2957 if (port_cause)
2958 mv_port_intr(ap, port_cause);
2959 }
2960 return handled;
2961 }
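
/*
 * Worked example of the ack bitmap (illustrative): if hc_cause shows
 * DONE_IRQ pending only in hardport 1's two-bit field, the loop sets
 * ack_irqs = (DMA_IRQ | DEV_IRQ) << 1, and writing ~ack_irqs to
 * hc_irq_cause (write-0-to-clear, as in mv_eh_thaw()) clears exactly that
 * port's bits while leaving other ports' pending bits intact.
 */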
2962
2963 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2964 {
2965 struct mv_host_priv *hpriv = host->private_data;
2966 struct ata_port *ap;
2967 struct ata_queued_cmd *qc;
2968 struct ata_eh_info *ehi;
2969 unsigned int i, err_mask, printed = 0;
2970 u32 err_cause;
2971
2972 err_cause = readl(mmio + hpriv->irq_cause_offset);
2973
2974 dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2975
2976 DPRINTK("All regs @ PCI error\n");
2977 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2978
2979 writelfl(0, mmio + hpriv->irq_cause_offset);
2980
2981 for (i = 0; i < host->n_ports; i++) {
2982 ap = host->ports[i];
2983 if (!ata_link_offline(&ap->link)) {
2984 ehi = &ap->link.eh_info;
2985 ata_ehi_clear_desc(ehi);
2986 if (!printed++)
2987 ata_ehi_push_desc(ehi,
2988 "PCI err cause 0x%08x", err_cause);
2989 err_mask = AC_ERR_HOST_BUS;
2990 ehi->action = ATA_EH_RESET;
2991 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2992 if (qc)
2993 qc->err_mask |= err_mask;
2994 else
2995 ehi->err_mask |= err_mask;
2996
2997 ata_port_freeze(ap);
2998 }
2999 }
3000 return 1; /* handled */
3001 }
3002
3003 /**
3004 * mv_interrupt - Main interrupt event handler
3005 * @irq: unused
3006 * @dev_instance: private data; in this case the host structure
3007 *
3008 * Read the read-only register to determine if any host
3009 * controllers have pending interrupts. If so, call the lower-level
3010 * routine to handle them. Also check for PCI errors, which are only
3011 * reported here.
3012 *
3013 * LOCKING:
3014 * This routine holds the host lock while processing pending
3015 * interrupts.
3016 */
3017 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
3018 {
3019 struct ata_host *host = dev_instance;
3020 struct mv_host_priv *hpriv = host->private_data;
3021 unsigned int handled = 0;
3022 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
3023 u32 main_irq_cause, pending_irqs;
3024
3025 spin_lock(&host->lock);
3026
3027 /* for MSI: block new interrupts while in here */
3028 if (using_msi)
3029 mv_write_main_irq_mask(0, hpriv);
3030
3031 main_irq_cause = readl(hpriv->main_irq_cause_addr);
3032 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
3033 /*
3034 * Deal with cases where we either have nothing pending, or have read
3035 * a bogus register value which can indicate HW removal or PCI fault.
3036 */
3037 if (pending_irqs && main_irq_cause != 0xffffffffU) {
3038 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3039 handled = mv_pci_error(host, hpriv->base);
3040 else
3041 handled = mv_host_intr(host, pending_irqs);
3042 }
3043
3044 /* for MSI: unmask; interrupt cause bits will retrigger now */
3045 if (using_msi)
3046 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3047
3048 spin_unlock(&host->lock);
3049
3050 return IRQ_RETVAL(handled);
3051 }
3052
3053 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3054 {
3055 unsigned int ofs;
3056
3057 switch (sc_reg_in) {
3058 case SCR_STATUS:
3059 case SCR_ERROR:
3060 case SCR_CONTROL:
3061 ofs = sc_reg_in * sizeof(u32);
3062 break;
3063 default:
3064 ofs = 0xffffffffU;
3065 break;
3066 }
3067 return ofs;
3068 }
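
/*
 * Illustrative mapping: the 5xxx PHY block lays the SCR registers out as
 * consecutive 32-bit words, so SCR_ERROR (1) resolves to byte offset 4
 * from the phy base.  Anything else yields the 0xffffffffU sentinel,
 * which mv5_scr_read()/mv5_scr_write() below translate into -EINVAL.
 */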
3069
3070 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3071 {
3072 struct mv_host_priv *hpriv = link->ap->host->private_data;
3073 void __iomem *mmio = hpriv->base;
3074 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3075 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3076
3077 if (ofs != 0xffffffffU) {
3078 *val = readl(addr + ofs);
3079 return 0;
3080 } else
3081 return -EINVAL;
3082 }
3083
3084 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3085 {
3086 struct mv_host_priv *hpriv = link->ap->host->private_data;
3087 void __iomem *mmio = hpriv->base;
3088 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3089 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3090
3091 if (ofs != 0xffffffffU) {
3092 writelfl(val, addr + ofs);
3093 return 0;
3094 } else
3095 return -EINVAL;
3096 }
3097
3098 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3099 {
3100 struct pci_dev *pdev = to_pci_dev(host->dev);
3101 int early_5080;
3102
3103 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3104
3105 if (!early_5080) {
3106 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3107 tmp |= (1 << 0);
3108 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3109 }
3110
3111 mv_reset_pci_bus(host, mmio);
3112 }
3113
3114 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3115 {
3116 writel(0x0fcfffff, mmio + FLASH_CTL);
3117 }
3118
3119 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3120 void __iomem *mmio)
3121 {
3122 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3123 u32 tmp;
3124
3125 tmp = readl(phy_mmio + MV5_PHY_MODE);
3126
3127 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
3128 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
3129 }
3130
3131 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3132 {
3133 u32 tmp;
3134
3135 writel(0, mmio + GPIO_PORT_CTL);
3136
3137 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3138
3139 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3140 tmp |= ~(1 << 0);
3141 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3142 }
3143
3144 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3145 unsigned int port)
3146 {
3147 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3148 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3149 u32 tmp;
3150 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3151
3152 if (fix_apm_sq) {
3153 tmp = readl(phy_mmio + MV5_LTMODE);
3154 tmp |= (1 << 19);
3155 writel(tmp, phy_mmio + MV5_LTMODE);
3156
3157 tmp = readl(phy_mmio + MV5_PHY_CTL);
3158 tmp &= ~0x3;
3159 tmp |= 0x1;
3160 writel(tmp, phy_mmio + MV5_PHY_CTL);
3161 }
3162
3163 tmp = readl(phy_mmio + MV5_PHY_MODE);
3164 tmp &= ~mask;
3165 tmp |= hpriv->signal[port].pre;
3166 tmp |= hpriv->signal[port].amps;
3167 writel(tmp, phy_mmio + MV5_PHY_MODE);
3168 }
3169
3170
3171 #undef ZERO
3172 #define ZERO(reg) writel(0, port_mmio + (reg))
3173 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3174 unsigned int port)
3175 {
3176 void __iomem *port_mmio = mv_port_base(mmio, port);
3177
3178 mv_reset_channel(hpriv, mmio, port);
3179
3180 ZERO(0x028); /* command */
3181 writel(0x11f, port_mmio + EDMA_CFG);
3182 ZERO(0x004); /* timer */
3183 ZERO(0x008); /* irq err cause */
3184 ZERO(0x00c); /* irq err mask */
3185 ZERO(0x010); /* rq bah */
3186 ZERO(0x014); /* rq inp */
3187 ZERO(0x018); /* rq outp */
3188 ZERO(0x01c); /* respq bah */
3189 ZERO(0x024); /* respq outp */
3190 ZERO(0x020); /* respq inp */
3191 ZERO(0x02c); /* test control */
3192 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3193 }
3194 #undef ZERO
3195
3196 #define ZERO(reg) writel(0, hc_mmio + (reg))
3197 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3198 unsigned int hc)
3199 {
3200 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3201 u32 tmp;
3202
3203 ZERO(0x00c);
3204 ZERO(0x010);
3205 ZERO(0x014);
3206 ZERO(0x018);
3207
3208 tmp = readl(hc_mmio + 0x20);
3209 tmp &= 0x1c1c1c1c;
3210 tmp |= 0x03030303;
3211 writel(tmp, hc_mmio + 0x20);
3212 }
3213 #undef ZERO
3214
3215 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3216 unsigned int n_hc)
3217 {
3218 unsigned int hc, port;
3219
3220 for (hc = 0; hc < n_hc; hc++) {
3221 for (port = 0; port < MV_PORTS_PER_HC; port++)
3222 mv5_reset_hc_port(hpriv, mmio,
3223 (hc * MV_PORTS_PER_HC) + port);
3224
3225 mv5_reset_one_hc(hpriv, mmio, hc);
3226 }
3227
3228 return 0;
3229 }
3230
3231 #undef ZERO
3232 #define ZERO(reg) writel(0, mmio + (reg))
3233 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3234 {
3235 struct mv_host_priv *hpriv = host->private_data;
3236 u32 tmp;
3237
3238 tmp = readl(mmio + MV_PCI_MODE);
3239 tmp &= 0xff00ffff;
3240 writel(tmp, mmio + MV_PCI_MODE);
3241
3242 ZERO(MV_PCI_DISC_TIMER);
3243 ZERO(MV_PCI_MSI_TRIGGER);
3244 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3245 ZERO(MV_PCI_SERR_MASK);
3246 ZERO(hpriv->irq_cause_offset);
3247 ZERO(hpriv->irq_mask_offset);
3248 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3249 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3250 ZERO(MV_PCI_ERR_ATTRIBUTE);
3251 ZERO(MV_PCI_ERR_COMMAND);
3252 }
3253 #undef ZERO
3254
3255 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3256 {
3257 u32 tmp;
3258
3259 mv5_reset_flash(hpriv, mmio);
3260
3261 tmp = readl(mmio + GPIO_PORT_CTL);
3262 tmp &= 0x3;
3263 tmp |= (1 << 5) | (1 << 6);
3264 writel(tmp, mmio + GPIO_PORT_CTL);
3265 }
3266
3267 /**
3268 * mv6_reset_hc - Perform the 6xxx global soft reset
3269 * @mmio: base address of the HBA
3270 *
3271 * This routine only applies to 6xxx parts.
3272 *
3273 * LOCKING:
3274 * Inherited from caller.
3275 */
3276 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3277 unsigned int n_hc)
3278 {
3279 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3280 int i, rc = 0;
3281 u32 t;
3282
3283 /* Following procedure defined in PCI "main command and status
3284 * register" table.
3285 */
3286 t = readl(reg);
3287 writel(t | STOP_PCI_MASTER, reg);
3288
3289 for (i = 0; i < 1000; i++) {
3290 udelay(1);
3291 t = readl(reg);
3292 if (PCI_MASTER_EMPTY & t)
3293 break;
3294 }
3295 if (!(PCI_MASTER_EMPTY & t)) {
3296 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3297 rc = 1;
3298 goto done;
3299 }
3300
3301 /* set reset */
3302 i = 5;
3303 do {
3304 writel(t | GLOB_SFT_RST, reg);
3305 t = readl(reg);
3306 udelay(1);
3307 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3308
3309 if (!(GLOB_SFT_RST & t)) {
3310 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3311 rc = 1;
3312 goto done;
3313 }
3314
3315 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3316 i = 5;
3317 do {
3318 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3319 t = readl(reg);
3320 udelay(1);
3321 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3322
3323 if (GLOB_SFT_RST & t) {
3324 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3325 rc = 1;
3326 }
3327 done:
3328 return rc;
3329 }
3330
3331 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3332 void __iomem *mmio)
3333 {
3334 void __iomem *port_mmio;
3335 u32 tmp;
3336
3337 tmp = readl(mmio + RESET_CFG);
3338 if ((tmp & (1 << 0)) == 0) {
3339 hpriv->signal[idx].amps = 0x7 << 8;
3340 hpriv->signal[idx].pre = 0x1 << 5;
3341 return;
3342 }
3343
3344 port_mmio = mv_port_base(mmio, idx);
3345 tmp = readl(port_mmio + PHY_MODE2);
3346
3347 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3348 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3349 }
3350
3351 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3352 {
3353 writel(0x00000060, mmio + GPIO_PORT_CTL);
3354 }
3355
3356 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3357 unsigned int port)
3358 {
3359 void __iomem *port_mmio = mv_port_base(mmio, port);
3360
3361 u32 hp_flags = hpriv->hp_flags;
3362 int fix_phy_mode2 =
3363 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3364 int fix_phy_mode4 =
3365 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3366 u32 m2, m3;
3367
3368 if (fix_phy_mode2) {
3369 m2 = readl(port_mmio + PHY_MODE2);
3370 m2 &= ~(1 << 16);
3371 m2 |= (1 << 31);
3372 writel(m2, port_mmio + PHY_MODE2);
3373
3374 udelay(200);
3375
3376 m2 = readl(port_mmio + PHY_MODE2);
3377 m2 &= ~((1 << 16) | (1 << 31));
3378 writel(m2, port_mmio + PHY_MODE2);
3379
3380 udelay(200);
3381 }
3382
3383 /*
3384 * Gen-II/IIe PHY_MODE3 errata RM#2:
3385 * Achieves better receiver noise performance than the h/w default:
3386 */
3387 m3 = readl(port_mmio + PHY_MODE3);
3388 m3 = (m3 & 0x1f) | (0x5555601 << 5);
3389
3390 /* Guideline 88F5182 (GL# SATA-S11) */
3391 if (IS_SOC(hpriv))
3392 m3 &= ~0x1c;
3393
3394 if (fix_phy_mode4) {
3395 u32 m4 = readl(port_mmio + PHY_MODE4);
3396 /*
3397 * Enforce reserved-bit restrictions on GenIIe devices only.
3398 * For earlier chipsets, force only the internal config field
3399 * (workaround for errata FEr SATA#10 part 1).
3400 */
3401 if (IS_GEN_IIE(hpriv))
3402 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3403 else
3404 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3405 writel(m4, port_mmio + PHY_MODE4);
3406 }
3407 /*
3408 * Workaround for 60x1-B2 errata SATA#13:
3409 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3410 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3411 * Or ensure we use writelfl() when writing PHY_MODE4.
3412 */
3413 writel(m3, port_mmio + PHY_MODE3);
3414
3415 /* Revert values of pre-emphasis and signal amps to the saved ones */
3416 m2 = readl(port_mmio + PHY_MODE2);
3417
3418 m2 &= ~MV_M2_PREAMP_MASK;
3419 m2 |= hpriv->signal[port].amps;
3420 m2 |= hpriv->signal[port].pre;
3421 m2 &= ~(1 << 16);
3422
3423 /* according to mvSata 3.6.1, some IIE values are fixed */
3424 if (IS_GEN_IIE(hpriv)) {
3425 m2 &= ~0xC30FF01F;
3426 m2 |= 0x0000900F;
3427 }
3428
3429 writel(m2, port_mmio + PHY_MODE2);
3430 }
3431
3432 /* TODO: use the generic LED interface to configure the SATA Presence */
3433 /* & Activity LEDs on the board */
3434 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3435 void __iomem *mmio)
3436 {
3437 return;
3438 }
3439
3440 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3441 void __iomem *mmio)
3442 {
3443 void __iomem *port_mmio;
3444 u32 tmp;
3445
3446 port_mmio = mv_port_base(mmio, idx);
3447 tmp = readl(port_mmio + PHY_MODE2);
3448
3449 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3450 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3451 }
3452
3453 #undef ZERO
3454 #define ZERO(reg) writel(0, port_mmio + (reg))
3455 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3456 void __iomem *mmio, unsigned int port)
3457 {
3458 void __iomem *port_mmio = mv_port_base(mmio, port);
3459
3460 mv_reset_channel(hpriv, mmio, port);
3461
3462 ZERO(0x028); /* command */
3463 writel(0x101f, port_mmio + EDMA_CFG);
3464 ZERO(0x004); /* timer */
3465 ZERO(0x008); /* irq err cause */
3466 ZERO(0x00c); /* irq err mask */
3467 ZERO(0x010); /* rq bah */
3468 ZERO(0x014); /* rq inp */
3469 ZERO(0x018); /* rq outp */
3470 ZERO(0x01c); /* respq bah */
3471 ZERO(0x024); /* respq outp */
3472 ZERO(0x020); /* respq inp */
3473 ZERO(0x02c); /* test control */
3474 writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3475 }
3476
3477 #undef ZERO
3478
3479 #define ZERO(reg) writel(0, hc_mmio + (reg))
3480 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3481 void __iomem *mmio)
3482 {
3483 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3484
3485 ZERO(0x00c);
3486 ZERO(0x010);
3487 ZERO(0x014);
3488
3489 }
3490
3491 #undef ZERO
3492
3493 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3494 void __iomem *mmio, unsigned int n_hc)
3495 {
3496 unsigned int port;
3497
3498 for (port = 0; port < hpriv->n_ports; port++)
3499 mv_soc_reset_hc_port(hpriv, mmio, port);
3500
3501 mv_soc_reset_one_hc(hpriv, mmio);
3502
3503 return 0;
3504 }
3505
3506 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3507 void __iomem *mmio)
3508 {
3509 return;
3510 }
3511
3512 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3513 {
3514 return;
3515 }
3516
3517 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3518 void __iomem *mmio, unsigned int port)
3519 {
3520 void __iomem *port_mmio = mv_port_base(mmio, port);
3521 u32 reg;
3522
3523 reg = readl(port_mmio + PHY_MODE3);
3524 reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */
3525 reg |= (0x1 << 27);
3526 reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */
3527 reg |= (0x1 << 29);
3528 writel(reg, port_mmio + PHY_MODE3);
3529
3530 reg = readl(port_mmio + PHY_MODE4);
3531 reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3532 reg |= (0x1 << 16);
3533 writel(reg, port_mmio + PHY_MODE4);
3534
3535 reg = readl(port_mmio + PHY_MODE9_GEN2);
3536 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
3537 reg |= 0x8;
3538 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
3539 writel(reg, port_mmio + PHY_MODE9_GEN2);
3540
3541 reg = readl(port_mmio + PHY_MODE9_GEN1);
3542 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
3543 reg |= 0x8;
3544 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
3545 writel(reg, port_mmio + PHY_MODE9_GEN1);
3546 }
3547
3548 /**
3549 * soc_is_65n - check if the SoC is a 65 nm device
3550 *
3551 * Detect the type of the SoC by reading the PHYCFG_OFS register.
3552 * This register exists only in the 65 nm devices and should contain a
3553 * non-zero value; reading it on older devices returns 0.
3554 */
3555 static bool soc_is_65n(struct mv_host_priv *hpriv)
3556 {
3557 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3558
3559 if (readl(port0_mmio + PHYCFG_OFS))
3560 return true;
3561 return false;
3562 }
3563
3564 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3565 {
3566 u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3567
3568 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
3569 if (want_gen2i)
3570 ifcfg |= (1 << 7); /* enable gen2i speed */
3571 writelfl(ifcfg, port_mmio + SATA_IFCFG);
3572 }
3573
3574 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3575 unsigned int port_no)
3576 {
3577 void __iomem *port_mmio = mv_port_base(mmio, port_no);
3578
3579 /*
3580 * The datasheet warns against setting EDMA_RESET when EDMA is active
3581 * (but doesn't say what the problem might be). So we first try
3582 * to disable the EDMA engine before doing the EDMA_RESET operation.
3583 */
3584 mv_stop_edma_engine(port_mmio);
3585 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3586
3587 if (!IS_GEN_I(hpriv)) {
3588 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3589 mv_setup_ifcfg(port_mmio, 1);
3590 }
3591 /*
3592 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3593 * link, and physical layers. It resets all SATA interface registers
3594 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3595 */
3596 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3597 udelay(25); /* allow reset propagation */
3598 writelfl(0, port_mmio + EDMA_CMD);
3599
3600 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3601
3602 if (IS_GEN_I(hpriv))
3603 mdelay(1);
3604 }
3605
3606 static void mv_pmp_select(struct ata_port *ap, int pmp)
3607 {
3608 if (sata_pmp_supported(ap)) {
3609 void __iomem *port_mmio = mv_ap_base(ap);
3610 u32 reg = readl(port_mmio + SATA_IFCTL);
3611 int old = reg & 0xf;
3612
3613 if (old != pmp) {
3614 reg = (reg & ~0xf) | pmp;
3615 writelfl(reg, port_mmio + SATA_IFCTL);
3616 }
3617 }
3618 }
3619
3620 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3621 unsigned long deadline)
3622 {
3623 mv_pmp_select(link->ap, sata_srst_pmp(link));
3624 return sata_std_hardreset(link, class, deadline);
3625 }
3626
3627 static int mv_softreset(struct ata_link *link, unsigned int *class,
3628 unsigned long deadline)
3629 {
3630 mv_pmp_select(link->ap, sata_srst_pmp(link));
3631 return ata_sff_softreset(link, class, deadline);
3632 }
3633
3634 static int mv_hardreset(struct ata_link *link, unsigned int *class,
3635 unsigned long deadline)
3636 {
3637 struct ata_port *ap = link->ap;
3638 struct mv_host_priv *hpriv = ap->host->private_data;
3639 struct mv_port_priv *pp = ap->private_data;
3640 void __iomem *mmio = hpriv->base;
3641 int rc, attempts = 0, extra = 0;
3642 u32 sstatus;
3643 bool online;
3644
3645 mv_reset_channel(hpriv, mmio, ap->port_no);
3646 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3647 pp->pp_flags &=
3648 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3649
3650 /* Workaround for errata FEr SATA#10 (part 2) */
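/*
 * SStatus values tested below, per the standard SATA SCR0 layout
 * (DET in bits 3:0, SPD in bits 7:4, IPM in bits 11:8):
 * 0x0 = no device; 0x113/0x123 = link up at Gen1/Gen2 speed;
 * 0x121 = device detected but phy communication not established.
 */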
3651 do {
3652 const unsigned long *timing =
3653 sata_ehc_deb_timing(&link->eh_context);
3654
3655 rc = sata_link_hardreset(link, timing, deadline + extra,
3656 &online, NULL);
3657 rc = online ? -EAGAIN : rc;
3658 if (rc)
3659 return rc;
3660 sata_scr_read(link, SCR_STATUS, &sstatus);
3661 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3662 /* Force 1.5gb/s link speed and try again */
3663 mv_setup_ifcfg(mv_ap_base(ap), 0);
3664 if (time_after(jiffies + HZ, deadline))
3665 extra = HZ; /* only extend it once, max */
3666 }
3667 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3668 mv_save_cached_regs(ap);
3669 mv_edma_cfg(ap, 0, 0);
3670
3671 return rc;
3672 }
3673
3674 static void mv_eh_freeze(struct ata_port *ap)
3675 {
3676 mv_stop_edma(ap);
3677 mv_enable_port_irqs(ap, 0);
3678 }
3679
3680 static void mv_eh_thaw(struct ata_port *ap)
3681 {
3682 struct mv_host_priv *hpriv = ap->host->private_data;
3683 unsigned int port = ap->port_no;
3684 unsigned int hardport = mv_hardport_from_port(port);
3685 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3686 void __iomem *port_mmio = mv_ap_base(ap);
3687 u32 hc_irq_cause;
3688
3689 /* clear EDMA errors on this port */
3690 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3691
3692 /* clear pending irq events */
3693 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3694 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3695
3696 mv_enable_port_irqs(ap, ERR_IRQ);
3697 }
3698
3699 /**
3700 * mv_port_init - Perform some early initialization on a single port.
3701 * @port: libata data structure storing shadow register addresses
3702 * @port_mmio: base address of the port
3703 *
3704 * Initialize shadow register mmio addresses, clear outstanding
3705 * interrupts on the port, and unmask interrupts for the future
3706 * start of the port.
3707 *
3708 * LOCKING:
3709 * Inherited from caller.
3710 */
3711 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3712 {
3713 void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3714
3715 /* PIO related setup */
3717 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3718 port->error_addr =
3719 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3720 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3721 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3722 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3723 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3724 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3725 port->status_addr =
3726 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3727 /* special case: control/altstatus doesn't have ATA_REG_ address */
3728 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3729
3730 /* Clear any currently outstanding port interrupt conditions */
3731 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3732 writelfl(readl(serr), serr);
3733 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3734
3735 /* unmask all non-transient EDMA error interrupts */
3736 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3737
3738 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3739 readl(port_mmio + EDMA_CFG),
3740 readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3741 readl(port_mmio + EDMA_ERR_IRQ_MASK));
3742 }
3743
3744 static unsigned int mv_in_pcix_mode(struct ata_host *host)
3745 {
3746 struct mv_host_priv *hpriv = host->private_data;
3747 void __iomem *mmio = hpriv->base;
3748 u32 reg;
3749
3750 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3751 return 0; /* not PCI-X capable */
3752 reg = readl(mmio + MV_PCI_MODE);
3753 if ((reg & MV_PCI_MODE_MASK) == 0)
3754 return 0; /* conventional PCI mode */
3755 return 1; /* chip is in PCI-X mode */
3756 }
3757
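/*
 * As implemented below, PCI cut-through is always considered safe in
 * PCI-X mode; in conventional PCI mode it is safe only while the
 * Master Read Trigger (MRDTRIG) bit is clear.
 */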
3758 static int mv_pci_cut_through_okay(struct ata_host *host)
3759 {
3760 struct mv_host_priv *hpriv = host->private_data;
3761 void __iomem *mmio = hpriv->base;
3762 u32 reg;
3763
3764 if (!mv_in_pcix_mode(host)) {
3765 reg = readl(mmio + MV_PCI_COMMAND);
3766 if (reg & MV_PCI_COMMAND_MRDTRIG)
3767 return 0; /* not okay */
3768 }
3769 return 1; /* okay */
3770 }
3771
3772 static void mv_60x1b2_errata_pci7(struct ata_host *host)
3773 {
3774 struct mv_host_priv *hpriv = host->private_data;
3775 void __iomem *mmio = hpriv->base;
3776
3777 /* workaround for 60x1-B2 errata PCI#7 */
3778 if (mv_in_pcix_mode(host)) {
3779 u32 reg = readl(mmio + MV_PCI_COMMAND);
3780 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3781 }
3782 }
3783
3784 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3785 {
3786 struct pci_dev *pdev = to_pci_dev(host->dev);
3787 struct mv_host_priv *hpriv = host->private_data;
3788 u32 hp_flags = hpriv->hp_flags;
3789
3790 switch (board_idx) {
3791 case chip_5080:
3792 hpriv->ops = &mv5xxx_ops;
3793 hp_flags |= MV_HP_GEN_I;
3794
3795 switch (pdev->revision) {
3796 case 0x1:
3797 hp_flags |= MV_HP_ERRATA_50XXB0;
3798 break;
3799 case 0x3:
3800 hp_flags |= MV_HP_ERRATA_50XXB2;
3801 break;
3802 default:
3803 dev_warn(&pdev->dev,
3804 "Applying 50XXB2 workarounds to unknown rev\n");
3805 hp_flags |= MV_HP_ERRATA_50XXB2;
3806 break;
3807 }
3808 break;
3809
3810 case chip_504x:
3811 case chip_508x:
3812 hpriv->ops = &mv5xxx_ops;
3813 hp_flags |= MV_HP_GEN_I;
3814
3815 switch (pdev->revision) {
3816 case 0x0:
3817 hp_flags |= MV_HP_ERRATA_50XXB0;
3818 break;
3819 case 0x3:
3820 hp_flags |= MV_HP_ERRATA_50XXB2;
3821 break;
3822 default:
3823 dev_warn(&pdev->dev,
3824 "Applying B2 workarounds to unknown rev\n");
3825 hp_flags |= MV_HP_ERRATA_50XXB2;
3826 break;
3827 }
3828 break;
3829
3830 case chip_604x:
3831 case chip_608x:
3832 hpriv->ops = &mv6xxx_ops;
3833 hp_flags |= MV_HP_GEN_II;
3834
3835 switch (pdev->revision) {
3836 case 0x7:
3837 mv_60x1b2_errata_pci7(host);
3838 hp_flags |= MV_HP_ERRATA_60X1B2;
3839 break;
3840 case 0x9:
3841 hp_flags |= MV_HP_ERRATA_60X1C0;
3842 break;
3843 default:
3844 dev_warn(&pdev->dev,
3845 "Applying B2 workarounds to unknown rev\n");
3846 hp_flags |= MV_HP_ERRATA_60X1B2;
3847 break;
3848 }
3849 break;
3850
3851 case chip_7042:
3852 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3853 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3854 (pdev->device == 0x2300 || pdev->device == 0x2310))
3855 {
3856 /*
3857 * Highpoint RocketRAID PCIe 23xx series cards:
3858 *
3859 * Unconfigured drives are treated as "Legacy"
3860 * by the BIOS, and it overwrites sector 8 with
3861 * a "Lgcy" metadata block prior to Linux boot.
3862 *
3863 * Configured drives (RAID or JBOD) leave sector 8
3864 * alone, but instead overwrite a high numbered
3865 * sector for the RAID metadata. This sector can
3866 * be determined exactly, by truncating the physical
3867 * drive capacity to a nice even GB value.
3868 *
3869 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
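 * (e.g. a drive of 0x74706db0 sectors keeps its metadata at
 * sector 0x74706db0 & ~0xfffff = 0x74700000, i.e. capacity
 * truncated down to a 0x100000-sector boundary)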
3870 *
3871 * Warn the user, lest they think we're just buggy.
3872 */
3873 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3874 " BIOS CORRUPTS DATA on all attached drives,"
3875 " regardless of if/how they are configured."
3876 " BEWARE!\n");
3877 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3878 " use sectors 8-9 on \"Legacy\" drives,"
3879 " and avoid the final two gigabytes on"
3880 " all RocketRAID BIOS initialized drives.\n");
3881 }
3882 /* fall through */
3883 case chip_6042:
3884 hpriv->ops = &mv6xxx_ops;
3885 hp_flags |= MV_HP_GEN_IIE;
3886 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3887 hp_flags |= MV_HP_CUT_THROUGH;
3888
3889 switch (pdev->revision) {
3890 case 0x2: /* Rev.B0: the first/only public release */
3891 hp_flags |= MV_HP_ERRATA_60X1C0;
3892 break;
3893 default:
3894 dev_warn(&pdev->dev,
3895 "Applying 60X1C0 workarounds to unknown rev\n");
3896 hp_flags |= MV_HP_ERRATA_60X1C0;
3897 break;
3898 }
3899 break;
3900 case chip_soc:
3901 if (soc_is_65n(hpriv))
3902 hpriv->ops = &mv_soc_65n_ops;
3903 else
3904 hpriv->ops = &mv_soc_ops;
3905 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3906 MV_HP_ERRATA_60X1C0;
3907 break;
3908
3909 default:
3910 dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
3911 return 1;
3912 }
3913
3914 hpriv->hp_flags = hp_flags;
3915 if (hp_flags & MV_HP_PCIE) {
3916 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3917 hpriv->irq_mask_offset = PCIE_IRQ_MASK;
3918 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3919 } else {
3920 hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3921 hpriv->irq_mask_offset = PCI_IRQ_MASK;
3922 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3923 }
3924
3925 return 0;
3926 }
3927
3928 /**
3929 * mv_init_host - Perform some early initialization of the host.
3930 * @host: ATA host to initialize
3931 *
3932 * If possible, do an early global reset of the host. Then do
3933 * our port init and clear/unmask all/relevant host interrupts.
3934 *
3935 * LOCKING:
3936 * Inherited from caller.
3937 */
3938 static int mv_init_host(struct ata_host *host)
3939 {
3940 int rc = 0, n_hc, port, hc;
3941 struct mv_host_priv *hpriv = host->private_data;
3942 void __iomem *mmio = hpriv->base;
3943
3944 rc = mv_chip_id(host, hpriv->board_idx);
3945 if (rc)
3946 goto done;
3947
3948 if (IS_SOC(hpriv)) {
3949 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3950 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
3951 } else {
3952 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3953 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
3954 }
3955
3956 /* initialize shadow irq mask with register's value */
3957 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3958
3959 /* global interrupt mask: 0 == mask everything */
3960 mv_set_main_irq_mask(host, ~0, 0);
3961
3962 n_hc = mv_get_hc_count(host->ports[0]->flags);
3963
3964 for (port = 0; port < host->n_ports; port++)
3965 if (hpriv->ops->read_preamp)
3966 hpriv->ops->read_preamp(hpriv, port, mmio);
3967
3968 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3969 if (rc)
3970 goto done;
3971
3972 hpriv->ops->reset_flash(hpriv, mmio);
3973 hpriv->ops->reset_bus(host, mmio);
3974 hpriv->ops->enable_leds(hpriv, mmio);
3975
3976 for (port = 0; port < host->n_ports; port++) {
3977 struct ata_port *ap = host->ports[port];
3978 void __iomem *port_mmio = mv_port_base(mmio, port);
3979
3980 mv_port_init(&ap->ioaddr, port_mmio);
3981 }
3982
3983 for (hc = 0; hc < n_hc; hc++) {
3984 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3985
3986 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3987 "(before clear)=0x%08x\n", hc,
3988 readl(hc_mmio + HC_CFG),
3989 readl(hc_mmio + HC_IRQ_CAUSE));
3990
3991 /* Clear any currently outstanding hc interrupt conditions */
3992 writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3993 }
3994
3995 if (!IS_SOC(hpriv)) {
3996 /* Clear any currently outstanding host interrupt conditions */
3997 writelfl(0, mmio + hpriv->irq_cause_offset);
3998
3999 /* and unmask interrupt generation for host regs */
4000 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
4001 }
4002
4003 /*
4004 * enable only global host interrupts for now.
4005 * The per-port interrupts get done later as ports are set up.
4006 */
4007 mv_set_main_irq_mask(host, 0, PCI_ERR);
4008 mv_set_irq_coalescing(host, irq_coalescing_io_count,
4009 irq_coalescing_usecs);
4010 done:
4011 return rc;
4012 }
4013
4014 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
4015 {
4016 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
4017 MV_CRQB_Q_SZ, 0);
4018 if (!hpriv->crqb_pool)
4019 return -ENOMEM;
4020
4021 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
4022 MV_CRPB_Q_SZ, 0);
4023 if (!hpriv->crpb_pool)
4024 return -ENOMEM;
4025
4026 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
4027 MV_SG_TBL_SZ, 0);
4028 if (!hpriv->sg_tbl_pool)
4029 return -ENOMEM;
4030
4031 return 0;
4032 }
4033
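/*
 * Each MBus window is programmed through a register pair: WINDOW_CTRL(i)
 * packs the size mask (bits 31:16), attribute (bits 15:8), target ID
 * (bits 7:4) and an enable bit (bit 0), while WINDOW_BASE(i) holds the
 * window base address. All four windows are disabled before the DRAM
 * chip-selects are mapped.
 */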
4034 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
4035 const struct mbus_dram_target_info *dram)
4036 {
4037 int i;
4038
4039 for (i = 0; i < 4; i++) {
4040 writel(0, hpriv->base + WINDOW_CTRL(i));
4041 writel(0, hpriv->base + WINDOW_BASE(i));
4042 }
4043
4044 for (i = 0; i < dram->num_cs; i++) {
4045 const struct mbus_dram_window *cs = dram->cs + i;
4046
4047 writel(((cs->size - 1) & 0xffff0000) |
4048 (cs->mbus_attr << 8) |
4049 (dram->mbus_dram_target_id << 4) | 1,
4050 hpriv->base + WINDOW_CTRL(i));
4051 writel(cs->base, hpriv->base + WINDOW_BASE(i));
4052 }
4053 }
4054
4055 /**
4056 * mv_platform_probe - handle a positive probe of an SoC Marvell
4057 * host
4058 * @pdev: platform device found
4059 *
4060 * LOCKING:
4061 * Inherited from caller.
4062 */
4063 static int mv_platform_probe(struct platform_device *pdev)
4064 {
4065 const struct mv_sata_platform_data *mv_platform_data;
4066 const struct mbus_dram_target_info *dram;
4067 const struct ata_port_info *ppi[] =
4068 { &mv_port_info[chip_soc], NULL };
4069 struct ata_host *host;
4070 struct mv_host_priv *hpriv;
4071 struct resource *res;
4072 int n_ports = 0, irq = 0;
4073 int rc;
4074 int port;
4075
4076 ata_print_version_once(&pdev->dev, DRV_VERSION);
4077
4078 /*
4079 * Simple resource validation.
4080 */
4081 if (unlikely(pdev->num_resources != 2)) {
4082 dev_err(&pdev->dev, "invalid number of resources\n");
4083 return -EINVAL;
4084 }
4085
4086 /*
4087 * Get the register base first
4088 */
4089 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4090 if (res == NULL)
4091 return -EINVAL;
4092
4093 /* allocate host */
4094 if (pdev->dev.of_node) {
4095 of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
4096 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4097 } else {
4098 mv_platform_data = dev_get_platdata(&pdev->dev);
4099 n_ports = mv_platform_data->n_ports;
4100 irq = platform_get_irq(pdev, 0);
4101 }
4102
4103 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4104 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4105
4106 if (!host || !hpriv)
4107 return -ENOMEM;
4108 hpriv->port_clks = devm_kzalloc(&pdev->dev,
4109 sizeof(struct clk *) * n_ports,
4110 GFP_KERNEL);
4111 if (!hpriv->port_clks)
4112 return -ENOMEM;
4113 hpriv->port_phys = devm_kzalloc(&pdev->dev,
4114 sizeof(struct phy *) * n_ports,
4115 GFP_KERNEL);
4116 if (!hpriv->port_phys)
4117 return -ENOMEM;
4118 host->private_data = hpriv;
4119 hpriv->board_idx = chip_soc;
4120
4121 host->iomap = NULL;
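/*
 * On SoC parts the memory resource points directly at the SATAHC0
 * register block, while the driver's offset macros assume the
 * chip-wide base used on PCI parts, so the base pointer is biased
 * down by SATAHC0_REG_BASE to make those offsets resolve correctly.
 */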
4122 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4123 resource_size(res));
if (!hpriv->base)
return -ENOMEM;
4124 hpriv->base -= SATAHC0_REG_BASE;
4125
4126 hpriv->clk = clk_get(&pdev->dev, NULL);
4127 if (IS_ERR(hpriv->clk))
4128 dev_notice(&pdev->dev, "cannot get optional clkdev\n");
4129 else
4130 clk_prepare_enable(hpriv->clk);
4131
4132 for (port = 0; port < n_ports; port++) {
4133 char port_number[16];
4134 sprintf(port_number, "%d", port);
4135 hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4136 if (!IS_ERR(hpriv->port_clks[port]))
4137 clk_prepare_enable(hpriv->port_clks[port]);
4138
4139 sprintf(port_number, "port%d", port);
4140 hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
4141 port_number);
4142 if (IS_ERR(hpriv->port_phys[port])) {
4143 rc = PTR_ERR(hpriv->port_phys[port]);
4144 hpriv->port_phys[port] = NULL;
4145 if (rc != -EPROBE_DEFER)
4146 dev_warn(&pdev->dev, "error getting phy %d\n", rc);
4147
4148 /* Cleanup only the initialized ports */
4149 hpriv->n_ports = port;
4150 goto err;
4151 } else
4152 phy_power_on(hpriv->port_phys[port]);
4153 }
4154
4155 /* All the ports have been initialized */
4156 hpriv->n_ports = n_ports;
4157
4158 /*
4159 * (Re-)program MBUS remapping windows if we are asked to.
4160 */
4161 dram = mv_mbus_dram_info();
4162 if (dram)
4163 mv_conf_mbus_windows(hpriv, dram);
4164
4165 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4166 if (rc)
4167 goto err;
4168
4169 /*
4170 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
4171 * updated in the LP_PHY_CTL register.
4172 */
4173 if (pdev->dev.of_node &&
4174 of_device_is_compatible(pdev->dev.of_node,
4175 "marvell,armada-370-sata"))
4176 hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
4177
4178 /* initialize adapter */
4179 rc = mv_init_host(host);
4180 if (rc)
4181 goto err;
4182
4183 dev_info(&pdev->dev, "slots %u ports %d\n",
4184 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4185
4186 rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
4187 if (!rc)
4188 return 0;
4189
4190 err:
4191 if (!IS_ERR(hpriv->clk)) {
4192 clk_disable_unprepare(hpriv->clk);
4193 clk_put(hpriv->clk);
4194 }
4195 for (port = 0; port < hpriv->n_ports; port++) {
4196 if (!IS_ERR(hpriv->port_clks[port])) {
4197 clk_disable_unprepare(hpriv->port_clks[port]);
4198 clk_put(hpriv->port_clks[port]);
4199 }
4200 phy_power_off(hpriv->port_phys[port]);
4201 }
4202
4203 return rc;
4204 }
4205
4206 /**
4208 * mv_platform_remove - unplug a platform interface
4209 * @pdev: platform device
4210 *
4211 * A platform bus SATA device has been unplugged. Perform the needed
4212 * cleanup. Also called on module unload for any active devices.
4213 */
4214 static int mv_platform_remove(struct platform_device *pdev)
4215 {
4216 struct ata_host *host = platform_get_drvdata(pdev);
4217 struct mv_host_priv *hpriv = host->private_data;
4218 int port;
4219 ata_host_detach(host);
4220
4221 if (!IS_ERR(hpriv->clk)) {
4222 clk_disable_unprepare(hpriv->clk);
4223 clk_put(hpriv->clk);
4224 }
4225 for (port = 0; port < host->n_ports; port++) {
4226 if (!IS_ERR(hpriv->port_clks[port])) {
4227 clk_disable_unprepare(hpriv->port_clks[port]);
4228 clk_put(hpriv->port_clks[port]);
4229 }
4230 phy_power_off(hpriv->port_phys[port]);
4231 }
4232 return 0;
4233 }
4234
4235 #ifdef CONFIG_PM_SLEEP
4236 static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4237 {
4238 struct ata_host *host = platform_get_drvdata(pdev);
4239 if (host)
4240 return ata_host_suspend(host, state);
4241 else
4242 return 0;
4243 }
4244
4245 static int mv_platform_resume(struct platform_device *pdev)
4246 {
4247 struct ata_host *host = platform_get_drvdata(pdev);
4248 const struct mbus_dram_target_info *dram;
4249 int ret;
4250
4251 if (host) {
4252 struct mv_host_priv *hpriv = host->private_data;
4253
4254 /*
4255 * (Re-)program MBUS remapping windows if we are asked to.
4256 */
4257 dram = mv_mbus_dram_info();
4258 if (dram)
4259 mv_conf_mbus_windows(hpriv, dram);
4260
4261 /* initialize adapter */
4262 ret = mv_init_host(host);
4263 if (ret) {
4264 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4265 return ret;
4266 }
4267 ata_host_resume(host);
4268 }
4269
4270 return 0;
4271 }
4272 #else
4273 #define mv_platform_suspend NULL
4274 #define mv_platform_resume NULL
4275 #endif
4276
4277 #ifdef CONFIG_OF
4278 static const struct of_device_id mv_sata_dt_ids[] = {
4279 { .compatible = "marvell,armada-370-sata", },
4280 { .compatible = "marvell,orion-sata", },
4281 {},
4282 };
4283 MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4284 #endif
4285
4286 static struct platform_driver mv_platform_driver = {
4287 .probe = mv_platform_probe,
4288 .remove = mv_platform_remove,
4289 .suspend = mv_platform_suspend,
4290 .resume = mv_platform_resume,
4291 .driver = {
4292 .name = DRV_NAME,
4293 .of_match_table = of_match_ptr(mv_sata_dt_ids),
4294 },
4295 };
4296
4297
4298 #ifdef CONFIG_PCI
4299 static int mv_pci_init_one(struct pci_dev *pdev,
4300 const struct pci_device_id *ent);
4301 #ifdef CONFIG_PM_SLEEP
4302 static int mv_pci_device_resume(struct pci_dev *pdev);
4303 #endif
4304
4305
4306 static struct pci_driver mv_pci_driver = {
4307 .name = DRV_NAME,
4308 .id_table = mv_pci_tbl,
4309 .probe = mv_pci_init_one,
4310 .remove = ata_pci_remove_one,
4311 #ifdef CONFIG_PM_SLEEP
4312 .suspend = ata_pci_device_suspend,
4313 .resume = mv_pci_device_resume,
4314 #endif
4316 };
4317
4318 /* move to PCI layer or libata core? */
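/*
 * Prefer a 64-bit DMA mask; if the 64-bit coherent mask cannot be set,
 * fall back to a 32-bit coherent mask, and if 64-bit streaming DMA is
 * unavailable fall back to fully 32-bit operation.
 */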
4319 static int pci_go_64(struct pci_dev *pdev)
4320 {
4321 int rc;
4322
4323 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4324 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4325 if (rc) {
4326 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4327 if (rc) {
4328 dev_err(&pdev->dev,
4329 "64-bit DMA enable failed\n");
4330 return rc;
4331 }
4332 }
4333 } else {
4334 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4335 if (rc) {
4336 dev_err(&pdev->dev, "32-bit DMA enable failed\n");
4337 return rc;
4338 }
4339 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4340 if (rc) {
4341 dev_err(&pdev->dev,
4342 "32-bit consistent DMA enable failed\n");
4343 return rc;
4344 }
4345 }
4346
4347 return rc;
4348 }
4349
4350 /**
4351 * mv_print_info - Dump key info to kernel log for perusal.
4352 * @host: ATA host to print info about
4353 *
4354 * FIXME: complete this.
4355 *
4356 * LOCKING:
4357 * Inherited from caller.
4358 */
4359 static void mv_print_info(struct ata_host *host)
4360 {
4361 struct pci_dev *pdev = to_pci_dev(host->dev);
4362 struct mv_host_priv *hpriv = host->private_data;
4363 u8 scc;
4364 const char *scc_s, *gen;
4365
4366 /* Read the PCI class code to report how the chip presents itself
4367 * (SCSI vs. RAID storage controller).
4368 */
4369 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4370 if (scc == 0)
4371 scc_s = "SCSI";
4372 else if (scc == 0x01)
4373 scc_s = "RAID";
4374 else
4375 scc_s = "?";
4376
4377 if (IS_GEN_I(hpriv))
4378 gen = "I";
4379 else if (IS_GEN_II(hpriv))
4380 gen = "II";
4381 else if (IS_GEN_IIE(hpriv))
4382 gen = "IIE";
4383 else
4384 gen = "?";
4385
4386 dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4387 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4388 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4389 }
4390
4391 /**
4392 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
4393 * @pdev: PCI device found
4394 * @ent: PCI device ID entry for the matched host
4395 *
4396 * LOCKING:
4397 * Inherited from caller.
4398 */
4399 static int mv_pci_init_one(struct pci_dev *pdev,
4400 const struct pci_device_id *ent)
4401 {
4402 unsigned int board_idx = (unsigned int)ent->driver_data;
4403 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4404 struct ata_host *host;
4405 struct mv_host_priv *hpriv;
4406 int n_ports, port, rc;
4407
4408 ata_print_version_once(&pdev->dev, DRV_VERSION);
4409
4410 /* allocate host */
4411 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4412
4413 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4414 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4415 if (!host || !hpriv)
4416 return -ENOMEM;
4417 host->private_data = hpriv;
4418 hpriv->n_ports = n_ports;
4419 hpriv->board_idx = board_idx;
4420
4421 /* acquire resources */
4422 rc = pcim_enable_device(pdev);
4423 if (rc)
4424 return rc;
4425
4426 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4427 if (rc == -EBUSY)
4428 pcim_pin_device(pdev);
4429 if (rc)
4430 return rc;
4431 host->iomap = pcim_iomap_table(pdev);
4432 hpriv->base = host->iomap[MV_PRIMARY_BAR];
4433
4434 rc = pci_go_64(pdev);
4435 if (rc)
4436 return rc;
4437
4438 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4439 if (rc)
4440 return rc;
4441
4442 for (port = 0; port < host->n_ports; port++) {
4443 struct ata_port *ap = host->ports[port];
4444 void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4445 unsigned int offset = port_mmio - hpriv->base;
4446
4447 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4448 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4449 }
4450
4451 /* initialize adapter */
4452 rc = mv_init_host(host);
4453 if (rc)
4454 return rc;
4455
4456 /* Enable message-switched interrupts, if requested */
4457 if (msi && pci_enable_msi(pdev) == 0)
4458 hpriv->hp_flags |= MV_HP_FLAG_MSI;
4459
4460 mv_dump_pci_cfg(pdev, 0x68);
4461 mv_print_info(host);
4462
4463 pci_set_master(pdev);
4464 pci_try_set_mwi(pdev);
4465 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4466 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4467 }
4468
4469 #ifdef CONFIG_PM_SLEEP
4470 static int mv_pci_device_resume(struct pci_dev *pdev)
4471 {
4472 struct ata_host *host = pci_get_drvdata(pdev);
4473 int rc;
4474
4475 rc = ata_pci_device_do_resume(pdev);
4476 if (rc)
4477 return rc;
4478
4479 /* initialize adapter */
4480 rc = mv_init_host(host);
4481 if (rc)
4482 return rc;
4483
4484 ata_host_resume(host);
4485
4486 return 0;
4487 }
4488 #endif
4489 #endif
4490
4491 static int __init mv_init(void)
4492 {
4493 int rc = -ENODEV;
4494 #ifdef CONFIG_PCI
4495 rc = pci_register_driver(&mv_pci_driver);
4496 if (rc < 0)
4497 return rc;
4498 #endif
4499 rc = platform_driver_register(&mv_platform_driver);
4500
4501 #ifdef CONFIG_PCI
4502 if (rc < 0)
4503 pci_unregister_driver(&mv_pci_driver);
4504 #endif
4505 return rc;
4506 }
4507
4508 static void __exit mv_exit(void)
4509 {
4510 #ifdef CONFIG_PCI
4511 pci_unregister_driver(&mv_pci_driver);
4512 #endif
4513 platform_driver_unregister(&mv_platform_driver);
4514 }
4515
4516 MODULE_AUTHOR("Brett Russ");
4517 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4518 MODULE_LICENSE("GPL");
4519 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4520 MODULE_VERSION(DRV_VERSION);
4521 MODULE_ALIAS("platform:" DRV_NAME);
4522
4523 module_init(mv_init);
4524 module_exit(mv_exit);