/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.7"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
#define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)	(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
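/* Worked example of the boundary math above (illustrative values only, not
 * from the original source): MV_DMA_BOUNDARY is the low-16-bit mask, so one
 * ePRD entry may describe at most 64KB and may not cross a 64KB boundary.
 * For a segment at addr 0x1234f000 with sg_len 0x3000: offset = addr & 0xffff
 * = 0xf000, and offset + sg_len = 0x12000 > 0x10000, so the first ePRD gets
 * len = 0x10000 - 0xf000 = 0x1000 and the remaining 0x2000 spills into the
 * next entry -- see mv_fill_sg() below.
 */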
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};
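/* Per-port DMA memory layout implied by the sizes above (a sketch, not from
 * the original source): one MV_PORT_PRIV_DMA_SZ chunk is carved up as
 *
 *   offset 0x000: CRQB ring,   32 slots * 32B = 1KB  (1KB aligned)
 *   offset 0x400: CRPB ring,   32 slots *  8B = 256B (256B aligned)
 *   offset 0x500: ePRD table, 176 entries * 16B      (16B aligned)
 *
 * mv_port_start() below performs exactly this carving.
 */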
struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
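/* Example of the register address math above (illustrative): for port 5,
 * mv_hc_from_port(5) = 5 >> 2 = 1 and mv_hardport_from_port(5) = 5 & 3 = 1,
 * so mv_port_base(base, 5) = base + 0x20000 + 1*0x10000   (HC 1)
 *                                 + 0x2000                (arbiter block)
 *                                 + 1*0x2000              (hard port 1)
 *                          = base + 0x34000.
 */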
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->mmio_base, ap->port_no);
}
static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_irq_clear(struct ata_port *ap)
{
}
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
	}
}
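/* Timing note on the polling loop above (as reconstructed): 1000 iterations
 * of udelay(100) bound the wait at roughly 100ms before declaring that the
 * eDMA engine failed to stop.
 */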
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ",readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev,b,&dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}
static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~0x1f;		/* clear queue depth */
	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* dis RX PM port mask */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab host q cache */
		cfg |= (1 << 22);	/* enab cutthrough */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
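/* Note on the "(dma >> 16) >> 16" idiom used above: dma_addr_t may be only
 * 32 bits wide, and shifting a 32-bit value by 32 is undefined in C, so the
 * upper half of the address is extracted with two 16-bit shifts instead.
 */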
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i = 0;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr;
		u32 sg_len, len, offset;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & MV_DMA_BOUNDARY;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			i++;
		}
	}
}
static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
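/* MV_MAX_Q_DEPTH is a power of two, so the AND above wraps the ring index:
 * e.g. (31 + 1) & 0x1f == 0 (illustrative).
 */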
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
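/* Illustrative packing (example values assumed, not from the original
 * source): mv_crqb_pack_cmd(cw, 0x25, ATA_REG_CMD, 1) with ATA_REG_CMD == 7
 * yields 0x25 | (7 << 8) | (0x2 << 11) | (1 << 15) == 0x9725 -- register
 * data in bits 7:0, register address in bits 10:8, control bits above.
 */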
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr   = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
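/* The in/out pointer registers double as queue base-address registers
 * (a sketch of the encoding, inferred from the masks above): the request
 * ring is 1KB aligned, the slot index lives in bits 9:5
 * (EDMA_REQ_Q_PTR_SHIFT == 5, 32 slots of 32B each), and the higher bits
 * hold the ring base.  E.g. bumping the index to 3 writes
 * (in_ptr & EDMA_REQ_Q_BASE_LO_MASK) | (3 << 5) (illustrative).
 */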
/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We check indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr   = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
			>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
/**
 *      mv_interrupt -
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->mmio_base;
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
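/* Layout assumed by the math above: within an HC, the per-port PHY register
 * blocks start at hc_base + 0x100 and are 0x100 apart, so hard port 2 maps
 * to hc_base + 0x300 (illustrative).
 */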
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = sc_reg_in * sizeof(u32);
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(mmio + ofs);
	else
		return (u32) ofs;
}
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, mmio + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->mmio_base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}
/**
 *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}

static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
/**
 *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
 *      @ap: ATA channel to manipulate
 *
 *      Intent is to clear all pending error conditions, reset the
 *      chip/bus, fail the command, and move on.
 *
 *      LOCKING:
 *      This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
			 to_pci_dev(ap->host->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
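/* The shadow taskfile registers are 32-bit-spaced copies of the ATA block
 * (worked example, assuming ATA_REG_STATUS == 7): with SHD_BLK_OFS == 0x100,
 * data lands at port base + 0x100, status at +0x100 + 4*7 = +0x11c, and
 * control/altstatus at +0x120 (SHD_CTL_AST_OFS).
 */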
static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
		      unsigned int board_idx)
{
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @pdev: host PCI device
 *      @probe_ent: early data struct representing the host
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
			unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	struct mv_host_priv *hpriv = probe_ent->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(pdev, hpriv, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(probe_ent->port_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < probe_ent->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < probe_ent->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @probe_ent: early data struct representing the host
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	struct mv_host_priv *hpriv = probe_ent->private_data;
	u8 rev_id, scc;
	const char *scc_s;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
	       "%u slots %u ports %s mode IRQ via %s\n",
	       (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct device *dev = &pdev->dev;
	struct ata_probe_ent *probe_ent;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pcim_pin_device(pdev);
		return rc;
	}

	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL)
		return -ENOMEM;

	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pcim_iomap(pdev, MV_PRIMARY_BAR, 0);
	if (mmio_base == NULL)
		return -ENOMEM;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->port_flags = mv_port_info[board_idx].flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_init_host(pdev, probe_ent, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && !pci_enable_msi(pdev))
		hpriv->hp_flags |= MV_HP_FLAG_MSI;
	else
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(probe_ent);

	if (ata_device_add(probe_ent) == 0)
		return -ENODEV;

	devm_kfree(dev, probe_ent);
	return 0;
}
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);