drivers/ata/ahci.c
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "3.0"
51
52 static int ahci_skip_host_reset;
53 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
54 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
55
56 static int ahci_enable_alpm(struct ata_port *ap,
57 enum link_pm policy);
58 static void ahci_disable_alpm(struct ata_port *ap);
59 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
60 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
61 size_t size);
62 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
63 ssize_t size);
64 #define MAX_SLOTS 8
65
66 enum {
67 AHCI_PCI_BAR = 5,
68 AHCI_MAX_PORTS = 32,
69 AHCI_MAX_SG = 168, /* hardware max is 64K */
70 AHCI_DMA_BOUNDARY = 0xffffffff,
71 AHCI_MAX_CMDS = 32,
72 AHCI_CMD_SZ = 32,
73 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
74 AHCI_RX_FIS_SZ = 256,
75 AHCI_CMD_TBL_CDB = 0x40,
76 AHCI_CMD_TBL_HDR_SZ = 0x80,
77 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
78 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
79 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
80 AHCI_RX_FIS_SZ,
81 AHCI_IRQ_ON_SG = (1 << 31),
82 AHCI_CMD_ATAPI = (1 << 5),
83 AHCI_CMD_WRITE = (1 << 6),
84 AHCI_CMD_PREFETCH = (1 << 7),
85 AHCI_CMD_RESET = (1 << 8),
86 AHCI_CMD_CLR_BUSY = (1 << 10),
87
88 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
89 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
90 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
91
92 board_ahci = 0,
93 board_ahci_vt8251 = 1,
94 board_ahci_ign_iferr = 2,
95 board_ahci_sb600 = 3,
96 board_ahci_mv = 4,
97 board_ahci_sb700 = 5,
98 board_ahci_mcp65 = 6,
99 board_ahci_nopmp = 7,
100
101 /* global controller registers */
102 HOST_CAP = 0x00, /* host capabilities */
103 HOST_CTL = 0x04, /* global host control */
104 HOST_IRQ_STAT = 0x08, /* interrupt status */
105 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
106 HOST_VERSION = 0x10, /* AHCI spec. version compliance */
107 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
108 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
109
110 /* HOST_CTL bits */
111 HOST_RESET = (1 << 0), /* reset controller; self-clear */
112 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
113 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
114
115 /* HOST_CAP bits */
116 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
117 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
118 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
119 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
120 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
121 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
122 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
123 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
124 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
125
126 /* registers for each SATA port */
127 PORT_LST_ADDR = 0x00, /* command list DMA addr */
128 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
129 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
130 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
131 PORT_IRQ_STAT = 0x10, /* interrupt status */
132 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
133 PORT_CMD = 0x18, /* port command */
134 PORT_TFDATA = 0x20, /* taskfile data */
135 PORT_SIG = 0x24, /* device TF signature */
136 PORT_CMD_ISSUE = 0x38, /* command issue */
137 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
138 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
139 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
140 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
141 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
142
143 /* PORT_IRQ_{STAT,MASK} bits */
144 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
145 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
146 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
147 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
148 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
149 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
150 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
151 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
152
153 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
154 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
155 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
156 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
157 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
158 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
159 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
160 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
161 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
162
163 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
164 PORT_IRQ_IF_ERR |
165 PORT_IRQ_CONNECT |
166 PORT_IRQ_PHYRDY |
167 PORT_IRQ_UNK_FIS |
168 PORT_IRQ_BAD_PMP,
169 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
170 PORT_IRQ_TF_ERR |
171 PORT_IRQ_HBUS_DATA_ERR,
172 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
173 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
174 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
175
176 /* PORT_CMD bits */
177 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
178 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
179 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
180 PORT_CMD_PMP = (1 << 17), /* PMP attached */
181 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
182 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
183 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
184 PORT_CMD_CLO = (1 << 3), /* Command list override */
185 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
186 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
187 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
188
189 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
190 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
191 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
192 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
193
194 /* hpriv->flags bits */
195 AHCI_HFLAG_NO_NCQ = (1 << 0),
196 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
197 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
198 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
199 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
200 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
201 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
202 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
203 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
204 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
205
206 /* ap->flags bits */
207
208 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
209 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
210 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
211 ATA_FLAG_IPM,
212
213 ICH_MAP = 0x90, /* ICH MAP register */
214
215 /* em_ctl bits */
216 EM_CTL_RST = (1 << 9), /* Reset */
217 EM_CTL_TM = (1 << 8), /* Transmit Message */
218 EM_CTL_ALHD = (1 << 26), /* Activity LED */
219 };
220
221 struct ahci_cmd_hdr {
222 __le32 opts;
223 __le32 status;
224 __le32 tbl_addr;
225 __le32 tbl_addr_hi;
226 __le32 reserved[4];
227 };
228
229 struct ahci_sg {
230 __le32 addr;
231 __le32 addr_hi;
232 __le32 reserved;
233 __le32 flags_size;
234 };
235
236 struct ahci_em_priv {
237 enum sw_activity blink_policy;
238 struct timer_list timer;
239 unsigned long saved_activity;
240 unsigned long activity;
241 unsigned long led_state;
242 };
243
244 struct ahci_host_priv {
245 unsigned int flags; /* AHCI_HFLAG_* */
246 u32 cap; /* cap to use */
247 u32 port_map; /* port map to use */
248 u32 saved_cap; /* saved initial cap */
249 u32 saved_port_map; /* saved initial port_map */
250 u32 em_loc; /* enclosure management location */
251 };
252
253 struct ahci_port_priv {
254 struct ata_link *active_link;
255 struct ahci_cmd_hdr *cmd_slot;
256 dma_addr_t cmd_slot_dma;
257 void *cmd_tbl;
258 dma_addr_t cmd_tbl_dma;
259 void *rx_fis;
260 dma_addr_t rx_fis_dma;
261 /* for NCQ spurious interrupt analysis */
262 unsigned int ncq_saw_d2h:1;
263 unsigned int ncq_saw_dmas:1;
264 unsigned int ncq_saw_sdb:1;
265 u32 intr_mask; /* interrupts to enable */
266 struct ahci_em_priv em_priv[MAX_SLOTS];/* enclosure management info
267 * per PM slot */
268 };
269
270 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
271 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
272 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
273 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
274 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
275 static int ahci_port_start(struct ata_port *ap);
276 static void ahci_port_stop(struct ata_port *ap);
277 static void ahci_qc_prep(struct ata_queued_cmd *qc);
278 static void ahci_freeze(struct ata_port *ap);
279 static void ahci_thaw(struct ata_port *ap);
280 static void ahci_pmp_attach(struct ata_port *ap);
281 static void ahci_pmp_detach(struct ata_port *ap);
282 static int ahci_softreset(struct ata_link *link, unsigned int *class,
283 unsigned long deadline);
284 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
285 unsigned long deadline);
286 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
287 unsigned long deadline);
288 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
289 unsigned long deadline);
290 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
291 unsigned long deadline);
292 static void ahci_postreset(struct ata_link *link, unsigned int *class);
293 static void ahci_error_handler(struct ata_port *ap);
294 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
295 static int ahci_port_resume(struct ata_port *ap);
296 static void ahci_dev_config(struct ata_device *dev);
297 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
298 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
299 u32 opts);
300 #ifdef CONFIG_PM
301 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
302 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
303 static int ahci_pci_device_resume(struct pci_dev *pdev);
304 #endif
305 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
306 static ssize_t ahci_activity_store(struct ata_device *dev,
307 enum sw_activity val);
308 static void ahci_init_sw_activity(struct ata_link *link);
309
310 static struct device_attribute *ahci_shost_attrs[] = {
311 &dev_attr_link_power_management_policy,
312 &dev_attr_em_message_type,
313 &dev_attr_em_message,
314 NULL
315 };
316
317 static struct device_attribute *ahci_sdev_attrs[] = {
318 &dev_attr_sw_activity,
319 &dev_attr_unload_heads,
320 NULL
321 };
322
323 static struct scsi_host_template ahci_sht = {
324 ATA_NCQ_SHT(DRV_NAME),
325 .can_queue = AHCI_MAX_CMDS - 1,
326 .sg_tablesize = AHCI_MAX_SG,
327 .dma_boundary = AHCI_DMA_BOUNDARY,
328 .shost_attrs = ahci_shost_attrs,
329 .sdev_attrs = ahci_sdev_attrs,
330 };
331
332 static struct ata_port_operations ahci_ops = {
333 .inherits = &sata_pmp_port_ops,
334
335 .qc_defer = sata_pmp_qc_defer_cmd_switch,
336 .qc_prep = ahci_qc_prep,
337 .qc_issue = ahci_qc_issue,
338 .qc_fill_rtf = ahci_qc_fill_rtf,
339
340 .freeze = ahci_freeze,
341 .thaw = ahci_thaw,
342 .softreset = ahci_softreset,
343 .hardreset = ahci_hardreset,
344 .postreset = ahci_postreset,
345 .pmp_softreset = ahci_softreset,
346 .error_handler = ahci_error_handler,
347 .post_internal_cmd = ahci_post_internal_cmd,
348 .dev_config = ahci_dev_config,
349
350 .scr_read = ahci_scr_read,
351 .scr_write = ahci_scr_write,
352 .pmp_attach = ahci_pmp_attach,
353 .pmp_detach = ahci_pmp_detach,
354
355 .enable_pm = ahci_enable_alpm,
356 .disable_pm = ahci_disable_alpm,
357 .em_show = ahci_led_show,
358 .em_store = ahci_led_store,
359 .sw_activity_show = ahci_activity_show,
360 .sw_activity_store = ahci_activity_store,
361 #ifdef CONFIG_PM
362 .port_suspend = ahci_port_suspend,
363 .port_resume = ahci_port_resume,
364 #endif
365 .port_start = ahci_port_start,
366 .port_stop = ahci_port_stop,
367 };
368
369 static struct ata_port_operations ahci_vt8251_ops = {
370 .inherits = &ahci_ops,
371 .hardreset = ahci_vt8251_hardreset,
372 };
373
374 static struct ata_port_operations ahci_p5wdh_ops = {
375 .inherits = &ahci_ops,
376 .hardreset = ahci_p5wdh_hardreset,
377 };
378
379 static struct ata_port_operations ahci_sb600_ops = {
380 .inherits = &ahci_ops,
381 .softreset = ahci_sb600_softreset,
382 .pmp_softreset = ahci_sb600_softreset,
383 };
384
385 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
386
387 static const struct ata_port_info ahci_port_info[] = {
388 /* board_ahci */
389 {
390 .flags = AHCI_FLAG_COMMON,
391 .pio_mask = 0x1f, /* pio0-4 */
392 .udma_mask = ATA_UDMA6,
393 .port_ops = &ahci_ops,
394 },
395 /* board_ahci_vt8251 */
396 {
397 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
398 .flags = AHCI_FLAG_COMMON,
399 .pio_mask = 0x1f, /* pio0-4 */
400 .udma_mask = ATA_UDMA6,
401 .port_ops = &ahci_vt8251_ops,
402 },
403 /* board_ahci_ign_iferr */
404 {
405 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
406 .flags = AHCI_FLAG_COMMON,
407 .pio_mask = 0x1f, /* pio0-4 */
408 .udma_mask = ATA_UDMA6,
409 .port_ops = &ahci_ops,
410 },
411 /* board_ahci_sb600 */
412 {
413 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
414 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
415 AHCI_HFLAG_SECT255),
416 .flags = AHCI_FLAG_COMMON,
417 .pio_mask = 0x1f, /* pio0-4 */
418 .udma_mask = ATA_UDMA6,
419 .port_ops = &ahci_sb600_ops,
420 },
421 /* board_ahci_mv */
422 {
423 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
424 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
425 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
426 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
427 .pio_mask = 0x1f, /* pio0-4 */
428 .udma_mask = ATA_UDMA6,
429 .port_ops = &ahci_ops,
430 },
431 /* board_ahci_sb700 */
432 {
433 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
434 .flags = AHCI_FLAG_COMMON,
435 .pio_mask = 0x1f, /* pio0-4 */
436 .udma_mask = ATA_UDMA6,
437 .port_ops = &ahci_sb600_ops,
438 },
439 /* board_ahci_mcp65 */
440 {
441 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
442 .flags = AHCI_FLAG_COMMON,
443 .pio_mask = 0x1f, /* pio0-4 */
444 .udma_mask = ATA_UDMA6,
445 .port_ops = &ahci_ops,
446 },
447 /* board_ahci_nopmp */
448 {
449 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
450 .flags = AHCI_FLAG_COMMON,
451 .pio_mask = 0x1f, /* pio0-4 */
452 .udma_mask = ATA_UDMA6,
453 .port_ops = &ahci_ops,
454 },
455 };
456
457 static const struct pci_device_id ahci_pci_tbl[] = {
458 /* Intel */
459 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
460 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
461 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
462 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
463 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
464 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
465 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
466 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
467 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
468 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
469 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
470 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
471 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
472 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
473 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
474 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
475 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
476 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
477 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
478 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
479 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
480 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
481 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
482 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
483 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
484 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
485 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
486 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
487 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
488 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
489 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
490 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
491 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
492 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
493 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
494
495 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
496 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
497 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
498
499 /* ATI */
500 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
501 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
502 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
503 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
504 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
505 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
506 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
507
508 /* VIA */
509 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
510 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
511
512 /* NVIDIA */
513 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
514 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
515 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
516 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
517 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
518 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
519 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
520 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
521 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
522 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
523 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
524 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
525 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
526 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
527 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
528 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
529 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
530 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
531 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
532 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
533 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
534 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
535 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
536 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
537 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
538 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
539 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
540 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
541 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
542 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
543 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
544 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
545 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
546 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
547 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
548 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
549 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
550 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
551 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
552 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
553 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
554 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
555 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
556 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
557 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
558 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
559 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
560 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
561 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
562 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
563 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
564 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
565 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
566 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
567 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
568 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
569 { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
570 { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
571 { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
572 { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
573 { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
574 { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
575 { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
576 { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
577 { PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci }, /* MCP7B */
578 { PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci }, /* MCP7B */
579 { PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci }, /* MCP7B */
580 { PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */
581
582 /* SiS */
583 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
584 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
585 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
586
587 /* Marvell */
588 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
589 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
590
591 /* Generic, PCI class code for AHCI */
592 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
593 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
594
595 { } /* terminate list */
596 };
597
598
599 static struct pci_driver ahci_pci_driver = {
600 .name = DRV_NAME,
601 .id_table = ahci_pci_tbl,
602 .probe = ahci_init_one,
603 .remove = ata_pci_remove_one,
604 #ifdef CONFIG_PM
605 .suspend = ahci_pci_device_suspend,
606 .resume = ahci_pci_device_resume,
607 #endif
608 };
609
610 static int ahci_em_messages = 1;
611 module_param(ahci_em_messages, int, 0444);
612 /* add other LED protocol types when they become supported */
613 MODULE_PARM_DESC(ahci_em_messages,
614 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");
615
616 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
617 static int marvell_enable;
618 #else
619 static int marvell_enable = 1;
620 #endif
621 module_param(marvell_enable, int, 0644);
622 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
623
624
625 static inline int ahci_nr_ports(u32 cap)
626 {
627 return (cap & 0x1f) + 1;
628 }
629
630 static inline void __iomem *__ahci_port_base(struct ata_host *host,
631 unsigned int port_no)
632 {
633 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
634
635 return mmio + 0x100 + (port_no * 0x80);
636 }
637
638 static inline void __iomem *ahci_port_base(struct ata_port *ap)
639 {
640 return __ahci_port_base(ap->host, ap->port_no);
641 }
642
643 static void ahci_enable_ahci(void __iomem *mmio)
644 {
645 int i;
646 u32 tmp;
647
648 /* turn on AHCI_EN */
649 tmp = readl(mmio + HOST_CTL);
650 if (tmp & HOST_AHCI_EN)
651 return;
652
653 /* Some controllers need AHCI_EN to be written multiple times.
654 * Try a few times before giving up.
655 */
656 for (i = 0; i < 5; i++) {
657 tmp |= HOST_AHCI_EN;
658 writel(tmp, mmio + HOST_CTL);
659 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
660 if (tmp & HOST_AHCI_EN)
661 return;
662 msleep(10);
663 }
664
665 WARN_ON(1);
666 }
667
668 /**
669 * ahci_save_initial_config - Save and fixup initial config values
670 * @pdev: target PCI device
671 * @hpriv: host private area to store config values
672 *
673 * Some registers containing configuration info might be set up by
674 * the BIOS and might be cleared on reset. This function saves the
675 * initial values of those registers into @hpriv such that they
676 * can be restored after controller reset.
677 *
678 * If inconsistent, config values are fixed up by this function.
679 *
680 * LOCKING:
681 * None.
682 */
683 static void ahci_save_initial_config(struct pci_dev *pdev,
684 struct ahci_host_priv *hpriv)
685 {
686 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
687 u32 cap, port_map;
688 int i;
689 int mv;
690
691 /* make sure AHCI mode is enabled before accessing CAP */
692 ahci_enable_ahci(mmio);
693
694 /* Values prefixed with saved_ are written back to host after
695 * reset. Values without are used for driver operation.
696 */
697 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
698 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
699
700 /* some chips have errata preventing 64bit use */
701 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
702 dev_printk(KERN_INFO, &pdev->dev,
703 "controller can't do 64bit DMA, forcing 32bit\n");
704 cap &= ~HOST_CAP_64;
705 }
706
707 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
708 dev_printk(KERN_INFO, &pdev->dev,
709 "controller can't do NCQ, turning off CAP_NCQ\n");
710 cap &= ~HOST_CAP_NCQ;
711 }
712
713 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
714 dev_printk(KERN_INFO, &pdev->dev,
715 "controller can do NCQ, turning on CAP_NCQ\n");
716 cap |= HOST_CAP_NCQ;
717 }
718
719 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
720 dev_printk(KERN_INFO, &pdev->dev,
721 "controller can't do PMP, turning off CAP_PMP\n");
722 cap &= ~HOST_CAP_PMP;
723 }
724
725 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
726 port_map != 1) {
727 dev_printk(KERN_INFO, &pdev->dev,
728 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
729 port_map, 1);
730 port_map = 1;
731 }
732
733 /*
734 * Temporary Marvell 6145 hack: PATA port presence
735 * is asserted through the standard AHCI port
736 * presence register, as bit 4 (counting from 0)
737 */
738 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
739 if (pdev->device == 0x6121)
740 mv = 0x3;
741 else
742 mv = 0xf;
743 dev_printk(KERN_ERR, &pdev->dev,
744 "MV_AHCI HACK: port_map %x -> %x\n",
745 port_map,
746 port_map & mv);
747 dev_printk(KERN_ERR, &pdev->dev,
748 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
749
750 port_map &= mv;
751 }
752
753 /* cross check port_map and cap.n_ports */
754 if (port_map) {
755 int map_ports = 0;
756
757 for (i = 0; i < AHCI_MAX_PORTS; i++)
758 if (port_map & (1 << i))
759 map_ports++;
760
761 /* If PI has more ports than n_ports, whine, clear
762 * port_map and let it be generated from n_ports.
763 */
764 if (map_ports > ahci_nr_ports(cap)) {
765 dev_printk(KERN_WARNING, &pdev->dev,
766 "implemented port map (0x%x) contains more "
767 "ports than nr_ports (%u), using nr_ports\n",
768 port_map, ahci_nr_ports(cap));
769 port_map = 0;
770 }
771 }
772
773 /* fabricate port_map from cap.nr_ports */
774 if (!port_map) {
775 port_map = (1 << ahci_nr_ports(cap)) - 1;
776 dev_printk(KERN_WARNING, &pdev->dev,
777 "forcing PORTS_IMPL to 0x%x\n", port_map);
778
779 /* write the fixed up value to the PI register */
780 hpriv->saved_port_map = port_map;
781 }
782
783 /* record values to use during operation */
784 hpriv->cap = cap;
785 hpriv->port_map = port_map;
786 }
787
788 /**
789 * ahci_restore_initial_config - Restore initial config
790 * @host: target ATA host
791 *
792 * Restore initial config stored by ahci_save_initial_config().
793 *
794 * LOCKING:
795 * None.
796 */
797 static void ahci_restore_initial_config(struct ata_host *host)
798 {
799 struct ahci_host_priv *hpriv = host->private_data;
800 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
801
802 writel(hpriv->saved_cap, mmio + HOST_CAP);
803 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
804 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
805 }
806
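/*
 * ahci_scr_offset - map an SCR register index to its AHCI port offset.
 * Returns the offset within the port MMIO area, or 0 if the register
 * is not supported (e.g. SCR_NOTIFICATION without HOST_CAP_SNTF).
 */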
807 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
808 {
809 static const int offset[] = {
810 [SCR_STATUS] = PORT_SCR_STAT,
811 [SCR_CONTROL] = PORT_SCR_CTL,
812 [SCR_ERROR] = PORT_SCR_ERR,
813 [SCR_ACTIVE] = PORT_SCR_ACT,
814 [SCR_NOTIFICATION] = PORT_SCR_NTF,
815 };
816 struct ahci_host_priv *hpriv = ap->host->private_data;
817
818 if (sc_reg < ARRAY_SIZE(offset) &&
819 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
820 return offset[sc_reg];
821 return 0;
822 }
823
824 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
825 {
826 void __iomem *port_mmio = ahci_port_base(link->ap);
827 int offset = ahci_scr_offset(link->ap, sc_reg);
828
829 if (offset) {
830 *val = readl(port_mmio + offset);
831 return 0;
832 }
833 return -EINVAL;
834 }
835
836 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
837 {
838 void __iomem *port_mmio = ahci_port_base(link->ap);
839 int offset = ahci_scr_offset(link->ap, sc_reg);
840
841 if (offset) {
842 writel(val, port_mmio + offset);
843 return 0;
844 }
845 return -EINVAL;
846 }
847
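/* Set PxCMD.ST to start the port's command list DMA engine. */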
848 static void ahci_start_engine(struct ata_port *ap)
849 {
850 void __iomem *port_mmio = ahci_port_base(ap);
851 u32 tmp;
852
853 /* start DMA */
854 tmp = readl(port_mmio + PORT_CMD);
855 tmp |= PORT_CMD_START;
856 writel(tmp, port_mmio + PORT_CMD);
857 readl(port_mmio + PORT_CMD); /* flush */
858 }
859
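/*
 * Clear PxCMD.ST and wait for PxCMD.CR (LIST_ON) to clear; the spec
 * allows up to 500 msec.  Returns 0 on success, -EIO if the engine
 * fails to stop.
 */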
860 static int ahci_stop_engine(struct ata_port *ap)
861 {
862 void __iomem *port_mmio = ahci_port_base(ap);
863 u32 tmp;
864
865 tmp = readl(port_mmio + PORT_CMD);
866
867 /* check if the HBA is idle */
868 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
869 return 0;
870
871 /* setting HBA to idle */
872 tmp &= ~PORT_CMD_START;
873 writel(tmp, port_mmio + PORT_CMD);
874
875 /* wait for engine to stop. This could be as long as 500 msec */
876 tmp = ata_wait_register(port_mmio + PORT_CMD,
877 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
878 if (tmp & PORT_CMD_LIST_ON)
879 return -EIO;
880
881 return 0;
882 }
883
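/*
 * Program the command list and received-FIS DMA addresses and set
 * PxCMD.FRE so the port starts accepting FISes from the device.
 */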
884 static void ahci_start_fis_rx(struct ata_port *ap)
885 {
886 void __iomem *port_mmio = ahci_port_base(ap);
887 struct ahci_host_priv *hpriv = ap->host->private_data;
888 struct ahci_port_priv *pp = ap->private_data;
889 u32 tmp;
890
891 /* set FIS registers */
892 if (hpriv->cap & HOST_CAP_64)
893 writel((pp->cmd_slot_dma >> 16) >> 16,
894 port_mmio + PORT_LST_ADDR_HI);
895 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
896
897 if (hpriv->cap & HOST_CAP_64)
898 writel((pp->rx_fis_dma >> 16) >> 16,
899 port_mmio + PORT_FIS_ADDR_HI);
900 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
901
902 /* enable FIS reception */
903 tmp = readl(port_mmio + PORT_CMD);
904 tmp |= PORT_CMD_FIS_RX;
905 writel(tmp, port_mmio + PORT_CMD);
906
907 /* flush */
908 readl(port_mmio + PORT_CMD);
909 }
910
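/*
 * Clear PxCMD.FRE and wait for PxCMD.FR (FIS_ON) to clear.  Returns 0
 * on success, -EBUSY if FIS reception does not stop within 1000 msec.
 */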
911 static int ahci_stop_fis_rx(struct ata_port *ap)
912 {
913 void __iomem *port_mmio = ahci_port_base(ap);
914 u32 tmp;
915
916 /* disable FIS reception */
917 tmp = readl(port_mmio + PORT_CMD);
918 tmp &= ~PORT_CMD_FIS_RX;
919 writel(tmp, port_mmio + PORT_CMD);
920
921 /* wait for completion, spec says 500ms, give it 1000 */
922 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
923 PORT_CMD_FIS_ON, 10, 1000);
924 if (tmp & PORT_CMD_FIS_ON)
925 return -EBUSY;
926
927 return 0;
928 }
929
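/*
 * Spin up the device (if staggered spin-up is supported) and bring the
 * interface to the active state.
 */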
930 static void ahci_power_up(struct ata_port *ap)
931 {
932 struct ahci_host_priv *hpriv = ap->host->private_data;
933 void __iomem *port_mmio = ahci_port_base(ap);
934 u32 cmd;
935
936 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
937
938 /* spin up device */
939 if (hpriv->cap & HOST_CAP_SSS) {
940 cmd |= PORT_CMD_SPIN_UP;
941 writel(cmd, port_mmio + PORT_CMD);
942 }
943
944 /* wake up link */
945 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
946 }
947
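/*
 * Turn aggressive link power management off: clear PxCMD.ALPE/ASP,
 * force the interface back to the active state and re-enable the
 * PhyRdy change interrupt that was masked while ALPM was on.
 */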
948 static void ahci_disable_alpm(struct ata_port *ap)
949 {
950 struct ahci_host_priv *hpriv = ap->host->private_data;
951 void __iomem *port_mmio = ahci_port_base(ap);
952 u32 cmd;
953 struct ahci_port_priv *pp = ap->private_data;
954
955 /* IPM bits should be disabled by libata-core */
956 /* get the existing command bits */
957 cmd = readl(port_mmio + PORT_CMD);
958
959 /* disable ALPM and ASP */
960 cmd &= ~PORT_CMD_ASP;
961 cmd &= ~PORT_CMD_ALPE;
962
963 /* force the interface back to active */
964 cmd |= PORT_CMD_ICC_ACTIVE;
965
966 /* write out new cmd value */
967 writel(cmd, port_mmio + PORT_CMD);
968 cmd = readl(port_mmio + PORT_CMD);
969
970 /* wait 10ms to be sure we've come out of any low power state */
971 msleep(10);
972
973 /* clear out any PhyRdy stuff from interrupt status */
974 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
975
976 /* go ahead and clean out PhyRdy Change from SError too */
977 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
978
979 /*
980 * Clear flag to indicate that we should ignore all PhyRdy
981 * state changes
982 */
983 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
984
985 /*
986 * Enable interrupts on Phy Ready.
987 */
988 pp->intr_mask |= PORT_IRQ_PHYRDY;
989 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
990
991 /*
992 * don't change the link pm policy - we can be called
993 * just to turn off link pm temporarily
994 */
995 }
996
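/*
 * Enable aggressive link power management according to @policy:
 * MIN_POWER allows slumber (PxCMD.ASP set), MEDIUM_POWER allows
 * partial only, and MAX_PERFORMANCE/NOT_AVAILABLE fall back to
 * ahci_disable_alpm().  PhyRdy change interrupts are masked while
 * ALPM is active.
 */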
997 static int ahci_enable_alpm(struct ata_port *ap,
998 enum link_pm policy)
999 {
1000 struct ahci_host_priv *hpriv = ap->host->private_data;
1001 void __iomem *port_mmio = ahci_port_base(ap);
1002 u32 cmd;
1003 struct ahci_port_priv *pp = ap->private_data;
1004 u32 asp;
1005
1006 /* Make sure the host is capable of link power management */
1007 if (!(hpriv->cap & HOST_CAP_ALPM))
1008 return -EINVAL;
1009
1010 switch (policy) {
1011 case MAX_PERFORMANCE:
1012 case NOT_AVAILABLE:
1013 /*
1014 * if we came here with NOT_AVAILABLE,
1015 * it just means this is the first time we
1016 * have tried to enable - default to max performance,
1017 * and let the user go to lower power modes on request.
1018 */
1019 ahci_disable_alpm(ap);
1020 return 0;
1021 case MIN_POWER:
1022 /* configure HBA to enter SLUMBER */
1023 asp = PORT_CMD_ASP;
1024 break;
1025 case MEDIUM_POWER:
1026 /* configure HBA to enter PARTIAL */
1027 asp = 0;
1028 break;
1029 default:
1030 return -EINVAL;
1031 }
1032
1033 /*
1034 * Disable interrupts on Phy Ready. This keeps us from
1035 * getting woken up due to spurious phy ready interrupts
1036 * TBD - Hot plug should be done via polling now, is
1037 * that even supported?
1038 */
1039 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1040 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1041
1042 /*
1043 * Set a flag to indicate that we should ignore all PhyRdy
1044 * state changes since these can happen now whenever we
1045 * change link state
1046 */
1047 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1048
1049 /* get the existing command bits */
1050 cmd = readl(port_mmio + PORT_CMD);
1051
1052 /*
1053 * Set ASP based on Policy
1054 */
1055 cmd |= asp;
1056
1057 /*
1058 * Setting this bit will instruct the HBA to aggressively
1059 * enter a lower power link state when it's appropriate and
1060 * based on the value set above for ASP
1061 */
1062 cmd |= PORT_CMD_ALPE;
1063
1064 /* write out new cmd value */
1065 writel(cmd, port_mmio + PORT_CMD);
1066 cmd = readl(port_mmio + PORT_CMD);
1067
1068 /* IPM bits should be set by libata-core */
1069 return 0;
1070 }
1071
1072 #ifdef CONFIG_PM
1073 static void ahci_power_down(struct ata_port *ap)
1074 {
1075 struct ahci_host_priv *hpriv = ap->host->private_data;
1076 void __iomem *port_mmio = ahci_port_base(ap);
1077 u32 cmd, scontrol;
1078
1079 if (!(hpriv->cap & HOST_CAP_SSS))
1080 return;
1081
1082 /* put device into listen mode, first set PxSCTL.DET to 0 */
1083 scontrol = readl(port_mmio + PORT_SCR_CTL);
1084 scontrol &= ~0xf;
1085 writel(scontrol, port_mmio + PORT_SCR_CTL);
1086
1087 /* then set PxCMD.SUD to 0 */
1088 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1089 cmd &= ~PORT_CMD_SPIN_UP;
1090 writel(cmd, port_mmio + PORT_CMD);
1091 }
1092 #endif
1093
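/*
 * Bring a port to the operational state: enable FIS reception, start
 * the DMA engine and restore enclosure-management LED / sw_activity
 * state where applicable.
 */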
1094 static void ahci_start_port(struct ata_port *ap)
1095 {
1096 struct ahci_port_priv *pp = ap->private_data;
1097 struct ata_link *link;
1098 struct ahci_em_priv *emp;
1099
1100 /* enable FIS reception */
1101 ahci_start_fis_rx(ap);
1102
1103 /* enable DMA */
1104 ahci_start_engine(ap);
1105
1106 /* turn on LEDs */
1107 if (ap->flags & ATA_FLAG_EM) {
1108 ata_port_for_each_link(link, ap) {
1109 emp = &pp->em_priv[link->pmp];
1110 ahci_transmit_led_message(ap, emp->led_state, 4);
1111 }
1112 }
1113
1114 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1115 ata_port_for_each_link(link, ap)
1116 ahci_init_sw_activity(link);
1117
1118 }
1119
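/*
 * Quiesce a port by stopping the command DMA engine and FIS
 * reception; on failure a description is returned through @emsg.
 */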
1120 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1121 {
1122 int rc;
1123
1124 /* disable DMA */
1125 rc = ahci_stop_engine(ap);
1126 if (rc) {
1127 *emsg = "failed to stop engine";
1128 return rc;
1129 }
1130
1131 /* disable FIS reception */
1132 rc = ahci_stop_fis_rx(ap);
1133 if (rc) {
1134 *emsg = "failed to stop FIS RX";
1135 return rc;
1136 }
1137
1138 return 0;
1139 }
1140
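/*
 * Perform a global controller reset (HOST_CTL.HOST_RESET), restore
 * the saved CAP/PORTS_IMPL values afterwards and, on Intel parts,
 * make sure the PCS register enables all implemented ports.
 */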
1141 static int ahci_reset_controller(struct ata_host *host)
1142 {
1143 struct pci_dev *pdev = to_pci_dev(host->dev);
1144 struct ahci_host_priv *hpriv = host->private_data;
1145 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1146 u32 tmp;
1147
1148 /* we must be in AHCI mode, before using anything
1149 * AHCI-specific, such as HOST_RESET.
1150 */
1151 ahci_enable_ahci(mmio);
1152
1153 /* global controller reset */
1154 if (!ahci_skip_host_reset) {
1155 tmp = readl(mmio + HOST_CTL);
1156 if ((tmp & HOST_RESET) == 0) {
1157 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1158 readl(mmio + HOST_CTL); /* flush */
1159 }
1160
1161 /*
1162 * to perform host reset, OS should set HOST_RESET
1163 * and poll until this bit is read to be "0".
1164 * reset must complete within 1 second, or
1165 * the hardware should be considered fried.
1166 */
1167 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1168 HOST_RESET, 10, 1000);
1169
1170 if (tmp & HOST_RESET) {
1171 dev_printk(KERN_ERR, host->dev,
1172 "controller reset failed (0x%x)\n", tmp);
1173 return -EIO;
1174 }
1175
1176 /* turn on AHCI mode */
1177 ahci_enable_ahci(mmio);
1178
1179 /* Some registers might be cleared on reset. Restore
1180 * initial values.
1181 */
1182 ahci_restore_initial_config(host);
1183 } else
1184 dev_printk(KERN_INFO, host->dev,
1185 "skipping global host reset\n");
1186
1187 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1188 u16 tmp16;
1189
1190 /* configure PCS */
1191 pci_read_config_word(pdev, 0x92, &tmp16);
1192 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1193 tmp16 |= hpriv->port_map;
1194 pci_write_config_word(pdev, 0x92, tmp16);
1195 }
1196 }
1197
1198 return 0;
1199 }
1200
1201 static void ahci_sw_activity(struct ata_link *link)
1202 {
1203 struct ata_port *ap = link->ap;
1204 struct ahci_port_priv *pp = ap->private_data;
1205 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1206
1207 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1208 return;
1209
1210 emp->activity++;
1211 if (!timer_pending(&emp->timer))
1212 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1213 }
1214
1215 static void ahci_sw_activity_blink(unsigned long arg)
1216 {
1217 struct ata_link *link = (struct ata_link *)arg;
1218 struct ata_port *ap = link->ap;
1219 struct ahci_port_priv *pp = ap->private_data;
1220 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1221 unsigned long led_message = emp->led_state;
1222 u32 activity_led_state;
1223
1224 led_message &= 0xffff0000;
1225 led_message |= ap->port_no | (link->pmp << 8);
1226
1227 /* check to see if we've had activity. If so,
1228 * toggle state of LED and reset timer. If not,
1229 * turn LED to desired idle state.
1230 */
1231 if (emp->saved_activity != emp->activity) {
1232 emp->saved_activity = emp->activity;
1233 /* get the current LED state */
1234 activity_led_state = led_message & 0x00010000;
1235
1236 if (activity_led_state)
1237 activity_led_state = 0;
1238 else
1239 activity_led_state = 1;
1240
1241 /* clear old state */
1242 led_message &= 0xfff8ffff;
1243
1244 /* toggle state */
1245 led_message |= (activity_led_state << 16);
1246 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1247 } else {
1248 /* switch to idle */
1249 led_message &= 0xfff8ffff;
1250 if (emp->blink_policy == BLINK_OFF)
1251 led_message |= (1 << 16);
1252 }
1253 ahci_transmit_led_message(ap, led_message, 4);
1254 }
1255
1256 static void ahci_init_sw_activity(struct ata_link *link)
1257 {
1258 struct ata_port *ap = link->ap;
1259 struct ahci_port_priv *pp = ap->private_data;
1260 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1261
1262 /* init activity stats, setup timer */
1263 emp->saved_activity = emp->activity = 0;
1264 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1265
1266 /* check our blink policy and set flag for link if it's enabled */
1267 if (emp->blink_policy)
1268 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1269 }
1270
1271 static int ahci_reset_em(struct ata_host *host)
1272 {
1273 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1274 u32 em_ctl;
1275
1276 em_ctl = readl(mmio + HOST_EM_CTL);
1277 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1278 return -EINVAL;
1279
1280 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1281 return 0;
1282 }
1283
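/*
 * Build a two-dword LED message for the given port/PMP slot, write it
 * to the enclosure management transmit area and set EM_CTL_TM to send
 * it.  Returns @size on success, -EINVAL if a previous transmission
 * is still in progress or the slot number is out of range.
 */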
1284 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1285 ssize_t size)
1286 {
1287 struct ahci_host_priv *hpriv = ap->host->private_data;
1288 struct ahci_port_priv *pp = ap->private_data;
1289 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1290 u32 em_ctl;
1291 u32 message[] = {0, 0};
1292 unsigned long flags;
1293 int pmp;
1294 struct ahci_em_priv *emp;
1295
1296 /* get the slot number from the message */
1297 pmp = (state & 0x0000ff00) >> 8;
1298 if (pmp < MAX_SLOTS)
1299 emp = &pp->em_priv[pmp];
1300 else
1301 return -EINVAL;
1302
1303 spin_lock_irqsave(ap->lock, flags);
1304
1305 /*
1306 * if we are still busy transmitting a previous message,
1307 * do not allow a new transmission
1308 */
1309 em_ctl = readl(mmio + HOST_EM_CTL);
1310 if (em_ctl & EM_CTL_TM) {
1311 spin_unlock_irqrestore(ap->lock, flags);
1312 return -EINVAL;
1313 }
1314
1315 /*
1316 * create message header - this is all zero except for
1317 * the message size, which is 4 bytes.
1318 */
1319 message[0] |= (4 << 8);
1320
1321 /* ignore bits 3:0 of byte zero, fill in the port number ourselves */
1322 message[1] = ((state & 0xfffffff0) | ap->port_no);
1323
1324 /* write message to EM_LOC */
1325 writel(message[0], mmio + hpriv->em_loc);
1326 writel(message[1], mmio + hpriv->em_loc+4);
1327
1328 /* save off new led state for port/slot */
1329 emp->led_state = message[1];
1330
1331 /*
1332 * tell hardware to transmit the message
1333 */
1334 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1335
1336 spin_unlock_irqrestore(ap->lock, flags);
1337 return size;
1338 }
1339
1340 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1341 {
1342 struct ahci_port_priv *pp = ap->private_data;
1343 struct ata_link *link;
1344 struct ahci_em_priv *emp;
1345 int rc = 0;
1346
1347 ata_port_for_each_link(link, ap) {
1348 emp = &pp->em_priv[link->pmp];
1349 rc += sprintf(buf, "%lx\n", emp->led_state);
1350 }
1351 return rc;
1352 }
1353
1354 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1355 size_t size)
1356 {
1357 int state;
1358 int pmp;
1359 struct ahci_port_priv *pp = ap->private_data;
1360 struct ahci_em_priv *emp;
1361
1362 state = simple_strtoul(buf, NULL, 0);
1363
1364 /* get the slot number from the message */
1365 pmp = (state & 0x0000ff00) >> 8;
1366 if (pmp < MAX_SLOTS)
1367 emp = &pp->em_priv[pmp];
1368 else
1369 return -EINVAL;
1370
1371 /* mask off the activity bits if we are in sw_activity
1372 * mode; the user should turn off sw_activity before setting
1373 * the activity LED through em_message
1374 */
1375 if (emp->blink_policy)
1376 state &= 0xfff8ffff;
1377
1378 return ahci_transmit_led_message(ap, state, size);
1379 }
1380
1381 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1382 {
1383 struct ata_link *link = dev->link;
1384 struct ata_port *ap = link->ap;
1385 struct ahci_port_priv *pp = ap->private_data;
1386 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1387 u32 port_led_state = emp->led_state;
1388
1389 /* save the desired Activity LED behavior */
1390 if (val == OFF) {
1391 /* clear LFLAG */
1392 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1393
1394 /* set the LED to OFF */
1395 port_led_state &= 0xfff80000;
1396 port_led_state |= (ap->port_no | (link->pmp << 8));
1397 ahci_transmit_led_message(ap, port_led_state, 4);
1398 } else {
1399 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1400 if (val == BLINK_OFF) {
1401 /* set LED to ON for idle */
1402 port_led_state &= 0xfff80000;
1403 port_led_state |= (ap->port_no | (link->pmp << 8));
1404 port_led_state |= 0x00010000; /* check this */
1405 ahci_transmit_led_message(ap, port_led_state, 4);
1406 }
1407 }
1408 emp->blink_policy = val;
1409 return 0;
1410 }
1411
1412 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1413 {
1414 struct ata_link *link = dev->link;
1415 struct ata_port *ap = link->ap;
1416 struct ahci_port_priv *pp = ap->private_data;
1417 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1418
1419 /* display the saved value of activity behavior for this
1420 * disk.
1421 */
1422 return sprintf(buf, "%d\n", emp->blink_policy);
1423 }
1424
1425 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1426 int port_no, void __iomem *mmio,
1427 void __iomem *port_mmio)
1428 {
1429 const char *emsg = NULL;
1430 int rc;
1431 u32 tmp;
1432
1433 /* make sure port is not active */
1434 rc = ahci_deinit_port(ap, &emsg);
1435 if (rc)
1436 dev_printk(KERN_WARNING, &pdev->dev,
1437 "%s (%d)\n", emsg, rc);
1438
1439 /* clear SError */
1440 tmp = readl(port_mmio + PORT_SCR_ERR);
1441 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1442 writel(tmp, port_mmio + PORT_SCR_ERR);
1443
1444 /* clear port IRQ */
1445 tmp = readl(port_mmio + PORT_IRQ_STAT);
1446 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1447 if (tmp)
1448 writel(tmp, port_mmio + PORT_IRQ_STAT);
1449
1450 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1451 }
1452
1453 static void ahci_init_controller(struct ata_host *host)
1454 {
1455 struct ahci_host_priv *hpriv = host->private_data;
1456 struct pci_dev *pdev = to_pci_dev(host->dev);
1457 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1458 int i;
1459 void __iomem *port_mmio;
1460 u32 tmp;
1461 int mv;
1462
1463 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1464 if (pdev->device == 0x6121)
1465 mv = 2;
1466 else
1467 mv = 4;
1468 port_mmio = __ahci_port_base(host, mv);
1469
1470 writel(0, port_mmio + PORT_IRQ_MASK);
1471
1472 /* clear port IRQ */
1473 tmp = readl(port_mmio + PORT_IRQ_STAT);
1474 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1475 if (tmp)
1476 writel(tmp, port_mmio + PORT_IRQ_STAT);
1477 }
1478
1479 for (i = 0; i < host->n_ports; i++) {
1480 struct ata_port *ap = host->ports[i];
1481
1482 port_mmio = ahci_port_base(ap);
1483 if (ata_port_is_dummy(ap))
1484 continue;
1485
1486 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1487 }
1488
1489 tmp = readl(mmio + HOST_CTL);
1490 VPRINTK("HOST_CTL 0x%x\n", tmp);
1491 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1492 tmp = readl(mmio + HOST_CTL);
1493 VPRINTK("HOST_CTL 0x%x\n", tmp);
1494 }
1495
1496 static void ahci_dev_config(struct ata_device *dev)
1497 {
1498 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1499
1500 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1501 dev->max_sectors = 255;
1502 ata_dev_printk(dev, KERN_INFO,
1503 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1504 }
1505 }
1506
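/*
 * Read the device signature latched in PxSIG and classify the
 * attached device (ATA, ATAPI, PMP, ...).
 */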
1507 static unsigned int ahci_dev_classify(struct ata_port *ap)
1508 {
1509 void __iomem *port_mmio = ahci_port_base(ap);
1510 struct ata_taskfile tf;
1511 u32 tmp;
1512
1513 tmp = readl(port_mmio + PORT_SIG);
1514 tf.lbah = (tmp >> 24) & 0xff;
1515 tf.lbam = (tmp >> 16) & 0xff;
1516 tf.lbal = (tmp >> 8) & 0xff;
1517 tf.nsect = (tmp) & 0xff;
1518
1519 return ata_dev_classify(&tf);
1520 }
1521
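/*
 * Fill command list slot @tag with the option flags and the DMA
 * address of that slot's command table.
 */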
1522 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1523 u32 opts)
1524 {
1525 dma_addr_t cmd_tbl_dma;
1526
1527 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1528
1529 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1530 pp->cmd_slot[tag].status = 0;
1531 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1532 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1533 }
1534
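/*
 * Restart the port's DMA engine, performing a command list override
 * (CLO) first if the device is busy so a stuck BSY/DRQ state can be
 * cleared (e.g. before issuing SRST).
 */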
1535 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1536 {
1537 void __iomem *port_mmio = ahci_port_base(ap);
1538 struct ahci_host_priv *hpriv = ap->host->private_data;
1539 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1540 u32 tmp;
1541 int busy, rc;
1542
1543 /* do we need to kick the port? */
1544 busy = status & (ATA_BUSY | ATA_DRQ);
1545 if (!busy && !force_restart)
1546 return 0;
1547
1548 /* stop engine */
1549 rc = ahci_stop_engine(ap);
1550 if (rc)
1551 goto out_restart;
1552
1553 /* need to do CLO? */
1554 if (!busy) {
1555 rc = 0;
1556 goto out_restart;
1557 }
1558
1559 if (!(hpriv->cap & HOST_CAP_CLO)) {
1560 rc = -EOPNOTSUPP;
1561 goto out_restart;
1562 }
1563
1564 /* perform CLO */
1565 tmp = readl(port_mmio + PORT_CMD);
1566 tmp |= PORT_CMD_CLO;
1567 writel(tmp, port_mmio + PORT_CMD);
1568
1569 rc = 0;
1570 tmp = ata_wait_register(port_mmio + PORT_CMD,
1571 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1572 if (tmp & PORT_CMD_CLO)
1573 rc = -EIO;
1574
1575 /* restart engine */
1576 out_restart:
1577 ahci_start_engine(ap);
1578 return rc;
1579 }
1580
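/*
 * Build a Register - Host to Device FIS from @tf, place it in command
 * slot 0 and issue it; when @timeout_msec is non-zero, poll
 * PORT_CMD_ISSUE for completion.  Returns 0 on success, -EBUSY on
 * timeout (after kicking the engine).
 */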
1581 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1582 struct ata_taskfile *tf, int is_cmd, u16 flags,
1583 unsigned long timeout_msec)
1584 {
1585 const u32 cmd_fis_len = 5; /* five dwords */
1586 struct ahci_port_priv *pp = ap->private_data;
1587 void __iomem *port_mmio = ahci_port_base(ap);
1588 u8 *fis = pp->cmd_tbl;
1589 u32 tmp;
1590
1591 /* prep the command */
1592 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1593 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1594
1595 /* issue & wait */
1596 writel(1, port_mmio + PORT_CMD_ISSUE);
1597
1598 if (timeout_msec) {
1599 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1600 1, timeout_msec);
1601 if (tmp & 0x1) {
1602 ahci_kick_engine(ap, 1);
1603 return -EBUSY;
1604 }
1605 } else
1606 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1607
1608 return 0;
1609 }
1610
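/*
 * Common softreset implementation (AHCI-1.1 10.4.1): transmit the
 * SRST sequence as two H2D Register FISes, wait for the link to
 * become ready via @check_ready and classify the device from PxSIG.
 */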
1611 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1612 int pmp, unsigned long deadline,
1613 int (*check_ready)(struct ata_link *link))
1614 {
1615 struct ata_port *ap = link->ap;
1616 const char *reason = NULL;
1617 unsigned long now, msecs;
1618 struct ata_taskfile tf;
1619 int rc;
1620
1621 DPRINTK("ENTER\n");
1622
1623 /* prepare for SRST (AHCI-1.1 10.4.1) */
1624 rc = ahci_kick_engine(ap, 1);
1625 if (rc && rc != -EOPNOTSUPP)
1626 ata_link_printk(link, KERN_WARNING,
1627 "failed to reset engine (errno=%d)\n", rc);
1628
1629 ata_tf_init(link->device, &tf);
1630
1631 /* issue the first H2D Register FIS */
1632 msecs = 0;
1633 now = jiffies;
1634 if (time_after(deadline, now))
1635 msecs = jiffies_to_msecs(deadline - now);
1636
1637 tf.ctl |= ATA_SRST;
1638 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1639 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1640 rc = -EIO;
1641 reason = "1st FIS failed";
1642 goto fail;
1643 }
1644
1645 /* spec says at least 5us, but be generous and sleep for 1ms */
1646 msleep(1);
1647
1648 /* issue the second H2D Register FIS */
1649 tf.ctl &= ~ATA_SRST;
1650 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1651
1652 /* wait for link to become ready */
1653 rc = ata_wait_after_reset(link, deadline, check_ready);
1654 /* link is occupied; even -ENODEV is treated as an error here */
1655 if (rc) {
1656 reason = "device not ready";
1657 goto fail;
1658 }
1659 *class = ahci_dev_classify(ap);
1660
1661 DPRINTK("EXIT, class=%u\n", *class);
1662 return 0;
1663
1664 fail:
1665 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1666 return rc;
1667 }
1668
1669 static int ahci_check_ready(struct ata_link *link)
1670 {
1671 void __iomem *port_mmio = ahci_port_base(link->ap);
1672 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1673
1674 return ata_check_ready(status);
1675 }
1676
1677 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1678 unsigned long deadline)
1679 {
1680 int pmp = sata_srst_pmp(link);
1681
1682 DPRINTK("ENTER\n");
1683
1684 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1685 }
1686
1687 static int ahci_sb600_check_ready(struct ata_link *link)
1688 {
1689 void __iomem *port_mmio = ahci_port_base(link->ap);
1690 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1691 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1692
1693 /*
1694 * If BAD PMP is reported (a HW bug on these chips), there is no need
1695 * to keep checking TFDATA; bailing out early saves the timeout delay.
1696 */
1697 if (irq_status & PORT_IRQ_BAD_PMP)
1698 return -EIO;
1699
1700 return ata_check_ready(status);
1701 }
1702
1703 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1704 unsigned long deadline)
1705 {
1706 struct ata_port *ap = link->ap;
1707 void __iomem *port_mmio = ahci_port_base(ap);
1708 int pmp = sata_srst_pmp(link);
1709 int rc;
1710 u32 irq_sts;
1711
1712 DPRINTK("ENTER\n");
1713
1714 rc = ahci_do_softreset(link, class, pmp, deadline,
1715 ahci_sb600_check_ready);
1716
1717 /*
1718 * Soft reset fails on some ATI chips with IPMS set when PMP
1719 * is enabled but a SATA HDD/ODD is connected to the SATA port.
1720 * In that case, retry the soft reset against PMP 0.
1721 */
1722 if (rc == -EIO) {
1723 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1724 if (irq_sts & PORT_IRQ_BAD_PMP) {
1725 ata_link_printk(link, KERN_WARNING,
1726 "failed due to HW bug, retry pmp=0\n");
1727 rc = ahci_do_softreset(link, class, 0, deadline,
1728 ahci_check_ready);
1729 }
1730 }
1731
1732 return rc;
1733 }
1734
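/*
 * Standard AHCI hardreset: stop the DMA engine, clear the D2H FIS
 * reception area, perform a SATA link hardreset, restart the engine
 * and classify the device if the link came back up.
 */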
1735 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1736 unsigned long deadline)
1737 {
1738 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1739 struct ata_port *ap = link->ap;
1740 struct ahci_port_priv *pp = ap->private_data;
1741 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1742 struct ata_taskfile tf;
1743 bool online;
1744 int rc;
1745
1746 DPRINTK("ENTER\n");
1747
1748 ahci_stop_engine(ap);
1749
1750 /* clear D2H reception area to properly wait for D2H FIS */
1751 ata_tf_init(link->device, &tf);
1752 	tf.command = 0x80;	/* ATA_BUSY */
1753 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1754
1755 rc = sata_link_hardreset(link, timing, deadline, &online,
1756 ahci_check_ready);
1757
1758 ahci_start_engine(ap);
1759
1760 if (online)
1761 *class = ahci_dev_classify(ap);
1762
1763 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1764 return rc;
1765 }
1766
1767 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1768 unsigned long deadline)
1769 {
1770 struct ata_port *ap = link->ap;
1771 bool online;
1772 int rc;
1773
1774 DPRINTK("ENTER\n");
1775
1776 ahci_stop_engine(ap);
1777
1778 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1779 deadline, &online, NULL);
1780
1781 ahci_start_engine(ap);
1782
1783 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1784
1785 /* vt8251 doesn't clear BSY on signature FIS reception,
1786 * request follow-up softreset.
1787 */
1788 return online ? -EAGAIN : rc;
1789 }
1790
1791 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1792 unsigned long deadline)
1793 {
1794 struct ata_port *ap = link->ap;
1795 struct ahci_port_priv *pp = ap->private_data;
1796 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1797 struct ata_taskfile tf;
1798 bool online;
1799 int rc;
1800
1801 ahci_stop_engine(ap);
1802
1803 /* clear D2H reception area to properly wait for D2H FIS */
1804 ata_tf_init(link->device, &tf);
1805 	tf.command = 0x80;	/* ATA_BUSY */
1806 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1807
1808 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1809 deadline, &online, NULL);
1810
1811 ahci_start_engine(ap);
1812
1813 /* The pseudo configuration device on SIMG4726 attached to
1814 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1815 * hardreset if no device is attached to the first downstream
1816 * port && the pseudo device locks up on SRST w/ PMP==0. To
1817 * work around this, wait for !BSY only briefly. If BSY isn't
1818 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1819 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1820 *
1821 	 * Wait for two seconds.  Devices attached to the downstream port
1822 	 * which can't process the following IDENTIFY within this window
1823 	 * will have to be reset again.  For most cases, this should
1824 	 * suffice while keeping probing reasonably snappy.
1825 */
1826 if (online) {
1827 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1828 ahci_check_ready);
1829 if (rc)
1830 ahci_kick_engine(ap, 0);
1831 }
1832 return rc;
1833 }
1834
1835 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1836 {
1837 struct ata_port *ap = link->ap;
1838 void __iomem *port_mmio = ahci_port_base(ap);
1839 u32 new_tmp, tmp;
1840
1841 ata_std_postreset(link, class);
1842
1843 /* Make sure port's ATAPI bit is set appropriately */
1844 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1845 if (*class == ATA_DEV_ATAPI)
1846 new_tmp |= PORT_CMD_ATAPI;
1847 else
1848 new_tmp &= ~PORT_CMD_ATAPI;
1849 if (new_tmp != tmp) {
1850 writel(new_tmp, port_mmio + PORT_CMD);
1851 readl(port_mmio + PORT_CMD); /* flush */
1852 }
1853 }
1854
1855 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1856 {
1857 struct scatterlist *sg;
1858 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1859 unsigned int si;
1860
1861 VPRINTK("ENTER\n");
1862
1863 /*
1864 * Next, the S/G list.
1865 */
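	/* Each PRDT entry takes the buffer address split into low and
	 * high dwords plus a zero-based byte count in flags_size,
	 * hence the sg_len - 1 stored below.
	 */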
1866 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1867 dma_addr_t addr = sg_dma_address(sg);
1868 u32 sg_len = sg_dma_len(sg);
1869
1870 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1871 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1872 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1873 }
1874
1875 return si;
1876 }
1877
1878 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1879 {
1880 struct ata_port *ap = qc->ap;
1881 struct ahci_port_priv *pp = ap->private_data;
1882 int is_atapi = ata_is_atapi(qc->tf.protocol);
1883 void *cmd_tbl;
1884 u32 opts;
1885 const u32 cmd_fis_len = 5; /* five dwords */
1886 unsigned int n_elem;
1887
1888 /*
1889 * Fill in command table information. First, the header,
1890 * a SATA Register - Host to Device command FIS.
1891 */
1892 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1893
1894 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1895 if (is_atapi) {
1896 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1897 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1898 }
1899
1900 n_elem = 0;
1901 if (qc->flags & ATA_QCFLAG_DMAMAP)
1902 n_elem = ahci_fill_sg(qc, cmd_tbl);
1903
1904 /*
1905 * Fill in command slot information.
1906 */
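	/* Command header DW0 layout: FIS length in dwords in bits 4:0,
	 * the PMP port number in bits 15:12 and the PRDT entry count
	 * in bits 31:16.
	 */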
1907 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1908 if (qc->tf.flags & ATA_TFLAG_WRITE)
1909 opts |= AHCI_CMD_WRITE;
1910 if (is_atapi)
1911 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1912
1913 ahci_fill_cmd_slot(pp, qc->tag, opts);
1914 }
1915
1916 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1917 {
1918 struct ahci_host_priv *hpriv = ap->host->private_data;
1919 struct ahci_port_priv *pp = ap->private_data;
1920 struct ata_eh_info *host_ehi = &ap->link.eh_info;
1921 struct ata_link *link = NULL;
1922 struct ata_queued_cmd *active_qc;
1923 struct ata_eh_info *active_ehi;
1924 u32 serror;
1925
1926 /* determine active link */
1927 ata_port_for_each_link(link, ap)
1928 if (ata_link_active(link))
1929 break;
1930 if (!link)
1931 link = &ap->link;
1932
1933 active_qc = ata_qc_from_tag(ap, link->active_tag);
1934 active_ehi = &link->eh_info;
1935
1936 /* record irq stat */
1937 ata_ehi_clear_desc(host_ehi);
1938 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1939
1940 /* AHCI needs SError cleared; otherwise, it might lock up */
1941 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
1942 ahci_scr_write(&ap->link, SCR_ERROR, serror);
1943 host_ehi->serror |= serror;
1944
1945 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1946 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1947 irq_stat &= ~PORT_IRQ_IF_ERR;
1948
1949 if (irq_stat & PORT_IRQ_TF_ERR) {
1950 /* If qc is active, charge it; otherwise, the active
1951 * link. There's no active qc on NCQ errors. It will
1952 * be determined by EH by reading log page 10h.
1953 */
1954 if (active_qc)
1955 active_qc->err_mask |= AC_ERR_DEV;
1956 else
1957 active_ehi->err_mask |= AC_ERR_DEV;
1958
1959 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
1960 host_ehi->serror &= ~SERR_INTERNAL;
1961 }
1962
1963 if (irq_stat & PORT_IRQ_UNK_FIS) {
1964 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1965
1966 active_ehi->err_mask |= AC_ERR_HSM;
1967 active_ehi->action |= ATA_EH_RESET;
1968 ata_ehi_push_desc(active_ehi,
1969 "unknown FIS %08x %08x %08x %08x" ,
1970 unk[0], unk[1], unk[2], unk[3]);
1971 }
1972
1973 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
1974 active_ehi->err_mask |= AC_ERR_HSM;
1975 active_ehi->action |= ATA_EH_RESET;
1976 ata_ehi_push_desc(active_ehi, "incorrect PMP");
1977 }
1978
1979 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
1980 host_ehi->err_mask |= AC_ERR_HOST_BUS;
1981 host_ehi->action |= ATA_EH_RESET;
1982 ata_ehi_push_desc(host_ehi, "host bus error");
1983 }
1984
1985 if (irq_stat & PORT_IRQ_IF_ERR) {
1986 host_ehi->err_mask |= AC_ERR_ATA_BUS;
1987 host_ehi->action |= ATA_EH_RESET;
1988 ata_ehi_push_desc(host_ehi, "interface fatal error");
1989 }
1990
1991 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1992 ata_ehi_hotplugged(host_ehi);
1993 ata_ehi_push_desc(host_ehi, "%s",
1994 irq_stat & PORT_IRQ_CONNECT ?
1995 "connection status changed" : "PHY RDY changed");
1996 }
1997
1998 /* okay, let's hand over to EH */
1999
2000 if (irq_stat & PORT_IRQ_FREEZE)
2001 ata_port_freeze(ap);
2002 else
2003 ata_port_abort(ap);
2004 }
2005
2006 static void ahci_port_intr(struct ata_port *ap)
2007 {
2008 void __iomem *port_mmio = ahci_port_base(ap);
2009 struct ata_eh_info *ehi = &ap->link.eh_info;
2010 struct ahci_port_priv *pp = ap->private_data;
2011 struct ahci_host_priv *hpriv = ap->host->private_data;
2012 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2013 u32 status, qc_active;
2014 int rc;
2015
2016 status = readl(port_mmio + PORT_IRQ_STAT);
2017 writel(status, port_mmio + PORT_IRQ_STAT);
2018
2019 /* ignore BAD_PMP while resetting */
2020 if (unlikely(resetting))
2021 status &= ~PORT_IRQ_BAD_PMP;
2022
2023 	/* If we are getting a PhyRdy interrupt, this is
2024 	 * just a power state change; we should clear it
2025 	 * out, along with the PhyRdy and Comm Wake
2026 	 * bits in SError.
2027 	 */
2028 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2029 (status & PORT_IRQ_PHYRDY)) {
2030 status &= ~PORT_IRQ_PHYRDY;
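		/* bit 16 is SERR_PHYRDY_CHG and bit 18 is SERR_COMM_WAKE */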
2031 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2032 }
2033
2034 if (unlikely(status & PORT_IRQ_ERROR)) {
2035 ahci_error_intr(ap, status);
2036 return;
2037 }
2038
2039 if (status & PORT_IRQ_SDB_FIS) {
2040 /* If SNotification is available, leave notification
2041 * handling to sata_async_notification(). If not,
2042 * emulate it by snooping SDB FIS RX area.
2043 *
2044 * Snooping FIS RX area is probably cheaper than
2045 		 * poking SNotification, but some controllers which
2046 		 * implement SNotification, ICH9 for example, don't
2047 		 * store the AN SDB FIS into the receive area.
2048 */
2049 if (hpriv->cap & HOST_CAP_SNTF)
2050 sata_async_notification(ap);
2051 else {
2052 /* If the 'N' bit in word 0 of the FIS is set,
2053 * we just received asynchronous notification.
2054 * Tell libata about it.
2055 */
2056 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2057 u32 f0 = le32_to_cpu(f[0]);
2058
2059 if (f0 & (1 << 15))
2060 sata_async_notification(ap);
2061 }
2062 }
2063
2064 /* pp->active_link is valid iff any command is in flight */
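	/* NCQ completions are reflected in PxSACT while non-NCQ completions
	 * show up in PxCI, so read whichever register matches the phase.
	 */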
2065 if (ap->qc_active && pp->active_link->sactive)
2066 qc_active = readl(port_mmio + PORT_SCR_ACT);
2067 else
2068 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2069
2070 rc = ata_qc_complete_multiple(ap, qc_active);
2071
2072 /* while resetting, invalid completions are expected */
2073 if (unlikely(rc < 0 && !resetting)) {
2074 ehi->err_mask |= AC_ERR_HSM;
2075 ehi->action |= ATA_EH_RESET;
2076 ata_port_freeze(ap);
2077 }
2078 }
2079
2080 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2081 {
2082 struct ata_host *host = dev_instance;
2083 struct ahci_host_priv *hpriv;
2084 unsigned int i, handled = 0;
2085 void __iomem *mmio;
2086 u32 irq_stat, irq_masked;
2087
2088 VPRINTK("ENTER\n");
2089
2090 hpriv = host->private_data;
2091 mmio = host->iomap[AHCI_PCI_BAR];
2092
2093 /* sigh. 0xffffffff is a valid return from h/w */
2094 irq_stat = readl(mmio + HOST_IRQ_STAT);
2095 if (!irq_stat)
2096 return IRQ_NONE;
2097
2098 irq_masked = irq_stat & hpriv->port_map;
2099
2100 spin_lock(&host->lock);
2101
2102 for (i = 0; i < host->n_ports; i++) {
2103 struct ata_port *ap;
2104
2105 if (!(irq_masked & (1 << i)))
2106 continue;
2107
2108 ap = host->ports[i];
2109 if (ap) {
2110 ahci_port_intr(ap);
2111 VPRINTK("port %u\n", i);
2112 } else {
2113 VPRINTK("port %u (no irq)\n", i);
2114 if (ata_ratelimit())
2115 dev_printk(KERN_WARNING, host->dev,
2116 "interrupt on disabled port %u\n", i);
2117 }
2118
2119 handled = 1;
2120 }
2121
2122 	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2123 	 * it should be cleared after all the port events are cleared;
2124 	 * otherwise, it will raise a spurious interrupt after each
2125 	 * valid one.  Please read section 10.6.2 of the AHCI 1.1 spec
2126 	 * for more information.
2127 	 *
2128 	 * Also, use the unmasked value to clear the interrupt, as a spurious
2129 	 * pending event on a dummy port might cause a screaming IRQ.
2130 	 */
2131 writel(irq_stat, mmio + HOST_IRQ_STAT);
2132
2133 spin_unlock(&host->lock);
2134
2135 VPRINTK("EXIT\n");
2136
2137 return IRQ_RETVAL(handled);
2138 }
2139
2140 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2141 {
2142 struct ata_port *ap = qc->ap;
2143 void __iomem *port_mmio = ahci_port_base(ap);
2144 struct ahci_port_priv *pp = ap->private_data;
2145
2146 /* Keep track of the currently active link. It will be used
2147 	 * in the completion path to determine whether the NCQ phase is in
2148 * progress.
2149 */
2150 pp->active_link = qc->dev->link;
2151
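	/* for NCQ, the tag must be set in PxSACT before the matching
	 * bit in PxCI is written, which is the ordering used below
	 */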
2152 if (qc->tf.protocol == ATA_PROT_NCQ)
2153 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2154 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2155
2156 ahci_sw_activity(qc->dev->link);
2157
2158 return 0;
2159 }
2160
2161 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2162 {
2163 struct ahci_port_priv *pp = qc->ap->private_data;
2164 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2165
2166 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2167 return true;
2168 }
2169
2170 static void ahci_freeze(struct ata_port *ap)
2171 {
2172 void __iomem *port_mmio = ahci_port_base(ap);
2173
2174 /* turn IRQ off */
2175 writel(0, port_mmio + PORT_IRQ_MASK);
2176 }
2177
2178 static void ahci_thaw(struct ata_port *ap)
2179 {
2180 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2181 void __iomem *port_mmio = ahci_port_base(ap);
2182 u32 tmp;
2183 struct ahci_port_priv *pp = ap->private_data;
2184
2185 /* clear IRQ */
2186 tmp = readl(port_mmio + PORT_IRQ_STAT);
2187 writel(tmp, port_mmio + PORT_IRQ_STAT);
2188 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2189
2190 /* turn IRQ back on */
2191 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2192 }
2193
2194 static void ahci_error_handler(struct ata_port *ap)
2195 {
2196 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2197 /* restart engine */
2198 ahci_stop_engine(ap);
2199 ahci_start_engine(ap);
2200 }
2201
2202 sata_pmp_error_handler(ap);
2203 }
2204
2205 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2206 {
2207 struct ata_port *ap = qc->ap;
2208
2209 /* make DMA engine forget about the failed command */
2210 if (qc->flags & ATA_QCFLAG_FAILED)
2211 ahci_kick_engine(ap, 1);
2212 }
2213
2214 static void ahci_pmp_attach(struct ata_port *ap)
2215 {
2216 void __iomem *port_mmio = ahci_port_base(ap);
2217 struct ahci_port_priv *pp = ap->private_data;
2218 u32 cmd;
2219
2220 cmd = readl(port_mmio + PORT_CMD);
2221 cmd |= PORT_CMD_PMP;
2222 writel(cmd, port_mmio + PORT_CMD);
2223
2224 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2225 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2226 }
2227
2228 static void ahci_pmp_detach(struct ata_port *ap)
2229 {
2230 void __iomem *port_mmio = ahci_port_base(ap);
2231 struct ahci_port_priv *pp = ap->private_data;
2232 u32 cmd;
2233
2234 cmd = readl(port_mmio + PORT_CMD);
2235 cmd &= ~PORT_CMD_PMP;
2236 writel(cmd, port_mmio + PORT_CMD);
2237
2238 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2239 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2240 }
2241
2242 static int ahci_port_resume(struct ata_port *ap)
2243 {
2244 ahci_power_up(ap);
2245 ahci_start_port(ap);
2246
2247 if (sata_pmp_attached(ap))
2248 ahci_pmp_attach(ap);
2249 else
2250 ahci_pmp_detach(ap);
2251
2252 return 0;
2253 }
2254
2255 #ifdef CONFIG_PM
2256 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2257 {
2258 const char *emsg = NULL;
2259 int rc;
2260
2261 rc = ahci_deinit_port(ap, &emsg);
2262 if (rc == 0)
2263 ahci_power_down(ap);
2264 else {
2265 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2266 ahci_start_port(ap);
2267 }
2268
2269 return rc;
2270 }
2271
2272 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2273 {
2274 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2275 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2276 u32 ctl;
2277
2278 if (mesg.event & PM_EVENT_SLEEP) {
2279 /* AHCI spec rev1.1 section 8.3.3:
2280 * Software must disable interrupts prior to requesting a
2281 * transition of the HBA to D3 state.
2282 */
2283 ctl = readl(mmio + HOST_CTL);
2284 ctl &= ~HOST_IRQ_EN;
2285 writel(ctl, mmio + HOST_CTL);
2286 readl(mmio + HOST_CTL); /* flush */
2287 }
2288
2289 return ata_pci_device_suspend(pdev, mesg);
2290 }
2291
2292 static int ahci_pci_device_resume(struct pci_dev *pdev)
2293 {
2294 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2295 int rc;
2296
2297 rc = ata_pci_device_do_resume(pdev);
2298 if (rc)
2299 return rc;
2300
2301 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2302 rc = ahci_reset_controller(host);
2303 if (rc)
2304 return rc;
2305
2306 ahci_init_controller(host);
2307 }
2308
2309 ata_host_resume(host);
2310
2311 return 0;
2312 }
2313 #endif
2314
2315 static int ahci_port_start(struct ata_port *ap)
2316 {
2317 struct device *dev = ap->host->dev;
2318 struct ahci_port_priv *pp;
2319 void *mem;
2320 dma_addr_t mem_dma;
2321
2322 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2323 if (!pp)
2324 return -ENOMEM;
2325
2326 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2327 GFP_KERNEL);
2328 if (!mem)
2329 return -ENOMEM;
2330 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2331
2332 /*
2333 * First item in chunk of DMA memory: 32-slot command table,
2334 * 32 bytes each in size
2335 */
2336 pp->cmd_slot = mem;
2337 pp->cmd_slot_dma = mem_dma;
2338
2339 mem += AHCI_CMD_SLOT_SZ;
2340 mem_dma += AHCI_CMD_SLOT_SZ;
2341
2342 /*
2343 * Second item: Received-FIS area
2344 */
2345 pp->rx_fis = mem;
2346 pp->rx_fis_dma = mem_dma;
2347
2348 mem += AHCI_RX_FIS_SZ;
2349 mem_dma += AHCI_RX_FIS_SZ;
2350
2351 /*
2352 * Third item: data area for storing a single command
2353 * and its scatter-gather table
2354 */
2355 pp->cmd_tbl = mem;
2356 pp->cmd_tbl_dma = mem_dma;
2357
2358 /*
2359 * Save off initial list of interrupts to be enabled.
2360 * This could be changed later
2361 */
2362 pp->intr_mask = DEF_PORT_IRQ;
2363
2364 ap->private_data = pp;
2365
2366 /* engage engines, captain */
2367 return ahci_port_resume(ap);
2368 }
2369
2370 static void ahci_port_stop(struct ata_port *ap)
2371 {
2372 const char *emsg = NULL;
2373 int rc;
2374
2375 /* de-initialize port */
2376 rc = ahci_deinit_port(ap, &emsg);
2377 if (rc)
2378 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2379 }
2380
2381 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2382 {
2383 int rc;
2384
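	/* try 64-bit DMA masks when the controller advertises 64-bit
	 * addressing (CAP.S64A); otherwise fall back to 32-bit masks
	 */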
2385 if (using_dac &&
2386 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2387 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2388 if (rc) {
2389 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2390 if (rc) {
2391 dev_printk(KERN_ERR, &pdev->dev,
2392 "64-bit DMA enable failed\n");
2393 return rc;
2394 }
2395 }
2396 } else {
2397 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2398 if (rc) {
2399 dev_printk(KERN_ERR, &pdev->dev,
2400 "32-bit DMA enable failed\n");
2401 return rc;
2402 }
2403 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2404 if (rc) {
2405 dev_printk(KERN_ERR, &pdev->dev,
2406 "32-bit consistent DMA enable failed\n");
2407 return rc;
2408 }
2409 }
2410 return 0;
2411 }
2412
2413 static void ahci_print_info(struct ata_host *host)
2414 {
2415 struct ahci_host_priv *hpriv = host->private_data;
2416 struct pci_dev *pdev = to_pci_dev(host->dev);
2417 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2418 u32 vers, cap, impl, speed;
2419 const char *speed_s;
2420 u16 cc;
2421 const char *scc_s;
2422
2423 vers = readl(mmio + HOST_VERSION);
2424 cap = hpriv->cap;
2425 impl = hpriv->port_map;
2426
2427 speed = (cap >> 20) & 0xf;
2428 if (speed == 1)
2429 speed_s = "1.5";
2430 else if (speed == 2)
2431 speed_s = "3";
2432 else
2433 speed_s = "?";
2434
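	/* offset 0x0a is the 16-bit class code word (PCI_CLASS_DEVICE) */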
2435 pci_read_config_word(pdev, 0x0a, &cc);
2436 if (cc == PCI_CLASS_STORAGE_IDE)
2437 scc_s = "IDE";
2438 else if (cc == PCI_CLASS_STORAGE_SATA)
2439 scc_s = "SATA";
2440 else if (cc == PCI_CLASS_STORAGE_RAID)
2441 scc_s = "RAID";
2442 else
2443 scc_s = "unknown";
2444
2445 dev_printk(KERN_INFO, &pdev->dev,
2446 "AHCI %02x%02x.%02x%02x "
2447 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2448 ,
2449
2450 (vers >> 24) & 0xff,
2451 (vers >> 16) & 0xff,
2452 (vers >> 8) & 0xff,
2453 vers & 0xff,
2454
2455 ((cap >> 8) & 0x1f) + 1,
2456 (cap & 0x1f) + 1,
2457 speed_s,
2458 impl,
2459 scc_s);
2460
2461 dev_printk(KERN_INFO, &pdev->dev,
2462 "flags: "
2463 "%s%s%s%s%s%s%s"
2464 "%s%s%s%s%s%s%s"
2465 "%s\n"
2466 ,
2467
2468 cap & (1 << 31) ? "64bit " : "",
2469 cap & (1 << 30) ? "ncq " : "",
2470 cap & (1 << 29) ? "sntf " : "",
2471 cap & (1 << 28) ? "ilck " : "",
2472 cap & (1 << 27) ? "stag " : "",
2473 cap & (1 << 26) ? "pm " : "",
2474 cap & (1 << 25) ? "led " : "",
2475
2476 cap & (1 << 24) ? "clo " : "",
2477 cap & (1 << 19) ? "nz " : "",
2478 cap & (1 << 18) ? "only " : "",
2479 cap & (1 << 17) ? "pmp " : "",
2480 cap & (1 << 15) ? "pio " : "",
2481 cap & (1 << 14) ? "slum " : "",
2482 cap & (1 << 13) ? "part " : "",
2483 		cap & (1 << 6) ? "ems " : ""
2484 );
2485 }
2486
2487 /* On the ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2488 * hardwired to the on-board SIMG 4726.  The chipset is ICH8 and doesn't
2489 * support PMP, and the 4726 either directly exports the device
2490 * attached to the first downstream port or acts as a hardware storage
2491 * controller and emulates a single ATA device (which can be RAID 0/1
2492 * or some other configuration).
2493 *
2494 * When there's no device attached to the first downstream port of the
2495 * 4726, "Config Disk" appears, which is a pseudo ATA device used to
2496 * configure the 4726.  However, its ATA emulation is very lame.  It
2497 * doesn't send a signature D2H Reg FIS after the initial hardreset,
2498 * pukes on SRST w/ PMP==0 and has a bunch of other issues.
2499 *
2500 * The following function works around the problem by always using
2501 * hardreset on the port and not depending on receiving the signature
2502 * FIS afterward.  If the signature FIS isn't received soon, ATA class
2503 * is assumed without a follow-up softreset.
2504 */
2505 static void ahci_p5wdh_workaround(struct ata_host *host)
2506 {
2507 static struct dmi_system_id sysids[] = {
2508 {
2509 .ident = "P5W DH Deluxe",
2510 .matches = {
2511 DMI_MATCH(DMI_SYS_VENDOR,
2512 "ASUSTEK COMPUTER INC"),
2513 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2514 },
2515 },
2516 { }
2517 };
2518 struct pci_dev *pdev = to_pci_dev(host->dev);
2519
2520 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2521 dmi_check_system(sysids)) {
2522 struct ata_port *ap = host->ports[1];
2523
2524 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2525 "Deluxe on-board SIMG4726 workaround\n");
2526
2527 ap->ops = &ahci_p5wdh_ops;
2528 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2529 }
2530 }
2531
2532 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2533 {
2534 static int printed_version;
2535 unsigned int board_id = ent->driver_data;
2536 struct ata_port_info pi = ahci_port_info[board_id];
2537 const struct ata_port_info *ppi[] = { &pi, NULL };
2538 struct device *dev = &pdev->dev;
2539 struct ahci_host_priv *hpriv;
2540 struct ata_host *host;
2541 int n_ports, i, rc;
2542
2543 VPRINTK("ENTER\n");
2544
2545 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2546
2547 if (!printed_version++)
2548 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2549
2550 	/* The AHCI driver can only drive the SATA ports; the PATA driver
2551 	   can drive them all, so if both drivers are selected make sure
2552 	   AHCI stays out of the way */
2553 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2554 return -ENODEV;
2555
2556 /* acquire resources */
2557 rc = pcim_enable_device(pdev);
2558 if (rc)
2559 return rc;
2560
2561 	/* AHCI controllers often implement an SFF-compatible interface.
2562 * Grab all PCI BARs just in case.
2563 */
2564 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2565 if (rc == -EBUSY)
2566 pcim_pin_device(pdev);
2567 if (rc)
2568 return rc;
2569
2570 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2571 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2572 u8 map;
2573
2574 /* ICH6s share the same PCI ID for both piix and ahci
2575 * modes. Enabling ahci mode while MAP indicates
2576 * combined mode is a bad idea. Yield to ata_piix.
2577 */
2578 pci_read_config_byte(pdev, ICH_MAP, &map);
2579 if (map & 0x3) {
2580 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2581 "combined mode, can't enable AHCI mode\n");
2582 return -ENODEV;
2583 }
2584 }
2585
2586 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2587 if (!hpriv)
2588 return -ENOMEM;
2589 hpriv->flags |= (unsigned long)pi.private_data;
2590
2591 /* MCP65 revision A1 and A2 can't do MSI */
2592 if (board_id == board_ahci_mcp65 &&
2593 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2594 hpriv->flags |= AHCI_HFLAG_NO_MSI;
2595
2596 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
2597 pci_intx(pdev, 1);
2598
2599 /* save initial config */
2600 ahci_save_initial_config(pdev, hpriv);
2601
2602 /* prepare host */
2603 if (hpriv->cap & HOST_CAP_NCQ)
2604 pi.flags |= ATA_FLAG_NCQ;
2605
2606 if (hpriv->cap & HOST_CAP_PMP)
2607 pi.flags |= ATA_FLAG_PMP;
2608
2609 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2610 u8 messages;
2611 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2612 u32 em_loc = readl(mmio + HOST_EM_LOC);
2613 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2614
2615 messages = (em_ctl & 0x000f0000) >> 16;
2616
2617 		/* we only support the LED message type right now */
2618 if ((messages & 0x01) && (ahci_em_messages == 1)) {
2619 			/* store em_loc as a byte offset (EM_LOC gives it in dwords) */
2620 hpriv->em_loc = ((em_loc >> 16) * 4);
2621 pi.flags |= ATA_FLAG_EM;
2622 if (!(em_ctl & EM_CTL_ALHD))
2623 pi.flags |= ATA_FLAG_SW_ACTIVITY;
2624 }
2625 }
2626
2627 	/* CAP.NP sometimes indicates the index of the last enabled
2628 	 * port and at other times that of the last possible port, so
2629 * determining the maximum port number requires looking at
2630 * both CAP.NP and port_map.
2631 */
2632 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2633
2634 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2635 if (!host)
2636 return -ENOMEM;
2637 host->iomap = pcim_iomap_table(pdev);
2638 host->private_data = hpriv;
2639
2640 if (pi.flags & ATA_FLAG_EM)
2641 ahci_reset_em(host);
2642
2643 for (i = 0; i < host->n_ports; i++) {
2644 struct ata_port *ap = host->ports[i];
2645
2646 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
2647 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
2648 0x100 + ap->port_no * 0x80, "port");
2649
2650 /* set initial link pm policy */
2651 ap->pm_policy = NOT_AVAILABLE;
2652
2653 /* set enclosure management message type */
2654 if (ap->flags & ATA_FLAG_EM)
2655 ap->em_message_type = ahci_em_messages;
2656
2657
2658 /* disabled/not-implemented port */
2659 if (!(hpriv->port_map & (1 << i)))
2660 ap->ops = &ata_dummy_port_ops;
2661 }
2662
2663 /* apply workaround for ASUS P5W DH Deluxe mainboard */
2664 ahci_p5wdh_workaround(host);
2665
2666 /* initialize adapter */
2667 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
2668 if (rc)
2669 return rc;
2670
2671 rc = ahci_reset_controller(host);
2672 if (rc)
2673 return rc;
2674
2675 ahci_init_controller(host);
2676 ahci_print_info(host);
2677
2678 pci_set_master(pdev);
2679 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
2680 &ahci_sht);
2681 }
2682
2683 static int __init ahci_init(void)
2684 {
2685 return pci_register_driver(&ahci_pci_driver);
2686 }
2687
2688 static void __exit ahci_exit(void)
2689 {
2690 pci_unregister_driver(&ahci_pci_driver);
2691 }
2692
2693
2694 MODULE_AUTHOR("Jeff Garzik");
2695 MODULE_DESCRIPTION("AHCI SATA low-level driver");
2696 MODULE_LICENSE("GPL");
2697 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
2698 MODULE_VERSION(DRV_VERSION);
2699
2700 module_init(ahci_init);
2701 module_exit(ahci_exit);