1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "3.0"
51
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE 0x000f0000
54
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT 0x0000000f
57 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
58 #define EM_MSG_LED_VALUE 0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
60 #define EM_MSG_LED_VALUE_OFF 0xfff80000
61 #define EM_MSG_LED_VALUE_ON 0x00010000
62
63 static int ahci_skip_host_reset;
64 static int ahci_ignore_sss;
65
66 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
67 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
68
69 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
70 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
71
72 static int ahci_enable_alpm(struct ata_port *ap,
73 enum link_pm policy);
74 static void ahci_disable_alpm(struct ata_port *ap);
75 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
76 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
77 size_t size);
78 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
79 ssize_t size);
80
81 enum {
82 AHCI_PCI_BAR = 5,
83 AHCI_MAX_PORTS = 32,
84 AHCI_MAX_SG = 168, /* hardware max is 64K */
85 AHCI_DMA_BOUNDARY = 0xffffffff,
86 AHCI_MAX_CMDS = 32,
87 AHCI_CMD_SZ = 32,
88 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
89 AHCI_RX_FIS_SZ = 256,
90 AHCI_CMD_TBL_CDB = 0x40,
91 AHCI_CMD_TBL_HDR_SZ = 0x80,
92 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
93 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
94 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
95 AHCI_RX_FIS_SZ,
96 AHCI_IRQ_ON_SG = (1 << 31),
97 AHCI_CMD_ATAPI = (1 << 5),
98 AHCI_CMD_WRITE = (1 << 6),
99 AHCI_CMD_PREFETCH = (1 << 7),
100 AHCI_CMD_RESET = (1 << 8),
101 AHCI_CMD_CLR_BUSY = (1 << 10),
102
103 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
104 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
105 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
106
107 board_ahci = 0,
108 board_ahci_vt8251 = 1,
109 board_ahci_ign_iferr = 2,
110 board_ahci_sb600 = 3,
111 board_ahci_mv = 4,
112 board_ahci_sb700 = 5, /* for SB700 and SB800 */
113 board_ahci_mcp65 = 6,
114 board_ahci_nopmp = 7,
115 board_ahci_yesncq = 8,
116
117 /* global controller registers */
118 HOST_CAP = 0x00, /* host capabilities */
119 HOST_CTL = 0x04, /* global host control */
120 HOST_IRQ_STAT = 0x08, /* interrupt status */
121 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
122 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
123 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
124 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
125
126 /* HOST_CTL bits */
127 HOST_RESET = (1 << 0), /* reset controller; self-clear */
128 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
129 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
130
131 /* HOST_CAP bits */
132 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
133 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
134 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
135 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
136 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
137 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
138 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
139 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
140 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
141
142 /* registers for each SATA port */
143 PORT_LST_ADDR = 0x00, /* command list DMA addr */
144 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
145 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
146 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
147 PORT_IRQ_STAT = 0x10, /* interrupt status */
148 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
149 PORT_CMD = 0x18, /* port command */
150 PORT_TFDATA = 0x20, /* taskfile data */
151 PORT_SIG = 0x24, /* device TF signature */
152 PORT_CMD_ISSUE = 0x38, /* command issue */
153 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
154 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
155 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
156 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
157 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
158
159 /* PORT_IRQ_{STAT,MASK} bits */
160 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
161 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
162 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
163 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
164 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
165 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
166 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
167 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
168
169 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
170 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
171 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
172 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
173 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
174 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
175 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
176 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
177 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
178
179 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
180 PORT_IRQ_IF_ERR |
181 PORT_IRQ_CONNECT |
182 PORT_IRQ_PHYRDY |
183 PORT_IRQ_UNK_FIS |
184 PORT_IRQ_BAD_PMP,
185 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
186 PORT_IRQ_TF_ERR |
187 PORT_IRQ_HBUS_DATA_ERR,
188 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
189 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
190 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
191
192 /* PORT_CMD bits */
193 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
194 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
195 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
196 PORT_CMD_PMP = (1 << 17), /* PMP attached */
197 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
198 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
199 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
200 PORT_CMD_CLO = (1 << 3), /* Command list override */
201 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
202 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
203 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
204
205 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
206 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
207 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
208 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
209
210 /* hpriv->flags bits */
211 AHCI_HFLAG_NO_NCQ = (1 << 0),
212 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
213 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
214 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
215 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
216 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
217 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
218 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
219 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
220 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
221 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
222 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
223 link offline */
224
225 /* ap->flags bits */
226
227 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
228 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
229 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
230 ATA_FLAG_IPM,
231
232 ICH_MAP = 0x90, /* ICH MAP register */
233
234 /* em constants */
235 EM_MAX_SLOTS = 8,
236 EM_MAX_RETRY = 5,
237
238 /* em_ctl bits */
239 EM_CTL_RST = (1 << 9), /* Reset */
240 EM_CTL_TM = (1 << 8), /* Transmit Message */
241 EM_CTL_ALHD = (1 << 26), /* Activity LED */
242 };
243
244 struct ahci_cmd_hdr {
245 __le32 opts;
246 __le32 status;
247 __le32 tbl_addr;
248 __le32 tbl_addr_hi;
249 __le32 reserved[4];
250 };
251
252 struct ahci_sg {
253 __le32 addr;
254 __le32 addr_hi;
255 __le32 reserved;
256 __le32 flags_size;
257 };
258
259 struct ahci_em_priv {
260 enum sw_activity blink_policy;
261 struct timer_list timer;
262 unsigned long saved_activity;
263 unsigned long activity;
264 unsigned long led_state;
265 };
266
267 struct ahci_host_priv {
268 unsigned int flags; /* AHCI_HFLAG_* */
269 u32 cap; /* cap to use */
270 u32 port_map; /* port map to use */
271 u32 saved_cap; /* saved initial cap */
272 u32 saved_port_map; /* saved initial port_map */
273 u32 em_loc; /* enclosure management location */
274 };
275
276 struct ahci_port_priv {
277 struct ata_link *active_link;
278 struct ahci_cmd_hdr *cmd_slot;
279 dma_addr_t cmd_slot_dma;
280 void *cmd_tbl;
281 dma_addr_t cmd_tbl_dma;
282 void *rx_fis;
283 dma_addr_t rx_fis_dma;
284 /* for NCQ spurious interrupt analysis */
285 unsigned int ncq_saw_d2h:1;
286 unsigned int ncq_saw_dmas:1;
287 unsigned int ncq_saw_sdb:1;
288 u32 intr_mask; /* interrupts to enable */
289 /* enclosure management info per PM slot */
290 struct ahci_em_priv em_priv[EM_MAX_SLOTS];
291 };
292
293 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
294 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
295 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
296 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
297 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
298 static int ahci_port_start(struct ata_port *ap);
299 static void ahci_port_stop(struct ata_port *ap);
300 static void ahci_qc_prep(struct ata_queued_cmd *qc);
301 static void ahci_freeze(struct ata_port *ap);
302 static void ahci_thaw(struct ata_port *ap);
303 static void ahci_pmp_attach(struct ata_port *ap);
304 static void ahci_pmp_detach(struct ata_port *ap);
305 static int ahci_softreset(struct ata_link *link, unsigned int *class,
306 unsigned long deadline);
307 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
308 unsigned long deadline);
309 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
310 unsigned long deadline);
311 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
312 unsigned long deadline);
313 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
314 unsigned long deadline);
315 static void ahci_postreset(struct ata_link *link, unsigned int *class);
316 static void ahci_error_handler(struct ata_port *ap);
317 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
318 static int ahci_port_resume(struct ata_port *ap);
319 static void ahci_dev_config(struct ata_device *dev);
320 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
321 u32 opts);
322 #ifdef CONFIG_PM
323 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
325 static int ahci_pci_device_resume(struct pci_dev *pdev);
326 #endif
327 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
328 static ssize_t ahci_activity_store(struct ata_device *dev,
329 enum sw_activity val);
330 static void ahci_init_sw_activity(struct ata_link *link);
331
332 static ssize_t ahci_show_host_caps(struct device *dev,
333 struct device_attribute *attr, char *buf);
334 static ssize_t ahci_show_host_version(struct device *dev,
335 struct device_attribute *attr, char *buf);
336 static ssize_t ahci_show_port_cmd(struct device *dev,
337 struct device_attribute *attr, char *buf);
338
339 DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
340 DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
341 DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
342
343 static struct device_attribute *ahci_shost_attrs[] = {
344 &dev_attr_link_power_management_policy,
345 &dev_attr_em_message_type,
346 &dev_attr_em_message,
347 &dev_attr_ahci_host_caps,
348 &dev_attr_ahci_host_version,
349 &dev_attr_ahci_port_cmd,
350 NULL
351 };
352
353 static struct device_attribute *ahci_sdev_attrs[] = {
354 &dev_attr_sw_activity,
355 &dev_attr_unload_heads,
356 NULL
357 };
358
359 static struct scsi_host_template ahci_sht = {
360 ATA_NCQ_SHT(DRV_NAME),
361 .can_queue = AHCI_MAX_CMDS - 1,
362 .sg_tablesize = AHCI_MAX_SG,
363 .dma_boundary = AHCI_DMA_BOUNDARY,
364 .shost_attrs = ahci_shost_attrs,
365 .sdev_attrs = ahci_sdev_attrs,
366 };
367
368 static struct ata_port_operations ahci_ops = {
369 .inherits = &sata_pmp_port_ops,
370
371 .qc_defer = sata_pmp_qc_defer_cmd_switch,
372 .qc_prep = ahci_qc_prep,
373 .qc_issue = ahci_qc_issue,
374 .qc_fill_rtf = ahci_qc_fill_rtf,
375
376 .freeze = ahci_freeze,
377 .thaw = ahci_thaw,
378 .softreset = ahci_softreset,
379 .hardreset = ahci_hardreset,
380 .postreset = ahci_postreset,
381 .pmp_softreset = ahci_softreset,
382 .error_handler = ahci_error_handler,
383 .post_internal_cmd = ahci_post_internal_cmd,
384 .dev_config = ahci_dev_config,
385
386 .scr_read = ahci_scr_read,
387 .scr_write = ahci_scr_write,
388 .pmp_attach = ahci_pmp_attach,
389 .pmp_detach = ahci_pmp_detach,
390
391 .enable_pm = ahci_enable_alpm,
392 .disable_pm = ahci_disable_alpm,
393 .em_show = ahci_led_show,
394 .em_store = ahci_led_store,
395 .sw_activity_show = ahci_activity_show,
396 .sw_activity_store = ahci_activity_store,
397 #ifdef CONFIG_PM
398 .port_suspend = ahci_port_suspend,
399 .port_resume = ahci_port_resume,
400 #endif
401 .port_start = ahci_port_start,
402 .port_stop = ahci_port_stop,
403 };
404
405 static struct ata_port_operations ahci_vt8251_ops = {
406 .inherits = &ahci_ops,
407 .hardreset = ahci_vt8251_hardreset,
408 };
409
410 static struct ata_port_operations ahci_p5wdh_ops = {
411 .inherits = &ahci_ops,
412 .hardreset = ahci_p5wdh_hardreset,
413 };
414
415 static struct ata_port_operations ahci_sb600_ops = {
416 .inherits = &ahci_ops,
417 .softreset = ahci_sb600_softreset,
418 .pmp_softreset = ahci_sb600_softreset,
419 };
420
421 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
422
423 static const struct ata_port_info ahci_port_info[] = {
424 [board_ahci] =
425 {
426 .flags = AHCI_FLAG_COMMON,
427 .pio_mask = ATA_PIO4,
428 .udma_mask = ATA_UDMA6,
429 .port_ops = &ahci_ops,
430 },
431 [board_ahci_vt8251] =
432 {
433 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
434 .flags = AHCI_FLAG_COMMON,
435 .pio_mask = ATA_PIO4,
436 .udma_mask = ATA_UDMA6,
437 .port_ops = &ahci_vt8251_ops,
438 },
439 [board_ahci_ign_iferr] =
440 {
441 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
442 .flags = AHCI_FLAG_COMMON,
443 .pio_mask = ATA_PIO4,
444 .udma_mask = ATA_UDMA6,
445 .port_ops = &ahci_ops,
446 },
447 [board_ahci_sb600] =
448 {
449 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
450 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255),
451 .flags = AHCI_FLAG_COMMON,
452 .pio_mask = ATA_PIO4,
453 .udma_mask = ATA_UDMA6,
454 .port_ops = &ahci_sb600_ops,
455 },
456 [board_ahci_mv] =
457 {
458 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
459 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
460 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
461 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
462 .pio_mask = ATA_PIO4,
463 .udma_mask = ATA_UDMA6,
464 .port_ops = &ahci_ops,
465 },
466 [board_ahci_sb700] = /* for SB700 and SB800 */
467 {
468 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
469 .flags = AHCI_FLAG_COMMON,
470 .pio_mask = ATA_PIO4,
471 .udma_mask = ATA_UDMA6,
472 .port_ops = &ahci_sb600_ops,
473 },
474 [board_ahci_mcp65] =
475 {
476 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
477 .flags = AHCI_FLAG_COMMON,
478 .pio_mask = ATA_PIO4,
479 .udma_mask = ATA_UDMA6,
480 .port_ops = &ahci_ops,
481 },
482 [board_ahci_nopmp] =
483 {
484 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
485 .flags = AHCI_FLAG_COMMON,
486 .pio_mask = ATA_PIO4,
487 .udma_mask = ATA_UDMA6,
488 .port_ops = &ahci_ops,
489 },
490 /* board_ahci_yesncq */
491 {
492 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
493 .flags = AHCI_FLAG_COMMON,
494 .pio_mask = ATA_PIO4,
495 .udma_mask = ATA_UDMA6,
496 .port_ops = &ahci_ops,
497 },
498 };
499
500 static const struct pci_device_id ahci_pci_tbl[] = {
501 /* Intel */
502 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
503 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
504 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
505 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
506 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
507 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
508 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
509 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
510 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
511 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
512 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
513 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
514 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
515 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
516 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
517 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
518 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
519 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
520 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
521 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
522 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
523 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
524 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
525 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
526 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
527 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
528 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
529 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
530 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
531 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
532 { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
533 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
534 { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
535 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
536 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
537 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
538 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
539 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
540 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
541 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
542
543 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
544 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
545 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
546
547 /* ATI */
548 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
549 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
550 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
551 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
552 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
553 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
554 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
555
556 /* AMD */
557 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */
558 /* AMD is using RAID class only for ahci controllers */
559 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
560 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
561
562 /* VIA */
563 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
564 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
565
566 /* NVIDIA */
567 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
568 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
569 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
570 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
571 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
572 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
573 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
574 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
575 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
576 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
577 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
578 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
579 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
580 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
581 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
582 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
583 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
584 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
585 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
586 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
587 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
588 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
589 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
590 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
591 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
592 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
593 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
594 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
595 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
596 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
597 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
598 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
599 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
600 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
601 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
602 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
603 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
604 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
605 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
606 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
607 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
608 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
609 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
610 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
611 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
612 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
613 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
614 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
615 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
616 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
617 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
618 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
619 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
620 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
621 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
622 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
623 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
624 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
625 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
626 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
627 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
628 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
629 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
630 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
631 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
632 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
633 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
634 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
635
636 /* SiS */
637 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
638 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
639 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
640
641 /* Marvell */
642 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
643 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
644
645 /* Promise */
646 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
647
648 /* Generic, PCI class code for AHCI */
649 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
650 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
651
652 { } /* terminate list */
653 };
654
655
656 static struct pci_driver ahci_pci_driver = {
657 .name = DRV_NAME,
658 .id_table = ahci_pci_tbl,
659 .probe = ahci_init_one,
660 .remove = ata_pci_remove_one,
661 #ifdef CONFIG_PM
662 .suspend = ahci_pci_device_suspend,
663 .resume = ahci_pci_device_resume,
664 #endif
665 };
666
667 static int ahci_em_messages = 1;
668 module_param(ahci_em_messages, int, 0444);
669 /* add other LED protocol types when they become supported */
670 MODULE_PARM_DESC(ahci_em_messages,
671 	"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");
672
673 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
674 static int marvell_enable;
675 #else
676 static int marvell_enable = 1;
677 #endif
678 module_param(marvell_enable, int, 0644);
679 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
680
681
682 static inline int ahci_nr_ports(u32 cap)
683 {
684 return (cap & 0x1f) + 1;
685 }
686
687 static inline void __iomem *__ahci_port_base(struct ata_host *host,
688 unsigned int port_no)
689 {
690 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
691
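	/* per the AHCI spec, port register banks start at ABAR offset 0x100
	 * and each port occupies 0x80 bytes
	 */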
692 return mmio + 0x100 + (port_no * 0x80);
693 }
694
695 static inline void __iomem *ahci_port_base(struct ata_port *ap)
696 {
697 return __ahci_port_base(ap->host, ap->port_no);
698 }
699
700 static void ahci_enable_ahci(void __iomem *mmio)
701 {
702 int i;
703 u32 tmp;
704
705 /* turn on AHCI_EN */
706 tmp = readl(mmio + HOST_CTL);
707 if (tmp & HOST_AHCI_EN)
708 return;
709
710 /* Some controllers need AHCI_EN to be written multiple times.
711 * Try a few times before giving up.
712 */
713 for (i = 0; i < 5; i++) {
714 tmp |= HOST_AHCI_EN;
715 writel(tmp, mmio + HOST_CTL);
716 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
717 if (tmp & HOST_AHCI_EN)
718 return;
719 msleep(10);
720 }
721
722 WARN_ON(1);
723 }
724
725 static ssize_t ahci_show_host_caps(struct device *dev,
726 struct device_attribute *attr, char *buf)
727 {
728 struct Scsi_Host *shost = class_to_shost(dev);
729 struct ata_port *ap = ata_shost_to_port(shost);
730 struct ahci_host_priv *hpriv = ap->host->private_data;
731
732 return sprintf(buf, "%x\n", hpriv->cap);
733 }
734
735 static ssize_t ahci_show_host_version(struct device *dev,
736 struct device_attribute *attr, char *buf)
737 {
738 struct Scsi_Host *shost = class_to_shost(dev);
739 struct ata_port *ap = ata_shost_to_port(shost);
740 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
741
742 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
743 }
744
745 static ssize_t ahci_show_port_cmd(struct device *dev,
746 struct device_attribute *attr, char *buf)
747 {
748 struct Scsi_Host *shost = class_to_shost(dev);
749 struct ata_port *ap = ata_shost_to_port(shost);
750 void __iomem *port_mmio = ahci_port_base(ap);
751
752 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
753 }
754
755 /**
756 * ahci_save_initial_config - Save and fixup initial config values
757 * @pdev: target PCI device
758 * @hpriv: host private area to store config values
759 *
760 * Some registers containing configuration info might be setup by
761 * BIOS and might be cleared on reset. This function saves the
762 * initial values of those registers into @hpriv such that they
763 * can be restored after controller reset.
764 *
765 * If inconsistent, config values are fixed up by this function.
766 *
767 * LOCKING:
768 * None.
769 */
770 static void ahci_save_initial_config(struct pci_dev *pdev,
771 struct ahci_host_priv *hpriv)
772 {
773 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
774 u32 cap, port_map;
775 int i;
776 int mv;
777
778 /* make sure AHCI mode is enabled before accessing CAP */
779 ahci_enable_ahci(mmio);
780
781 /* Values prefixed with saved_ are written back to host after
782 * reset. Values without are used for driver operation.
783 */
784 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
785 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
786
787 /* some chips have errata preventing 64bit use */
788 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
789 dev_printk(KERN_INFO, &pdev->dev,
790 "controller can't do 64bit DMA, forcing 32bit\n");
791 cap &= ~HOST_CAP_64;
792 }
793
794 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
795 dev_printk(KERN_INFO, &pdev->dev,
796 "controller can't do NCQ, turning off CAP_NCQ\n");
797 cap &= ~HOST_CAP_NCQ;
798 }
799
800 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
801 dev_printk(KERN_INFO, &pdev->dev,
802 "controller can do NCQ, turning on CAP_NCQ\n");
803 cap |= HOST_CAP_NCQ;
804 }
805
806 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
807 dev_printk(KERN_INFO, &pdev->dev,
808 "controller can't do PMP, turning off CAP_PMP\n");
809 cap &= ~HOST_CAP_PMP;
810 }
811
812 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
813 port_map != 1) {
814 dev_printk(KERN_INFO, &pdev->dev,
815 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
816 port_map, 1);
817 port_map = 1;
818 }
819
820 /*
821 * Temporary Marvell 6145 hack: PATA port presence
822 * is asserted through the standard AHCI port
823 * presence register, as bit 4 (counting from 0)
824 */
825 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
826 if (pdev->device == 0x6121)
827 mv = 0x3;
828 else
829 mv = 0xf;
830 dev_printk(KERN_ERR, &pdev->dev,
831 "MV_AHCI HACK: port_map %x -> %x\n",
832 port_map,
833 port_map & mv);
834 dev_printk(KERN_ERR, &pdev->dev,
835 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
836
837 port_map &= mv;
838 }
839
840 /* cross check port_map and cap.n_ports */
841 if (port_map) {
842 int map_ports = 0;
843
844 for (i = 0; i < AHCI_MAX_PORTS; i++)
845 if (port_map & (1 << i))
846 map_ports++;
847
848 /* If PI has more ports than n_ports, whine, clear
849 * port_map and let it be generated from n_ports.
850 */
851 if (map_ports > ahci_nr_ports(cap)) {
852 dev_printk(KERN_WARNING, &pdev->dev,
853 "implemented port map (0x%x) contains more "
854 "ports than nr_ports (%u), using nr_ports\n",
855 port_map, ahci_nr_ports(cap));
856 port_map = 0;
857 }
858 }
859
860 /* fabricate port_map from cap.nr_ports */
861 if (!port_map) {
862 port_map = (1 << ahci_nr_ports(cap)) - 1;
863 dev_printk(KERN_WARNING, &pdev->dev,
864 "forcing PORTS_IMPL to 0x%x\n", port_map);
865
866 /* write the fixed up value to the PI register */
867 hpriv->saved_port_map = port_map;
868 }
869
870 /* record values to use during operation */
871 hpriv->cap = cap;
872 hpriv->port_map = port_map;
873 }
874
875 /**
876 * ahci_restore_initial_config - Restore initial config
877 * @host: target ATA host
878 *
879 * Restore initial config stored by ahci_save_initial_config().
880 *
881 * LOCKING:
882 * None.
883 */
884 static void ahci_restore_initial_config(struct ata_host *host)
885 {
886 struct ahci_host_priv *hpriv = host->private_data;
887 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
888
889 writel(hpriv->saved_cap, mmio + HOST_CAP);
890 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
891 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
892 }
893
894 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
895 {
896 static const int offset[] = {
897 [SCR_STATUS] = PORT_SCR_STAT,
898 [SCR_CONTROL] = PORT_SCR_CTL,
899 [SCR_ERROR] = PORT_SCR_ERR,
900 [SCR_ACTIVE] = PORT_SCR_ACT,
901 [SCR_NOTIFICATION] = PORT_SCR_NTF,
902 };
903 struct ahci_host_priv *hpriv = ap->host->private_data;
904
905 if (sc_reg < ARRAY_SIZE(offset) &&
906 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
907 return offset[sc_reg];
908 return 0;
909 }
910
911 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
912 {
913 void __iomem *port_mmio = ahci_port_base(link->ap);
914 int offset = ahci_scr_offset(link->ap, sc_reg);
915
916 if (offset) {
917 *val = readl(port_mmio + offset);
918 return 0;
919 }
920 return -EINVAL;
921 }
922
923 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
924 {
925 void __iomem *port_mmio = ahci_port_base(link->ap);
926 int offset = ahci_scr_offset(link->ap, sc_reg);
927
928 if (offset) {
929 writel(val, port_mmio + offset);
930 return 0;
931 }
932 return -EINVAL;
933 }
934
935 static void ahci_start_engine(struct ata_port *ap)
936 {
937 void __iomem *port_mmio = ahci_port_base(ap);
938 u32 tmp;
939
940 /* start DMA */
941 tmp = readl(port_mmio + PORT_CMD);
942 tmp |= PORT_CMD_START;
943 writel(tmp, port_mmio + PORT_CMD);
944 readl(port_mmio + PORT_CMD); /* flush */
945 }
946
947 static int ahci_stop_engine(struct ata_port *ap)
948 {
949 void __iomem *port_mmio = ahci_port_base(ap);
950 u32 tmp;
951
952 tmp = readl(port_mmio + PORT_CMD);
953
954 /* check if the HBA is idle */
955 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
956 return 0;
957
958 /* setting HBA to idle */
959 tmp &= ~PORT_CMD_START;
960 writel(tmp, port_mmio + PORT_CMD);
961
962 /* wait for engine to stop. This could be as long as 500 msec */
963 tmp = ata_wait_register(port_mmio + PORT_CMD,
964 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
965 if (tmp & PORT_CMD_LIST_ON)
966 return -EIO;
967
968 return 0;
969 }
970
971 static void ahci_start_fis_rx(struct ata_port *ap)
972 {
973 void __iomem *port_mmio = ahci_port_base(ap);
974 struct ahci_host_priv *hpriv = ap->host->private_data;
975 struct ahci_port_priv *pp = ap->private_data;
976 u32 tmp;
977
978 /* set FIS registers */
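	/* program the command list and received-FIS base addresses; the
	 * double 16-bit shift stores the upper dword without triggering a
	 * shift-count warning when dma_addr_t is only 32 bits wide
	 */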
979 if (hpriv->cap & HOST_CAP_64)
980 writel((pp->cmd_slot_dma >> 16) >> 16,
981 port_mmio + PORT_LST_ADDR_HI);
982 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
983
984 if (hpriv->cap & HOST_CAP_64)
985 writel((pp->rx_fis_dma >> 16) >> 16,
986 port_mmio + PORT_FIS_ADDR_HI);
987 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
988
989 /* enable FIS reception */
990 tmp = readl(port_mmio + PORT_CMD);
991 tmp |= PORT_CMD_FIS_RX;
992 writel(tmp, port_mmio + PORT_CMD);
993
994 /* flush */
995 readl(port_mmio + PORT_CMD);
996 }
997
998 static int ahci_stop_fis_rx(struct ata_port *ap)
999 {
1000 void __iomem *port_mmio = ahci_port_base(ap);
1001 u32 tmp;
1002
1003 /* disable FIS reception */
1004 tmp = readl(port_mmio + PORT_CMD);
1005 tmp &= ~PORT_CMD_FIS_RX;
1006 writel(tmp, port_mmio + PORT_CMD);
1007
1008 /* wait for completion, spec says 500ms, give it 1000 */
1009 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1010 PORT_CMD_FIS_ON, 10, 1000);
1011 if (tmp & PORT_CMD_FIS_ON)
1012 return -EBUSY;
1013
1014 return 0;
1015 }
1016
1017 static void ahci_power_up(struct ata_port *ap)
1018 {
1019 struct ahci_host_priv *hpriv = ap->host->private_data;
1020 void __iomem *port_mmio = ahci_port_base(ap);
1021 u32 cmd;
1022
1023 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1024
1025 /* spin up device */
1026 if (hpriv->cap & HOST_CAP_SSS) {
1027 cmd |= PORT_CMD_SPIN_UP;
1028 writel(cmd, port_mmio + PORT_CMD);
1029 }
1030
1031 /* wake up link */
1032 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1033 }
1034
1035 static void ahci_disable_alpm(struct ata_port *ap)
1036 {
1037 struct ahci_host_priv *hpriv = ap->host->private_data;
1038 void __iomem *port_mmio = ahci_port_base(ap);
1039 u32 cmd;
1040 struct ahci_port_priv *pp = ap->private_data;
1041
1042 /* IPM bits should be disabled by libata-core */
1043 /* get the existing command bits */
1044 cmd = readl(port_mmio + PORT_CMD);
1045
1046 /* disable ALPM and ASP */
1047 cmd &= ~PORT_CMD_ASP;
1048 cmd &= ~PORT_CMD_ALPE;
1049
1050 /* force the interface back to active */
1051 cmd |= PORT_CMD_ICC_ACTIVE;
1052
1053 /* write out new cmd value */
1054 writel(cmd, port_mmio + PORT_CMD);
1055 cmd = readl(port_mmio + PORT_CMD);
1056
1057 /* wait 10ms to be sure we've come out of any low power state */
1058 msleep(10);
1059
1060 /* clear out any PhyRdy stuff from interrupt status */
1061 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1062
1063 /* go ahead and clean out PhyRdy Change from Serror too */
1064 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1065
1066 /*
1067 * Clear flag to indicate that we should ignore all PhyRdy
1068 * state changes
1069 */
1070 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1071
1072 /*
1073 * Enable interrupts on Phy Ready.
1074 */
1075 pp->intr_mask |= PORT_IRQ_PHYRDY;
1076 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1077
1078 /*
1079 * don't change the link pm policy - we can be called
1080 	 * just to turn off link pm temporarily
1081 */
1082 }
1083
1084 static int ahci_enable_alpm(struct ata_port *ap,
1085 enum link_pm policy)
1086 {
1087 struct ahci_host_priv *hpriv = ap->host->private_data;
1088 void __iomem *port_mmio = ahci_port_base(ap);
1089 u32 cmd;
1090 struct ahci_port_priv *pp = ap->private_data;
1091 u32 asp;
1092
1093 /* Make sure the host is capable of link power management */
1094 if (!(hpriv->cap & HOST_CAP_ALPM))
1095 return -EINVAL;
1096
1097 switch (policy) {
1098 case MAX_PERFORMANCE:
1099 case NOT_AVAILABLE:
1100 /*
1101 * if we came here with NOT_AVAILABLE,
1102 * it just means this is the first time we
1103 * have tried to enable - default to max performance,
1104 * and let the user go to lower power modes on request.
1105 */
1106 ahci_disable_alpm(ap);
1107 return 0;
1108 case MIN_POWER:
1109 /* configure HBA to enter SLUMBER */
1110 asp = PORT_CMD_ASP;
1111 break;
1112 case MEDIUM_POWER:
1113 /* configure HBA to enter PARTIAL */
1114 asp = 0;
1115 break;
1116 default:
1117 return -EINVAL;
1118 }
1119
1120 /*
1121 * Disable interrupts on Phy Ready. This keeps us from
1122 * getting woken up due to spurious phy ready interrupts
1123 * TBD - Hot plug should be done via polling now, is
1124 * that even supported?
1125 */
1126 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1127 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1128
1129 /*
1130 * Set a flag to indicate that we should ignore all PhyRdy
1131 * state changes since these can happen now whenever we
1132 * change link state
1133 */
1134 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1135
1136 /* get the existing command bits */
1137 cmd = readl(port_mmio + PORT_CMD);
1138
1139 /*
1140 * Set ASP based on Policy
1141 */
1142 cmd |= asp;
1143
1144 /*
1145 * Setting this bit will instruct the HBA to aggressively
1146 * enter a lower power link state when it's appropriate and
1147 * based on the value set above for ASP
1148 */
1149 cmd |= PORT_CMD_ALPE;
1150
1151 /* write out new cmd value */
1152 writel(cmd, port_mmio + PORT_CMD);
1153 cmd = readl(port_mmio + PORT_CMD);
1154
1155 /* IPM bits should be set by libata-core */
1156 return 0;
1157 }
1158
1159 #ifdef CONFIG_PM
1160 static void ahci_power_down(struct ata_port *ap)
1161 {
1162 struct ahci_host_priv *hpriv = ap->host->private_data;
1163 void __iomem *port_mmio = ahci_port_base(ap);
1164 u32 cmd, scontrol;
1165
1166 if (!(hpriv->cap & HOST_CAP_SSS))
1167 return;
1168
1169 /* put device into listen mode, first set PxSCTL.DET to 0 */
1170 scontrol = readl(port_mmio + PORT_SCR_CTL);
1171 scontrol &= ~0xf;
1172 writel(scontrol, port_mmio + PORT_SCR_CTL);
1173
1174 /* then set PxCMD.SUD to 0 */
1175 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1176 cmd &= ~PORT_CMD_SPIN_UP;
1177 writel(cmd, port_mmio + PORT_CMD);
1178 }
1179 #endif
1180
1181 static void ahci_start_port(struct ata_port *ap)
1182 {
1183 struct ahci_port_priv *pp = ap->private_data;
1184 struct ata_link *link;
1185 struct ahci_em_priv *emp;
1186 ssize_t rc;
1187 int i;
1188
1189 /* enable FIS reception */
1190 ahci_start_fis_rx(ap);
1191
1192 /* enable DMA */
1193 ahci_start_engine(ap);
1194
1195 /* turn on LEDs */
1196 if (ap->flags & ATA_FLAG_EM) {
1197 ata_for_each_link(link, ap, EDGE) {
1198 emp = &pp->em_priv[link->pmp];
1199
1200 	/* EM Transmit bit may be busy during init */
1201 for (i = 0; i < EM_MAX_RETRY; i++) {
1202 rc = ahci_transmit_led_message(ap,
1203 emp->led_state,
1204 4);
1205 if (rc == -EBUSY)
1206 msleep(1);
1207 else
1208 break;
1209 }
1210 }
1211 }
1212
1213 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1214 ata_for_each_link(link, ap, EDGE)
1215 ahci_init_sw_activity(link);
1216
1217 }
1218
1219 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1220 {
1221 int rc;
1222
1223 /* disable DMA */
1224 rc = ahci_stop_engine(ap);
1225 if (rc) {
1226 *emsg = "failed to stop engine";
1227 return rc;
1228 }
1229
1230 /* disable FIS reception */
1231 rc = ahci_stop_fis_rx(ap);
1232 if (rc) {
1233 	*emsg = "failed to stop FIS RX";
1234 return rc;
1235 }
1236
1237 return 0;
1238 }
1239
1240 static int ahci_reset_controller(struct ata_host *host)
1241 {
1242 struct pci_dev *pdev = to_pci_dev(host->dev);
1243 struct ahci_host_priv *hpriv = host->private_data;
1244 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1245 u32 tmp;
1246
1247 /* we must be in AHCI mode, before using anything
1248 * AHCI-specific, such as HOST_RESET.
1249 */
1250 ahci_enable_ahci(mmio);
1251
1252 /* global controller reset */
1253 if (!ahci_skip_host_reset) {
1254 tmp = readl(mmio + HOST_CTL);
1255 if ((tmp & HOST_RESET) == 0) {
1256 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1257 readl(mmio + HOST_CTL); /* flush */
1258 }
1259
1260 /*
1261 * to perform host reset, OS should set HOST_RESET
1262 	 * and poll until the bit reads back as "0".
1263 * reset must complete within 1 second, or
1264 * the hardware should be considered fried.
1265 */
1266 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1267 HOST_RESET, 10, 1000);
1268
1269 if (tmp & HOST_RESET) {
1270 dev_printk(KERN_ERR, host->dev,
1271 "controller reset failed (0x%x)\n", tmp);
1272 return -EIO;
1273 }
1274
1275 /* turn on AHCI mode */
1276 ahci_enable_ahci(mmio);
1277
1278 /* Some registers might be cleared on reset. Restore
1279 * initial values.
1280 */
1281 ahci_restore_initial_config(host);
1282 } else
1283 dev_printk(KERN_INFO, host->dev,
1284 "skipping global host reset\n");
1285
1286 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1287 u16 tmp16;
1288
1289 /* configure PCS */
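		/* PCS (Port Control and Status) at config offset 0x92 carries
		 * per-port enable bits on Intel chipsets; make sure every
		 * implemented port is enabled there as well
		 */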
1290 pci_read_config_word(pdev, 0x92, &tmp16);
1291 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1292 tmp16 |= hpriv->port_map;
1293 pci_write_config_word(pdev, 0x92, tmp16);
1294 }
1295 }
1296
1297 return 0;
1298 }
1299
1300 static void ahci_sw_activity(struct ata_link *link)
1301 {
1302 struct ata_port *ap = link->ap;
1303 struct ahci_port_priv *pp = ap->private_data;
1304 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1305
1306 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1307 return;
1308
1309 emp->activity++;
1310 if (!timer_pending(&emp->timer))
1311 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1312 }
1313
1314 static void ahci_sw_activity_blink(unsigned long arg)
1315 {
1316 struct ata_link *link = (struct ata_link *)arg;
1317 struct ata_port *ap = link->ap;
1318 struct ahci_port_priv *pp = ap->private_data;
1319 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1320 unsigned long led_message = emp->led_state;
1321 u32 activity_led_state;
1322 unsigned long flags;
1323
1324 led_message &= EM_MSG_LED_VALUE;
1325 led_message |= ap->port_no | (link->pmp << 8);
1326
1327 /* check to see if we've had activity. If so,
1328 * toggle state of LED and reset timer. If not,
1329 * turn LED to desired idle state.
1330 */
1331 spin_lock_irqsave(ap->lock, flags);
1332 if (emp->saved_activity != emp->activity) {
1333 emp->saved_activity = emp->activity;
1334 /* get the current LED state */
1335 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1336
1337 if (activity_led_state)
1338 activity_led_state = 0;
1339 else
1340 activity_led_state = 1;
1341
1342 /* clear old state */
1343 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1344
1345 /* toggle state */
1346 led_message |= (activity_led_state << 16);
1347 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1348 } else {
1349 /* switch to idle */
1350 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1351 if (emp->blink_policy == BLINK_OFF)
1352 led_message |= (1 << 16);
1353 }
1354 spin_unlock_irqrestore(ap->lock, flags);
1355 ahci_transmit_led_message(ap, led_message, 4);
1356 }
1357
1358 static void ahci_init_sw_activity(struct ata_link *link)
1359 {
1360 struct ata_port *ap = link->ap;
1361 struct ahci_port_priv *pp = ap->private_data;
1362 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1363
1364 /* init activity stats, setup timer */
1365 emp->saved_activity = emp->activity = 0;
1366 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1367
1368 /* check our blink policy and set flag for link if it's enabled */
1369 if (emp->blink_policy)
1370 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1371 }
1372
1373 static int ahci_reset_em(struct ata_host *host)
1374 {
1375 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1376 u32 em_ctl;
1377
1378 em_ctl = readl(mmio + HOST_EM_CTL);
1379 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1380 return -EINVAL;
1381
1382 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1383 return 0;
1384 }
1385
1386 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1387 ssize_t size)
1388 {
1389 struct ahci_host_priv *hpriv = ap->host->private_data;
1390 struct ahci_port_priv *pp = ap->private_data;
1391 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1392 u32 em_ctl;
1393 u32 message[] = {0, 0};
1394 unsigned long flags;
1395 int pmp;
1396 struct ahci_em_priv *emp;
1397
1398 /* get the slot number from the message */
1399 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1400 if (pmp < EM_MAX_SLOTS)
1401 emp = &pp->em_priv[pmp];
1402 else
1403 return -EINVAL;
1404
1405 spin_lock_irqsave(ap->lock, flags);
1406
1407 /*
1408 * if we are still busy transmitting a previous message,
1409 	 * do not allow a new one to be sent
1410 */
1411 em_ctl = readl(mmio + HOST_EM_CTL);
1412 if (em_ctl & EM_CTL_TM) {
1413 spin_unlock_irqrestore(ap->lock, flags);
1414 return -EBUSY;
1415 }
1416
1417 /*
1418 * create message header - this is all zero except for
1419 * the message size, which is 4 bytes.
1420 */
1421 message[0] |= (4 << 8);
1422
1423 /* ignore 0:4 of byte zero, fill in port info yourself */
1424 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1425
1426 /* write message to EM_LOC */
1427 writel(message[0], mmio + hpriv->em_loc);
1428 writel(message[1], mmio + hpriv->em_loc+4);
1429
1430 /* save off new led state for port/slot */
1431 emp->led_state = state;
1432
1433 /*
1434 * tell hardware to transmit the message
1435 */
1436 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1437
1438 spin_unlock_irqrestore(ap->lock, flags);
1439 return size;
1440 }
1441
1442 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1443 {
1444 struct ahci_port_priv *pp = ap->private_data;
1445 struct ata_link *link;
1446 struct ahci_em_priv *emp;
1447 int rc = 0;
1448
1449 ata_for_each_link(link, ap, EDGE) {
1450 emp = &pp->em_priv[link->pmp];
1451 rc += sprintf(buf, "%lx\n", emp->led_state);
1452 }
1453 return rc;
1454 }
1455
1456 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1457 size_t size)
1458 {
1459 int state;
1460 int pmp;
1461 struct ahci_port_priv *pp = ap->private_data;
1462 struct ahci_em_priv *emp;
1463
1464 state = simple_strtoul(buf, NULL, 0);
1465
1466 /* get the slot number from the message */
1467 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1468 if (pmp < EM_MAX_SLOTS)
1469 emp = &pp->em_priv[pmp];
1470 else
1471 return -EINVAL;
1472
1473 /* mask off the activity bits if we are in sw_activity
1474 * mode, user should turn off sw_activity before setting
1475 * activity led through em_message
1476 */
1477 if (emp->blink_policy)
1478 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1479
1480 return ahci_transmit_led_message(ap, state, size);
1481 }
1482
1483 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1484 {
1485 struct ata_link *link = dev->link;
1486 struct ata_port *ap = link->ap;
1487 struct ahci_port_priv *pp = ap->private_data;
1488 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1489 u32 port_led_state = emp->led_state;
1490
1491 /* save the desired Activity LED behavior */
1492 if (val == OFF) {
1493 /* clear LFLAG */
1494 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1495
1496 /* set the LED to OFF */
1497 port_led_state &= EM_MSG_LED_VALUE_OFF;
1498 port_led_state |= (ap->port_no | (link->pmp << 8));
1499 ahci_transmit_led_message(ap, port_led_state, 4);
1500 } else {
1501 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1502 if (val == BLINK_OFF) {
1503 /* set LED to ON for idle */
1504 port_led_state &= EM_MSG_LED_VALUE_OFF;
1505 port_led_state |= (ap->port_no | (link->pmp << 8));
1506 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1507 ahci_transmit_led_message(ap, port_led_state, 4);
1508 }
1509 }
1510 emp->blink_policy = val;
1511 return 0;
1512 }
1513
1514 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1515 {
1516 struct ata_link *link = dev->link;
1517 struct ata_port *ap = link->ap;
1518 struct ahci_port_priv *pp = ap->private_data;
1519 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1520
1521 /* display the saved value of activity behavior for this
1522 * disk.
1523 */
1524 return sprintf(buf, "%d\n", emp->blink_policy);
1525 }
1526
1527 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1528 int port_no, void __iomem *mmio,
1529 void __iomem *port_mmio)
1530 {
1531 const char *emsg = NULL;
1532 int rc;
1533 u32 tmp;
1534
1535 /* make sure port is not active */
1536 rc = ahci_deinit_port(ap, &emsg);
1537 if (rc)
1538 dev_printk(KERN_WARNING, &pdev->dev,
1539 "%s (%d)\n", emsg, rc);
1540
1541 /* clear SError */
1542 tmp = readl(port_mmio + PORT_SCR_ERR);
1543 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1544 writel(tmp, port_mmio + PORT_SCR_ERR);
1545
1546 /* clear port IRQ */
1547 tmp = readl(port_mmio + PORT_IRQ_STAT);
1548 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1549 if (tmp)
1550 writel(tmp, port_mmio + PORT_IRQ_STAT);
1551
1552 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1553 }
1554
1555 static void ahci_init_controller(struct ata_host *host)
1556 {
1557 struct ahci_host_priv *hpriv = host->private_data;
1558 struct pci_dev *pdev = to_pci_dev(host->dev);
1559 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1560 int i;
1561 void __iomem *port_mmio;
1562 u32 tmp;
1563 int mv;
1564
1565 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1566 if (pdev->device == 0x6121)
1567 mv = 2;
1568 else
1569 mv = 4;
1570 port_mmio = __ahci_port_base(host, mv);
1571
1572 writel(0, port_mmio + PORT_IRQ_MASK);
1573
1574 /* clear port IRQ */
1575 tmp = readl(port_mmio + PORT_IRQ_STAT);
1576 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1577 if (tmp)
1578 writel(tmp, port_mmio + PORT_IRQ_STAT);
1579 }
1580
1581 for (i = 0; i < host->n_ports; i++) {
1582 struct ata_port *ap = host->ports[i];
1583
1584 port_mmio = ahci_port_base(ap);
1585 if (ata_port_is_dummy(ap))
1586 continue;
1587
1588 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1589 }
1590
1591 tmp = readl(mmio + HOST_CTL);
1592 VPRINTK("HOST_CTL 0x%x\n", tmp);
1593 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1594 tmp = readl(mmio + HOST_CTL);
1595 VPRINTK("HOST_CTL 0x%x\n", tmp);
1596 }
1597
1598 static void ahci_dev_config(struct ata_device *dev)
1599 {
1600 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1601
1602 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1603 dev->max_sectors = 255;
1604 ata_dev_printk(dev, KERN_INFO,
1605 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1606 }
1607 }
1608
1609 static unsigned int ahci_dev_classify(struct ata_port *ap)
1610 {
1611 void __iomem *port_mmio = ahci_port_base(ap);
1612 struct ata_taskfile tf;
1613 u32 tmp;
1614
1615 tmp = readl(port_mmio + PORT_SIG);
1616 tf.lbah = (tmp >> 24) & 0xff;
1617 tf.lbam = (tmp >> 16) & 0xff;
1618 tf.lbal = (tmp >> 8) & 0xff;
1619 tf.nsect = (tmp) & 0xff;
1620
1621 return ata_dev_classify(&tf);
1622 }
1623
1624 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1625 u32 opts)
1626 {
1627 dma_addr_t cmd_tbl_dma;
1628
1629 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1630
1631 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1632 pp->cmd_slot[tag].status = 0;
1633 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1634 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1635 }
1636
1637 static int ahci_kick_engine(struct ata_port *ap)
1638 {
1639 void __iomem *port_mmio = ahci_port_base(ap);
1640 struct ahci_host_priv *hpriv = ap->host->private_data;
1641 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1642 u32 tmp;
1643 int busy, rc;
1644
1645 /* stop engine */
1646 rc = ahci_stop_engine(ap);
1647 if (rc)
1648 goto out_restart;
1649
1650 /* need to do CLO?
1651 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1652 */
1653 busy = status & (ATA_BUSY | ATA_DRQ);
1654 if (!busy && !sata_pmp_attached(ap)) {
1655 rc = 0;
1656 goto out_restart;
1657 }
1658
1659 if (!(hpriv->cap & HOST_CAP_CLO)) {
1660 rc = -EOPNOTSUPP;
1661 goto out_restart;
1662 }
1663
1664 /* perform CLO */
1665 tmp = readl(port_mmio + PORT_CMD);
1666 tmp |= PORT_CMD_CLO;
1667 writel(tmp, port_mmio + PORT_CMD);
1668
1669 rc = 0;
1670 tmp = ata_wait_register(port_mmio + PORT_CMD,
1671 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1672 if (tmp & PORT_CMD_CLO)
1673 rc = -EIO;
1674
1675 /* restart engine */
1676 out_restart:
1677 ahci_start_engine(ap);
1678 return rc;
1679 }
1680
1681 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1682 struct ata_taskfile *tf, int is_cmd, u16 flags,
1683 unsigned long timeout_msec)
1684 {
1685 const u32 cmd_fis_len = 5; /* five dwords */
1686 struct ahci_port_priv *pp = ap->private_data;
1687 void __iomem *port_mmio = ahci_port_base(ap);
1688 u8 *fis = pp->cmd_tbl;
1689 u32 tmp;
1690
1691 /* prep the command */
1692 ata_tf_to_fis(tf, pmp, is_cmd, fis);
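	/* command header DW0: FIS length in dwords in bits 4:0, caller-supplied
	 * flags, and the target PMP port number in bits 15:12
	 */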
1693 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1694
1695 /* issue & wait */
1696 writel(1, port_mmio + PORT_CMD_ISSUE);
1697
1698 if (timeout_msec) {
1699 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1700 1, timeout_msec);
1701 if (tmp & 0x1) {
1702 ahci_kick_engine(ap);
1703 return -EBUSY;
1704 }
1705 } else
1706 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1707
1708 return 0;
1709 }
1710
1711 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1712 int pmp, unsigned long deadline,
1713 int (*check_ready)(struct ata_link *link))
1714 {
1715 struct ata_port *ap = link->ap;
1716 struct ahci_host_priv *hpriv = ap->host->private_data;
1717 const char *reason = NULL;
1718 unsigned long now, msecs;
1719 struct ata_taskfile tf;
1720 int rc;
1721
1722 DPRINTK("ENTER\n");
1723
1724 /* prepare for SRST (AHCI-1.1 10.4.1) */
1725 rc = ahci_kick_engine(ap);
1726 if (rc && rc != -EOPNOTSUPP)
1727 ata_link_printk(link, KERN_WARNING,
1728 "failed to reset engine (errno=%d)\n", rc);
1729
1730 ata_tf_init(link->device, &tf);
1731
1732 /* issue the first D2H Register FIS */
1733 msecs = 0;
1734 now = jiffies;
1735 if (time_after(now, deadline))
1736 msecs = jiffies_to_msecs(deadline - now);
1737
1738 tf.ctl |= ATA_SRST;
1739 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1740 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1741 rc = -EIO;
1742 reason = "1st FIS failed";
1743 goto fail;
1744 }
1745
1746 /* spec says at least 5us, but be generous and sleep for 1ms */
1747 msleep(1);
1748
1749 /* issue the second D2H Register FIS */
1750 tf.ctl &= ~ATA_SRST;
1751 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1752
1753 /* wait for link to become ready */
1754 rc = ata_wait_after_reset(link, deadline, check_ready);
1755 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1756 /*
1757 * Workaround for cases where link online status can't
1758 * be trusted. Treat device readiness timeout as link
1759 * offline.
1760 */
1761 ata_link_printk(link, KERN_INFO,
1762 "device not ready, treating as offline\n");
1763 *class = ATA_DEV_NONE;
1764 } else if (rc) {
1765 /* link occupied, -ENODEV too is an error */
1766 reason = "device not ready";
1767 goto fail;
1768 } else
1769 *class = ahci_dev_classify(ap);
1770
1771 DPRINTK("EXIT, class=%u\n", *class);
1772 return 0;
1773
1774 fail:
1775 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1776 return rc;
1777 }
1778
1779 static int ahci_check_ready(struct ata_link *link)
1780 {
1781 void __iomem *port_mmio = ahci_port_base(link->ap);
1782 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1783
1784 return ata_check_ready(status);
1785 }
1786
1787 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1788 unsigned long deadline)
1789 {
1790 int pmp = sata_srst_pmp(link);
1791
1792 DPRINTK("ENTER\n");
1793
1794 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1795 }
1796
1797 static int ahci_sb600_check_ready(struct ata_link *link)
1798 {
1799 void __iomem *port_mmio = ahci_port_base(link->ap);
1800 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1801 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1802
1803 /*
1804	 * If BAD PMP is flagged (a HW bug on these chips), there is no need
1805	 * to check TFDATA; bailing out early saves the timeout delay.
1806 */
1807 if (irq_status & PORT_IRQ_BAD_PMP)
1808 return -EIO;
1809
1810 return ata_check_ready(status);
1811 }
1812
1813 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1814 unsigned long deadline)
1815 {
1816 struct ata_port *ap = link->ap;
1817 void __iomem *port_mmio = ahci_port_base(ap);
1818 int pmp = sata_srst_pmp(link);
1819 int rc;
1820 u32 irq_sts;
1821
1822 DPRINTK("ENTER\n");
1823
1824 rc = ahci_do_softreset(link, class, pmp, deadline,
1825 ahci_sb600_check_ready);
1826
1827 /*
1828	 * Soft reset fails with IPMS set on some ATI chips when PMP support
1829	 * is enabled but a plain SATA HDD/ODD is connected to the port;
1830	 * in that case, retry the soft reset against PMP port 0.
1831 */
1832 if (rc == -EIO) {
1833 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1834 if (irq_sts & PORT_IRQ_BAD_PMP) {
1835 ata_link_printk(link, KERN_WARNING,
1836 "applying SB600 PMP SRST workaround "
1837 "and retrying\n");
1838 rc = ahci_do_softreset(link, class, 0, deadline,
1839 ahci_check_ready);
1840 }
1841 }
1842
1843 return rc;
1844 }
1845
1846 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1847 unsigned long deadline)
1848 {
1849 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1850 struct ata_port *ap = link->ap;
1851 struct ahci_port_priv *pp = ap->private_data;
1852 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1853 struct ata_taskfile tf;
1854 bool online;
1855 int rc;
1856
1857 DPRINTK("ENTER\n");
1858
1859 ahci_stop_engine(ap);
1860
1861 /* clear D2H reception area to properly wait for D2H FIS */
1862 ata_tf_init(link->device, &tf);
1863 tf.command = 0x80;
1864 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1865
1866 rc = sata_link_hardreset(link, timing, deadline, &online,
1867 ahci_check_ready);
1868
1869 ahci_start_engine(ap);
1870
1871 if (online)
1872 *class = ahci_dev_classify(ap);
1873
1874 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1875 return rc;
1876 }
1877
1878 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1879 unsigned long deadline)
1880 {
1881 struct ata_port *ap = link->ap;
1882 bool online;
1883 int rc;
1884
1885 DPRINTK("ENTER\n");
1886
1887 ahci_stop_engine(ap);
1888
1889 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1890 deadline, &online, NULL);
1891
1892 ahci_start_engine(ap);
1893
1894 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1895
1896 /* vt8251 doesn't clear BSY on signature FIS reception,
1897 * request follow-up softreset.
1898 */
1899 return online ? -EAGAIN : rc;
1900 }
1901
1902 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1903 unsigned long deadline)
1904 {
1905 struct ata_port *ap = link->ap;
1906 struct ahci_port_priv *pp = ap->private_data;
1907 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1908 struct ata_taskfile tf;
1909 bool online;
1910 int rc;
1911
1912 ahci_stop_engine(ap);
1913
1914 /* clear D2H reception area to properly wait for D2H FIS */
1915 ata_tf_init(link->device, &tf);
1916 tf.command = 0x80;
1917 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1918
1919 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1920 deadline, &online, NULL);
1921
1922 ahci_start_engine(ap);
1923
1924 /* The pseudo configuration device on SIMG4726 attached to
1925 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1926 * hardreset if no device is attached to the first downstream
1927 * port && the pseudo device locks up on SRST w/ PMP==0. To
1928 * work around this, wait for !BSY only briefly. If BSY isn't
1929 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1930 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1931 *
1932	 * Wait for two seconds.  Devices attached to the downstream port
1933	 * which can't process the following IDENTIFY within this window
1934	 * will have to be reset again.  For most cases this should
1935	 * suffice while keeping probing reasonably snappy.
1936 */
1937 if (online) {
1938 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1939 ahci_check_ready);
1940 if (rc)
1941 ahci_kick_engine(ap);
1942 }
1943 return rc;
1944 }
1945
1946 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1947 {
1948 struct ata_port *ap = link->ap;
1949 void __iomem *port_mmio = ahci_port_base(ap);
1950 u32 new_tmp, tmp;
1951
1952 ata_std_postreset(link, class);
1953
1954 /* Make sure port's ATAPI bit is set appropriately */
1955 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1956 if (*class == ATA_DEV_ATAPI)
1957 new_tmp |= PORT_CMD_ATAPI;
1958 else
1959 new_tmp &= ~PORT_CMD_ATAPI;
1960 if (new_tmp != tmp) {
1961 writel(new_tmp, port_mmio + PORT_CMD);
1962 readl(port_mmio + PORT_CMD); /* flush */
1963 }
1964 }
1965
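/*
 * ahci_fill_sg - build the PRDT for a command
 *
 * Each scatterlist element becomes one PRDT entry in the command table:
 * a 32-bit low/high DMA address pair plus a flags/size dword whose low
 * bits hold the byte count minus one.  Returns the number of entries
 * written so the caller can store it in the command header.
 */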
1966 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1967 {
1968 struct scatterlist *sg;
1969 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1970 unsigned int si;
1971
1972 VPRINTK("ENTER\n");
1973
1974 /*
1975 * Next, the S/G list.
1976 */
1977 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1978 dma_addr_t addr = sg_dma_address(sg);
1979 u32 sg_len = sg_dma_len(sg);
1980
1981 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1982 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1983 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1984 }
1985
1986 return si;
1987 }
1988
1989 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1990 {
1991 struct ata_port *ap = qc->ap;
1992 struct ahci_port_priv *pp = ap->private_data;
1993 int is_atapi = ata_is_atapi(qc->tf.protocol);
1994 void *cmd_tbl;
1995 u32 opts;
1996 const u32 cmd_fis_len = 5; /* five dwords */
1997 unsigned int n_elem;
1998
1999 /*
2000 * Fill in command table information. First, the header,
2001 * a SATA Register - Host to Device command FIS.
2002 */
2003 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2004
2005 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2006 if (is_atapi) {
2007 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2008 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2009 }
2010
2011 n_elem = 0;
2012 if (qc->flags & ATA_QCFLAG_DMAMAP)
2013 n_elem = ahci_fill_sg(qc, cmd_tbl);
2014
2015 /*
2016 * Fill in command slot information.
2017 */
2018 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2019 if (qc->tf.flags & ATA_TFLAG_WRITE)
2020 opts |= AHCI_CMD_WRITE;
2021 if (is_atapi)
2022 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2023
2024 ahci_fill_cmd_slot(pp, qc->tag, opts);
2025 }
2026
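/*
 * ahci_error_intr - decode an error PORT_IRQ_STAT and hand off to EH
 *
 * Record the raw interrupt status and SError (SError must be cleared or
 * the HBA may lock up, see below), translate the individual error bits
 * into libata error masks and EH actions on the affected link, and then
 * either freeze the port for fatal errors or abort outstanding commands.
 */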
2027 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2028 {
2029 struct ahci_host_priv *hpriv = ap->host->private_data;
2030 struct ahci_port_priv *pp = ap->private_data;
2031 struct ata_eh_info *host_ehi = &ap->link.eh_info;
2032 struct ata_link *link = NULL;
2033 struct ata_queued_cmd *active_qc;
2034 struct ata_eh_info *active_ehi;
2035 u32 serror;
2036
2037 /* determine active link */
2038 ata_for_each_link(link, ap, EDGE)
2039 if (ata_link_active(link))
2040 break;
2041 if (!link)
2042 link = &ap->link;
2043
2044 active_qc = ata_qc_from_tag(ap, link->active_tag);
2045 active_ehi = &link->eh_info;
2046
2047 /* record irq stat */
2048 ata_ehi_clear_desc(host_ehi);
2049 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2050
2051 /* AHCI needs SError cleared; otherwise, it might lock up */
2052 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2053 ahci_scr_write(&ap->link, SCR_ERROR, serror);
2054 host_ehi->serror |= serror;
2055
2056 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2057 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2058 irq_stat &= ~PORT_IRQ_IF_ERR;
2059
2060 if (irq_stat & PORT_IRQ_TF_ERR) {
2061 /* If qc is active, charge it; otherwise, the active
2062 * link. There's no active qc on NCQ errors. It will
2063 * be determined by EH by reading log page 10h.
2064 */
2065 if (active_qc)
2066 active_qc->err_mask |= AC_ERR_DEV;
2067 else
2068 active_ehi->err_mask |= AC_ERR_DEV;
2069
2070 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2071 host_ehi->serror &= ~SERR_INTERNAL;
2072 }
2073
2074 if (irq_stat & PORT_IRQ_UNK_FIS) {
2075 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2076
2077 active_ehi->err_mask |= AC_ERR_HSM;
2078 active_ehi->action |= ATA_EH_RESET;
2079 ata_ehi_push_desc(active_ehi,
2080 "unknown FIS %08x %08x %08x %08x" ,
2081 unk[0], unk[1], unk[2], unk[3]);
2082 }
2083
2084 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2085 active_ehi->err_mask |= AC_ERR_HSM;
2086 active_ehi->action |= ATA_EH_RESET;
2087 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2088 }
2089
2090 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2091 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2092 host_ehi->action |= ATA_EH_RESET;
2093 ata_ehi_push_desc(host_ehi, "host bus error");
2094 }
2095
2096 if (irq_stat & PORT_IRQ_IF_ERR) {
2097 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2098 host_ehi->action |= ATA_EH_RESET;
2099 ata_ehi_push_desc(host_ehi, "interface fatal error");
2100 }
2101
2102 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2103 ata_ehi_hotplugged(host_ehi);
2104 ata_ehi_push_desc(host_ehi, "%s",
2105 irq_stat & PORT_IRQ_CONNECT ?
2106 "connection status changed" : "PHY RDY changed");
2107 }
2108
2109 /* okay, let's hand over to EH */
2110
2111 if (irq_stat & PORT_IRQ_FREEZE)
2112 ata_port_freeze(ap);
2113 else
2114 ata_port_abort(ap);
2115 }
2116
2117 static void ahci_port_intr(struct ata_port *ap)
2118 {
2119 void __iomem *port_mmio = ahci_port_base(ap);
2120 struct ata_eh_info *ehi = &ap->link.eh_info;
2121 struct ahci_port_priv *pp = ap->private_data;
2122 struct ahci_host_priv *hpriv = ap->host->private_data;
2123 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2124 u32 status, qc_active;
2125 int rc;
2126
2127 status = readl(port_mmio + PORT_IRQ_STAT);
2128 writel(status, port_mmio + PORT_IRQ_STAT);
2129
2130 /* ignore BAD_PMP while resetting */
2131 if (unlikely(resetting))
2132 status &= ~PORT_IRQ_BAD_PMP;
2133
2134	/* When hotplug is disabled, a PhyRdy change is just a link
2135	 * power state change; ignore the interrupt here and also
2136	 * clear the PhyRdy-change and Comm-Wake bits from
2137	 * SError.
2138	 */
2139 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2140 (status & PORT_IRQ_PHYRDY)) {
2141 status &= ~PORT_IRQ_PHYRDY;
2142 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2143 }
2144
2145 if (unlikely(status & PORT_IRQ_ERROR)) {
2146 ahci_error_intr(ap, status);
2147 return;
2148 }
2149
2150 if (status & PORT_IRQ_SDB_FIS) {
2151 /* If SNotification is available, leave notification
2152 * handling to sata_async_notification(). If not,
2153 * emulate it by snooping SDB FIS RX area.
2154 *
2155 * Snooping FIS RX area is probably cheaper than
2156		 * poking SNotification but some controllers which
2157 * implement SNotification, ICH9 for example, don't
2158 * store AN SDB FIS into receive area.
2159 */
2160 if (hpriv->cap & HOST_CAP_SNTF)
2161 sata_async_notification(ap);
2162 else {
2163 /* If the 'N' bit in word 0 of the FIS is set,
2164 * we just received asynchronous notification.
2165 * Tell libata about it.
2166 */
2167 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2168 u32 f0 = le32_to_cpu(f[0]);
2169
2170 if (f0 & (1 << 15))
2171 sata_async_notification(ap);
2172 }
2173 }
2174
2175 /* pp->active_link is valid iff any command is in flight */
2176 if (ap->qc_active && pp->active_link->sactive)
2177 qc_active = readl(port_mmio + PORT_SCR_ACT);
2178 else
2179 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2180
2181 rc = ata_qc_complete_multiple(ap, qc_active);
2182
2183 /* while resetting, invalid completions are expected */
2184 if (unlikely(rc < 0 && !resetting)) {
2185 ehi->err_mask |= AC_ERR_HSM;
2186 ehi->action |= ATA_EH_RESET;
2187 ata_port_freeze(ap);
2188 }
2189 }
2190
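/*
 * ahci_interrupt - top-level (shared) interrupt handler
 *
 * Read HOST_IRQ_STAT, service every implemented port whose bit is set,
 * then write the status back to acknowledge it.  HOST_IRQ_STAT acts as a
 * level-triggered latch, so it must be cleared only after the per-port
 * events have been handled (see the comment near the end).
 */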
2191 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2192 {
2193 struct ata_host *host = dev_instance;
2194 struct ahci_host_priv *hpriv;
2195 unsigned int i, handled = 0;
2196 void __iomem *mmio;
2197 u32 irq_stat, irq_masked;
2198
2199 VPRINTK("ENTER\n");
2200
2201 hpriv = host->private_data;
2202 mmio = host->iomap[AHCI_PCI_BAR];
2203
2204 /* sigh. 0xffffffff is a valid return from h/w */
2205 irq_stat = readl(mmio + HOST_IRQ_STAT);
2206 if (!irq_stat)
2207 return IRQ_NONE;
2208
2209 irq_masked = irq_stat & hpriv->port_map;
2210
2211 spin_lock(&host->lock);
2212
2213 for (i = 0; i < host->n_ports; i++) {
2214 struct ata_port *ap;
2215
2216 if (!(irq_masked & (1 << i)))
2217 continue;
2218
2219 ap = host->ports[i];
2220 if (ap) {
2221 ahci_port_intr(ap);
2222 VPRINTK("port %u\n", i);
2223 } else {
2224 VPRINTK("port %u (no irq)\n", i);
2225 if (ata_ratelimit())
2226 dev_printk(KERN_WARNING, host->dev,
2227 "interrupt on disabled port %u\n", i);
2228 }
2229
2230 handled = 1;
2231 }
2232
2233 /* HOST_IRQ_STAT behaves as level triggered latch meaning that
2234 * it should be cleared after all the port events are cleared;
2235 * otherwise, it will raise a spurious interrupt after each
2236 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2237 * information.
2238 *
2239 * Also, use the unmasked value to clear interrupt as spurious
2240 * pending event on a dummy port might cause screaming IRQ.
2241 */
2242 writel(irq_stat, mmio + HOST_IRQ_STAT);
2243
2244 spin_unlock(&host->lock);
2245
2246 VPRINTK("EXIT\n");
2247
2248 return IRQ_RETVAL(handled);
2249 }
2250
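/*
 * ahci_qc_issue - hand a prepared command to the HBA
 *
 * PORT_SCR_ACT and PORT_CMD_ISSUE are both bitmaps indexed by command
 * tag.  For NCQ commands the tag must be set in PORT_SCR_ACT before the
 * corresponding bit in PORT_CMD_ISSUE is written to start execution.
 */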
2251 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2252 {
2253 struct ata_port *ap = qc->ap;
2254 void __iomem *port_mmio = ahci_port_base(ap);
2255 struct ahci_port_priv *pp = ap->private_data;
2256
2257 /* Keep track of the currently active link. It will be used
2258 * in completion path to determine whether NCQ phase is in
2259 * progress.
2260 */
2261 pp->active_link = qc->dev->link;
2262
2263 if (qc->tf.protocol == ATA_PROT_NCQ)
2264 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2265 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2266
2267 ahci_sw_activity(qc->dev->link);
2268
2269 return 0;
2270 }
2271
2272 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2273 {
2274 struct ahci_port_priv *pp = qc->ap->private_data;
2275 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2276
2277 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2278 return true;
2279 }
2280
2281 static void ahci_freeze(struct ata_port *ap)
2282 {
2283 void __iomem *port_mmio = ahci_port_base(ap);
2284
2285 /* turn IRQ off */
2286 writel(0, port_mmio + PORT_IRQ_MASK);
2287 }
2288
2289 static void ahci_thaw(struct ata_port *ap)
2290 {
2291 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2292 void __iomem *port_mmio = ahci_port_base(ap);
2293 u32 tmp;
2294 struct ahci_port_priv *pp = ap->private_data;
2295
2296 /* clear IRQ */
2297 tmp = readl(port_mmio + PORT_IRQ_STAT);
2298 writel(tmp, port_mmio + PORT_IRQ_STAT);
2299 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2300
2301 /* turn IRQ back on */
2302 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2303 }
2304
2305 static void ahci_error_handler(struct ata_port *ap)
2306 {
2307 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2308 /* restart engine */
2309 ahci_stop_engine(ap);
2310 ahci_start_engine(ap);
2311 }
2312
2313 sata_pmp_error_handler(ap);
2314 }
2315
2316 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2317 {
2318 struct ata_port *ap = qc->ap;
2319
2320 /* make DMA engine forget about the failed command */
2321 if (qc->flags & ATA_QCFLAG_FAILED)
2322 ahci_kick_engine(ap);
2323 }
2324
2325 static void ahci_pmp_attach(struct ata_port *ap)
2326 {
2327 void __iomem *port_mmio = ahci_port_base(ap);
2328 struct ahci_port_priv *pp = ap->private_data;
2329 u32 cmd;
2330
2331 cmd = readl(port_mmio + PORT_CMD);
2332 cmd |= PORT_CMD_PMP;
2333 writel(cmd, port_mmio + PORT_CMD);
2334
2335 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2336 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2337 }
2338
2339 static void ahci_pmp_detach(struct ata_port *ap)
2340 {
2341 void __iomem *port_mmio = ahci_port_base(ap);
2342 struct ahci_port_priv *pp = ap->private_data;
2343 u32 cmd;
2344
2345 cmd = readl(port_mmio + PORT_CMD);
2346 cmd &= ~PORT_CMD_PMP;
2347 writel(cmd, port_mmio + PORT_CMD);
2348
2349 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2350 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2351 }
2352
2353 static int ahci_port_resume(struct ata_port *ap)
2354 {
2355 ahci_power_up(ap);
2356 ahci_start_port(ap);
2357
2358 if (sata_pmp_attached(ap))
2359 ahci_pmp_attach(ap);
2360 else
2361 ahci_pmp_detach(ap);
2362
2363 return 0;
2364 }
2365
2366 #ifdef CONFIG_PM
2367 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2368 {
2369 const char *emsg = NULL;
2370 int rc;
2371
2372 rc = ahci_deinit_port(ap, &emsg);
2373 if (rc == 0)
2374 ahci_power_down(ap);
2375 else {
2376 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2377 ahci_start_port(ap);
2378 }
2379
2380 return rc;
2381 }
2382
2383 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2384 {
2385 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2386 struct ahci_host_priv *hpriv = host->private_data;
2387 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2388 u32 ctl;
2389
2390 if (mesg.event & PM_EVENT_SUSPEND &&
2391 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2392 dev_printk(KERN_ERR, &pdev->dev,
2393 "BIOS update required for suspend/resume\n");
2394 return -EIO;
2395 }
2396
2397 if (mesg.event & PM_EVENT_SLEEP) {
2398 /* AHCI spec rev1.1 section 8.3.3:
2399 * Software must disable interrupts prior to requesting a
2400 * transition of the HBA to D3 state.
2401 */
2402 ctl = readl(mmio + HOST_CTL);
2403 ctl &= ~HOST_IRQ_EN;
2404 writel(ctl, mmio + HOST_CTL);
2405 readl(mmio + HOST_CTL); /* flush */
2406 }
2407
2408 return ata_pci_device_suspend(pdev, mesg);
2409 }
2410
2411 static int ahci_pci_device_resume(struct pci_dev *pdev)
2412 {
2413 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2414 int rc;
2415
2416 rc = ata_pci_device_do_resume(pdev);
2417 if (rc)
2418 return rc;
2419
2420 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2421 rc = ahci_reset_controller(host);
2422 if (rc)
2423 return rc;
2424
2425 ahci_init_controller(host);
2426 }
2427
2428 ata_host_resume(host);
2429
2430 return 0;
2431 }
2432 #endif
2433
2434 static int ahci_port_start(struct ata_port *ap)
2435 {
2436 struct device *dev = ap->host->dev;
2437 struct ahci_port_priv *pp;
2438 void *mem;
2439 dma_addr_t mem_dma;
2440
2441 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2442 if (!pp)
2443 return -ENOMEM;
2444
2445 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2446 GFP_KERNEL);
2447 if (!mem)
2448 return -ENOMEM;
2449 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2450
2451 /*
2452 * First item in chunk of DMA memory: 32-slot command table,
2453 * 32 bytes each in size
2454 */
2455 pp->cmd_slot = mem;
2456 pp->cmd_slot_dma = mem_dma;
2457
2458 mem += AHCI_CMD_SLOT_SZ;
2459 mem_dma += AHCI_CMD_SLOT_SZ;
2460
2461 /*
2462 * Second item: Received-FIS area
2463 */
2464 pp->rx_fis = mem;
2465 pp->rx_fis_dma = mem_dma;
2466
2467 mem += AHCI_RX_FIS_SZ;
2468 mem_dma += AHCI_RX_FIS_SZ;
2469
2470 /*
2471 * Third item: data area for storing a single command
2472 * and its scatter-gather table
2473 */
2474 pp->cmd_tbl = mem;
2475 pp->cmd_tbl_dma = mem_dma;
2476
2477 /*
2478 * Save off initial list of interrupts to be enabled.
2479 * This could be changed later
2480 */
2481 pp->intr_mask = DEF_PORT_IRQ;
2482
2483 ap->private_data = pp;
2484
2485 /* engage engines, captain */
2486 return ahci_port_resume(ap);
2487 }
2488
2489 static void ahci_port_stop(struct ata_port *ap)
2490 {
2491 const char *emsg = NULL;
2492 int rc;
2493
2494 /* de-initialize port */
2495 rc = ahci_deinit_port(ap, &emsg);
2496 if (rc)
2497 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2498 }
2499
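/*
 * ahci_configure_dma_masks - pick streaming/coherent DMA masks
 *
 * When the caller indicates 64-bit addressing is usable, try the 64-bit
 * masks first, falling back to a 32-bit coherent mask if the 64-bit one
 * cannot be set; otherwise configure plain 32-bit DMA.
 */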
2500 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2501 {
2502 int rc;
2503
2504 if (using_dac &&
2505 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2506 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2507 if (rc) {
2508 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2509 if (rc) {
2510 dev_printk(KERN_ERR, &pdev->dev,
2511 "64-bit DMA enable failed\n");
2512 return rc;
2513 }
2514 }
2515 } else {
2516 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2517 if (rc) {
2518 dev_printk(KERN_ERR, &pdev->dev,
2519 "32-bit DMA enable failed\n");
2520 return rc;
2521 }
2522 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2523 if (rc) {
2524 dev_printk(KERN_ERR, &pdev->dev,
2525 "32-bit consistent DMA enable failed\n");
2526 return rc;
2527 }
2528 }
2529 return 0;
2530 }
2531
2532 static void ahci_print_info(struct ata_host *host)
2533 {
2534 struct ahci_host_priv *hpriv = host->private_data;
2535 struct pci_dev *pdev = to_pci_dev(host->dev);
2536 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2537 u32 vers, cap, impl, speed;
2538 const char *speed_s;
2539 u16 cc;
2540 const char *scc_s;
2541
2542 vers = readl(mmio + HOST_VERSION);
2543 cap = hpriv->cap;
2544 impl = hpriv->port_map;
2545
2546 speed = (cap >> 20) & 0xf;
2547 if (speed == 1)
2548 speed_s = "1.5";
2549 else if (speed == 2)
2550 speed_s = "3";
2551 else if (speed == 3)
2552 speed_s = "6";
2553 else
2554 speed_s = "?";
2555
2556 pci_read_config_word(pdev, 0x0a, &cc);
2557 if (cc == PCI_CLASS_STORAGE_IDE)
2558 scc_s = "IDE";
2559 else if (cc == PCI_CLASS_STORAGE_SATA)
2560 scc_s = "SATA";
2561 else if (cc == PCI_CLASS_STORAGE_RAID)
2562 scc_s = "RAID";
2563 else
2564 scc_s = "unknown";
2565
2566 dev_printk(KERN_INFO, &pdev->dev,
2567 "AHCI %02x%02x.%02x%02x "
2568 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2569 ,
2570
2571 (vers >> 24) & 0xff,
2572 (vers >> 16) & 0xff,
2573 (vers >> 8) & 0xff,
2574 vers & 0xff,
2575
2576 ((cap >> 8) & 0x1f) + 1,
2577 (cap & 0x1f) + 1,
2578 speed_s,
2579 impl,
2580 scc_s);
2581
2582 dev_printk(KERN_INFO, &pdev->dev,
2583 "flags: "
2584 "%s%s%s%s%s%s%s"
2585 "%s%s%s%s%s%s%s"
2586 "%s\n"
2587 ,
2588
2589 cap & (1 << 31) ? "64bit " : "",
2590 cap & (1 << 30) ? "ncq " : "",
2591 cap & (1 << 29) ? "sntf " : "",
2592 cap & (1 << 28) ? "ilck " : "",
2593 cap & (1 << 27) ? "stag " : "",
2594 cap & (1 << 26) ? "pm " : "",
2595 cap & (1 << 25) ? "led " : "",
2596
2597 cap & (1 << 24) ? "clo " : "",
2598 cap & (1 << 19) ? "nz " : "",
2599 cap & (1 << 18) ? "only " : "",
2600 cap & (1 << 17) ? "pmp " : "",
2601 cap & (1 << 15) ? "pio " : "",
2602 cap & (1 << 14) ? "slum " : "",
2603 cap & (1 << 13) ? "part " : "",
2604 cap & (1 << 6) ? "ems ": ""
2605 );
2606 }
2607
2608 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2609 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2610 * support PMP and the 4726 either directly exports the device
2611 * attached to the first downstream port or acts as a hardware storage
2612 * controller and emulates a single ATA device (can be RAID 0/1 or some
2613 * other configuration).
2614 *
2615 * When there's no device attached to the first downstream port of the
2616 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2617 * configure the 4726. However, ATA emulation of the device is very
2618 * lame. It doesn't send signature D2H Reg FIS after the initial
2619 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2620 *
2621 * The following function works around the problem by always using
2622 * hardreset on the port and not depending on receiving signature FIS
2623 * afterward. If signature FIS isn't received soon, ATA class is
2624 * assumed without follow-up softreset.
2625 */
2626 static void ahci_p5wdh_workaround(struct ata_host *host)
2627 {
2628 static struct dmi_system_id sysids[] = {
2629 {
2630 .ident = "P5W DH Deluxe",
2631 .matches = {
2632 DMI_MATCH(DMI_SYS_VENDOR,
2633 "ASUSTEK COMPUTER INC"),
2634 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2635 },
2636 },
2637 { }
2638 };
2639 struct pci_dev *pdev = to_pci_dev(host->dev);
2640
2641 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2642 dmi_check_system(sysids)) {
2643 struct ata_port *ap = host->ports[1];
2644
2645 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2646 "Deluxe on-board SIMG4726 workaround\n");
2647
2648 ap->ops = &ahci_p5wdh_ops;
2649 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2650 }
2651 }
2652
2653 /*
2654 * The SB600 AHCI controller on certain boards can't do 64-bit DMA
2655 * with an older BIOS.
2656 */
2657 static bool ahci_sb600_32bit_only(struct pci_dev *pdev)
2658 {
2659 static const struct dmi_system_id sysids[] = {
2660 /*
2661		 * The oldest version known to be broken is 0901; the oldest
2662		 * known-good version is 1501, released on 2007-10-26.
2663		 * Force 32-bit DMA on anything older than 1501.
2664 * Please read bko#9412 for more info.
2665 */
2666 {
2667 .ident = "ASUS M2A-VM",
2668 .matches = {
2669 DMI_MATCH(DMI_BOARD_VENDOR,
2670 "ASUSTeK Computer INC."),
2671 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2672 },
2673 .driver_data = "20071026", /* yyyymmdd */
2674 },
2675 /*
2676		 * It's not yet known whether a more recent BIOS fixes the
2677 * problem. Blacklist the whole board for the time
2678 * being. Please read the following thread for more
2679 * info.
2680 *
2681 * http://thread.gmane.org/gmane.linux.ide/42326
2682 */
2683 {
2684 .ident = "Gigabyte GA-MA69VM-S2",
2685 .matches = {
2686 DMI_MATCH(DMI_BOARD_VENDOR,
2687 "Gigabyte Technology Co., Ltd."),
2688 DMI_MATCH(DMI_BOARD_NAME, "GA-MA69VM-S2"),
2689 },
2690 },
2691 { }
2692 };
2693 const struct dmi_system_id *match;
2694
2695 match = dmi_first_match(sysids);
2696 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2697 !match)
2698 return false;
2699
2700 if (match->driver_data) {
2701 int year, month, date;
2702 char buf[9];
2703
2704 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2705 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2706
2707 if (strcmp(buf, match->driver_data) >= 0)
2708 return false;
2709
2710 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
2711 "forcing 32bit DMA, update BIOS\n", match->ident);
2712 } else
2713 dev_printk(KERN_WARNING, &pdev->dev, "%s: this board can't "
2714 "do 64bit DMA, forcing 32bit\n", match->ident);
2715
2716 return true;
2717 }
2718
2719 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2720 {
2721 static const struct dmi_system_id broken_systems[] = {
2722 {
2723 .ident = "HP Compaq nx6310",
2724 .matches = {
2725 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2726 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2727 },
2728 /* PCI slot number of the controller */
2729 .driver_data = (void *)0x1FUL,
2730 },
2731 {
2732 .ident = "HP Compaq 6720s",
2733 .matches = {
2734 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2735 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2736 },
2737 /* PCI slot number of the controller */
2738 .driver_data = (void *)0x1FUL,
2739 },
2740
2741 { } /* terminate list */
2742 };
2743 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2744
2745 if (dmi) {
2746 unsigned long slot = (unsigned long)dmi->driver_data;
2747 /* apply the quirk only to on-board controllers */
2748 return slot == PCI_SLOT(pdev->devfn);
2749 }
2750
2751 return false;
2752 }
2753
2754 static bool ahci_broken_suspend(struct pci_dev *pdev)
2755 {
2756 static const struct dmi_system_id sysids[] = {
2757 /*
2758		 * On HP dv[4-6] and HDX18 with earlier BIOSes, the link
2759		 * to the hard disk doesn't come back online after
2760		 * resuming from STR.  Warn and fail suspend.
2761 */
2762 {
2763 .ident = "dv4",
2764 .matches = {
2765 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2766 DMI_MATCH(DMI_PRODUCT_NAME,
2767 "HP Pavilion dv4 Notebook PC"),
2768 },
2769 .driver_data = "F.30", /* cutoff BIOS version */
2770 },
2771 {
2772 .ident = "dv5",
2773 .matches = {
2774 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2775 DMI_MATCH(DMI_PRODUCT_NAME,
2776 "HP Pavilion dv5 Notebook PC"),
2777 },
2778 .driver_data = "F.16", /* cutoff BIOS version */
2779 },
2780 {
2781 .ident = "dv6",
2782 .matches = {
2783 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2784 DMI_MATCH(DMI_PRODUCT_NAME,
2785 "HP Pavilion dv6 Notebook PC"),
2786 },
2787 .driver_data = "F.21", /* cutoff BIOS version */
2788 },
2789 {
2790 .ident = "HDX18",
2791 .matches = {
2792 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2793 DMI_MATCH(DMI_PRODUCT_NAME,
2794 "HP HDX18 Notebook PC"),
2795 },
2796 .driver_data = "F.23", /* cutoff BIOS version */
2797 },
2798 { } /* terminate list */
2799 };
2800 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2801 const char *ver;
2802
2803 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
2804 return false;
2805
2806 ver = dmi_get_system_info(DMI_BIOS_VERSION);
2807
2808 return !ver || strcmp(ver, dmi->driver_data) < 0;
2809 }
2810
2811 static bool ahci_broken_online(struct pci_dev *pdev)
2812 {
2813 #define ENCODE_BUSDEVFN(bus, slot, func) \
2814 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
2815 static const struct dmi_system_id sysids[] = {
2816 /*
2817		 * There are several Gigabyte boards which use
2818		 * SIMG5723s configured as hardware RAID.  Certain
2819		 * 5723 firmware revisions shipped on them keep the link
2820		 * online but fail to answer SRST or IDENTIFY properly
2821		 * when no device is attached downstream, causing
2822		 * libata to retry quite a few times and leading
2823		 * to excessive detection delay.
2824		 *
2825		 * As these firmwares respond to the second reset attempt
2826		 * with an invalid device signature, treating an unknown
2827		 * signature as offline works around the problem acceptably.
2828 */
2829 {
2830 .ident = "EP45-DQ6",
2831 .matches = {
2832 DMI_MATCH(DMI_BOARD_VENDOR,
2833 "Gigabyte Technology Co., Ltd."),
2834 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
2835 },
2836 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
2837 },
2838 {
2839 .ident = "EP45-DS5",
2840 .matches = {
2841 DMI_MATCH(DMI_BOARD_VENDOR,
2842 "Gigabyte Technology Co., Ltd."),
2843 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
2844 },
2845 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
2846 },
2847 { } /* terminate list */
2848 };
2849 #undef ENCODE_BUSDEVFN
2850 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2851 unsigned int val;
2852
2853 if (!dmi)
2854 return false;
2855
2856 val = (unsigned long)dmi->driver_data;
2857
2858 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
2859 }
2860
2861 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2862 {
2863 static int printed_version;
2864 unsigned int board_id = ent->driver_data;
2865 struct ata_port_info pi = ahci_port_info[board_id];
2866 const struct ata_port_info *ppi[] = { &pi, NULL };
2867 struct device *dev = &pdev->dev;
2868 struct ahci_host_priv *hpriv;
2869 struct ata_host *host;
2870 int n_ports, i, rc;
2871
2872 VPRINTK("ENTER\n");
2873
2874 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2875
2876 if (!printed_version++)
2877 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2878
2879	/* The AHCI driver can only drive the SATA ports; the PATA driver
2880	   can drive them all, so if both drivers are selected make sure
2881	   AHCI stays out of the way */
2882 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2883 return -ENODEV;
2884
2885 /* acquire resources */
2886 rc = pcim_enable_device(pdev);
2887 if (rc)
2888 return rc;
2889
2890	/* AHCI controllers often implement an SFF-compatible interface.
2891 * Grab all PCI BARs just in case.
2892 */
2893 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2894 if (rc == -EBUSY)
2895 pcim_pin_device(pdev);
2896 if (rc)
2897 return rc;
2898
2899 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2900 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2901 u8 map;
2902
2903 /* ICH6s share the same PCI ID for both piix and ahci
2904 * modes. Enabling ahci mode while MAP indicates
2905 * combined mode is a bad idea. Yield to ata_piix.
2906 */
2907 pci_read_config_byte(pdev, ICH_MAP, &map);
2908 if (map & 0x3) {
2909 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2910 "combined mode, can't enable AHCI mode\n");
2911 return -ENODEV;
2912 }
2913 }
2914
2915 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2916 if (!hpriv)
2917 return -ENOMEM;
2918 hpriv->flags |= (unsigned long)pi.private_data;
2919
2920 /* MCP65 revision A1 and A2 can't do MSI */
2921 if (board_id == board_ahci_mcp65 &&
2922 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2923 hpriv->flags |= AHCI_HFLAG_NO_MSI;
2924
2925 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
2926 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
2927 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
2928
2929 /* apply sb600 32bit only quirk */
2930 if (ahci_sb600_32bit_only(pdev))
2931 hpriv->flags |= AHCI_HFLAG_32BIT_ONLY;
2932
2933 if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
2934 pci_enable_msi(pdev);
2935
2936 /* save initial config */
2937 ahci_save_initial_config(pdev, hpriv);
2938
2939 /* prepare host */
2940 if (hpriv->cap & HOST_CAP_NCQ)
2941 pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
2942
2943 if (hpriv->cap & HOST_CAP_PMP)
2944 pi.flags |= ATA_FLAG_PMP;
2945
2946 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2947 u8 messages;
2948 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2949 u32 em_loc = readl(mmio + HOST_EM_LOC);
2950 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2951
2952 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2953
2954 /* we only support LED message type right now */
2955 if ((messages & 0x01) && (ahci_em_messages == 1)) {
2956			/* store em_loc: the upper half of EM_LOC gives the
			 * message buffer offset in dwords; convert to bytes */
2957 hpriv->em_loc = ((em_loc >> 16) * 4);
2958 pi.flags |= ATA_FLAG_EM;
2959 if (!(em_ctl & EM_CTL_ALHD))
2960 pi.flags |= ATA_FLAG_SW_ACTIVITY;
2961 }
2962 }
2963
2964 if (ahci_broken_system_poweroff(pdev)) {
2965 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
2966 dev_info(&pdev->dev,
2967 "quirky BIOS, skipping spindown on poweroff\n");
2968 }
2969
2970 if (ahci_broken_suspend(pdev)) {
2971 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
2972 dev_printk(KERN_WARNING, &pdev->dev,
2973 "BIOS update required for suspend/resume\n");
2974 }
2975
2976 if (ahci_broken_online(pdev)) {
2977 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
2978 dev_info(&pdev->dev,
2979 "online status unreliable, applying workaround\n");
2980 }
2981
2982	/* CAP.NP sometimes indicates the index of the last enabled
2983 * port, at other times, that of the last possible port, so
2984 * determining the maximum port number requires looking at
2985 * both CAP.NP and port_map.
2986 */
2987 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2988
2989 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2990 if (!host)
2991 return -ENOMEM;
2992 host->iomap = pcim_iomap_table(pdev);
2993 host->private_data = hpriv;
2994
2995 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
2996 host->flags |= ATA_HOST_PARALLEL_SCAN;
2997 else
2998 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
2999
3000 if (pi.flags & ATA_FLAG_EM)
3001 ahci_reset_em(host);
3002
3003 for (i = 0; i < host->n_ports; i++) {
3004 struct ata_port *ap = host->ports[i];
3005
3006 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3007 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3008 0x100 + ap->port_no * 0x80, "port");
3009
3010 /* set initial link pm policy */
3011 ap->pm_policy = NOT_AVAILABLE;
3012
3013 /* set enclosure management message type */
3014 if (ap->flags & ATA_FLAG_EM)
3015 ap->em_message_type = ahci_em_messages;
3016
3017
3018 /* disabled/not-implemented port */
3019 if (!(hpriv->port_map & (1 << i)))
3020 ap->ops = &ata_dummy_port_ops;
3021 }
3022
3023 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3024 ahci_p5wdh_workaround(host);
3025
3026 /* initialize adapter */
3027 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3028 if (rc)
3029 return rc;
3030
3031 rc = ahci_reset_controller(host);
3032 if (rc)
3033 return rc;
3034
3035 ahci_init_controller(host);
3036 ahci_print_info(host);
3037
3038 pci_set_master(pdev);
3039 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3040 &ahci_sht);
3041 }
3042
3043 static int __init ahci_init(void)
3044 {
3045 return pci_register_driver(&ahci_pci_driver);
3046 }
3047
3048 static void __exit ahci_exit(void)
3049 {
3050 pci_unregister_driver(&ahci_pci_driver);
3051 }
3052
3053
3054 MODULE_AUTHOR("Jeff Garzik");
3055 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3056 MODULE_LICENSE("GPL");
3057 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3058 MODULE_VERSION(DRV_VERSION);
3059
3060 module_init(ahci_init);
3061 module_exit(ahci_exit);