ahci: Add AMD SB900 SATA/IDE controller device IDs
drivers/ata/ahci.c
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "3.0"
51
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE 0x000f0000
54
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT 0x0000000f
57 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
58 #define EM_MSG_LED_VALUE 0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
60 #define EM_MSG_LED_VALUE_OFF 0xfff80000
61 #define EM_MSG_LED_VALUE_ON 0x00010000
62
63 static int ahci_skip_host_reset;
64 static int ahci_ignore_sss;
65
66 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
67 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
68
69 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
70 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
71
72 static int ahci_enable_alpm(struct ata_port *ap,
73 enum link_pm policy);
74 static void ahci_disable_alpm(struct ata_port *ap);
75 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
76 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
77 size_t size);
78 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
79 ssize_t size);
80
81 enum {
82 AHCI_PCI_BAR = 5,
83 AHCI_MAX_PORTS = 32,
84 AHCI_MAX_SG = 168, /* hardware max is 64K */
85 AHCI_DMA_BOUNDARY = 0xffffffff,
86 AHCI_MAX_CMDS = 32,
87 AHCI_CMD_SZ = 32,
88 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
89 AHCI_RX_FIS_SZ = 256,
90 AHCI_CMD_TBL_CDB = 0x40,
91 AHCI_CMD_TBL_HDR_SZ = 0x80,
92 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
93 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
94 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
95 AHCI_RX_FIS_SZ,
96 AHCI_IRQ_ON_SG = (1 << 31),
97 AHCI_CMD_ATAPI = (1 << 5),
98 AHCI_CMD_WRITE = (1 << 6),
99 AHCI_CMD_PREFETCH = (1 << 7),
100 AHCI_CMD_RESET = (1 << 8),
101 AHCI_CMD_CLR_BUSY = (1 << 10),
102
103 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
104 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
105 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
106
107 board_ahci = 0,
108 board_ahci_vt8251 = 1,
109 board_ahci_ign_iferr = 2,
110 board_ahci_sb600 = 3,
111 board_ahci_mv = 4,
112 board_ahci_sb700 = 5, /* for SB700 and SB800 */
113 board_ahci_mcp65 = 6,
114 board_ahci_nopmp = 7,
115 board_ahci_yesncq = 8,
116
117 /* global controller registers */
118 HOST_CAP = 0x00, /* host capabilities */
119 HOST_CTL = 0x04, /* global host control */
120 HOST_IRQ_STAT = 0x08, /* interrupt status */
121 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
122 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
123 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
124 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
125
126 /* HOST_CTL bits */
127 HOST_RESET = (1 << 0), /* reset controller; self-clear */
128 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
129 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
130
131 /* HOST_CAP bits */
132 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
133 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
134 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
135 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
136 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
137 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
138 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
139 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
140 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
141
142 /* registers for each SATA port */
143 PORT_LST_ADDR = 0x00, /* command list DMA addr */
144 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
145 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
146 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
147 PORT_IRQ_STAT = 0x10, /* interrupt status */
148 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
149 PORT_CMD = 0x18, /* port command */
150 PORT_TFDATA = 0x20, /* taskfile data */
151 PORT_SIG = 0x24, /* device TF signature */
152 PORT_CMD_ISSUE = 0x38, /* command issue */
153 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
154 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
155 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
156 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
157 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
158
159 /* PORT_IRQ_{STAT,MASK} bits */
160 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
161 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
162 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
163 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
164 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
165 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
166 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
167 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
168
169 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
170 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
171 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
172 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
173 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
174 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
175 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
176 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
177 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
178
179 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
180 PORT_IRQ_IF_ERR |
181 PORT_IRQ_CONNECT |
182 PORT_IRQ_PHYRDY |
183 PORT_IRQ_UNK_FIS |
184 PORT_IRQ_BAD_PMP,
185 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
186 PORT_IRQ_TF_ERR |
187 PORT_IRQ_HBUS_DATA_ERR,
188 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
189 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
190 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
191
192 /* PORT_CMD bits */
193 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
194 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
195 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
196 PORT_CMD_PMP = (1 << 17), /* PMP attached */
197 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
198 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
199 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
200 PORT_CMD_CLO = (1 << 3), /* Command list override */
201 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
202 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
203 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
204
205 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
206 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
207 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
208 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
209
210 /* hpriv->flags bits */
211 AHCI_HFLAG_NO_NCQ = (1 << 0),
212 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
213 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
214 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
215 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
216 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
217 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
218 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
219 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
220 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
221 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
222 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
223 link offline */
224
225 /* ap->flags bits */
226
227 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
228 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
229 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
230 ATA_FLAG_IPM,
231
232 ICH_MAP = 0x90, /* ICH MAP register */
233
234 /* em constants */
235 EM_MAX_SLOTS = 8,
236 EM_MAX_RETRY = 5,
237
238 /* em_ctl bits */
239 EM_CTL_RST = (1 << 9), /* Reset */
240 EM_CTL_TM = (1 << 8), /* Transmit Message */
241 EM_CTL_ALHD = (1 << 26), /* Activity LED */
242 };
243
244 struct ahci_cmd_hdr {
245 __le32 opts;
246 __le32 status;
247 __le32 tbl_addr;
248 __le32 tbl_addr_hi;
249 __le32 reserved[4];
250 };
251
252 struct ahci_sg {
253 __le32 addr;
254 __le32 addr_hi;
255 __le32 reserved;
256 __le32 flags_size;
257 };
258
259 struct ahci_em_priv {
260 enum sw_activity blink_policy;
261 struct timer_list timer;
262 unsigned long saved_activity;
263 unsigned long activity;
264 unsigned long led_state;
265 };
266
267 struct ahci_host_priv {
268 unsigned int flags; /* AHCI_HFLAG_* */
269 u32 cap; /* cap to use */
270 u32 port_map; /* port map to use */
271 u32 saved_cap; /* saved initial cap */
272 u32 saved_port_map; /* saved initial port_map */
273 u32 em_loc; /* enclosure management location */
274 };
275
276 struct ahci_port_priv {
277 struct ata_link *active_link;
278 struct ahci_cmd_hdr *cmd_slot;
279 dma_addr_t cmd_slot_dma;
280 void *cmd_tbl;
281 dma_addr_t cmd_tbl_dma;
282 void *rx_fis;
283 dma_addr_t rx_fis_dma;
284 /* for NCQ spurious interrupt analysis */
285 unsigned int ncq_saw_d2h:1;
286 unsigned int ncq_saw_dmas:1;
287 unsigned int ncq_saw_sdb:1;
288 u32 intr_mask; /* interrupts to enable */
289 /* enclosure management info per PM slot */
290 struct ahci_em_priv em_priv[EM_MAX_SLOTS];
291 };
292
293 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
294 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
295 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
296 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
297 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
298 static int ahci_port_start(struct ata_port *ap);
299 static void ahci_port_stop(struct ata_port *ap);
300 static void ahci_qc_prep(struct ata_queued_cmd *qc);
301 static void ahci_freeze(struct ata_port *ap);
302 static void ahci_thaw(struct ata_port *ap);
303 static void ahci_pmp_attach(struct ata_port *ap);
304 static void ahci_pmp_detach(struct ata_port *ap);
305 static int ahci_softreset(struct ata_link *link, unsigned int *class,
306 unsigned long deadline);
307 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
308 unsigned long deadline);
309 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
310 unsigned long deadline);
311 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
312 unsigned long deadline);
313 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
314 unsigned long deadline);
315 static void ahci_postreset(struct ata_link *link, unsigned int *class);
316 static void ahci_error_handler(struct ata_port *ap);
317 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
318 static int ahci_port_resume(struct ata_port *ap);
319 static void ahci_dev_config(struct ata_device *dev);
320 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
321 u32 opts);
322 #ifdef CONFIG_PM
323 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
325 static int ahci_pci_device_resume(struct pci_dev *pdev);
326 #endif
327 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
328 static ssize_t ahci_activity_store(struct ata_device *dev,
329 enum sw_activity val);
330 static void ahci_init_sw_activity(struct ata_link *link);
331
332 static ssize_t ahci_show_host_caps(struct device *dev,
333 struct device_attribute *attr, char *buf);
334 static ssize_t ahci_show_host_version(struct device *dev,
335 struct device_attribute *attr, char *buf);
336 static ssize_t ahci_show_port_cmd(struct device *dev,
337 struct device_attribute *attr, char *buf);
338
339 DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
340 DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
341 DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
342
343 static struct device_attribute *ahci_shost_attrs[] = {
344 &dev_attr_link_power_management_policy,
345 &dev_attr_em_message_type,
346 &dev_attr_em_message,
347 &dev_attr_ahci_host_caps,
348 &dev_attr_ahci_host_version,
349 &dev_attr_ahci_port_cmd,
350 NULL
351 };
352
353 static struct device_attribute *ahci_sdev_attrs[] = {
354 &dev_attr_sw_activity,
355 &dev_attr_unload_heads,
356 NULL
357 };
358
359 static struct scsi_host_template ahci_sht = {
360 ATA_NCQ_SHT(DRV_NAME),
361 .can_queue = AHCI_MAX_CMDS - 1,
362 .sg_tablesize = AHCI_MAX_SG,
363 .dma_boundary = AHCI_DMA_BOUNDARY,
364 .shost_attrs = ahci_shost_attrs,
365 .sdev_attrs = ahci_sdev_attrs,
366 };
367
368 static struct ata_port_operations ahci_ops = {
369 .inherits = &sata_pmp_port_ops,
370
371 .qc_defer = sata_pmp_qc_defer_cmd_switch,
372 .qc_prep = ahci_qc_prep,
373 .qc_issue = ahci_qc_issue,
374 .qc_fill_rtf = ahci_qc_fill_rtf,
375
376 .freeze = ahci_freeze,
377 .thaw = ahci_thaw,
378 .softreset = ahci_softreset,
379 .hardreset = ahci_hardreset,
380 .postreset = ahci_postreset,
381 .pmp_softreset = ahci_softreset,
382 .error_handler = ahci_error_handler,
383 .post_internal_cmd = ahci_post_internal_cmd,
384 .dev_config = ahci_dev_config,
385
386 .scr_read = ahci_scr_read,
387 .scr_write = ahci_scr_write,
388 .pmp_attach = ahci_pmp_attach,
389 .pmp_detach = ahci_pmp_detach,
390
391 .enable_pm = ahci_enable_alpm,
392 .disable_pm = ahci_disable_alpm,
393 .em_show = ahci_led_show,
394 .em_store = ahci_led_store,
395 .sw_activity_show = ahci_activity_show,
396 .sw_activity_store = ahci_activity_store,
397 #ifdef CONFIG_PM
398 .port_suspend = ahci_port_suspend,
399 .port_resume = ahci_port_resume,
400 #endif
401 .port_start = ahci_port_start,
402 .port_stop = ahci_port_stop,
403 };
404
405 static struct ata_port_operations ahci_vt8251_ops = {
406 .inherits = &ahci_ops,
407 .hardreset = ahci_vt8251_hardreset,
408 };
409
410 static struct ata_port_operations ahci_p5wdh_ops = {
411 .inherits = &ahci_ops,
412 .hardreset = ahci_p5wdh_hardreset,
413 };
414
415 static struct ata_port_operations ahci_sb600_ops = {
416 .inherits = &ahci_ops,
417 .softreset = ahci_sb600_softreset,
418 .pmp_softreset = ahci_sb600_softreset,
419 };
420
421 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
422
423 static const struct ata_port_info ahci_port_info[] = {
424 [board_ahci] =
425 {
426 .flags = AHCI_FLAG_COMMON,
427 .pio_mask = ATA_PIO4,
428 .udma_mask = ATA_UDMA6,
429 .port_ops = &ahci_ops,
430 },
431 [board_ahci_vt8251] =
432 {
433 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
434 .flags = AHCI_FLAG_COMMON,
435 .pio_mask = ATA_PIO4,
436 .udma_mask = ATA_UDMA6,
437 .port_ops = &ahci_vt8251_ops,
438 },
439 [board_ahci_ign_iferr] =
440 {
441 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
442 .flags = AHCI_FLAG_COMMON,
443 .pio_mask = ATA_PIO4,
444 .udma_mask = ATA_UDMA6,
445 .port_ops = &ahci_ops,
446 },
447 [board_ahci_sb600] =
448 {
449 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
450 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255),
451 .flags = AHCI_FLAG_COMMON,
452 .pio_mask = ATA_PIO4,
453 .udma_mask = ATA_UDMA6,
454 .port_ops = &ahci_sb600_ops,
455 },
456 [board_ahci_mv] =
457 {
458 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
459 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
460 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
461 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
462 .pio_mask = ATA_PIO4,
463 .udma_mask = ATA_UDMA6,
464 .port_ops = &ahci_ops,
465 },
466 [board_ahci_sb700] = /* for SB700 and SB800 */
467 {
468 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
469 .flags = AHCI_FLAG_COMMON,
470 .pio_mask = ATA_PIO4,
471 .udma_mask = ATA_UDMA6,
472 .port_ops = &ahci_sb600_ops,
473 },
474 [board_ahci_mcp65] =
475 {
476 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
477 .flags = AHCI_FLAG_COMMON,
478 .pio_mask = ATA_PIO4,
479 .udma_mask = ATA_UDMA6,
480 .port_ops = &ahci_ops,
481 },
482 [board_ahci_nopmp] =
483 {
484 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
485 .flags = AHCI_FLAG_COMMON,
486 .pio_mask = ATA_PIO4,
487 .udma_mask = ATA_UDMA6,
488 .port_ops = &ahci_ops,
489 },
490 [board_ahci_yesncq] =
491 {
492 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
493 .flags = AHCI_FLAG_COMMON,
494 .pio_mask = ATA_PIO4,
495 .udma_mask = ATA_UDMA6,
496 .port_ops = &ahci_ops,
497 },
498 };
499
500 static const struct pci_device_id ahci_pci_tbl[] = {
501 /* Intel */
502 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
503 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
504 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
505 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
506 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
507 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
508 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
509 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
510 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
511 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
512 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
513 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
514 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
515 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
516 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
517 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
518 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
519 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
520 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
521 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
522 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
523 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
524 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
525 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
526 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
527 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
528 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
529 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
530 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
531 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
532 { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
533 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
534 { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
535 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
536 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
537 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
538 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
539 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
540 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
541 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
542
543 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
544 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
545 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
546
547 /* ATI */
548 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
549 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
550 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
551 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
552 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
553 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
554 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
555
556 /* AMD */
557 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */
558 /* AMD uses the RAID class only for AHCI controllers */
559 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
560 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
561
562 /* VIA */
563 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
564 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
565
566 /* NVIDIA */
567 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
568 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
569 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
570 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
571 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
572 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
573 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
574 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
575 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
576 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
577 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
578 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
579 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
580 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
581 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
582 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
583 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
584 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
585 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
586 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
587 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
588 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
589 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
590 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
591 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
592 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
593 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
594 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
595 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
596 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
597 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
598 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
599 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
600 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
601 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
602 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
603 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
604 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
605 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
606 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
607 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
608 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
609 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
610 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
611 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
612 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
613 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
614 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
615 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
616 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
617 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
618 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
619 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
620 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
621 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
622 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
623 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
624 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
625 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
626 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
627 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
628 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
629 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
630 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
631 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
632 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
633 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
634 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
635
636 /* SiS */
637 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
638 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
639 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
640
641 /* Marvell */
642 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
643 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
644
645 /* Promise */
646 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
647
648 /* Generic, PCI class code for AHCI */
649 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
650 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
651
652 { } /* terminate list */
653 };
654
655
656 static struct pci_driver ahci_pci_driver = {
657 .name = DRV_NAME,
658 .id_table = ahci_pci_tbl,
659 .probe = ahci_init_one,
660 .remove = ata_pci_remove_one,
661 #ifdef CONFIG_PM
662 .suspend = ahci_pci_device_suspend,
663 .resume = ahci_pci_device_resume,
664 #endif
665 };
666
667 static int ahci_em_messages = 1;
668 module_param(ahci_em_messages, int, 0444);
669 /* add other LED protocol types when they become supported */
670 MODULE_PARM_DESC(ahci_em_messages,
671 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");
672
673 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
674 static int marvell_enable;
675 #else
676 static int marvell_enable = 1;
677 #endif
678 module_param(marvell_enable, int, 0644);
679 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
680
681
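/* CAP.NP (bits 4:0) holds the number of implemented ports minus one */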
682 static inline int ahci_nr_ports(u32 cap)
683 {
684 return (cap & 0x1f) + 1;
685 }
686
687 static inline void __iomem *__ahci_port_base(struct ata_host *host,
688 unsigned int port_no)
689 {
690 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
691
692 return mmio + 0x100 + (port_no * 0x80);
693 }
694
695 static inline void __iomem *ahci_port_base(struct ata_port *ap)
696 {
697 return __ahci_port_base(ap->host, ap->port_no);
698 }
699
700 static void ahci_enable_ahci(void __iomem *mmio)
701 {
702 int i;
703 u32 tmp;
704
705 /* turn on AHCI_EN */
706 tmp = readl(mmio + HOST_CTL);
707 if (tmp & HOST_AHCI_EN)
708 return;
709
710 /* Some controllers need AHCI_EN to be written multiple times.
711 * Try a few times before giving up.
712 */
713 for (i = 0; i < 5; i++) {
714 tmp |= HOST_AHCI_EN;
715 writel(tmp, mmio + HOST_CTL);
716 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
717 if (tmp & HOST_AHCI_EN)
718 return;
719 msleep(10);
720 }
721
722 WARN_ON(1);
723 }
724
725 static ssize_t ahci_show_host_caps(struct device *dev,
726 struct device_attribute *attr, char *buf)
727 {
728 struct Scsi_Host *shost = class_to_shost(dev);
729 struct ata_port *ap = ata_shost_to_port(shost);
730 struct ahci_host_priv *hpriv = ap->host->private_data;
731
732 return sprintf(buf, "%x\n", hpriv->cap);
733 }
734
735 static ssize_t ahci_show_host_version(struct device *dev,
736 struct device_attribute *attr, char *buf)
737 {
738 struct Scsi_Host *shost = class_to_shost(dev);
739 struct ata_port *ap = ata_shost_to_port(shost);
740 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
741
742 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
743 }
744
745 static ssize_t ahci_show_port_cmd(struct device *dev,
746 struct device_attribute *attr, char *buf)
747 {
748 struct Scsi_Host *shost = class_to_shost(dev);
749 struct ata_port *ap = ata_shost_to_port(shost);
750 void __iomem *port_mmio = ahci_port_base(ap);
751
752 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
753 }
754
755 /**
756 * ahci_save_initial_config - Save and fixup initial config values
757 * @pdev: target PCI device
758 * @hpriv: host private area to store config values
759 *
760 * Some registers containing configuration info might be setup by
761 * BIOS and might be cleared on reset. This function saves the
762 * initial values of those registers into @hpriv such that they
763 * can be restored after controller reset.
764 *
765 * If inconsistent, config values are fixed up by this function.
766 *
767 * LOCKING:
768 * None.
769 */
770 static void ahci_save_initial_config(struct pci_dev *pdev,
771 struct ahci_host_priv *hpriv)
772 {
773 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
774 u32 cap, port_map;
775 int i;
776 int mv;
777
778 /* make sure AHCI mode is enabled before accessing CAP */
779 ahci_enable_ahci(mmio);
780
781 /* Values prefixed with saved_ are written back to host after
782 * reset. Values without are used for driver operation.
783 */
784 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
785 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
786
787 /* some chips have errata preventing 64bit use */
788 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
789 dev_printk(KERN_INFO, &pdev->dev,
790 "controller can't do 64bit DMA, forcing 32bit\n");
791 cap &= ~HOST_CAP_64;
792 }
793
794 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
795 dev_printk(KERN_INFO, &pdev->dev,
796 "controller can't do NCQ, turning off CAP_NCQ\n");
797 cap &= ~HOST_CAP_NCQ;
798 }
799
800 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
801 dev_printk(KERN_INFO, &pdev->dev,
802 "controller can do NCQ, turning on CAP_NCQ\n");
803 cap |= HOST_CAP_NCQ;
804 }
805
806 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
807 dev_printk(KERN_INFO, &pdev->dev,
808 "controller can't do PMP, turning off CAP_PMP\n");
809 cap &= ~HOST_CAP_PMP;
810 }
811
812 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
813 port_map != 1) {
814 dev_printk(KERN_INFO, &pdev->dev,
815 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
816 port_map, 1);
817 port_map = 1;
818 }
819
820 /*
821 * Temporary Marvell 6145 hack: PATA port presence
822 * is asserted through the standard AHCI port
823 * presence register, as bit 4 (counting from 0)
824 */
825 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
826 if (pdev->device == 0x6121)
827 mv = 0x3;
828 else
829 mv = 0xf;
830 dev_printk(KERN_ERR, &pdev->dev,
831 "MV_AHCI HACK: port_map %x -> %x\n",
832 port_map,
833 port_map & mv);
834 dev_printk(KERN_ERR, &pdev->dev,
835 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
836
837 port_map &= mv;
838 }
839
840 /* cross check port_map and cap.n_ports */
841 if (port_map) {
842 int map_ports = 0;
843
844 for (i = 0; i < AHCI_MAX_PORTS; i++)
845 if (port_map & (1 << i))
846 map_ports++;
847
848 /* If PI has more ports than n_ports, whine, clear
849 * port_map and let it be generated from n_ports.
850 */
851 if (map_ports > ahci_nr_ports(cap)) {
852 dev_printk(KERN_WARNING, &pdev->dev,
853 "implemented port map (0x%x) contains more "
854 "ports than nr_ports (%u), using nr_ports\n",
855 port_map, ahci_nr_ports(cap));
856 port_map = 0;
857 }
858 }
859
860 /* fabricate port_map from cap.nr_ports */
861 if (!port_map) {
862 port_map = (1 << ahci_nr_ports(cap)) - 1;
863 dev_printk(KERN_WARNING, &pdev->dev,
864 "forcing PORTS_IMPL to 0x%x\n", port_map);
865
866 /* write the fixed up value to the PI register */
867 hpriv->saved_port_map = port_map;
868 }
869
870 /* record values to use during operation */
871 hpriv->cap = cap;
872 hpriv->port_map = port_map;
873 }
874
875 /**
876 * ahci_restore_initial_config - Restore initial config
877 * @host: target ATA host
878 *
879 * Restore initial config stored by ahci_save_initial_config().
880 *
881 * LOCKING:
882 * None.
883 */
884 static void ahci_restore_initial_config(struct ata_host *host)
885 {
886 struct ahci_host_priv *hpriv = host->private_data;
887 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
888
889 writel(hpriv->saved_cap, mmio + HOST_CAP);
890 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
891 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
892 }
893
894 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
895 {
896 static const int offset[] = {
897 [SCR_STATUS] = PORT_SCR_STAT,
898 [SCR_CONTROL] = PORT_SCR_CTL,
899 [SCR_ERROR] = PORT_SCR_ERR,
900 [SCR_ACTIVE] = PORT_SCR_ACT,
901 [SCR_NOTIFICATION] = PORT_SCR_NTF,
902 };
903 struct ahci_host_priv *hpriv = ap->host->private_data;
904
905 if (sc_reg < ARRAY_SIZE(offset) &&
906 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
907 return offset[sc_reg];
908 return 0;
909 }
910
911 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
912 {
913 void __iomem *port_mmio = ahci_port_base(link->ap);
914 int offset = ahci_scr_offset(link->ap, sc_reg);
915
916 if (offset) {
917 *val = readl(port_mmio + offset);
918 return 0;
919 }
920 return -EINVAL;
921 }
922
923 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
924 {
925 void __iomem *port_mmio = ahci_port_base(link->ap);
926 int offset = ahci_scr_offset(link->ap, sc_reg);
927
928 if (offset) {
929 writel(val, port_mmio + offset);
930 return 0;
931 }
932 return -EINVAL;
933 }
934
935 static void ahci_start_engine(struct ata_port *ap)
936 {
937 void __iomem *port_mmio = ahci_port_base(ap);
938 u32 tmp;
939
940 /* start DMA */
941 tmp = readl(port_mmio + PORT_CMD);
942 tmp |= PORT_CMD_START;
943 writel(tmp, port_mmio + PORT_CMD);
944 readl(port_mmio + PORT_CMD); /* flush */
945 }
946
947 static int ahci_stop_engine(struct ata_port *ap)
948 {
949 void __iomem *port_mmio = ahci_port_base(ap);
950 u32 tmp;
951
952 tmp = readl(port_mmio + PORT_CMD);
953
954 /* check if the HBA is idle */
955 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
956 return 0;
957
958 /* setting HBA to idle */
959 tmp &= ~PORT_CMD_START;
960 writel(tmp, port_mmio + PORT_CMD);
961
962 /* wait for engine to stop. This could be as long as 500 msec */
963 tmp = ata_wait_register(port_mmio + PORT_CMD,
964 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
965 if (tmp & PORT_CMD_LIST_ON)
966 return -EIO;
967
968 return 0;
969 }
970
971 static void ahci_start_fis_rx(struct ata_port *ap)
972 {
973 void __iomem *port_mmio = ahci_port_base(ap);
974 struct ahci_host_priv *hpriv = ap->host->private_data;
975 struct ahci_port_priv *pp = ap->private_data;
976 u32 tmp;
977
978 /* set FIS registers */
979 if (hpriv->cap & HOST_CAP_64)
980 writel((pp->cmd_slot_dma >> 16) >> 16,
981 port_mmio + PORT_LST_ADDR_HI);
982 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
983
984 if (hpriv->cap & HOST_CAP_64)
985 writel((pp->rx_fis_dma >> 16) >> 16,
986 port_mmio + PORT_FIS_ADDR_HI);
987 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
988
989 /* enable FIS reception */
990 tmp = readl(port_mmio + PORT_CMD);
991 tmp |= PORT_CMD_FIS_RX;
992 writel(tmp, port_mmio + PORT_CMD);
993
994 /* flush */
995 readl(port_mmio + PORT_CMD);
996 }
997
998 static int ahci_stop_fis_rx(struct ata_port *ap)
999 {
1000 void __iomem *port_mmio = ahci_port_base(ap);
1001 u32 tmp;
1002
1003 /* disable FIS reception */
1004 tmp = readl(port_mmio + PORT_CMD);
1005 tmp &= ~PORT_CMD_FIS_RX;
1006 writel(tmp, port_mmio + PORT_CMD);
1007
1008 /* wait for completion, spec says 500ms, give it 1000 */
1009 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1010 PORT_CMD_FIS_ON, 10, 1000);
1011 if (tmp & PORT_CMD_FIS_ON)
1012 return -EBUSY;
1013
1014 return 0;
1015 }
1016
1017 static void ahci_power_up(struct ata_port *ap)
1018 {
1019 struct ahci_host_priv *hpriv = ap->host->private_data;
1020 void __iomem *port_mmio = ahci_port_base(ap);
1021 u32 cmd;
1022
1023 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1024
1025 /* spin up device */
1026 if (hpriv->cap & HOST_CAP_SSS) {
1027 cmd |= PORT_CMD_SPIN_UP;
1028 writel(cmd, port_mmio + PORT_CMD);
1029 }
1030
1031 /* wake up link */
1032 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1033 }
1034
1035 static void ahci_disable_alpm(struct ata_port *ap)
1036 {
1037 struct ahci_host_priv *hpriv = ap->host->private_data;
1038 void __iomem *port_mmio = ahci_port_base(ap);
1039 u32 cmd;
1040 struct ahci_port_priv *pp = ap->private_data;
1041
1042 /* IPM bits should be disabled by libata-core */
1043 /* get the existing command bits */
1044 cmd = readl(port_mmio + PORT_CMD);
1045
1046 /* disable ALPM and ASP */
1047 cmd &= ~PORT_CMD_ASP;
1048 cmd &= ~PORT_CMD_ALPE;
1049
1050 /* force the interface back to active */
1051 cmd |= PORT_CMD_ICC_ACTIVE;
1052
1053 /* write out new cmd value */
1054 writel(cmd, port_mmio + PORT_CMD);
1055 cmd = readl(port_mmio + PORT_CMD);
1056
1057 /* wait 10ms to be sure we've come out of any low power state */
1058 msleep(10);
1059
1060 /* clear out any PhyRdy stuff from interrupt status */
1061 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1062
1063 /* go ahead and clean out PhyRdy Change from Serror too */
1064 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1065
1066 /*
1067 * Clear flag to indicate that we should ignore all PhyRdy
1068 * state changes
1069 */
1070 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1071
1072 /*
1073 * Enable interrupts on Phy Ready.
1074 */
1075 pp->intr_mask |= PORT_IRQ_PHYRDY;
1076 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1077
1078 /*
1079 * don't change the link pm policy - we can be called
1080 * just to turn off link pm temporarily
1081 */
1082 }
1083
1084 static int ahci_enable_alpm(struct ata_port *ap,
1085 enum link_pm policy)
1086 {
1087 struct ahci_host_priv *hpriv = ap->host->private_data;
1088 void __iomem *port_mmio = ahci_port_base(ap);
1089 u32 cmd;
1090 struct ahci_port_priv *pp = ap->private_data;
1091 u32 asp;
1092
1093 /* Make sure the host is capable of link power management */
1094 if (!(hpriv->cap & HOST_CAP_ALPM))
1095 return -EINVAL;
1096
1097 switch (policy) {
1098 case MAX_PERFORMANCE:
1099 case NOT_AVAILABLE:
1100 /*
1101 * if we came here with NOT_AVAILABLE,
1102 * it just means this is the first time we
1103 * have tried to enable - default to max performance,
1104 * and let the user go to lower power modes on request.
1105 */
1106 ahci_disable_alpm(ap);
1107 return 0;
1108 case MIN_POWER:
1109 /* configure HBA to enter SLUMBER */
1110 asp = PORT_CMD_ASP;
1111 break;
1112 case MEDIUM_POWER:
1113 /* configure HBA to enter PARTIAL */
1114 asp = 0;
1115 break;
1116 default:
1117 return -EINVAL;
1118 }
1119
1120 /*
1121 * Disable interrupts on Phy Ready. This keeps us from
1122 * getting woken up due to spurious phy ready interrupts
1123 * TBD - Hot plug should be done via polling now, is
1124 * that even supported?
1125 */
1126 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1127 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1128
1129 /*
1130 * Set a flag to indicate that we should ignore all PhyRdy
1131 * state changes since these can happen now whenever we
1132 * change link state
1133 */
1134 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1135
1136 /* get the existing command bits */
1137 cmd = readl(port_mmio + PORT_CMD);
1138
1139 /*
1140 * Set ASP based on Policy
1141 */
1142 cmd |= asp;
1143
1144 /*
1145 * Setting this bit will instruct the HBA to aggressively
1146 * enter a lower power link state when it's appropriate and
1147 * based on the value set above for ASP
1148 */
1149 cmd |= PORT_CMD_ALPE;
1150
1151 /* write out new cmd value */
1152 writel(cmd, port_mmio + PORT_CMD);
1153 cmd = readl(port_mmio + PORT_CMD);
1154
1155 /* IPM bits should be set by libata-core */
1156 return 0;
1157 }
1158
1159 #ifdef CONFIG_PM
1160 static void ahci_power_down(struct ata_port *ap)
1161 {
1162 struct ahci_host_priv *hpriv = ap->host->private_data;
1163 void __iomem *port_mmio = ahci_port_base(ap);
1164 u32 cmd, scontrol;
1165
1166 if (!(hpriv->cap & HOST_CAP_SSS))
1167 return;
1168
1169 /* put device into listen mode, first set PxSCTL.DET to 0 */
1170 scontrol = readl(port_mmio + PORT_SCR_CTL);
1171 scontrol &= ~0xf;
1172 writel(scontrol, port_mmio + PORT_SCR_CTL);
1173
1174 /* then set PxCMD.SUD to 0 */
1175 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1176 cmd &= ~PORT_CMD_SPIN_UP;
1177 writel(cmd, port_mmio + PORT_CMD);
1178 }
1179 #endif
1180
1181 static void ahci_start_port(struct ata_port *ap)
1182 {
1183 struct ahci_port_priv *pp = ap->private_data;
1184 struct ata_link *link;
1185 struct ahci_em_priv *emp;
1186 ssize_t rc;
1187 int i;
1188
1189 /* enable FIS reception */
1190 ahci_start_fis_rx(ap);
1191
1192 /* enable DMA */
1193 ahci_start_engine(ap);
1194
1195 /* turn on LEDs */
1196 if (ap->flags & ATA_FLAG_EM) {
1197 ata_for_each_link(link, ap, EDGE) {
1198 emp = &pp->em_priv[link->pmp];
1199
1200 /* EM Transmit bit may be busy during init */
1201 for (i = 0; i < EM_MAX_RETRY; i++) {
1202 rc = ahci_transmit_led_message(ap,
1203 emp->led_state,
1204 4);
1205 if (rc == -EBUSY)
1206 msleep(1);
1207 else
1208 break;
1209 }
1210 }
1211 }
1212
1213 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1214 ata_for_each_link(link, ap, EDGE)
1215 ahci_init_sw_activity(link);
1216
1217 }
1218
1219 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1220 {
1221 int rc;
1222
1223 /* disable DMA */
1224 rc = ahci_stop_engine(ap);
1225 if (rc) {
1226 *emsg = "failed to stop engine";
1227 return rc;
1228 }
1229
1230 /* disable FIS reception */
1231 rc = ahci_stop_fis_rx(ap);
1232 if (rc) {
1233 *emsg = "failed to stop FIS RX";
1234 return rc;
1235 }
1236
1237 return 0;
1238 }
1239
1240 static int ahci_reset_controller(struct ata_host *host)
1241 {
1242 struct pci_dev *pdev = to_pci_dev(host->dev);
1243 struct ahci_host_priv *hpriv = host->private_data;
1244 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1245 u32 tmp;
1246
1247 /* we must be in AHCI mode, before using anything
1248 * AHCI-specific, such as HOST_RESET.
1249 */
1250 ahci_enable_ahci(mmio);
1251
1252 /* global controller reset */
1253 if (!ahci_skip_host_reset) {
1254 tmp = readl(mmio + HOST_CTL);
1255 if ((tmp & HOST_RESET) == 0) {
1256 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1257 readl(mmio + HOST_CTL); /* flush */
1258 }
1259
1260 /*
1261 * to perform host reset, OS should set HOST_RESET
1262 * and poll until this bit reads back as 0.
1263 * reset must complete within 1 second, or
1264 * the hardware should be considered fried.
1265 */
1266 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1267 HOST_RESET, 10, 1000);
1268
1269 if (tmp & HOST_RESET) {
1270 dev_printk(KERN_ERR, host->dev,
1271 "controller reset failed (0x%x)\n", tmp);
1272 return -EIO;
1273 }
1274
1275 /* turn on AHCI mode */
1276 ahci_enable_ahci(mmio);
1277
1278 /* Some registers might be cleared on reset. Restore
1279 * initial values.
1280 */
1281 ahci_restore_initial_config(host);
1282 } else
1283 dev_printk(KERN_INFO, host->dev,
1284 "skipping global host reset\n");
1285
1286 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1287 u16 tmp16;
1288
1289 /* configure PCS */
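/* make sure every port in port_map has its enable bit set in PCS */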
1290 pci_read_config_word(pdev, 0x92, &tmp16);
1291 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1292 tmp16 |= hpriv->port_map;
1293 pci_write_config_word(pdev, 0x92, tmp16);
1294 }
1295 }
1296
1297 return 0;
1298 }
1299
1300 static void ahci_sw_activity(struct ata_link *link)
1301 {
1302 struct ata_port *ap = link->ap;
1303 struct ahci_port_priv *pp = ap->private_data;
1304 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1305
1306 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1307 return;
1308
1309 emp->activity++;
1310 if (!timer_pending(&emp->timer))
1311 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1312 }
1313
1314 static void ahci_sw_activity_blink(unsigned long arg)
1315 {
1316 struct ata_link *link = (struct ata_link *)arg;
1317 struct ata_port *ap = link->ap;
1318 struct ahci_port_priv *pp = ap->private_data;
1319 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1320 unsigned long led_message = emp->led_state;
1321 u32 activity_led_state;
1322 unsigned long flags;
1323
1324 led_message &= EM_MSG_LED_VALUE;
1325 led_message |= ap->port_no | (link->pmp << 8);
1326
1327 /* check to see if we've had activity. If so,
1328 * toggle state of LED and reset timer. If not,
1329 * turn LED to desired idle state.
1330 */
1331 spin_lock_irqsave(ap->lock, flags);
1332 if (emp->saved_activity != emp->activity) {
1333 emp->saved_activity = emp->activity;
1334 /* get the current LED state */
1335 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1336
1337 if (activity_led_state)
1338 activity_led_state = 0;
1339 else
1340 activity_led_state = 1;
1341
1342 /* clear old state */
1343 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1344
1345 /* toggle state */
1346 led_message |= (activity_led_state << 16);
1347 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1348 } else {
1349 /* switch to idle */
1350 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1351 if (emp->blink_policy == BLINK_OFF)
1352 led_message |= (1 << 16);
1353 }
1354 spin_unlock_irqrestore(ap->lock, flags);
1355 ahci_transmit_led_message(ap, led_message, 4);
1356 }
1357
1358 static void ahci_init_sw_activity(struct ata_link *link)
1359 {
1360 struct ata_port *ap = link->ap;
1361 struct ahci_port_priv *pp = ap->private_data;
1362 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1363
1364 /* init activity stats, setup timer */
1365 emp->saved_activity = emp->activity = 0;
1366 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1367
1368 /* check our blink policy and set flag for link if it's enabled */
1369 if (emp->blink_policy)
1370 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1371 }
1372
1373 static int ahci_reset_em(struct ata_host *host)
1374 {
1375 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1376 u32 em_ctl;
1377
1378 em_ctl = readl(mmio + HOST_EM_CTL);
1379 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1380 return -EINVAL;
1381
1382 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1383 return 0;
1384 }
1385
1386 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1387 ssize_t size)
1388 {
1389 struct ahci_host_priv *hpriv = ap->host->private_data;
1390 struct ahci_port_priv *pp = ap->private_data;
1391 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1392 u32 em_ctl;
1393 u32 message[] = {0, 0};
1394 unsigned long flags;
1395 int pmp;
1396 struct ahci_em_priv *emp;
1397
1398 /* get the slot number from the message */
1399 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1400 if (pmp < EM_MAX_SLOTS)
1401 emp = &pp->em_priv[pmp];
1402 else
1403 return -EINVAL;
1404
1405 spin_lock_irqsave(ap->lock, flags);
1406
1407 /*
1408 * if we are still busy transmitting a previous message,
1409 * do not allow a new one to be sent
1410 */
1411 em_ctl = readl(mmio + HOST_EM_CTL);
1412 if (em_ctl & EM_CTL_TM) {
1413 spin_unlock_irqrestore(ap->lock, flags);
1414 return -EBUSY;
1415 }
1416
1417 /*
1418 * create message header - this is all zero except for
1419 * the message size, which is 4 bytes.
1420 */
1421 message[0] |= (4 << 8);
1422
1423 /* ignore 0:4 of byte zero, fill in port info yourself */
1424 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1425
1426 /* write message to EM_LOC */
1427 writel(message[0], mmio + hpriv->em_loc);
1428 writel(message[1], mmio + hpriv->em_loc+4);
1429
1430 /* save off new led state for port/slot */
1431 emp->led_state = state;
1432
1433 /*
1434 * tell hardware to transmit the message
1435 */
1436 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1437
1438 spin_unlock_irqrestore(ap->lock, flags);
1439 return size;
1440 }
1441
1442 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1443 {
1444 struct ahci_port_priv *pp = ap->private_data;
1445 struct ata_link *link;
1446 struct ahci_em_priv *emp;
1447 int rc = 0;
1448
1449 ata_for_each_link(link, ap, EDGE) {
1450 emp = &pp->em_priv[link->pmp];
1451 rc += sprintf(buf + rc, "%lx\n", emp->led_state);
1452 }
1453 return rc;
1454 }
1455
1456 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1457 size_t size)
1458 {
1459 int state;
1460 int pmp;
1461 struct ahci_port_priv *pp = ap->private_data;
1462 struct ahci_em_priv *emp;
1463
1464 state = simple_strtoul(buf, NULL, 0);
1465
1466 /* get the slot number from the message */
1467 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1468 if (pmp < EM_MAX_SLOTS)
1469 emp = &pp->em_priv[pmp];
1470 else
1471 return -EINVAL;
1472
1473 /* mask off the activity bits if we are in sw_activity
1474 * mode, user should turn off sw_activity before setting
1475 * activity led through em_message
1476 */
1477 if (emp->blink_policy)
1478 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1479
1480 return ahci_transmit_led_message(ap, state, size);
1481 }
1482
1483 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1484 {
1485 struct ata_link *link = dev->link;
1486 struct ata_port *ap = link->ap;
1487 struct ahci_port_priv *pp = ap->private_data;
1488 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1489 u32 port_led_state = emp->led_state;
1490
1491 /* save the desired Activity LED behavior */
1492 if (val == OFF) {
1493 /* clear LFLAG */
1494 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1495
1496 /* set the LED to OFF */
1497 port_led_state &= EM_MSG_LED_VALUE_OFF;
1498 port_led_state |= (ap->port_no | (link->pmp << 8));
1499 ahci_transmit_led_message(ap, port_led_state, 4);
1500 } else {
1501 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1502 if (val == BLINK_OFF) {
1503 /* set LED to ON for idle */
1504 port_led_state &= EM_MSG_LED_VALUE_OFF;
1505 port_led_state |= (ap->port_no | (link->pmp << 8));
1506 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1507 ahci_transmit_led_message(ap, port_led_state, 4);
1508 }
1509 }
1510 emp->blink_policy = val;
1511 return 0;
1512 }
1513
1514 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1515 {
1516 struct ata_link *link = dev->link;
1517 struct ata_port *ap = link->ap;
1518 struct ahci_port_priv *pp = ap->private_data;
1519 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1520
1521 /* display the saved value of activity behavior for this
1522 * disk.
1523 */
1524 return sprintf(buf, "%d\n", emp->blink_policy);
1525 }
1526
1527 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1528 int port_no, void __iomem *mmio,
1529 void __iomem *port_mmio)
1530 {
1531 const char *emsg = NULL;
1532 int rc;
1533 u32 tmp;
1534
1535 /* make sure port is not active */
1536 rc = ahci_deinit_port(ap, &emsg);
1537 if (rc)
1538 dev_printk(KERN_WARNING, &pdev->dev,
1539 "%s (%d)\n", emsg, rc);
1540
1541 /* clear SError */
1542 tmp = readl(port_mmio + PORT_SCR_ERR);
1543 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1544 writel(tmp, port_mmio + PORT_SCR_ERR);
1545
1546 /* clear port IRQ */
1547 tmp = readl(port_mmio + PORT_IRQ_STAT);
1548 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1549 if (tmp)
1550 writel(tmp, port_mmio + PORT_IRQ_STAT);
1551
1552 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1553 }
1554
1555 static void ahci_init_controller(struct ata_host *host)
1556 {
1557 struct ahci_host_priv *hpriv = host->private_data;
1558 struct pci_dev *pdev = to_pci_dev(host->dev);
1559 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1560 int i;
1561 void __iomem *port_mmio;
1562 u32 tmp;
1563 int mv;
1564
1565 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1566 if (pdev->device == 0x6121)
1567 mv = 2;
1568 else
1569 mv = 4;
1570 port_mmio = __ahci_port_base(host, mv);
1571
1572 writel(0, port_mmio + PORT_IRQ_MASK);
1573
1574 /* clear port IRQ */
1575 tmp = readl(port_mmio + PORT_IRQ_STAT);
1576 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1577 if (tmp)
1578 writel(tmp, port_mmio + PORT_IRQ_STAT);
1579 }
1580
1581 for (i = 0; i < host->n_ports; i++) {
1582 struct ata_port *ap = host->ports[i];
1583
1584 port_mmio = ahci_port_base(ap);
1585 if (ata_port_is_dummy(ap))
1586 continue;
1587
1588 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1589 }
1590
1591 tmp = readl(mmio + HOST_CTL);
1592 VPRINTK("HOST_CTL 0x%x\n", tmp);
1593 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1594 tmp = readl(mmio + HOST_CTL);
1595 VPRINTK("HOST_CTL 0x%x\n", tmp);
1596 }
1597
1598 static void ahci_dev_config(struct ata_device *dev)
1599 {
1600 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1601
1602 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1603 dev->max_sectors = 255;
1604 ata_dev_printk(dev, KERN_INFO,
1605 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1606 }
1607 }
1608
1609 static unsigned int ahci_dev_classify(struct ata_port *ap)
1610 {
1611 void __iomem *port_mmio = ahci_port_base(ap);
1612 struct ata_taskfile tf;
1613 u32 tmp;
1614
1615 tmp = readl(port_mmio + PORT_SIG);
1616 tf.lbah = (tmp >> 24) & 0xff;
1617 tf.lbam = (tmp >> 16) & 0xff;
1618 tf.lbal = (tmp >> 8) & 0xff;
1619 tf.nsect = (tmp) & 0xff;
1620
1621 return ata_dev_classify(&tf);
1622 }
1623
1624 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1625 u32 opts)
1626 {
1627 dma_addr_t cmd_tbl_dma;
1628
1629 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1630
1631 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1632 pp->cmd_slot[tag].status = 0;
1633 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
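/* shift twice by 16 so this stays well-defined when dma_addr_t is 32 bits wide */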
1634 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1635 }
1636
1637 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1638 {
1639 void __iomem *port_mmio = ahci_port_base(ap);
1640 struct ahci_host_priv *hpriv = ap->host->private_data;
1641 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1642 u32 tmp;
1643 int busy, rc;
1644
1645 /* do we need to kick the port? */
1646 busy = status & (ATA_BUSY | ATA_DRQ);
1647 if (!busy && !force_restart)
1648 return 0;
1649
1650 /* stop engine */
1651 rc = ahci_stop_engine(ap);
1652 if (rc)
1653 goto out_restart;
1654
1655 /* need to do CLO? */
1656 if (!busy) {
1657 rc = 0;
1658 goto out_restart;
1659 }
1660
1661 if (!(hpriv->cap & HOST_CAP_CLO)) {
1662 rc = -EOPNOTSUPP;
1663 goto out_restart;
1664 }
1665
1666 /* perform CLO */
1667 tmp = readl(port_mmio + PORT_CMD);
1668 tmp |= PORT_CMD_CLO;
1669 writel(tmp, port_mmio + PORT_CMD);
1670
1671 rc = 0;
1672 tmp = ata_wait_register(port_mmio + PORT_CMD,
1673 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1674 if (tmp & PORT_CMD_CLO)
1675 rc = -EIO;
1676
1677 /* restart engine */
1678 out_restart:
1679 ahci_start_engine(ap);
1680 return rc;
1681 }
1682
1683 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1684 struct ata_taskfile *tf, int is_cmd, u16 flags,
1685 unsigned long timeout_msec)
1686 {
1687 const u32 cmd_fis_len = 5; /* five dwords */
1688 struct ahci_port_priv *pp = ap->private_data;
1689 void __iomem *port_mmio = ahci_port_base(ap);
1690 u8 *fis = pp->cmd_tbl;
1691 u32 tmp;
1692
1693 /* prep the command */
1694 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1695 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1696
1697 /* issue & wait */
1698 writel(1, port_mmio + PORT_CMD_ISSUE);
1699
1700 if (timeout_msec) {
1701 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1702 1, timeout_msec);
1703 if (tmp & 0x1) {
1704 ahci_kick_engine(ap, 1);
1705 return -EBUSY;
1706 }
1707 } else
1708 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1709
1710 return 0;
1711 }
1712
1713 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1714 int pmp, unsigned long deadline,
1715 int (*check_ready)(struct ata_link *link))
1716 {
1717 struct ata_port *ap = link->ap;
1718 struct ahci_host_priv *hpriv = ap->host->private_data;
1719 const char *reason = NULL;
1720 unsigned long now, msecs;
1721 struct ata_taskfile tf;
1722 int rc;
1723
1724 DPRINTK("ENTER\n");
1725
1726 /* prepare for SRST (AHCI-1.1 10.4.1) */
1727 rc = ahci_kick_engine(ap, 1);
1728 if (rc && rc != -EOPNOTSUPP)
1729 ata_link_printk(link, KERN_WARNING,
1730 "failed to reset engine (errno=%d)\n", rc);
1731
1732 ata_tf_init(link->device, &tf);
1733
1734 	/* issue the first H2D Register FIS */
1735 msecs = 0;
1736 now = jiffies;
1737 	if (time_after(deadline, now))
1738 msecs = jiffies_to_msecs(deadline - now);
1739
1740 tf.ctl |= ATA_SRST;
1741 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1742 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1743 rc = -EIO;
1744 reason = "1st FIS failed";
1745 goto fail;
1746 }
1747
1748 /* spec says at least 5us, but be generous and sleep for 1ms */
1749 msleep(1);
1750
1751 	/* issue the second H2D Register FIS */
1752 tf.ctl &= ~ATA_SRST;
1753 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1754
1755 /* wait for link to become ready */
1756 rc = ata_wait_after_reset(link, deadline, check_ready);
1757 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1758 /*
1759 * Workaround for cases where link online status can't
1760 * be trusted. Treat device readiness timeout as link
1761 * offline.
1762 */
1763 ata_link_printk(link, KERN_INFO,
1764 "device not ready, treating as offline\n");
1765 *class = ATA_DEV_NONE;
1766 } else if (rc) {
1767 /* link occupied, -ENODEV too is an error */
1768 reason = "device not ready";
1769 goto fail;
1770 } else
1771 *class = ahci_dev_classify(ap);
1772
1773 DPRINTK("EXIT, class=%u\n", *class);
1774 return 0;
1775
1776 fail:
1777 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1778 return rc;
1779 }
1780
1781 static int ahci_check_ready(struct ata_link *link)
1782 {
1783 void __iomem *port_mmio = ahci_port_base(link->ap);
1784 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1785
1786 return ata_check_ready(status);
1787 }
1788
1789 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1790 unsigned long deadline)
1791 {
1792 int pmp = sata_srst_pmp(link);
1793
1794 DPRINTK("ENTER\n");
1795
1796 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1797 }
1798
1799 static int ahci_sb600_check_ready(struct ata_link *link)
1800 {
1801 void __iomem *port_mmio = ahci_port_base(link->ap);
1802 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1803 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1804
1805 /*
1806 	 * Due to a HW bug, BAD PMP may be reported here; when it is, there is
1807 	 * no need to check TFDATA, and bailing out early saves the timeout delay.
1808 */
1809 if (irq_status & PORT_IRQ_BAD_PMP)
1810 return -EIO;
1811
1812 return ata_check_ready(status);
1813 }
1814
1815 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1816 unsigned long deadline)
1817 {
1818 struct ata_port *ap = link->ap;
1819 void __iomem *port_mmio = ahci_port_base(ap);
1820 int pmp = sata_srst_pmp(link);
1821 int rc;
1822 u32 irq_sts;
1823
1824 DPRINTK("ENTER\n");
1825
1826 rc = ahci_do_softreset(link, class, pmp, deadline,
1827 ahci_sb600_check_ready);
1828
1829 /*
1830 * Soft reset fails on some ATI chips with IPMS set when PMP
1831 * is enabled but SATA HDD/ODD is connected to SATA port,
1832 * do soft reset again to port 0.
1833 */
1834 if (rc == -EIO) {
1835 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1836 if (irq_sts & PORT_IRQ_BAD_PMP) {
1837 ata_link_printk(link, KERN_WARNING,
1838 "applying SB600 PMP SRST workaround "
1839 "and retrying\n");
1840 rc = ahci_do_softreset(link, class, 0, deadline,
1841 ahci_check_ready);
1842 }
1843 }
1844
1845 return rc;
1846 }
1847
1848 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1849 unsigned long deadline)
1850 {
1851 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1852 struct ata_port *ap = link->ap;
1853 struct ahci_port_priv *pp = ap->private_data;
1854 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1855 struct ata_taskfile tf;
1856 bool online;
1857 int rc;
1858
1859 DPRINTK("ENTER\n");
1860
1861 ahci_stop_engine(ap);
1862
1863 /* clear D2H reception area to properly wait for D2H FIS */
1864 ata_tf_init(link->device, &tf);
1865 	tf.command = 0x80;	/* ATA_BUSY */
1866 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1867
1868 rc = sata_link_hardreset(link, timing, deadline, &online,
1869 ahci_check_ready);
1870
1871 ahci_start_engine(ap);
1872
1873 if (online)
1874 *class = ahci_dev_classify(ap);
1875
1876 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1877 return rc;
1878 }
1879
1880 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1881 unsigned long deadline)
1882 {
1883 struct ata_port *ap = link->ap;
1884 bool online;
1885 int rc;
1886
1887 DPRINTK("ENTER\n");
1888
1889 ahci_stop_engine(ap);
1890
1891 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1892 deadline, &online, NULL);
1893
1894 ahci_start_engine(ap);
1895
1896 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1897
1898 /* vt8251 doesn't clear BSY on signature FIS reception,
1899 * request follow-up softreset.
1900 */
1901 return online ? -EAGAIN : rc;
1902 }
1903
1904 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1905 unsigned long deadline)
1906 {
1907 struct ata_port *ap = link->ap;
1908 struct ahci_port_priv *pp = ap->private_data;
1909 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1910 struct ata_taskfile tf;
1911 bool online;
1912 int rc;
1913
1914 ahci_stop_engine(ap);
1915
1916 /* clear D2H reception area to properly wait for D2H FIS */
1917 ata_tf_init(link->device, &tf);
1918 	tf.command = 0x80;	/* ATA_BUSY */
1919 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1920
1921 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1922 deadline, &online, NULL);
1923
1924 ahci_start_engine(ap);
1925
1926 /* The pseudo configuration device on SIMG4726 attached to
1927 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1928 * hardreset if no device is attached to the first downstream
1929 * port && the pseudo device locks up on SRST w/ PMP==0. To
1930 * work around this, wait for !BSY only briefly. If BSY isn't
1931 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1932 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1933 *
1934 * Wait for two seconds. Devices attached to downstream port
1935 * which can't process the following IDENTIFY after this will
1936 * have to be reset again. For most cases, this should
1937 * suffice while making probing snappish enough.
1938 */
1939 if (online) {
1940 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1941 ahci_check_ready);
1942 if (rc)
1943 ahci_kick_engine(ap, 0);
1944 }
1945 return rc;
1946 }
1947
1948 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1949 {
1950 struct ata_port *ap = link->ap;
1951 void __iomem *port_mmio = ahci_port_base(ap);
1952 u32 new_tmp, tmp;
1953
1954 ata_std_postreset(link, class);
1955
1956 /* Make sure port's ATAPI bit is set appropriately */
1957 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1958 if (*class == ATA_DEV_ATAPI)
1959 new_tmp |= PORT_CMD_ATAPI;
1960 else
1961 new_tmp &= ~PORT_CMD_ATAPI;
1962 if (new_tmp != tmp) {
1963 writel(new_tmp, port_mmio + PORT_CMD);
1964 readl(port_mmio + PORT_CMD); /* flush */
1965 }
1966 }
1967
1968 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1969 {
1970 struct scatterlist *sg;
1971 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1972 unsigned int si;
1973
1974 VPRINTK("ENTER\n");
1975
1976 /*
1977 * Next, the S/G list.
1978 */
1979 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1980 dma_addr_t addr = sg_dma_address(sg);
1981 u32 sg_len = sg_dma_len(sg);
1982
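		/*
		 * Each PRD entry carries a split 64-bit buffer address and,
		 * in flags_size, the byte count minus one (per AHCI, at most
		 * 4 MiB per entry); bit 31 (AHCI_IRQ_ON_SG) would request an
		 * interrupt on completion of the entry and is left clear here.
		 */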
1983 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1984 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1985 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1986 }
1987
1988 return si;
1989 }
1990
1991 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1992 {
1993 struct ata_port *ap = qc->ap;
1994 struct ahci_port_priv *pp = ap->private_data;
1995 int is_atapi = ata_is_atapi(qc->tf.protocol);
1996 void *cmd_tbl;
1997 u32 opts;
1998 const u32 cmd_fis_len = 5; /* five dwords */
1999 unsigned int n_elem;
2000
2001 /*
2002 * Fill in command table information. First, the header,
2003 * a SATA Register - Host to Device command FIS.
2004 */
2005 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2006
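	/*
	 * Within each command table the command FIS sits at offset 0, the
	 * ATAPI CDB at AHCI_CMD_TBL_CDB and the scatter/gather (PRD) table
	 * at AHCI_CMD_TBL_HDR_SZ; ahci_fill_sg() above fills the latter.
	 */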
2007 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2008 if (is_atapi) {
2009 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2010 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2011 }
2012
2013 n_elem = 0;
2014 if (qc->flags & ATA_QCFLAG_DMAMAP)
2015 n_elem = ahci_fill_sg(qc, cmd_tbl);
2016
2017 /*
2018 * Fill in command slot information.
2019 */
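	/*
	 * Roughly per AHCI, 'opts' becomes the low dword of the command
	 * header: FIS length in dwords in bits 4:0, the ATAPI/Write/
	 * Prefetch flags in bits 5-7, the PMP port number in bits 15:12
	 * and the PRDT entry count in bits 31:16.
	 */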
2020 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2021 if (qc->tf.flags & ATA_TFLAG_WRITE)
2022 opts |= AHCI_CMD_WRITE;
2023 if (is_atapi)
2024 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2025
2026 ahci_fill_cmd_slot(pp, qc->tag, opts);
2027 }
2028
2029 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2030 {
2031 struct ahci_host_priv *hpriv = ap->host->private_data;
2032 struct ahci_port_priv *pp = ap->private_data;
2033 struct ata_eh_info *host_ehi = &ap->link.eh_info;
2034 struct ata_link *link = NULL;
2035 struct ata_queued_cmd *active_qc;
2036 struct ata_eh_info *active_ehi;
2037 u32 serror;
2038
2039 /* determine active link */
2040 ata_for_each_link(link, ap, EDGE)
2041 if (ata_link_active(link))
2042 break;
2043 if (!link)
2044 link = &ap->link;
2045
2046 active_qc = ata_qc_from_tag(ap, link->active_tag);
2047 active_ehi = &link->eh_info;
2048
2049 /* record irq stat */
2050 ata_ehi_clear_desc(host_ehi);
2051 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2052
2053 /* AHCI needs SError cleared; otherwise, it might lock up */
2054 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2055 ahci_scr_write(&ap->link, SCR_ERROR, serror);
2056 host_ehi->serror |= serror;
2057
2058 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2059 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2060 irq_stat &= ~PORT_IRQ_IF_ERR;
2061
2062 if (irq_stat & PORT_IRQ_TF_ERR) {
2063 /* If qc is active, charge it; otherwise, the active
2064 * link. There's no active qc on NCQ errors. It will
2065 * be determined by EH by reading log page 10h.
2066 */
2067 if (active_qc)
2068 active_qc->err_mask |= AC_ERR_DEV;
2069 else
2070 active_ehi->err_mask |= AC_ERR_DEV;
2071
2072 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2073 host_ehi->serror &= ~SERR_INTERNAL;
2074 }
2075
2076 if (irq_stat & PORT_IRQ_UNK_FIS) {
2077 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2078
2079 active_ehi->err_mask |= AC_ERR_HSM;
2080 active_ehi->action |= ATA_EH_RESET;
2081 ata_ehi_push_desc(active_ehi,
2082 "unknown FIS %08x %08x %08x %08x" ,
2083 unk[0], unk[1], unk[2], unk[3]);
2084 }
2085
2086 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2087 active_ehi->err_mask |= AC_ERR_HSM;
2088 active_ehi->action |= ATA_EH_RESET;
2089 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2090 }
2091
2092 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2093 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2094 host_ehi->action |= ATA_EH_RESET;
2095 ata_ehi_push_desc(host_ehi, "host bus error");
2096 }
2097
2098 if (irq_stat & PORT_IRQ_IF_ERR) {
2099 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2100 host_ehi->action |= ATA_EH_RESET;
2101 ata_ehi_push_desc(host_ehi, "interface fatal error");
2102 }
2103
2104 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2105 ata_ehi_hotplugged(host_ehi);
2106 ata_ehi_push_desc(host_ehi, "%s",
2107 irq_stat & PORT_IRQ_CONNECT ?
2108 "connection status changed" : "PHY RDY changed");
2109 }
2110
2111 /* okay, let's hand over to EH */
2112
2113 if (irq_stat & PORT_IRQ_FREEZE)
2114 ata_port_freeze(ap);
2115 else
2116 ata_port_abort(ap);
2117 }
2118
2119 static void ahci_port_intr(struct ata_port *ap)
2120 {
2121 void __iomem *port_mmio = ahci_port_base(ap);
2122 struct ata_eh_info *ehi = &ap->link.eh_info;
2123 struct ahci_port_priv *pp = ap->private_data;
2124 struct ahci_host_priv *hpriv = ap->host->private_data;
2125 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2126 u32 status, qc_active;
2127 int rc;
2128
2129 status = readl(port_mmio + PORT_IRQ_STAT);
2130 writel(status, port_mmio + PORT_IRQ_STAT);
2131
2132 /* ignore BAD_PMP while resetting */
2133 if (unlikely(resetting))
2134 status &= ~PORT_IRQ_BAD_PMP;
2135
2136 	/* If we are getting PhyRdy, this is
2137 	 * just a power state change; clear the
2138 	 * PhyRdy bit here, plus the PhyRdy/Comm
2139 	 * Wake bits from SError.
2140 	 */
2141 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2142 (status & PORT_IRQ_PHYRDY)) {
2143 status &= ~PORT_IRQ_PHYRDY;
2144 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2145 }
2146
2147 if (unlikely(status & PORT_IRQ_ERROR)) {
2148 ahci_error_intr(ap, status);
2149 return;
2150 }
2151
2152 if (status & PORT_IRQ_SDB_FIS) {
2153 /* If SNotification is available, leave notification
2154 * handling to sata_async_notification(). If not,
2155 * emulate it by snooping SDB FIS RX area.
2156 *
2157 * Snooping FIS RX area is probably cheaper than
2158 		 * poking SNotification but some controllers which
2159 		 * implement SNotification, ICH9 for example, don't
2160 		 * store the AN SDB FIS into the receive area.
2161 */
2162 if (hpriv->cap & HOST_CAP_SNTF)
2163 sata_async_notification(ap);
2164 else {
2165 /* If the 'N' bit in word 0 of the FIS is set,
2166 * we just received asynchronous notification.
2167 * Tell libata about it.
2168 */
2169 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2170 u32 f0 = le32_to_cpu(f[0]);
2171
2172 if (f0 & (1 << 15))
2173 sata_async_notification(ap);
2174 }
2175 }
2176
2177 /* pp->active_link is valid iff any command is in flight */
2178 if (ap->qc_active && pp->active_link->sactive)
2179 qc_active = readl(port_mmio + PORT_SCR_ACT);
2180 else
2181 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2182
2183 rc = ata_qc_complete_multiple(ap, qc_active);
2184
2185 /* while resetting, invalid completions are expected */
2186 if (unlikely(rc < 0 && !resetting)) {
2187 ehi->err_mask |= AC_ERR_HSM;
2188 ehi->action |= ATA_EH_RESET;
2189 ata_port_freeze(ap);
2190 }
2191 }
2192
2193 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2194 {
2195 struct ata_host *host = dev_instance;
2196 struct ahci_host_priv *hpriv;
2197 unsigned int i, handled = 0;
2198 void __iomem *mmio;
2199 u32 irq_stat, irq_masked;
2200
2201 VPRINTK("ENTER\n");
2202
2203 hpriv = host->private_data;
2204 mmio = host->iomap[AHCI_PCI_BAR];
2205
2206 /* sigh. 0xffffffff is a valid return from h/w */
2207 irq_stat = readl(mmio + HOST_IRQ_STAT);
2208 if (!irq_stat)
2209 return IRQ_NONE;
2210
2211 irq_masked = irq_stat & hpriv->port_map;
2212
2213 spin_lock(&host->lock);
2214
2215 for (i = 0; i < host->n_ports; i++) {
2216 struct ata_port *ap;
2217
2218 if (!(irq_masked & (1 << i)))
2219 continue;
2220
2221 ap = host->ports[i];
2222 if (ap) {
2223 ahci_port_intr(ap);
2224 VPRINTK("port %u\n", i);
2225 } else {
2226 VPRINTK("port %u (no irq)\n", i);
2227 if (ata_ratelimit())
2228 dev_printk(KERN_WARNING, host->dev,
2229 "interrupt on disabled port %u\n", i);
2230 }
2231
2232 handled = 1;
2233 }
2234
2235 	/* HOST_IRQ_STAT behaves as a level triggered latch, meaning that
2236 	 * it should be cleared after all the port events are cleared;
2237 	 * otherwise, it will raise a spurious interrupt after each
2238 	 * valid one. Please read section 10.6.2 of AHCI 1.1 for more
2239 	 * information.
2240 	 *
2241 	 * Also, use the unmasked value to clear the interrupt, as a spurious
2242 	 * pending event on a dummy port might cause a screaming IRQ.
2243 	 */
2244 writel(irq_stat, mmio + HOST_IRQ_STAT);
2245
2246 spin_unlock(&host->lock);
2247
2248 VPRINTK("EXIT\n");
2249
2250 return IRQ_RETVAL(handled);
2251 }
2252
2253 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2254 {
2255 struct ata_port *ap = qc->ap;
2256 void __iomem *port_mmio = ahci_port_base(ap);
2257 struct ahci_port_priv *pp = ap->private_data;
2258
2259 /* Keep track of the currently active link. It will be used
2260 * in completion path to determine whether NCQ phase is in
2261 * progress.
2262 */
2263 pp->active_link = qc->dev->link;
2264
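	/*
	 * Per the AHCI spec, an NCQ command's tag must be set in PxSACT
	 * before the matching bit is written to PxCI; non-NCQ commands
	 * skip the SACT write.
	 */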
2265 if (qc->tf.protocol == ATA_PROT_NCQ)
2266 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2267 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2268
2269 ahci_sw_activity(qc->dev->link);
2270
2271 return 0;
2272 }
2273
2274 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2275 {
2276 struct ahci_port_priv *pp = qc->ap->private_data;
2277 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2278
2279 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2280 return true;
2281 }
2282
2283 static void ahci_freeze(struct ata_port *ap)
2284 {
2285 void __iomem *port_mmio = ahci_port_base(ap);
2286
2287 /* turn IRQ off */
2288 writel(0, port_mmio + PORT_IRQ_MASK);
2289 }
2290
2291 static void ahci_thaw(struct ata_port *ap)
2292 {
2293 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2294 void __iomem *port_mmio = ahci_port_base(ap);
2295 u32 tmp;
2296 struct ahci_port_priv *pp = ap->private_data;
2297
2298 /* clear IRQ */
2299 tmp = readl(port_mmio + PORT_IRQ_STAT);
2300 writel(tmp, port_mmio + PORT_IRQ_STAT);
2301 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2302
2303 /* turn IRQ back on */
2304 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2305 }
2306
2307 static void ahci_error_handler(struct ata_port *ap)
2308 {
2309 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2310 /* restart engine */
2311 ahci_stop_engine(ap);
2312 ahci_start_engine(ap);
2313 }
2314
2315 sata_pmp_error_handler(ap);
2316 }
2317
2318 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2319 {
2320 struct ata_port *ap = qc->ap;
2321
2322 /* make DMA engine forget about the failed command */
2323 if (qc->flags & ATA_QCFLAG_FAILED)
2324 ahci_kick_engine(ap, 1);
2325 }
2326
2327 static void ahci_pmp_attach(struct ata_port *ap)
2328 {
2329 void __iomem *port_mmio = ahci_port_base(ap);
2330 struct ahci_port_priv *pp = ap->private_data;
2331 u32 cmd;
2332
2333 cmd = readl(port_mmio + PORT_CMD);
2334 cmd |= PORT_CMD_PMP;
2335 writel(cmd, port_mmio + PORT_CMD);
2336
2337 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2338 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2339 }
2340
2341 static void ahci_pmp_detach(struct ata_port *ap)
2342 {
2343 void __iomem *port_mmio = ahci_port_base(ap);
2344 struct ahci_port_priv *pp = ap->private_data;
2345 u32 cmd;
2346
2347 cmd = readl(port_mmio + PORT_CMD);
2348 cmd &= ~PORT_CMD_PMP;
2349 writel(cmd, port_mmio + PORT_CMD);
2350
2351 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2352 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2353 }
2354
2355 static int ahci_port_resume(struct ata_port *ap)
2356 {
2357 ahci_power_up(ap);
2358 ahci_start_port(ap);
2359
2360 if (sata_pmp_attached(ap))
2361 ahci_pmp_attach(ap);
2362 else
2363 ahci_pmp_detach(ap);
2364
2365 return 0;
2366 }
2367
2368 #ifdef CONFIG_PM
2369 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2370 {
2371 const char *emsg = NULL;
2372 int rc;
2373
2374 rc = ahci_deinit_port(ap, &emsg);
2375 if (rc == 0)
2376 ahci_power_down(ap);
2377 else {
2378 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2379 ahci_start_port(ap);
2380 }
2381
2382 return rc;
2383 }
2384
2385 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2386 {
2387 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2388 struct ahci_host_priv *hpriv = host->private_data;
2389 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2390 u32 ctl;
2391
2392 if (mesg.event & PM_EVENT_SUSPEND &&
2393 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2394 dev_printk(KERN_ERR, &pdev->dev,
2395 "BIOS update required for suspend/resume\n");
2396 return -EIO;
2397 }
2398
2399 if (mesg.event & PM_EVENT_SLEEP) {
2400 /* AHCI spec rev1.1 section 8.3.3:
2401 * Software must disable interrupts prior to requesting a
2402 * transition of the HBA to D3 state.
2403 */
2404 ctl = readl(mmio + HOST_CTL);
2405 ctl &= ~HOST_IRQ_EN;
2406 writel(ctl, mmio + HOST_CTL);
2407 readl(mmio + HOST_CTL); /* flush */
2408 }
2409
2410 return ata_pci_device_suspend(pdev, mesg);
2411 }
2412
2413 static int ahci_pci_device_resume(struct pci_dev *pdev)
2414 {
2415 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2416 int rc;
2417
2418 rc = ata_pci_device_do_resume(pdev);
2419 if (rc)
2420 return rc;
2421
2422 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2423 rc = ahci_reset_controller(host);
2424 if (rc)
2425 return rc;
2426
2427 ahci_init_controller(host);
2428 }
2429
2430 ata_host_resume(host);
2431
2432 return 0;
2433 }
2434 #endif
2435
2436 static int ahci_port_start(struct ata_port *ap)
2437 {
2438 struct device *dev = ap->host->dev;
2439 struct ahci_port_priv *pp;
2440 void *mem;
2441 dma_addr_t mem_dma;
2442
2443 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2444 if (!pp)
2445 return -ENOMEM;
2446
2447 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2448 GFP_KERNEL);
2449 if (!mem)
2450 return -ENOMEM;
2451 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2452
2453 /*
2454 	 * First item in chunk of DMA memory: the 32-slot command list,
2455 	 * one 32-byte command header per slot
2456 */
2457 pp->cmd_slot = mem;
2458 pp->cmd_slot_dma = mem_dma;
2459
2460 mem += AHCI_CMD_SLOT_SZ;
2461 mem_dma += AHCI_CMD_SLOT_SZ;
2462
2463 /*
2464 * Second item: Received-FIS area
2465 */
2466 pp->rx_fis = mem;
2467 pp->rx_fis_dma = mem_dma;
2468
2469 mem += AHCI_RX_FIS_SZ;
2470 mem_dma += AHCI_RX_FIS_SZ;
2471
2472 /*
2473 	 * Third item: per-slot command tables and their
2474 	 * scatter/gather (PRD) tables, indexed by tag
2475 */
2476 pp->cmd_tbl = mem;
2477 pp->cmd_tbl_dma = mem_dma;
2478
2479 /*
2480 * Save off initial list of interrupts to be enabled.
2481 * This could be changed later
2482 */
2483 pp->intr_mask = DEF_PORT_IRQ;
2484
2485 ap->private_data = pp;
2486
2487 /* engage engines, captain */
2488 return ahci_port_resume(ap);
2489 }
2490
2491 static void ahci_port_stop(struct ata_port *ap)
2492 {
2493 const char *emsg = NULL;
2494 int rc;
2495
2496 /* de-initialize port */
2497 rc = ahci_deinit_port(ap, &emsg);
2498 if (rc)
2499 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2500 }
2501
2502 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2503 {
2504 int rc;
2505
2506 if (using_dac &&
2507 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2508 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2509 if (rc) {
2510 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2511 if (rc) {
2512 dev_printk(KERN_ERR, &pdev->dev,
2513 "64-bit DMA enable failed\n");
2514 return rc;
2515 }
2516 }
2517 } else {
2518 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2519 if (rc) {
2520 dev_printk(KERN_ERR, &pdev->dev,
2521 "32-bit DMA enable failed\n");
2522 return rc;
2523 }
2524 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2525 if (rc) {
2526 dev_printk(KERN_ERR, &pdev->dev,
2527 "32-bit consistent DMA enable failed\n");
2528 return rc;
2529 }
2530 }
2531 return 0;
2532 }
2533
2534 static void ahci_print_info(struct ata_host *host)
2535 {
2536 struct ahci_host_priv *hpriv = host->private_data;
2537 struct pci_dev *pdev = to_pci_dev(host->dev);
2538 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2539 u32 vers, cap, impl, speed;
2540 const char *speed_s;
2541 u16 cc;
2542 const char *scc_s;
2543
2544 vers = readl(mmio + HOST_VERSION);
2545 cap = hpriv->cap;
2546 impl = hpriv->port_map;
2547
2548 speed = (cap >> 20) & 0xf;
2549 if (speed == 1)
2550 speed_s = "1.5";
2551 else if (speed == 2)
2552 speed_s = "3";
2553 else if (speed == 3)
2554 speed_s = "6";
2555 else
2556 speed_s = "?";
2557
2558 pci_read_config_word(pdev, 0x0a, &cc);
2559 if (cc == PCI_CLASS_STORAGE_IDE)
2560 scc_s = "IDE";
2561 else if (cc == PCI_CLASS_STORAGE_SATA)
2562 scc_s = "SATA";
2563 else if (cc == PCI_CLASS_STORAGE_RAID)
2564 scc_s = "RAID";
2565 else
2566 scc_s = "unknown";
2567
2568 dev_printk(KERN_INFO, &pdev->dev,
2569 "AHCI %02x%02x.%02x%02x "
2570 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2571 ,
2572
2573 (vers >> 24) & 0xff,
2574 (vers >> 16) & 0xff,
2575 (vers >> 8) & 0xff,
2576 vers & 0xff,
2577
2578 ((cap >> 8) & 0x1f) + 1,
2579 (cap & 0x1f) + 1,
2580 speed_s,
2581 impl,
2582 scc_s);
2583
2584 dev_printk(KERN_INFO, &pdev->dev,
2585 "flags: "
2586 "%s%s%s%s%s%s%s"
2587 "%s%s%s%s%s%s%s"
2588 "%s\n"
2589 ,
2590
2591 cap & (1 << 31) ? "64bit " : "",
2592 cap & (1 << 30) ? "ncq " : "",
2593 cap & (1 << 29) ? "sntf " : "",
2594 cap & (1 << 28) ? "ilck " : "",
2595 cap & (1 << 27) ? "stag " : "",
2596 cap & (1 << 26) ? "pm " : "",
2597 cap & (1 << 25) ? "led " : "",
2598
2599 cap & (1 << 24) ? "clo " : "",
2600 cap & (1 << 19) ? "nz " : "",
2601 cap & (1 << 18) ? "only " : "",
2602 cap & (1 << 17) ? "pmp " : "",
2603 cap & (1 << 15) ? "pio " : "",
2604 cap & (1 << 14) ? "slum " : "",
2605 cap & (1 << 13) ? "part " : "",
2606 cap & (1 << 6) ? "ems ": ""
2607 );
2608 }
2609
2610 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2611 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2612 * support PMP and the 4726 either directly exports the device
2613 * attached to the first downstream port or acts as a hardware storage
2614  * controller and emulates a single ATA device (can be RAID 0/1 or some
2615 * other configuration).
2616 *
2617 * When there's no device attached to the first downstream port of the
2618 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2619 * configure the 4726. However, ATA emulation of the device is very
2620 * lame. It doesn't send signature D2H Reg FIS after the initial
2621  * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2622 *
2623 * The following function works around the problem by always using
2624 * hardreset on the port and not depending on receiving signature FIS
2625 * afterward. If signature FIS isn't received soon, ATA class is
2626 * assumed without follow-up softreset.
2627 */
2628 static void ahci_p5wdh_workaround(struct ata_host *host)
2629 {
2630 static struct dmi_system_id sysids[] = {
2631 {
2632 .ident = "P5W DH Deluxe",
2633 .matches = {
2634 DMI_MATCH(DMI_SYS_VENDOR,
2635 "ASUSTEK COMPUTER INC"),
2636 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2637 },
2638 },
2639 { }
2640 };
2641 struct pci_dev *pdev = to_pci_dev(host->dev);
2642
2643 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2644 dmi_check_system(sysids)) {
2645 struct ata_port *ap = host->ports[1];
2646
2647 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2648 "Deluxe on-board SIMG4726 workaround\n");
2649
2650 ap->ops = &ahci_p5wdh_ops;
2651 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2652 }
2653 }
2654
2655 /*
2656 * SB600 ahci controller on certain boards can't do 64bit DMA with
2657 * older BIOS.
2658 */
2659 static bool ahci_sb600_32bit_only(struct pci_dev *pdev)
2660 {
2661 static const struct dmi_system_id sysids[] = {
2662 /*
2663 		 * The oldest version known to be broken is 0901; the oldest
2664 		 * known working one is 1501, which was released on 2007-10-26.
2665 * Force 32bit DMA on anything older than 1501.
2666 * Please read bko#9412 for more info.
2667 */
2668 {
2669 .ident = "ASUS M2A-VM",
2670 .matches = {
2671 DMI_MATCH(DMI_BOARD_VENDOR,
2672 "ASUSTeK Computer INC."),
2673 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2674 },
2675 .driver_data = "20071026", /* yyyymmdd */
2676 },
2677 /*
2678 		 * It's not yet known whether a more recent BIOS fixes the
2679 * problem. Blacklist the whole board for the time
2680 * being. Please read the following thread for more
2681 * info.
2682 *
2683 * http://thread.gmane.org/gmane.linux.ide/42326
2684 */
2685 {
2686 .ident = "Gigabyte GA-MA69VM-S2",
2687 .matches = {
2688 DMI_MATCH(DMI_BOARD_VENDOR,
2689 "Gigabyte Technology Co., Ltd."),
2690 DMI_MATCH(DMI_BOARD_NAME, "GA-MA69VM-S2"),
2691 },
2692 },
2693 { }
2694 };
2695 const struct dmi_system_id *match;
2696
2697 match = dmi_first_match(sysids);
2698 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2699 !match)
2700 return false;
2701
2702 if (match->driver_data) {
2703 int year, month, date;
2704 char buf[9];
2705
2706 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2707 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2708
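		/*
		 * The zero-padded yyyymmdd strings sort lexicographically in
		 * date order, so a plain strcmp() against the cutoff date is
		 * sufficient.
		 */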
2709 if (strcmp(buf, match->driver_data) >= 0)
2710 return false;
2711
2712 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
2713 "forcing 32bit DMA, update BIOS\n", match->ident);
2714 } else
2715 dev_printk(KERN_WARNING, &pdev->dev, "%s: this board can't "
2716 "do 64bit DMA, forcing 32bit\n", match->ident);
2717
2718 return true;
2719 }
2720
2721 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2722 {
2723 static const struct dmi_system_id broken_systems[] = {
2724 {
2725 .ident = "HP Compaq nx6310",
2726 .matches = {
2727 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2728 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2729 },
2730 /* PCI slot number of the controller */
2731 .driver_data = (void *)0x1FUL,
2732 },
2733 {
2734 .ident = "HP Compaq 6720s",
2735 .matches = {
2736 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2737 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2738 },
2739 /* PCI slot number of the controller */
2740 .driver_data = (void *)0x1FUL,
2741 },
2742
2743 { } /* terminate list */
2744 };
2745 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2746
2747 if (dmi) {
2748 unsigned long slot = (unsigned long)dmi->driver_data;
2749 /* apply the quirk only to on-board controllers */
2750 return slot == PCI_SLOT(pdev->devfn);
2751 }
2752
2753 return false;
2754 }
2755
2756 static bool ahci_broken_suspend(struct pci_dev *pdev)
2757 {
2758 static const struct dmi_system_id sysids[] = {
2759 /*
2760 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
2761 * to the harddisk doesn't become online after
2762 * resuming from STR. Warn and fail suspend.
2763 */
2764 {
2765 .ident = "dv4",
2766 .matches = {
2767 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2768 DMI_MATCH(DMI_PRODUCT_NAME,
2769 "HP Pavilion dv4 Notebook PC"),
2770 },
2771 .driver_data = "F.30", /* cutoff BIOS version */
2772 },
2773 {
2774 .ident = "dv5",
2775 .matches = {
2776 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2777 DMI_MATCH(DMI_PRODUCT_NAME,
2778 "HP Pavilion dv5 Notebook PC"),
2779 },
2780 .driver_data = "F.16", /* cutoff BIOS version */
2781 },
2782 {
2783 .ident = "dv6",
2784 .matches = {
2785 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2786 DMI_MATCH(DMI_PRODUCT_NAME,
2787 "HP Pavilion dv6 Notebook PC"),
2788 },
2789 .driver_data = "F.21", /* cutoff BIOS version */
2790 },
2791 {
2792 .ident = "HDX18",
2793 .matches = {
2794 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2795 DMI_MATCH(DMI_PRODUCT_NAME,
2796 "HP HDX18 Notebook PC"),
2797 },
2798 .driver_data = "F.23", /* cutoff BIOS version */
2799 },
2800 { } /* terminate list */
2801 };
2802 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2803 const char *ver;
2804
2805 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
2806 return false;
2807
2808 ver = dmi_get_system_info(DMI_BIOS_VERSION);
2809
2810 return !ver || strcmp(ver, dmi->driver_data) < 0;
2811 }
2812
2813 static bool ahci_broken_online(struct pci_dev *pdev)
2814 {
2815 #define ENCODE_BUSDEVFN(bus, slot, func) \
2816 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
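	/*
	 * For example, ENCODE_BUSDEVFN(0x0a, 0x00, 0) yields 0x0a00: bus
	 * number in the high byte, devfn in the low byte, unpacked again at
	 * the bottom of this function.
	 */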
2817 static const struct dmi_system_id sysids[] = {
2818 /*
2819 		 * There are several Gigabyte boards which use
2820 		 * SIMG5723s configured as hardware RAID. Certain
2821 		 * 5723 firmware revisions shipped there keep the link
2822 		 * online but fail to answer properly to SRST or
2823 		 * IDENTIFY when no device is attached downstream,
2824 		 * causing libata to retry quite a few times and leading
2825 		 * to excessive detection delay.
2826 *
2827 * As these firmwares respond to the second reset try
2828 * with invalid device signature, considering unknown
2829 * sig as offline works around the problem acceptably.
2830 */
2831 {
2832 .ident = "EP45-DQ6",
2833 .matches = {
2834 DMI_MATCH(DMI_BOARD_VENDOR,
2835 "Gigabyte Technology Co., Ltd."),
2836 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
2837 },
2838 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
2839 },
2840 {
2841 .ident = "EP45-DS5",
2842 .matches = {
2843 DMI_MATCH(DMI_BOARD_VENDOR,
2844 "Gigabyte Technology Co., Ltd."),
2845 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
2846 },
2847 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
2848 },
2849 { } /* terminate list */
2850 };
2851 #undef ENCODE_BUSDEVFN
2852 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2853 unsigned int val;
2854
2855 if (!dmi)
2856 return false;
2857
2858 val = (unsigned long)dmi->driver_data;
2859
2860 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
2861 }
2862
2863 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2864 {
2865 static int printed_version;
2866 unsigned int board_id = ent->driver_data;
2867 struct ata_port_info pi = ahci_port_info[board_id];
2868 const struct ata_port_info *ppi[] = { &pi, NULL };
2869 struct device *dev = &pdev->dev;
2870 struct ahci_host_priv *hpriv;
2871 struct ata_host *host;
2872 int n_ports, i, rc;
2873
2874 VPRINTK("ENTER\n");
2875
2876 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2877
2878 if (!printed_version++)
2879 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2880
2881 	/* The AHCI driver can only drive the SATA ports; the PATA driver
2882 	   can drive them all, so if both drivers are selected make sure
2883 	   AHCI stays out of the way */
2884 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2885 return -ENODEV;
2886
2887 /* acquire resources */
2888 rc = pcim_enable_device(pdev);
2889 if (rc)
2890 return rc;
2891
2892 	/* AHCI controllers often implement an SFF compatible interface.
2893 * Grab all PCI BARs just in case.
2894 */
2895 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2896 if (rc == -EBUSY)
2897 pcim_pin_device(pdev);
2898 if (rc)
2899 return rc;
2900
2901 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2902 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2903 u8 map;
2904
2905 /* ICH6s share the same PCI ID for both piix and ahci
2906 * modes. Enabling ahci mode while MAP indicates
2907 * combined mode is a bad idea. Yield to ata_piix.
2908 */
2909 pci_read_config_byte(pdev, ICH_MAP, &map);
2910 if (map & 0x3) {
2911 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2912 "combined mode, can't enable AHCI mode\n");
2913 return -ENODEV;
2914 }
2915 }
2916
2917 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2918 if (!hpriv)
2919 return -ENOMEM;
2920 hpriv->flags |= (unsigned long)pi.private_data;
2921
2922 /* MCP65 revision A1 and A2 can't do MSI */
2923 if (board_id == board_ahci_mcp65 &&
2924 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2925 hpriv->flags |= AHCI_HFLAG_NO_MSI;
2926
2927 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
2928 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
2929 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
2930
2931 /* apply sb600 32bit only quirk */
2932 if (ahci_sb600_32bit_only(pdev))
2933 hpriv->flags |= AHCI_HFLAG_32BIT_ONLY;
2934
2935 if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
2936 pci_enable_msi(pdev);
2937
2938 /* save initial config */
2939 ahci_save_initial_config(pdev, hpriv);
2940
2941 /* prepare host */
2942 if (hpriv->cap & HOST_CAP_NCQ)
2943 pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
2944
2945 if (hpriv->cap & HOST_CAP_PMP)
2946 pi.flags |= ATA_FLAG_PMP;
2947
2948 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2949 u8 messages;
2950 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2951 u32 em_loc = readl(mmio + HOST_EM_LOC);
2952 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2953
2954 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2955
2956 /* we only support LED message type right now */
2957 if ((messages & 0x01) && (ahci_em_messages == 1)) {
2958 /* store em_loc */
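			/*
			 * Per AHCI, the upper half of HOST_EM_LOC is the
			 * message buffer offset in dwords; convert it to a
			 * byte offset.
			 */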
2959 hpriv->em_loc = ((em_loc >> 16) * 4);
2960 pi.flags |= ATA_FLAG_EM;
2961 if (!(em_ctl & EM_CTL_ALHD))
2962 pi.flags |= ATA_FLAG_SW_ACTIVITY;
2963 }
2964 }
2965
2966 if (ahci_broken_system_poweroff(pdev)) {
2967 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
2968 dev_info(&pdev->dev,
2969 "quirky BIOS, skipping spindown on poweroff\n");
2970 }
2971
2972 if (ahci_broken_suspend(pdev)) {
2973 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
2974 dev_printk(KERN_WARNING, &pdev->dev,
2975 "BIOS update required for suspend/resume\n");
2976 }
2977
2978 if (ahci_broken_online(pdev)) {
2979 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
2980 dev_info(&pdev->dev,
2981 "online status unreliable, applying workaround\n");
2982 }
2983
2984 	/* CAP.NP sometimes indicates the index of the last enabled
2985 * port, at other times, that of the last possible port, so
2986 * determining the maximum port number requires looking at
2987 * both CAP.NP and port_map.
2988 */
2989 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2990
2991 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2992 if (!host)
2993 return -ENOMEM;
2994 host->iomap = pcim_iomap_table(pdev);
2995 host->private_data = hpriv;
2996
2997 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
2998 host->flags |= ATA_HOST_PARALLEL_SCAN;
2999 else
3000 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3001
3002 if (pi.flags & ATA_FLAG_EM)
3003 ahci_reset_em(host);
3004
3005 for (i = 0; i < host->n_ports; i++) {
3006 struct ata_port *ap = host->ports[i];
3007
3008 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3009 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3010 0x100 + ap->port_no * 0x80, "port");
3011
3012 /* set initial link pm policy */
3013 ap->pm_policy = NOT_AVAILABLE;
3014
3015 /* set enclosure management message type */
3016 if (ap->flags & ATA_FLAG_EM)
3017 ap->em_message_type = ahci_em_messages;
3018
3019
3020 /* disabled/not-implemented port */
3021 if (!(hpriv->port_map & (1 << i)))
3022 ap->ops = &ata_dummy_port_ops;
3023 }
3024
3025 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3026 ahci_p5wdh_workaround(host);
3027
3028 /* initialize adapter */
3029 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3030 if (rc)
3031 return rc;
3032
3033 rc = ahci_reset_controller(host);
3034 if (rc)
3035 return rc;
3036
3037 ahci_init_controller(host);
3038 ahci_print_info(host);
3039
3040 pci_set_master(pdev);
3041 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3042 &ahci_sht);
3043 }
3044
3045 static int __init ahci_init(void)
3046 {
3047 return pci_register_driver(&ahci_pci_driver);
3048 }
3049
3050 static void __exit ahci_exit(void)
3051 {
3052 pci_unregister_driver(&ahci_pci_driver);
3053 }
3054
3055
3056 MODULE_AUTHOR("Jeff Garzik");
3057 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3058 MODULE_LICENSE("GPL");
3059 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3060 MODULE_VERSION(DRV_VERSION);
3061
3062 module_init(ahci_init);
3063 module_exit(ahci_exit);