[PATCH] ahci: AHCI mode SATA patch for Intel ICH9
[deliverable/linux.git] drivers/ata/ahci.c
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/sched.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/device.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48 #include <asm/io.h>
49
50 #define DRV_NAME "ahci"
51 #define DRV_VERSION "2.0"
52
53
54 enum {
55 AHCI_PCI_BAR = 5,
56 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0,
59 AHCI_MAX_CMDS = 32,
60 AHCI_CMD_SZ = 32,
61 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
62 AHCI_RX_FIS_SZ = 256,
63 AHCI_CMD_TBL_CDB = 0x40,
64 AHCI_CMD_TBL_HDR_SZ = 0x80,
65 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
66 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
67 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
68 AHCI_RX_FIS_SZ,
69 AHCI_IRQ_ON_SG = (1 << 31),
70 AHCI_CMD_ATAPI = (1 << 5),
71 AHCI_CMD_WRITE = (1 << 6),
72 AHCI_CMD_PREFETCH = (1 << 7),
73 AHCI_CMD_RESET = (1 << 8),
74 AHCI_CMD_CLR_BUSY = (1 << 10),
75
76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78
79 board_ahci = 0,
80 board_ahci_vt8251 = 1,
81
82 /* global controller registers */
83 HOST_CAP = 0x00, /* host capabilities */
84 HOST_CTL = 0x04, /* global host control */
85 HOST_IRQ_STAT = 0x08, /* interrupt status */
86 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
87 HOST_VERSION = 0x10, /* AHCI spec. version compliance */
88
89 /* HOST_CTL bits */
90 HOST_RESET = (1 << 0), /* reset controller; self-clear */
91 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
92 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
93
94 /* HOST_CAP bits */
95 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
96 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
97 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
98 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
99 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
100
101 /* registers for each SATA port */
102 PORT_LST_ADDR = 0x00, /* command list DMA addr */
103 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
104 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
105 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
106 PORT_IRQ_STAT = 0x10, /* interrupt status */
107 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
108 PORT_CMD = 0x18, /* port command */
109 PORT_TFDATA = 0x20, /* taskfile data */
110 PORT_SIG = 0x24, /* device TF signature */
111 PORT_CMD_ISSUE = 0x38, /* command issue */
112 PORT_SCR = 0x28, /* SATA phy register block */
113 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
114 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
115 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
116 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
117
118 /* PORT_IRQ_{STAT,MASK} bits */
119 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
120 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
121 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
122 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
123 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
124 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
125 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
126 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
127
128 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
129 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
130 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
131 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
132 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
133 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
134 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
135 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
136 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
137
138 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
139 PORT_IRQ_IF_ERR |
140 PORT_IRQ_CONNECT |
141 PORT_IRQ_PHYRDY |
142 PORT_IRQ_UNK_FIS,
143 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
144 PORT_IRQ_TF_ERR |
145 PORT_IRQ_HBUS_DATA_ERR,
146 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
147 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
148 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
149
150 /* PORT_CMD bits */
151 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
152 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
153 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
154 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
155 PORT_CMD_CLO = (1 << 3), /* Command list override */
156 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
157 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
158 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
159
160 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
161 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
162 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
163 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
164
165 /* hpriv->flags bits */
166 AHCI_FLAG_MSI = (1 << 0),
167
168 /* ap->flags bits */
169 AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
170 AHCI_FLAG_NO_NCQ = (1 << 25),
171 };
172
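/*
 * One entry in the per-port command list.  'opts' packs the command FIS
 * length in dwords, the ATAPI/Write/Prefetch/Reset/Clear-busy flags
 * defined above, and the PRDT entry count in bits 31:16; 'status' is
 * written back by the HBA with the byte count transferred; the two
 * tbl_addr words point at this tag's command table.
 */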
173 struct ahci_cmd_hdr {
174 u32 opts;
175 u32 status;
176 u32 tbl_addr;
177 u32 tbl_addr_hi;
178 u32 reserved[4];
179 };
180
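/*
 * One PRDT (scatter/gather) entry in a command table.  'flags_size'
 * carries the zero-based data byte count in its low bits and the
 * interrupt-on-completion flag (AHCI_IRQ_ON_SG) in bit 31.
 */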
181 struct ahci_sg {
182 u32 addr;
183 u32 addr_hi;
184 u32 reserved;
185 u32 flags_size;
186 };
187
188 struct ahci_host_priv {
189 unsigned long flags;
190 u32 cap; /* cache of HOST_CAP register */
191 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
192 };
193
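/*
 * Per-port driver state.  cmd_slot, rx_fis and cmd_tbl are carved out of
 * a single AHCI_PORT_PRIV_DMA_SZ coherent allocation in ahci_port_start(),
 * in that order.
 */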
194 struct ahci_port_priv {
195 struct ahci_cmd_hdr *cmd_slot;
196 dma_addr_t cmd_slot_dma;
197 void *cmd_tbl;
198 dma_addr_t cmd_tbl_dma;
199 void *rx_fis;
200 dma_addr_t rx_fis_dma;
201 };
202
203 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
204 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
205 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
206 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
207 static irqreturn_t ahci_interrupt (int irq, void *dev_instance);
208 static void ahci_irq_clear(struct ata_port *ap);
209 static int ahci_port_start(struct ata_port *ap);
210 static void ahci_port_stop(struct ata_port *ap);
211 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
212 static void ahci_qc_prep(struct ata_queued_cmd *qc);
213 static u8 ahci_check_status(struct ata_port *ap);
214 static void ahci_freeze(struct ata_port *ap);
215 static void ahci_thaw(struct ata_port *ap);
216 static void ahci_error_handler(struct ata_port *ap);
217 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
218 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
219 static int ahci_port_resume(struct ata_port *ap);
220 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
221 static int ahci_pci_device_resume(struct pci_dev *pdev);
222 static void ahci_remove_one (struct pci_dev *pdev);
223
224 static struct scsi_host_template ahci_sht = {
225 .module = THIS_MODULE,
226 .name = DRV_NAME,
227 .ioctl = ata_scsi_ioctl,
228 .queuecommand = ata_scsi_queuecmd,
229 .change_queue_depth = ata_scsi_change_queue_depth,
230 .can_queue = AHCI_MAX_CMDS - 1,
231 .this_id = ATA_SHT_THIS_ID,
232 .sg_tablesize = AHCI_MAX_SG,
233 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
234 .emulated = ATA_SHT_EMULATED,
235 .use_clustering = AHCI_USE_CLUSTERING,
236 .proc_name = DRV_NAME,
237 .dma_boundary = AHCI_DMA_BOUNDARY,
238 .slave_configure = ata_scsi_slave_config,
239 .slave_destroy = ata_scsi_slave_destroy,
240 .bios_param = ata_std_bios_param,
241 .suspend = ata_scsi_device_suspend,
242 .resume = ata_scsi_device_resume,
243 };
244
245 static const struct ata_port_operations ahci_ops = {
246 .port_disable = ata_port_disable,
247
248 .check_status = ahci_check_status,
249 .check_altstatus = ahci_check_status,
250 .dev_select = ata_noop_dev_select,
251
252 .tf_read = ahci_tf_read,
253
254 .qc_prep = ahci_qc_prep,
255 .qc_issue = ahci_qc_issue,
256
257 .irq_handler = ahci_interrupt,
258 .irq_clear = ahci_irq_clear,
259
260 .scr_read = ahci_scr_read,
261 .scr_write = ahci_scr_write,
262
263 .freeze = ahci_freeze,
264 .thaw = ahci_thaw,
265
266 .error_handler = ahci_error_handler,
267 .post_internal_cmd = ahci_post_internal_cmd,
268
269 .port_suspend = ahci_port_suspend,
270 .port_resume = ahci_port_resume,
271
272 .port_start = ahci_port_start,
273 .port_stop = ahci_port_stop,
274 };
275
276 static const struct ata_port_info ahci_port_info[] = {
277 /* board_ahci */
278 {
279 .sht = &ahci_sht,
280 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
281 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
282 ATA_FLAG_SKIP_D2H_BSY,
283 .pio_mask = 0x1f, /* pio0-4 */
284 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
285 .port_ops = &ahci_ops,
286 },
287 /* board_ahci_vt8251 */
288 {
289 .sht = &ahci_sht,
290 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
291 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
292 ATA_FLAG_SKIP_D2H_BSY |
293 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
294 .pio_mask = 0x1f, /* pio0-4 */
295 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
296 .port_ops = &ahci_ops,
297 },
298 };
299
300 static const struct pci_device_id ahci_pci_tbl[] = {
301 /* Intel */
302 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
303 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
304 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
305 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
306 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
307 { PCI_VDEVICE(AL, 0x5288), board_ahci }, /* ULi M5288 */
308 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
309 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
310 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
311 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
312 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
313 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
314 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
315 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
316 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
317 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
318 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
319 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
320 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
321 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
322 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
323 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
324 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
325 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
326 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
327 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
328
329 /* JMicron */
330 { PCI_VDEVICE(JMICRON, 0x2360), board_ahci }, /* JMicron JMB360 */
331 { PCI_VDEVICE(JMICRON, 0x2361), board_ahci }, /* JMicron JMB361 */
332 { PCI_VDEVICE(JMICRON, 0x2363), board_ahci }, /* JMicron JMB363 */
333 { PCI_VDEVICE(JMICRON, 0x2365), board_ahci }, /* JMicron JMB365 */
334 { PCI_VDEVICE(JMICRON, 0x2366), board_ahci }, /* JMicron JMB366 */
335
336 /* ATI */
337 { PCI_VDEVICE(ATI, 0x4380), board_ahci }, /* ATI SB600 non-raid */
338 { PCI_VDEVICE(ATI, 0x4381), board_ahci }, /* ATI SB600 raid */
339
340 /* VIA */
341 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
342
343 /* NVIDIA */
344 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
345 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
346 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
347 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
348 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
349 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
350 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
351 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
352 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
353 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
354 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
355 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
356
357 /* SiS */
358 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
359 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
360 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
361
362 { } /* terminate list */
363 };
364
365
366 static struct pci_driver ahci_pci_driver = {
367 .name = DRV_NAME,
368 .id_table = ahci_pci_tbl,
369 .probe = ahci_init_one,
370 .suspend = ahci_pci_device_suspend,
371 .resume = ahci_pci_device_resume,
372 .remove = ahci_remove_one,
373 };
374
375
376 static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
377 {
378 return base + 0x100 + (port * 0x80);
379 }
380
381 static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
382 {
383 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
384 }
385
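/*
 * libata's SCR_* indices map linearly onto the per-port
 * PxSSTS/PxSCTL/PxSERR/PxSACT registers, which sit four bytes apart
 * starting at scr_addr (port base + PORT_SCR).
 */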
386 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
387 {
388 unsigned int sc_reg;
389
390 switch (sc_reg_in) {
391 case SCR_STATUS: sc_reg = 0; break;
392 case SCR_CONTROL: sc_reg = 1; break;
393 case SCR_ERROR: sc_reg = 2; break;
394 case SCR_ACTIVE: sc_reg = 3; break;
395 default:
396 return 0xffffffffU;
397 }
398
399 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
400 }
401
402
403 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
404 u32 val)
405 {
406 unsigned int sc_reg;
407
408 switch (sc_reg_in) {
409 case SCR_STATUS: sc_reg = 0; break;
410 case SCR_CONTROL: sc_reg = 1; break;
411 case SCR_ERROR: sc_reg = 2; break;
412 case SCR_ACTIVE: sc_reg = 3; break;
413 default:
414 return;
415 }
416
417 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
418 }
419
420 static void ahci_start_engine(void __iomem *port_mmio)
421 {
422 u32 tmp;
423
424 /* start DMA */
425 tmp = readl(port_mmio + PORT_CMD);
426 tmp |= PORT_CMD_START;
427 writel(tmp, port_mmio + PORT_CMD);
428 readl(port_mmio + PORT_CMD); /* flush */
429 }
430
431 static int ahci_stop_engine(void __iomem *port_mmio)
432 {
433 u32 tmp;
434
435 tmp = readl(port_mmio + PORT_CMD);
436
437 /* check if the HBA is idle */
438 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
439 return 0;
440
441 /* setting HBA to idle */
442 tmp &= ~PORT_CMD_START;
443 writel(tmp, port_mmio + PORT_CMD);
444
445 /* wait for engine to stop. This could be as long as 500 msec */
446 tmp = ata_wait_register(port_mmio + PORT_CMD,
447 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
448 if (tmp & PORT_CMD_LIST_ON)
449 return -EIO;
450
451 return 0;
452 }
453
454 static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
455 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
456 {
457 u32 tmp;
458
459 /* set FIS registers */
460 if (cap & HOST_CAP_64)
461 writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
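 /* the double 16-bit shift extracts the high dword without doing a
  * 32-bit shift, which would be undefined when dma_addr_t is 32 bits */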
462 writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
463
464 if (cap & HOST_CAP_64)
465 writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
466 writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
467
468 /* enable FIS reception */
469 tmp = readl(port_mmio + PORT_CMD);
470 tmp |= PORT_CMD_FIS_RX;
471 writel(tmp, port_mmio + PORT_CMD);
472
473 /* flush */
474 readl(port_mmio + PORT_CMD);
475 }
476
477 static int ahci_stop_fis_rx(void __iomem *port_mmio)
478 {
479 u32 tmp;
480
481 /* disable FIS reception */
482 tmp = readl(port_mmio + PORT_CMD);
483 tmp &= ~PORT_CMD_FIS_RX;
484 writel(tmp, port_mmio + PORT_CMD);
485
486 /* wait for completion, spec says 500ms, give it 1000 */
487 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
488 PORT_CMD_FIS_ON, 10, 1000);
489 if (tmp & PORT_CMD_FIS_ON)
490 return -EBUSY;
491
492 return 0;
493 }
494
495 static void ahci_power_up(void __iomem *port_mmio, u32 cap)
496 {
497 u32 cmd;
498
499 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
500
501 /* spin up device */
502 if (cap & HOST_CAP_SSS) {
503 cmd |= PORT_CMD_SPIN_UP;
504 writel(cmd, port_mmio + PORT_CMD);
505 }
506
507 /* wake up link */
508 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
509 }
510
511 static void ahci_power_down(void __iomem *port_mmio, u32 cap)
512 {
513 u32 cmd, scontrol;
514
515 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
516
517 if (cap & HOST_CAP_SSC) {
518 /* enable transitions to slumber mode */
519 scontrol = readl(port_mmio + PORT_SCR_CTL);
520 if ((scontrol & 0x0f00) > 0x100) {
521 scontrol &= ~0xf00;
522 writel(scontrol, port_mmio + PORT_SCR_CTL);
523 }
524
525 /* put device into slumber mode */
526 writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);
527
528 /* wait for the transition to complete */
529 ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
530 PORT_CMD_ICC_SLUMBER, 1, 50);
531 }
532
533 /* put device into listen mode */
534 if (cap & HOST_CAP_SSS) {
535 /* first set PxSCTL.DET to 0 */
536 scontrol = readl(port_mmio + PORT_SCR_CTL);
537 scontrol &= ~0xf;
538 writel(scontrol, port_mmio + PORT_SCR_CTL);
539
540 /* then set PxCMD.SUD to 0 */
541 cmd &= ~PORT_CMD_SPIN_UP;
542 writel(cmd, port_mmio + PORT_CMD);
543 }
544 }
545
546 static void ahci_init_port(void __iomem *port_mmio, u32 cap,
547 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
548 {
549 /* power up */
550 ahci_power_up(port_mmio, cap);
551
552 /* enable FIS reception */
553 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
554
555 /* enable DMA */
556 ahci_start_engine(port_mmio);
557 }
558
559 static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
560 {
561 int rc;
562
563 /* disable DMA */
564 rc = ahci_stop_engine(port_mmio);
565 if (rc) {
566 *emsg = "failed to stop engine";
567 return rc;
568 }
569
570 /* disable FIS reception */
571 rc = ahci_stop_fis_rx(port_mmio);
572 if (rc) {
573 *emsg = "failed to stop FIS RX";
574 return rc;
575 }
576
577 /* put device into slumber mode */
578 ahci_power_down(port_mmio, cap);
579
580 return 0;
581 }
582
583 static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
584 {
585 u32 cap_save, tmp;
586
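 /* some HOST_CAP bits are programmed by firmware and lost across a
  * global reset: save the interlock-switch (bit 28) and port-multiplier
  * (bit 17) bits, force staggered spin-up (bit 27), and write the value
  * back after the reset below */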
587 cap_save = readl(mmio + HOST_CAP);
588 cap_save &= ( (1<<28) | (1<<17) );
589 cap_save |= (1 << 27);
590
591 /* global controller reset */
592 tmp = readl(mmio + HOST_CTL);
593 if ((tmp & HOST_RESET) == 0) {
594 writel(tmp | HOST_RESET, mmio + HOST_CTL);
595 readl(mmio + HOST_CTL); /* flush */
596 }
597
598 /* reset must complete within 1 second, or
599 * the hardware should be considered fried.
600 */
601 ssleep(1);
602
603 tmp = readl(mmio + HOST_CTL);
604 if (tmp & HOST_RESET) {
605 dev_printk(KERN_ERR, &pdev->dev,
606 "controller reset failed (0x%x)\n", tmp);
607 return -EIO;
608 }
609
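 /* the reset clears global state: re-enable AHCI mode, restore the
  * saved capability bits and mark ports 0-3 as implemented */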
610 writel(HOST_AHCI_EN, mmio + HOST_CTL);
611 (void) readl(mmio + HOST_CTL); /* flush */
612 writel(cap_save, mmio + HOST_CAP);
613 writel(0xf, mmio + HOST_PORTS_IMPL);
614 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
615
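 /* on Intel controllers the low bits of the Port Control and Status
  * (PCS) register in PCI config space act as per-port enables; set
  * them so all SATA ports respond after the reset */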
616 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
617 u16 tmp16;
618
619 /* configure PCS */
620 pci_read_config_word(pdev, 0x92, &tmp16);
621 tmp16 |= 0xf;
622 pci_write_config_word(pdev, 0x92, tmp16);
623 }
624
625 return 0;
626 }
627
628 static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
629 int n_ports, u32 cap)
630 {
631 int i, rc;
632 u32 tmp;
633
634 for (i = 0; i < n_ports; i++) {
635 void __iomem *port_mmio = ahci_port_base(mmio, i);
636 const char *emsg = NULL;
637
638 #if 0 /* BIOSen initialize this incorrectly */
639 if (!(hpriv->port_map & (1 << i)))
640 continue;
641 #endif
642
643 /* make sure port is not active */
644 rc = ahci_deinit_port(port_mmio, cap, &emsg);
645 if (rc)
646 dev_printk(KERN_WARNING, &pdev->dev,
647 "%s (%d)\n", emsg, rc);
648
649 /* clear SError */
650 tmp = readl(port_mmio + PORT_SCR_ERR);
651 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
652 writel(tmp, port_mmio + PORT_SCR_ERR);
653
654 /* clear port IRQ */
655 tmp = readl(port_mmio + PORT_IRQ_STAT);
656 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
657 if (tmp)
658 writel(tmp, port_mmio + PORT_IRQ_STAT);
659
660 writel(1 << i, mmio + HOST_IRQ_STAT);
661 }
662
663 tmp = readl(mmio + HOST_CTL);
664 VPRINTK("HOST_CTL 0x%x\n", tmp);
665 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
666 tmp = readl(mmio + HOST_CTL);
667 VPRINTK("HOST_CTL 0x%x\n", tmp);
668 }
669
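/*
 * PxSIG holds the signature delivered by the device's initial D2H
 * Register FIS; unpack it into a taskfile so ata_dev_classify() can
 * tell ATA and ATAPI devices apart.
 */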
670 static unsigned int ahci_dev_classify(struct ata_port *ap)
671 {
672 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
673 struct ata_taskfile tf;
674 u32 tmp;
675
676 tmp = readl(port_mmio + PORT_SIG);
677 tf.lbah = (tmp >> 24) & 0xff;
678 tf.lbam = (tmp >> 16) & 0xff;
679 tf.lbal = (tmp >> 8) & 0xff;
680 tf.nsect = (tmp) & 0xff;
681
682 return ata_dev_classify(&tf);
683 }
684
685 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
686 u32 opts)
687 {
688 dma_addr_t cmd_tbl_dma;
689
690 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
691
692 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
693 pp->cmd_slot[tag].status = 0;
694 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
695 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
696 }
697
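/*
 * Command List Override: ask the HBA to clear BSY and DRQ in PxTFD so
 * that a new command (e.g. the SRST FIS in ahci_softreset) can be issued
 * to a device that is stuck busy.  Only available if HOST_CAP_CLO is set.
 */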
698 static int ahci_clo(struct ata_port *ap)
699 {
700 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
701 struct ahci_host_priv *hpriv = ap->host->private_data;
702 u32 tmp;
703
704 if (!(hpriv->cap & HOST_CAP_CLO))
705 return -EOPNOTSUPP;
706
707 tmp = readl(port_mmio + PORT_CMD);
708 tmp |= PORT_CMD_CLO;
709 writel(tmp, port_mmio + PORT_CMD);
710
711 tmp = ata_wait_register(port_mmio + PORT_CMD,
712 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
713 if (tmp & PORT_CMD_CLO)
714 return -EIO;
715
716 return 0;
717 }
718
719 static int ahci_prereset(struct ata_port *ap)
720 {
721 if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
722 (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
723 /* ATA_BUSY hasn't cleared, so send a CLO */
724 ahci_clo(ap);
725 }
726
727 return ata_std_prereset(ap);
728 }
729
730 static int ahci_softreset(struct ata_port *ap, unsigned int *class)
731 {
732 struct ahci_port_priv *pp = ap->private_data;
733 void __iomem *mmio = ap->host->mmio_base;
734 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
735 const u32 cmd_fis_len = 5; /* five dwords */
736 const char *reason = NULL;
737 struct ata_taskfile tf;
738 u32 tmp;
739 u8 *fis;
740 int rc;
741
742 DPRINTK("ENTER\n");
743
744 if (ata_port_offline(ap)) {
745 DPRINTK("PHY reports no device\n");
746 *class = ATA_DEV_NONE;
747 return 0;
748 }
749
750 /* prepare for SRST (AHCI-1.1 10.4.1) */
751 rc = ahci_stop_engine(port_mmio);
752 if (rc) {
753 reason = "failed to stop engine";
754 goto fail_restart;
755 }
756
757 /* check BUSY/DRQ, perform Command List Override if necessary */
758 if (ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ)) {
759 rc = ahci_clo(ap);
760
761 if (rc == -EOPNOTSUPP) {
762 reason = "port busy but CLO unavailable";
763 goto fail_restart;
764 } else if (rc) {
765 reason = "port busy but CLO failed";
766 goto fail_restart;
767 }
768 }
769
770 /* restart engine */
771 ahci_start_engine(port_mmio);
772
773 ata_tf_init(ap->device, &tf);
774 fis = pp->cmd_tbl;
775
776 /* issue the first D2H Register FIS */
777 ahci_fill_cmd_slot(pp, 0,
778 cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
779
780 tf.ctl |= ATA_SRST;
781 ata_tf_to_fis(&tf, fis, 0);
782 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
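 /* with the C bit clear the FIS is a Device Control register update,
  * which is how SRST is asserted and deasserted over SATA */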
783
784 writel(1, port_mmio + PORT_CMD_ISSUE);
785
786 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
787 if (tmp & 0x1) {
788 rc = -EIO;
789 reason = "1st FIS failed";
790 goto fail;
791 }
792
793 /* spec says at least 5us, but be generous and sleep for 1ms */
794 msleep(1);
795
796 /* issue the second D2H Register FIS */
797 ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
798
799 tf.ctl &= ~ATA_SRST;
800 ata_tf_to_fis(&tf, fis, 0);
801 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
802
803 writel(1, port_mmio + PORT_CMD_ISSUE);
804 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
805
806 /* spec mandates ">= 2ms" before checking status.
807 * We wait 150ms, because that was the magic delay used for
808 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
809 * between when the ATA command register is written, and then
810 * status is checked. Because waiting for "a while" before
811 * checking status is fine, post SRST, we perform this magic
812 * delay here as well.
813 */
814 msleep(150);
815
816 *class = ATA_DEV_NONE;
817 if (ata_port_online(ap)) {
818 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
819 rc = -EIO;
820 reason = "device not ready";
821 goto fail;
822 }
823 *class = ahci_dev_classify(ap);
824 }
825
826 DPRINTK("EXIT, class=%u\n", *class);
827 return 0;
828
829 fail_restart:
830 ahci_start_engine(port_mmio);
831 fail:
832 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
833 return rc;
834 }
835
836 static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
837 {
838 struct ahci_port_priv *pp = ap->private_data;
839 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
840 struct ata_taskfile tf;
841 void __iomem *mmio = ap->host->mmio_base;
842 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
843 int rc;
844
845 DPRINTK("ENTER\n");
846
847 ahci_stop_engine(port_mmio);
848
849 /* clear D2H reception area to properly wait for D2H FIS */
850 ata_tf_init(ap->device, &tf);
851 tf.command = 0xff;
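 /* 0xff keeps BSY set in the cached status, so ahci_tf_read() reports a
  * busy device until the fresh signature FIS overwrites this area */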
852 ata_tf_to_fis(&tf, d2h_fis, 0);
853
854 rc = sata_std_hardreset(ap, class);
855
856 ahci_start_engine(port_mmio);
857
858 if (rc == 0 && ata_port_online(ap))
859 *class = ahci_dev_classify(ap);
860 if (*class == ATA_DEV_UNKNOWN)
861 *class = ATA_DEV_NONE;
862
863 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
864 return rc;
865 }
866
867 static void ahci_postreset(struct ata_port *ap, unsigned int *class)
868 {
869 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
870 u32 new_tmp, tmp;
871
872 ata_std_postreset(ap, class);
873
874 /* Make sure port's ATAPI bit is set appropriately */
875 new_tmp = tmp = readl(port_mmio + PORT_CMD);
876 if (*class == ATA_DEV_ATAPI)
877 new_tmp |= PORT_CMD_ATAPI;
878 else
879 new_tmp &= ~PORT_CMD_ATAPI;
880 if (new_tmp != tmp) {
881 writel(new_tmp, port_mmio + PORT_CMD);
882 readl(port_mmio + PORT_CMD); /* flush */
883 }
884 }
885
886 static u8 ahci_check_status(struct ata_port *ap)
887 {
888 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
889
890 return readl(mmio + PORT_TFDATA) & 0xFF;
891 }
892
893 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
894 {
895 struct ahci_port_priv *pp = ap->private_data;
896 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
897
898 ata_tf_from_fis(d2h_fis, tf);
899 }
900
901 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
902 {
903 struct scatterlist *sg;
904 struct ahci_sg *ahci_sg;
905 unsigned int n_sg = 0;
906
907 VPRINTK("ENTER\n");
908
909 /*
910 * Next, the S/G list.
911 */
912 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
913 ata_for_each_sg(sg, qc) {
914 dma_addr_t addr = sg_dma_address(sg);
915 u32 sg_len = sg_dma_len(sg);
916
917 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
918 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
919 ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
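 /* the PRDT byte count field is zero-based, hence length minus one */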
920
921 ahci_sg++;
922 n_sg++;
923 }
924
925 return n_sg;
926 }
927
928 static void ahci_qc_prep(struct ata_queued_cmd *qc)
929 {
930 struct ata_port *ap = qc->ap;
931 struct ahci_port_priv *pp = ap->private_data;
932 int is_atapi = is_atapi_taskfile(&qc->tf);
933 void *cmd_tbl;
934 u32 opts;
935 const u32 cmd_fis_len = 5; /* five dwords */
936 unsigned int n_elem;
937
938 /*
939 * Fill in command table information. First, the header,
940 * a SATA Register - Host to Device command FIS.
941 */
942 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
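 /* each tag owns one command table: H2D command FIS at offset 0, ATAPI
  * CDB at AHCI_CMD_TBL_CDB, PRDT entries from AHCI_CMD_TBL_HDR_SZ on */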
943
944 ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
945 if (is_atapi) {
946 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
947 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
948 }
949
950 n_elem = 0;
951 if (qc->flags & ATA_QCFLAG_DMAMAP)
952 n_elem = ahci_fill_sg(qc, cmd_tbl);
953
954 /*
955 * Fill in command slot information.
956 */
957 opts = cmd_fis_len | n_elem << 16;
958 if (qc->tf.flags & ATA_TFLAG_WRITE)
959 opts |= AHCI_CMD_WRITE;
960 if (is_atapi)
961 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
962
963 ahci_fill_cmd_slot(pp, qc->tag, opts);
964 }
965
966 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
967 {
968 struct ahci_port_priv *pp = ap->private_data;
969 struct ata_eh_info *ehi = &ap->eh_info;
970 unsigned int err_mask = 0, action = 0;
971 struct ata_queued_cmd *qc;
972 u32 serror;
973
974 ata_ehi_clear_desc(ehi);
975
976 /* AHCI needs SError cleared; otherwise, it might lock up */
977 serror = ahci_scr_read(ap, SCR_ERROR);
978 ahci_scr_write(ap, SCR_ERROR, serror);
979
980 /* analyze @irq_stat */
981 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
982
983 if (irq_stat & PORT_IRQ_TF_ERR)
984 err_mask |= AC_ERR_DEV;
985
986 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
987 err_mask |= AC_ERR_HOST_BUS;
988 action |= ATA_EH_SOFTRESET;
989 }
990
991 if (irq_stat & PORT_IRQ_IF_ERR) {
992 err_mask |= AC_ERR_ATA_BUS;
993 action |= ATA_EH_SOFTRESET;
994 ata_ehi_push_desc(ehi, ", interface fatal error");
995 }
996
997 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
998 ata_ehi_hotplugged(ehi);
999 ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
1000 "connection status changed" : "PHY RDY changed");
1001 }
1002
1003 if (irq_stat & PORT_IRQ_UNK_FIS) {
1004 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1005
1006 err_mask |= AC_ERR_HSM;
1007 action |= ATA_EH_SOFTRESET;
1008 ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
1009 unk[0], unk[1], unk[2], unk[3]);
1010 }
1011
1012 /* okay, let's hand over to EH */
1013 ehi->serror |= serror;
1014 ehi->action |= action;
1015
1016 qc = ata_qc_from_tag(ap, ap->active_tag);
1017 if (qc)
1018 qc->err_mask |= err_mask;
1019 else
1020 ehi->err_mask |= err_mask;
1021
1022 if (irq_stat & PORT_IRQ_FREEZE)
1023 ata_port_freeze(ap);
1024 else
1025 ata_port_abort(ap);
1026 }
1027
1028 static void ahci_host_intr(struct ata_port *ap)
1029 {
1030 void __iomem *mmio = ap->host->mmio_base;
1031 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1032 struct ata_eh_info *ehi = &ap->eh_info;
1033 u32 status, qc_active;
1034 int rc;
1035
1036 status = readl(port_mmio + PORT_IRQ_STAT);
1037 writel(status, port_mmio + PORT_IRQ_STAT);
1038
1039 if (unlikely(status & PORT_IRQ_ERROR)) {
1040 ahci_error_intr(ap, status);
1041 return;
1042 }
1043
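 /* NCQ commands report completion through PxSACT, everything else
  * through PxCI; a cleared bit means that tag has finished */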
1044 if (ap->sactive)
1045 qc_active = readl(port_mmio + PORT_SCR_ACT);
1046 else
1047 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
1048
1049 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
1050 if (rc > 0)
1051 return;
1052 if (rc < 0) {
1053 ehi->err_mask |= AC_ERR_HSM;
1054 ehi->action |= ATA_EH_SOFTRESET;
1055 ata_port_freeze(ap);
1056 return;
1057 }
1058
1059 /* hmmm... a spurious interrupt */
1060
1061 /* some devices send D2H reg with I bit set during NCQ command phase */
1062 if (ap->sactive && (status & PORT_IRQ_D2H_REG_FIS))
1063 return;
1064
1065 /* ignore interim PIO setup fis interrupts */
1066 if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
1067 return;
1068
1069 if (ata_ratelimit())
1070 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
1071 "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
1072 status, ap->active_tag, ap->sactive);
1073 }
1074
1075 static void ahci_irq_clear(struct ata_port *ap)
1076 {
1077 /* TODO */
1078 }
1079
1080 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1081 {
1082 struct ata_host *host = dev_instance;
1083 struct ahci_host_priv *hpriv;
1084 unsigned int i, handled = 0;
1085 void __iomem *mmio;
1086 u32 irq_stat, irq_ack = 0;
1087
1088 VPRINTK("ENTER\n");
1089
1090 hpriv = host->private_data;
1091 mmio = host->mmio_base;
1092
1093 /* sigh. 0xffffffff is a valid return from h/w */
1094 irq_stat = readl(mmio + HOST_IRQ_STAT);
1095 irq_stat &= hpriv->port_map;
1096 if (!irq_stat)
1097 return IRQ_NONE;
1098
1099 spin_lock(&host->lock);
1100
1101 for (i = 0; i < host->n_ports; i++) {
1102 struct ata_port *ap;
1103
1104 if (!(irq_stat & (1 << i)))
1105 continue;
1106
1107 ap = host->ports[i];
1108 if (ap) {
1109 ahci_host_intr(ap);
1110 VPRINTK("port %u\n", i);
1111 } else {
1112 VPRINTK("port %u (no irq)\n", i);
1113 if (ata_ratelimit())
1114 dev_printk(KERN_WARNING, host->dev,
1115 "interrupt on disabled port %u\n", i);
1116 }
1117
1118 irq_ack |= (1 << i);
1119 }
1120
1121 if (irq_ack) {
1122 writel(irq_ack, mmio + HOST_IRQ_STAT);
1123 handled = 1;
1124 }
1125
1126 spin_unlock(&host->lock);
1127
1128 VPRINTK("EXIT\n");
1129
1130 return IRQ_RETVAL(handled);
1131 }
1132
1133 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1134 {
1135 struct ata_port *ap = qc->ap;
1136 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
1137
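 /* for NCQ the tag must be set in PxSACT before the same bit is written
  * to PxCI; the trailing read flushes the posted MMIO writes */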
1138 if (qc->tf.protocol == ATA_PROT_NCQ)
1139 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1140 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1141 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1142
1143 return 0;
1144 }
1145
1146 static void ahci_freeze(struct ata_port *ap)
1147 {
1148 void __iomem *mmio = ap->host->mmio_base;
1149 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1150
1151 /* turn IRQ off */
1152 writel(0, port_mmio + PORT_IRQ_MASK);
1153 }
1154
1155 static void ahci_thaw(struct ata_port *ap)
1156 {
1157 void __iomem *mmio = ap->host->mmio_base;
1158 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1159 u32 tmp;
1160
1161 /* clear IRQ */
1162 tmp = readl(port_mmio + PORT_IRQ_STAT);
1163 writel(tmp, port_mmio + PORT_IRQ_STAT);
1164 writel(1 << ap->id, mmio + HOST_IRQ_STAT);
1165
1166 /* turn IRQ back on */
1167 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
1168 }
1169
1170 static void ahci_error_handler(struct ata_port *ap)
1171 {
1172 void __iomem *mmio = ap->host->mmio_base;
1173 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1174
1175 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1176 /* restart engine */
1177 ahci_stop_engine(port_mmio);
1178 ahci_start_engine(port_mmio);
1179 }
1180
1181 /* perform recovery */
1182 ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
1183 ahci_postreset);
1184 }
1185
1186 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1187 {
1188 struct ata_port *ap = qc->ap;
1189 void __iomem *mmio = ap->host->mmio_base;
1190 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1191
1192 if (qc->flags & ATA_QCFLAG_FAILED)
1193 qc->err_mask |= AC_ERR_OTHER;
1194
1195 if (qc->err_mask) {
1196 /* make DMA engine forget about the failed command */
1197 ahci_stop_engine(port_mmio);
1198 ahci_start_engine(port_mmio);
1199 }
1200 }
1201
1202 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
1203 {
1204 struct ahci_host_priv *hpriv = ap->host->private_data;
1205 struct ahci_port_priv *pp = ap->private_data;
1206 void __iomem *mmio = ap->host->mmio_base;
1207 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1208 const char *emsg = NULL;
1209 int rc;
1210
1211 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1212 if (rc) {
1213 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
1214 ahci_init_port(port_mmio, hpriv->cap,
1215 pp->cmd_slot_dma, pp->rx_fis_dma);
1216 }
1217
1218 return rc;
1219 }
1220
1221 static int ahci_port_resume(struct ata_port *ap)
1222 {
1223 struct ahci_port_priv *pp = ap->private_data;
1224 struct ahci_host_priv *hpriv = ap->host->private_data;
1225 void __iomem *mmio = ap->host->mmio_base;
1226 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1227
1228 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1229
1230 return 0;
1231 }
1232
1233 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1234 {
1235 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1236 void __iomem *mmio = host->mmio_base;
1237 u32 ctl;
1238
1239 if (mesg.event == PM_EVENT_SUSPEND) {
1240 /* AHCI spec rev1.1 section 8.3.3:
1241 * Software must disable interrupts prior to requesting a
1242 * transition of the HBA to D3 state.
1243 */
1244 ctl = readl(mmio + HOST_CTL);
1245 ctl &= ~HOST_IRQ_EN;
1246 writel(ctl, mmio + HOST_CTL);
1247 readl(mmio + HOST_CTL); /* flush */
1248 }
1249
1250 return ata_pci_device_suspend(pdev, mesg);
1251 }
1252
1253 static int ahci_pci_device_resume(struct pci_dev *pdev)
1254 {
1255 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1256 struct ahci_host_priv *hpriv = host->private_data;
1257 void __iomem *mmio = host->mmio_base;
1258 int rc;
1259
1260 ata_pci_device_do_resume(pdev);
1261
1262 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1263 rc = ahci_reset_controller(mmio, pdev);
1264 if (rc)
1265 return rc;
1266
1267 ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
1268 }
1269
1270 ata_host_resume(host);
1271
1272 return 0;
1273 }
1274
1275 static int ahci_port_start(struct ata_port *ap)
1276 {
1277 struct device *dev = ap->host->dev;
1278 struct ahci_host_priv *hpriv = ap->host->private_data;
1279 struct ahci_port_priv *pp;
1280 void __iomem *mmio = ap->host->mmio_base;
1281 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1282 void *mem;
1283 dma_addr_t mem_dma;
1284 int rc;
1285
1286 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
1287 if (!pp)
1288 return -ENOMEM;
1289 memset(pp, 0, sizeof(*pp));
1290
1291 rc = ata_pad_alloc(ap, dev);
1292 if (rc) {
1293 kfree(pp);
1294 return rc;
1295 }
1296
1297 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
1298 if (!mem) {
1299 ata_pad_free(ap, dev);
1300 kfree(pp);
1301 return -ENOMEM;
1302 }
1303 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
1304
1305 /*
1306 * First item in chunk of DMA memory: 32-slot command table,
1307 * 32 bytes each in size
1308 */
1309 pp->cmd_slot = mem;
1310 pp->cmd_slot_dma = mem_dma;
1311
1312 mem += AHCI_CMD_SLOT_SZ;
1313 mem_dma += AHCI_CMD_SLOT_SZ;
1314
1315 /*
1316 * Second item: Received-FIS area
1317 */
1318 pp->rx_fis = mem;
1319 pp->rx_fis_dma = mem_dma;
1320
1321 mem += AHCI_RX_FIS_SZ;
1322 mem_dma += AHCI_RX_FIS_SZ;
1323
1324 /*
1325 * Third item: data area for storing a single command
1326 * and its scatter-gather table
1327 */
1328 pp->cmd_tbl = mem;
1329 pp->cmd_tbl_dma = mem_dma;
1330
1331 ap->private_data = pp;
1332
1333 /* initialize port */
1334 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1335
1336 return 0;
1337 }
1338
1339 static void ahci_port_stop(struct ata_port *ap)
1340 {
1341 struct device *dev = ap->host->dev;
1342 struct ahci_host_priv *hpriv = ap->host->private_data;
1343 struct ahci_port_priv *pp = ap->private_data;
1344 void __iomem *mmio = ap->host->mmio_base;
1345 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1346 const char *emsg = NULL;
1347 int rc;
1348
1349 /* de-initialize port */
1350 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1351 if (rc)
1352 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
1353
1354 ap->private_data = NULL;
1355 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
1356 pp->cmd_slot, pp->cmd_slot_dma);
1357 ata_pad_free(ap, dev);
1358 kfree(pp);
1359 }
1360
1361 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
1362 unsigned int port_idx)
1363 {
1364 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1365 base = ahci_port_base_ul(base, port_idx);
1366 VPRINTK("base now==0x%lx\n", base);
1367
1368 port->cmd_addr = base;
1369 port->scr_addr = base + PORT_SCR;
1370
1371 VPRINTK("EXIT\n");
1372 }
1373
1374 static int ahci_host_init(struct ata_probe_ent *probe_ent)
1375 {
1376 struct ahci_host_priv *hpriv = probe_ent->private_data;
1377 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1378 void __iomem *mmio = probe_ent->mmio_base;
1379 unsigned int i, using_dac;
1380 int rc;
1381
1382 rc = ahci_reset_controller(mmio, pdev);
1383 if (rc)
1384 return rc;
1385
1386 hpriv->cap = readl(mmio + HOST_CAP);
1387 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
1388 probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
1389
1390 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1391 hpriv->cap, hpriv->port_map, probe_ent->n_ports);
1392
1393 using_dac = hpriv->cap & HOST_CAP_64;
1394 if (using_dac &&
1395 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1396 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1397 if (rc) {
1398 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1399 if (rc) {
1400 dev_printk(KERN_ERR, &pdev->dev,
1401 "64-bit DMA enable failed\n");
1402 return rc;
1403 }
1404 }
1405 } else {
1406 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1407 if (rc) {
1408 dev_printk(KERN_ERR, &pdev->dev,
1409 "32-bit DMA enable failed\n");
1410 return rc;
1411 }
1412 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1413 if (rc) {
1414 dev_printk(KERN_ERR, &pdev->dev,
1415 "32-bit consistent DMA enable failed\n");
1416 return rc;
1417 }
1418 }
1419
1420 for (i = 0; i < probe_ent->n_ports; i++)
1421 ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
1422
1423 ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);
1424
1425 pci_set_master(pdev);
1426
1427 return 0;
1428 }
1429
1430 static void ahci_print_info(struct ata_probe_ent *probe_ent)
1431 {
1432 struct ahci_host_priv *hpriv = probe_ent->private_data;
1433 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1434 void __iomem *mmio = probe_ent->mmio_base;
1435 u32 vers, cap, impl, speed;
1436 const char *speed_s;
1437 u16 cc;
1438 const char *scc_s;
1439
1440 vers = readl(mmio + HOST_VERSION);
1441 cap = hpriv->cap;
1442 impl = hpriv->port_map;
1443
1444 speed = (cap >> 20) & 0xf;
1445 if (speed == 1)
1446 speed_s = "1.5";
1447 else if (speed == 2)
1448 speed_s = "3";
1449 else
1450 speed_s = "?";
1451
1452 pci_read_config_word(pdev, 0x0a, &cc);
1453 if (cc == 0x0101)
1454 scc_s = "IDE";
1455 else if (cc == 0x0106)
1456 scc_s = "SATA";
1457 else if (cc == 0x0104)
1458 scc_s = "RAID";
1459 else
1460 scc_s = "unknown";
1461
1462 dev_printk(KERN_INFO, &pdev->dev,
1463 "AHCI %02x%02x.%02x%02x "
1464 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1465 ,
1466
1467 (vers >> 24) & 0xff,
1468 (vers >> 16) & 0xff,
1469 (vers >> 8) & 0xff,
1470 vers & 0xff,
1471
1472 ((cap >> 8) & 0x1f) + 1,
1473 (cap & 0x1f) + 1,
1474 speed_s,
1475 impl,
1476 scc_s);
1477
1478 dev_printk(KERN_INFO, &pdev->dev,
1479 "flags: "
1480 "%s%s%s%s%s%s"
1481 "%s%s%s%s%s%s%s\n"
1482 ,
1483
1484 cap & (1 << 31) ? "64bit " : "",
1485 cap & (1 << 30) ? "ncq " : "",
1486 cap & (1 << 28) ? "ilck " : "",
1487 cap & (1 << 27) ? "stag " : "",
1488 cap & (1 << 26) ? "pm " : "",
1489 cap & (1 << 25) ? "led " : "",
1490
1491 cap & (1 << 24) ? "clo " : "",
1492 cap & (1 << 19) ? "nz " : "",
1493 cap & (1 << 18) ? "only " : "",
1494 cap & (1 << 17) ? "pmp " : "",
1495 cap & (1 << 15) ? "pio " : "",
1496 cap & (1 << 14) ? "slum " : "",
1497 cap & (1 << 13) ? "part " : ""
1498 );
1499 }
1500
1501 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1502 {
1503 static int printed_version;
1504 struct ata_probe_ent *probe_ent = NULL;
1505 struct ahci_host_priv *hpriv;
1506 unsigned long base;
1507 void __iomem *mmio_base;
1508 unsigned int board_idx = (unsigned int) ent->driver_data;
1509 int have_msi, pci_dev_busy = 0;
1510 int rc;
1511
1512 VPRINTK("ENTER\n");
1513
1514 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
1515
1516 if (!printed_version++)
1517 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1518
1519 /* JMicron-specific fixup: make sure we're in AHCI mode */
1520 /* This is protected from races with ata_jmicron by the pci probe
1521 locking */
1522 if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
1523 /* AHCI enable, AHCI on function 0 */
1524 pci_write_config_byte(pdev, 0x41, 0xa1);
1525 /* Function 1 is the PATA controller */
1526 if (PCI_FUNC(pdev->devfn))
1527 return -ENODEV;
1528 }
1529
1530 rc = pci_enable_device(pdev);
1531 if (rc)
1532 return rc;
1533
1534 rc = pci_request_regions(pdev, DRV_NAME);
1535 if (rc) {
1536 pci_dev_busy = 1;
1537 goto err_out;
1538 }
1539
1540 if (pci_enable_msi(pdev) == 0)
1541 have_msi = 1;
1542 else {
1543 pci_intx(pdev, 1);
1544 have_msi = 0;
1545 }
1546
1547 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1548 if (probe_ent == NULL) {
1549 rc = -ENOMEM;
1550 goto err_out_msi;
1551 }
1552
1553 memset(probe_ent, 0, sizeof(*probe_ent));
1554 probe_ent->dev = pci_dev_to_dev(pdev);
1555 INIT_LIST_HEAD(&probe_ent->node);
1556
1557 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
1558 if (mmio_base == NULL) {
1559 rc = -ENOMEM;
1560 goto err_out_free_ent;
1561 }
1562 base = (unsigned long) mmio_base;
1563
1564 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1565 if (!hpriv) {
1566 rc = -ENOMEM;
1567 goto err_out_iounmap;
1568 }
1569 memset(hpriv, 0, sizeof(*hpriv));
1570
1571 probe_ent->sht = ahci_port_info[board_idx].sht;
1572 probe_ent->port_flags = ahci_port_info[board_idx].flags;
1573 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
1574 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
1575 probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
1576
1577 probe_ent->irq = pdev->irq;
1578 probe_ent->irq_flags = IRQF_SHARED;
1579 probe_ent->mmio_base = mmio_base;
1580 probe_ent->private_data = hpriv;
1581
1582 if (have_msi)
1583 hpriv->flags |= AHCI_FLAG_MSI;
1584
1585 /* initialize adapter */
1586 rc = ahci_host_init(probe_ent);
1587 if (rc)
1588 goto err_out_hpriv;
1589
1590 if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
1591 (hpriv->cap & HOST_CAP_NCQ))
1592 probe_ent->port_flags |= ATA_FLAG_NCQ;
1593
1594 ahci_print_info(probe_ent);
1595
1596 /* FIXME: check ata_device_add return value */
1597 ata_device_add(probe_ent);
1598 kfree(probe_ent);
1599
1600 return 0;
1601
1602 err_out_hpriv:
1603 kfree(hpriv);
1604 err_out_iounmap:
1605 pci_iounmap(pdev, mmio_base);
1606 err_out_free_ent:
1607 kfree(probe_ent);
1608 err_out_msi:
1609 if (have_msi)
1610 pci_disable_msi(pdev);
1611 else
1612 pci_intx(pdev, 0);
1613 pci_release_regions(pdev);
1614 err_out:
1615 if (!pci_dev_busy)
1616 pci_disable_device(pdev);
1617 return rc;
1618 }
1619
1620 static void ahci_remove_one (struct pci_dev *pdev)
1621 {
1622 struct device *dev = pci_dev_to_dev(pdev);
1623 struct ata_host *host = dev_get_drvdata(dev);
1624 struct ahci_host_priv *hpriv = host->private_data;
1625 unsigned int i;
1626 int have_msi;
1627
1628 for (i = 0; i < host->n_ports; i++)
1629 ata_port_detach(host->ports[i]);
1630
1631 have_msi = hpriv->flags & AHCI_FLAG_MSI;
1632 free_irq(host->irq, host);
1633
1634 for (i = 0; i < host->n_ports; i++) {
1635 struct ata_port *ap = host->ports[i];
1636
1637 ata_scsi_release(ap->scsi_host);
1638 scsi_host_put(ap->scsi_host);
1639 }
1640
1641 kfree(hpriv);
1642 pci_iounmap(pdev, host->mmio_base);
1643 kfree(host);
1644
1645 if (have_msi)
1646 pci_disable_msi(pdev);
1647 else
1648 pci_intx(pdev, 0);
1649 pci_release_regions(pdev);
1650 pci_disable_device(pdev);
1651 dev_set_drvdata(dev, NULL);
1652 }
1653
1654 static int __init ahci_init(void)
1655 {
1656 return pci_register_driver(&ahci_pci_driver);
1657 }
1658
1659 static void __exit ahci_exit(void)
1660 {
1661 pci_unregister_driver(&ahci_pci_driver);
1662 }
1663
1664
1665 MODULE_AUTHOR("Jeff Garzik");
1666 MODULE_DESCRIPTION("AHCI SATA low-level driver");
1667 MODULE_LICENSE("GPL");
1668 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1669 MODULE_VERSION(DRV_VERSION);
1670
1671 module_init(ahci_init);
1672 module_exit(ahci_exit);