[PATCH] irq-flags: scsi: Use the new IRQF_ constants
[deliverable/linux.git] drivers/scsi/ahci.c
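This patch is part of the tree-wide rename of the legacy SA_* interrupt flags to the new IRQF_* constants. In this file the only affected line is in ahci_init_one(), where the probe entry's irq_flags field is now set to IRQF_SHARED. A minimal sketch of what the rename looks like at a request_irq() call site (illustrative only; libata actually requests the IRQ in its core code using probe_ent->irq_flags, and the handler/name/argument choices here are just placeholders):

	/* before the irq-flags rename */
	rc = request_irq(pdev->irq, ahci_interrupt, SA_SHIRQ, "ahci", host_set);

	/* after the rename: IRQF_SHARED carries the same meaning as SA_SHIRQ */
	rc = request_irq(pdev->irq, ahci_interrupt, IRQF_SHARED, "ahci", host_set);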
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/sched.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/device.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48 #include <asm/io.h>
49
50 #define DRV_NAME "ahci"
51 #define DRV_VERSION "2.0"
52
53
54 enum {
55 AHCI_PCI_BAR = 5,
56 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0,
59 AHCI_MAX_CMDS = 32,
60 AHCI_CMD_SZ = 32,
61 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
62 AHCI_RX_FIS_SZ = 256,
63 AHCI_CMD_TBL_CDB = 0x40,
64 AHCI_CMD_TBL_HDR_SZ = 0x80,
65 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
66 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
67 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
68 AHCI_RX_FIS_SZ,
69 AHCI_IRQ_ON_SG = (1 << 31),
70 AHCI_CMD_ATAPI = (1 << 5),
71 AHCI_CMD_WRITE = (1 << 6),
72 AHCI_CMD_PREFETCH = (1 << 7),
73 AHCI_CMD_RESET = (1 << 8),
74 AHCI_CMD_CLR_BUSY = (1 << 10),
75
76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78
79 board_ahci = 0,
80 board_ahci_vt8251 = 1,
81
82 /* global controller registers */
83 HOST_CAP = 0x00, /* host capabilities */
84 HOST_CTL = 0x04, /* global host control */
85 HOST_IRQ_STAT = 0x08, /* interrupt status */
86 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
87 HOST_VERSION = 0x10, /* AHCI spec. version compliance */
88
89 /* HOST_CTL bits */
90 HOST_RESET = (1 << 0), /* reset controller; self-clear */
91 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
92 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
93
94 /* HOST_CAP bits */
95 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
96 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
97 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
98
99 /* registers for each SATA port */
100 PORT_LST_ADDR = 0x00, /* command list DMA addr */
101 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
102 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
103 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
104 PORT_IRQ_STAT = 0x10, /* interrupt status */
105 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
106 PORT_CMD = 0x18, /* port command */
107 PORT_TFDATA = 0x20, /* taskfile data */
108 PORT_SIG = 0x24, /* device TF signature */
109 PORT_CMD_ISSUE = 0x38, /* command issue */
110 PORT_SCR = 0x28, /* SATA phy register block */
111 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
112 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
113 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
114 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
115
116 /* PORT_IRQ_{STAT,MASK} bits */
117 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
118 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
119 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
120 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
121 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
122 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
123 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
124 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
125
126 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
127 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
128 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
129 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
130 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
131 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
132 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
133 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
134 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
135
136 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
137 PORT_IRQ_IF_ERR |
138 PORT_IRQ_CONNECT |
139 PORT_IRQ_PHYRDY |
140 PORT_IRQ_UNK_FIS,
141 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
142 PORT_IRQ_TF_ERR |
143 PORT_IRQ_HBUS_DATA_ERR,
144 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
145 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
146 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
147
148 /* PORT_CMD bits */
149 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
150 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
151 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
152 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
153 PORT_CMD_CLO = (1 << 3), /* Command list override */
154 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
155 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
156 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
157
158 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
159 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
160 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
161
162 /* hpriv->flags bits */
163 AHCI_FLAG_MSI = (1 << 0),
164
165 /* ap->flags bits */
166 AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
167 AHCI_FLAG_NO_NCQ = (1 << 25),
168 };
169
170 struct ahci_cmd_hdr {
171 u32 opts;
172 u32 status;
173 u32 tbl_addr;
174 u32 tbl_addr_hi;
175 u32 reserved[4];
176 };
177
178 struct ahci_sg {
179 u32 addr;
180 u32 addr_hi;
181 u32 reserved;
182 u32 flags_size;
183 };
184
185 struct ahci_host_priv {
186 unsigned long flags;
187 u32 cap; /* cache of HOST_CAP register */
188 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
189 };
190
191 struct ahci_port_priv {
192 struct ahci_cmd_hdr *cmd_slot;
193 dma_addr_t cmd_slot_dma;
194 void *cmd_tbl;
195 dma_addr_t cmd_tbl_dma;
196 void *rx_fis;
197 dma_addr_t rx_fis_dma;
198 };
199
200 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
201 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
202 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
203 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
204 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
205 static void ahci_irq_clear(struct ata_port *ap);
206 static int ahci_port_start(struct ata_port *ap);
207 static void ahci_port_stop(struct ata_port *ap);
208 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
209 static void ahci_qc_prep(struct ata_queued_cmd *qc);
210 static u8 ahci_check_status(struct ata_port *ap);
211 static void ahci_freeze(struct ata_port *ap);
212 static void ahci_thaw(struct ata_port *ap);
213 static void ahci_error_handler(struct ata_port *ap);
214 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
215 static void ahci_remove_one (struct pci_dev *pdev);
216
217 static struct scsi_host_template ahci_sht = {
218 .module = THIS_MODULE,
219 .name = DRV_NAME,
220 .ioctl = ata_scsi_ioctl,
221 .queuecommand = ata_scsi_queuecmd,
222 .change_queue_depth = ata_scsi_change_queue_depth,
223 .can_queue = AHCI_MAX_CMDS - 1,
224 .this_id = ATA_SHT_THIS_ID,
225 .sg_tablesize = AHCI_MAX_SG,
226 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
227 .emulated = ATA_SHT_EMULATED,
228 .use_clustering = AHCI_USE_CLUSTERING,
229 .proc_name = DRV_NAME,
230 .dma_boundary = AHCI_DMA_BOUNDARY,
231 .slave_configure = ata_scsi_slave_config,
232 .slave_destroy = ata_scsi_slave_destroy,
233 .bios_param = ata_std_bios_param,
234 };
235
236 static const struct ata_port_operations ahci_ops = {
237 .port_disable = ata_port_disable,
238
239 .check_status = ahci_check_status,
240 .check_altstatus = ahci_check_status,
241 .dev_select = ata_noop_dev_select,
242
243 .tf_read = ahci_tf_read,
244
245 .qc_prep = ahci_qc_prep,
246 .qc_issue = ahci_qc_issue,
247
248 .irq_handler = ahci_interrupt,
249 .irq_clear = ahci_irq_clear,
250
251 .scr_read = ahci_scr_read,
252 .scr_write = ahci_scr_write,
253
254 .freeze = ahci_freeze,
255 .thaw = ahci_thaw,
256
257 .error_handler = ahci_error_handler,
258 .post_internal_cmd = ahci_post_internal_cmd,
259
260 .port_start = ahci_port_start,
261 .port_stop = ahci_port_stop,
262 };
263
264 static const struct ata_port_info ahci_port_info[] = {
265 /* board_ahci */
266 {
267 .sht = &ahci_sht,
268 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
269 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
270 ATA_FLAG_SKIP_D2H_BSY,
271 .pio_mask = 0x1f, /* pio0-4 */
272 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
273 .port_ops = &ahci_ops,
274 },
275 /* board_ahci_vt8251 */
276 {
277 .sht = &ahci_sht,
278 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
279 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
280 ATA_FLAG_SKIP_D2H_BSY |
281 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
282 .pio_mask = 0x1f, /* pio0-4 */
283 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
284 .port_ops = &ahci_ops,
285 },
286 };
287
288 static const struct pci_device_id ahci_pci_tbl[] = {
289 /* Intel */
290 { PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
291 board_ahci }, /* ICH6 */
292 { PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
293 board_ahci }, /* ICH6M */
294 { PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
295 board_ahci }, /* ICH7 */
296 { PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
297 board_ahci }, /* ICH7M */
298 { PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
299 board_ahci }, /* ICH7R */
300 { PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
301 board_ahci }, /* ULi M5288 */
302 { PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
303 board_ahci }, /* ESB2 */
304 { PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
305 board_ahci }, /* ESB2 */
306 { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
307 board_ahci }, /* ESB2 */
308 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
309 board_ahci }, /* ICH7-M DH */
310 { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
311 board_ahci }, /* ICH8 */
312 { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
313 board_ahci }, /* ICH8 */
314 { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
315 board_ahci }, /* ICH8 */
316 { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
317 board_ahci }, /* ICH8M */
318 { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
319 board_ahci }, /* ICH8M */
320
321 /* JMicron */
322 { 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
323 board_ahci }, /* JMicron JMB360 */
324 { 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
325 board_ahci }, /* JMicron JMB361 */
326 { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
327 board_ahci }, /* JMicron JMB363 */
328 { 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
329 board_ahci }, /* JMicron JMB365 */
330 { 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
331 board_ahci }, /* JMicron JMB366 */
332
333 /* ATI */
334 { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
335 board_ahci }, /* ATI SB600 non-raid */
336 { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
337 board_ahci }, /* ATI SB600 raid */
338
339 /* VIA */
340 { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
341 board_ahci_vt8251 }, /* VIA VT8251 */
342
343 /* NVIDIA */
344 { PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
345 board_ahci }, /* MCP65 */
346 { PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
347 board_ahci }, /* MCP65 */
348 { PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
349 board_ahci }, /* MCP65 */
350 { PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
351 board_ahci }, /* MCP65 */
352
353 { } /* terminate list */
354 };
355
356
357 static struct pci_driver ahci_pci_driver = {
358 .name = DRV_NAME,
359 .id_table = ahci_pci_tbl,
360 .probe = ahci_init_one,
361 .remove = ahci_remove_one,
362 };
363
364
365 static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
366 {
367 return base + 0x100 + (port * 0x80);
368 }
369
370 static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
371 {
372 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
373 }
374
375 static int ahci_port_start(struct ata_port *ap)
376 {
377 struct device *dev = ap->host_set->dev;
378 struct ahci_host_priv *hpriv = ap->host_set->private_data;
379 struct ahci_port_priv *pp;
380 void __iomem *mmio = ap->host_set->mmio_base;
381 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
382 void *mem;
383 dma_addr_t mem_dma;
384 int rc;
385
386 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
387 if (!pp)
388 return -ENOMEM;
389 memset(pp, 0, sizeof(*pp));
390
391 rc = ata_pad_alloc(ap, dev);
392 if (rc) {
393 kfree(pp);
394 return rc;
395 }
396
397 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
398 if (!mem) {
399 ata_pad_free(ap, dev);
400 kfree(pp);
401 return -ENOMEM;
402 }
403 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
404
405 /*
406 * First item in chunk of DMA memory: 32-slot command table,
407 * 32 bytes each in size
408 */
409 pp->cmd_slot = mem;
410 pp->cmd_slot_dma = mem_dma;
411
412 mem += AHCI_CMD_SLOT_SZ;
413 mem_dma += AHCI_CMD_SLOT_SZ;
414
415 /*
416 * Second item: Received-FIS area
417 */
418 pp->rx_fis = mem;
419 pp->rx_fis_dma = mem_dma;
420
421 mem += AHCI_RX_FIS_SZ;
422 mem_dma += AHCI_RX_FIS_SZ;
423
424 /*
425 * Third item: data area for storing a single command
426 * and its scatter-gather table
427 */
428 pp->cmd_tbl = mem;
429 pp->cmd_tbl_dma = mem_dma;
430
431 ap->private_data = pp;
432
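	/* program the command list and FIS base addresses below; the
	 * (addr >> 16) >> 16 form pulls out the high dword without a single
	 * 32-bit shift, which would be undefined when dma_addr_t is 32 bits
	 */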
433 if (hpriv->cap & HOST_CAP_64)
434 writel((pp->cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
435 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
436 readl(port_mmio + PORT_LST_ADDR); /* flush */
437
438 if (hpriv->cap & HOST_CAP_64)
439 writel((pp->rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
440 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
441 readl(port_mmio + PORT_FIS_ADDR); /* flush */
442
443 writel(PORT_CMD_ICC_ACTIVE | PORT_CMD_FIS_RX |
444 PORT_CMD_POWER_ON | PORT_CMD_SPIN_UP |
445 PORT_CMD_START, port_mmio + PORT_CMD);
446 readl(port_mmio + PORT_CMD); /* flush */
447
448 return 0;
449 }
450
451
452 static void ahci_port_stop(struct ata_port *ap)
453 {
454 struct device *dev = ap->host_set->dev;
455 struct ahci_port_priv *pp = ap->private_data;
456 void __iomem *mmio = ap->host_set->mmio_base;
457 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
458 u32 tmp;
459
460 tmp = readl(port_mmio + PORT_CMD);
461 tmp &= ~(PORT_CMD_START | PORT_CMD_FIS_RX);
462 writel(tmp, port_mmio + PORT_CMD);
463 readl(port_mmio + PORT_CMD); /* flush */
464
465 /* spec says 500 msecs for each PORT_CMD_{START,FIS_RX} bit, so
466 * this is slightly incorrect.
467 */
468 msleep(500);
469
470 ap->private_data = NULL;
471 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
472 pp->cmd_slot, pp->cmd_slot_dma);
473 ata_pad_free(ap, dev);
474 kfree(pp);
475 }
476
477 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
478 {
479 unsigned int sc_reg;
480
481 switch (sc_reg_in) {
482 case SCR_STATUS: sc_reg = 0; break;
483 case SCR_CONTROL: sc_reg = 1; break;
484 case SCR_ERROR: sc_reg = 2; break;
485 case SCR_ACTIVE: sc_reg = 3; break;
486 default:
487 return 0xffffffffU;
488 }
489
490 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
491 }
492
493
494 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
495 u32 val)
496 {
497 unsigned int sc_reg;
498
499 switch (sc_reg_in) {
500 case SCR_STATUS: sc_reg = 0; break;
501 case SCR_CONTROL: sc_reg = 1; break;
502 case SCR_ERROR: sc_reg = 2; break;
503 case SCR_ACTIVE: sc_reg = 3; break;
504 default:
505 return;
506 }
507
508 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
509 }
510
511 static int ahci_stop_engine(struct ata_port *ap)
512 {
513 void __iomem *mmio = ap->host_set->mmio_base;
514 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
515 int work;
516 u32 tmp;
517
518 tmp = readl(port_mmio + PORT_CMD);
519 tmp &= ~PORT_CMD_START;
520 writel(tmp, port_mmio + PORT_CMD);
521
522 /* wait for engine to stop. TODO: this could be
523 * as long as 500 msec
524 */
525 work = 1000;
526 while (work-- > 0) {
527 tmp = readl(port_mmio + PORT_CMD);
528 if ((tmp & PORT_CMD_LIST_ON) == 0)
529 return 0;
530 udelay(10);
531 }
532
533 return -EIO;
534 }
535
536 static void ahci_start_engine(struct ata_port *ap)
537 {
538 void __iomem *mmio = ap->host_set->mmio_base;
539 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
540 u32 tmp;
541
542 tmp = readl(port_mmio + PORT_CMD);
543 tmp |= PORT_CMD_START;
544 writel(tmp, port_mmio + PORT_CMD);
545 readl(port_mmio + PORT_CMD); /* flush */
546 }
547
548 static unsigned int ahci_dev_classify(struct ata_port *ap)
549 {
550 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
551 struct ata_taskfile tf;
552 u32 tmp;
553
554 tmp = readl(port_mmio + PORT_SIG);
555 tf.lbah = (tmp >> 24) & 0xff;
556 tf.lbam = (tmp >> 16) & 0xff;
557 tf.lbal = (tmp >> 8) & 0xff;
558 tf.nsect = (tmp) & 0xff;
559
560 return ata_dev_classify(&tf);
561 }
562
563 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
564 u32 opts)
565 {
566 dma_addr_t cmd_tbl_dma;
567
568 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
569
570 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
571 pp->cmd_slot[tag].status = 0;
572 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
573 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
574 }
575
576 static int ahci_clo(struct ata_port *ap)
577 {
578 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
579 struct ahci_host_priv *hpriv = ap->host_set->private_data;
580 u32 tmp;
581
582 if (!(hpriv->cap & HOST_CAP_CLO))
583 return -EOPNOTSUPP;
584
585 tmp = readl(port_mmio + PORT_CMD);
586 tmp |= PORT_CMD_CLO;
587 writel(tmp, port_mmio + PORT_CMD);
588
589 tmp = ata_wait_register(port_mmio + PORT_CMD,
590 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
591 if (tmp & PORT_CMD_CLO)
592 return -EIO;
593
594 return 0;
595 }
596
597 static int ahci_prereset(struct ata_port *ap)
598 {
599 if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
600 (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
601 /* ATA_BUSY hasn't cleared, so send a CLO */
602 ahci_clo(ap);
603 }
604
605 return ata_std_prereset(ap);
606 }
607
608 static int ahci_softreset(struct ata_port *ap, unsigned int *class)
609 {
610 struct ahci_port_priv *pp = ap->private_data;
611 void __iomem *mmio = ap->host_set->mmio_base;
612 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
613 const u32 cmd_fis_len = 5; /* five dwords */
614 const char *reason = NULL;
615 struct ata_taskfile tf;
616 u32 tmp;
617 u8 *fis;
618 int rc;
619
620 DPRINTK("ENTER\n");
621
622 if (ata_port_offline(ap)) {
623 DPRINTK("PHY reports no device\n");
624 *class = ATA_DEV_NONE;
625 return 0;
626 }
627
628 /* prepare for SRST (AHCI-1.1 10.4.1) */
629 rc = ahci_stop_engine(ap);
630 if (rc) {
631 reason = "failed to stop engine";
632 goto fail_restart;
633 }
634
635 /* check BUSY/DRQ, perform Command List Override if necessary */
636 ahci_tf_read(ap, &tf);
637 if (tf.command & (ATA_BUSY | ATA_DRQ)) {
638 rc = ahci_clo(ap);
639
640 if (rc == -EOPNOTSUPP) {
641 reason = "port busy but CLO unavailable";
642 goto fail_restart;
643 } else if (rc) {
644 reason = "port busy but CLO failed";
645 goto fail_restart;
646 }
647 }
648
649 /* restart engine */
650 ahci_start_engine(ap);
651
652 ata_tf_init(ap->device, &tf);
653 fis = pp->cmd_tbl;
654
655 /* issue the first H2D Register FIS (with SRST set) */
656 ahci_fill_cmd_slot(pp, 0,
657 cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
658
659 tf.ctl |= ATA_SRST;
660 ata_tf_to_fis(&tf, fis, 0);
661 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
662
663 writel(1, port_mmio + PORT_CMD_ISSUE);
664
665 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
666 if (tmp & 0x1) {
667 rc = -EIO;
668 reason = "1st FIS failed";
669 goto fail;
670 }
671
672 /* spec says at least 5us, but be generous and sleep for 1ms */
673 msleep(1);
674
675 /* issue the second H2D Register FIS (SRST cleared) */
676 ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
677
678 tf.ctl &= ~ATA_SRST;
679 ata_tf_to_fis(&tf, fis, 0);
680 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
681
682 writel(1, port_mmio + PORT_CMD_ISSUE);
683 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
684
685 /* spec mandates ">= 2ms" before checking status.
686 * We wait 150ms, because that was the magic delay used for
687 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
688 * between when the ATA command register is written, and then
689 * status is checked. Because waiting for "a while" before
690 * checking status is fine, post SRST, we perform this magic
691 * delay here as well.
692 */
693 msleep(150);
694
695 *class = ATA_DEV_NONE;
696 if (ata_port_online(ap)) {
697 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
698 rc = -EIO;
699 reason = "device not ready";
700 goto fail;
701 }
702 *class = ahci_dev_classify(ap);
703 }
704
705 DPRINTK("EXIT, class=%u\n", *class);
706 return 0;
707
708 fail_restart:
709 ahci_start_engine(ap);
710 fail:
711 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
712 return rc;
713 }
714
715 static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
716 {
717 struct ahci_port_priv *pp = ap->private_data;
718 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
719 struct ata_taskfile tf;
720 int rc;
721
722 DPRINTK("ENTER\n");
723
724 ahci_stop_engine(ap);
725
726 /* clear D2H reception area to properly wait for D2H FIS */
727 ata_tf_init(ap->device, &tf);
728 tf.command = 0xff;
729 ata_tf_to_fis(&tf, d2h_fis, 0);
730
731 rc = sata_std_hardreset(ap, class);
732
733 ahci_start_engine(ap);
734
735 if (rc == 0 && ata_port_online(ap))
736 *class = ahci_dev_classify(ap);
737 if (*class == ATA_DEV_UNKNOWN)
738 *class = ATA_DEV_NONE;
739
740 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
741 return rc;
742 }
743
744 static void ahci_postreset(struct ata_port *ap, unsigned int *class)
745 {
746 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
747 u32 new_tmp, tmp;
748
749 ata_std_postreset(ap, class);
750
751 /* Make sure port's ATAPI bit is set appropriately */
752 new_tmp = tmp = readl(port_mmio + PORT_CMD);
753 if (*class == ATA_DEV_ATAPI)
754 new_tmp |= PORT_CMD_ATAPI;
755 else
756 new_tmp &= ~PORT_CMD_ATAPI;
757 if (new_tmp != tmp) {
758 writel(new_tmp, port_mmio + PORT_CMD);
759 readl(port_mmio + PORT_CMD); /* flush */
760 }
761 }
762
763 static u8 ahci_check_status(struct ata_port *ap)
764 {
765 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
766
767 return readl(mmio + PORT_TFDATA) & 0xFF;
768 }
769
770 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
771 {
772 struct ahci_port_priv *pp = ap->private_data;
773 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
774
775 ata_tf_from_fis(d2h_fis, tf);
776 }
777
778 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
779 {
780 struct scatterlist *sg;
781 struct ahci_sg *ahci_sg;
782 unsigned int n_sg = 0;
783
784 VPRINTK("ENTER\n");
785
786 /*
787 * Next, the S/G list.
788 */
789 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
790 ata_for_each_sg(sg, qc) {
791 dma_addr_t addr = sg_dma_address(sg);
792 u32 sg_len = sg_dma_len(sg);
793
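		/* the byte count field of an AHCI PRD entry is zero based,
		 * hence the sg_len - 1 stored below
		 */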
794 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
795 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
796 ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
797
798 ahci_sg++;
799 n_sg++;
800 }
801
802 return n_sg;
803 }
804
805 static void ahci_qc_prep(struct ata_queued_cmd *qc)
806 {
807 struct ata_port *ap = qc->ap;
808 struct ahci_port_priv *pp = ap->private_data;
809 int is_atapi = is_atapi_taskfile(&qc->tf);
810 void *cmd_tbl;
811 u32 opts;
812 const u32 cmd_fis_len = 5; /* five dwords */
813 unsigned int n_elem;
814
815 /*
816 * Fill in command table information. First, the header,
817 * a SATA Register - Host to Device command FIS.
818 */
819 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
820
821 ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
822 if (is_atapi) {
823 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
824 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
825 }
826
827 n_elem = 0;
828 if (qc->flags & ATA_QCFLAG_DMAMAP)
829 n_elem = ahci_fill_sg(qc, cmd_tbl);
830
831 /*
832 * Fill in command slot information.
833 */
834 opts = cmd_fis_len | n_elem << 16;
835 if (qc->tf.flags & ATA_TFLAG_WRITE)
836 opts |= AHCI_CMD_WRITE;
837 if (is_atapi)
838 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
839
840 ahci_fill_cmd_slot(pp, qc->tag, opts);
841 }
842
843 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
844 {
845 struct ahci_port_priv *pp = ap->private_data;
846 struct ata_eh_info *ehi = &ap->eh_info;
847 unsigned int err_mask = 0, action = 0;
848 struct ata_queued_cmd *qc;
849 u32 serror;
850
851 ata_ehi_clear_desc(ehi);
852
853 /* AHCI needs SError cleared; otherwise, it might lock up */
854 serror = ahci_scr_read(ap, SCR_ERROR);
855 ahci_scr_write(ap, SCR_ERROR, serror);
856
857 /* analyze @irq_stat */
858 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
859
860 if (irq_stat & PORT_IRQ_TF_ERR)
861 err_mask |= AC_ERR_DEV;
862
863 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
864 err_mask |= AC_ERR_HOST_BUS;
865 action |= ATA_EH_SOFTRESET;
866 }
867
868 if (irq_stat & PORT_IRQ_IF_ERR) {
869 err_mask |= AC_ERR_ATA_BUS;
870 action |= ATA_EH_SOFTRESET;
871 ata_ehi_push_desc(ehi, ", interface fatal error");
872 }
873
874 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
875 ata_ehi_hotplugged(ehi);
876 ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
877 "connection status changed" : "PHY RDY changed");
878 }
879
880 if (irq_stat & PORT_IRQ_UNK_FIS) {
881 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
882
883 err_mask |= AC_ERR_HSM;
884 action |= ATA_EH_SOFTRESET;
885 ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
886 unk[0], unk[1], unk[2], unk[3]);
887 }
888
889 /* okay, let's hand over to EH */
890 ehi->serror |= serror;
891 ehi->action |= action;
892
893 qc = ata_qc_from_tag(ap, ap->active_tag);
894 if (qc)
895 qc->err_mask |= err_mask;
896 else
897 ehi->err_mask |= err_mask;
898
899 if (irq_stat & PORT_IRQ_FREEZE)
900 ata_port_freeze(ap);
901 else
902 ata_port_abort(ap);
903 }
904
905 static void ahci_host_intr(struct ata_port *ap)
906 {
907 void __iomem *mmio = ap->host_set->mmio_base;
908 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
909 struct ata_eh_info *ehi = &ap->eh_info;
910 u32 status, qc_active;
911 int rc;
912
913 status = readl(port_mmio + PORT_IRQ_STAT);
914 writel(status, port_mmio + PORT_IRQ_STAT);
915
916 if (unlikely(status & PORT_IRQ_ERROR)) {
917 ahci_error_intr(ap, status);
918 return;
919 }
920
921 if (ap->sactive)
922 qc_active = readl(port_mmio + PORT_SCR_ACT);
923 else
924 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
925
926 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
927 if (rc > 0)
928 return;
929 if (rc < 0) {
930 ehi->err_mask |= AC_ERR_HSM;
931 ehi->action |= ATA_EH_SOFTRESET;
932 ata_port_freeze(ap);
933 return;
934 }
935
936 /* hmmm... a spurious interrupt */
937
938 /* some devices send D2H reg with I bit set during NCQ command phase */
939 if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
940 return;
941
942 /* ignore interim PIO setup FIS interrupts */
943 if (ata_tag_valid(ap->active_tag)) {
944 struct ata_queued_cmd *qc =
945 ata_qc_from_tag(ap, ap->active_tag);
946
947 if (qc && qc->tf.protocol == ATA_PROT_PIO &&
948 (status & PORT_IRQ_PIOS_FIS))
949 return;
950 }
951
952 if (ata_ratelimit())
953 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
954 "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
955 status, ap->active_tag, ap->sactive);
956 }
957
958 static void ahci_irq_clear(struct ata_port *ap)
959 {
960 /* TODO */
961 }
962
963 static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
964 {
965 struct ata_host_set *host_set = dev_instance;
966 struct ahci_host_priv *hpriv;
967 unsigned int i, handled = 0;
968 void __iomem *mmio;
969 u32 irq_stat, irq_ack = 0;
970
971 VPRINTK("ENTER\n");
972
973 hpriv = host_set->private_data;
974 mmio = host_set->mmio_base;
975
976 /* sigh. 0xffffffff is a valid return from h/w */
977 irq_stat = readl(mmio + HOST_IRQ_STAT);
978 irq_stat &= hpriv->port_map;
979 if (!irq_stat)
980 return IRQ_NONE;
981
982 spin_lock(&host_set->lock);
983
984 for (i = 0; i < host_set->n_ports; i++) {
985 struct ata_port *ap;
986
987 if (!(irq_stat & (1 << i)))
988 continue;
989
990 ap = host_set->ports[i];
991 if (ap) {
992 ahci_host_intr(ap);
993 VPRINTK("port %u\n", i);
994 } else {
995 VPRINTK("port %u (no irq)\n", i);
996 if (ata_ratelimit())
997 dev_printk(KERN_WARNING, host_set->dev,
998 "interrupt on disabled port %u\n", i);
999 }
1000
1001 irq_ack |= (1 << i);
1002 }
1003
1004 if (irq_ack) {
1005 writel(irq_ack, mmio + HOST_IRQ_STAT);
1006 handled = 1;
1007 }
1008
1009 spin_unlock(&host_set->lock);
1010
1011 VPRINTK("EXIT\n");
1012
1013 return IRQ_RETVAL(handled);
1014 }
1015
1016 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1017 {
1018 struct ata_port *ap = qc->ap;
1019 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
1020
1021 if (qc->tf.protocol == ATA_PROT_NCQ)
1022 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1023 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1024 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1025
1026 return 0;
1027 }
1028
1029 static void ahci_freeze(struct ata_port *ap)
1030 {
1031 void __iomem *mmio = ap->host_set->mmio_base;
1032 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1033
1034 /* turn IRQ off */
1035 writel(0, port_mmio + PORT_IRQ_MASK);
1036 }
1037
1038 static void ahci_thaw(struct ata_port *ap)
1039 {
1040 void __iomem *mmio = ap->host_set->mmio_base;
1041 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1042 u32 tmp;
1043
1044 /* clear IRQ */
1045 tmp = readl(port_mmio + PORT_IRQ_STAT);
1046 writel(tmp, port_mmio + PORT_IRQ_STAT);
1047 writel(1 << ap->id, mmio + HOST_IRQ_STAT);
1048
1049 /* turn IRQ back on */
1050 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
1051 }
1052
1053 static void ahci_error_handler(struct ata_port *ap)
1054 {
1055 if (!(ap->flags & ATA_FLAG_FROZEN)) {
1056 /* restart engine */
1057 ahci_stop_engine(ap);
1058 ahci_start_engine(ap);
1059 }
1060
1061 /* perform recovery */
1062 ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
1063 ahci_postreset);
1064 }
1065
1066 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1067 {
1068 struct ata_port *ap = qc->ap;
1069
1070 if (qc->flags & ATA_QCFLAG_FAILED)
1071 qc->err_mask |= AC_ERR_OTHER;
1072
1073 if (qc->err_mask) {
1074 /* make DMA engine forget about the failed command */
1075 ahci_stop_engine(ap);
1076 ahci_start_engine(ap);
1077 }
1078 }
1079
1080 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
1081 unsigned int port_idx)
1082 {
1083 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1084 base = ahci_port_base_ul(base, port_idx);
1085 VPRINTK("base now==0x%lx\n", base);
1086
1087 port->cmd_addr = base;
1088 port->scr_addr = base + PORT_SCR;
1089
1090 VPRINTK("EXIT\n");
1091 }
1092
1093 static int ahci_host_init(struct ata_probe_ent *probe_ent)
1094 {
1095 struct ahci_host_priv *hpriv = probe_ent->private_data;
1096 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1097 void __iomem *mmio = probe_ent->mmio_base;
1098 u32 tmp, cap_save;
1099 unsigned int i, j, using_dac;
1100 int rc;
1101 void __iomem *port_mmio;
1102
1103 cap_save = readl(mmio + HOST_CAP);
1104 cap_save &= ( (1<<28) | (1<<17) );
1105 cap_save |= (1 << 27);
1106
1107 /* global controller reset */
1108 tmp = readl(mmio + HOST_CTL);
1109 if ((tmp & HOST_RESET) == 0) {
1110 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1111 readl(mmio + HOST_CTL); /* flush */
1112 }
1113
1114 /* reset must complete within 1 second, or
1115 * the hardware should be considered fried.
1116 */
1117 ssleep(1);
1118
1119 tmp = readl(mmio + HOST_CTL);
1120 if (tmp & HOST_RESET) {
1121 dev_printk(KERN_ERR, &pdev->dev,
1122 "controller reset failed (0x%x)\n", tmp);
1123 return -EIO;
1124 }
1125
1126 writel(HOST_AHCI_EN, mmio + HOST_CTL);
1127 (void) readl(mmio + HOST_CTL); /* flush */
1128 writel(cap_save, mmio + HOST_CAP);
1129 writel(0xf, mmio + HOST_PORTS_IMPL);
1130 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
1131
1132 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1133 u16 tmp16;
1134
1135 pci_read_config_word(pdev, 0x92, &tmp16);
1136 tmp16 |= 0xf;
1137 pci_write_config_word(pdev, 0x92, tmp16);
1138 }
1139
1140 hpriv->cap = readl(mmio + HOST_CAP);
1141 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
1142 probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
1143
1144 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1145 hpriv->cap, hpriv->port_map, probe_ent->n_ports);
1146
1147 using_dac = hpriv->cap & HOST_CAP_64;
1148 if (using_dac &&
1149 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1150 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1151 if (rc) {
1152 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1153 if (rc) {
1154 dev_printk(KERN_ERR, &pdev->dev,
1155 "64-bit DMA enable failed\n");
1156 return rc;
1157 }
1158 }
1159 } else {
1160 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1161 if (rc) {
1162 dev_printk(KERN_ERR, &pdev->dev,
1163 "32-bit DMA enable failed\n");
1164 return rc;
1165 }
1166 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1167 if (rc) {
1168 dev_printk(KERN_ERR, &pdev->dev,
1169 "32-bit consistent DMA enable failed\n");
1170 return rc;
1171 }
1172 }
1173
1174 for (i = 0; i < probe_ent->n_ports; i++) {
1175 #if 0 /* BIOSen initialize this incorrectly */
1176 if (!(hpriv->port_map & (1 << i)))
1177 continue;
1178 #endif
1179
1180 port_mmio = ahci_port_base(mmio, i);
1181 VPRINTK("mmio %p port_mmio %p\n", mmio, port_mmio);
1182
1183 ahci_setup_port(&probe_ent->port[i],
1184 (unsigned long) mmio, i);
1185
1186 /* make sure port is not active */
1187 tmp = readl(port_mmio + PORT_CMD);
1188 VPRINTK("PORT_CMD 0x%x\n", tmp);
1189 if (tmp & (PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
1190 PORT_CMD_FIS_RX | PORT_CMD_START)) {
1191 tmp &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
1192 PORT_CMD_FIS_RX | PORT_CMD_START);
1193 writel(tmp, port_mmio + PORT_CMD);
1194 readl(port_mmio + PORT_CMD); /* flush */
1195
1196 /* spec says 500 msecs for each bit, so
1197 * this is slightly incorrect.
1198 */
1199 msleep(500);
1200 }
1201
1202 writel(PORT_CMD_SPIN_UP, port_mmio + PORT_CMD);
1203
1204 j = 0;
1205 while (j < 100) {
1206 msleep(10);
1207 tmp = readl(port_mmio + PORT_SCR_STAT);
1208 if ((tmp & 0xf) == 0x3)
1209 break;
1210 j++;
1211 }
1212
1213 tmp = readl(port_mmio + PORT_SCR_ERR);
1214 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1215 writel(tmp, port_mmio + PORT_SCR_ERR);
1216
1217 /* ack any pending irq events for this port */
1218 tmp = readl(port_mmio + PORT_IRQ_STAT);
1219 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1220 if (tmp)
1221 writel(tmp, port_mmio + PORT_IRQ_STAT);
1222
1223 writel(1 << i, mmio + HOST_IRQ_STAT);
1224 }
1225
1226 tmp = readl(mmio + HOST_CTL);
1227 VPRINTK("HOST_CTL 0x%x\n", tmp);
1228 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1229 tmp = readl(mmio + HOST_CTL);
1230 VPRINTK("HOST_CTL 0x%x\n", tmp);
1231
1232 pci_set_master(pdev);
1233
1234 return 0;
1235 }
1236
1237 static void ahci_print_info(struct ata_probe_ent *probe_ent)
1238 {
1239 struct ahci_host_priv *hpriv = probe_ent->private_data;
1240 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1241 void __iomem *mmio = probe_ent->mmio_base;
1242 u32 vers, cap, impl, speed;
1243 const char *speed_s;
1244 u16 cc;
1245 const char *scc_s;
1246
1247 vers = readl(mmio + HOST_VERSION);
1248 cap = hpriv->cap;
1249 impl = hpriv->port_map;
1250
1251 speed = (cap >> 20) & 0xf;
1252 if (speed == 1)
1253 speed_s = "1.5";
1254 else if (speed == 2)
1255 speed_s = "3";
1256 else
1257 speed_s = "?";
1258
1259 pci_read_config_word(pdev, 0x0a, &cc);
1260 if (cc == 0x0101)
1261 scc_s = "IDE";
1262 else if (cc == 0x0106)
1263 scc_s = "SATA";
1264 else if (cc == 0x0104)
1265 scc_s = "RAID";
1266 else
1267 scc_s = "unknown";
1268
1269 dev_printk(KERN_INFO, &pdev->dev,
1270 "AHCI %02x%02x.%02x%02x "
1271 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1272 ,
1273
1274 (vers >> 24) & 0xff,
1275 (vers >> 16) & 0xff,
1276 (vers >> 8) & 0xff,
1277 vers & 0xff,
1278
1279 ((cap >> 8) & 0x1f) + 1,
1280 (cap & 0x1f) + 1,
1281 speed_s,
1282 impl,
1283 scc_s);
1284
1285 dev_printk(KERN_INFO, &pdev->dev,
1286 "flags: "
1287 "%s%s%s%s%s%s"
1288 "%s%s%s%s%s%s%s\n"
1289 ,
1290
1291 cap & (1 << 31) ? "64bit " : "",
1292 cap & (1 << 30) ? "ncq " : "",
1293 cap & (1 << 28) ? "ilck " : "",
1294 cap & (1 << 27) ? "stag " : "",
1295 cap & (1 << 26) ? "pm " : "",
1296 cap & (1 << 25) ? "led " : "",
1297
1298 cap & (1 << 24) ? "clo " : "",
1299 cap & (1 << 19) ? "nz " : "",
1300 cap & (1 << 18) ? "only " : "",
1301 cap & (1 << 17) ? "pmp " : "",
1302 cap & (1 << 15) ? "pio " : "",
1303 cap & (1 << 14) ? "slum " : "",
1304 cap & (1 << 13) ? "part " : ""
1305 );
1306 }
1307
1308 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1309 {
1310 static int printed_version;
1311 struct ata_probe_ent *probe_ent = NULL;
1312 struct ahci_host_priv *hpriv;
1313 unsigned long base;
1314 void __iomem *mmio_base;
1315 unsigned int board_idx = (unsigned int) ent->driver_data;
1316 int have_msi, pci_dev_busy = 0;
1317 int rc;
1318
1319 VPRINTK("ENTER\n");
1320
1321 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
1322
1323 if (!printed_version++)
1324 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1325
1326 rc = pci_enable_device(pdev);
1327 if (rc)
1328 return rc;
1329
1330 rc = pci_request_regions(pdev, DRV_NAME);
1331 if (rc) {
1332 pci_dev_busy = 1;
1333 goto err_out;
1334 }
1335
1336 if (pci_enable_msi(pdev) == 0)
1337 have_msi = 1;
1338 else {
1339 pci_intx(pdev, 1);
1340 have_msi = 0;
1341 }
1342
1343 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1344 if (probe_ent == NULL) {
1345 rc = -ENOMEM;
1346 goto err_out_msi;
1347 }
1348
1349 memset(probe_ent, 0, sizeof(*probe_ent));
1350 probe_ent->dev = pci_dev_to_dev(pdev);
1351 INIT_LIST_HEAD(&probe_ent->node);
1352
1353 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
1354 if (mmio_base == NULL) {
1355 rc = -ENOMEM;
1356 goto err_out_free_ent;
1357 }
1358 base = (unsigned long) mmio_base;
1359
1360 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1361 if (!hpriv) {
1362 rc = -ENOMEM;
1363 goto err_out_iounmap;
1364 }
1365 memset(hpriv, 0, sizeof(*hpriv));
1366
1367 probe_ent->sht = ahci_port_info[board_idx].sht;
1368 probe_ent->host_flags = ahci_port_info[board_idx].host_flags;
1369 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
1370 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
1371 probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
1372
1373 probe_ent->irq = pdev->irq;
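	/* IRQF_SHARED is the new name for the old SA_SHIRQ flag; semantics are unchanged */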
1374 probe_ent->irq_flags = IRQF_SHARED;
1375 probe_ent->mmio_base = mmio_base;
1376 probe_ent->private_data = hpriv;
1377
1378 if (have_msi)
1379 hpriv->flags |= AHCI_FLAG_MSI;
1380
1381 /* JMicron-specific fixup: make sure we're in AHCI mode */
1382 if (pdev->vendor == 0x197b)
1383 pci_write_config_byte(pdev, 0x41, 0xa1);
1384
1385 /* initialize adapter */
1386 rc = ahci_host_init(probe_ent);
1387 if (rc)
1388 goto err_out_hpriv;
1389
1390 if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
1391 (hpriv->cap & HOST_CAP_NCQ))
1392 probe_ent->host_flags |= ATA_FLAG_NCQ;
1393
1394 ahci_print_info(probe_ent);
1395
1396 /* FIXME: check ata_device_add return value */
1397 ata_device_add(probe_ent);
1398 kfree(probe_ent);
1399
1400 return 0;
1401
1402 err_out_hpriv:
1403 kfree(hpriv);
1404 err_out_iounmap:
1405 pci_iounmap(pdev, mmio_base);
1406 err_out_free_ent:
1407 kfree(probe_ent);
1408 err_out_msi:
1409 if (have_msi)
1410 pci_disable_msi(pdev);
1411 else
1412 pci_intx(pdev, 0);
1413 pci_release_regions(pdev);
1414 err_out:
1415 if (!pci_dev_busy)
1416 pci_disable_device(pdev);
1417 return rc;
1418 }
1419
1420 static void ahci_remove_one (struct pci_dev *pdev)
1421 {
1422 struct device *dev = pci_dev_to_dev(pdev);
1423 struct ata_host_set *host_set = dev_get_drvdata(dev);
1424 struct ahci_host_priv *hpriv = host_set->private_data;
1425 unsigned int i;
1426 int have_msi;
1427
1428 for (i = 0; i < host_set->n_ports; i++)
1429 ata_port_detach(host_set->ports[i]);
1430
1431 have_msi = hpriv->flags & AHCI_FLAG_MSI;
1432 free_irq(host_set->irq, host_set);
1433
1434 for (i = 0; i < host_set->n_ports; i++) {
1435 struct ata_port *ap = host_set->ports[i];
1436
1437 ata_scsi_release(ap->host);
1438 scsi_host_put(ap->host);
1439 }
1440
1441 kfree(hpriv);
1442 pci_iounmap(pdev, host_set->mmio_base);
1443 kfree(host_set);
1444
1445 if (have_msi)
1446 pci_disable_msi(pdev);
1447 else
1448 pci_intx(pdev, 0);
1449 pci_release_regions(pdev);
1450 pci_disable_device(pdev);
1451 dev_set_drvdata(dev, NULL);
1452 }
1453
1454 static int __init ahci_init(void)
1455 {
1456 return pci_module_init(&ahci_pci_driver);
1457 }
1458
1459 static void __exit ahci_exit(void)
1460 {
1461 pci_unregister_driver(&ahci_pci_driver);
1462 }
1463
1464
1465 MODULE_AUTHOR("Jeff Garzik");
1466 MODULE_DESCRIPTION("AHCI SATA low-level driver");
1467 MODULE_LICENSE("GPL");
1468 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1469 MODULE_VERSION(DRV_VERSION);
1470
1471 module_init(ahci_init);
1472 module_exit(ahci_exit);