[PATCH] sata_nv: NVIDIA nForce SATA driver
[deliverable/linux.git] / drivers / ata / sata_nv.c
CommitLineData
1da177e4
LT
1/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
aa7e16d6
JG
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
1da177e4 21 *
af36d7f0
JG
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
fbbb262d
RH
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
1da177e4
LT
37 */
38
1da177e4
LT
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
a9524a76 46#include <linux/device.h>
1da177e4 47#include <scsi/scsi_host.h>
fbbb262d 48#include <scsi/scsi_device.h>
1da177e4
LT
49#include <linux/libata.h>
50
/* Driver identity and the 32-bit segment boundary used by the ADMA DMA engine. */
51#define DRV_NAME "sata_nv"
2dec7555 52#define DRV_VERSION "3.2"
fbbb262d
RH
53
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
1da177e4 55
10ad05df
JG
/* Controller register offsets, interrupt/status bit masks, and ADMA sizing
   constants for the nForce SATA core and its CK804/MCP04 ADMA engine. */
56enum {
57 NV_PORTS = 2,
58 NV_PIO_MASK = 0x1f,
59 NV_MWDMA_MASK = 0x07,
60 NV_UDMA_MASK = 0x7f,
61 NV_PORT0_SCR_REG_OFFSET = 0x00,
62 NV_PORT1_SCR_REG_OFFSET = 0x40,
1da177e4 63
27e4b274 64 /* INT_STATUS/ENABLE */
10ad05df 65 NV_INT_STATUS = 0x10,
10ad05df 66 NV_INT_ENABLE = 0x11,
27e4b274 67 NV_INT_STATUS_CK804 = 0x440,
10ad05df 68 NV_INT_ENABLE_CK804 = 0x441,
1da177e4 69
27e4b274
TH
70 /* INT_STATUS/ENABLE bits */
71 NV_INT_DEV = 0x01,
72 NV_INT_PM = 0x02,
73 NV_INT_ADDED = 0x04,
74 NV_INT_REMOVED = 0x08,
75
76 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
77
39f87582 78 NV_INT_ALL = 0x0f,
5a44efff
TH
79 NV_INT_MASK = NV_INT_DEV |
80 NV_INT_ADDED | NV_INT_REMOVED,
39f87582 81
27e4b274 82 /* INT_CONFIG */
10ad05df
JG
83 NV_INT_CONFIG = 0x12,
84 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
1da177e4 85
10ad05df
JG
86 // For PCI config register 20
87 NV_MCP_SATA_CFG_20 = 0x50,
88 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
fbbb262d
RH
89 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
90 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
91 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
92 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
93
94 NV_ADMA_MAX_CPBS = 32,
95 NV_ADMA_CPB_SZ = 128,
96 NV_ADMA_APRD_SZ = 16,
97 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
98 NV_ADMA_APRD_SZ,
99 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
100 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
101 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
102 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
103
104 /* BAR5 offset to ADMA general registers */
105 NV_ADMA_GEN = 0x400,
106 NV_ADMA_GEN_CTL = 0x00,
107 NV_ADMA_NOTIFIER_CLEAR = 0x30,
108
109 /* BAR5 offset to ADMA ports */
110 NV_ADMA_PORT = 0x480,
111
112 /* size of ADMA port register space */
113 NV_ADMA_PORT_SIZE = 0x100,
114
115 /* ADMA port registers */
116 NV_ADMA_CTL = 0x40,
117 NV_ADMA_CPB_COUNT = 0x42,
118 NV_ADMA_NEXT_CPB_IDX = 0x43,
119 NV_ADMA_STAT = 0x44,
120 NV_ADMA_CPB_BASE_LOW = 0x48,
121 NV_ADMA_CPB_BASE_HIGH = 0x4C,
122 NV_ADMA_APPEND = 0x50,
123 NV_ADMA_NOTIFIER = 0x68,
124 NV_ADMA_NOTIFIER_ERROR = 0x6C,
125
126 /* NV_ADMA_CTL register bits */
127 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
128 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
129 NV_ADMA_CTL_GO = (1 << 7),
130 NV_ADMA_CTL_AIEN = (1 << 8),
131 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
132 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
133
134 /* CPB response flag bits */
135 NV_CPB_RESP_DONE = (1 << 0),
136 NV_CPB_RESP_ATA_ERR = (1 << 3),
137 NV_CPB_RESP_CMD_ERR = (1 << 4),
138 NV_CPB_RESP_CPB_ERR = (1 << 7),
139
140 /* CPB control flag bits */
141 NV_CPB_CTL_CPB_VALID = (1 << 0),
142 NV_CPB_CTL_QUEUE = (1 << 1),
143 NV_CPB_CTL_APRD_VALID = (1 << 2),
144 NV_CPB_CTL_IEN = (1 << 3),
145 NV_CPB_CTL_FPDMA = (1 << 4),
146
147 /* APRD flags */
148 NV_APRD_WRITE = (1 << 1),
149 NV_APRD_END = (1 << 2),
150 NV_APRD_CONT = (1 << 3),
151
152 /* NV_ADMA_STAT flags */
153 NV_ADMA_STAT_TIMEOUT = (1 << 0),
154 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
155 NV_ADMA_STAT_HOTPLUG = (1 << 2),
156 NV_ADMA_STAT_CPBERR = (1 << 4),
157 NV_ADMA_STAT_SERROR = (1 << 5),
158 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
159 NV_ADMA_STAT_IDLE = (1 << 8),
160 NV_ADMA_STAT_LEGACY = (1 << 9),
161 NV_ADMA_STAT_STOPPED = (1 << 10),
162 NV_ADMA_STAT_DONE = (1 << 12),
163 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
164 NV_ADMA_STAT_TIMEOUT,
165
166 /* port flags (kept in nv_adma_port_priv.flags) */
167 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
2dec7555 168 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
fbbb262d
RH
169
170};
171
172/* ADMA Physical Region Descriptor - one SG segment */
173struct nv_adma_prd {
174 __le64 addr;
175 __le32 len;
	/* flags uses the NV_APRD_* bits defined above */
176 u8 flags;
177 u8 packet_len;
178 __le16 reserved;
179};
180
/* Bit layout of the 16-bit register/value pairs written into the CPB
   taskfile area by nv_adma_tf_to_cpb(). */
181enum nv_adma_regbits {
182 CMDEND = (1 << 15), /* end of command list */
183 WNB = (1 << 14), /* wait-not-BSY */
184 IGN = (1 << 13), /* ignore this entry */
185 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
186 DA2 = (1 << (2 + 8)),
187 DA1 = (1 << (1 + 8)),
188 DA0 = (1 << (0 + 8)),
189};
190
191/* ADMA Command Parameter Block
192 The first 5 SG segments are stored inside the Command Parameter Block itself.
193 If there are more than 5 segments the remainder are stored in a separate
194 memory area indicated by next_aprd. */
195struct nv_adma_cpb {
	/* resp_flags: NV_CPB_RESP_* bits; ctl_flags: NV_CPB_CTL_* bits */
196 u8 resp_flags; /* 0 */
197 u8 reserved1; /* 1 */
198 u8 ctl_flags; /* 2 */
199 /* len is length of taskfile in 64 bit words */
200 u8 len; /* 3 */
201 u8 tag; /* 4 */
202 u8 next_cpb_idx; /* 5 */
203 __le16 reserved2; /* 6-7 */
204 __le16 tf[12]; /* 8-31 */
205 struct nv_adma_prd aprd[5]; /* 32-111 */
206 __le64 next_aprd; /* 112-119 */
207 __le64 reserved3; /* 120-127 */
10ad05df 208};
1da177e4 209
fbbb262d
RH
210
/* Per-port ADMA state: CPB array and overflow APRD table (one coherent DMA
   allocation, set up in nv_adma_port_start) plus NV_ADMA_* mode flags. */
211struct nv_adma_port_priv {
212 struct nv_adma_cpb *cpb;
213 dma_addr_t cpb_dma;
214 struct nv_adma_prd *aprd;
215 dma_addr_t aprd_dma;
216 u8 flags;
217};
218
/* Test the per-port interrupt bit for PORT in the ADMA general control/status
   word (bit 19 for port 0, bit 31 for port 1). */
219#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
220
1da177e4 221static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
cca3974e 222static void nv_ck804_host_stop(struct ata_host *host);
7d12e780
DH
223static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
224static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
225static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
1da177e4
LT
226static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
227static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
1da177e4 228
39f87582
TH
229static void nv_nf2_freeze(struct ata_port *ap);
230static void nv_nf2_thaw(struct ata_port *ap);
231static void nv_ck804_freeze(struct ata_port *ap);
232static void nv_ck804_thaw(struct ata_port *ap);
233static void nv_error_handler(struct ata_port *ap);
fbbb262d 234static int nv_adma_slave_config(struct scsi_device *sdev);
2dec7555 235static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
fbbb262d
RH
236static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
237static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
238static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
239static void nv_adma_irq_clear(struct ata_port *ap);
240static int nv_adma_port_start(struct ata_port *ap);
241static void nv_adma_port_stop(struct ata_port *ap);
242static void nv_adma_error_handler(struct ata_port *ap);
243static void nv_adma_host_stop(struct ata_host *host);
244static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
245static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
246static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
247static u8 nv_adma_bmdma_status(struct ata_port *ap);
39f87582 248
1da177e4
LT
/* Board indices; each value selects the matching nv_port_info[] entry. */
249enum nv_host_type
250{
251 GENERIC,
252 NFORCE2,
27e4b274 253 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
fbbb262d
RH
254 CK804,
255 ADMA
1da177e4
LT
256};
257
/* PCI IDs handled by this driver. The two trailing class-match entries catch
   any other NVIDIA device with IDE or RAID storage class as GENERIC. */
3b7d697d 258static const struct pci_device_id nv_pci_tbl[] = {
54bb3a94
JG
259 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
260 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
261 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
262 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
263 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
264 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
265 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
266 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
267 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
268 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
269 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
270 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
271 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
272 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
8fc2d9ca
PC
273 { PCI_VDEVICE(NVIDIA, 0x045c), GENERIC }, /* MCP65 */
274 { PCI_VDEVICE(NVIDIA, 0x045d), GENERIC }, /* MCP65 */
275 { PCI_VDEVICE(NVIDIA, 0x045e), GENERIC }, /* MCP65 */
276 { PCI_VDEVICE(NVIDIA, 0x045f), GENERIC }, /* MCP65 */
277 { PCI_VDEVICE(NVIDIA, 0x0550), GENERIC }, /* MCP67 */
278 { PCI_VDEVICE(NVIDIA, 0x0551), GENERIC }, /* MCP67 */
279 { PCI_VDEVICE(NVIDIA, 0x0552), GENERIC }, /* MCP67 */
280 { PCI_VDEVICE(NVIDIA, 0x0553), GENERIC }, /* MCP67 */
1da177e4
LT
281 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
282 PCI_ANY_ID, PCI_ANY_ID,
283 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
541134cf
DD
284 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
285 PCI_ANY_ID, PCI_ANY_ID,
286 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
2d2744fc
JG
287
288 { } /* terminate list */
1da177e4
LT
289};
290
1da177e4
LT
/* PCI driver glue: probe via nv_init_one, teardown via generic libata remove. */
291static struct pci_driver nv_pci_driver = {
292 .name = DRV_NAME,
293 .id_table = nv_pci_tbl,
294 .probe = nv_init_one,
295 .remove = ata_pci_remove_one,
296};
297
/* SCSI host template for the legacy (non-ADMA) flavors: stock libata
   single-command queueing and BMDMA limits. */
193515d5 298static struct scsi_host_template nv_sht = {
1da177e4
LT
299 .module = THIS_MODULE,
300 .name = DRV_NAME,
301 .ioctl = ata_scsi_ioctl,
302 .queuecommand = ata_scsi_queuecmd,
1da177e4
LT
303 .can_queue = ATA_DEF_QUEUE,
304 .this_id = ATA_SHT_THIS_ID,
305 .sg_tablesize = LIBATA_MAX_PRD,
1da177e4
LT
306 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
307 .emulated = ATA_SHT_EMULATED,
308 .use_clustering = ATA_SHT_USE_CLUSTERING,
309 .proc_name = DRV_NAME,
310 .dma_boundary = ATA_DMA_BOUNDARY,
311 .slave_configure = ata_scsi_slave_config,
ccf68c34 312 .slave_destroy = ata_scsi_slave_destroy,
1da177e4 313 .bios_param = ata_std_bios_param,
1da177e4
LT
314};
315
fbbb262d
RH
/* SCSI host template for ADMA: queue depth of one CPB per command, ADMA s/g
   table limits, and a custom slave_configure to handle ATAPI restrictions. */
316static struct scsi_host_template nv_adma_sht = {
317 .module = THIS_MODULE,
318 .name = DRV_NAME,
319 .ioctl = ata_scsi_ioctl,
320 .queuecommand = ata_scsi_queuecmd,
321 .can_queue = NV_ADMA_MAX_CPBS,
322 .this_id = ATA_SHT_THIS_ID,
323 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
fbbb262d
RH
324 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
325 .emulated = ATA_SHT_EMULATED,
326 .use_clustering = ATA_SHT_USE_CLUSTERING,
327 .proc_name = DRV_NAME,
328 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
329 .slave_configure = nv_adma_slave_config,
330 .slave_destroy = ata_scsi_slave_destroy,
331 .bios_param = ata_std_bios_param,
332};
333
/* Port ops for GENERIC boards: stock libata BMDMA callbacks plus NV-specific
   SCR access, interrupt handler and error handler. */
ada364e8 334static const struct ata_port_operations nv_generic_ops = {
1da177e4
LT
335 .port_disable = ata_port_disable,
336 .tf_load = ata_tf_load,
337 .tf_read = ata_tf_read,
338 .exec_command = ata_exec_command,
339 .check_status = ata_check_status,
340 .dev_select = ata_std_dev_select,
1da177e4
LT
341 .bmdma_setup = ata_bmdma_setup,
342 .bmdma_start = ata_bmdma_start,
343 .bmdma_stop = ata_bmdma_stop,
344 .bmdma_status = ata_bmdma_status,
345 .qc_prep = ata_qc_prep,
346 .qc_issue = ata_qc_issue_prot,
39f87582
TH
347 .freeze = ata_bmdma_freeze,
348 .thaw = ata_bmdma_thaw,
349 .error_handler = nv_error_handler,
350 .post_internal_cmd = ata_bmdma_post_internal_cmd,
a6b2c5d4 351 .data_xfer = ata_pio_data_xfer,
ada364e8 352 .irq_handler = nv_generic_interrupt,
1da177e4
LT
353 .irq_clear = ata_bmdma_irq_clear,
354 .scr_read = nv_scr_read,
355 .scr_write = nv_scr_write,
356 .port_start = ata_port_start,
357 .port_stop = ata_port_stop,
e6faf082 358 .host_stop = ata_pci_host_stop,
1da177e4
LT
359};
360
ada364e8
TH
/* Port ops for nForce2/3: same as generic but with NF2-specific freeze/thaw
   and interrupt handler. */
361static const struct ata_port_operations nv_nf2_ops = {
362 .port_disable = ata_port_disable,
363 .tf_load = ata_tf_load,
364 .tf_read = ata_tf_read,
365 .exec_command = ata_exec_command,
366 .check_status = ata_check_status,
367 .dev_select = ata_std_dev_select,
ada364e8
TH
368 .bmdma_setup = ata_bmdma_setup,
369 .bmdma_start = ata_bmdma_start,
370 .bmdma_stop = ata_bmdma_stop,
371 .bmdma_status = ata_bmdma_status,
372 .qc_prep = ata_qc_prep,
373 .qc_issue = ata_qc_issue_prot,
39f87582
TH
374 .freeze = nv_nf2_freeze,
375 .thaw = nv_nf2_thaw,
376 .error_handler = nv_error_handler,
377 .post_internal_cmd = ata_bmdma_post_internal_cmd,
ada364e8
TH
378 .data_xfer = ata_pio_data_xfer,
379 .irq_handler = nv_nf2_interrupt,
380 .irq_clear = ata_bmdma_irq_clear,
381 .scr_read = nv_scr_read,
382 .scr_write = nv_scr_write,
383 .port_start = ata_port_start,
384 .port_stop = ata_port_stop,
385 .host_stop = ata_pci_host_stop,
386};
387
/* Port ops for CK804/MCP04: CK804-specific freeze/thaw, interrupt handler,
   and host_stop. */
388static const struct ata_port_operations nv_ck804_ops = {
389 .port_disable = ata_port_disable,
390 .tf_load = ata_tf_load,
391 .tf_read = ata_tf_read,
392 .exec_command = ata_exec_command,
393 .check_status = ata_check_status,
394 .dev_select = ata_std_dev_select,
ada364e8
TH
395 .bmdma_setup = ata_bmdma_setup,
396 .bmdma_start = ata_bmdma_start,
397 .bmdma_stop = ata_bmdma_stop,
398 .bmdma_status = ata_bmdma_status,
399 .qc_prep = ata_qc_prep,
400 .qc_issue = ata_qc_issue_prot,
39f87582
TH
401 .freeze = nv_ck804_freeze,
402 .thaw = nv_ck804_thaw,
403 .error_handler = nv_error_handler,
404 .post_internal_cmd = ata_bmdma_post_internal_cmd,
ada364e8
TH
405 .data_xfer = ata_pio_data_xfer,
406 .irq_handler = nv_ck804_interrupt,
407 .irq_clear = ata_bmdma_irq_clear,
408 .scr_read = nv_scr_read,
409 .scr_write = nv_scr_write,
410 .port_start = ata_port_start,
411 .port_stop = ata_port_stop,
412 .host_stop = nv_ck804_host_stop,
413};
414
fbbb262d
RH
/* Port ops for ADMA mode: NV-specific qc_prep/qc_issue, BMDMA wrappers that
   only operate in register mode, and ADMA-aware interrupt/error handling. */
415static const struct ata_port_operations nv_adma_ops = {
416 .port_disable = ata_port_disable,
417 .tf_load = ata_tf_load,
418 .tf_read = ata_tf_read,
2dec7555 419 .check_atapi_dma = nv_adma_check_atapi_dma,
fbbb262d
RH
420 .exec_command = ata_exec_command,
421 .check_status = ata_check_status,
422 .dev_select = ata_std_dev_select,
423 .bmdma_setup = nv_adma_bmdma_setup,
424 .bmdma_start = nv_adma_bmdma_start,
425 .bmdma_stop = nv_adma_bmdma_stop,
426 .bmdma_status = nv_adma_bmdma_status,
427 .qc_prep = nv_adma_qc_prep,
428 .qc_issue = nv_adma_qc_issue,
429 .freeze = nv_ck804_freeze,
430 .thaw = nv_ck804_thaw,
431 .error_handler = nv_adma_error_handler,
432 .post_internal_cmd = nv_adma_bmdma_stop,
433 .data_xfer = ata_mmio_data_xfer,
434 .irq_handler = nv_adma_interrupt,
435 .irq_clear = nv_adma_irq_clear,
436 .scr_read = nv_scr_read,
437 .scr_write = nv_scr_write,
438 .port_start = nv_adma_port_start,
439 .port_stop = nv_adma_port_stop,
440 .host_stop = nv_adma_host_stop,
441};
442
ada364e8
TH
/* Per-board-type configuration, indexed by enum nv_host_type
   (GENERIC, NFORCE2/3, CK804, ADMA). */
443static struct ata_port_info nv_port_info[] = {
444 /* generic */
445 {
446 .sht = &nv_sht,
722420fe
TH
447 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
448 ATA_FLAG_HRST_TO_RESUME,
ada364e8
TH
449 .pio_mask = NV_PIO_MASK,
450 .mwdma_mask = NV_MWDMA_MASK,
451 .udma_mask = NV_UDMA_MASK,
452 .port_ops = &nv_generic_ops,
453 },
454 /* nforce2/3 */
455 {
456 .sht = &nv_sht,
722420fe
TH
457 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
458 ATA_FLAG_HRST_TO_RESUME,
ada364e8
TH
459 .pio_mask = NV_PIO_MASK,
460 .mwdma_mask = NV_MWDMA_MASK,
461 .udma_mask = NV_UDMA_MASK,
462 .port_ops = &nv_nf2_ops,
463 },
464 /* ck804 */
465 {
466 .sht = &nv_sht,
722420fe
TH
467 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
468 ATA_FLAG_HRST_TO_RESUME,
ada364e8
TH
469 .pio_mask = NV_PIO_MASK,
470 .mwdma_mask = NV_MWDMA_MASK,
471 .udma_mask = NV_UDMA_MASK,
472 .port_ops = &nv_ck804_ops,
473 },
fbbb262d
RH
474 /* ADMA */
475 {
476 .sht = &nv_adma_sht,
477 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
478 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
479 .pio_mask = NV_PIO_MASK,
480 .mwdma_mask = NV_MWDMA_MASK,
481 .udma_mask = NV_UDMA_MASK,
482 .port_ops = &nv_adma_ops,
483 },
1da177e4
LT
484};
485
/* Module metadata; nv_pci_tbl is exported for hotplug/modprobe matching. */
486MODULE_AUTHOR("NVIDIA");
487MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
488MODULE_LICENSE("GPL");
489MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
490MODULE_VERSION(DRV_VERSION);
491
fbbb262d
RH
/* Module-wide ADMA enable switch (default on); presumably consulted at probe
   time to select the ADMA board type — the consumer is outside this chunk. */
492static int adma_enabled = 1;
493
2dec7555
RH
/* Return the ADMA register block for @port_no within the BAR5 mapping. */
494static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
495 unsigned int port_no)
496{
497 mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
498 return mmio;
499}
500
/* Convenience wrapper: ADMA register block for an ata_port. */
501static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
502{
503 return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
504}
505
/* ADMA general (host-wide) register block at BAR5 + NV_ADMA_GEN. */
506static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
507{
508 return (ap->host->mmio_base + NV_ADMA_GEN);
509}
510
/* Per-port notifier-clear register: one 32-bit slot per port in the
   general block. */
511static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
512{
513 return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
514}
515
/* Switch the port into legacy register mode by clearing the ADMA GO bit;
   no-op if the port is already in register mode (tracked in pp->flags). */
516static void nv_adma_register_mode(struct ata_port *ap)
517{
518 void __iomem *mmio = nv_adma_ctl_block(ap);
519 struct nv_adma_port_priv *pp = ap->private_data;
520 u16 tmp;
521
522 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
523 return;
524
525 tmp = readw(mmio + NV_ADMA_CTL);
526 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
527
528 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
529}
530
/* Switch the port into ADMA mode by setting the GO bit. Must not be called
   once ATAPI setup has forced the port to legacy operation (WARN_ON). */
531static void nv_adma_mode(struct ata_port *ap)
532{
533 void __iomem *mmio = nv_adma_ctl_block(ap);
534 struct nv_adma_port_priv *pp = ap->private_data;
535 u16 tmp;
536
537 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
538 return;
539
540 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
541
542 tmp = readw(mmio + NV_ADMA_CTL);
543 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
544
545 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
546}
547
fbbb262d
RH
/* Per-device SCSI configure hook for ADMA ports. For ATAPI devices the
   legacy interface must be used, so DMA limits are tightened and ADMA is
   disabled for the port via PCI config register 20; for ATA devices the full
   ADMA limits are applied and ADMA is (re)enabled. */
548static int nv_adma_slave_config(struct scsi_device *sdev)
549{
550 struct ata_port *ap = ata_shost_to_port(sdev->host);
2dec7555
RH
551 struct nv_adma_port_priv *pp = ap->private_data;
552 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
fbbb262d
RH
553 u64 bounce_limit;
554 unsigned long segment_boundary;
555 unsigned short sg_tablesize;
556 int rc;
2dec7555
RH
557 int adma_enable;
558 u32 current_reg, new_reg, config_mask;
fbbb262d
RH
559
560 rc = ata_scsi_slave_config(sdev);
561
562 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
563 /* Not a proper libata device, ignore */
564 return rc;
565
566 if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
567 /*
568 * NVIDIA reports that ADMA mode does not support ATAPI commands.
569 * Therefore ATAPI commands are sent through the legacy interface.
570 * However, the legacy interface only supports 32-bit DMA.
571 * Restrict DMA parameters as required by the legacy interface
572 * when an ATAPI device is connected.
573 */
574 bounce_limit = ATA_DMA_MASK;
575 segment_boundary = ATA_DMA_BOUNDARY;
576 /* Subtract 1 since an extra entry may be needed for padding, see
577 libata-scsi.c */
578 sg_tablesize = LIBATA_MAX_PRD - 1;
2dec7555
RH
579
580 /* Since the legacy DMA engine is in use, we need to disable ADMA
581 on the port. */
582 adma_enable = 0;
583 nv_adma_register_mode(ap);
fbbb262d
RH
584 }
585 else {
586 bounce_limit = *ap->dev->dma_mask;
587 segment_boundary = NV_ADMA_DMA_BOUNDARY;
588 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
2dec7555 589 adma_enable = 1;
fbbb262d 590 }
2dec7555
RH
591
592 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
593
594 if(ap->port_no == 1)
595 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
596 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
597 else
598 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
599 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
600
601 if(adma_enable) {
602 new_reg = current_reg | config_mask;
603 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
604 }
605 else {
606 new_reg = current_reg & ~config_mask;
607 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
608 }
609
610 if(current_reg != new_reg)
611 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
612
fbbb262d
RH
613 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
614 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
615 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
616 ata_port_printk(ap, KERN_INFO,
617 "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
618 (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
619 return rc;
620}
621
2dec7555
RH
/* check_atapi_dma hook: returns nonzero until the ATAPI legacy setup in
   nv_adma_slave_config has completed — presumably telling libata to avoid
   ATAPI DMA until then; confirm against the libata core's use of this hook. */
622static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
623{
624 struct nv_adma_port_priv *pp = qc->ap->private_data;
625 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
626}
627
/* Encode an ATA taskfile into the CPB's 16-bit (register << 8 | value) pairs.
   Non-LBA48 commands pad the HOB slots with IGN entries so the layout is
   fixed-size; the final entry carries CMDEND. Returns the number of entries
   written (always 12). */
628static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
fbbb262d
RH
629{
630 unsigned int idx = 0;
631
632 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
633
634 if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
635 cpb[idx++] = cpu_to_le16(IGN);
636 cpb[idx++] = cpu_to_le16(IGN);
637 cpb[idx++] = cpu_to_le16(IGN);
638 cpb[idx++] = cpu_to_le16(IGN);
639 cpb[idx++] = cpu_to_le16(IGN);
640 }
641 else {
642 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
643 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
644 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
645 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
646 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
647 }
648 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
649 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
650 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
651 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
652 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
653
654 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
655
656 return idx;
657}
658
fbbb262d
RH
/* Inspect the response flags of CPB @cpb_num and, if the command finished
   (or @force_err is set), complete the matching qc, folding any error bits
   into its err_mask. ATA status is only read for non-NCQ commands because the
   shadow status is meaningless for a completed NCQ tag. */
659static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
660{
661 struct nv_adma_port_priv *pp = ap->private_data;
662 int complete = 0, have_err = 0;
2dec7555 663 u8 flags = pp->cpb[cpb_num].resp_flags;
fbbb262d
RH
664
665 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
666
667 if (flags & NV_CPB_RESP_DONE) {
668 VPRINTK("CPB flags done, flags=0x%x\n", flags);
669 complete = 1;
670 }
671 if (flags & NV_CPB_RESP_ATA_ERR) {
672 ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
673 have_err = 1;
674 complete = 1;
675 }
676 if (flags & NV_CPB_RESP_CMD_ERR) {
677 ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
678 have_err = 1;
679 complete = 1;
680 }
681 if (flags & NV_CPB_RESP_CPB_ERR) {
682 ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
683 have_err = 1;
684 complete = 1;
685 }
686 if(complete || force_err)
687 {
688 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
689 if(likely(qc)) {
690 u8 ata_status = 0;
691 /* Only use the ATA port status for non-NCQ commands.
692 For NCQ commands the current status may have nothing to do with
693 the command just completed. */
694 if(qc->tf.protocol != ATA_PROT_NCQ)
695 ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));
696
697 if(have_err || force_err)
698 ata_status |= ATA_ERR;
699
700 qc->err_mask |= ac_err_mask(ata_status);
701 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
702 qc->err_mask);
703 ata_qc_complete(qc);
704 }
705 }
706}
707
2dec7555
RH
/* Legacy-path interrupt service for one port: freeze on hotplug events,
   ignore interrupts that are not ours, otherwise hand the active qc to the
   standard libata interrupt handler. Returns nonzero if the IRQ was ours. */
708static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
709{
710 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
711 int handled;
712
713 /* freeze if hotplugged */
714 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
715 ata_port_freeze(ap);
716 return 1;
717 }
718
719 /* bail out if not our interrupt */
720 if (!(irq_stat & NV_INT_DEV))
721 return 0;
722
723 /* DEV interrupt w/ no active qc? */
724 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
725 ata_check_status(ap);
726 return 1;
727 }
728
729 /* handle interrupt */
730 handled = ata_host_intr(ap, qc);
731 if (unlikely(!handled)) {
732 /* spurious, clear it */
733 ata_check_status(ap);
734 }
735
736 return 1;
737}
738
fbbb262d
RH
/* Top-level IRQ handler for ADMA hosts. For each enabled port: ports parked
   in register mode are routed to the legacy nv_host_intr() path; otherwise
   the ADMA notifier/status registers are read, the status is cleared (with a
   posted-write flush and rmb so later CPB reads are ordered after the clear),
   hotplug events freeze the port, global errors are noted, and completed
   CPBs — by active_tag for non-NCQ or by scanning sactive for NCQ — are
   finished via nv_adma_check_cpb(). Both notifier-clear registers are written
   together at the end, per NVIDIA's requirement. */
739static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
740{
741 struct ata_host *host = dev_instance;
742 int i, handled = 0;
2dec7555 743 u32 notifier_clears[2];
fbbb262d
RH
744
745 spin_lock(&host->lock);
746
747 for (i = 0; i < host->n_ports; i++) {
748 struct ata_port *ap = host->ports[i];
2dec7555 749 notifier_clears[i] = 0;
fbbb262d
RH
750
751 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
752 struct nv_adma_port_priv *pp = ap->private_data;
753 void __iomem *mmio = nv_adma_ctl_block(ap);
754 u16 status;
755 u32 gen_ctl;
756 int have_global_err = 0;
757 u32 notifier, notifier_error;
758
759 /* if in ATA register mode, use standard ata interrupt handler */
760 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
2dec7555
RH
761 u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
762 >> (NV_INT_PORT_SHIFT * i);
763 handled += nv_host_intr(ap, irq_stat);
fbbb262d
RH
764 continue;
765 }
766
767 notifier = readl(mmio + NV_ADMA_NOTIFIER);
768 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
2dec7555 769 notifier_clears[i] = notifier | notifier_error;
fbbb262d
RH
770
771 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
772
fbbb262d
RH
773 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
774 !notifier_error)
775 /* Nothing to do */
776 continue;
777
778 status = readw(mmio + NV_ADMA_STAT);
779
780 /* Clear status. Ensure the controller sees the clearing before we start
781 looking at any of the CPB statuses, so that any CPB completions after
782 this point in the handler will raise another interrupt. */
783 writew(status, mmio + NV_ADMA_STAT);
784 readw(mmio + NV_ADMA_STAT); /* flush posted write */
785 rmb();
786
787 /* freeze if hotplugged */
788 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
789 ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
790 ata_port_freeze(ap);
791 handled++;
792 continue;
793 }
794
795 if (status & NV_ADMA_STAT_TIMEOUT) {
796 ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
797 have_global_err = 1;
798 }
799 if (status & NV_ADMA_STAT_CPBERR) {
800 ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
801 have_global_err = 1;
802 }
803 if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
804 /** Check CPBs for completed commands */
805
806 if(ata_tag_valid(ap->active_tag))
807 /* Non-NCQ command */
808 nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
809 (notifier_error & (1 << ap->active_tag)));
810 else {
811 int pos;
812 u32 active = ap->sactive;
813 while( (pos = ffs(active)) ) {
814 pos--;
815 nv_adma_check_cpb(ap, pos, have_global_err ||
816 (notifier_error & (1 << pos)) );
817 active &= ~(1 << pos );
818 }
819 }
820 }
821
822 handled++; /* irq handled if we got here */
823 }
824 }
2dec7555
RH
825
826 if(notifier_clears[0] || notifier_clears[1]) {
827 /* Note: Both notifier clear registers must be written
828 if either is set, even if one is zero, according to NVIDIA. */
829 writel(notifier_clears[0],
830 nv_adma_notifier_clear_block(host->ports[0]));
831 writel(notifier_clears[1],
832 nv_adma_notifier_clear_block(host->ports[1]));
833 }
fbbb262d
RH
834
835 spin_unlock(&host->lock);
836
837 return IRQ_RETVAL(handled);
838}
839
/* irq_clear hook: acknowledge everything — ADMA status, both notifier
   registers, and the legacy BMDMA status (read-then-write-back clears it). */
840static void nv_adma_irq_clear(struct ata_port *ap)
841{
842 void __iomem *mmio = nv_adma_ctl_block(ap);
843 u16 status = readw(mmio + NV_ADMA_STAT);
844 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
845 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
2dec7555 846 unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
fbbb262d
RH
847
848 /* clear ADMA status */
849 writew(status, mmio + NV_ADMA_STAT);
850 writel(notifier | notifier_error,
851 nv_adma_notifier_clear_block(ap));
852
853 /** clear legacy status */
2dec7555 854 outb(inb(dma_stat_addr), dma_stat_addr);
fbbb262d
RH
855}
856
/* Legacy BMDMA setup, valid only while the port is in register mode
   (WARN_ON otherwise): program the PRD table address and direction, then
   issue the command. Note !rw sets ATA_DMA_WR — same convention as
   ata_bmdma_setup, where ATA_DMA_WR means the DMA engine writes to memory. */
857static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
858{
2dec7555
RH
859 struct ata_port *ap = qc->ap;
860 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
861 struct nv_adma_port_priv *pp = ap->private_data;
862 u8 dmactl;
fbbb262d 863
2dec7555 864 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
fbbb262d
RH
865 WARN_ON(1);
866 return;
867 }
868
2dec7555
RH
869 /* load PRD table addr. */
870 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
871
872 /* specify data direction, triple-check start bit is clear */
873 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
874 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
875 if (!rw)
876 dmactl |= ATA_DMA_WR;
877
878 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
879
880 /* issue r/w command */
881 ata_exec_command(ap, &qc->tf);
fbbb262d
RH
882}
883
/* Kick off the legacy BMDMA transfer by setting the START bit; only legal
   while the port is in register mode (WARN_ON otherwise). */
884static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
885{
2dec7555
RH
886 struct ata_port *ap = qc->ap;
887 struct nv_adma_port_priv *pp = ap->private_data;
888 u8 dmactl;
fbbb262d 889
2dec7555 890 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
fbbb262d
RH
891 WARN_ON(1);
892 return;
893 }
894
2dec7555
RH
895 /* start host DMA transaction */
896 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
897 outb(dmactl | ATA_DMA_START,
898 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
fbbb262d
RH
899}
900
/* Stop a legacy BMDMA transfer; silently does nothing when the port is in
   ADMA mode (also serves as post_internal_cmd for nv_adma_ops). */
901static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
902{
2dec7555 903 struct ata_port *ap = qc->ap;
fbbb262d
RH
904 struct nv_adma_port_priv *pp = ap->private_data;
905
2dec7555 906 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
fbbb262d
RH
907 return;
908
2dec7555
RH
909 /* clear start/stop bit */
910 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
911 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
fbbb262d 912
2dec7555
RH
913 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
914 ata_altstatus(ap); /* dummy read */
fbbb262d
RH
915}
916
/* Read the legacy BMDMA status byte; only meaningful in register mode
   (WARN_ON documents that expectation). */
2dec7555 917static u8 nv_adma_bmdma_status(struct ata_port *ap)
fbbb262d 918{
fbbb262d 919 struct nv_adma_port_priv *pp = ap->private_data;
fbbb262d 920
2dec7555 921 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
fbbb262d 922
2dec7555 923 return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
fbbb262d
RH
924}
925
/* Port init for ADMA: allocate one coherent DMA chunk holding the 32 CPBs
   followed by the overflow APRD tables, program the CPB base registers,
   clear pending status and the CPB fetch count, then pulse CHANNEL_RESET
   (with posted-write flushes) leaving the port in register mode.
   On failure unwinds via goto cleanup. Returns 0 or -ENOMEM/ata_port_start
   error. */
926static int nv_adma_port_start(struct ata_port *ap)
927{
928 struct device *dev = ap->host->dev;
929 struct nv_adma_port_priv *pp;
930 int rc;
931 void *mem;
932 dma_addr_t mem_dma;
933 void __iomem *mmio = nv_adma_ctl_block(ap);
934 u16 tmp;
935
936 VPRINTK("ENTER\n");
937
938 rc = ata_port_start(ap);
939 if (rc)
940 return rc;
941
942 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
943 if (!pp) {
944 rc = -ENOMEM;
945 goto err_out;
946 }
947
948 mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
949 &mem_dma, GFP_KERNEL);
950
951 if (!mem) {
952 rc = -ENOMEM;
953 goto err_out_kfree;
954 }
955 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
956
957 /*
958 * First item in chunk of DMA memory:
959 * 128-byte command parameter block (CPB)
960 * one for each command tag
961 */
962 pp->cpb = mem;
963 pp->cpb_dma = mem_dma;
964
965 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
966 writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
967
968 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
969 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
970
971 /*
972 * Second item: block of ADMA_SGTBL_LEN s/g entries
973 */
974 pp->aprd = mem;
975 pp->aprd_dma = mem_dma;
976
977 ap->private_data = pp;
978
979 /* clear any outstanding interrupt conditions */
980 writew(0xffff, mmio + NV_ADMA_STAT);
981
982 /* initialize port variables */
983 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
984
985 /* clear CPB fetch count */
986 writew(0, mmio + NV_ADMA_CPB_COUNT);
987
988 /* clear GO for register mode */
989 tmp = readw(mmio + NV_ADMA_CTL);
990 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
991
992 tmp = readw(mmio + NV_ADMA_CTL);
993 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
994 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
995 udelay(1);
996 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
997 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
998
999 return 0;
1000
1001err_out_kfree:
1002 kfree(pp);
1003err_out:
1004 ata_port_stop(ap);
1005 return rc;
1006}
1007
1008static void nv_adma_port_stop(struct ata_port *ap)
1009{
1010 struct device *dev = ap->host->dev;
1011 struct nv_adma_port_priv *pp = ap->private_data;
1012 void __iomem *mmio = nv_adma_ctl_block(ap);
1013
1014 VPRINTK("ENTER\n");
1015
1016 writew(0, mmio + NV_ADMA_CTL);
1017
1018 ap->private_data = NULL;
1019 dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
1020 kfree(pp);
1021 ata_port_stop(ap);
1022}
1023
1024
1025static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1026{
1027 void __iomem *mmio = probe_ent->mmio_base;
1028 struct ata_ioports *ioport = &probe_ent->port[port];
1029
1030 VPRINTK("ENTER\n");
1031
1032 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1033
1034 ioport->cmd_addr = (unsigned long) mmio;
1035 ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4);
1036 ioport->error_addr =
1037 ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4);
1038 ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4);
1039 ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4);
1040 ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4);
1041 ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4);
1042 ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4);
1043 ioport->status_addr =
1044 ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4);
1045 ioport->altstatus_addr =
1046 ioport->ctl_addr = (unsigned long) mmio + 0x20;
1047}
1048
1049static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
1050{
1051 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1052 unsigned int i;
1053 u32 tmp32;
1054
1055 VPRINTK("ENTER\n");
1056
1057 /* enable ADMA on the ports */
1058 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1059 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1060 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1061 NV_MCP_SATA_CFG_20_PORT1_EN |
1062 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1063
1064 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1065
1066 for (i = 0; i < probe_ent->n_ports; i++)
1067 nv_adma_setup_port(probe_ent, i);
1068
1069 for (i = 0; i < probe_ent->n_ports; i++) {
1070 void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
1071 u16 tmp;
1072
1073 /* enable interrupt, clear reset if not already clear */
1074 tmp = readw(mmio + NV_ADMA_CTL);
1075 writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1076 }
1077
1078 return 0;
1079}
1080
1081static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1082 struct scatterlist *sg,
1083 int idx,
1084 struct nv_adma_prd *aprd)
1085{
2dec7555 1086 u8 flags;
fbbb262d
RH
1087
1088 memset(aprd, 0, sizeof(struct nv_adma_prd));
1089
1090 flags = 0;
1091 if (qc->tf.flags & ATA_TFLAG_WRITE)
1092 flags |= NV_APRD_WRITE;
1093 if (idx == qc->n_elem - 1)
1094 flags |= NV_APRD_END;
1095 else if (idx != 4)
1096 flags |= NV_APRD_CONT;
1097
1098 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1099 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
2dec7555 1100 aprd->flags = flags;
fbbb262d
RH
1101}
1102
1103static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1104{
1105 struct nv_adma_port_priv *pp = qc->ap->private_data;
1106 unsigned int idx;
1107 struct nv_adma_prd *aprd;
1108 struct scatterlist *sg;
1109
1110 VPRINTK("ENTER\n");
1111
1112 idx = 0;
1113
1114 ata_for_each_sg(sg, qc) {
1115 aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1116 nv_adma_fill_aprd(qc, sg, idx, aprd);
1117 idx++;
1118 }
1119 if (idx > 5)
1120 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1121}
1122
1123static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1124{
1125 struct nv_adma_port_priv *pp = qc->ap->private_data;
1126 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1127 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1128 NV_CPB_CTL_APRD_VALID |
1129 NV_CPB_CTL_IEN;
1130
1131 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1132
1133 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
2dec7555
RH
1134 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1135 nv_adma_register_mode(qc->ap);
fbbb262d
RH
1136 ata_qc_prep(qc);
1137 return;
1138 }
1139
1140 memset(cpb, 0, sizeof(struct nv_adma_cpb));
1141
1142 cpb->len = 3;
1143 cpb->tag = qc->tag;
1144 cpb->next_cpb_idx = 0;
1145
1146 /* turn on NCQ flags for NCQ commands */
1147 if (qc->tf.protocol == ATA_PROT_NCQ)
1148 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1149
1150 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1151
1152 nv_adma_fill_sg(qc, cpb);
1153
1154 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1155 finished filling in all of the contents */
1156 wmb();
1157 cpb->ctl_flags = ctl_flags;
1158}
1159
1160static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1161{
2dec7555 1162 struct nv_adma_port_priv *pp = qc->ap->private_data;
fbbb262d
RH
1163 void __iomem *mmio = nv_adma_ctl_block(qc->ap);
1164
1165 VPRINTK("ENTER\n");
1166
1167 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
2dec7555 1168 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
fbbb262d
RH
1169 /* use ATA register mode */
1170 VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
1171 nv_adma_register_mode(qc->ap);
1172 return ata_qc_issue_prot(qc);
1173 } else
1174 nv_adma_mode(qc->ap);
1175
1176 /* write append register, command tag in lower 8 bits
1177 and (number of cpbs to append -1) in top 8 bits */
1178 wmb();
1179 writew(qc->tag, mmio + NV_ADMA_APPEND);
1180
1181 DPRINTK("Issued tag %u\n",qc->tag);
1182
1183 return 0;
1184}
1185
7d12e780 1186static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1da177e4 1187{
cca3974e 1188 struct ata_host *host = dev_instance;
1da177e4
LT
1189 unsigned int i;
1190 unsigned int handled = 0;
1191 unsigned long flags;
1192
cca3974e 1193 spin_lock_irqsave(&host->lock, flags);
1da177e4 1194
cca3974e 1195 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
1196 struct ata_port *ap;
1197
cca3974e 1198 ap = host->ports[i];
c1389503 1199 if (ap &&
029f5468 1200 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
1201 struct ata_queued_cmd *qc;
1202
1203 qc = ata_qc_from_tag(ap, ap->active_tag);
e50362ec 1204 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1da177e4 1205 handled += ata_host_intr(ap, qc);
b887030a
AC
1206 else
1207 // No request pending? Clear interrupt status
1208 // anyway, in case there's one pending.
1209 ap->ops->check_status(ap);
1da177e4
LT
1210 }
1211
1212 }
1213
cca3974e 1214 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
1215
1216 return IRQ_RETVAL(handled);
1217}
1218
cca3974e 1219static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
ada364e8
TH
1220{
1221 int i, handled = 0;
1222
cca3974e
JG
1223 for (i = 0; i < host->n_ports; i++) {
1224 struct ata_port *ap = host->ports[i];
ada364e8
TH
1225
1226 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1227 handled += nv_host_intr(ap, irq_stat);
1228
1229 irq_stat >>= NV_INT_PORT_SHIFT;
1230 }
1231
1232 return IRQ_RETVAL(handled);
1233}
1234
7d12e780 1235static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
ada364e8 1236{
cca3974e 1237 struct ata_host *host = dev_instance;
ada364e8
TH
1238 u8 irq_stat;
1239 irqreturn_t ret;
1240
cca3974e
JG
1241 spin_lock(&host->lock);
1242 irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1243 ret = nv_do_interrupt(host, irq_stat);
1244 spin_unlock(&host->lock);
ada364e8
TH
1245
1246 return ret;
1247}
1248
7d12e780 1249static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
ada364e8 1250{
cca3974e 1251 struct ata_host *host = dev_instance;
ada364e8
TH
1252 u8 irq_stat;
1253 irqreturn_t ret;
1254
cca3974e
JG
1255 spin_lock(&host->lock);
1256 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
1257 ret = nv_do_interrupt(host, irq_stat);
1258 spin_unlock(&host->lock);
ada364e8
TH
1259
1260 return ret;
1261}
1262
1da177e4
LT
1263static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
1264{
1da177e4
LT
1265 if (sc_reg > SCR_CONTROL)
1266 return 0xffffffffU;
1267
02cbd926 1268 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
1da177e4
LT
1269}
1270
1271static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
1272{
1da177e4
LT
1273 if (sc_reg > SCR_CONTROL)
1274 return;
1275
02cbd926 1276 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
1da177e4
LT
1277}
1278
39f87582
TH
1279static void nv_nf2_freeze(struct ata_port *ap)
1280{
cca3974e 1281 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1282 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1283 u8 mask;
1284
1285 mask = inb(scr_addr + NV_INT_ENABLE);
1286 mask &= ~(NV_INT_ALL << shift);
1287 outb(mask, scr_addr + NV_INT_ENABLE);
1288}
1289
1290static void nv_nf2_thaw(struct ata_port *ap)
1291{
cca3974e 1292 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1293 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1294 u8 mask;
1295
1296 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1297
1298 mask = inb(scr_addr + NV_INT_ENABLE);
1299 mask |= (NV_INT_MASK << shift);
1300 outb(mask, scr_addr + NV_INT_ENABLE);
1301}
1302
1303static void nv_ck804_freeze(struct ata_port *ap)
1304{
cca3974e 1305 void __iomem *mmio_base = ap->host->mmio_base;
39f87582
TH
1306 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1307 u8 mask;
1308
1309 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1310 mask &= ~(NV_INT_ALL << shift);
1311 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1312}
1313
1314static void nv_ck804_thaw(struct ata_port *ap)
1315{
cca3974e 1316 void __iomem *mmio_base = ap->host->mmio_base;
39f87582
TH
1317 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1318 u8 mask;
1319
1320 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1321
1322 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1323 mask |= (NV_INT_MASK << shift);
1324 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1325}
1326
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int unused_class;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &unused_class);
}
1337
1338static void nv_error_handler(struct ata_port *ap)
1339{
1340 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1341 nv_hardreset, ata_std_postreset);
1342}
1343
fbbb262d
RH
1344static void nv_adma_error_handler(struct ata_port *ap)
1345{
1346 struct nv_adma_port_priv *pp = ap->private_data;
1347 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1348 void __iomem *mmio = nv_adma_ctl_block(ap);
1349 int i;
1350 u16 tmp;
1351
1352 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1353 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1354 u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
1355 u32 status = readw(mmio + NV_ADMA_STAT);
1356
1357 ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1358 "notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
1359 notifier, notifier_error, gen_ctl, status);
1360
1361 for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
1362 struct nv_adma_cpb *cpb = &pp->cpb[i];
1363 if( cpb->ctl_flags || cpb->resp_flags )
1364 ata_port_printk(ap, KERN_ERR,
1365 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1366 i, cpb->ctl_flags, cpb->resp_flags);
1367 }
1368
1369 /* Push us back into port register mode for error handling. */
1370 nv_adma_register_mode(ap);
1371
1372 ata_port_printk(ap, KERN_ERR, "Resetting port\n");
1373
1374 /* Mark all of the CPBs as invalid to prevent them from being executed */
1375 for( i=0;i<NV_ADMA_MAX_CPBS;i++)
1376 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1377
1378 /* clear CPB fetch count */
1379 writew(0, mmio + NV_ADMA_CPB_COUNT);
1380
1381 /* Reset channel */
1382 tmp = readw(mmio + NV_ADMA_CTL);
1383 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1384 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1385 udelay(1);
1386 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1387 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1388 }
1389
1390 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1391 nv_hardreset, ata_std_postreset);
1392}
1393
1da177e4
LT
1394static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1395{
1396 static int printed_version = 0;
29da9f6d 1397 struct ata_port_info *ppi[2];
1da177e4
LT
1398 struct ata_probe_ent *probe_ent;
1399 int pci_dev_busy = 0;
1400 int rc;
1401 u32 bar;
02cbd926 1402 unsigned long base;
fbbb262d
RH
1403 unsigned long type = ent->driver_data;
1404 int mask_set = 0;
1da177e4
LT
1405
1406 // Make sure this is a SATA controller by counting the number of bars
1407 // (NVIDIA SATA controllers will always have six bars). Otherwise,
1408 // it's an IDE controller and we ignore it.
1409 for (bar=0; bar<6; bar++)
1410 if (pci_resource_start(pdev, bar) == 0)
1411 return -ENODEV;
1412
fbbb262d 1413 if ( !printed_version++)
a9524a76 1414 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1da177e4
LT
1415
1416 rc = pci_enable_device(pdev);
1417 if (rc)
1418 goto err_out;
1419
1420 rc = pci_request_regions(pdev, DRV_NAME);
1421 if (rc) {
1422 pci_dev_busy = 1;
1423 goto err_out_disable;
1424 }
1425
fbbb262d
RH
1426 if(type >= CK804 && adma_enabled) {
1427 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
1428 type = ADMA;
1429 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1430 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1431 mask_set = 1;
1432 }
1433
1434 if(!mask_set) {
1435 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1436 if (rc)
1437 goto err_out_regions;
1438 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1439 if (rc)
1440 goto err_out_regions;
1441 }
1da177e4
LT
1442
1443 rc = -ENOMEM;
1444
fbbb262d 1445 ppi[0] = ppi[1] = &nv_port_info[type];
29da9f6d 1446 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1da177e4
LT
1447 if (!probe_ent)
1448 goto err_out_regions;
1449
02cbd926
JG
1450 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
1451 if (!probe_ent->mmio_base) {
1452 rc = -EIO;
e6faf082 1453 goto err_out_free_ent;
02cbd926 1454 }
1da177e4 1455
02cbd926 1456 base = (unsigned long)probe_ent->mmio_base;
1da177e4 1457
02cbd926
JG
1458 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1459 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1da177e4 1460
ada364e8 1461 /* enable SATA space for CK804 */
fbbb262d 1462 if (type >= CK804) {
ada364e8
TH
1463 u8 regval;
1464
1465 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1466 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1467 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1468 }
1469
1da177e4
LT
1470 pci_set_master(pdev);
1471
fbbb262d
RH
1472 if (type == ADMA) {
1473 rc = nv_adma_host_init(probe_ent);
1474 if (rc)
1475 goto err_out_iounmap;
1476 }
1477
1da177e4
LT
1478 rc = ata_device_add(probe_ent);
1479 if (rc != NV_PORTS)
1480 goto err_out_iounmap;
1481
1da177e4
LT
1482 kfree(probe_ent);
1483
1484 return 0;
1485
1486err_out_iounmap:
02cbd926 1487 pci_iounmap(pdev, probe_ent->mmio_base);
1da177e4
LT
1488err_out_free_ent:
1489 kfree(probe_ent);
1490err_out_regions:
1491 pci_release_regions(pdev);
1492err_out_disable:
1493 if (!pci_dev_busy)
1494 pci_disable_device(pdev);
1495err_out:
1496 return rc;
1497}
1498
cca3974e 1499static void nv_ck804_host_stop(struct ata_host *host)
ada364e8 1500{
cca3974e 1501 struct pci_dev *pdev = to_pci_dev(host->dev);
ada364e8
TH
1502 u8 regval;
1503
1504 /* disable SATA space for CK804 */
1505 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1506 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1507 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1508
cca3974e 1509 ata_pci_host_stop(host);
ada364e8
TH
1510}
1511
fbbb262d
RH
1512static void nv_adma_host_stop(struct ata_host *host)
1513{
1514 struct pci_dev *pdev = to_pci_dev(host->dev);
1515 int i;
1516 u32 tmp32;
1517
1518 for (i = 0; i < host->n_ports; i++) {
1519 void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
1520 u16 tmp;
1521
1522 /* disable interrupt */
1523 tmp = readw(mmio + NV_ADMA_CTL);
1524 writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1525 }
1526
1527 /* disable ADMA on the ports */
1528 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1529 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1530 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1531 NV_MCP_SATA_CFG_20_PORT1_EN |
1532 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1533
1534 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1535
1536 nv_ck804_host_stop(host);
1537}
1538
1da177e4
LT
1539static int __init nv_init(void)
1540{
b7887196 1541 return pci_register_driver(&nv_pci_driver);
1da177e4
LT
1542}
1543
1544static void __exit nv_exit(void)
1545{
1546 pci_unregister_driver(&nv_pci_driver);
1547}
1548
1549module_init(nv_init);
1550module_exit(nv_exit);
fbbb262d
RH
1551module_param_named(adma, adma_enabled, bool, 0444);
1552MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
This page took 0.207854 seconds and 5 git commands to generate.