/*
 * pata_amd.c - AMD PATA for new ATA layer
 *	(C) 2005-2006 Red Hat Inc
 *	Alan Cox <alan@redhat.com>
 *
 * Based on pata-sil680. Errata information is taken from data sheets
 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 * claimed by sata-nv.c.
 *
 * TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 * Documentation publicly available.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.3.10"

/**
 *	timing_setup		-	shared timing computation and load
 *	@ap: ATA port being set up
 *	@adev: drive being configured
 *	@offset: port offset
 *	@speed: target speed
 *	@clock: clock multiplier (number of times 33MHz for this part)
 *
 *	Perform the actual timing set up for Nvidia or AMD PATA devices.
 *	The actual devices vary so they all call into this helper function
 *	providing the clock multiplier and offset (because AMD and Nvidia put
 *	the ports at different locations).
 */

static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
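	/*
	 * Map a UDMA cycle count (in chipset clocks) to the value placed
	 * in the low bits of the per-drive UDMA timing register below.
	 */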
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* kHz */
	u8 t;

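	/*
	 * T is the 33MHz clock period and UT the UDMA cycle time, in the
	 * units ata_timing_compute() expects.  UT is T for a 1x part and
	 * T/2 for anything clocked at 2x or faster.
	 */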
	T = 1000000000 / amd_clock;
	UT = T / min_t(int, max_t(int, clock, 1), 2);

	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
		return;
	}

	if (peer) {
		/* This may be over conservative */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}

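	/*
	 * UDMA5 (20ns) and UDMA6 (15ns) cycles are shorter than one 33MHz
	 * clock, so force fixed cycle counts (1 and 15) instead of the
	 * computed value; amd_cyc2udma turns these into the register
	 * encodings used for those modes.
	 */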
	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;

	/*
	 * Now do the setup work
	 */

	/* Configure the address set up timing */
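	/*
	 * Each drive owns a two bit field in this register: drive dn sits
	 * at bit position (3 - dn) * 2 and holds its setup clock count
	 * minus one, clamped to 1-4 clocks.
	 */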
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));

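	/*
	 * Build the UDMA timing byte for this drive: 0xc0 | encoding when
	 * a UDMA mode is in use, 0x03 otherwise (the latter is never
	 * written, as the register update below is guarded by at.udma).
	 */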
	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
		break;

	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
		break;

	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
		break;

	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
		break;

	default:
		return;
	}

	/* UDMA timing */
	if (at.udma)
		pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}

/**
 *	amd_pre_reset		-	perform reset handling
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset sequence checking enable bits to see which ports are
 *	active.
 */

static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void amd_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_pre_reset, ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int amd_cable_detect(struct ata_port *ap)
{
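	/*
	 * Config register 0x42 carries a cable indication bit pair per
	 * channel (bits 1:0 for the primary port, bits 3:2 for the
	 * secondary); any set bit is taken to mean an 80-wire cable.
	 */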
	static const u32 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 *	amd33_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the AMD registers for PIO mode.
 */

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}

/**
 *	amd33_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipsets.
 */

static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}

/* Both host-side and drive-side detection results are worthless on NV
 * PATAs. Ignore them and just follow what BIOS configured. Both the
 * current configuration in PCI config reg and ACPI GTM result are
 * cached during driver attach and are consulted to select transfer
 * mode.
 */
static unsigned long nv_mode_filter(struct ata_device *dev,
				    unsigned long xfer_mask)
{
	static const unsigned int udma_mask_map[] =
		{ ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
		  ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 };
	struct ata_port *ap = dev->link->ap;
	char acpi_str[32] = "";
	u32 saved_udma, udma;
	const struct ata_acpi_gtm *gtm;
	unsigned long bios_limit = 0, acpi_limit = 0, limit;

	/* find out what BIOS configured */
	udma = saved_udma = (unsigned long)ap->host->private_data;

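	/*
	 * The cached dword (PCI config reg 0x60, saved at attach time)
	 * holds one byte per drive: port 0 lives in the upper 16 bits and
	 * device 0 in the upper byte of each half.  The BIOS setting is
	 * only trusted when both of bits 7:6 are set; bits 2:0 then index
	 * udma_mask_map.
	 */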
	if (ap->port_no == 0)
		udma >>= 16;
	if (dev->devno == 0)
		udma >>= 8;

	if ((udma & 0xc0) == 0xc0)
		bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]);

	/* consult ACPI GTM too */
	gtm = ata_acpi_init_gtm(ap);
	if (gtm) {
		acpi_limit = ata_acpi_gtm_xfermask(dev, gtm);

		snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)",
			 gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags);
	}

	/* be optimistic, EH can take care of things if something goes wrong */
	limit = bios_limit | acpi_limit;

	/* If PIO or DMA isn't configured at all, don't limit. Let EH
	 * handle it.
	 */
	if (!(limit & ATA_MASK_PIO))
		limit |= ATA_MASK_PIO;
	if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
		limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;

	ata_port_printk(ap, KERN_DEBUG, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
			"BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
			xfer_mask, limit, xfer_mask & limit, bios_limit,
			saved_udma, acpi_limit, acpi_str);

	return xfer_mask & limit;
}

/**
 *	nv_pre_reset		-	perform reset handling
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset sequence checking enable bits to see which ports are
 *	active.
 */

static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, nv_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

/**
 *	nv100_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the Nvidia registers for PIO mode.
 */

static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}

/**
 *	nv100_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the Nvidia chipsets.
 */

static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}

static void nv_host_stop(struct ata_host *host)
{
	u32 udma = (unsigned long)host->private_data;

	/* restore PCI config register 0x60 */
	pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
}

static struct scsi_host_template amd_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct ata_port_operations amd33_port_ops = {
	.set_piomode	= amd33_set_piomode,
	.set_dmamode	= amd33_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_40wire,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};

static struct ata_port_operations amd66_port_ops = {
	.set_piomode	= amd66_set_piomode,
	.set_dmamode	= amd66_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_unknown,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};

static struct ata_port_operations amd100_port_ops = {
	.set_piomode	= amd100_set_piomode,
	.set_dmamode	= amd100_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_unknown,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};

static struct ata_port_operations amd133_port_ops = {
	.set_piomode	= amd133_set_piomode,
	.set_dmamode	= amd133_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= amd_cable_detect,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};

static struct ata_port_operations nv100_port_ops = {
	.set_piomode	= nv100_set_piomode,
	.set_dmamode	= nv100_set_dmamode,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_ignore,
	.mode_filter	= nv_mode_filter,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
	.host_stop	= nv_host_stop,
};

static struct ata_port_operations nv133_port_ops = {
	.set_piomode	= nv133_set_piomode,
	.set_dmamode	= nv133_set_dmamode,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_ignore,
	.mode_filter	= nv_mode_filter,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
	.host_stop	= nv_host_stop,
};

static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info[10] = {
		{ /* 0: AMD 7401 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,	/* No SWDMA */
			.udma_mask = 0x07,	/* UDMA 33 */
			.port_ops = &amd33_port_ops
		},
		{ /* 1: Early AMD7409 - no swdma */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 2: AMD 7409, no swdma errata */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 3: AMD 7411 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 4: AMD 7441 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 5: AMD 8111 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 6: AMD 8111 UDMA 100 (Serenade) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 7: Nvidia Nforce */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &nv100_port_ops
		},
		{ /* 8: Nvidia Nforce2 and later */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &nv133_port_ops
		},
		{ /* 9: AMD CS5536 (Geode companion) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		}
	};
	struct ata_port_info pi;
	const struct ata_port_info *ppi[] = { &pi, NULL };
	static int printed_version;
	int type = id->driver_data;
	u8 fifo;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

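	/*
	 * fifo caches config register 0x41 (FIFO configuration); it is
	 * written back below once the chip type is known, since the 7411
	 * erratum handling requires its FIFO to stay disabled.
	 */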
	pci_read_config_byte(pdev, 0x41, &fifo);

	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && pdev->revision > 0x7)
		type = 2;

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
	    pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

	/*
	 * Okay, type is determined now. Apply type-specific workarounds.
	 */
	pi = info[type];

	if (type < 3)
		ata_pci_clear_simplex(pdev);

	/* Check for AMD7411 */
	if (type == 3)
		/* FIFO is broken */
		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
	else
		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);

	/* Cable detection on Nvidia chips doesn't work too well,
	 * cache BIOS programmed UDMA mode.
	 */
	if (type == 7 || type == 8) {
		u32 udma;

		pci_read_config_dword(pdev, 0x60, &udma);
		pi.private_data = (void *)(unsigned long)udma;
	}

	/* And fire it up */
	return ata_pci_init_one(pdev, ppi);
}

#ifdef CONFIG_PM
static int amd_reinit_one(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		u8 fifo;
		pci_read_config_byte(pdev, 0x41, &fifo);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
			/* FIFO is broken */
			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
		else
			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_clear_simplex(pdev);
	}
	return ata_pci_device_resume(pdev);
}
#endif

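/* driver_data is the index into the info[] table in amd_init_one() */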
static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },

	{ },
};

static struct pci_driver amd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= amd,
	.probe		= amd_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= amd_reinit_one,
#endif
};

static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);