ide: add struct ide_port_ops (take 2)
[deliverable/linux.git] / drivers / ide / mips / au1xxx-ide.c
1 /*
2 * BRIEF MODULE DESCRIPTION
3 * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
4 *
5 * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
6 *
7 * This program is free software; you can redistribute it and/or modify it under
8 * the terms of the GNU General Public License as published by the Free Software
9 * Foundation; either version 2 of the License, or (at your option) any later
10 * version.
11 *
12 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
13 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
14 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
15 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
17 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
18 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21 * POSSIBILITY OF SUCH DAMAGE.
22 *
23 * You should have received a copy of the GNU General Public License along with
24 * this program; if not, write to the Free Software Foundation, Inc.,
25 * 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 * Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE
28 * Interface and Linux Device Driver" Application Note.
29 */
30 #include <linux/types.h>
31 #include <linux/module.h>
32 #include <linux/kernel.h>
33 #include <linux/delay.h>
34 #include <linux/platform_device.h>
35 #include <linux/init.h>
36 #include <linux/ide.h>
37 #include <linux/scatterlist.h>
38
39 #include <asm/mach-au1x00/au1xxx.h>
40 #include <asm/mach-au1x00/au1xxx_dbdma.h>
41 #include <asm/mach-au1x00/au1xxx_ide.h>
42
43 #define DRV_NAME "au1200-ide"
44 #define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"
45
46 /* enable the burstmode in the dbdma */
47 #define IDE_AU1XXX_BURSTMODE 1
48
49 static _auide_hwif auide_hwif;
50 static int dbdma_init_done;
51
52 static int auide_ddma_init(_auide_hwif *auide);
53
54 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
55
56 void auide_insw(unsigned long port, void *addr, u32 count)
57 {
58 _auide_hwif *ahwif = &auide_hwif;
59 chan_tab_t *ctp;
60 au1x_ddma_desc_t *dp;
61
62 if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
63 DDMA_FLAGS_NOIE)) {
64 printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
65 return;
66 }
67 ctp = *((chan_tab_t **)ahwif->rx_chan);
68 dp = ctp->cur_ptr;
69 while (dp->dscr_cmd0 & DSCR_CMD0_V)
70 ;
71 ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
72 }
73
74 void auide_outsw(unsigned long port, void *addr, u32 count)
75 {
76 _auide_hwif *ahwif = &auide_hwif;
77 chan_tab_t *ctp;
78 au1x_ddma_desc_t *dp;
79
80 if(!put_source_flags(ahwif->tx_chan, (void*)addr,
81 count << 1, DDMA_FLAGS_NOIE)) {
82 printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
83 return;
84 }
85 ctp = *((chan_tab_t **)ahwif->tx_chan);
86 dp = ctp->cur_ptr;
87 while (dp->dscr_cmd0 & DSCR_CMD0_V)
88 ;
89 ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
90 }
91
92 #endif
93
94 static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
95 {
96 int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
97
98 /* set pio mode! */
99 switch(pio) {
100 case 0:
101 mem_sttime = SBC_IDE_TIMING(PIO0);
102
103 /* set configuration for RCS2# */
104 mem_stcfg |= TS_MASK;
105 mem_stcfg &= ~TCSOE_MASK;
106 mem_stcfg &= ~TOECS_MASK;
107 mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
108 break;
109
110 case 1:
111 mem_sttime = SBC_IDE_TIMING(PIO1);
112
113 /* set configuration for RCS2# */
114 mem_stcfg |= TS_MASK;
115 mem_stcfg &= ~TCSOE_MASK;
116 mem_stcfg &= ~TOECS_MASK;
117 mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
118 break;
119
120 case 2:
121 mem_sttime = SBC_IDE_TIMING(PIO2);
122
123 /* set configuration for RCS2# */
124 mem_stcfg &= ~TS_MASK;
125 mem_stcfg &= ~TCSOE_MASK;
126 mem_stcfg &= ~TOECS_MASK;
127 mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
128 break;
129
130 case 3:
131 mem_sttime = SBC_IDE_TIMING(PIO3);
132
133 /* set configuration for RCS2# */
134 mem_stcfg &= ~TS_MASK;
135 mem_stcfg &= ~TCSOE_MASK;
136 mem_stcfg &= ~TOECS_MASK;
137 mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
138
139 break;
140
141 case 4:
142 mem_sttime = SBC_IDE_TIMING(PIO4);
143
144 /* set configuration for RCS2# */
145 mem_stcfg &= ~TS_MASK;
146 mem_stcfg &= ~TCSOE_MASK;
147 mem_stcfg &= ~TOECS_MASK;
148 mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
149 break;
150 }
151
152 au_writel(mem_sttime,MEM_STTIME2);
153 au_writel(mem_stcfg,MEM_STCFG2);
154 }
155
156 static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
157 {
158 int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
159
160 switch(speed) {
161 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
162 case XFER_MW_DMA_2:
163 mem_sttime = SBC_IDE_TIMING(MDMA2);
164
165 /* set configuration for RCS2# */
166 mem_stcfg &= ~TS_MASK;
167 mem_stcfg &= ~TCSOE_MASK;
168 mem_stcfg &= ~TOECS_MASK;
169 mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
170
171 break;
172 case XFER_MW_DMA_1:
173 mem_sttime = SBC_IDE_TIMING(MDMA1);
174
175 /* set configuration for RCS2# */
176 mem_stcfg &= ~TS_MASK;
177 mem_stcfg &= ~TCSOE_MASK;
178 mem_stcfg &= ~TOECS_MASK;
179 mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
180
181 break;
182 case XFER_MW_DMA_0:
183 mem_sttime = SBC_IDE_TIMING(MDMA0);
184
185 /* set configuration for RCS2# */
186 mem_stcfg |= TS_MASK;
187 mem_stcfg &= ~TCSOE_MASK;
188 mem_stcfg &= ~TOECS_MASK;
189 mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
190
191 break;
192 #endif
193 }
194
195 au_writel(mem_sttime,MEM_STTIME2);
196 au_writel(mem_stcfg,MEM_STCFG2);
197 }
198
199 /*
200 * Multi-Word DMA + DbDMA functions
201 */
202
203 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
204 static int auide_build_dmatable(ide_drive_t *drive)
205 {
206 int i, iswrite, count = 0;
207 ide_hwif_t *hwif = HWIF(drive);
208
209 struct request *rq = HWGROUP(drive)->rq;
210
211 _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
212 struct scatterlist *sg;
213
214 iswrite = (rq_data_dir(rq) == WRITE);
215 /* Save for interrupt context */
216 ahwif->drive = drive;
217
218 hwif->sg_nents = i = ide_build_sglist(drive, rq);
219
220 if (!i)
221 return 0;
222
223 /* fill the descriptors */
224 sg = hwif->sg_table;
225 while (i && sg_dma_len(sg)) {
226 u32 cur_addr;
227 u32 cur_len;
228
229 cur_addr = sg_dma_address(sg);
230 cur_len = sg_dma_len(sg);
231
232 while (cur_len) {
233 u32 flags = DDMA_FLAGS_NOIE;
234 unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
235
236 if (++count >= PRD_ENTRIES) {
237 printk(KERN_WARNING "%s: DMA table too small\n",
238 drive->name);
239 goto use_pio_instead;
240 }
241
242 /* Lets enable intr for the last descriptor only */
243 if (1==i)
244 flags = DDMA_FLAGS_IE;
245 else
246 flags = DDMA_FLAGS_NOIE;
247
248 if (iswrite) {
249 if(!put_source_flags(ahwif->tx_chan,
250 (void*) sg_virt(sg),
251 tc, flags)) {
252 printk(KERN_ERR "%s failed %d\n",
253 __FUNCTION__, __LINE__);
254 }
255 } else
256 {
257 if(!put_dest_flags(ahwif->rx_chan,
258 (void*) sg_virt(sg),
259 tc, flags)) {
260 printk(KERN_ERR "%s failed %d\n",
261 __FUNCTION__, __LINE__);
262 }
263 }
264
265 cur_addr += tc;
266 cur_len -= tc;
267 }
268 sg = sg_next(sg);
269 i--;
270 }
271
272 if (count)
273 return 1;
274
275 use_pio_instead:
276 ide_destroy_dmatable(drive);
277
278 return 0; /* revert to PIO for this request */
279 }
280
281 static int auide_dma_end(ide_drive_t *drive)
282 {
283 ide_hwif_t *hwif = HWIF(drive);
284
285 if (hwif->sg_nents) {
286 ide_destroy_dmatable(drive);
287 hwif->sg_nents = 0;
288 }
289
290 return 0;
291 }
292
/* Nothing to do: the transfer is already queued to the DbDMA engine
 * when auide_dma_setup() builds the descriptor chain. */
static void auide_dma_start(ide_drive_t *drive )
{
}
296
297
298 static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command)
299 {
300 /* issue cmd to drive */
301 ide_execute_command(drive, command, &ide_dma_intr,
302 (2*WAIT_CMD), NULL);
303 }
304
305 static int auide_dma_setup(ide_drive_t *drive)
306 {
307 struct request *rq = HWGROUP(drive)->rq;
308
309 if (!auide_build_dmatable(drive)) {
310 ide_map_sg(drive, rq);
311 return 1;
312 }
313
314 drive->waiting_for_dma = 1;
315 return 0;
316 }
317
/*
 * MWDMA mode filter: on first use, classify the drive against the DMA
 * white/black lists and lazily bring up the DbDMA channels, then
 * return the host's MWDMA mask unchanged.
 *
 * FIXME: ->white_list and ->black_list are based on completely bogus
 * ->ide_dma_check implementation which didn't set neither the host
 * controller timings nor the device for the desired transfer mode.
 *
 * They should be either removed or 0x00 MWDMA mask should be
 * returned for devices on the ->black_list.
 */
static u8 auide_mdma_filter(ide_drive_t *drive)
{
	/* One-time DbDMA init, deferred to the first filter call so the
	 * list lookup can use this drive's identify data. */
	if (dbdma_init_done == 0) {
		auide_hwif.white_list = ide_in_drive_list(drive->id,
							  dma_white_list);
		auide_hwif.black_list = ide_in_drive_list(drive->id,
							  dma_black_list);
		auide_hwif.drive = drive;
		auide_ddma_init(&auide_hwif);
		dbdma_init_done = 1;
	}

	/* Is the drive in our DMA black list? */
	if (auide_hwif.black_list)
		printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
		       drive->name, drive->id->model);

	/* NOTE(review): despite the warning above, the full mask is still
	 * returned for blacklisted drives — see the FIXME. */
	return drive->hwif->mwdma_mask;
}
346
347 static int auide_dma_test_irq(ide_drive_t *drive)
348 {
349 if (drive->waiting_for_dma == 0)
350 printk(KERN_WARNING "%s: ide_dma_test_irq \
351 called while not waiting\n", drive->name);
352
353 /* If dbdma didn't execute the STOP command yet, the
354 * active bit is still set
355 */
356 drive->waiting_for_dma++;
357 if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
358 printk(KERN_WARNING "%s: timeout waiting for ddma to \
359 complete\n", drive->name);
360 return 1;
361 }
362 udelay(10);
363 return 0;
364 }
365
/* No per-drive DMA enable/disable bit on this controller - no-op. */
static void auide_dma_host_set(ide_drive_t *drive, int on)
{
}
369
/* Called by the IDE core when an expected DMA interrupt never arrived. */
static void auide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}
374
/* DbDMA TX-complete callback (interrupt context): clear the flag that
 * auide_dma_test_irq() polls. */
static void auide_ddma_tx_callback(int irq, void *param)
{
	_auide_hwif *ahwif = (_auide_hwif*)param;
	ahwif->drive->waiting_for_dma = 0;
}
380
/* DbDMA RX-complete callback (interrupt context): clear the flag that
 * auide_dma_test_irq() polls. */
static void auide_ddma_rx_callback(int irq, void *param)
{
	_auide_hwif *ahwif = (_auide_hwif*)param;
	ahwif->drive->waiting_for_dma = 0;
}
386
387 #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
388
/* Fill a DbDMA device-table entry for the ATA controller at
 * AU1XXX_ATA_PHYS_ADDR.  The interrupt level/polarity fields are
 * unused by this driver and set to zero. */
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags)
{
	dev->dev_id = dev_id;
	dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR;
	dev->dev_intlevel = 0;
	dev->dev_intpolarity = 0;
	dev->dev_tsize = tsize;
	dev->dev_devwidth = devwidth;
	dev->dev_flags = flags;
}
399
400 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
401
/* DMA timeout handler: if the test_irq hook reports the engine has in
 * fact completed, just return; otherwise stop the transfer via the
 * dma_end hook. */
static void auide_dma_timeout(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);

	if (hwif->ide_dma_test_irq(drive))
		return;

	hwif->ide_dma_end(drive);
}
413
414
/*
 * Bring up the DbDMA devices, channels and descriptor rings for MWDMA
 * operation.  Burst size and device width depend on the white/black
 * list classification done in auide_mdma_filter(): 8x32-bit bursts
 * for listed drives, conservative 1x16-bit otherwise.
 *
 * NOTE(review): the results of au1xxx_dbdma_chan_alloc(),
 * au1xxx_dbdma_ring_alloc() and dma_alloc_coherent() are not checked;
 * a failure here is silently ignored and 0 is always returned —
 * TODO confirm these cannot fail at this point or add error handling.
 */
static int auide_ddma_init(_auide_hwif *auide) {

	dbdev_tab_t source_dev_tab, target_dev_tab;
	u32 dev_id, tsize, devwidth, flags;
	ide_hwif_t *hwif = auide->hwif;

	dev_id = AU1XXX_ATA_DDMA_REQ;

	/* Listed drives get 8-word, 32-bit bursts; unknown drives fall
	 * back to single 16-bit transfers. */
	if (auide->white_list || auide->black_list) {
		tsize = 8;
		devwidth = 32;
	}
	else {
		tsize = 1;
		devwidth = 16;

		printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model);
		printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'");
	}

#ifdef IDE_AU1XXX_BURSTMODE
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev( &source_dev_tab,
			      dev_id,
			      tsize, devwidth, DEV_FLAGS_OUT | flags);
	auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

	/* the rx side reuses source_dev_tab with the IN direction */
	auide_init_dbdma_dev( &source_dev_tab,
			      dev_id,
			      tsize, devwidth, DEV_FLAGS_IN | flags);
	auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

	/* We also need to add a target device for the DMA */
	auide_init_dbdma_dev( &target_dev_tab,
			      (u32)DSCR_CMD0_ALWAYS,
			      tsize, devwidth, DEV_FLAGS_ANYUSE);
	auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
						 auide->tx_dev_id,
						 auide_ddma_tx_callback,
						 (void*)auide);

	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 auide->target_dev_id,
						 auide_ddma_rx_callback,
						 (void*)auide);

	auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							     NUM_DESCRIPTORS);
	auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							     NUM_DESCRIPTORS);

	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
						PRD_ENTRIES * PRD_BYTES,	/* 1 Page */
						&hwif->dmatable_dma, GFP_KERNEL);

	au1xxx_dbdma_start( auide->tx_chan );
	au1xxx_dbdma_start( auide->rx_chan );

	return 0;
}
484 #else
485
/*
 * PIO+DbDMA variant: set up descriptor rings for CPU-offloaded PIO
 * copies (auide_insw/auide_outsw).  Both channels use the ALWAYS
 * request line and no completion callbacks, since completion is
 * polled in the insw/outsw helpers.
 *
 * NOTE(review): as in the MWDMA variant, allocation results are not
 * checked and 0 is always returned.
 */
static int auide_ddma_init( _auide_hwif *auide )
{
	dbdev_tab_t source_dev_tab;
	int flags;

#ifdef IDE_AU1XXX_BURSTMODE
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev( &source_dev_tab,
			      (u32)DSCR_CMD0_ALWAYS,
			      8, 32, DEV_FLAGS_OUT | flags);
	auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

	auide_init_dbdma_dev( &source_dev_tab,
			      (u32)DSCR_CMD0_ALWAYS,
			      8, 32, DEV_FLAGS_IN | flags);
	auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
						 auide->tx_dev_id,
						 NULL,
						 (void*)auide);

	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 DSCR_CMD0_ALWAYS,
						 NULL,
						 (void*)auide);

	auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							     NUM_DESCRIPTORS);
	auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							     NUM_DESCRIPTORS);

	au1xxx_dbdma_start( auide->tx_chan );
	au1xxx_dbdma_start( auide->rx_chan );

	return 0;
}
530 #endif
531
532 static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
533 {
534 int i;
535 unsigned long *ata_regs = hw->io_ports;
536
537 /* FIXME? */
538 for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
539 *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
540 }
541
542 /* set the Alternative Status register */
543 *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
544 }
545
/* Host-specific port operations: PIO/MWDMA timing programming and,
 * when MWDMA is configured, the white/black-list mode filter. */
static const struct ide_port_ops au1xxx_port_ops = {
	.set_pio_mode = au1xxx_set_pio_mode,
	.set_dma_mode = auide_set_dma_mode,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	.mdma_filter = auide_mdma_filter,
#endif
};
553
/* Host description for ide_device_add().  DbDMA replaces SFF-style
 * DMA, hence IDE_HFLAG_NO_DMA together with a separate MWDMA mask. */
static const struct ide_port_info au1xxx_port_info = {
	.port_ops		= &au1xxx_port_ops,
	.host_flags		= IDE_HFLAG_POST_SET_MODE |
				  IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
				  IDE_HFLAG_NO_IO_32BIT |
				  IDE_HFLAG_UNMASK_IRQS,
	.pio_mask		= ATA_PIO4,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
	.mwdma_mask		= ATA_MWDMA2,
#endif
};
565
566 static int au_ide_probe(struct device *dev)
567 {
568 struct platform_device *pdev = to_platform_device(dev);
569 _auide_hwif *ahwif = &auide_hwif;
570 ide_hwif_t *hwif;
571 struct resource *res;
572 int ret = 0;
573 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
574 hw_regs_t hw;
575
576 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
577 char *mode = "MWDMA2";
578 #elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
579 char *mode = "PIO+DDMA(offload)";
580 #endif
581
582 memset(&auide_hwif, 0, sizeof(_auide_hwif));
583 ahwif->irq = platform_get_irq(pdev, 0);
584
585 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
586
587 if (res == NULL) {
588 pr_debug("%s %d: no base address\n", DRV_NAME, pdev->id);
589 ret = -ENODEV;
590 goto out;
591 }
592 if (ahwif->irq < 0) {
593 pr_debug("%s %d: no IRQ\n", DRV_NAME, pdev->id);
594 ret = -ENODEV;
595 goto out;
596 }
597
598 if (!request_mem_region(res->start, res->end - res->start + 1,
599 pdev->name)) {
600 pr_debug("%s: request_mem_region failed\n", DRV_NAME);
601 ret = -EBUSY;
602 goto out;
603 }
604
605 ahwif->regbase = (u32)ioremap(res->start, res->end - res->start + 1);
606 if (ahwif->regbase == 0) {
607 ret = -ENOMEM;
608 goto out;
609 }
610
611 hwif = ide_find_port();
612 if (hwif == NULL) {
613 ret = -ENOENT;
614 goto out;
615 }
616
617 memset(&hw, 0, sizeof(hw));
618 auide_setup_ports(&hw, ahwif);
619 hw.irq = ahwif->irq;
620 hw.dev = dev;
621 hw.chipset = ide_au1xxx;
622
623 ide_init_port_hw(hwif, &hw);
624
625 hwif->dev = dev;
626
627 hwif->mmio = 1;
628
629 /* If the user has selected DDMA assisted copies,
630 then set up a few local I/O function entry points
631 */
632
633 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
634 hwif->INSW = auide_insw;
635 hwif->OUTSW = auide_outsw;
636 #endif
637 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
638 hwif->dma_timeout = &auide_dma_timeout;
639 hwif->dma_host_set = &auide_dma_host_set;
640 hwif->dma_exec_cmd = &auide_dma_exec_cmd;
641 hwif->dma_start = &auide_dma_start;
642 hwif->ide_dma_end = &auide_dma_end;
643 hwif->dma_setup = &auide_dma_setup;
644 hwif->ide_dma_test_irq = &auide_dma_test_irq;
645 hwif->dma_lost_irq = &auide_dma_lost_irq;
646 #endif
647 hwif->select_data = 0; /* no chipset-specific code */
648 hwif->config_data = 0; /* no chipset-specific code */
649
650 auide_hwif.hwif = hwif;
651 hwif->hwif_data = &auide_hwif;
652
653 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
654 auide_ddma_init(&auide_hwif);
655 dbdma_init_done = 1;
656 #endif
657
658 idx[0] = hwif->index;
659
660 ide_device_add(idx, &au1xxx_port_info);
661
662 dev_set_drvdata(dev, hwif);
663
664 printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
665
666 out:
667 return ret;
668 }
669
/*
 * Platform-bus remove: unregister the IDE port, then undo the
 * ioremap() and request_mem_region() claims made in au_ide_probe().
 * NOTE(review): platform_get_resource() is dereferenced without a
 * NULL check; assumed to return the same resource as at probe time.
 */
static int au_ide_remove(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	ide_hwif_t *hwif = dev_get_drvdata(dev);
	_auide_hwif *ahwif = &auide_hwif;

	ide_unregister(hwif->index);

	iounmap((void *)ahwif->regbase);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	return 0;
}
686
687 static struct device_driver au1200_ide_driver = {
688 .name = "au1200-ide",
689 .bus = &platform_bus_type,
690 .probe = au_ide_probe,
691 .remove = au_ide_remove,
692 };
693
/* Module init: register the platform-bus driver. */
static int __init au_ide_init(void)
{
	return driver_register(&au1200_ide_driver);
}
698
/* Module exit: unregister the platform-bus driver. */
static void __exit au_ide_exit(void)
{
	driver_unregister(&au1200_ide_driver);
}
703
704 MODULE_LICENSE("GPL");
705 MODULE_DESCRIPTION("AU1200 IDE driver");
706
707 module_init(au_ide_init);
708 module_exit(au_ide_exit);
This page took 0.049835 seconds and 5 git commands to generate.