2 * drivers/block/mg_disk.c
4 * Support for the mGine m[g]flash IO mode.
7 * (c) 2008 mGine Co.,LTD
8 * (c) 2008 unsik Kim <donari75@gmail.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
15 #include <linux/kernel.h>
16 #include <linux/module.h>
18 #include <linux/blkdev.h>
19 #include <linux/hdreg.h>
20 #include <linux/libata.h>
21 #include <linux/interrupt.h>
22 #include <linux/delay.h>
23 #include <linux/platform_device.h>
24 #include <linux/gpio.h>
26 #define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
28 /* name for block device */
29 #define MG_DISK_NAME "mgd"
30 /* name for platform device */
31 #define MG_DEV_NAME "mg_disk"
34 #define MG_DISK_MAX_PART 16
35 #define MG_SECTOR_SIZE 512
36 #define MG_MAX_SECTS 256
38 /* Register offsets */
39 #define MG_BUFF_OFFSET 0x8000
40 #define MG_STORAGE_BUFFER_SIZE 0x200
41 #define MG_REG_OFFSET 0xC000
42 #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
43 #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
44 #define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
45 #define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
46 #define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
47 #define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
48 #define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
49 #define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
50 #define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
51 #define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
52 #define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
54 /* "Drive Select/Head Register" bit values */
55 #define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */
56 #define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON)
57 #define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON)
58 #define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON)
61 /* "Device Control Register" bit values */
62 #define MG_REG_CTRL_INTR_ENABLE 0x0
63 #define MG_REG_CTRL_INTR_DISABLE (0x1<<1)
64 #define MG_REG_CTRL_RESET (0x1<<2)
65 #define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0
66 #define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4)
67 #define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0
68 #define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5)
69 #define MG_REG_CTRL_DPD_DISABLE 0x0
70 #define MG_REG_CTRL_DPD_ENABLE (0x1<<6)
72 /* Status register bit */
73 /* error bit in status register */
74 #define MG_REG_STATUS_BIT_ERROR 0x01
75 /* corrected error in status register */
76 #define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04
77 /* data request bit in status register */
78 #define MG_REG_STATUS_BIT_DATA_REQ 0x08
79 /* DSC - Drive Seek Complete */
80 #define MG_REG_STATUS_BIT_SEEK_DONE 0x10
81 /* DWF - Drive Write Fault */
82 #define MG_REG_STATUS_BIT_WRITE_FAULT 0x20
83 #define MG_REG_STATUS_BIT_READY 0x40
84 #define MG_REG_STATUS_BIT_BUSY 0x80
87 #define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
88 #define MG_READY_OK(s) (((s) & (MG_STAT_READY | \
89 (MG_REG_STATUS_BIT_BUSY | \
90 MG_REG_STATUS_BIT_WRITE_FAULT | \
91 MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
94 #define MG_REG_ERR_AMNF 0x01
95 #define MG_REG_ERR_ABRT 0x04
96 #define MG_REG_ERR_IDNF 0x10
97 #define MG_REG_ERR_UNC 0x40
98 #define MG_REG_ERR_BBK 0x80
100 /* error code for others */
101 #define MG_ERR_NONE 0
102 #define MG_ERR_TIMEOUT 0x100
103 #define MG_ERR_INIT_STAT 0x101
104 #define MG_ERR_TRANSLATION 0x102
105 #define MG_ERR_CTRL_RST 0x103
106 #define MG_ERR_INV_STAT 0x104
107 #define MG_ERR_RSTOUT 0x105
109 #define MG_MAX_ERRORS 6 /* Max read/write errors */
112 #define MG_CMD_RD 0x20
113 #define MG_CMD_WR 0x30
114 #define MG_CMD_SLEEP 0x99
115 #define MG_CMD_WAKEUP 0xC3
116 #define MG_CMD_ID 0xEC
117 #define MG_CMD_WR_CONF 0x3C
118 #define MG_CMD_RD_CONF 0x40
121 #define MG_OP_CASCADE (1 << 0)
122 #define MG_OP_CASCADE_SYNC_RD (1 << 1)
123 #define MG_OP_CASCADE_SYNC_WR (1 << 2)
124 #define MG_OP_INTERLEAVE (1 << 3)
127 #define MG_BURST_LAT_4 (3 << 4)
128 #define MG_BURST_LAT_5 (4 << 4)
129 #define MG_BURST_LAT_6 (5 << 4)
130 #define MG_BURST_LAT_7 (6 << 4)
131 #define MG_BURST_LAT_8 (7 << 4)
132 #define MG_BURST_LEN_4 (1 << 1)
133 #define MG_BURST_LEN_8 (2 << 1)
134 #define MG_BURST_LEN_16 (3 << 1)
135 #define MG_BURST_LEN_32 (4 << 1)
136 #define MG_BURST_LEN_CONT (0 << 1)
138 /* timeout value (unit: ms) */
139 #define MG_TMAX_CONF_TO_CMD 1
140 #define MG_TMAX_WAIT_RD_DRQ 10
141 #define MG_TMAX_WAIT_WR_DRQ 500
142 #define MG_TMAX_RST_TO_BUSY 10
143 #define MG_TMAX_HDRST_TO_RDY 500
144 #define MG_TMAX_SWRST_TO_RDY 500
145 #define MG_TMAX_RSTOUT 3000
147 /* device attribution */
148 /* use mflash as boot device */
149 #define MG_BOOT_DEV (1 << 0)
150 /* use mflash as storage device */
151 #define MG_STORAGE_DEV (1 << 1)
152 /* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
153 #define MG_STORAGE_DEV_SKIP_RST (1 << 2)
155 #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
157 /* names of GPIO resource */
158 #define MG_RST_PIN "mg_rst"
159 /* except MG_BOOT_DEV, reset-out pin should be assigned */
160 #define MG_RSTOUT_PIN "mg_rstout"
162 /* private driver data */
167 /* device attribution */
170 /* internally used */
171 struct mg_host
*host
;
174 /* main structure for mflash driver */
178 struct request_queue
*breq
;
182 struct timer_list timer
;
183 void (*mg_do_intr
) (struct mg_host
*);
185 u16 id
[ATA_ID_WORDS
];
193 void __iomem
*dev_base
;
/*
 * Debugging macro and defines
 */
#ifdef CONFIG_MG_DEBUG
# define MG_DBG(fmt, args...)	\
	printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
#else /* CONFIG_MG_DEBUG */
# define MG_DBG(fmt, args...) do { } while (0)
#endif /* CONFIG_MG_DEBUG */

static void mg_request(struct request_queue *);
215 static void mg_dump_status(const char *msg
, unsigned int stat
,
216 struct mg_host
*host
)
218 char *name
= MG_DISK_NAME
;
222 req
= elv_next_request(host
->breq
);
224 name
= req
->rq_disk
->disk_name
;
227 printk(KERN_ERR
"%s: %s: status=0x%02x { ", name
, msg
, stat
& 0xff);
228 if (stat
& MG_REG_STATUS_BIT_BUSY
)
230 if (stat
& MG_REG_STATUS_BIT_READY
)
231 printk("DriveReady ");
232 if (stat
& MG_REG_STATUS_BIT_WRITE_FAULT
)
233 printk("WriteFault ");
234 if (stat
& MG_REG_STATUS_BIT_SEEK_DONE
)
235 printk("SeekComplete ");
236 if (stat
& MG_REG_STATUS_BIT_DATA_REQ
)
237 printk("DataRequest ");
238 if (stat
& MG_REG_STATUS_BIT_CORRECTED_ERROR
)
239 printk("CorrectedError ");
240 if (stat
& MG_REG_STATUS_BIT_ERROR
)
243 if ((stat
& MG_REG_STATUS_BIT_ERROR
) == 0) {
246 host
->error
= inb((unsigned long)host
->dev_base
+ MG_REG_ERROR
);
247 printk(KERN_ERR
"%s: %s: error=0x%02x { ", name
, msg
,
249 if (host
->error
& MG_REG_ERR_BBK
)
250 printk("BadSector ");
251 if (host
->error
& MG_REG_ERR_UNC
)
252 printk("UncorrectableError ");
253 if (host
->error
& MG_REG_ERR_IDNF
)
254 printk("SectorIdNotFound ");
255 if (host
->error
& MG_REG_ERR_ABRT
)
256 printk("DriveStatusError ");
257 if (host
->error
& MG_REG_ERR_AMNF
)
258 printk("AddrMarkNotFound ");
261 (MG_REG_ERR_BBK
| MG_REG_ERR_UNC
|
262 MG_REG_ERR_IDNF
| MG_REG_ERR_AMNF
)) {
264 req
= elv_next_request(host
->breq
);
266 printk(", sector=%u", (u32
)req
->sector
);
274 static unsigned int mg_wait(struct mg_host
*host
, u32 expect
, u32 msec
)
277 unsigned long expire
, cur_jiffies
;
278 struct mg_drv_data
*prv_data
= host
->dev
->platform_data
;
280 host
->error
= MG_ERR_NONE
;
281 expire
= jiffies
+ msecs_to_jiffies(msec
);
283 status
= inb((unsigned long)host
->dev_base
+ MG_REG_STATUS
);
286 cur_jiffies
= jiffies
;
287 if (status
& MG_REG_STATUS_BIT_BUSY
) {
288 if (expect
== MG_REG_STATUS_BIT_BUSY
)
291 /* Check the error condition! */
292 if (status
& MG_REG_STATUS_BIT_ERROR
) {
293 mg_dump_status("mg_wait", status
, host
);
297 if (expect
== MG_STAT_READY
)
298 if (MG_READY_OK(status
))
301 if (expect
== MG_REG_STATUS_BIT_DATA_REQ
)
302 if (status
& MG_REG_STATUS_BIT_DATA_REQ
)
306 mg_dump_status("not ready", status
, host
);
307 return MG_ERR_INV_STAT
;
309 if (prv_data
->use_polling
)
312 status
= inb((unsigned long)host
->dev_base
+ MG_REG_STATUS
);
313 } while (time_before(cur_jiffies
, expire
));
315 if (time_after_eq(cur_jiffies
, expire
) && msec
)
316 host
->error
= MG_ERR_TIMEOUT
;
321 static unsigned int mg_wait_rstout(u32 rstout
, u32 msec
)
323 unsigned long expire
;
325 expire
= jiffies
+ msecs_to_jiffies(msec
);
326 while (time_before(jiffies
, expire
)) {
327 if (gpio_get_value(rstout
) == 1)
332 return MG_ERR_RSTOUT
;
335 static void mg_unexpected_intr(struct mg_host
*host
)
337 u32 status
= inb((unsigned long)host
->dev_base
+ MG_REG_STATUS
);
339 mg_dump_status("mg_unexpected_intr", status
, host
);
342 static irqreturn_t
mg_irq(int irq
, void *dev_id
)
344 struct mg_host
*host
= dev_id
;
345 void (*handler
)(struct mg_host
*) = host
->mg_do_intr
;
347 spin_lock(&host
->lock
);
349 host
->mg_do_intr
= NULL
;
350 del_timer(&host
->timer
);
352 handler
= mg_unexpected_intr
;
355 spin_unlock(&host
->lock
);
360 static int mg_get_disk_id(struct mg_host
*host
)
364 const u16
*id
= host
->id
;
365 struct mg_drv_data
*prv_data
= host
->dev
->platform_data
;
366 char fwrev
[ATA_ID_FW_REV_LEN
+ 1];
367 char model
[ATA_ID_PROD_LEN
+ 1];
368 char serial
[ATA_ID_SERNO_LEN
+ 1];
370 if (!prv_data
->use_polling
)
371 outb(MG_REG_CTRL_INTR_DISABLE
,
372 (unsigned long)host
->dev_base
+
375 outb(MG_CMD_ID
, (unsigned long)host
->dev_base
+ MG_REG_COMMAND
);
376 err
= mg_wait(host
, MG_REG_STATUS_BIT_DATA_REQ
, MG_TMAX_WAIT_RD_DRQ
);
380 for (i
= 0; i
< (MG_SECTOR_SIZE
>> 1); i
++)
381 host
->id
[i
] = le16_to_cpu(inw((unsigned long)host
->dev_base
+
382 MG_BUFF_OFFSET
+ i
* 2));
384 outb(MG_CMD_RD_CONF
, (unsigned long)host
->dev_base
+ MG_REG_COMMAND
);
385 err
= mg_wait(host
, MG_STAT_READY
, MG_TMAX_CONF_TO_CMD
);
389 if ((id
[ATA_ID_FIELD_VALID
] & 1) == 0)
390 return MG_ERR_TRANSLATION
;
392 host
->n_sectors
= ata_id_u32(id
, ATA_ID_LBA_CAPACITY
);
393 host
->cyls
= id
[ATA_ID_CYLS
];
394 host
->heads
= id
[ATA_ID_HEADS
];
395 host
->sectors
= id
[ATA_ID_SECTORS
];
397 if (MG_RES_SEC
&& host
->heads
&& host
->sectors
) {
398 /* modify cyls, n_sectors */
399 host
->cyls
= (host
->n_sectors
- MG_RES_SEC
) /
400 host
->heads
/ host
->sectors
;
401 host
->nres_sectors
= host
->n_sectors
- host
->cyls
*
402 host
->heads
* host
->sectors
;
403 host
->n_sectors
-= host
->nres_sectors
;
406 ata_id_c_string(id
, fwrev
, ATA_ID_FW_REV
, sizeof(fwrev
));
407 ata_id_c_string(id
, model
, ATA_ID_PROD
, sizeof(model
));
408 ata_id_c_string(id
, serial
, ATA_ID_SERNO
, sizeof(serial
));
409 printk(KERN_INFO
"mg_disk: model: %s\n", model
);
410 printk(KERN_INFO
"mg_disk: firm: %.8s\n", fwrev
);
411 printk(KERN_INFO
"mg_disk: serial: %s\n", serial
);
412 printk(KERN_INFO
"mg_disk: %d + reserved %d sectors\n",
413 host
->n_sectors
, host
->nres_sectors
);
415 if (!prv_data
->use_polling
)
416 outb(MG_REG_CTRL_INTR_ENABLE
, (unsigned long)host
->dev_base
+
423 static int mg_disk_init(struct mg_host
*host
)
425 struct mg_drv_data
*prv_data
= host
->dev
->platform_data
;
430 gpio_set_value(host
->rst
, 0);
431 err
= mg_wait(host
, MG_REG_STATUS_BIT_BUSY
, MG_TMAX_RST_TO_BUSY
);
436 gpio_set_value(host
->rst
, 1);
437 err
= mg_wait(host
, MG_STAT_READY
, MG_TMAX_HDRST_TO_RDY
);
442 outb(MG_REG_CTRL_RESET
|
443 (prv_data
->use_polling
? MG_REG_CTRL_INTR_DISABLE
:
444 MG_REG_CTRL_INTR_ENABLE
),
445 (unsigned long)host
->dev_base
+ MG_REG_DRV_CTRL
);
446 err
= mg_wait(host
, MG_REG_STATUS_BIT_BUSY
, MG_TMAX_RST_TO_BUSY
);
451 outb(prv_data
->use_polling
? MG_REG_CTRL_INTR_DISABLE
:
452 MG_REG_CTRL_INTR_ENABLE
,
453 (unsigned long)host
->dev_base
+ MG_REG_DRV_CTRL
);
454 err
= mg_wait(host
, MG_STAT_READY
, MG_TMAX_SWRST_TO_RDY
);
458 init_status
= inb((unsigned long)host
->dev_base
+ MG_REG_STATUS
) & 0xf;
460 if (init_status
== 0xf)
461 return MG_ERR_INIT_STAT
;
466 static void mg_bad_rw_intr(struct mg_host
*host
)
468 struct request
*req
= elv_next_request(host
->breq
);
470 if (++req
->errors
>= MG_MAX_ERRORS
||
471 host
->error
== MG_ERR_TIMEOUT
)
472 __blk_end_request_cur(req
, -EIO
);
475 static unsigned int mg_out(struct mg_host
*host
,
476 unsigned int sect_num
,
477 unsigned int sect_cnt
,
479 void (*intr_addr
)(struct mg_host
*))
481 struct mg_drv_data
*prv_data
= host
->dev
->platform_data
;
483 if (mg_wait(host
, MG_STAT_READY
, MG_TMAX_CONF_TO_CMD
))
486 if (!prv_data
->use_polling
) {
487 host
->mg_do_intr
= intr_addr
;
488 mod_timer(&host
->timer
, jiffies
+ 3 * HZ
);
491 sect_num
+= MG_RES_SEC
;
492 outb((u8
)sect_cnt
, (unsigned long)host
->dev_base
+ MG_REG_SECT_CNT
);
493 outb((u8
)sect_num
, (unsigned long)host
->dev_base
+ MG_REG_SECT_NUM
);
494 outb((u8
)(sect_num
>> 8), (unsigned long)host
->dev_base
+
496 outb((u8
)(sect_num
>> 16), (unsigned long)host
->dev_base
+
498 outb((u8
)((sect_num
>> 24) | MG_REG_HEAD_LBA_MODE
),
499 (unsigned long)host
->dev_base
+ MG_REG_DRV_HEAD
);
500 outb(cmd
, (unsigned long)host
->dev_base
+ MG_REG_COMMAND
);
504 static void mg_read(struct request
*req
)
507 struct mg_host
*host
= req
->rq_disk
->private_data
;
509 remains
= req
->nr_sectors
;
511 if (mg_out(host
, req
->sector
, req
->nr_sectors
, MG_CMD_RD
, NULL
) !=
513 mg_bad_rw_intr(host
);
515 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
516 remains
, req
->sector
, req
->buffer
);
519 if (mg_wait(host
, MG_REG_STATUS_BIT_DATA_REQ
,
520 MG_TMAX_WAIT_RD_DRQ
) != MG_ERR_NONE
) {
521 mg_bad_rw_intr(host
);
524 for (j
= 0; j
< MG_SECTOR_SIZE
>> 1; j
++) {
525 *(u16
*)req
->buffer
=
526 inw((unsigned long)host
->dev_base
+
527 MG_BUFF_OFFSET
+ (j
<< 1));
533 remains
= --req
->nr_sectors
;
534 --req
->current_nr_sectors
;
536 if (req
->current_nr_sectors
<= 0) {
537 MG_DBG("remain : %d sects\n", remains
);
538 __blk_end_request_cur(req
, 0);
540 req
= elv_next_request(host
->breq
);
543 outb(MG_CMD_RD_CONF
, (unsigned long)host
->dev_base
+
548 static void mg_write(struct request
*req
)
551 struct mg_host
*host
= req
->rq_disk
->private_data
;
553 remains
= req
->nr_sectors
;
555 if (mg_out(host
, req
->sector
, req
->nr_sectors
, MG_CMD_WR
, NULL
) !=
557 mg_bad_rw_intr(host
);
562 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
563 remains
, req
->sector
, req
->buffer
);
565 if (mg_wait(host
, MG_REG_STATUS_BIT_DATA_REQ
,
566 MG_TMAX_WAIT_WR_DRQ
) != MG_ERR_NONE
) {
567 mg_bad_rw_intr(host
);
570 for (j
= 0; j
< MG_SECTOR_SIZE
>> 1; j
++) {
571 outw(*(u16
*)req
->buffer
,
572 (unsigned long)host
->dev_base
+
573 MG_BUFF_OFFSET
+ (j
<< 1));
577 remains
= --req
->nr_sectors
;
578 --req
->current_nr_sectors
;
580 if (req
->current_nr_sectors
<= 0) {
581 MG_DBG("remain : %d sects\n", remains
);
582 __blk_end_request_cur(req
, 0);
584 req
= elv_next_request(host
->breq
);
587 outb(MG_CMD_WR_CONF
, (unsigned long)host
->dev_base
+
592 static void mg_read_intr(struct mg_host
*host
)
599 i
= inb((unsigned long)host
->dev_base
+ MG_REG_STATUS
);
600 if (i
& MG_REG_STATUS_BIT_BUSY
)
604 if (i
& MG_REG_STATUS_BIT_DATA_REQ
)
607 mg_dump_status("mg_read_intr", i
, host
);
608 mg_bad_rw_intr(host
);
609 mg_request(host
->breq
);
613 /* get current segment of request */
614 req
= elv_next_request(host
->breq
);
617 for (i
= 0; i
< MG_SECTOR_SIZE
>> 1; i
++) {
618 *(u16
*)req
->buffer
=
619 inw((unsigned long)host
->dev_base
+ MG_BUFF_OFFSET
+
624 /* manipulate request */
625 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
626 req
->sector
, req
->nr_sectors
- 1, req
->buffer
);
630 i
= --req
->nr_sectors
;
631 --req
->current_nr_sectors
;
633 /* let know if current segment done */
634 if (req
->current_nr_sectors
<= 0)
635 __blk_end_request_cur(req
, 0);
637 /* set handler if read remains */
639 host
->mg_do_intr
= mg_read_intr
;
640 mod_timer(&host
->timer
, jiffies
+ 3 * HZ
);
643 /* send read confirm */
644 outb(MG_CMD_RD_CONF
, (unsigned long)host
->dev_base
+ MG_REG_COMMAND
);
646 /* goto next request */
648 mg_request(host
->breq
);
651 static void mg_write_intr(struct mg_host
*host
)
657 /* get current segment of request */
658 req
= elv_next_request(host
->breq
);
662 i
= inb((unsigned long)host
->dev_base
+ MG_REG_STATUS
);
663 if (i
& MG_REG_STATUS_BIT_BUSY
)
667 if ((req
->nr_sectors
<= 1) || (i
& MG_REG_STATUS_BIT_DATA_REQ
))
670 mg_dump_status("mg_write_intr", i
, host
);
671 mg_bad_rw_intr(host
);
672 mg_request(host
->breq
);
676 /* manipulate request */
678 i
= --req
->nr_sectors
;
679 --req
->current_nr_sectors
;
680 req
->buffer
+= MG_SECTOR_SIZE
;
682 /* let know if current segment or all done */
683 if (!i
|| (req
->bio
&& req
->current_nr_sectors
<= 0))
684 __blk_end_request_cur(req
, 0);
686 /* write 1 sector and set handler if remains */
688 buff
= (u16
*)req
->buffer
;
689 for (j
= 0; j
< MG_STORAGE_BUFFER_SIZE
>> 1; j
++) {
690 outw(*buff
, (unsigned long)host
->dev_base
+
691 MG_BUFF_OFFSET
+ (j
<< 1));
694 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
695 req
->sector
, req
->nr_sectors
, req
->buffer
);
696 host
->mg_do_intr
= mg_write_intr
;
697 mod_timer(&host
->timer
, jiffies
+ 3 * HZ
);
700 /* send write confirm */
701 outb(MG_CMD_WR_CONF
, (unsigned long)host
->dev_base
+ MG_REG_COMMAND
);
704 mg_request(host
->breq
);
707 void mg_times_out(unsigned long data
)
709 struct mg_host
*host
= (struct mg_host
*)data
;
713 spin_lock_irq(&host
->lock
);
715 req
= elv_next_request(host
->breq
);
719 host
->mg_do_intr
= NULL
;
721 name
= req
->rq_disk
->disk_name
;
722 printk(KERN_DEBUG
"%s: timeout\n", name
);
724 host
->error
= MG_ERR_TIMEOUT
;
725 mg_bad_rw_intr(host
);
727 mg_request(host
->breq
);
729 spin_unlock_irq(&host
->lock
);
732 static void mg_request_poll(struct request_queue
*q
)
735 struct mg_host
*host
;
737 while ((req
= elv_next_request(q
)) != NULL
) {
738 host
= req
->rq_disk
->private_data
;
739 if (blk_fs_request(req
)) {
740 switch (rq_data_dir(req
)) {
752 static unsigned int mg_issue_req(struct request
*req
,
753 struct mg_host
*host
,
754 unsigned int sect_num
,
755 unsigned int sect_cnt
)
760 switch (rq_data_dir(req
)) {
762 if (mg_out(host
, sect_num
, sect_cnt
, MG_CMD_RD
, &mg_read_intr
)
764 mg_bad_rw_intr(host
);
770 outb(MG_REG_CTRL_INTR_DISABLE
,
771 (unsigned long)host
->dev_base
+
773 if (mg_out(host
, sect_num
, sect_cnt
, MG_CMD_WR
, &mg_write_intr
)
775 mg_bad_rw_intr(host
);
778 del_timer(&host
->timer
);
779 mg_wait(host
, MG_REG_STATUS_BIT_DATA_REQ
, MG_TMAX_WAIT_WR_DRQ
);
780 outb(MG_REG_CTRL_INTR_ENABLE
, (unsigned long)host
->dev_base
+
783 mg_bad_rw_intr(host
);
786 buff
= (u16
*)req
->buffer
;
787 for (i
= 0; i
< MG_SECTOR_SIZE
>> 1; i
++) {
788 outw(*buff
, (unsigned long)host
->dev_base
+
789 MG_BUFF_OFFSET
+ (i
<< 1));
792 mod_timer(&host
->timer
, jiffies
+ 3 * HZ
);
793 outb(MG_CMD_WR_CONF
, (unsigned long)host
->dev_base
+
800 /* This function also called from IRQ context */
801 static void mg_request(struct request_queue
*q
)
804 struct mg_host
*host
;
805 u32 sect_num
, sect_cnt
;
808 req
= elv_next_request(q
);
812 host
= req
->rq_disk
->private_data
;
814 /* check unwanted request call */
815 if (host
->mg_do_intr
)
818 del_timer(&host
->timer
);
820 sect_num
= req
->sector
;
821 /* deal whole segments */
822 sect_cnt
= req
->nr_sectors
;
825 if (sect_num
>= get_capacity(req
->rq_disk
) ||
826 ((sect_num
+ sect_cnt
) >
827 get_capacity(req
->rq_disk
))) {
829 "%s: bad access: sector=%d, count=%d\n",
830 req
->rq_disk
->disk_name
,
832 __blk_end_request_cur(req
, -EIO
);
836 if (!blk_fs_request(req
))
839 if (!mg_issue_req(req
, host
, sect_num
, sect_cnt
))
844 static int mg_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
846 struct mg_host
*host
= bdev
->bd_disk
->private_data
;
848 geo
->cylinders
= (unsigned short)host
->cyls
;
849 geo
->heads
= (unsigned char)host
->heads
;
850 geo
->sectors
= (unsigned char)host
->sectors
;
854 static struct block_device_operations mg_disk_ops
= {
858 static int mg_suspend(struct platform_device
*plat_dev
, pm_message_t state
)
860 struct mg_drv_data
*prv_data
= plat_dev
->dev
.platform_data
;
861 struct mg_host
*host
= prv_data
->host
;
863 if (mg_wait(host
, MG_STAT_READY
, MG_TMAX_CONF_TO_CMD
))
866 if (!prv_data
->use_polling
)
867 outb(MG_REG_CTRL_INTR_DISABLE
,
868 (unsigned long)host
->dev_base
+
871 outb(MG_CMD_SLEEP
, (unsigned long)host
->dev_base
+ MG_REG_COMMAND
);
872 /* wait until mflash deep sleep */
875 if (mg_wait(host
, MG_STAT_READY
, MG_TMAX_CONF_TO_CMD
)) {
876 if (!prv_data
->use_polling
)
877 outb(MG_REG_CTRL_INTR_ENABLE
,
878 (unsigned long)host
->dev_base
+
886 static int mg_resume(struct platform_device
*plat_dev
)
888 struct mg_drv_data
*prv_data
= plat_dev
->dev
.platform_data
;
889 struct mg_host
*host
= prv_data
->host
;
891 if (mg_wait(host
, MG_STAT_READY
, MG_TMAX_CONF_TO_CMD
))
894 outb(MG_CMD_WAKEUP
, (unsigned long)host
->dev_base
+ MG_REG_COMMAND
);
895 /* wait until mflash wakeup */
898 if (mg_wait(host
, MG_STAT_READY
, MG_TMAX_CONF_TO_CMD
))
901 if (!prv_data
->use_polling
)
902 outb(MG_REG_CTRL_INTR_ENABLE
, (unsigned long)host
->dev_base
+
908 static int mg_probe(struct platform_device
*plat_dev
)
910 struct mg_host
*host
;
911 struct resource
*rsc
;
912 struct mg_drv_data
*prv_data
= plat_dev
->dev
.platform_data
;
916 printk(KERN_ERR
"%s:%d fail (no driver_data)\n",
923 host
= kzalloc(sizeof(struct mg_host
), GFP_KERNEL
);
925 printk(KERN_ERR
"%s:%d fail (no memory for mg_host)\n",
930 host
->major
= MG_DISK_MAJ
;
932 /* link each other */
933 prv_data
->host
= host
;
934 host
->dev
= &plat_dev
->dev
;
937 rsc
= platform_get_resource(plat_dev
, IORESOURCE_MEM
, 0);
939 printk(KERN_ERR
"%s:%d platform_get_resource fail\n",
944 host
->dev_base
= ioremap(rsc
->start
, rsc
->end
+ 1);
945 if (!host
->dev_base
) {
946 printk(KERN_ERR
"%s:%d ioremap fail\n",
951 MG_DBG("dev_base = 0x%x\n", (u32
)host
->dev_base
);
954 rsc
= platform_get_resource_byname(plat_dev
, IORESOURCE_IO
,
957 printk(KERN_ERR
"%s:%d get reset pin fail\n",
962 host
->rst
= rsc
->start
;
965 err
= gpio_request(host
->rst
, MG_RST_PIN
);
968 gpio_direction_output(host
->rst
, 1);
971 if (!(prv_data
->dev_attr
& MG_DEV_MASK
))
974 if (prv_data
->dev_attr
!= MG_BOOT_DEV
) {
975 rsc
= platform_get_resource_byname(plat_dev
, IORESOURCE_IO
,
978 printk(KERN_ERR
"%s:%d get reset-out pin fail\n",
983 host
->rstout
= rsc
->start
;
984 err
= gpio_request(host
->rstout
, MG_RSTOUT_PIN
);
987 gpio_direction_input(host
->rstout
);
991 if (prv_data
->dev_attr
== MG_STORAGE_DEV
) {
992 /* If POR seq. not yet finised, wait */
993 err
= mg_wait_rstout(host
->rstout
, MG_TMAX_RSTOUT
);
996 err
= mg_disk_init(host
);
998 printk(KERN_ERR
"%s:%d fail (err code : %d)\n",
999 __func__
, __LINE__
, err
);
1005 /* get irq resource */
1006 if (!prv_data
->use_polling
) {
1007 host
->irq
= platform_get_irq(plat_dev
, 0);
1008 if (host
->irq
== -ENXIO
) {
1012 err
= request_irq(host
->irq
, mg_irq
,
1013 IRQF_DISABLED
| IRQF_TRIGGER_RISING
,
1016 printk(KERN_ERR
"%s:%d fail (request_irq err=%d)\n",
1017 __func__
, __LINE__
, err
);
1024 err
= mg_get_disk_id(host
);
1026 printk(KERN_ERR
"%s:%d fail (err code : %d)\n",
1027 __func__
, __LINE__
, err
);
1032 err
= register_blkdev(host
->major
, MG_DISK_NAME
);
1034 printk(KERN_ERR
"%s:%d register_blkdev fail (err code : %d)\n",
1035 __func__
, __LINE__
, err
);
1041 spin_lock_init(&host
->lock
);
1043 if (prv_data
->use_polling
)
1044 host
->breq
= blk_init_queue(mg_request_poll
, &host
->lock
);
1046 host
->breq
= blk_init_queue(mg_request
, &host
->lock
);
1050 printk(KERN_ERR
"%s:%d (blk_init_queue) fail\n",
1051 __func__
, __LINE__
);
1055 /* mflash is random device, thanx for the noop */
1056 elevator_exit(host
->breq
->elevator
);
1057 err
= elevator_init(host
->breq
, "noop");
1059 printk(KERN_ERR
"%s:%d (elevator_init) fail\n",
1060 __func__
, __LINE__
);
1063 blk_queue_max_sectors(host
->breq
, MG_MAX_SECTS
);
1064 blk_queue_hardsect_size(host
->breq
, MG_SECTOR_SIZE
);
1066 init_timer(&host
->timer
);
1067 host
->timer
.function
= mg_times_out
;
1068 host
->timer
.data
= (unsigned long)host
;
1070 host
->gd
= alloc_disk(MG_DISK_MAX_PART
);
1072 printk(KERN_ERR
"%s:%d (alloc_disk) fail\n",
1073 __func__
, __LINE__
);
1077 host
->gd
->major
= host
->major
;
1078 host
->gd
->first_minor
= 0;
1079 host
->gd
->fops
= &mg_disk_ops
;
1080 host
->gd
->queue
= host
->breq
;
1081 host
->gd
->private_data
= host
;
1082 sprintf(host
->gd
->disk_name
, MG_DISK_NAME
"a");
1084 set_capacity(host
->gd
, host
->n_sectors
);
1091 del_timer_sync(&host
->timer
);
1093 blk_cleanup_queue(host
->breq
);
1095 unregister_blkdev(MG_DISK_MAJ
, MG_DISK_NAME
);
1097 if (!prv_data
->use_polling
)
1098 free_irq(host
->irq
, host
);
1100 gpio_free(host
->rstout
);
1102 gpio_free(host
->rst
);
1104 iounmap(host
->dev_base
);
1111 static int mg_remove(struct platform_device
*plat_dev
)
1113 struct mg_drv_data
*prv_data
= plat_dev
->dev
.platform_data
;
1114 struct mg_host
*host
= prv_data
->host
;
1118 del_timer_sync(&host
->timer
);
1122 del_gendisk(host
->gd
);
1127 blk_cleanup_queue(host
->breq
);
1129 /* unregister blk device */
1130 unregister_blkdev(host
->major
, MG_DISK_NAME
);
1133 if (!prv_data
->use_polling
)
1134 free_irq(host
->irq
, host
);
1136 /* free reset-out pin */
1137 if (prv_data
->dev_attr
!= MG_BOOT_DEV
)
1138 gpio_free(host
->rstout
);
1142 gpio_free(host
->rst
);
1146 iounmap(host
->dev_base
);
1154 static struct platform_driver mg_disk_driver
= {
1156 .remove
= mg_remove
,
1157 .suspend
= mg_suspend
,
1158 .resume
= mg_resume
,
1160 .name
= MG_DEV_NAME
,
1161 .owner
= THIS_MODULE
,
1165 /****************************************************************************
1169 ****************************************************************************/
1171 static int __init
mg_init(void)
1173 printk(KERN_INFO
"mGine mflash driver, (c) 2008 mGine Co.\n");
1174 return platform_driver_register(&mg_disk_driver
);
1177 static void __exit
mg_exit(void)
1179 printk(KERN_INFO
"mflash driver : bye bye\n");
1180 platform_driver_unregister(&mg_disk_driver
);
1183 module_init(mg_init
);
1184 module_exit(mg_exit
);
1186 MODULE_LICENSE("GPL");
1187 MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
1188 MODULE_DESCRIPTION("mGine m[g]flash device driver");