/*
 * Samsung S3C64XX/S5PC1XX OneNAND driver
 *
 * Copyright © 2008-2010 Samsung Electronics
 * Kyungmin Park <kyungmin.park@samsung.com>
 * Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Implementation:
 *      S3C64XX and S5PC100: emulate the pseudo BufferRAM
 *      S5PC110: use DMA
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <asm/mach/flash.h>
#include <plat/regs-onenand.h>

enum soc_type {
        TYPE_S3C6400,
        TYPE_S3C6410,
        TYPE_S5PC100,
        TYPE_S5PC110,
};

#define ONENAND_ERASE_STATUS            0x00
#define ONENAND_MULTI_ERASE_SET         0x01
#define ONENAND_ERASE_START             0x03
#define ONENAND_UNLOCK_START            0x08
#define ONENAND_UNLOCK_END              0x09
#define ONENAND_LOCK_START              0x0A
#define ONENAND_LOCK_END                0x0B
#define ONENAND_LOCK_TIGHT_START        0x0C
#define ONENAND_LOCK_TIGHT_END          0x0D
#define ONENAND_UNLOCK_ALL              0x0E
#define ONENAND_OTP_ACCESS              0x12
#define ONENAND_SPARE_ACCESS_ONLY       0x13
#define ONENAND_MAIN_ACCESS_ONLY        0x14
#define ONENAND_ERASE_VERIFY            0x15
#define ONENAND_MAIN_SPARE_ACCESS       0x16
#define ONENAND_PIPELINE_READ           0x4000

#define MAP_00                          (0x0)
#define MAP_01                          (0x1)
#define MAP_10                          (0x2)
#define MAP_11                          (0x3)

#define S3C64XX_CMD_MAP_SHIFT           24
#define S5PC100_CMD_MAP_SHIFT           26

#define S3C6400_FBA_SHIFT               10
#define S3C6400_FPA_SHIFT               4
#define S3C6400_FSA_SHIFT               2

#define S3C6410_FBA_SHIFT               12
#define S3C6410_FPA_SHIFT               6
#define S3C6410_FSA_SHIFT               4

#define S5PC100_FBA_SHIFT               13
#define S5PC100_FPA_SHIFT               7
#define S5PC100_FSA_SHIFT               5

/* S5PC110 specific definitions */
#define S5PC110_DMA_SRC_ADDR            0x400
#define S5PC110_DMA_SRC_CFG             0x404
#define S5PC110_DMA_DST_ADDR            0x408
#define S5PC110_DMA_DST_CFG             0x40C
#define S5PC110_DMA_TRANS_SIZE          0x414
#define S5PC110_DMA_TRANS_CMD           0x418
#define S5PC110_DMA_TRANS_STATUS        0x41C
#define S5PC110_DMA_TRANS_DIR           0x420
#define S5PC110_INTC_DMA_CLR            0x1004
#define S5PC110_INTC_ONENAND_CLR        0x1008
#define S5PC110_INTC_DMA_MASK           0x1024
#define S5PC110_INTC_ONENAND_MASK       0x1028
#define S5PC110_INTC_DMA_PEND           0x1044
#define S5PC110_INTC_ONENAND_PEND       0x1048
#define S5PC110_INTC_DMA_STATUS         0x1064
#define S5PC110_INTC_ONENAND_STATUS     0x1068

#define S5PC110_INTC_DMA_TD             (1 << 24)
#define S5PC110_INTC_DMA_TE             (1 << 16)

#define S5PC110_DMA_CFG_SINGLE          (0x0 << 16)
#define S5PC110_DMA_CFG_4BURST          (0x2 << 16)
#define S5PC110_DMA_CFG_8BURST          (0x3 << 16)
#define S5PC110_DMA_CFG_16BURST         (0x4 << 16)

#define S5PC110_DMA_CFG_INC             (0x0 << 8)
#define S5PC110_DMA_CFG_CNT             (0x1 << 8)

#define S5PC110_DMA_CFG_8BIT            (0x0 << 0)
#define S5PC110_DMA_CFG_16BIT           (0x1 << 0)
#define S5PC110_DMA_CFG_32BIT           (0x2 << 0)

#define S5PC110_DMA_SRC_CFG_READ        (S5PC110_DMA_CFG_16BURST | \
                                         S5PC110_DMA_CFG_INC | \
                                         S5PC110_DMA_CFG_16BIT)
#define S5PC110_DMA_DST_CFG_READ        (S5PC110_DMA_CFG_16BURST | \
                                         S5PC110_DMA_CFG_INC | \
                                         S5PC110_DMA_CFG_32BIT)
#define S5PC110_DMA_SRC_CFG_WRITE       (S5PC110_DMA_CFG_16BURST | \
                                         S5PC110_DMA_CFG_INC | \
                                         S5PC110_DMA_CFG_32BIT)
#define S5PC110_DMA_DST_CFG_WRITE       (S5PC110_DMA_CFG_16BURST | \
                                         S5PC110_DMA_CFG_INC | \
                                         S5PC110_DMA_CFG_16BIT)

#define S5PC110_DMA_TRANS_CMD_TDC       (0x1 << 18)
#define S5PC110_DMA_TRANS_CMD_TEC       (0x1 << 16)
#define S5PC110_DMA_TRANS_CMD_TR        (0x1 << 0)

#define S5PC110_DMA_TRANS_STATUS_TD     (0x1 << 18)
#define S5PC110_DMA_TRANS_STATUS_TB     (0x1 << 17)
#define S5PC110_DMA_TRANS_STATUS_TE     (0x1 << 16)

#define S5PC110_DMA_DIR_READ            0x0
#define S5PC110_DMA_DIR_WRITE           0x1

struct s3c_onenand {
        struct mtd_info *mtd;
        struct platform_device *pdev;
        enum soc_type type;
        void __iomem *base;
        struct resource *base_res;
        void __iomem *ahb_addr;
        struct resource *ahb_res;
        int bootram_command;
        void __iomem *page_buf;
        void __iomem *oob_buf;
        unsigned int (*mem_addr)(int fba, int fpa, int fsa);
        unsigned int (*cmd_map)(unsigned int type, unsigned int val);
        void __iomem *dma_addr;
        struct resource *dma_res;
        unsigned long phys_base;
        struct completion complete;
        struct mtd_partition *parts;
};

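/*
 * Helpers that build addresses in the controller's four MAP regions.
 * In this driver MAP_01 is used for main/spare data transfers, MAP_10
 * for command writes (erase, lock, unlock) and MAP_11 for 16-bit
 * OneNAND register accesses; MAP_00 is defined but not used here.
 */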
#define CMD_MAP_00(dev, addr)           (dev->cmd_map(MAP_00, ((addr) << 1)))
#define CMD_MAP_01(dev, mem_addr)       (dev->cmd_map(MAP_01, (mem_addr)))
#define CMD_MAP_10(dev, mem_addr)       (dev->cmd_map(MAP_10, (mem_addr)))
#define CMD_MAP_11(dev, addr)           (dev->cmd_map(MAP_11, ((addr) << 2)))

static struct s3c_onenand *onenand;

static const char *part_probes[] = { "cmdlinepart", NULL, };

static inline int s3c_read_reg(int offset)
{
        return readl(onenand->base + offset);
}

static inline void s3c_write_reg(int value, int offset)
{
        writel(value, onenand->base + offset);
}

static inline int s3c_read_cmd(unsigned int cmd)
{
        return readl(onenand->ahb_addr + cmd);
}

static inline void s3c_write_cmd(int value, unsigned int cmd)
{
        writel(value, onenand->ahb_addr + cmd);
}

#ifdef SAMSUNG_DEBUG
static void s3c_dump_reg(void)
{
        int i;

        for (i = 0; i < 0x400; i += 0x40) {
                printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                        (unsigned int) onenand->base + i,
                        s3c_read_reg(i), s3c_read_reg(i + 0x10),
                        s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
        }
}
#endif

static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
{
        return (type << S3C64XX_CMD_MAP_SHIFT) | val;
}

static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val)
{
        return (type << S5PC100_CMD_MAP_SHIFT) | val;
}

static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
{
        return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
                (fsa << S3C6400_FSA_SHIFT);
}

static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
{
        return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
                (fsa << S3C6410_FSA_SHIFT);
}

static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa)
{
        return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) |
                (fsa << S5PC100_FSA_SHIFT);
}

static void s3c_onenand_reset(void)
{
        unsigned long timeout = 0x10000;
        int stat;

        s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
        while (timeout--) {
                stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
                if (stat & RST_CMP)
                        break;
        }
        stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
        s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

        /* Clear interrupt */
        s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
        /* Clear the ECC status */
        s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
}

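/*
 * Emulate 16-bit OneNAND register reads.  ID and configuration
 * registers are served from the controller's own register file,
 * BootRAM reads issued while probing return the ID registers, and
 * anything else is forwarded as a MAP_11 access.
 */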
static unsigned short s3c_onenand_readw(void __iomem *addr)
{
        struct onenand_chip *this = onenand->mtd->priv;
        struct device *dev = &onenand->pdev->dev;
        int reg = addr - this->base;
        int word_addr = reg >> 1;
        int value;

        /* It's used for probing time */
        switch (reg) {
        case ONENAND_REG_MANUFACTURER_ID:
                return s3c_read_reg(MANUFACT_ID_OFFSET);
        case ONENAND_REG_DEVICE_ID:
                return s3c_read_reg(DEVICE_ID_OFFSET);
        case ONENAND_REG_VERSION_ID:
                return s3c_read_reg(FLASH_VER_ID_OFFSET);
        case ONENAND_REG_DATA_BUFFER_SIZE:
                return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
        case ONENAND_REG_TECHNOLOGY:
                return s3c_read_reg(TECH_OFFSET);
        case ONENAND_REG_SYS_CFG1:
                return s3c_read_reg(MEM_CFG_OFFSET);

        /* Used at unlock all status */
        case ONENAND_REG_CTRL_STATUS:
                return 0;

        case ONENAND_REG_WP_STATUS:
                return ONENAND_WP_US;

        default:
                break;
        }

        /* BootRAM access control */
        if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
                if (word_addr == 0)
                        return s3c_read_reg(MANUFACT_ID_OFFSET);
                if (word_addr == 1)
                        return s3c_read_reg(DEVICE_ID_OFFSET);
                if (word_addr == 2)
                        return s3c_read_reg(FLASH_VER_ID_OFFSET);
        }

        value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
        dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
                 word_addr, value);
        return value;
}

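/*
 * Emulate 16-bit OneNAND register writes.  SYS_CFG1 is redirected to
 * the controller's MEM_CFG register, BootRAM READID/RESET commands are
 * handled locally, and anything else is forwarded as a MAP_11 access.
 */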
static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
{
        struct onenand_chip *this = onenand->mtd->priv;
        struct device *dev = &onenand->pdev->dev;
        unsigned int reg = addr - this->base;
        unsigned int word_addr = reg >> 1;

        /* It's used for probing time */
        switch (reg) {
        case ONENAND_REG_SYS_CFG1:
                s3c_write_reg(value, MEM_CFG_OFFSET);
                return;

        case ONENAND_REG_START_ADDRESS1:
        case ONENAND_REG_START_ADDRESS2:
                return;

        /* Lock/lock-tight/unlock/unlock_all */
        case ONENAND_REG_START_BLOCK_ADDRESS:
                return;

        default:
                break;
        }

        /* BootRAM access control */
        if ((unsigned int) addr < ONENAND_DATARAM) {
                if (value == ONENAND_CMD_READID) {
                        onenand->bootram_command = 1;
                        return;
                }
                if (value == ONENAND_CMD_RESET) {
                        s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
                        onenand->bootram_command = 0;
                        return;
                }
        }

        dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
                 word_addr, value);

        s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
}

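/*
 * Wait for the interrupt status bits that correspond to the current
 * operation and translate ECC and controller errors into errno values.
 */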
static int s3c_onenand_wait(struct mtd_info *mtd, int state)
{
        struct device *dev = &onenand->pdev->dev;
        unsigned int flags = INT_ACT;
        unsigned int stat, ecc;
        unsigned long timeout;

        switch (state) {
        case FL_READING:
                flags |= BLK_RW_CMP | LOAD_CMP;
                break;
        case FL_WRITING:
                flags |= BLK_RW_CMP | PGM_CMP;
                break;
        case FL_ERASING:
                flags |= BLK_RW_CMP | ERS_CMP;
                break;
        case FL_LOCKING:
                flags |= BLK_RW_CMP;
                break;
        default:
                break;
        }

        /* The 20 msec is enough */
        timeout = jiffies + msecs_to_jiffies(20);
        while (time_before(jiffies, timeout)) {
                stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
                if (stat & flags)
                        break;

                if (state != FL_READING)
                        cond_resched();
        }
        /* To get correct interrupt status in timeout case */
        stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
        s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

        /*
         * The spec says to check the controller status first, but to get
         * correct information during power-off recovery (POR) testing the
         * ECC status has to be read first.
         */
        if (stat & LOAD_CMP) {
                ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
                if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
                        dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
                                 ecc);
                        mtd->ecc_stats.failed++;
                        return -EBADMSG;
                }
        }

        if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
                dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
                         stat);
                if (stat & LOCKED_BLK)
                        dev_info(dev, "%s: it's locked error = 0x%04x\n",
                                 __func__, stat);

                return -EIO;
        }

        return 0;
}

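/*
 * Dispatch a OneNAND command.  The S3C64XX/S5PC100 controller has no
 * directly accessible BufferRAM, so two BufferRAMs are emulated in
 * page_buf/oob_buf and data is moved with 32-bit MAP_01 accesses,
 * while erase and unlock commands are issued through MAP_10.
 */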
static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
                               size_t len)
{
        struct onenand_chip *this = mtd->priv;
        unsigned int *m, *s;
        int fba, fpa, fsa = 0;
        unsigned int mem_addr, cmd_map_01, cmd_map_10;
        int i, mcount, scount;
        int index;

        fba = (int) (addr >> this->erase_shift);
        fpa = (int) (addr >> this->page_shift);
        fpa &= this->page_mask;

        mem_addr = onenand->mem_addr(fba, fpa, fsa);
        cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
        cmd_map_10 = CMD_MAP_10(onenand, mem_addr);

        switch (cmd) {
        case ONENAND_CMD_READ:
        case ONENAND_CMD_READOOB:
        case ONENAND_CMD_BUFFERRAM:
                ONENAND_SET_NEXT_BUFFERRAM(this);
        default:
                break;
        }

        index = ONENAND_CURRENT_BUFFERRAM(this);

        /*
         * Emulate two BufferRAMs and access them through 4-byte pointers
         */
        m = (unsigned int *) onenand->page_buf;
        s = (unsigned int *) onenand->oob_buf;

        if (index) {
                m += (this->writesize >> 2);
                s += (mtd->oobsize >> 2);
        }

        mcount = mtd->writesize >> 2;
        scount = mtd->oobsize >> 2;

        switch (cmd) {
        case ONENAND_CMD_READ:
                /* Main */
                for (i = 0; i < mcount; i++)
                        *m++ = s3c_read_cmd(cmd_map_01);
                return 0;

        case ONENAND_CMD_READOOB:
                s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
                /* Main */
                for (i = 0; i < mcount; i++)
                        *m++ = s3c_read_cmd(cmd_map_01);

                /* Spare */
                for (i = 0; i < scount; i++)
                        *s++ = s3c_read_cmd(cmd_map_01);

                s3c_write_reg(0, TRANS_SPARE_OFFSET);
                return 0;

        case ONENAND_CMD_PROG:
                /* Main */
                for (i = 0; i < mcount; i++)
                        s3c_write_cmd(*m++, cmd_map_01);
                return 0;

        case ONENAND_CMD_PROGOOB:
                s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);

                /* Main - dummy write */
                for (i = 0; i < mcount; i++)
                        s3c_write_cmd(0xffffffff, cmd_map_01);

                /* Spare */
                for (i = 0; i < scount; i++)
                        s3c_write_cmd(*s++, cmd_map_01);

                s3c_write_reg(0, TRANS_SPARE_OFFSET);
                return 0;

        case ONENAND_CMD_UNLOCK_ALL:
                s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
                return 0;

        case ONENAND_CMD_ERASE:
                s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
                return 0;

        default:
                break;
        }

        return 0;
}

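/*
 * Return a pointer into the emulated BufferRAM (page_buf for main data,
 * oob_buf for spare data) that corresponds to the currently selected
 * BufferRAM index.
 */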
static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
{
        struct onenand_chip *this = mtd->priv;
        int index = ONENAND_CURRENT_BUFFERRAM(this);
        unsigned char *p;

        if (area == ONENAND_DATARAM) {
                p = (unsigned char *) onenand->page_buf;
                if (index == 1)
                        p += this->writesize;
        } else {
                p = (unsigned char *) onenand->oob_buf;
                if (index == 1)
                        p += mtd->oobsize;
        }

        return p;
}

static int onenand_read_bufferram(struct mtd_info *mtd, int area,
                                  unsigned char *buffer, int offset,
                                  size_t count)
{
        unsigned char *p;

        p = s3c_get_bufferram(mtd, area);
        memcpy(buffer, p + offset, count);
        return 0;
}

static int onenand_write_bufferram(struct mtd_info *mtd, int area,
                                   const unsigned char *buffer, int offset,
                                   size_t count)
{
        unsigned char *p;

        p = s3c_get_bufferram(mtd, area);
        memcpy(p + offset, buffer, count);
        return 0;
}

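/*
 * DMA transfer hook used by the S5PC110 read path: s5pc110_dma_poll()
 * busy-waits on the transfer status, s5pc110_dma_irq() waits for the
 * DMA-done interrupt.  The IRQ variant is selected at probe time when
 * an interrupt resource is available.
 */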
static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction);

static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction)
{
        void __iomem *base = onenand->dma_addr;
        int status;
        unsigned long timeout;

        writel(src, base + S5PC110_DMA_SRC_ADDR);
        writel(dst, base + S5PC110_DMA_DST_ADDR);

        if (direction == S5PC110_DMA_DIR_READ) {
                writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
                writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
        } else {
                writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
                writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
        }

        writel(count, base + S5PC110_DMA_TRANS_SIZE);
        writel(direction, base + S5PC110_DMA_TRANS_DIR);

        writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);

        /*
         * The spec gives no exact timeout value; in practice a transfer
         * takes well under 1 msec, so 20 msec is more than enough.
         */
        timeout = jiffies + msecs_to_jiffies(20);

        do {
                status = readl(base + S5PC110_DMA_TRANS_STATUS);
                if (status & S5PC110_DMA_TRANS_STATUS_TE) {
                        writel(S5PC110_DMA_TRANS_CMD_TEC,
                               base + S5PC110_DMA_TRANS_CMD);
                        return -EIO;
                }
        } while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
                 time_before(jiffies, timeout));

        writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);

        return 0;
}

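/*
 * DMA interrupt handler: acknowledge the transfer-done or transfer-error
 * condition and wake up the thread waiting in s5pc110_dma_irq().
 */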
static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
{
        void __iomem *base = onenand->dma_addr;
        int status, cmd = 0;

        status = readl(base + S5PC110_INTC_DMA_STATUS);

        if (likely(status & S5PC110_INTC_DMA_TD))
                cmd = S5PC110_DMA_TRANS_CMD_TDC;

        if (unlikely(status & S5PC110_INTC_DMA_TE))
                cmd = S5PC110_DMA_TRANS_CMD_TEC;

        writel(cmd, base + S5PC110_DMA_TRANS_CMD);
        writel(status, base + S5PC110_INTC_DMA_CLR);

        if (!onenand->complete.done)
                complete(&onenand->complete);

        return IRQ_HANDLED;
}

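/*
 * Interrupt-driven DMA transfer: unmask the DMA interrupts, program the
 * engine the same way as the polled variant and sleep until the
 * interrupt handler signals completion (or a 20 msec timeout expires).
 */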
static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction)
{
        void __iomem *base = onenand->dma_addr;
        int status;

        status = readl(base + S5PC110_INTC_DMA_MASK);
        if (status) {
                status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
                writel(status, base + S5PC110_INTC_DMA_MASK);
        }

        writel(src, base + S5PC110_DMA_SRC_ADDR);
        writel(dst, base + S5PC110_DMA_DST_ADDR);

        if (direction == S5PC110_DMA_DIR_READ) {
                writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
                writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
        } else {
                writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
                writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
        }

        writel(count, base + S5PC110_DMA_TRANS_SIZE);
        writel(direction, base + S5PC110_DMA_TRANS_DIR);

        writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);

        wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20));

        return 0;
}

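/*
 * Read from the S5PC110's memory-mapped BufferRAM.  Word-aligned,
 * page-sized reads into lowmem buffers or into vmalloc buffers that do
 * not cross a page boundary are done by DMA; everything else falls back
 * to a plain memcpy.
 */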
static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
                                  unsigned char *buffer, int offset, size_t count)
{
        struct onenand_chip *this = mtd->priv;
        void __iomem *p;
        void *buf = (void *) buffer;
        dma_addr_t dma_src, dma_dst;
        int err, ofs, page_dma = 0;
        struct device *dev = &onenand->pdev->dev;

        p = this->base + area;
        if (ONENAND_CURRENT_BUFFERRAM(this)) {
                if (area == ONENAND_DATARAM)
                        p += this->writesize;
                else
                        p += mtd->oobsize;
        }

        if (offset & 3 || (size_t) buf & 3 ||
            !onenand->dma_addr || count != mtd->writesize)
                goto normal;

        /* Handle vmalloc address */
        if (buf >= high_memory) {
                struct page *page;

                if (((size_t) buf & PAGE_MASK) !=
                    ((size_t) (buf + count - 1) & PAGE_MASK))
                        goto normal;
                page = vmalloc_to_page(buf);
                if (!page)
                        goto normal;

                /* Page offset */
                ofs = ((size_t) buf & ~PAGE_MASK);
                page_dma = 1;

                /* DMA routine */
                dma_src = onenand->phys_base + (p - this->base);
                dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
        } else {
                /* DMA routine */
                dma_src = onenand->phys_base + (p - this->base);
                dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
        }
        if (dma_mapping_error(dev, dma_dst)) {
                dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count);
                goto normal;
        }
        err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
                              count, S5PC110_DMA_DIR_READ);

        if (page_dma)
                dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
        else
                dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);

        if (!err)
                return 0;

normal:
        if (count != mtd->writesize) {
                /* Copy the bufferram to memory to prevent unaligned access */
                memcpy(this->page_buf, p, mtd->writesize);
                p = this->page_buf + offset;
        }

        memcpy(buffer, p, count);

        return 0;
}

static int s5pc110_chip_probe(struct mtd_info *mtd)
{
        /* Now just return 0 */
        return 0;
}

static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
{
        unsigned int flags = INT_ACT | LOAD_CMP;
        unsigned int stat;
        unsigned long timeout;

        /* The 20 msec is enough */
        timeout = jiffies + msecs_to_jiffies(20);
        while (time_before(jiffies, timeout)) {
                stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
                if (stat & flags)
                        break;
        }
        /* To get correct interrupt status in timeout case */
        stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
        s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

        if (stat & LD_FAIL_ECC_ERR) {
                s3c_onenand_reset();
                return ONENAND_BBT_READ_ERROR;
        }

        if (stat & LOAD_CMP) {
                int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);

                if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
                        s3c_onenand_reset();
                        return ONENAND_BBT_READ_ERROR;
                }
        }

        return 0;
}

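/*
 * Walk every block, issue a dummy MAP_01 read of its first page and
 * report (and acknowledge) any block that raises the LOCKED_BLK error.
 */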
static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
{
        struct onenand_chip *this = mtd->priv;
        struct device *dev = &onenand->pdev->dev;
        unsigned int block, end;
        int tmp;

        end = this->chipsize >> this->erase_shift;

        for (block = 0; block < end; block++) {
                unsigned int mem_addr = onenand->mem_addr(block, 0, 0);

                tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));

                if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
                        dev_err(dev, "block %d is write-protected!\n", block);
                        s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
                }
        }
}

static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
                                    size_t len, int cmd)
{
        struct onenand_chip *this = mtd->priv;
        int start, end, start_mem_addr, end_mem_addr;

        start = ofs >> this->erase_shift;
        start_mem_addr = onenand->mem_addr(start, 0, 0);
        end = start + (len >> this->erase_shift) - 1;
        end_mem_addr = onenand->mem_addr(end, 0, 0);

        if (cmd == ONENAND_CMD_LOCK) {
                s3c_write_cmd(ONENAND_LOCK_START,
                              CMD_MAP_10(onenand, start_mem_addr));
                s3c_write_cmd(ONENAND_LOCK_END,
                              CMD_MAP_10(onenand, end_mem_addr));
        } else {
                s3c_write_cmd(ONENAND_UNLOCK_START,
                              CMD_MAP_10(onenand, start_mem_addr));
                s3c_write_cmd(ONENAND_UNLOCK_END,
                              CMD_MAP_10(onenand, end_mem_addr));
        }

        this->wait(mtd, FL_LOCKING);
}

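/*
 * Unlock the whole device.  If the chip supports UNLOCK_ALL it is used
 * first; on DDP parts the second die is then unlocked block-by-block as
 * a workaround, and the lock status is verified afterwards.
 */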
static void s3c_unlock_all(struct mtd_info *mtd)
{
        struct onenand_chip *this = mtd->priv;
        loff_t ofs = 0;
        size_t len = this->chipsize;

        if (this->options & ONENAND_HAS_UNLOCK_ALL) {
                /* Write unlock command */
                this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);

                /* No need to check return value */
                this->wait(mtd, FL_LOCKING);

                /* Workaround for all block unlock in DDP */
                if (!ONENAND_IS_DDP(this)) {
                        s3c_onenand_check_lock_status(mtd);
                        return;
                }

                /* All blocks on another chip */
                ofs = this->chipsize >> 1;
                len = this->chipsize >> 1;
        }

        s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);

        s3c_onenand_check_lock_status(mtd);
}

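/*
 * Install the SoC-specific address/command helpers and override the
 * generic OneNAND accessors.  The S5PC110 maps the real BufferRAM, so
 * it only needs a DMA-capable read_bufferram and a chip_probe stub.
 */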
static void s3c_onenand_setup(struct mtd_info *mtd)
{
        struct onenand_chip *this = mtd->priv;

        onenand->mtd = mtd;

        if (onenand->type == TYPE_S3C6400) {
                onenand->mem_addr = s3c6400_mem_addr;
                onenand->cmd_map = s3c64xx_cmd_map;
        } else if (onenand->type == TYPE_S3C6410) {
                onenand->mem_addr = s3c6410_mem_addr;
                onenand->cmd_map = s3c64xx_cmd_map;
        } else if (onenand->type == TYPE_S5PC100) {
                onenand->mem_addr = s5pc100_mem_addr;
                onenand->cmd_map = s5pc1xx_cmd_map;
        } else if (onenand->type == TYPE_S5PC110) {
                /* Use generic onenand functions */
                this->read_bufferram = s5pc110_read_bufferram;
                this->chip_probe = s5pc110_chip_probe;
                return;
        } else {
                BUG();
        }

        this->read_word = s3c_onenand_readw;
        this->write_word = s3c_onenand_writew;

        this->wait = s3c_onenand_wait;
        this->bbt_wait = s3c_onenand_bbt_wait;
        this->unlock_all = s3c_unlock_all;
        this->command = s3c_onenand_command;

        this->read_bufferram = onenand_read_bufferram;
        this->write_bufferram = onenand_write_bufferram;
}

static int s3c_onenand_probe(struct platform_device *pdev)
{
        struct onenand_platform_data *pdata;
        struct onenand_chip *this;
        struct mtd_info *mtd;
        struct resource *r;
        int size, err;

        pdata = pdev->dev.platform_data;
        /* No need to check pdata; the platform data is optional */

        size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
        mtd = kzalloc(size, GFP_KERNEL);
        if (!mtd) {
                dev_err(&pdev->dev, "failed to allocate memory\n");
                return -ENOMEM;
        }

        onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
        if (!onenand) {
                err = -ENOMEM;
                goto onenand_fail;
        }

        this = (struct onenand_chip *) &mtd[1];
        mtd->priv = this;
        mtd->dev.parent = &pdev->dev;
        mtd->owner = THIS_MODULE;
        onenand->pdev = pdev;
        onenand->type = platform_get_device_id(pdev)->driver_data;

        s3c_onenand_setup(mtd);

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(&pdev->dev, "no memory resource defined\n");
                err = -ENOENT;
                goto resource_failed;
        }

        onenand->base_res = request_mem_region(r->start, resource_size(r),
                                               pdev->name);
        if (!onenand->base_res) {
                dev_err(&pdev->dev, "failed to request memory resource\n");
                err = -EBUSY;
                goto resource_failed;
        }

        onenand->base = ioremap(r->start, resource_size(r));
        if (!onenand->base) {
                dev_err(&pdev->dev, "failed to map memory resource\n");
                err = -EFAULT;
                goto ioremap_failed;
        }
        /* Set onenand_chip also */
        this->base = onenand->base;

        /* Use runtime badblock check */
        this->options |= ONENAND_SKIP_UNLOCK_CHECK;

        if (onenand->type != TYPE_S5PC110) {
                r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (!r) {
                        dev_err(&pdev->dev, "no buffer memory resource defined\n");
                        err = -ENOENT;
                        goto ahb_resource_failed;
                }

                onenand->ahb_res = request_mem_region(r->start, resource_size(r),
                                                      pdev->name);
                if (!onenand->ahb_res) {
                        dev_err(&pdev->dev, "failed to request buffer memory resource\n");
                        err = -EBUSY;
                        goto ahb_resource_failed;
                }

                onenand->ahb_addr = ioremap(r->start, resource_size(r));
                if (!onenand->ahb_addr) {
                        dev_err(&pdev->dev, "failed to map buffer memory resource\n");
                        err = -EINVAL;
                        goto ahb_ioremap_failed;
                }

                /* Allocate 4KiB BufferRAM */
                onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
                if (!onenand->page_buf) {
                        err = -ENOMEM;
                        goto page_buf_fail;
                }

                /* Allocate 128 bytes of SpareRAM */
                onenand->oob_buf = kzalloc(128, GFP_KERNEL);
                if (!onenand->oob_buf) {
                        err = -ENOMEM;
                        goto oob_buf_fail;
                }

                /* S3C doesn't handle subpage write */
                mtd->subpage_sft = 0;
                this->subpagesize = mtd->writesize;

        } else { /* S5PC110 */
                r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (!r) {
                        dev_err(&pdev->dev, "no dma memory resource defined\n");
                        err = -ENOENT;
                        goto dma_resource_failed;
                }

                onenand->dma_res = request_mem_region(r->start, resource_size(r),
                                                      pdev->name);
                if (!onenand->dma_res) {
                        dev_err(&pdev->dev, "failed to request dma memory resource\n");
                        err = -EBUSY;
                        goto dma_resource_failed;
                }

                onenand->dma_addr = ioremap(r->start, resource_size(r));
                if (!onenand->dma_addr) {
                        dev_err(&pdev->dev, "failed to map dma memory resource\n");
                        err = -EINVAL;
                        goto dma_ioremap_failed;
                }

                onenand->phys_base = onenand->base_res->start;

                s5pc110_dma_ops = s5pc110_dma_poll;
                /* Interrupt support */
                r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
                if (r) {
                        init_completion(&onenand->complete);
                        s5pc110_dma_ops = s5pc110_dma_irq;
                        err = request_irq(r->start, s5pc110_onenand_irq,
                                          IRQF_SHARED, "onenand", &onenand);
                        if (err) {
                                dev_err(&pdev->dev, "failed to get irq\n");
                                goto scan_failed;
                        }
                }
        }

        if (onenand_scan(mtd, 1)) {
                err = -EFAULT;
                goto scan_failed;
        }

        if (onenand->type != TYPE_S5PC110) {
                /* S3C doesn't handle subpage write */
                mtd->subpage_sft = 0;
                this->subpagesize = mtd->writesize;
        }

        if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
                dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");

        err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
        if (err > 0)
                mtd_device_register(mtd, onenand->parts, err);
        else if (err <= 0 && pdata && pdata->parts)
                mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
        else
                err = mtd_device_register(mtd, NULL, 0);

        platform_set_drvdata(pdev, mtd);

        return 0;

scan_failed:
        if (onenand->dma_addr)
                iounmap(onenand->dma_addr);
dma_ioremap_failed:
        if (onenand->dma_res)
                release_mem_region(onenand->dma_res->start,
                                   resource_size(onenand->dma_res));
        kfree(onenand->oob_buf);
oob_buf_fail:
        kfree(onenand->page_buf);
page_buf_fail:
        if (onenand->ahb_addr)
                iounmap(onenand->ahb_addr);
ahb_ioremap_failed:
        if (onenand->ahb_res)
                release_mem_region(onenand->ahb_res->start,
                                   resource_size(onenand->ahb_res));
dma_resource_failed:
ahb_resource_failed:
        iounmap(onenand->base);
ioremap_failed:
        if (onenand->base_res)
                release_mem_region(onenand->base_res->start,
                                   resource_size(onenand->base_res));
resource_failed:
        kfree(onenand);
onenand_fail:
        kfree(mtd);
        return err;
}

static int __devexit s3c_onenand_remove(struct platform_device *pdev)
{
        struct mtd_info *mtd = platform_get_drvdata(pdev);

        onenand_release(mtd);
        if (onenand->ahb_addr)
                iounmap(onenand->ahb_addr);
        if (onenand->ahb_res)
                release_mem_region(onenand->ahb_res->start,
                                   resource_size(onenand->ahb_res));
        if (onenand->dma_addr)
                iounmap(onenand->dma_addr);
        if (onenand->dma_res)
                release_mem_region(onenand->dma_res->start,
                                   resource_size(onenand->dma_res));

        iounmap(onenand->base);
        release_mem_region(onenand->base_res->start,
                           resource_size(onenand->base_res));

        platform_set_drvdata(pdev, NULL);
        kfree(onenand->oob_buf);
        kfree(onenand->page_buf);
        kfree(onenand);
        kfree(mtd);
        return 0;
}

static int s3c_pm_ops_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct mtd_info *mtd = platform_get_drvdata(pdev);
        struct onenand_chip *this = mtd->priv;

        this->wait(mtd, FL_PM_SUSPENDED);
        return 0;
}

static int s3c_pm_ops_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct mtd_info *mtd = platform_get_drvdata(pdev);
        struct onenand_chip *this = mtd->priv;

        this->unlock_all(mtd);
        return 0;
}

static const struct dev_pm_ops s3c_pm_ops = {
        .suspend        = s3c_pm_ops_suspend,
        .resume         = s3c_pm_ops_resume,
};

static struct platform_device_id s3c_onenand_driver_ids[] = {
        {
                .name           = "s3c6400-onenand",
                .driver_data    = TYPE_S3C6400,
        }, {
                .name           = "s3c6410-onenand",
                .driver_data    = TYPE_S3C6410,
        }, {
                .name           = "s5pc100-onenand",
                .driver_data    = TYPE_S5PC100,
        }, {
                .name           = "s5pc110-onenand",
                .driver_data    = TYPE_S5PC110,
        }, { },
};
MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);

static struct platform_driver s3c_onenand_driver = {
        .driver         = {
                .name   = "samsung-onenand",
                .pm     = &s3c_pm_ops,
        },
        .id_table       = s3c_onenand_driver_ids,
        .probe          = s3c_onenand_probe,
        .remove         = __devexit_p(s3c_onenand_remove),
};

static int __init s3c_onenand_init(void)
{
        return platform_driver_register(&s3c_onenand_driver);
}

static void __exit s3c_onenand_exit(void)
{
        platform_driver_unregister(&s3c_onenand_driver);
}

module_init(s3c_onenand_init);
module_exit(s3c_onenand_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
MODULE_DESCRIPTION("Samsung OneNAND controller support");