iwlwifi: don't include iwl-dev.h from iwl-devtrace.h
[deliverable/linux.git] / drivers / mtd / onenand / omap2.c
CommitLineData
36cd4fb5
AH
1/*
2 * linux/drivers/mtd/onenand/omap2.c
3 *
4 * OneNAND driver for OMAP2 / OMAP3
5 *
6 * Copyright © 2005-2006 Nokia Corporation
7 *
8 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
9 * IRQ and DMA support written by Timo Teras
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published by
13 * the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program; see the file COPYING. If not, write to the Free Software
22 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 *
24 */
25
26#include <linux/device.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/onenand.h>
31#include <linux/mtd/partitions.h>
32#include <linux/platform_device.h>
33#include <linux/interrupt.h>
34#include <linux/delay.h>
cbbd6956
AH
35#include <linux/dma-mapping.h>
36#include <linux/io.h>
36cd4fb5 37
36cd4fb5 38#include <asm/mach/flash.h>
ce491cf8
TL
39#include <plat/gpmc.h>
40#include <plat/onenand.h>
fe875358 41#include <mach/gpio.h>
36cd4fb5 42
ce491cf8 43#include <plat/dma.h>
36cd4fb5 44
ce491cf8 45#include <plat/board.h>
36cd4fb5
AH
46
47#define DRIVER_NAME "omap2-onenand"
48
49#define ONENAND_IO_SIZE SZ_128K
50#define ONENAND_BUFRAM_SIZE (1024 * 5)
51
/* Per-device driver state, embedding the MTD and OneNAND chip structures. */
struct omap2_onenand {
	struct platform_device *pdev;	/* backing platform device */
	int gpmc_cs;			/* GPMC chip-select the chip sits on */
	unsigned long phys_base;	/* physical base returned by gpmc_cs_request() */
	int gpio_irq;			/* GPIO used as READY/INT line; 0 = unused */
	struct mtd_info mtd;		/* embedded MTD; container_of() recovers this struct */
	struct mtd_partition *parts;	/* partition table, if registered */
	struct onenand_chip onenand;	/* embedded OneNAND chip state */
	struct completion irq_done;	/* signalled by the GPIO interrupt handler */
	struct completion dma_done;	/* signalled by the DMA completion callback */
	int dma_channel;		/* OMAP DMA channel; -1 = PIO only */
	int freq;			/* interface frequency in MHz, from version id */
	int (*setup)(void __iomem *base, int freq);	/* board timing hook */
};
66
67static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
68{
69 struct omap2_onenand *c = data;
70
71 complete(&c->dma_done);
72}
73
74static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
75{
76 struct omap2_onenand *c = dev_id;
77
78 complete(&c->irq_done);
79
80 return IRQ_HANDLED;
81}
82
/* Read a 16-bit OneNAND register at byte offset @reg from the mapped base. */
static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}
87
/* Write a 16-bit OneNAND register at byte offset @reg from the mapped base. */
static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}
93
/* Log a fatal wait condition with controller and interrupt status. */
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}
99
/* Log a non-fatal wait condition with controller and interrupt status. */
static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}
106
/*
 * Wait for the current OneNAND operation to finish.
 *
 * Strategy depends on @state:
 *  - FL_RESETING / FL_PREPARING_ERASE / FL_VERIFYING_ERASE: short
 *    udelay() poll of the interrupt register.
 *  - anything else except FL_READING: interrupt-driven wait on irq_done,
 *    with a 20 ms timeout retried up to 3 times while the controller
 *    reports the operation still ongoing.
 *  - FL_READING: interrupts turned off, jiffies-based polling loop with
 *    the same 20 ms / 3-retry scheme.
 *
 * Afterwards, read-ECC status is checked and accounted in mtd->ecc_stats.
 * Returns 0 on success, -EIO on controller error or timeout, -EBADMSG on
 * an uncorrectable 2-bit ECC error.
 */
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;	/* up to 20 us of polling (21 loop iterations) */
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;	/* erase verify gets up to 100 us */
			break;
		}

		/* Poll until the master interrupt bit comes up or we run out. */
		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		/* All expected interrupt bits must be set, not just some. */
		if ((intr & intr_flags) != intr_flags) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			/* -1 from gpio_get_value() indicates a GPIO read error */
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		/* GPIO still low (or no GPIO): the op hasn't completed yet,
		 * so sleep until the interrupt handler signals irq_done. */
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				/* Uncorrectable: report a bad message. */
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				/* Corrected by hardware; just account it. */
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		/* A read that never raised ONENAND_INT_READ is a timeout. */
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	/* Any unexpected status bit (mask 0xFE9F) is worth a warning. */
	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
276
277static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
278{
279 struct onenand_chip *this = mtd->priv;
280
281 if (ONENAND_CURRENT_BUFFERRAM(this)) {
282 if (area == ONENAND_DATARAM)
00acf4a8 283 return this->writesize;
36cd4fb5
AH
284 if (area == ONENAND_SPARERAM)
285 return mtd->oobsize;
286 }
287
288 return 0;
289}
290
291#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
292
/*
 * OMAP3 BufferRAM read: DMA the data out when profitable, otherwise
 * fall back to memcpy.  DMA is skipped for unaligned buffers/offsets,
 * transfers under 384 bytes, interrupt context (panic_write), and
 * vmalloc buffers that straddle a page boundary.  On any DMA problem
 * (mapping failure or timeout) the copy is redone by CPU, so the
 * function itself never fails.
 */
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		/* DMA needs a physically contiguous buffer; only allow a
		 * vmalloc address if the transfer stays within one page. */
		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	/* Copy the unaligned tail by CPU; DMA moves whole 32-bit words. */
	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	/* Busy-wait on the completion flag (may be called where we
	 * cannot sleep); give the DMA 20 ms to finish. */
	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}
370
371static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
372 const unsigned char *buffer,
373 int offset, size_t count)
374{
375 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
376 struct onenand_chip *this = mtd->priv;
377 dma_addr_t dma_src, dma_dst;
378 int bram_offset;
379 unsigned long timeout;
380 void *buf = (void *)buffer;
381 volatile unsigned *done;
382
383 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
384 if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
385 goto out_copy;
386
387 /* panic_write() may be in an interrupt context */
388 if (in_interrupt())
389 goto out_copy;
390
391 if (buf >= high_memory) {
392 struct page *p1;
393
394 if (((size_t)buf & PAGE_MASK) !=
395 ((size_t)(buf + count - 1) & PAGE_MASK))
396 goto out_copy;
397 p1 = vmalloc_to_page(buf);
398 if (!p1)
399 goto out_copy;
400 buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
401 }
402
403 dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
404 dma_dst = c->phys_base + bram_offset;
405 if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
406 dev_err(&c->pdev->dev,
407 "Couldn't DMA map a %d byte buffer\n",
408 count);
409 return -1;
410 }
411
412 omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
413 count >> 2, 1, 0, 0, 0);
414 omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
415 dma_src, 0, 0);
416 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
417 dma_dst, 0, 0);
418
419 INIT_COMPLETION(c->dma_done);
420 omap_start_dma(c->dma_channel);
421
422 timeout = jiffies + msecs_to_jiffies(20);
423 done = &c->dma_done.done;
424 while (time_before(jiffies, timeout))
425 if (*done)
426 break;
427
428 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
429
430 if (!*done) {
431 dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
432 goto out_copy;
433 }
434
435 return 0;
436
437out_copy:
438 memcpy(this->base + bram_offset, buf, count);
439 return 0;
440}
441
442#else
443
444int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
445 unsigned char *buffer, int offset,
446 size_t count);
447
448int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
449 const unsigned char *buffer,
450 int offset, size_t count);
451
452#endif
453
454#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
455
/*
 * OMAP2 BufferRAM read.  The DMA path below is intentionally disabled
 * by the "1 ||" in the condition (PM implications not yet resolved),
 * so in practice this always memcpy()s.  The DMA code is kept so it
 * can be re-enabled once power management is revisited.
 */
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	/* Sleeping wait is fine here; OMAP2 path is never called from
	 * interrupt context when DMA is enabled. */
	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}
500
501static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
502 const unsigned char *buffer,
503 int offset, size_t count)
504{
505 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
506 struct onenand_chip *this = mtd->priv;
507 dma_addr_t dma_src, dma_dst;
508 int bram_offset;
509
510 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
511 /* DMA is not used. Revisit PM requirements before enabling it. */
512 if (1 || (c->dma_channel < 0) ||
513 ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
514 (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
515 memcpy((__force void *)(this->base + bram_offset), buffer,
516 count);
517 return 0;
518 }
519
520 dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
521 DMA_TO_DEVICE);
522 dma_dst = c->phys_base + bram_offset;
523 if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
524 dev_err(&c->pdev->dev,
525 "Couldn't DMA map a %d byte buffer\n",
526 count);
527 return -1;
528 }
529
530 omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
531 count / 2, 1, 0, 0, 0);
532 omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
533 dma_src, 0, 0);
534 omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
535 dma_dst, 0, 0);
536
537 INIT_COMPLETION(c->dma_done);
538 omap_start_dma(c->dma_channel);
539 wait_for_completion(&c->dma_done);
540
541 dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
542
543 return 0;
544}
545
546#else
547
548int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
549 unsigned char *buffer, int offset,
550 size_t count);
551
552int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
553 const unsigned char *buffer,
554 int offset, size_t count);
555
556#endif
557
558static struct platform_driver omap2_onenand_driver;
559
560static int __adjust_timing(struct device *dev, void *data)
561{
562 int ret = 0;
563 struct omap2_onenand *c;
564
565 c = dev_get_drvdata(dev);
566
567 BUG_ON(c->setup == NULL);
568
569 /* DMA is not in use so this is all that is needed */
570 /* Revisit for OMAP3! */
571 ret = c->setup(c->onenand.base, c->freq);
572
573 return ret;
574}
575
576int omap2_onenand_rephase(void)
577{
578 return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
579 NULL, __adjust_timing);
580}
581
/* Clear the BufferRAM before reboot so the boot ROM probes correctly. */
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
592
/*
 * Probe: allocate driver state, claim the GPMC chip-select and I/O
 * region, map the chip, optionally hook up the GPIO interrupt and DMA
 * channel, scan the OneNAND, and register the MTD (with partitions if
 * provided).  Resources are unwound in reverse order via the goto
 * ladder on any failure.
 */
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	/* Claim the chip-select; this also yields the physical base. */
	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n",	c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	/* Let the board code configure GPMC timings for this chip. */
	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev,  "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
	}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			/* DMA is optional; fall back to PIO. */
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	/* When DMA is available, install the custom wait and transfer
	 * routines (OMAP3 and OMAP2 variants differ). */
	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	/* Derive the interface frequency (MHz) from the chip version id. */
	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}

#ifdef CONFIG_MTD_PARTITIONS
	if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts,
				       pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r < 0)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}
760
/*
 * Remove: unregister the MTD (partitions or whole device), release the
 * OneNAND, free DMA/IRQ/GPIO, zero the BufferRAM via shutdown, unmap
 * and release the I/O region and GPMC chip-select, and free the state.
 */
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
	if (c->parts)
		del_mtd_partitions(&c->mtd);
	else
		del_mtd_device(&c->mtd);
#else
	del_mtd_device(&c->mtd);
#endif

	onenand_release(&c->mtd);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	/* Zero the BufferRAM so the boot ROM re-probes correctly. */
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	gpmc_cs_free(c->gpmc_cs);
	kfree(c);

	return 0;
}
792
/* Platform driver glue; shutdown clears the BufferRAM before reboot. */
static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner  = THIS_MODULE,
	},
};
802
803static int __init omap2_onenand_init(void)
804{
805 printk(KERN_INFO "OneNAND driver initializing\n");
806 return platform_driver_register(&omap2_onenand_driver);
807}
808
/* Module exit point: unregister the platform driver. */
static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}
813
814module_init(omap2_onenand_init);
815module_exit(omap2_onenand_exit);
816
817MODULE_ALIAS(DRIVER_NAME);
818MODULE_LICENSE("GPL");
819MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
820MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
This page took 0.224072 seconds and 5 git commands to generate.