Merge branches 'iommu/fixes', 'dma-debug', 'x86/amd', 'x86/vt-d', 'arm/tegra' and...
[deliverable/linux.git] / drivers / mtd / nand / lpc32xx_slc.c
1 /*
2 * NXP LPC32XX NAND SLC driver
3 *
4 * Authors:
5 * Kevin Wells <kevin.wells@nxp.com>
6 * Roland Stigge <stigge@antcom.de>
7 *
8 * Copyright © 2011 NXP Semiconductors
9 * Copyright © 2012 Roland Stigge
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21
22 #include <linux/slab.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/mtd/mtd.h>
26 #include <linux/mtd/nand.h>
27 #include <linux/mtd/partitions.h>
28 #include <linux/clk.h>
29 #include <linux/err.h>
30 #include <linux/delay.h>
31 #include <linux/io.h>
32 #include <linux/mm.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/dmaengine.h>
35 #include <linux/mtd/nand_ecc.h>
36 #include <linux/gpio.h>
37 #include <linux/of.h>
38 #include <linux/of_mtd.h>
39 #include <linux/of_gpio.h>
40 #include <linux/mtd/lpc32xx_slc.h>
41
42 #define LPC32XX_MODNAME "lpc32xx-nand"
43
44 /**********************************************************************
45 * SLC NAND controller register offsets
46 **********************************************************************/
47
48 #define SLC_DATA(x) (x + 0x000)
49 #define SLC_ADDR(x) (x + 0x004)
50 #define SLC_CMD(x) (x + 0x008)
51 #define SLC_STOP(x) (x + 0x00C)
52 #define SLC_CTRL(x) (x + 0x010)
53 #define SLC_CFG(x) (x + 0x014)
54 #define SLC_STAT(x) (x + 0x018)
55 #define SLC_INT_STAT(x) (x + 0x01C)
56 #define SLC_IEN(x) (x + 0x020)
57 #define SLC_ISR(x) (x + 0x024)
58 #define SLC_ICR(x) (x + 0x028)
59 #define SLC_TAC(x) (x + 0x02C)
60 #define SLC_TC(x) (x + 0x030)
61 #define SLC_ECC(x) (x + 0x034)
62 #define SLC_DMA_DATA(x) (x + 0x038)
63
64 /**********************************************************************
65 * slc_ctrl register definitions
66 **********************************************************************/
67 #define SLCCTRL_SW_RESET (1 << 2) /* Reset the NAND controller bit */
68 #define SLCCTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
69 #define SLCCTRL_DMA_START (1 << 0) /* Start DMA channel bit */
70
71 /**********************************************************************
72 * slc_cfg register definitions
73 **********************************************************************/
74 #define SLCCFG_CE_LOW (1 << 5) /* Force CE low bit */
75 #define SLCCFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
76 #define SLCCFG_ECC_EN (1 << 3) /* ECC enable bit */
77 #define SLCCFG_DMA_BURST (1 << 2) /* DMA burst bit */
78 #define SLCCFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
79 #define SLCCFG_WIDTH (1 << 0) /* External device width, 0=8bit */
80
81 /**********************************************************************
82 * slc_stat register definitions
83 **********************************************************************/
84 #define SLCSTAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
85 #define SLCSTAT_SLC_FIFO (1 << 1) /* SLC FIFO has data bit */
86 #define SLCSTAT_NAND_READY (1 << 0) /* NAND device is ready bit */
87
88 /**********************************************************************
89 * slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
90 **********************************************************************/
91 #define SLCSTAT_INT_TC (1 << 1) /* Transfer count bit */
92 #define SLCSTAT_INT_RDY_EN (1 << 0) /* Ready interrupt bit */
93
94 /**********************************************************************
95 * slc_tac register definitions
96 **********************************************************************/
97 /* Clock setting for RDY write sample wait time in 2*n clocks */
98 #define SLCTAC_WDR(n) (((n) & 0xF) << 28)
99 /* Write pulse width in clock cycles, 1 to 16 clocks */
100 #define SLCTAC_WWIDTH(n) (((n) & 0xF) << 24)
101 /* Write hold time of control and data signals, 1 to 16 clocks */
102 #define SLCTAC_WHOLD(n) (((n) & 0xF) << 20)
103 /* Write setup time of control and data signals, 1 to 16 clocks */
104 #define SLCTAC_WSETUP(n) (((n) & 0xF) << 16)
105 /* Clock setting for RDY read sample wait time in 2*n clocks */
106 #define SLCTAC_RDR(n) (((n) & 0xF) << 12)
107 /* Read pulse width in clock cycles, 1 to 16 clocks */
108 #define SLCTAC_RWIDTH(n) (((n) & 0xF) << 8)
109 /* Read hold time of control and data signals, 1 to 16 clocks */
110 #define SLCTAC_RHOLD(n) (((n) & 0xF) << 4)
111 /* Read setup time of control and data signals, 1 to 16 clocks */
112 #define SLCTAC_RSETUP(n) (((n) & 0xF) << 0)
113
114 /**********************************************************************
115 * slc_ecc register definitions
116 **********************************************************************/
117 /* ECC line party fetch macro */
118 #define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF)
119 #define SLCECC_TO_COLPAR(n) ((n) & 0x3F)
120
121 /*
122 * DMA requires storage space for the DMA local buffer and the hardware ECC
123 * storage area. The DMA local buffer is only used if DMA mapping fails
124 * during runtime.
125 */
126 #define LPC32XX_DMA_DATA_SIZE 4096
127 #define LPC32XX_ECC_SAVE_SIZE ((4096 / 256) * 4)
128
129 /* Number of bytes used for ECC stored in NAND per 256 bytes */
130 #define LPC32XX_SLC_DEV_ECC_BYTES 3
131
132 /*
133 * If the NAND base clock frequency can't be fetched, this frequency will be
134 * used instead as the base. This rate is used to setup the timing registers
135 * used for NAND accesses.
136 */
137 #define LPC32XX_DEF_BUS_RATE 133250000
138
139 /* Milliseconds for DMA FIFO timeout (unlikely anyway) */
140 #define LPC32XX_DMA_TIMEOUT 100
141
142 /*
143 * NAND ECC Layout for small page NAND devices
144 * Note: For large and huge page devices, the default layouts are used
145 */
146 static struct nand_ecclayout lpc32xx_nand_oob_16 = {
147 .eccbytes = 6,
148 .eccpos = {10, 11, 12, 13, 14, 15},
149 .oobfree = {
150 { .offset = 0, .length = 4 },
151 { .offset = 6, .length = 4 },
152 },
153 };
154
155 static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
156 static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
157
158 /*
159 * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
160 * Note: Large page devices used the default layout
161 */
162 static struct nand_bbt_descr bbt_smallpage_main_descr = {
163 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
164 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
165 .offs = 0,
166 .len = 4,
167 .veroffs = 6,
168 .maxblocks = 4,
169 .pattern = bbt_pattern
170 };
171
172 static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
173 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
174 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
175 .offs = 0,
176 .len = 4,
177 .veroffs = 6,
178 .maxblocks = 4,
179 .pattern = mirror_pattern
180 };
181
/*
 * NAND platform configuration structure
 *
 * Timing values are read from the device tree by lpc32xx_parse_dt() and
 * folded into the SLC_TAC timing register by lpc32xx_nand_setup().
 */
struct lpc32xx_nand_cfg_slc {
        uint32_t wdr_clks;      /* written directly via SLCTAC_WDR() */
        uint32_t wwidth;        /* write pulse: SLCTAC_WWIDTH(1 + clk/wwidth) */
        uint32_t whold;         /* write hold: SLCTAC_WHOLD(1 + clk/whold) */
        uint32_t wsetup;        /* write setup: SLCTAC_WSETUP(1 + clk/wsetup) */
        uint32_t rdr_clks;      /* written directly via SLCTAC_RDR() */
        uint32_t rwidth;        /* read pulse: SLCTAC_RWIDTH(1 + clk/rwidth) */
        uint32_t rhold;         /* read hold: SLCTAC_RHOLD(1 + clk/rhold) */
        uint32_t rsetup;        /* read setup: SLCTAC_RSETUP(1 + clk/rsetup) */
        bool use_bbt;           /* use an on-flash bad block table */
        int wp_gpio;            /* write-protect GPIO; negative when absent */
        struct mtd_partition *parts;    /* optional static partition table */
        unsigned num_parts;             /* number of entries in parts */
};
199
/* Per-instance driver state for one SLC NAND controller. */
struct lpc32xx_nand_host {
        struct nand_chip nand_chip;     /* NAND core chip state */
        struct lpc32xx_slc_platform_data *pdata;        /* supplies dma_filter */
        struct clk *clk;                /* SLC block clock */
        struct mtd_info mtd;
        void __iomem *io_base;          /* mapped controller registers */
        struct lpc32xx_nand_cfg_slc *ncfg;      /* timing/BBT/WP configuration */

        struct completion comp;         /* signalled on DMA completion */
        struct dma_chan *dma_chan;
        uint32_t dma_buf_len;           /* size of the data_buf allocation */
        struct dma_slave_config dma_slave_config;
        struct scatterlist sgl;         /* single-entry list for each transfer */

        /*
         * DMA and CPU addresses of ECC work area and data buffer
         */
        uint32_t *ecc_buf;      /* one hardware ECC word per ECC step */
        uint8_t *data_buf;      /* bounce buffer when caller's buf isn't DMA-able */
        dma_addr_t io_base_dma; /* physical register base, used as DMA target */
};
221
/*
 * Reset the SLC controller, clear its configuration/interrupts, and program
 * the access-timing register from the clock rate and the DT-provided
 * timing parameters. Called from probe and resume.
 */
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
        uint32_t clkrate, tmp;

        /* Reset SLC controller */
        writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
        udelay(1000);

        /* Basic setup: config and interrupt-enable cleared, interrupts acked */
        writel(0, SLC_CFG(host->io_base));
        writel(0, SLC_IEN(host->io_base));
        writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
               SLC_ICR(host->io_base));

        /* Get base clock for SLC block */
        clkrate = clk_get_rate(host->clk);
        if (clkrate == 0)
                clkrate = LPC32XX_DEF_BUS_RATE; /* fallback when rate unknown */

        /* Compute clock setup values (each field clamped to 4 bits by SLCTAC_*) */
        tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
              SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) |
              SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) |
              SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) |
              SLCTAC_RDR(host->ncfg->rdr_clks) |
              SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) |
              SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) |
              SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup));
        writel(tmp, SLC_TAC(host->io_base));
}
252
253 /*
254 * Hardware specific access to control lines
255 */
256 static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
257 unsigned int ctrl)
258 {
259 uint32_t tmp;
260 struct nand_chip *chip = mtd->priv;
261 struct lpc32xx_nand_host *host = chip->priv;
262
263 /* Does CE state need to be changed? */
264 tmp = readl(SLC_CFG(host->io_base));
265 if (ctrl & NAND_NCE)
266 tmp |= SLCCFG_CE_LOW;
267 else
268 tmp &= ~SLCCFG_CE_LOW;
269 writel(tmp, SLC_CFG(host->io_base));
270
271 if (cmd != NAND_CMD_NONE) {
272 if (ctrl & NAND_CLE)
273 writel(cmd, SLC_CMD(host->io_base));
274 else
275 writel(cmd, SLC_ADDR(host->io_base));
276 }
277 }
278
279 /*
280 * Read the Device Ready pin
281 */
282 static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
283 {
284 struct nand_chip *chip = mtd->priv;
285 struct lpc32xx_nand_host *host = chip->priv;
286 int rdy = 0;
287
288 if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
289 rdy = 1;
290
291 return rdy;
292 }
293
/*
 * Enable NAND write protect by driving the WP GPIO low; a no-op when no
 * valid WP GPIO was configured.
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
        if (gpio_is_valid(host->ncfg->wp_gpio))
                gpio_set_value(host->ncfg->wp_gpio, 0);
}
302
/*
 * Disable NAND write protect by driving the WP GPIO high; a no-op when no
 * valid WP GPIO was configured.
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
        if (gpio_is_valid(host->ncfg->wp_gpio))
                gpio_set_value(host->ncfg->wp_gpio, 1);
}
311
/*
 * Prepares SLC for transfers with H/W ECC enabled
 *
 * Intentionally empty: the ECC engine is enabled per-transfer via SLC_CFG
 * in lpc32xx_xfer(), so there is nothing to do in this hwctl callback.
 */
static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
{
        /* Hardware ECC is enabled automatically in hardware as needed */
}
319
/*
 * Calculates the ECC for the data
 *
 * Stub for the chip->ecc.calculate callback; always reports success since
 * the hardware accumulates ECC during the DMA transfer itself.
 */
static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
                                      const unsigned char *buf,
                                      unsigned char *code)
{
        /*
         * ECC is calculated automatically in hardware during syndrome read
         * and write operations, so it doesn't need to be calculated here.
         */
        return 0;
}
333
/*
 * Read a single byte from NAND device
 *
 * One 32-bit read of the SLC data FIFO register, truncated to the low
 * 8 bits.
 */
static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd->priv;
        struct lpc32xx_nand_host *host = chip->priv;

        return (uint8_t)readl(SLC_DATA(host->io_base));
}
344
345 /*
346 * Simple device read without ECC
347 */
348 static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
349 {
350 struct nand_chip *chip = mtd->priv;
351 struct lpc32xx_nand_host *host = chip->priv;
352
353 /* Direct device read with no ECC */
354 while (len-- > 0)
355 *buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
356 }
357
358 /*
359 * Simple device write without ECC
360 */
361 static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
362 {
363 struct nand_chip *chip = mtd->priv;
364 struct lpc32xx_nand_host *host = chip->priv;
365
366 /* Direct device write with no ECC */
367 while (len-- > 0)
368 writel((uint32_t)*buf++, SLC_DATA(host->io_base));
369 }
370
/*
 * Read the OOB data from the device without ECC using FIFO method
 *
 * Issues READOOB and streams the spare area into chip->oob_poi.
 * Always returns 0 (no ECC is applied to OOB data).
 */
static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
        struct nand_chip *chip, int page)
{
        chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

        return 0;
}
382
/*
 * Write the OOB data to the device without ECC using FIFO method
 *
 * Seeks past the data area with SEQIN, streams chip->oob_poi, then
 * programs the page. Returns 0 on success or -EIO when the device
 * reports a program failure.
 */
static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
        struct nand_chip *chip, int page)
{
        int status;

        /* Column mtd->writesize positions the write at the spare area */
        chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
        chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

        /* Send command to program the OOB data */
        chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

        status = chip->waitfunc(mtd, chip);

        return status & NAND_STATUS_FAIL ? -EIO : 0;
}
401
/*
 * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
 *
 * Each 32-bit hardware ECC word is shifted, inverted and masked to a
 * 24-bit code, then stored most-significant byte first as 3 bytes in
 * the spare buffer.
 */
static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
{
        int word;

        for (word = 0; word < count; word++) {
                /* Align hardware value and invert to the stored format */
                uint32_t code = ~(ecc[word] << 2) & 0xFFFFFF;
                uint8_t *dst = spare + word * 3;

                dst[0] = (uint8_t)(code >> 16);
                dst[1] = (uint8_t)((code >> 8) & 0xFF);
                dst[2] = (uint8_t)(code & 0xFF);
        }
}
419
/*
 * DMA completion callback: wake the initiator waiting in lpc32xx_xmit_dma()
 * on the completion passed as callback_param.
 */
static void lpc32xx_dma_complete_func(void *completion)
{
        complete(completion);
}
424
425 static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
426 void *mem, int len, enum dma_transfer_direction dir)
427 {
428 struct nand_chip *chip = mtd->priv;
429 struct lpc32xx_nand_host *host = chip->priv;
430 struct dma_async_tx_descriptor *desc;
431 int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
432 int res;
433
434 host->dma_slave_config.direction = dir;
435 host->dma_slave_config.src_addr = dma;
436 host->dma_slave_config.dst_addr = dma;
437 host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
438 host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
439 host->dma_slave_config.src_maxburst = 4;
440 host->dma_slave_config.dst_maxburst = 4;
441 /* DMA controller does flow control: */
442 host->dma_slave_config.device_fc = false;
443 if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
444 dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
445 return -ENXIO;
446 }
447
448 sg_init_one(&host->sgl, mem, len);
449
450 res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
451 DMA_BIDIRECTIONAL);
452 if (res != 1) {
453 dev_err(mtd->dev.parent, "Failed to map sg list\n");
454 return -ENXIO;
455 }
456 desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
457 flags);
458 if (!desc) {
459 dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
460 goto out1;
461 }
462
463 init_completion(&host->comp);
464 desc->callback = lpc32xx_dma_complete_func;
465 desc->callback_param = &host->comp;
466
467 dmaengine_submit(desc);
468 dma_async_issue_pending(host->dma_chan);
469
470 wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
471
472 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
473 DMA_BIDIRECTIONAL);
474
475 return 0;
476 out1:
477 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
478 DMA_BIDIRECTIONAL);
479 return -ENXIO;
480 }
481
/*
 * DMA read/write transfers with ECC support
 *
 * Moves the main data area of one page between the SLC data FIFO and @buf,
 * per ECC step, while the hardware accumulates ECC. Intermediate ECC words
 * are DMA-read into host->ecc_buf; the final one is read from SLC_ECC
 * directly after the data transfers.
 *
 * @buf:         page data buffer (source on write, destination on read)
 * @eccsubpages: unused here; chip->ecc.steps drives the loop instead
 * @read:        nonzero for device-to-memory direction
 *
 * Returns 0 on success or a negative error code.
 */
static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
                        int read)
{
        struct nand_chip *chip = mtd->priv;
        struct lpc32xx_nand_host *host = chip->priv;
        int i, status = 0;
        unsigned long timeout;
        int res;
        enum dma_transfer_direction dir =
                read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
        uint8_t *dma_buf;
        bool dma_mapped;

        /*
         * Buffers below high_memory are used for DMA directly; anything
         * else goes through the preallocated bounce buffer (outbound data
         * is copied into it first).
         */
        if ((void *)buf <= high_memory) {
                dma_buf = buf;
                dma_mapped = true;
        } else {
                dma_buf = host->data_buf;
                dma_mapped = false;
                if (!read)
                        memcpy(host->data_buf, buf, mtd->writesize);
        }

        /* Select DMA direction and enable hardware/DMA ECC plus bursts */
        if (read) {
                writel(readl(SLC_CFG(host->io_base)) |
                       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
                       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
        } else {
                writel((readl(SLC_CFG(host->io_base)) |
                        SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
                       ~SLCCFG_DMA_DIR,
                       SLC_CFG(host->io_base));
        }

        /* Clear initial ECC */
        writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

        /* Transfer size is data area only */
        writel(mtd->writesize, SLC_TC(host->io_base));

        /* Start transfer in the NAND controller */
        writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
               SLC_CTRL(host->io_base));

        for (i = 0; i < chip->ecc.steps; i++) {
                /* Data */
                res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
                                       dma_buf + i * chip->ecc.size,
                                       mtd->writesize / chip->ecc.steps, dir);
                if (res)
                        return res;

                /* Always _read_ ECC */
                if (i == chip->ecc.steps - 1)
                        break;
                if (!read) /* ECC availability delayed on write */
                        udelay(10);
                res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
                                       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
                if (res)
                        return res;
        }

        /*
         * According to NXP, the DMA can be finished here, but the NAND
         * controller may still have buffered data. After porting to using the
         * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
         * appears to be always true, according to tests. Keeping the check for
         * safety reasons for now.
         */
        if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
                dev_warn(mtd->dev.parent, "FIFO not empty!\n");
                timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
                while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
                       time_before(jiffies, timeout))
                        cpu_relax();
                if (!time_before(jiffies, timeout)) {
                        dev_err(mtd->dev.parent, "FIFO held data too long\n");
                        status = -EIO;
                }
        }

        /* Read last calculated ECC value */
        if (!read)
                udelay(10);     /* ECC availability delayed on write */
        host->ecc_buf[chip->ecc.steps - 1] =
                readl(SLC_ECC(host->io_base));

        /* Flush DMA */
        dmaengine_terminate_all(host->dma_chan);

        if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
            readl(SLC_TC(host->io_base))) {
                /* Something is left in the FIFO, something is wrong */
                dev_err(mtd->dev.parent, "DMA FIFO failure\n");
                status = -EIO;
        }

        /* Stop DMA & HW ECC */
        writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
               SLC_CTRL(host->io_base));
        writel(readl(SLC_CFG(host->io_base)) &
               ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
                 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));

        /* Bounce-buffered reads are copied back to the caller's buffer */
        if (!dma_mapped && read)
                memcpy(buf, host->data_buf, mtd->writesize);

        return status;
}
595
/*
 * Read the data and OOB data from the device, use ECC correction with the
 * data, disable ECC for the OOB data
 *
 * Returns the status of the DMA transfer; ECC correction results are
 * accounted in mtd->ecc_stats rather than the return value.
 */
static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
                                           struct nand_chip *chip, uint8_t *buf,
                                           int oob_required, int page)
{
        struct lpc32xx_nand_host *host = chip->priv;
        int stat, i, status;
        uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];

        /* Issue read command */
        chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

        /* Read data and oob, calculate ECC */
        status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);

        /* Get OOB data */
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

        /* Convert to stored ECC format */
        lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);

        /* Pointer to ECC data retrieved from NAND spare area */
        oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0];

        /* Correct each ECC step against the ECC bytes stored on flash */
        for (i = 0; i < chip->ecc.steps; i++) {
                stat = chip->ecc.correct(mtd, buf, oobecc,
                                         &tmpecc[i * chip->ecc.bytes]);
                if (stat < 0)
                        mtd->ecc_stats.failed++;
                else
                        mtd->ecc_stats.corrected += stat;

                buf += chip->ecc.size;
                oobecc += chip->ecc.bytes;
        }

        return status;
}
637
638 /*
639 * Read the data and OOB data from the device, no ECC correction with the
640 * data or OOB data
641 */
642 static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
643 struct nand_chip *chip,
644 uint8_t *buf, int oob_required,
645 int page)
646 {
647 /* Issue read command */
648 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
649
650 /* Raw reads can just use the FIFO interface */
651 chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
652 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
653
654 return 0;
655 }
656
/*
 * Write the data and OOB data to the device, use ECC with the data,
 * disable ECC for the OOB data
 *
 * The data area is written via DMA while hardware accumulates ECC; the
 * ECC is then converted to the stored format, placed in the OOB buffer,
 * and the whole spare area is written through the FIFO.
 */
static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
                                            struct nand_chip *chip,
                                            const uint8_t *buf, int oob_required)
{
        struct lpc32xx_nand_host *host = chip->priv;
        uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0];
        int error;

        /* Write data, calculate ECC on outbound data */
        error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
        if (error)
                return error;

        /*
         * The calculated ECC needs some manual work done to it before
         * committing it to NAND. Process the calculated ECC and place
         * the resultant values directly into the OOB buffer. */
        lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);

        /* Write ECC data to device */
        chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
        return 0;
}
684
685 /*
686 * Write the data and OOB data to the device, no ECC correction with the
687 * data or OOB data
688 */
689 static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
690 struct nand_chip *chip,
691 const uint8_t *buf,
692 int oob_required)
693 {
694 /* Raw writes can just use the FIFO interface */
695 chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
696 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
697 return 0;
698 }
699
700 static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
701 {
702 struct mtd_info *mtd = &host->mtd;
703 dma_cap_mask_t mask;
704
705 if (!host->pdata || !host->pdata->dma_filter) {
706 dev_err(mtd->dev.parent, "no DMA platform data\n");
707 return -ENOENT;
708 }
709
710 dma_cap_zero(mask);
711 dma_cap_set(DMA_SLAVE, mask);
712 host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
713 "nand-slc");
714 if (!host->dma_chan) {
715 dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
716 return -EBUSY;
717 }
718
719 return 0;
720 }
721
722 static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
723 {
724 struct lpc32xx_nand_cfg_slc *ncfg;
725 struct device_node *np = dev->of_node;
726
727 ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
728 if (!ncfg) {
729 dev_err(dev, "could not allocate memory for NAND config\n");
730 return NULL;
731 }
732
733 of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
734 of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
735 of_property_read_u32(np, "nxp,whold", &ncfg->whold);
736 of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
737 of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
738 of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
739 of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
740 of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
741
742 if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
743 !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
744 !ncfg->rhold || !ncfg->rsetup) {
745 dev_err(dev, "chip parameters not specified correctly\n");
746 return NULL;
747 }
748
749 ncfg->use_bbt = of_get_nand_on_flash_bbt(np);
750 ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
751
752 return ncfg;
753 }
754
/*
 * Probe for NAND controller
 *
 * Maps the register block, parses the device-tree configuration, claims
 * the WP GPIO and clock, registers the SLC-specific NAND callbacks,
 * allocates the DMA bounce/ECC buffer, scans for the chip and registers
 * the MTD device. Error paths unwind via the err_exit labels in reverse
 * order of acquisition.
 */
static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host;
        struct mtd_info *mtd;
        struct nand_chip *chip;
        struct resource *rc;
        struct mtd_part_parser_data ppdata = {};
        int res;

        rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (rc == NULL) {
                dev_err(&pdev->dev, "No memory resource found for device\n");
                return -EBUSY;
        }

        /* Allocate memory for the device structure (and zero it) */
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                dev_err(&pdev->dev, "failed to allocate device structure\n");
                return -ENOMEM;
        }
        /* Physical base doubles as the DMA target address for the FIFOs */
        host->io_base_dma = rc->start;

        host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
        if (host->io_base == NULL) {
                dev_err(&pdev->dev, "ioremap failed\n");
                return -ENOMEM;
        }

        /* Configuration comes from the device tree only */
        if (pdev->dev.of_node)
                host->ncfg = lpc32xx_parse_dt(&pdev->dev);
        if (!host->ncfg) {
                dev_err(&pdev->dev,
                        "Missing or bad NAND config from device tree\n");
                return -ENOENT;
        }
        if (host->ncfg->wp_gpio == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (gpio_is_valid(host->ncfg->wp_gpio) &&
                        gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
                dev_err(&pdev->dev, "GPIO not available\n");
                return -EBUSY;
        }
        lpc32xx_wp_disable(host);

        host->pdata = pdev->dev.platform_data;

        mtd = &host->mtd;
        chip = &host->nand_chip;
        chip->priv = host;
        mtd->priv = chip;
        mtd->owner = THIS_MODULE;
        mtd->dev.parent = &pdev->dev;

        /* Get NAND clock */
        host->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                dev_err(&pdev->dev, "Clock failure\n");
                res = -ENOENT;
                goto err_exit1;
        }
        clk_enable(host->clk);

        /* Set NAND IO addresses and command/ready functions */
        chip->IO_ADDR_R = SLC_DATA(host->io_base);
        chip->IO_ADDR_W = SLC_DATA(host->io_base);
        chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
        chip->dev_ready = lpc32xx_nand_device_ready;
        chip->chip_delay = 20;          /* 20us command delay time */

        /* Init NAND controller */
        lpc32xx_nand_setup(host);

        platform_set_drvdata(pdev, host);

        /* NAND callbacks for LPC32xx SLC hardware */
        chip->ecc.mode = NAND_ECC_HW_SYNDROME;
        chip->read_byte = lpc32xx_nand_read_byte;
        chip->read_buf = lpc32xx_nand_read_buf;
        chip->write_buf = lpc32xx_nand_write_buf;
        chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
        chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
        chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
        chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
        chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
        chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
        chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
        chip->ecc.correct = nand_correct_data;
        chip->ecc.strength = 1;
        chip->ecc.hwctl = lpc32xx_nand_ecc_enable;

        /* bitflip_threshold's default is defined as ecc_strength anyway.
         * Unfortunately, it is set only later at add_mtd_device(). Meanwhile
         * being 0, it causes bad block table scanning errors in
         * nand_scan_tail(), so preparing it here already. */
        mtd->bitflip_threshold = chip->ecc.strength;

        /*
         * Allocate a large enough buffer for a single huge page plus
         * extra space for the spare area and ECC storage area
         */
        host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
        host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
                                      GFP_KERNEL);
        if (host->data_buf == NULL) {
                dev_err(&pdev->dev, "Error allocating memory\n");
                res = -ENOMEM;
                goto err_exit2;
        }

        res = lpc32xx_nand_dma_setup(host);
        if (res) {
                res = -EIO;
                goto err_exit2;
        }

        /* Find NAND device */
        if (nand_scan_ident(mtd, 1, NULL)) {
                res = -ENXIO;
                goto err_exit3;
        }

        /* OOB and ECC CPU and DMA work areas */
        host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);

        /*
         * Small page FLASH has a unique OOB layout, but large and huge
         * page FLASH use the standard layout. Small page FLASH uses a
         * custom BBT marker layout.
         */
        if (mtd->writesize <= 512)
                chip->ecc.layout = &lpc32xx_nand_oob_16;

        /* These sizes remain the same regardless of page size */
        chip->ecc.size = 256;
        chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
        chip->ecc.prepad = chip->ecc.postpad = 0;

        /* Avoid extra scan if using BBT, setup BBT support */
        if (host->ncfg->use_bbt) {
                chip->options |= NAND_SKIP_BBTSCAN;
                chip->bbt_options |= NAND_BBT_USE_FLASH;

                /*
                 * Use a custom BBT marker setup for small page FLASH that
                 * won't interfere with the ECC layout. Large and huge page
                 * FLASH use the standard layout.
                 */
                if (mtd->writesize <= 512) {
                        chip->bbt_td = &bbt_smallpage_main_descr;
                        chip->bbt_md = &bbt_smallpage_mirror_descr;
                }
        }

        /*
         * Fills out all the uninitialized function pointers with the defaults
         */
        if (nand_scan_tail(mtd)) {
                res = -ENXIO;
                goto err_exit3;
        }

        /* Standard layout in FLASH for bad block tables */
        if (host->ncfg->use_bbt) {
                if (nand_default_bbt(mtd) < 0)
                        dev_err(&pdev->dev,
                                "Error initializing default bad block tables\n");
        }

        mtd->name = "nxp_lpc3220_slc";
        ppdata.of_node = pdev->dev.of_node;
        res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
                                        host->ncfg->num_parts);
        if (!res)
                return res;

        /* Registration failed: fall through to full unwind */
        nand_release(mtd);

err_exit3:
        dma_release_channel(host->dma_chan);
err_exit2:
        clk_disable(host->clk);
        clk_put(host->clk);
        platform_set_drvdata(pdev, NULL);
err_exit1:
        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);

        return res;
}
948
949 /*
950 * Remove NAND device.
951 */
952 static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
953 {
954 uint32_t tmp;
955 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
956 struct mtd_info *mtd = &host->mtd;
957
958 nand_release(mtd);
959 dma_release_channel(host->dma_chan);
960
961 /* Force CE high */
962 tmp = readl(SLC_CTRL(host->io_base));
963 tmp &= ~SLCCFG_CE_LOW;
964 writel(tmp, SLC_CTRL(host->io_base));
965
966 clk_disable(host->clk);
967 clk_put(host->clk);
968 platform_set_drvdata(pdev, NULL);
969 lpc32xx_wp_enable(host);
970 gpio_free(host->ncfg->wp_gpio);
971
972 return 0;
973 }
974
975 #ifdef CONFIG_PM
/*
 * Resume: re-enable the SLC clock, reinitialize the controller and lift
 * write protect — the mirror image of lpc32xx_nand_suspend().
 */
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

        /* Re-enable NAND clock */
        clk_enable(host->clk);

        /* Fresh init of NAND controller */
        lpc32xx_nand_setup(host);

        /* Disable write protect */
        lpc32xx_wp_disable(host);

        return 0;
}
991
992 static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
993 {
994 uint32_t tmp;
995 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
996
997 /* Force CE high */
998 tmp = readl(SLC_CTRL(host->io_base));
999 tmp &= ~SLCCFG_CE_LOW;
1000 writel(tmp, SLC_CTRL(host->io_base));
1001
1002 /* Enable write protect for safety */
1003 lpc32xx_wp_enable(host);
1004
1005 /* Disable clock */
1006 clk_disable(host->clk);
1007
1008 return 0;
1009 }
1010
1011 #else
1012 #define lpc32xx_nand_resume NULL
1013 #define lpc32xx_nand_suspend NULL
1014 #endif
1015
1016 static const struct of_device_id lpc32xx_nand_match[] = {
1017 { .compatible = "nxp,lpc3220-slc" },
1018 { /* sentinel */ },
1019 };
1020 MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
1021
1022 static struct platform_driver lpc32xx_nand_driver = {
1023 .probe = lpc32xx_nand_probe,
1024 .remove = __devexit_p(lpc32xx_nand_remove),
1025 .resume = lpc32xx_nand_resume,
1026 .suspend = lpc32xx_nand_suspend,
1027 .driver = {
1028 .name = LPC32XX_MODNAME,
1029 .owner = THIS_MODULE,
1030 .of_match_table = of_match_ptr(lpc32xx_nand_match),
1031 },
1032 };
1033
1034 module_platform_driver(lpc32xx_nand_driver);
1035
1036 MODULE_LICENSE("GPL");
1037 MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
1038 MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
1039 MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
This page took 0.055008 seconds and 5 git commands to generate.