mtd: pxa3xx_nand: initialize pxa3xx_flash_ids to 0
fe69af00 1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
fe69af00 12 */
13
a88bdbb5 14#include <linux/kernel.h>
fe69af00 15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
19#include <linux/delay.h>
20#include <linux/clk.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/nand.h>
23#include <linux/mtd/partitions.h>
24#include <linux/io.h>
25#include <linux/irq.h>
5a0e3ad6 26#include <linux/slab.h>
27#include <linux/of.h>
28#include <linux/of_device.h>
776f265e 29#include <linux/of_mtd.h>
fe69af00 30
31#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
32#define ARCH_HAS_DMA
33#endif
34
35#ifdef ARCH_HAS_DMA
afb5b5c9 36#include <mach/dma.h>
37#endif
38
293b2da1 39#include <linux/platform_data/mtd-nand-pxa3xx.h>
fe69af00 40
41#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
f8155a40 42#define NAND_STOP_DELAY (2 * HZ/50)
4eb2da89 43#define PAGE_CHUNK_SIZE (2048)
fe69af00 44
45/*
46 * Define a buffer size for the initial command that detects the flash device:
47 * STATUS, READID and PARAM. The largest of these is the PARAM command,
48 * needing 256 bytes.
49 */
50#define INIT_BUFFER_SIZE 256
51
fe69af00 52/* registers and bit definitions */
53#define NDCR (0x00) /* Control register */
54#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
55#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
56#define NDSR (0x14) /* Status Register */
57#define NDPCR (0x18) /* Page Count Register */
58#define NDBDR0 (0x1C) /* Bad Block Register 0 */
59#define NDBDR1 (0x20) /* Bad Block Register 1 */
43bcfd2b 60#define NDECCCTRL (0x28) /* ECC control */
fe69af00 61#define NDDB (0x40) /* Data Buffer */
62#define NDCB0 (0x48) /* Command Buffer0 */
63#define NDCB1 (0x4C) /* Command Buffer1 */
64#define NDCB2 (0x50) /* Command Buffer2 */
65
66#define NDCR_SPARE_EN (0x1 << 31)
67#define NDCR_ECC_EN (0x1 << 30)
68#define NDCR_DMA_EN (0x1 << 29)
69#define NDCR_ND_RUN (0x1 << 28)
70#define NDCR_DWIDTH_C (0x1 << 27)
71#define NDCR_DWIDTH_M (0x1 << 26)
72#define NDCR_PAGE_SZ (0x1 << 24)
73#define NDCR_NCSX (0x1 << 23)
74#define NDCR_ND_MODE (0x3 << 21)
75#define NDCR_NAND_MODE (0x0)
76#define NDCR_CLR_PG_CNT (0x1 << 20)
f8155a40 77#define NDCR_STOP_ON_UNCOR (0x1 << 19)
fe69af00 78#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
79#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
80
81#define NDCR_RA_START (0x1 << 15)
82#define NDCR_PG_PER_BLK (0x1 << 14)
83#define NDCR_ND_ARB_EN (0x1 << 12)
f8155a40 84#define NDCR_INT_MASK (0xFFF)
fe69af00 85
86#define NDSR_MASK (0xfff)
87#define NDSR_ERR_CNT_OFF (16)
88#define NDSR_ERR_CNT_MASK (0x1f)
89#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
90#define NDSR_RDY (0x1 << 12)
91#define NDSR_FLASH_RDY (0x1 << 11)
fe69af00 92#define NDSR_CS0_PAGED (0x1 << 10)
93#define NDSR_CS1_PAGED (0x1 << 9)
94#define NDSR_CS0_CMDD (0x1 << 8)
95#define NDSR_CS1_CMDD (0x1 << 7)
96#define NDSR_CS0_BBD (0x1 << 6)
97#define NDSR_CS1_BBD (0x1 << 5)
98#define NDSR_UNCORERR (0x1 << 4)
99#define NDSR_CORERR (0x1 << 3)
fe69af00 100#define NDSR_WRDREQ (0x1 << 2)
101#define NDSR_RDDREQ (0x1 << 1)
102#define NDSR_WRCMDREQ (0x1)
103
41a63430 104#define NDCB0_LEN_OVRD (0x1 << 28)
4eb2da89 105#define NDCB0_ST_ROW_EN (0x1 << 26)
fe69af00 106#define NDCB0_AUTO_RS (0x1 << 25)
107#define NDCB0_CSEL (0x1 << 24)
108#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
109#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
fe69af00 110#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
111#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
112#define NDCB0_NC (0x1 << 20)
113#define NDCB0_DBC (0x1 << 19)
114#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
115#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
116#define NDCB0_CMD2_MASK (0xff << 8)
117#define NDCB0_CMD1_MASK (0xff)
118#define NDCB0_ADDR_CYC_SHIFT (16)
119
120#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
121#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
122#define EXT_CMD_TYPE_READ 4 /* Read */
123#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
124#define EXT_CMD_TYPE_FINAL 3 /* Final command */
125#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
126#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
127
fe69af00 128/* macros for registers read/write */
129#define nand_writel(info, off, val) \
b7e46062 130 writel_relaxed((val), (info)->mmio_base + (off))
fe69af00 131
132#define nand_readl(info, off) \
b7e46062 133 readl_relaxed((info)->mmio_base + (off))
fe69af00 134
135/* error code and state */
136enum {
137 ERR_NONE = 0,
138 ERR_DMABUSERR = -1,
139 ERR_SENDCMD = -2,
87f5336e 140 ERR_UNCORERR = -3,
fe69af00 141 ERR_BBERR = -4,
87f5336e 142 ERR_CORERR = -5,
fe69af00 143};
144
145enum {
f8155a40 146 STATE_IDLE = 0,
d456882b 147 STATE_PREPARED,
fe69af00 148 STATE_CMD_HANDLE,
149 STATE_DMA_READING,
150 STATE_DMA_WRITING,
151 STATE_DMA_DONE,
152 STATE_PIO_READING,
153 STATE_PIO_WRITING,
154 STATE_CMD_DONE,
155 STATE_READY,
fe69af00 156};
157
158enum pxa3xx_nand_variant {
159 PXA3XX_NAND_VARIANT_PXA,
160 PXA3XX_NAND_VARIANT_ARMADA370,
161};
162
163struct pxa3xx_nand_host {
164 struct nand_chip chip;
165 struct mtd_info *mtd;
166 void *info_data;
167
168 /* page size of attached chip */
d456882b 169 int use_ecc;
f3c8cfc2 170 int cs;
fe69af00 171
172 /* calculated from pxa3xx_nand_flash data */
173 unsigned int col_addr_cycles;
174 unsigned int row_addr_cycles;
175 size_t read_id_bytes;
176
177};
178
179struct pxa3xx_nand_info {
401e67e2 180 struct nand_hw_control controller;
fe69af00 181 struct platform_device *pdev;
fe69af00 182
183 struct clk *clk;
184 void __iomem *mmio_base;
8638fac8 185 unsigned long mmio_phys;
55d9fd6e 186 struct completion cmd_complete, dev_ready;
fe69af00 187
188 unsigned int buf_start;
189 unsigned int buf_count;
62e8b851 190 unsigned int buf_size;
191 unsigned int data_buff_pos;
192 unsigned int oob_buff_pos;
fe69af00 193
194 /* DMA information */
195 int drcmr_dat;
196 int drcmr_cmd;
197
198 unsigned char *data_buff;
18c81b18 199 unsigned char *oob_buff;
fe69af00 200 dma_addr_t data_buff_phys;
fe69af00 201 int data_dma_ch;
202 struct pxa_dma_desc *data_desc;
203 dma_addr_t data_desc_addr;
204
f3c8cfc2 205 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
fe69af00 206 unsigned int state;
207
208 /*
209 * This driver supports NFCv1 (as found in PXA SoC)
210 * and NFCv2 (as found in Armada 370/XP SoC).
211 */
212 enum pxa3xx_nand_variant variant;
213
f3c8cfc2 214 int cs;
fe69af00 215 int use_ecc; /* use HW ECC ? */
43bcfd2b 216 int ecc_bch; /* using BCH ECC? */
fe69af00 217 int use_dma; /* use DMA ? */
5bb653e8 218 int use_spare; /* use spare ? */
55d9fd6e 219 int need_wait;
fe69af00 220
2128b08c 221 unsigned int data_size; /* data to be read from FIFO */
70ed8523 222 unsigned int chunk_size; /* split commands chunk size */
d456882b 223 unsigned int oob_size;
224 unsigned int spare_size;
225 unsigned int ecc_size;
226 unsigned int ecc_err_cnt;
227 unsigned int max_bitflips;
fe69af00 228 int retcode;
fe69af00 229
230 /* cached register value */
231 uint32_t reg_ndcr;
232 uint32_t ndtr0cs0;
233 uint32_t ndtr1cs0;
234
fe69af00 235 /* generated NDCBx register values */
236 uint32_t ndcb0;
237 uint32_t ndcb1;
238 uint32_t ndcb2;
3a1a344a 239 uint32_t ndcb3;
fe69af00 240};
241
90ab5ee9 242static bool use_dma = 1;
fe69af00 243module_param(use_dma, bool, 0444);
25985edc 244MODULE_PARM_DESC(use_dma, "enable DMA for data transfers to/from the NAND HW");
fe69af00 245
c1f82478 246static struct pxa3xx_nand_timing timing[] = {
247 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
248 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
249 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
250 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
251};
252
c1f82478 253static struct pxa3xx_nand_flash builtin_flash_types[] = {
254{ "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
255{ "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
256{ "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
257{ "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
258{ "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
259{ "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
260{ "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
261{ "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
262{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
263};
264
265static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
266static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
267
268static struct nand_bbt_descr bbt_main_descr = {
269 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
270 | NAND_BBT_2BIT | NAND_BBT_VERSION,
271 .offs = 8,
272 .len = 6,
273 .veroffs = 14,
274 .maxblocks = 8, /* Last 8 blocks in each chip */
275 .pattern = bbt_pattern
276};
277
278static struct nand_bbt_descr bbt_mirror_descr = {
279 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
280 | NAND_BBT_2BIT | NAND_BBT_VERSION,
281 .offs = 8,
282 .len = 6,
283 .veroffs = 14,
284 .maxblocks = 8, /* Last 8 blocks in each chip */
285 .pattern = bbt_mirror_pattern
286};
287
288static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
289 .eccbytes = 32,
290 .eccpos = {
291 32, 33, 34, 35, 36, 37, 38, 39,
292 40, 41, 42, 43, 44, 45, 46, 47,
293 48, 49, 50, 51, 52, 53, 54, 55,
294 56, 57, 58, 59, 60, 61, 62, 63},
295 .oobfree = { {2, 30} }
296};
297
298static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
299 .eccbytes = 64,
300 .eccpos = {
301 32, 33, 34, 35, 36, 37, 38, 39,
302 40, 41, 42, 43, 44, 45, 46, 47,
303 48, 49, 50, 51, 52, 53, 54, 55,
304 56, 57, 58, 59, 60, 61, 62, 63,
305 96, 97, 98, 99, 100, 101, 102, 103,
306 104, 105, 106, 107, 108, 109, 110, 111,
307 112, 113, 114, 115, 116, 117, 118, 119,
308 120, 121, 122, 123, 124, 125, 126, 127},
309 /* Bootrom looks in bytes 0 & 5 for bad blocks */
310 .oobfree = { {6, 26}, { 64, 32} }
311};
312
313static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
314 .eccbytes = 128,
315 .eccpos = {
316 32, 33, 34, 35, 36, 37, 38, 39,
317 40, 41, 42, 43, 44, 45, 46, 47,
318 48, 49, 50, 51, 52, 53, 54, 55,
319 56, 57, 58, 59, 60, 61, 62, 63},
320 .oobfree = { }
321};
322
323/* Default flash type setting, used only for initial flash detection */
324#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
325
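/*
 * Note: each NDTR0/NDTR1 timing field below is only a few bits wide, so the
 * cycle count is clamped with min() to the largest value the field can hold.
 */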
fe69af00 326#define NDTR0_tCH(c) (min((c), 7) << 19)
327#define NDTR0_tCS(c) (min((c), 7) << 16)
328#define NDTR0_tWH(c) (min((c), 7) << 11)
329#define NDTR0_tWP(c) (min((c), 7) << 8)
330#define NDTR0_tRH(c) (min((c), 7) << 3)
331#define NDTR0_tRP(c) (min((c), 7) << 0)
332
333#define NDTR1_tR(c) (min((c), 65535) << 16)
334#define NDTR1_tWHR(c) (min((c), 15) << 4)
335#define NDTR1_tAR(c) (min((c), 15) << 0)
336
337/* convert nano-seconds to nand flash controller clock cycles */
93b352fc 338#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
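/*
 * Worked example (assuming a 156 MHz NAND controller clock):
 * ns2cycle(40, 156000000) = (int)(40 * 156 / 1000) = 6 cycles,
 * i.e. nanosecond timings are truncated to whole clock cycles.
 */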
fe69af00 339
17754ad6 340static const struct of_device_id pxa3xx_nand_dt_ids[] = {
341 {
342 .compatible = "marvell,pxa3xx-nand",
343 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
344 },
345 {
346 .compatible = "marvell,armada370-nand",
347 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
348 },
349 {}
350};
351MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
352
353static enum pxa3xx_nand_variant
354pxa3xx_nand_get_variant(struct platform_device *pdev)
355{
356 const struct of_device_id *of_id =
357 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
358 if (!of_id)
359 return PXA3XX_NAND_VARIANT_PXA;
360 return (enum pxa3xx_nand_variant)of_id->data;
361}
362
d456882b 363static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
7dad482e 364 const struct pxa3xx_nand_timing *t)
fe69af00 365{
d456882b 366 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 367 unsigned long nand_clk = clk_get_rate(info->clk);
368 uint32_t ndtr0, ndtr1;
369
370 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
371 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
372 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
373 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
374 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
375 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
376
377 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
378 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
379 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
380
381 info->ndtr0cs0 = ndtr0;
382 info->ndtr1cs0 = ndtr1;
fe69af00 383 nand_writel(info, NDTR0CS0, ndtr0);
384 nand_writel(info, NDTR1CS0, ndtr1);
385}
386
387/*
388 * Set the data and OOB size, depending on the selected
389 * spare and ECC configuration.
390 * Only applicable to READ0, READOOB and PAGEPROG commands.
391 */
392static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
393 struct mtd_info *mtd)
fe69af00 394{
48cf7efa 395 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
9d8b1043 396
fa543bef 397 info->data_size = mtd->writesize;
43bcfd2b 398 if (!oob_enable)
9d8b1043 399 return;
9d8b1043 400
401 info->oob_size = info->spare_size;
402 if (!info->use_ecc)
403 info->oob_size += info->ecc_size;
404}
405
406/**
407 * NOTE: ND_RUN must be set first and the command buffer
408 * written afterwards, otherwise the controller does not work.
409 * We enable all the interrupts at the same time and let
410 * pxa3xx_nand_irq() handle all the logic.
411 */
412static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
413{
414 uint32_t ndcr;
415
48cf7efa 416 ndcr = info->reg_ndcr;
cd9d1182 417
43bcfd2b 418 if (info->use_ecc) {
cd9d1182 419 ndcr |= NDCR_ECC_EN;
420 if (info->ecc_bch)
421 nand_writel(info, NDECCCTRL, 0x1);
422 } else {
cd9d1182 423 ndcr &= ~NDCR_ECC_EN;
424 if (info->ecc_bch)
425 nand_writel(info, NDECCCTRL, 0x0);
426 }
427
428 if (info->use_dma)
429 ndcr |= NDCR_DMA_EN;
430 else
431 ndcr &= ~NDCR_DMA_EN;
432
433 if (info->use_spare)
434 ndcr |= NDCR_SPARE_EN;
435 else
436 ndcr &= ~NDCR_SPARE_EN;
437
438 ndcr |= NDCR_ND_RUN;
439
440 /* clear status bits and run */
441 nand_writel(info, NDCR, 0);
442 nand_writel(info, NDSR, NDSR_MASK);
443 nand_writel(info, NDCR, ndcr);
444}
445
446static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
447{
448 uint32_t ndcr;
449 int timeout = NAND_STOP_DELAY;
450
451 /* wait RUN bit in NDCR become 0 */
452 ndcr = nand_readl(info, NDCR);
453 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
454 ndcr = nand_readl(info, NDCR);
455 udelay(1);
456 }
457
458 if (timeout <= 0) {
459 ndcr &= ~NDCR_ND_RUN;
460 nand_writel(info, NDCR, ndcr);
461 }
462 /* clear status bits */
463 nand_writel(info, NDSR, NDSR_MASK);
464}
465
466static void __maybe_unused
467enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
fe69af00 468{
469 uint32_t ndcr;
470
471 ndcr = nand_readl(info, NDCR);
472 nand_writel(info, NDCR, ndcr & ~int_mask);
473}
474
475static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
476{
477 uint32_t ndcr;
478
479 ndcr = nand_readl(info, NDCR);
480 nand_writel(info, NDCR, ndcr | int_mask);
481}
482
483static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
484{
485 if (info->ecc_bch) {
486 int timeout;
487
488 /*
489 * According to the datasheet, when reading from NDDB
490 * with BCH enabled, after each 32-byte read we
491 * have to make sure that the NDSR.RDDREQ bit is set.
492 *
493 * Drain the FIFO eight 32-bit reads at a time, and skip
494 * the polling on the last read.
495 */
496 while (len > 8) {
497 __raw_readsl(info->mmio_base + NDDB, data, 8);
498
499 for (timeout = 0;
500 !(nand_readl(info, NDSR) & NDSR_RDDREQ);
501 timeout++) {
502 if (timeout >= 5) {
503 dev_err(&info->pdev->dev,
504 "Timeout on RDDREQ while draining the FIFO\n");
505 return;
506 }
507
508 mdelay(1);
509 }
510
511 data += 32;
512 len -= 8;
513 }
514 }
515
516 __raw_readsl(info->mmio_base + NDDB, data, len);
517}
518
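/*
 * handle_data_pio(): move one chunk of page data (plus OOB, if enabled)
 * between the driver buffers and the controller FIFO in PIO mode, then
 * advance the buffer positions so multi-chunk pages can be continued.
 */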
f8155a40 519static void handle_data_pio(struct pxa3xx_nand_info *info)
fe69af00 520{
70ed8523 521 unsigned int do_bytes = min(info->data_size, info->chunk_size);
fa543bef 522
fe69af00 523 switch (info->state) {
524 case STATE_PIO_WRITING:
525 __raw_writesl(info->mmio_base + NDDB,
526 info->data_buff + info->data_buff_pos,
527 DIV_ROUND_UP(do_bytes, 4));
528
9d8b1043 529 if (info->oob_size > 0)
530 __raw_writesl(info->mmio_base + NDDB,
531 info->oob_buff + info->oob_buff_pos,
532 DIV_ROUND_UP(info->oob_size, 4));
fe69af00 533 break;
534 case STATE_PIO_READING:
535 drain_fifo(info,
536 info->data_buff + info->data_buff_pos,
537 DIV_ROUND_UP(do_bytes, 4));
fa543bef 538
9d8b1043 539 if (info->oob_size > 0)
540 drain_fifo(info,
541 info->oob_buff + info->oob_buff_pos,
542 DIV_ROUND_UP(info->oob_size, 4));
fe69af00 543 break;
544 default:
da675b4e 545 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
fe69af00 546 info->state);
f8155a40 547 BUG();
fe69af00 548 }
549
550 /* Update buffer pointers for multi-page read/write */
551 info->data_buff_pos += do_bytes;
552 info->oob_buff_pos += info->oob_size;
553 info->data_size -= do_bytes;
fe69af00 554}
555
f4db2e3a 556#ifdef ARCH_HAS_DMA
f8155a40 557static void start_data_dma(struct pxa3xx_nand_info *info)
fe69af00 558{
559 struct pxa_dma_desc *desc = info->data_desc;
9d8b1043 560 int dma_len = ALIGN(info->data_size + info->oob_size, 32);
fe69af00 561
562 desc->ddadr = DDADR_STOP;
563 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
564
565 switch (info->state) {
566 case STATE_DMA_WRITING:
fe69af00 567 desc->dsadr = info->data_buff_phys;
8638fac8 568 desc->dtadr = info->mmio_phys + NDDB;
fe69af00 569 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
570 break;
571 case STATE_DMA_READING:
fe69af00 572 desc->dtadr = info->data_buff_phys;
8638fac8 573 desc->dsadr = info->mmio_phys + NDDB;
fe69af00 574 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
575 break;
576 default:
da675b4e 577 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
578 info->state);
579 BUG();
fe69af00 580 }
581
582 DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
583 DDADR(info->data_dma_ch) = info->data_desc_addr;
584 DCSR(info->data_dma_ch) |= DCSR_RUN;
585}
586
587static void pxa3xx_nand_data_dma_irq(int channel, void *data)
588{
589 struct pxa3xx_nand_info *info = data;
590 uint32_t dcsr;
591
592 dcsr = DCSR(channel);
593 DCSR(channel) = dcsr;
594
595 if (dcsr & DCSR_BUSERR) {
596 info->retcode = ERR_DMABUSERR;
fe69af00 597 }
598
599 info->state = STATE_DMA_DONE;
600 enable_int(info, NDCR_INT_MASK);
601 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
fe69af00 602}
603#else
604static void start_data_dma(struct pxa3xx_nand_info *info)
605{}
606#endif
fe69af00 607
608static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
609{
610 struct pxa3xx_nand_info *info = data;
611
612 handle_data_pio(info);
613
614 info->state = STATE_CMD_DONE;
615 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
616
617 return IRQ_HANDLED;
618}
619
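/*
 * Top-half interrupt handler: decode NDSR, record ECC error counts, start
 * DMA or wake the PIO thread on data requests, load the queued NDCBx words
 * when the controller asks for a command, and signal the cmd_complete and
 * dev_ready completions.
 */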
fe69af00 620static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
621{
622 struct pxa3xx_nand_info *info = devid;
55d9fd6e 623 unsigned int status, is_completed = 0, is_ready = 0;
f3c8cfc2 624 unsigned int ready, cmd_done;
24542257 625 irqreturn_t ret = IRQ_HANDLED;
626
627 if (info->cs == 0) {
628 ready = NDSR_FLASH_RDY;
629 cmd_done = NDSR_CS0_CMDD;
630 } else {
631 ready = NDSR_RDY;
632 cmd_done = NDSR_CS1_CMDD;
633 }
fe69af00 634
635 status = nand_readl(info, NDSR);
636
637 if (status & NDSR_UNCORERR)
638 info->retcode = ERR_UNCORERR;
639 if (status & NDSR_CORERR) {
640 info->retcode = ERR_CORERR;
641 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
642 info->ecc_bch)
643 info->ecc_err_cnt = NDSR_ERR_CNT(status);
644 else
645 info->ecc_err_cnt = 1;
646
647 /*
648 * Each chunk composing a page is corrected independently,
649 * and we need to store the maximum number of corrected bitflips
650 * to return it to the MTD layer in ecc.read_page().
651 */
652 info->max_bitflips = max_t(unsigned int,
653 info->max_bitflips,
654 info->ecc_err_cnt);
655 }
656 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
657 /* decide whether to use DMA or PIO for the data transfer */
fe69af00 658 if (info->use_dma) {
659 disable_int(info, NDCR_INT_MASK);
660 info->state = (status & NDSR_RDDREQ) ?
661 STATE_DMA_READING : STATE_DMA_WRITING;
662 start_data_dma(info);
663 goto NORMAL_IRQ_EXIT;
fe69af00 664 } else {
665 info->state = (status & NDSR_RDDREQ) ?
666 STATE_PIO_READING : STATE_PIO_WRITING;
667 ret = IRQ_WAKE_THREAD;
668 goto NORMAL_IRQ_EXIT;
fe69af00 669 }
fe69af00 670 }
f3c8cfc2 671 if (status & cmd_done) {
672 info->state = STATE_CMD_DONE;
673 is_completed = 1;
fe69af00 674 }
f3c8cfc2 675 if (status & ready) {
f8155a40 676 info->state = STATE_READY;
55d9fd6e 677 is_ready = 1;
401e67e2 678 }
fe69af00 679
680 if (status & NDSR_WRCMDREQ) {
681 nand_writel(info, NDSR, NDSR_WRCMDREQ);
682 status &= ~NDSR_WRCMDREQ;
683 info->state = STATE_CMD_HANDLE;
684
685 /*
686 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
687 * must be loaded by writing either 12 or 16 bytes
688 * directly to NDCB0, four bytes at a time.
689 *
690 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
691 * but each NDCBx register can be read.
692 */
693 nand_writel(info, NDCB0, info->ndcb0);
694 nand_writel(info, NDCB0, info->ndcb1);
695 nand_writel(info, NDCB0, info->ndcb2);
696
697 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
698 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
699 nand_writel(info, NDCB0, info->ndcb3);
fe69af00 700 }
701
702 /* clear NDSR to let the controller exit the IRQ */
703 nand_writel(info, NDSR, status);
704 if (is_completed)
705 complete(&info->cmd_complete);
706 if (is_ready)
707 complete(&info->dev_ready);
f8155a40 708NORMAL_IRQ_EXIT:
24542257 709 return ret;
fe69af00 710}
711
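/*
 * is_buf_blank(): used both to skip programming all-0xFF pages and to treat
 * "uncorrectable" ECC errors on erased (blank) pages as harmless.
 */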
fe69af00 712static inline int is_buf_blank(uint8_t *buf, size_t len)
713{
714 for (; len > 0; len--)
715 if (*buf++ != 0xff)
716 return 0;
717 return 1;
718}
719
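/*
 * set_command_address(): pack the column and page address into NDCB1/NDCB2,
 * using the small-page or the large-page address layout as appropriate.
 */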
720static void set_command_address(struct pxa3xx_nand_info *info,
721 unsigned int page_size, uint16_t column, int page_addr)
722{
723 /* small page addr setting */
724 if (page_size < PAGE_CHUNK_SIZE) {
725 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
726 | (column & 0xFF);
727
728 info->ndcb2 = 0;
729 } else {
730 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
731 | (column & 0xFFFF);
732
733 if (page_addr & 0xFF0000)
734 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
735 else
736 info->ndcb2 = 0;
737 }
738}
739
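/*
 * prepare_start_command(): reset the per-command bookkeeping (buffer
 * positions, ECC/spare selection, return code) and pre-fill the data
 * buffer before a new command is issued.
 */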
c39ff03a 740static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
fe69af00 741{
742 struct pxa3xx_nand_host *host = info->host[info->cs];
743 struct mtd_info *mtd = host->mtd;
744
4eb2da89 745 /* reset the data and oob column pointers used to handle data */
746 info->buf_start = 0;
747 info->buf_count = 0;
4eb2da89 748 info->oob_size = 0;
749 info->data_buff_pos = 0;
750 info->oob_buff_pos = 0;
4eb2da89 751 info->use_ecc = 0;
5bb653e8 752 info->use_spare = 1;
4eb2da89 753 info->retcode = ERR_NONE;
87f5336e 754 info->ecc_err_cnt = 0;
f0e6a32e 755 info->ndcb3 = 0;
d20d0a6c 756 info->need_wait = 0;
fe69af00 757
758 switch (command) {
759 case NAND_CMD_READ0:
760 case NAND_CMD_PAGEPROG:
761 info->use_ecc = 1;
fe69af00 762 case NAND_CMD_READOOB:
fa543bef 763 pxa3xx_set_datasize(info, mtd);
fe69af00 764 break;
765 case NAND_CMD_PARAM:
766 info->use_spare = 0;
767 break;
768 default:
769 info->ndcb1 = 0;
770 info->ndcb2 = 0;
771 break;
772 }
773
774 /*
775 * If we are about to issue a read command, or about to set
776 * the write address, then clean the data buffer.
777 */
778 if (command == NAND_CMD_READ0 ||
779 command == NAND_CMD_READOOB ||
780 command == NAND_CMD_SEQIN) {
781
782 info->buf_count = mtd->writesize + mtd->oobsize;
783 memset(info->data_buff, 0xFF, info->buf_count);
784 }
785
786}
787
788static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
70ed8523 789 int ext_cmd_type, uint16_t column, int page_addr)
790{
791 int addr_cycle, exec_cmd;
792 struct pxa3xx_nand_host *host;
793 struct mtd_info *mtd;
794
795 host = info->host[info->cs];
796 mtd = host->mtd;
797 addr_cycle = 0;
798 exec_cmd = 1;
799
800 if (info->cs != 0)
801 info->ndcb0 = NDCB0_CSEL;
802 else
803 info->ndcb0 = 0;
804
805 if (command == NAND_CMD_SEQIN)
806 exec_cmd = 0;
4eb2da89 807
808 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
809 + host->col_addr_cycles);
fe69af00 810
811 switch (command) {
812 case NAND_CMD_READOOB:
fe69af00 813 case NAND_CMD_READ0:
814 info->buf_start = column;
815 info->ndcb0 |= NDCB0_CMD_TYPE(0)
816 | addr_cycle
817 | NAND_CMD_READ0;
818
4eb2da89 819 if (command == NAND_CMD_READOOB)
ec82135a 820 info->buf_start += mtd->writesize;
4eb2da89 821
822 /*
823 * Multiple page read needs an 'extended command type' field,
824 * which is either naked-read or last-read according to the
825 * state.
826 */
827 if (mtd->writesize == PAGE_CHUNK_SIZE) {
ec82135a 828 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
829 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
830 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
831 | NDCB0_LEN_OVRD
832 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
833 info->ndcb3 = info->chunk_size +
834 info->oob_size;
835 }
fe69af00 836
01d9947e 837 set_command_address(info, mtd->writesize, column, page_addr);
838 break;
839
fe69af00 840 case NAND_CMD_SEQIN:
4eb2da89 841
842 info->buf_start = column;
843 set_command_address(info, mtd->writesize, 0, page_addr);
844
845 /*
846 * Multiple page programming needs to execute the initial
847 * SEQIN command that sets the page address.
848 */
849 if (mtd->writesize > PAGE_CHUNK_SIZE) {
850 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
851 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
852 | addr_cycle
853 | command;
854 /* No data transfer in this case */
855 info->data_size = 0;
856 exec_cmd = 1;
857 }
fe69af00 858 break;
4eb2da89 859
fe69af00 860 case NAND_CMD_PAGEPROG:
861 if (is_buf_blank(info->data_buff,
862 (mtd->writesize + mtd->oobsize))) {
863 exec_cmd = 0;
864 break;
865 }
fe69af00 866
867 /* Second command setting for large pages */
868 if (mtd->writesize > PAGE_CHUNK_SIZE) {
869 /*
870 * Multiple page write uses the 'extended command'
871 * field. This can be used to issue a command dispatch
872 * or a naked-write depending on the current stage.
873 */
874 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
875 | NDCB0_LEN_OVRD
876 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
877 info->ndcb3 = info->chunk_size +
878 info->oob_size;
879
880 /*
881 * This is the command dispatch that completes a chunked
882 * page program operation.
883 */
884 if (info->data_size == 0) {
885 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
886 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
887 | command;
888 info->ndcb1 = 0;
889 info->ndcb2 = 0;
890 info->ndcb3 = 0;
891 }
892 } else {
893 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
894 | NDCB0_AUTO_RS
895 | NDCB0_ST_ROW_EN
896 | NDCB0_DBC
897 | (NAND_CMD_PAGEPROG << 8)
898 | NAND_CMD_SEQIN
899 | addr_cycle;
900 }
fe69af00 901 break;
4eb2da89 902
ce0268f6 903 case NAND_CMD_PARAM:
904 info->buf_count = 256;
905 info->ndcb0 |= NDCB0_CMD_TYPE(0)
906 | NDCB0_ADDR_CYC(1)
41a63430 907 | NDCB0_LEN_OVRD
ec82135a 908 | command;
ce0268f6 909 info->ndcb1 = (column & 0xFF);
41a63430 910 info->ndcb3 = 256;
911 info->data_size = 256;
912 break;
913
fe69af00 914 case NAND_CMD_READID:
d456882b 915 info->buf_count = host->read_id_bytes;
916 info->ndcb0 |= NDCB0_CMD_TYPE(3)
917 | NDCB0_ADDR_CYC(1)
ec82135a 918 | command;
d14231f1 919 info->ndcb1 = (column & 0xFF);
920
921 info->data_size = 8;
922 break;
fe69af00 923 case NAND_CMD_STATUS:
924 info->buf_count = 1;
925 info->ndcb0 |= NDCB0_CMD_TYPE(4)
926 | NDCB0_ADDR_CYC(1)
ec82135a 927 | command;
928
929 info->data_size = 8;
930 break;
931
932 case NAND_CMD_ERASE1:
933 info->ndcb0 |= NDCB0_CMD_TYPE(2)
934 | NDCB0_AUTO_RS
935 | NDCB0_ADDR_CYC(3)
936 | NDCB0_DBC
937 | (NAND_CMD_ERASE2 << 8)
938 | NAND_CMD_ERASE1;
939 info->ndcb1 = page_addr;
940 info->ndcb2 = 0;
941
fe69af00 942 break;
943 case NAND_CMD_RESET:
4eb2da89 944 info->ndcb0 |= NDCB0_CMD_TYPE(5)
ec82135a 945 | command;
946
947 break;
948
949 case NAND_CMD_ERASE2:
950 exec_cmd = 0;
fe69af00 951 break;
4eb2da89 952
fe69af00 953 default:
4eb2da89 954 exec_cmd = 0;
955 dev_err(&info->pdev->dev, "unsupported command %x\n",
956 command);
fe69af00 957 break;
958 }
959
960 return exec_cmd;
961}
962
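/*
 * nand_cmdfunc(): the single-command path, used whenever the page fits in
 * the controller's 2 KiB FIFO; it queues one NDCBx sequence, starts the
 * controller and waits for the command-complete interrupt.
 */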
963static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
964 int column, int page_addr)
4eb2da89 965{
966 struct pxa3xx_nand_host *host = mtd->priv;
967 struct pxa3xx_nand_info *info = host->info_data;
968 int ret, exec_cmd;
969
970 /*
971 * if this is an x16 device, then convert the input
972 * "byte" address into a "word" address appropriate
973 * for indexing a word-oriented device
974 */
48cf7efa 975 if (info->reg_ndcr & NDCR_DWIDTH_M)
976 column /= 2;
977
978 /*
979 * Different NAND chips may be hooked to different chip
980 * selects, so check whether the chip select has changed;
981 * if it has, reload the timing registers
982 */
983 if (info->cs != host->cs) {
984 info->cs = host->cs;
985 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
986 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
987 }
988
989 prepare_start_command(info, command);
990
d456882b 991 info->state = STATE_PREPARED;
992 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
993
994 if (exec_cmd) {
995 init_completion(&info->cmd_complete);
996 init_completion(&info->dev_ready);
997 info->need_wait = 1;
998 pxa3xx_nand_start(info);
999
1000 ret = wait_for_completion_timeout(&info->cmd_complete,
1001 CHIP_DELAY_TIMEOUT);
1002 if (!ret) {
da675b4e 1003 dev_err(&info->pdev->dev, "Command wait timed out!\n");
1004 /* Stop State Machine for next command cycle */
1005 pxa3xx_nand_stop(info);
1006 }
f8155a40 1007 }
d456882b 1008 info->state = STATE_IDLE;
1009}
1010
1011static void nand_cmdfunc_extended(struct mtd_info *mtd,
1012 const unsigned command,
1013 int column, int page_addr)
1014{
1015 struct pxa3xx_nand_host *host = mtd->priv;
1016 struct pxa3xx_nand_info *info = host->info_data;
1017 int ret, exec_cmd, ext_cmd_type;
1018
1019 /*
1020 * if this is an x16 device then convert the input
1021 * "byte" address into a "word" address appropriate
1022 * for indexing a word-oriented device
1023 */
1024 if (info->reg_ndcr & NDCR_DWIDTH_M)
1025 column /= 2;
1026
1027 /*
1028 * Different NAND chips may be hooked to different chip
1029 * selects, so check whether the chip select has changed;
1030 * if it has, reload the timing registers
1031 */
1032 if (info->cs != host->cs) {
1033 info->cs = host->cs;
1034 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1035 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1036 }
1037
1038 /* Select the extended command for the first command */
1039 switch (command) {
1040 case NAND_CMD_READ0:
1041 case NAND_CMD_READOOB:
1042 ext_cmd_type = EXT_CMD_TYPE_MONO;
1043 break;
1044 case NAND_CMD_SEQIN:
1045 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1046 break;
1047 case NAND_CMD_PAGEPROG:
1048 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1049 break;
1050 default:
1051 ext_cmd_type = 0;
535cb57a 1052 break;
1053 }
1054
1055 prepare_start_command(info, command);
1056
1057 /*
1058 * Prepare the "is ready" completion before starting a command
1059 * transaction sequence. If the command is not executed the
1060 * completion will be completed, see below.
1061 *
1062 * We can do that inside the loop because the command variable
1063 * is invariant and thus so is the exec_cmd.
1064 */
1065 info->need_wait = 1;
1066 init_completion(&info->dev_ready);
1067 do {
1068 info->state = STATE_PREPARED;
1069 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1070 column, page_addr);
1071 if (!exec_cmd) {
1072 info->need_wait = 0;
1073 complete(&info->dev_ready);
1074 break;
1075 }
1076
1077 init_completion(&info->cmd_complete);
1078 pxa3xx_nand_start(info);
1079
1080 ret = wait_for_completion_timeout(&info->cmd_complete,
1081 CHIP_DELAY_TIMEOUT);
1082 if (!ret) {
1083 dev_err(&info->pdev->dev, "Command wait timed out!\n");
1084 /* Stop State Machine for next command cycle */
1085 pxa3xx_nand_stop(info);
1086 break;
1087 }
1088
1089 /* Check if the sequence is complete */
1090 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1091 break;
1092
1093 /*
1094 * After a splitted program command sequence has issued
1095 * the command dispatch, the command sequence is complete.
1096 */
1097 if (info->data_size == 0 &&
1098 command == NAND_CMD_PAGEPROG &&
1099 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1100 break;
1101
1102 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1103 /* Last read: issue a 'last naked read' */
1104 if (info->data_size == info->chunk_size)
1105 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1106 else
1107 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1108
1109 /*
1110 * If a split program command has no more data to transfer,
1111 * the command dispatch must be issued to complete.
1112 */
1113 } else if (command == NAND_CMD_PAGEPROG &&
1114 info->data_size == 0) {
1115 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1116 }
1117 } while (1);
1118
1119 info->state = STATE_IDLE;
1120}
1121
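/*
 * The hardware-ECC page hooks below just stream the whole page plus OOB
 * through the driver buffer; the controller generates and checks the ECC
 * during the transfer, and the read path folds the recorded error counts
 * into mtd->ecc_stats.
 */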
fdbad98d 1122static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1fbb938d 1123 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1124{
1125 chip->write_buf(mtd, buf, mtd->writesize);
1126 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1127
1128 return 0;
1129}
1130
1131static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1132 struct nand_chip *chip, uint8_t *buf, int oob_required,
1133 int page)
f8155a40 1134{
1135 struct pxa3xx_nand_host *host = mtd->priv;
1136 struct pxa3xx_nand_info *info = host->info_data;
1137
1138 chip->read_buf(mtd, buf, mtd->writesize);
1139 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1140
1141 if (info->retcode == ERR_CORERR && info->use_ecc) {
1142 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1143
1144 } else if (info->retcode == ERR_UNCORERR) {
1145 /*
1146 * For a blank page (all 0xff) the HW calculates its ECC as
1147 * 0, which is different from the ECC information within the
1148 * OOB, so ignore such uncorrectable errors
1149 */
1150 if (is_buf_blank(buf, mtd->writesize))
1151 info->retcode = ERR_NONE;
1152 else
f8155a40 1153 mtd->ecc_stats.failed++;
fe69af00 1154 }
f8155a40 1155
87f5336e 1156 return info->max_bitflips;
fe69af00 1157}
1158
1159static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1160{
1161 struct pxa3xx_nand_host *host = mtd->priv;
1162 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1163 char retval = 0xFF;
1164
1165 if (info->buf_start < info->buf_count)
1166 /* Has a new command just been sent? */
1167 retval = info->data_buff[info->buf_start++];
1168
1169 return retval;
1170}
1171
1172static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1173{
1174 struct pxa3xx_nand_host *host = mtd->priv;
1175 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1176 u16 retval = 0xFFFF;
1177
1178 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1179 retval = *((u16 *)(info->data_buff+info->buf_start));
1180 info->buf_start += 2;
1181 }
1182 return retval;
1183}
1184
1185static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1186{
1187 struct pxa3xx_nand_host *host = mtd->priv;
1188 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1189 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1190
1191 memcpy(buf, info->data_buff + info->buf_start, real_len);
1192 info->buf_start += real_len;
1193}
1194
1195static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1196 const uint8_t *buf, int len)
1197{
1198 struct pxa3xx_nand_host *host = mtd->priv;
1199 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1200 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1201
1202 memcpy(info->data_buff + info->buf_start, buf, real_len);
1203 info->buf_start += real_len;
1204}
1205
fe69af00 1206static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1207{
1208 return;
1209}
1210
1211static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1212{
1213 struct pxa3xx_nand_host *host = mtd->priv;
1214 struct pxa3xx_nand_info *info = host->info_data;
1215 int ret;
1216
1217 if (info->need_wait) {
1218 ret = wait_for_completion_timeout(&info->dev_ready,
1219 CHIP_DELAY_TIMEOUT);
1220 info->need_wait = 0;
1221 if (!ret) {
1222 dev_err(&info->pdev->dev, "Ready wait timed out!\n");
1223 return NAND_STATUS_FAIL;
1224 }
1225 }
fe69af00 1226
1227 /* pxa3xx_nand_send_command has waited for command complete */
1228 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1229 if (info->retcode == ERR_NONE)
1230 return 0;
1231 else
1232 return NAND_STATUS_FAIL;
fe69af00 1233 }
1234
55d9fd6e 1235 return NAND_STATUS_READY;
fe69af00 1236}
1237
fe69af00 1238static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
c8c17c88 1239 const struct pxa3xx_nand_flash *f)
fe69af00 1240{
1241 struct platform_device *pdev = info->pdev;
453810b7 1242 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
f3c8cfc2 1243 struct pxa3xx_nand_host *host = info->host[info->cs];
f8155a40 1244 uint32_t ndcr = 0x0; /* enable all interrupts */
fe69af00 1245
1246 if (f->page_size != 2048 && f->page_size != 512) {
1247 dev_err(&pdev->dev, "Only 2048 and 512 byte page sizes are supported\n");
fe69af00 1248 return -EINVAL;
da675b4e 1249 }
fe69af00 1250
1251 if (f->flash_width != 16 && f->flash_width != 8) {
1252 dev_err(&pdev->dev, "Only 8-bit and 16-bit bus widths are supported\n");
fe69af00 1253 return -EINVAL;
da675b4e 1254 }
fe69af00 1255
1256 /* calculate flash information */
d456882b 1257 host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
fe69af00 1258
1259 /* calculate addressing information */
d456882b 1260 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
fe69af00 1261
1262 if (f->num_blocks * f->page_per_block > 65536)
d456882b 1263 host->row_addr_cycles = 3;
fe69af00 1264 else
d456882b 1265 host->row_addr_cycles = 2;
fe69af00 1266
1267 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
d456882b 1268 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
fe69af00 1269 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1270 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1271 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1272 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1273
d456882b 1274 ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
fe69af00 1275 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1276
48cf7efa 1277 info->reg_ndcr = ndcr;
fe69af00 1278
d456882b 1279 pxa3xx_nand_set_timing(host, f->timing);
fe69af00 1280 return 0;
1281}
1282
1283static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1284{
1285 /*
1286 * Chip select 0 is hard-coded here because keep_config is not
1287 * supported when more than one chip is attached to the controller
1288 */
1289 struct pxa3xx_nand_host *host = info->host[0];
f271049e 1290 uint32_t ndcr = nand_readl(info, NDCR);
f271049e 1291
d456882b 1292 if (ndcr & NDCR_PAGE_SZ) {
2128b08c 1293 /* Controller's FIFO size */
70ed8523 1294 info->chunk_size = 2048;
1295 host->read_id_bytes = 4;
1296 } else {
70ed8523 1297 info->chunk_size = 512;
1298 host->read_id_bytes = 2;
1299 }
1300
70ed8523 1301 /* Set an initial chunk size */
1302 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1303 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1304 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1305 return 0;
1306}
1307
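/*
 * Buffer allocation: when DMA is used, a coherent buffer holding the data
 * plus a trailing pxa_dma_desc is allocated and a DMA channel is claimed;
 * otherwise a plain kmalloc() buffer is sufficient.
 */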
f4db2e3a 1308#ifdef ARCH_HAS_DMA
fe69af00 1309static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1310{
1311 struct platform_device *pdev = info->pdev;
62e8b851 1312 int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
fe69af00 1313
1314 if (use_dma == 0) {
62e8b851 1315 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
fe69af00 1316 if (info->data_buff == NULL)
1317 return -ENOMEM;
1318 return 0;
1319 }
1320
62e8b851 1321 info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
fe69af00 1322 &info->data_buff_phys, GFP_KERNEL);
1323 if (info->data_buff == NULL) {
1324 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
1325 return -ENOMEM;
1326 }
1327
fe69af00 1328 info->data_desc = (void *)info->data_buff + data_desc_offset;
1329 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1330
1331 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1332 pxa3xx_nand_data_dma_irq, info);
1333 if (info->data_dma_ch < 0) {
1334 dev_err(&pdev->dev, "failed to request data dma\n");
62e8b851 1335 dma_free_coherent(&pdev->dev, info->buf_size,
fe69af00 1336 info->data_buff, info->data_buff_phys);
1337 return info->data_dma_ch;
1338 }
1339
1340 /*
1341 * Now that DMA buffers are allocated we turn on
1342 * DMA proper for I/O operations.
1343 */
1344 info->use_dma = 1;
fe69af00 1345 return 0;
1346}
1347
1348static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1349{
1350 struct platform_device *pdev = info->pdev;
15b540c7 1351 if (info->use_dma) {
498b6145 1352 pxa_free_dma(info->data_dma_ch);
62e8b851 1353 dma_free_coherent(&pdev->dev, info->buf_size,
1354 info->data_buff, info->data_buff_phys);
1355 } else {
1356 kfree(info->data_buff);
1357 }
1358}
1359#else
1360static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1361{
62e8b851 1362 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1363 if (info->data_buff == NULL)
1364 return -ENOMEM;
1365 return 0;
1366}
1367
1368static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1369{
1370 kfree(info->data_buff);
1371}
1372#endif
498b6145 1373
1374static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1375{
f3c8cfc2 1376 struct mtd_info *mtd;
2d79ab16 1377 struct nand_chip *chip;
d456882b 1378 int ret;
2d79ab16 1379
f3c8cfc2 1380 mtd = info->host[info->cs]->mtd;
1381 chip = mtd->priv;
1382
401e67e2 1383 /* use the common timing to make a try */
1384 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1385 if (ret)
1386 return ret;
1387
2d79ab16 1388 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1389 ret = chip->waitfunc(mtd, chip);
1390 if (ret & NAND_STATUS_FAIL)
1391 return -ENODEV;
d456882b 1392
56704d85 1393 return 0;
401e67e2 1394}
fe69af00 1395
1396static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1397 struct nand_ecc_ctrl *ecc,
30b2afc8 1398 int strength, int ecc_stepsize, int page_size)
43bcfd2b 1399{
30b2afc8 1400 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
70ed8523 1401 info->chunk_size = 2048;
1402 info->spare_size = 40;
1403 info->ecc_size = 24;
1404 ecc->mode = NAND_ECC_HW;
1405 ecc->size = 512;
1406 ecc->strength = 1;
43bcfd2b 1407
30b2afc8 1408 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
70ed8523 1409 info->chunk_size = 512;
1410 info->spare_size = 8;
1411 info->ecc_size = 8;
1412 ecc->mode = NAND_ECC_HW;
1413 ecc->size = 512;
1414 ecc->strength = 1;
43bcfd2b 1415
1416 /*
1417 * Required ECC: 4-bit correction per 512 bytes
1418 * Select: 16-bit correction per 2048 bytes
1419 */
1420 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1421 info->ecc_bch = 1;
1422 info->chunk_size = 2048;
1423 info->spare_size = 32;
1424 info->ecc_size = 32;
1425 ecc->mode = NAND_ECC_HW;
1426 ecc->size = info->chunk_size;
1427 ecc->layout = &ecc_layout_2KB_bch4bit;
1428 ecc->strength = 16;
3db227b6 1429
30b2afc8 1430 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1431 info->ecc_bch = 1;
1432 info->chunk_size = 2048;
1433 info->spare_size = 32;
1434 info->ecc_size = 32;
1435 ecc->mode = NAND_ECC_HW;
1436 ecc->size = info->chunk_size;
1437 ecc->layout = &ecc_layout_4KB_bch4bit;
1438 ecc->strength = 16;
70ed8523 1439
1440 /*
1441 * Required ECC: 8-bit correction per 512 bytes
1442 * Select: 16-bit correction per 1024 bytes
1443 */
1444 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1445 info->ecc_bch = 1;
1446 info->chunk_size = 1024;
1447 info->spare_size = 0;
1448 info->ecc_size = 32;
1449 ecc->mode = NAND_ECC_HW;
1450 ecc->size = info->chunk_size;
1451 ecc->layout = &ecc_layout_4KB_bch8bit;
1452 ecc->strength = 16;
1453 } else {
1454 dev_err(&info->pdev->dev,
1455 "ECC strength %d at page size %d is not supported\n",
1456 strength, page_size);
1457 return -ENODEV;
70ed8523 1458 }
1459
1460 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1461 ecc->strength, ecc->size);
1462 return 0;
1463}
1464
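/*
 * pxa3xx_nand_scan(): probe the attached chip (or reuse the bootloader
 * configuration when keep_config is set), select the ECC scheme and the
 * addressing parameters, then hand over to nand_scan_tail().
 */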
401e67e2 1465static int pxa3xx_nand_scan(struct mtd_info *mtd)
fe69af00 1466{
1467 struct pxa3xx_nand_host *host = mtd->priv;
1468 struct pxa3xx_nand_info *info = host->info_data;
401e67e2 1469 struct platform_device *pdev = info->pdev;
453810b7 1470 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
0fab028b 1471 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
1472 const struct pxa3xx_nand_flash *f = NULL;
1473 struct nand_chip *chip = mtd->priv;
1474 uint32_t id = -1;
4332c116 1475 uint64_t chipsize;
401e67e2 1476 int i, ret, num;
30b2afc8 1477 uint16_t ecc_strength, ecc_step;
1478
1479 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
4332c116 1480 goto KEEP_CONFIG;
1481
1482 ret = pxa3xx_nand_sensing(info);
d456882b 1483 if (ret) {
1484 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1485 info->cs);
401e67e2 1486
d456882b 1487 return ret;
1488 }
1489
1490 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1491 id = *((uint16_t *)(info->data_buff));
1492 if (id != 0)
da675b4e 1493 dev_info(&info->pdev->dev, "Detected a flash id %x\n", id);
401e67e2 1494 else {
1495 dev_warn(&info->pdev->dev,
1496 "Read an ID of 0; the timings are potentially set wrong!\n");
1497
1498 return -EINVAL;
1499 }
1500
1501 num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
1502 for (i = 0; i < num; i++) {
1503 if (i < pdata->num_flash)
1504 f = pdata->flash + i;
1505 else
1506 f = &builtin_flash_types[i - pdata->num_flash + 1];
1507
1508 /* find the chip in default list */
4332c116 1509 if (f->chip_id == id)
401e67e2 1510 break;
1511 }
1512
4332c116 1513 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
da675b4e 1514 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
1515
1516 return -EINVAL;
1517 }
1518
1519 ret = pxa3xx_nand_config_flash(info, f);
1520 if (ret) {
1521 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
1522 return ret;
1523 }
1524
1525 memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
1526
4332c116 1527 pxa3xx_flash_ids[0].name = f->name;
68aa352d 1528 pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
1529 pxa3xx_flash_ids[0].pagesize = f->page_size;
1530 chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1531 pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1532 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1533 if (f->flash_width == 16)
1534 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1535 pxa3xx_flash_ids[1].name = NULL;
1536 def = pxa3xx_flash_ids;
4332c116 1537KEEP_CONFIG:
48cf7efa 1538 if (info->reg_ndcr & NDCR_DWIDTH_M)
1539 chip->options |= NAND_BUSWIDTH_16;
1540
1541 /* Device detection must be done with ECC disabled */
1542 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1543 nand_writel(info, NDECCCTRL, 0x0);
1544
0fab028b 1545 if (nand_scan_ident(mtd, 1, def))
4332c116 1546 return -ENODEV;
1547
1548 if (pdata->flash_bbt) {
1549 /*
1550 * We'll use a bad block table stored in-flash and don't
1551 * allow writing the bad block marker to the flash.
1552 */
1553 chip->bbt_options |= NAND_BBT_USE_FLASH |
1554 NAND_BBT_NO_OOB_BBM;
1555 chip->bbt_td = &bbt_main_descr;
1556 chip->bbt_md = &bbt_mirror_descr;
1557 }
1558
1559 /*
1560 * If the page size is bigger than the FIFO size, check that
1561 * we were given the right variant and then switch to the extended
1562 * (aka split) command handling.
1563 */
1564 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1565 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1566 chip->cmdfunc = nand_cmdfunc_extended;
1567 } else {
1568 dev_err(&info->pdev->dev,
1569 "unsupported page size on this variant\n");
1570 return -ENODEV;
1571 }
1572 }
1573
1574 if (pdata->ecc_strength && pdata->ecc_step_size) {
1575 ecc_strength = pdata->ecc_strength;
1576 ecc_step = pdata->ecc_step_size;
1577 } else {
1578 ecc_strength = chip->ecc_strength_ds;
1579 ecc_step = chip->ecc_step_ds;
1580 }
1581
1582 /* Set default ECC strength requirements on non-ONFI devices */
1583 if (ecc_strength < 1 && ecc_step < 1) {
1584 ecc_strength = 1;
1585 ecc_step = 512;
1586 }
1587
1588 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1589 ecc_step, mtd->writesize);
1590 if (ret)
1591 return ret;
43bcfd2b 1592
4332c116 1593 /* calculate addressing information */
1594 if (mtd->writesize >= 2048)
1595 host->col_addr_cycles = 2;
1596 else
1597 host->col_addr_cycles = 1;
1598
1599 /* release the initial buffer */
1600 kfree(info->data_buff);
1601
1602 /* allocate the real data + oob buffer */
1603 info->buf_size = mtd->writesize + mtd->oobsize;
1604 ret = pxa3xx_nand_init_buff(info);
1605 if (ret)
1606 return ret;
4332c116 1607 info->oob_buff = info->data_buff + mtd->writesize;
62e8b851 1608
4332c116 1609 if ((mtd->size >> chip->page_shift) > 65536)
d456882b 1610 host->row_addr_cycles = 3;
4332c116 1611 else
d456882b 1612 host->row_addr_cycles = 2;
401e67e2 1613 return nand_scan_tail(mtd);
fe69af00 1614}
1615
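/*
 * alloc_nand_resource(): allocate the shared controller state plus one
 * mtd_info/nand_chip/host trio per chip select, and claim the clock,
 * MMIO, DMA and IRQ resources.
 */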
d456882b 1616static int alloc_nand_resource(struct platform_device *pdev)
fe69af00 1617{
f3c8cfc2 1618 struct pxa3xx_nand_platform_data *pdata;
fe69af00 1619 struct pxa3xx_nand_info *info;
d456882b 1620 struct pxa3xx_nand_host *host;
6e308f87 1621 struct nand_chip *chip = NULL;
fe69af00 1622 struct mtd_info *mtd;
1623 struct resource *r;
f3c8cfc2 1624 int ret, irq, cs;
fe69af00 1625
453810b7 1626 pdata = dev_get_platdata(&pdev->dev);
1627 if (pdata->num_cs <= 0)
1628 return -ENODEV;
1629 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1630 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1631 if (!info)
d456882b 1632 return -ENOMEM;
fe69af00 1633
fe69af00 1634 info->pdev = pdev;
c7e9c7e7 1635 info->variant = pxa3xx_nand_get_variant(pdev);
1636 for (cs = 0; cs < pdata->num_cs; cs++) {
1637 mtd = (struct mtd_info *)((unsigned int)&info[1] +
1638 (sizeof(*mtd) + sizeof(*host)) * cs);
1639 chip = (struct nand_chip *)(&mtd[1]);
1640 host = (struct pxa3xx_nand_host *)chip;
1641 info->host[cs] = host;
1642 host->mtd = mtd;
1643 host->cs = cs;
1644 host->info_data = info;
1645 mtd->priv = host;
1646 mtd->owner = THIS_MODULE;
1647
1648 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1649 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1650 chip->controller = &info->controller;
1651 chip->waitfunc = pxa3xx_nand_waitfunc;
1652 chip->select_chip = pxa3xx_nand_select_chip;
1653 chip->read_word = pxa3xx_nand_read_word;
1654 chip->read_byte = pxa3xx_nand_read_byte;
1655 chip->read_buf = pxa3xx_nand_read_buf;
1656 chip->write_buf = pxa3xx_nand_write_buf;
664c7f5e 1657 chip->options |= NAND_NO_SUBPAGE_WRITE;
5cbbdc6a 1658 chip->cmdfunc = nand_cmdfunc;
f3c8cfc2 1659 }
1660
1661 spin_lock_init(&chip->controller->lock);
1662 init_waitqueue_head(&chip->controller->wq);
9ca7944d 1663 info->clk = devm_clk_get(&pdev->dev, NULL);
fe69af00 1664 if (IS_ERR(info->clk)) {
1665 dev_err(&pdev->dev, "failed to get nand clock\n");
4c073cd2 1666 return PTR_ERR(info->clk);
fe69af00 1667 }
1668 ret = clk_prepare_enable(info->clk);
1669 if (ret < 0)
1670 return ret;
fe69af00 1671
1672 if (use_dma) {
1673 /*
1674 * This is a dirty hack to make this driver work from
1675 * devicetree bindings. It can be removed once we have
1676 * a proper DMA controller framework for DT.
1677 */
1678 if (pdev->dev.of_node &&
1679 of_machine_is_compatible("marvell,pxa3xx")) {
1680 info->drcmr_dat = 97;
1681 info->drcmr_cmd = 99;
1682 } else {
1683 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1684 if (r == NULL) {
1685 dev_err(&pdev->dev,
1686 "no resource defined for data DMA\n");
1687 ret = -ENXIO;
1688 goto fail_disable_clk;
1689 }
1690 info->drcmr_dat = r->start;
1691
1692 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1693 if (r == NULL) {
1694 dev_err(&pdev->dev,
1695 "no resource defined for cmd DMA\n");
1696 ret = -ENXIO;
1697 goto fail_disable_clk;
1698 }
1699 info->drcmr_cmd = r->start;
1e7ba630 1700 }
fe69af00 1701 }
fe69af00 1702
1703 irq = platform_get_irq(pdev, 0);
1704 if (irq < 0) {
1705 dev_err(&pdev->dev, "no IRQ resource defined\n");
1706 ret = -ENXIO;
9ca7944d 1707 goto fail_disable_clk;
fe69af00 1708 }
1709
1710 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1711 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1712 if (IS_ERR(info->mmio_base)) {
1713 ret = PTR_ERR(info->mmio_base);
9ca7944d 1714 goto fail_disable_clk;
fe69af00 1715 }
8638fac8 1716 info->mmio_phys = r->start;
fe69af00 1717
1718 /* Allocate a buffer to allow flash detection */
1719 info->buf_size = INIT_BUFFER_SIZE;
1720 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1721 if (info->data_buff == NULL) {
1722 ret = -ENOMEM;
9ca7944d 1723 goto fail_disable_clk;
62e8b851 1724 }
fe69af00 1725
1726 /* initialize all interrupts to be disabled */
1727 disable_int(info, NDSR_MASK);
1728
1729 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1730 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1731 pdev->name, info);
fe69af00 1732 if (ret < 0) {
1733 dev_err(&pdev->dev, "failed to request IRQ\n");
1734 goto fail_free_buf;
1735 }
1736
e353a20a 1737 platform_set_drvdata(pdev, info);
fe69af00 1738
d456882b 1739 return 0;
fe69af00 1740
fe69af00 1741fail_free_buf:
401e67e2 1742 free_irq(irq, info);
62e8b851 1743 kfree(info->data_buff);
9ca7944d 1744fail_disable_clk:
fb32061f 1745 clk_disable_unprepare(info->clk);
d456882b 1746 return ret;
fe69af00 1747}
1748
1749static int pxa3xx_nand_remove(struct platform_device *pdev)
1750{
e353a20a 1751 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2 1752 struct pxa3xx_nand_platform_data *pdata;
f3c8cfc2 1753 int irq, cs;
fe69af00 1754
1755 if (!info)
1756 return 0;
1757
453810b7 1758 pdata = dev_get_platdata(&pdev->dev);
fe69af00 1759
1760 irq = platform_get_irq(pdev, 0);
1761 if (irq >= 0)
1762 free_irq(irq, info);
498b6145 1763 pxa3xx_nand_free_buff(info);
82a72d10 1764
fb32061f 1765 clk_disable_unprepare(info->clk);
82a72d10 1766
1767 for (cs = 0; cs < pdata->num_cs; cs++)
1768 nand_release(info->host[cs]->mtd);
fe69af00 1769 return 0;
1770}
1771
1772static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1773{
1774 struct pxa3xx_nand_platform_data *pdata;
1775 struct device_node *np = pdev->dev.of_node;
1776 const struct of_device_id *of_id =
1777 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1778
1779 if (!of_id)
1780 return 0;
1781
1782 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1783 if (!pdata)
1784 return -ENOMEM;
1785
1786 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1787 pdata->enable_arbiter = 1;
1788 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1789 pdata->keep_config = 1;
1790 of_property_read_u32(np, "num-cs", &pdata->num_cs);
776f265e 1791 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1e7ba630 1792
1793 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1794 if (pdata->ecc_strength < 0)
1795 pdata->ecc_strength = 0;
1796
1797 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1798 if (pdata->ecc_step_size < 0)
1799 pdata->ecc_step_size = 0;
1800
1801 pdev->dev.platform_data = pdata;
1802
1803 return 0;
1804}
1e7ba630 1805
1806static int pxa3xx_nand_probe(struct platform_device *pdev)
1807{
1808 struct pxa3xx_nand_platform_data *pdata;
1e7ba630 1809 struct mtd_part_parser_data ppdata = {};
e353a20a 1810 struct pxa3xx_nand_info *info;
f3c8cfc2 1811 int ret, cs, probe_success;
e353a20a 1812
1813#ifndef ARCH_HAS_DMA
1814 if (use_dma) {
1815 use_dma = 0;
1816 dev_warn(&pdev->dev,
1817 "This platform can't do DMA on this device\n");
1818 }
1819#endif
1820 ret = pxa3xx_nand_probe_dt(pdev);
1821 if (ret)
1822 return ret;
1823
453810b7 1824 pdata = dev_get_platdata(&pdev->dev);
1825 if (!pdata) {
1826 dev_err(&pdev->dev, "no platform data defined\n");
1827 return -ENODEV;
1828 }
1829
1830 ret = alloc_nand_resource(pdev);
1831 if (ret) {
1832 dev_err(&pdev->dev, "alloc nand resource failed\n");
1833 return ret;
1834 }
e353a20a 1835
d456882b 1836 info = platform_get_drvdata(pdev);
1837 probe_success = 0;
1838 for (cs = 0; cs < pdata->num_cs; cs++) {
b7655bcb 1839 struct mtd_info *mtd = info->host[cs]->mtd;
f455578d 1840
1841 /*
1842 * The mtd name matches the one used in 'mtdparts' kernel
1843 * parameter. This name cannot be changed, or the user's
1844 * mtd partition configuration would break.
1845 */
1846 mtd->name = "pxa3xx_nand-0";
f3c8cfc2 1847 info->cs = cs;
b7655bcb 1848 ret = pxa3xx_nand_scan(mtd);
1849 if (ret) {
1850 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1851 cs);
1852 continue;
1853 }
1854
1e7ba630 1855 ppdata.of_node = pdev->dev.of_node;
b7655bcb 1856 ret = mtd_device_parse_register(mtd, NULL,
1e7ba630 1857 &ppdata, pdata->parts[cs],
42d7fbe2 1858 pdata->nr_parts[cs]);
1859 if (!ret)
1860 probe_success = 1;
1861 }
1862
1863 if (!probe_success) {
1864 pxa3xx_nand_remove(pdev);
1865 return -ENODEV;
1866 }
1867
f3c8cfc2 1868 return 0;
1869}
1870
fe69af00 1871#ifdef CONFIG_PM
1872static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1873{
e353a20a 1874 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1875 struct pxa3xx_nand_platform_data *pdata;
1876 struct mtd_info *mtd;
1877 int cs;
fe69af00 1878
453810b7 1879 pdata = dev_get_platdata(&pdev->dev);
f8155a40 1880 if (info->state) {
fe69af00 1881 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1882 return -EAGAIN;
1883 }
1884
1885 for (cs = 0; cs < pdata->num_cs; cs++) {
1886 mtd = info->host[cs]->mtd;
3fe4bae8 1887 mtd_suspend(mtd);
1888 }
1889
fe69af00 1890 return 0;
1891}
1892
1893static int pxa3xx_nand_resume(struct platform_device *pdev)
1894{
e353a20a 1895 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1896 struct pxa3xx_nand_platform_data *pdata;
1897 struct mtd_info *mtd;
1898 int cs;
051fc41c 1899
453810b7 1900 pdata = dev_get_platdata(&pdev->dev);
1901 /* We don't want to handle interrupt without calling mtd routine */
1902 disable_int(info, NDCR_INT_MASK);
fe69af00 1903
1904 /*
1905 * Directly set the chip select to an invalid value so that
1906 * the driver resets the timing according to the current
1907 * chip select at the beginning of cmdfunc
1908 */
1909 info->cs = 0xff;
fe69af00 1910
1911 /*
1912 * As the spec says, the NDSR would be updated to 0x1800 when
1913 * doing the nand_clk disable/enable.
1914 * To prevent it from damaging the driver's state machine, clear
1915 * all status bits before resuming
1916 */
1917 nand_writel(info, NDSR, NDSR_MASK);
1918 for (cs = 0; cs < pdata->num_cs; cs++) {
1919 mtd = info->host[cs]->mtd;
ead995f8 1920 mtd_resume(mtd);
1921 }
1922
18c81b18 1923 return 0;
fe69af00 1924}
1925#else
1926#define pxa3xx_nand_suspend NULL
1927#define pxa3xx_nand_resume NULL
1928#endif
1929
1930static struct platform_driver pxa3xx_nand_driver = {
1931 .driver = {
1932 .name = "pxa3xx-nand",
5576bc7b 1933 .of_match_table = pxa3xx_nand_dt_ids,
fe69af00 1934 },
1935 .probe = pxa3xx_nand_probe,
1936 .remove = pxa3xx_nand_remove,
1937 .suspend = pxa3xx_nand_suspend,
1938 .resume = pxa3xx_nand_resume,
1939};
1940
f99640de 1941module_platform_driver(pxa3xx_nand_driver);
fe69af00 1942
1943MODULE_LICENSE("GPL");
1944MODULE_DESCRIPTION("PXA3xx NAND controller driver");