Merge tag 'for-linus-4.6-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git...
[deliverable/linux.git] / drivers / mtd / nand / pxa3xx_nand.c
CommitLineData
fe69af00 1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
de484a38
EG
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
fe69af00 12 */
13
a88bdbb5 14#include <linux/kernel.h>
fe69af00 15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
8f5ba31a 18#include <linux/dmaengine.h>
fe69af00 19#include <linux/dma-mapping.h>
8f5ba31a 20#include <linux/dma/pxa-dma.h>
fe69af00 21#include <linux/delay.h>
22#include <linux/clk.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h>
a1c06ee1 26#include <linux/io.h>
afca11ec 27#include <linux/iopoll.h>
a1c06ee1 28#include <linux/irq.h>
5a0e3ad6 29#include <linux/slab.h>
1e7ba630
DM
30#include <linux/of.h>
31#include <linux/of_device.h>
776f265e 32#include <linux/of_mtd.h>
293b2da1 33#include <linux/platform_data/mtd-nand-pxa3xx.h>
fe69af00 34
e5860c18
NMG
35#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
36#define NAND_STOP_DELAY msecs_to_jiffies(40)
4eb2da89 37#define PAGE_CHUNK_SIZE (2048)
fe69af00 38
62e8b851
EG
39/*
40 * Define a buffer size for the initial command that detects the flash device:
c1634097
EG
41 * STATUS, READID and PARAM.
42 * ONFI param page is 256 bytes, and there are three redundant copies
43 * to be read. JEDEC param page is 512 bytes, and there are also three
44 * redundant copies to be read.
45 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
62e8b851 46 */
c1634097 47#define INIT_BUFFER_SIZE 2048
62e8b851 48
fe69af00 49/* registers and bit definitions */
50#define NDCR (0x00) /* Control register */
51#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
52#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
53#define NDSR (0x14) /* Status Register */
54#define NDPCR (0x18) /* Page Count Register */
55#define NDBDR0 (0x1C) /* Bad Block Register 0 */
56#define NDBDR1 (0x20) /* Bad Block Register 1 */
43bcfd2b 57#define NDECCCTRL (0x28) /* ECC control */
fe69af00 58#define NDDB (0x40) /* Data Buffer */
59#define NDCB0 (0x48) /* Command Buffer0 */
60#define NDCB1 (0x4C) /* Command Buffer1 */
61#define NDCB2 (0x50) /* Command Buffer2 */
62
63#define NDCR_SPARE_EN (0x1 << 31)
64#define NDCR_ECC_EN (0x1 << 30)
65#define NDCR_DMA_EN (0x1 << 29)
66#define NDCR_ND_RUN (0x1 << 28)
67#define NDCR_DWIDTH_C (0x1 << 27)
68#define NDCR_DWIDTH_M (0x1 << 26)
69#define NDCR_PAGE_SZ (0x1 << 24)
70#define NDCR_NCSX (0x1 << 23)
71#define NDCR_ND_MODE (0x3 << 21)
72#define NDCR_NAND_MODE (0x0)
73#define NDCR_CLR_PG_CNT (0x1 << 20)
e971affa
RJ
74#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
75#define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
fe69af00 76#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
77#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
78
79#define NDCR_RA_START (0x1 << 15)
80#define NDCR_PG_PER_BLK (0x1 << 14)
81#define NDCR_ND_ARB_EN (0x1 << 12)
f8155a40 82#define NDCR_INT_MASK (0xFFF)
fe69af00 83
84#define NDSR_MASK (0xfff)
87f5336e
EG
85#define NDSR_ERR_CNT_OFF (16)
86#define NDSR_ERR_CNT_MASK (0x1f)
87#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
f8155a40
LW
88#define NDSR_RDY (0x1 << 12)
89#define NDSR_FLASH_RDY (0x1 << 11)
fe69af00 90#define NDSR_CS0_PAGED (0x1 << 10)
91#define NDSR_CS1_PAGED (0x1 << 9)
92#define NDSR_CS0_CMDD (0x1 << 8)
93#define NDSR_CS1_CMDD (0x1 << 7)
94#define NDSR_CS0_BBD (0x1 << 6)
95#define NDSR_CS1_BBD (0x1 << 5)
87f5336e
EG
96#define NDSR_UNCORERR (0x1 << 4)
97#define NDSR_CORERR (0x1 << 3)
fe69af00 98#define NDSR_WRDREQ (0x1 << 2)
99#define NDSR_RDDREQ (0x1 << 1)
100#define NDSR_WRCMDREQ (0x1)
101
41a63430 102#define NDCB0_LEN_OVRD (0x1 << 28)
4eb2da89 103#define NDCB0_ST_ROW_EN (0x1 << 26)
fe69af00 104#define NDCB0_AUTO_RS (0x1 << 25)
105#define NDCB0_CSEL (0x1 << 24)
70ed8523
EG
106#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
107#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
fe69af00 108#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
109#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
110#define NDCB0_NC (0x1 << 20)
111#define NDCB0_DBC (0x1 << 19)
112#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
113#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
114#define NDCB0_CMD2_MASK (0xff << 8)
115#define NDCB0_CMD1_MASK (0xff)
116#define NDCB0_ADDR_CYC_SHIFT (16)
117
70ed8523
EG
118#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
119#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
120#define EXT_CMD_TYPE_READ 4 /* Read */
121#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
122#define EXT_CMD_TYPE_FINAL 3 /* Final command */
123#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
124#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
125
b226eca2
EG
126/*
127 * This should be large enough to read 'ONFI' and 'JEDEC'.
128 * Let's use 7 bytes, which is the maximum ID count supported
129 * by the controller (see NDCR_RD_ID_CNT_MASK).
130 */
131#define READ_ID_BYTES 7
132
fe69af00 133/* macros for registers read/write */
26d072e3
RJ
134#define nand_writel(info, off, val) \
135 do { \
136 dev_vdbg(&info->pdev->dev, \
137 "%s():%d nand_writel(0x%x, 0x%04x)\n", \
138 __func__, __LINE__, (val), (off)); \
139 writel_relaxed((val), (info)->mmio_base + (off)); \
140 } while (0)
fe69af00 141
26d072e3
RJ
142#define nand_readl(info, off) \
143 ({ \
144 unsigned int _v; \
145 _v = readl_relaxed((info)->mmio_base + (off)); \
146 dev_vdbg(&info->pdev->dev, \
147 "%s():%d nand_readl(0x%04x) = 0x%x\n", \
148 __func__, __LINE__, (off), _v); \
149 _v; \
150 })
fe69af00 151
152/* error code and state */
153enum {
154 ERR_NONE = 0,
155 ERR_DMABUSERR = -1,
156 ERR_SENDCMD = -2,
87f5336e 157 ERR_UNCORERR = -3,
fe69af00 158 ERR_BBERR = -4,
87f5336e 159 ERR_CORERR = -5,
fe69af00 160};
161
162enum {
f8155a40 163 STATE_IDLE = 0,
d456882b 164 STATE_PREPARED,
fe69af00 165 STATE_CMD_HANDLE,
166 STATE_DMA_READING,
167 STATE_DMA_WRITING,
168 STATE_DMA_DONE,
169 STATE_PIO_READING,
170 STATE_PIO_WRITING,
f8155a40
LW
171 STATE_CMD_DONE,
172 STATE_READY,
fe69af00 173};
174
c0f3b864
EG
175enum pxa3xx_nand_variant {
176 PXA3XX_NAND_VARIANT_PXA,
177 PXA3XX_NAND_VARIANT_ARMADA370,
178};
179
d456882b
LW
180struct pxa3xx_nand_host {
181 struct nand_chip chip;
d456882b
LW
182 void *info_data;
183
184 /* page size of attached chip */
d456882b 185 int use_ecc;
f3c8cfc2 186 int cs;
fe69af00 187
d456882b
LW
188 /* calculated from pxa3xx_nand_flash data */
189 unsigned int col_addr_cycles;
190 unsigned int row_addr_cycles;
d456882b
LW
191};
192
193struct pxa3xx_nand_info {
401e67e2 194 struct nand_hw_control controller;
fe69af00 195 struct platform_device *pdev;
fe69af00 196
197 struct clk *clk;
198 void __iomem *mmio_base;
8638fac8 199 unsigned long mmio_phys;
55d9fd6e 200 struct completion cmd_complete, dev_ready;
fe69af00 201
202 unsigned int buf_start;
203 unsigned int buf_count;
62e8b851 204 unsigned int buf_size;
fa543bef
EG
205 unsigned int data_buff_pos;
206 unsigned int oob_buff_pos;
fe69af00 207
208 /* DMA information */
8f5ba31a
RJ
209 struct scatterlist sg;
210 enum dma_data_direction dma_dir;
211 struct dma_chan *dma_chan;
212 dma_cookie_t dma_cookie;
fe69af00 213 int drcmr_dat;
fe69af00 214
215 unsigned char *data_buff;
18c81b18 216 unsigned char *oob_buff;
fe69af00 217 dma_addr_t data_buff_phys;
fe69af00 218 int data_dma_ch;
fe69af00 219
f3c8cfc2 220 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
fe69af00 221 unsigned int state;
222
c0f3b864
EG
223 /*
224 * This driver supports NFCv1 (as found in PXA SoC)
225 * and NFCv2 (as found in Armada 370/XP SoC).
226 */
227 enum pxa3xx_nand_variant variant;
228
f3c8cfc2 229 int cs;
fe69af00 230 int use_ecc; /* use HW ECC ? */
43bcfd2b 231 int ecc_bch; /* using BCH ECC? */
fe69af00 232 int use_dma; /* use DMA ? */
5bb653e8 233 int use_spare; /* use spare ? */
55d9fd6e 234 int need_wait;
fe69af00 235
c2cdace7
TP
236 /* Amount of real data per full chunk */
237 unsigned int chunk_size;
238
239 /* Amount of spare data per full chunk */
43bcfd2b 240 unsigned int spare_size;
c2cdace7
TP
241
242 /* Number of full chunks (i.e chunk_size + spare_size) */
243 unsigned int nfullchunks;
244
245 /*
246 * Total number of chunks. If equal to nfullchunks, then there
247 * are only full chunks. Otherwise, there is one last chunk of
248 * size (last_chunk_size + last_spare_size)
249 */
250 unsigned int ntotalchunks;
251
252 /* Amount of real data in the last chunk */
253 unsigned int last_chunk_size;
254
255 /* Amount of spare data in the last chunk */
256 unsigned int last_spare_size;
257
43bcfd2b 258 unsigned int ecc_size;
87f5336e
EG
259 unsigned int ecc_err_cnt;
260 unsigned int max_bitflips;
fe69af00 261 int retcode;
fe69af00 262
c2cdace7
TP
263 /*
264 * Variables only valid during command
265 * execution. step_chunk_size and step_spare_size is the
266 * amount of real data and spare data in the current
267 * chunk. cur_chunk is the current chunk being
268 * read/programmed.
269 */
270 unsigned int step_chunk_size;
271 unsigned int step_spare_size;
272 unsigned int cur_chunk;
273
48cf7efa
EG
274 /* cached register value */
275 uint32_t reg_ndcr;
276 uint32_t ndtr0cs0;
277 uint32_t ndtr1cs0;
278
fe69af00 279 /* generated NDCBx register values */
280 uint32_t ndcb0;
281 uint32_t ndcb1;
282 uint32_t ndcb2;
3a1a344a 283 uint32_t ndcb3;
fe69af00 284};
285
90ab5ee9 286static bool use_dma = 1;
fe69af00 287module_param(use_dma, bool, 0444);
25985edc 288MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
fe69af00 289
a9cadf72
EG
290struct pxa3xx_nand_timing {
291 unsigned int tCH; /* Enable signal hold time */
292 unsigned int tCS; /* Enable signal setup time */
293 unsigned int tWH; /* ND_nWE high duration */
294 unsigned int tWP; /* ND_nWE pulse time */
295 unsigned int tRH; /* ND_nRE high duration */
296 unsigned int tRP; /* ND_nRE pulse width */
297 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
298 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
299 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
300};
301
302struct pxa3xx_nand_flash {
a9cadf72 303 uint32_t chip_id;
a9cadf72
EG
304 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
305 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
a9cadf72
EG
306 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
307};
308
c1f82478 309static struct pxa3xx_nand_timing timing[] = {
227a886c
LW
310 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
311 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
312 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
313 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
d3490dfd
HZ
314};
315
c1f82478 316static struct pxa3xx_nand_flash builtin_flash_types[] = {
89c1702d
AT
317 { 0x46ec, 16, 16, &timing[1] },
318 { 0xdaec, 8, 8, &timing[1] },
319 { 0xd7ec, 8, 8, &timing[1] },
320 { 0xa12c, 8, 8, &timing[2] },
321 { 0xb12c, 16, 16, &timing[2] },
322 { 0xdc2c, 8, 8, &timing[2] },
323 { 0xcc2c, 16, 16, &timing[2] },
324 { 0xba20, 16, 16, &timing[3] },
d3490dfd
HZ
325};
326
776f265e
EG
327static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
328static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
329
330static struct nand_bbt_descr bbt_main_descr = {
331 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
332 | NAND_BBT_2BIT | NAND_BBT_VERSION,
333 .offs = 8,
334 .len = 6,
335 .veroffs = 14,
336 .maxblocks = 8, /* Last 8 blocks in each chip */
337 .pattern = bbt_pattern
338};
339
340static struct nand_bbt_descr bbt_mirror_descr = {
341 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
342 | NAND_BBT_2BIT | NAND_BBT_VERSION,
343 .offs = 8,
344 .len = 6,
345 .veroffs = 14,
346 .maxblocks = 8, /* Last 8 blocks in each chip */
347 .pattern = bbt_mirror_pattern
348};
349
3db227b6
RG
350static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
351 .eccbytes = 32,
352 .eccpos = {
353 32, 33, 34, 35, 36, 37, 38, 39,
354 40, 41, 42, 43, 44, 45, 46, 47,
355 48, 49, 50, 51, 52, 53, 54, 55,
356 56, 57, 58, 59, 60, 61, 62, 63},
357 .oobfree = { {2, 30} }
358};
359
70ed8523
EG
360static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
361 .eccbytes = 64,
362 .eccpos = {
363 32, 33, 34, 35, 36, 37, 38, 39,
364 40, 41, 42, 43, 44, 45, 46, 47,
365 48, 49, 50, 51, 52, 53, 54, 55,
366 56, 57, 58, 59, 60, 61, 62, 63,
367 96, 97, 98, 99, 100, 101, 102, 103,
368 104, 105, 106, 107, 108, 109, 110, 111,
369 112, 113, 114, 115, 116, 117, 118, 119,
370 120, 121, 122, 123, 124, 125, 126, 127},
371 /* Bootrom looks in bytes 0 & 5 for bad blocks */
372 .oobfree = { {6, 26}, { 64, 32} }
373};
374
375static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
376 .eccbytes = 128,
377 .eccpos = {
378 32, 33, 34, 35, 36, 37, 38, 39,
379 40, 41, 42, 43, 44, 45, 46, 47,
380 48, 49, 50, 51, 52, 53, 54, 55,
381 56, 57, 58, 59, 60, 61, 62, 63},
382 .oobfree = { }
383};
384
fe69af00 385#define NDTR0_tCH(c) (min((c), 7) << 19)
386#define NDTR0_tCS(c) (min((c), 7) << 16)
387#define NDTR0_tWH(c) (min((c), 7) << 11)
388#define NDTR0_tWP(c) (min((c), 7) << 8)
389#define NDTR0_tRH(c) (min((c), 7) << 3)
390#define NDTR0_tRP(c) (min((c), 7) << 0)
391
392#define NDTR1_tR(c) (min((c), 65535) << 16)
393#define NDTR1_tWHR(c) (min((c), 15) << 4)
394#define NDTR1_tAR(c) (min((c), 15) << 0)
395
396/* convert nano-seconds to nand flash controller clock cycles */
93b352fc 397#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
fe69af00 398
17754ad6 399static const struct of_device_id pxa3xx_nand_dt_ids[] = {
c7e9c7e7
EG
400 {
401 .compatible = "marvell,pxa3xx-nand",
402 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
403 },
1963ff97
EG
404 {
405 .compatible = "marvell,armada370-nand",
406 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
407 },
c7e9c7e7
EG
408 {}
409};
410MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
411
412static enum pxa3xx_nand_variant
413pxa3xx_nand_get_variant(struct platform_device *pdev)
414{
415 const struct of_device_id *of_id =
416 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
417 if (!of_id)
418 return PXA3XX_NAND_VARIANT_PXA;
419 return (enum pxa3xx_nand_variant)of_id->data;
420}
421
/*
 * Program the CS0 timing registers (NDTR0/NDTR1) from a legacy flash
 * timing description @t. All fields of @t are in nanoseconds and are
 * converted to controller clock cycles via ns2cycle() using the
 * current NAND clock rate. The computed values are also cached in
 * info->ndtr0cs0/ndtr1cs0 (see "cached register value" fields).
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
			   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
445
3f225b7f
AT
/*
 * Program NDTR0/NDTR1 for CS0 from generic ONFI SDR timings.
 * Each nand_sdr_timings value is divided by 1000 before ns2cycle()
 * (presumably a ps -> ns conversion, since ns2cycle() takes
 * nanoseconds -- TODO confirm the nand_sdr_timings units).
 * tR is derived from chip->chip_delay (apparently us, hence * 1000),
 * with a 20000 ns fallback when the chip reports none.
 * Register values are cached in info->ndtr0cs0/ndtr1cs0 as in
 * pxa3xx_nand_set_timing().
 */
static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	/* tWP is derived from the full write-cycle minus the high time */
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	/* likewise tRP from the read-cycle minus the RE high time */
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
484
485static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
486 unsigned int *flash_width,
487 unsigned int *dfc_width)
488{
489 struct nand_chip *chip = &host->chip;
490 struct pxa3xx_nand_info *info = host->info_data;
491 const struct pxa3xx_nand_flash *f = NULL;
063294a3 492 struct mtd_info *mtd = nand_to_mtd(&host->chip);
3f225b7f
AT
493 int i, id, ntypes;
494
495 ntypes = ARRAY_SIZE(builtin_flash_types);
496
063294a3 497 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3f225b7f 498
063294a3
BB
499 id = chip->read_byte(mtd);
500 id |= chip->read_byte(mtd) << 0x8;
3f225b7f
AT
501
502 for (i = 0; i < ntypes; i++) {
503 f = &builtin_flash_types[i];
504
505 if (f->chip_id == id)
506 break;
507 }
508
509 if (i == ntypes) {
510 dev_err(&info->pdev->dev, "Error: timings not found\n");
511 return -EINVAL;
512 }
513
514 pxa3xx_nand_set_timing(host, f->timing);
515
516 *flash_width = f->flash_width;
517 *dfc_width = f->dfc_width;
518
519 return 0;
520}
521
/*
 * Apply timings derived from the chip's advertised ONFI asynchronous
 * timing modes. @mode is a bitmask of supported modes; the highest set
 * bit is used, falling back to mode 0 when none is set.
 */
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;
	int best_mode = fls(mode) - 1;

	if (best_mode < 0)
		best_mode = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(best_mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}
539
/*
 * One-time chip timing and bus-width initialisation. ONFI-advertised
 * async timing modes are preferred; when the chip reports
 * ONFI_TIMING_MODE_UNKNOWN, fall back to the compat ID table, which
 * also supplies the flash and controller bus widths used to set
 * NDCR_DWIDTH_M / NDCR_DWIDTH_C and NAND_BUSWIDTH_16.
 */
static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned int flash_width = 0, dfc_width = 0;
	int mode, err;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
						      &dfc_width);
		if (err)
			return err;

		if (flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		err = pxa3xx_nand_init_timings_onfi(host, mode);
		if (err)
			return err;
	}

	return 0;
}
568
f8155a40
LW
569/**
570 * NOTE: it is a must to set ND_RUN firstly, then write
571 * command buffer, otherwise, it does not work.
572 * We enable all the interrupt at the same time, and
573 * let pxa3xx_nand_irq to handle all logic.
574 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	/* Start from the cached NDCR and apply per-command options */
	ndcr = info->reg_ndcr;

	/* ECC on/off; BCH-capable controllers also need NDECCCTRL set */
	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	/* ND_RUN must be set before the command buffer is written (see
	 * the NOTE above this function) */
	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}
608
/*
 * Stop the controller: busy-wait (up to NAND_STOP_DELAY iterations of
 * 1 us) for ND_RUN to self-clear, forcibly clearing it on timeout,
 * then terminate any in-flight DMA and clear all NDSR status bits.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	/* timed out: force ND_RUN off ourselves */
	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
631
57ff88f0
EG
/*
 * Unmask the interrupts selected by @int_mask. The NDCR interrupt
 * bits are *mask* bits (0 = interrupt enabled, 1 = masked), hence
 * enabling means clearing bits here.
 */
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}
640
/*
 * Mask the interrupts selected by @int_mask by setting their NDCR
 * mask bits (1 = masked; counterpart of enable_int()).
 */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
648
8dad0386
MR
/*
 * Read @len 32-bit words from the controller data buffer (NDDB) into
 * @data. With BCH ECC enabled the datasheet requires NDSR.RDDREQ to
 * be re-checked after every 32 bytes read, so in that case the FIFO
 * is drained in bursts of 8 words with a poll between bursts.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			ioread32_rep(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			data += 32;
			len -= 8;
		}
	}

	/* Remaining words (or the whole buffer when BCH is off) */
	ioread32_rep(info->mmio_base + NDDB, data, len);
}
681
/*
 * PIO transfer of the current chunk's data and (optional) spare area
 * between the controller FIFO (NDDB) and the driver buffers. The
 * direction comes from info->state; any other state is a driver bug
 * (BUG()). Buffer positions are advanced afterwards so multi-chunk
 * pages accumulate correctly.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (info->step_chunk_size)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += info->step_chunk_size;
	info->oob_buff_pos += info->step_spare_size;
}
717
/*
 * dmaengine completion callback: record the outcome (STATE_DMA_DONE
 * or ERR_DMABUSERR), unmap the scatterlist, acknowledge the data
 * request bits in NDSR and re-enable the controller interrupts that
 * start_data_dma()'s caller masked.
 */
static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}
736
/*
 * Submit a dmaengine slave transfer between the controller and
 * info->sg for one full chunk (chunk_size plus spare + ECC bytes when
 * the spare area is in use). Direction is taken from info->state; any
 * other state is a driver bug (BUG()). Completion is reported via
 * pxa3xx_nand_data_dma_irq(); if descriptor preparation fails, the
 * transfer is abandoned with an error message.
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}
	info->sg.length = info->chunk_size;
	if (info->use_spare)
		info->sg.length += info->spare_size + info->ecc_size;
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}
774
24542257
RJ
/*
 * Threaded half of the IRQ handler: performs the (potentially slow)
 * PIO data transfer requested by pxa3xx_nand_irq(), then acknowledges
 * the data-request status bits and marks the command done.
 */
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
786
/*
 * Hard IRQ handler. Decodes NDSR and:
 *  - records ECC results (uncorrectable errors, or the per-chunk
 *    corrected-bitflip count folded into info->max_bitflips);
 *  - on a data request, either starts DMA (interrupts masked until
 *    the DMA callback re-enables them) or defers PIO to the threaded
 *    handler via IRQ_WAKE_THREAD;
 *  - on WRCMDREQ, loads the command buffer by writing NDCB0..2 (and
 *    NDCB3 on NFCv2) through the NDCB0 address, as the hardware
 *    requires;
 *  - completes cmd_complete / dev_ready when the corresponding
 *    per-chip-select CMDD / ready bits fire.
 * Note the ready/cmd-done NDSR bits differ per chip select (CS0 vs
 * CS1), selected at the top.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* only NFCv2 with BCH reports a real error count */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}
882
/* Return 1 if the first @len bytes of @buf are all 0xff (erased), 0 otherwise. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}
	return 1;
}
890
86beebae
EG
891static void set_command_address(struct pxa3xx_nand_info *info,
892 unsigned int page_size, uint16_t column, int page_addr)
893{
894 /* small page addr setting */
895 if (page_size < PAGE_CHUNK_SIZE) {
896 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
897 | (column & 0xFF);
898
899 info->ndcb2 = 0;
900 } else {
901 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
902 | (column & 0xFFFF);
903
904 if (page_addr & 0xFF0000)
905 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
906 else
907 info->ndcb2 = 0;
908 }
909}
910
/*
 * Reset all per-command bookkeeping in @info before issuing @command:
 * buffer cursors, chunk counters, ECC/spare usage and the cached
 * result fields. Reads and page programs enable HW ECC; PARAM reads
 * disable the spare area. For commands that will transfer page data
 * (READ0/READOOB/SEQIN) the data buffer is pre-filled with 0xFF so
 * untouched bytes read back as erased.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column point to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->step_chunk_size = 0;
	info->step_spare_size = 0;
	info->cur_chunk = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		/* page data transfers go through the HW ECC engine */
		info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		/* parameter pages carry no spare area */
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
958
/*
 * prepare_set_command - build the NDCB0..NDCB3 command-buffer words for
 * @command and decide whether the controller must actually execute it.
 *
 * @info:         controller state; ndcb0-3, buf_start/buf_count and the
 *                per-step chunk/spare sizes are filled in here
 * @ext_cmd_type: extended command type (naked read/write, dispatch, ...)
 *                used only for multi-chunk (large page) operations
 * @column:       column address within the page
 * @page_addr:    page address
 *
 * Returns 1 when the caller should start the state machine, 0 when the
 * command needs no controller transaction (e.g. a bare SEQIN, ERASE2, or
 * programming an all-0xFF buffer).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	/* Chip-select 1 is addressed through the NDCB0 CSEL bit. */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only records the address; PAGEPROG issues the write later. */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
			+ host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB data sits right after the page data in the buffer. */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/* Pick this step's transfer size: full or trailing chunk. */
		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* NDCB3 carries the overridden transfer length. */
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip programming entirely if the buffer is all 0xFF. */
		if (is_buf_blank(info->data_buff,
				(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Pick this step's transfer size: full or trailing chunk. */
		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small pages: single SEQIN+PAGEPROG double command. */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		/* ONFI/JEDEC parameter page read with length override. */
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* ERASE1 and ERASE2 are issued as one double-byte command. */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* Already folded into ERASE1 above; nothing to execute. */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
1147
5cbbdc6a
EG
/*
 * nand_cmdfunc - single-transaction cmdfunc for pages that fit the FIFO.
 *
 * Builds the command buffer via prepare_set_command() and, when required,
 * starts the controller and waits (with timeout) for command completion.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * If this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has been changed;
	 * if yes, reload the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
1195
5cbbdc6a
EG
/*
 * nand_cmdfunc_extended - cmdfunc for pages larger than the controller
 * FIFO (PAGE_CHUNK_SIZE), used on the Armada 370 variant.
 *
 * Splits READ0/READOOB/PAGEPROG into a sequence of naked/last chunk
 * transactions, stepping info->cur_chunk until the whole page (plus the
 * final dispatch command for programs) has been transferred.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * If this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has been changed;
	 * if yes, reload the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;

		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* Nothing to run: release any waitfunc() waiter. */
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0 &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1315
/*
 * pxa3xx_nand_write_page_hwecc - ecc.write_page hook.
 *
 * Only stages page data and OOB into the driver buffer via write_buf();
 * the actual program (and hardware ECC) happens when the core issues
 * NAND_CMD_PAGEPROG through cmdfunc. Always returns 0.
 */
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
1325
/*
 * pxa3xx_nand_read_page_hwecc - ecc.read_page hook.
 *
 * Copies the already-transferred page and OOB out of the driver buffer,
 * then folds the hardware ECC result (info->retcode, set by the command
 * path) into mtd->ecc_stats. Returns the max bitflip count for the page.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB; ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1353
1354static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1355{
4bd4ebcc 1356 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1357 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1358 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1359 char retval = 0xFF;
1360
1361 if (info->buf_start < info->buf_count)
1362 /* Has just send a new command? */
1363 retval = info->data_buff[info->buf_start++];
1364
1365 return retval;
1366}
1367
1368static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1369{
4bd4ebcc 1370 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1371 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1372 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1373 u16 retval = 0xFFFF;
1374
1375 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1376 retval = *((u16 *)(info->data_buff+info->buf_start));
1377 info->buf_start += 2;
1378 }
1379 return retval;
1380}
1381
1382static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1383{
4bd4ebcc 1384 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1385 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1386 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1387 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1388
1389 memcpy(buf, info->data_buff + info->buf_start, real_len);
1390 info->buf_start += real_len;
1391}
1392
1393static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1394 const uint8_t *buf, int len)
1395{
4bd4ebcc 1396 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1397 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1398 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1399 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1400
1401 memcpy(info->data_buff + info->buf_start, buf, real_len);
1402 info->buf_start += real_len;
1403}
1404
/*
 * pxa3xx_nand_select_chip - select_chip hook; intentionally a no-op.
 * Chip selection is encoded per command via the NDCB0 CSEL bit in
 * prepare_set_command().
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1409
/*
 * pxa3xx_nand_waitfunc - waitfunc hook: wait for the device-ready
 * completion armed by cmdfunc, then translate the command outcome into
 * a NAND status value (0/NAND_STATUS_FAIL for write/erase,
 * NAND_STATUS_READY otherwise).
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1435
/*
 * pxa3xx_nand_config_ident - set up a minimal controller configuration
 * (default NDCR, conservative ONFI mode-0 timings) good enough to run
 * the identification commands (READID/STATUS/PARAM) on an unknown chip.
 *
 * Returns 0 on success or a negative errno if the default SDR timings
 * cannot be resolved.
 */
static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	const struct nand_sdr_timings *timings;

	/* Configure default flash values */
	info->chunk_size = PAGE_CHUNK_SIZE;
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);
	return 0;
}
1458
/*
 * pxa3xx_nand_config_tail - finish the NDCR configuration once the chip
 * geometry is known (after nand_scan_ident): row-address start, pages
 * per block and page-size bits.
 */
static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	/* page_shift == 6 means 64 pages per block */
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
}
1469
/*
 * pxa3xx_nand_detect_config - keep-config path: derive the driver
 * configuration from whatever the bootloader already programmed into
 * NDCR and the timing registers, instead of reconfiguring the chip.
 */
static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	/* Mask out interrupt and arbitration bits; re-add ARB if requested. */
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}
1484
fe69af00 1485static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1486{
1487 struct platform_device *pdev = info->pdev;
8f5ba31a
RJ
1488 struct dma_slave_config config;
1489 dma_cap_mask_t mask;
1490 struct pxad_param param;
1491 int ret;
fe69af00 1492
8f5ba31a
RJ
1493 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1494 if (info->data_buff == NULL)
1495 return -ENOMEM;
1496 if (use_dma == 0)
fe69af00 1497 return 0;
fe69af00 1498
8f5ba31a
RJ
1499 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1500 if (ret)
1501 return ret;
fe69af00 1502
8f5ba31a
RJ
1503 sg_init_one(&info->sg, info->data_buff, info->buf_size);
1504 dma_cap_zero(mask);
1505 dma_cap_set(DMA_SLAVE, mask);
1506 param.prio = PXAD_PRIO_LOWEST;
1507 param.drcmr = info->drcmr_dat;
1508 info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1509 &param, &pdev->dev,
1510 "data");
1511 if (!info->dma_chan) {
1512 dev_err(&pdev->dev, "unable to request data dma channel\n");
1513 return -ENODEV;
1514 }
fe69af00 1515
8f5ba31a
RJ
1516 memset(&config, 0, sizeof(config));
1517 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1518 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1519 config.src_addr = info->mmio_phys + NDDB;
1520 config.dst_addr = info->mmio_phys + NDDB;
1521 config.src_maxburst = 32;
1522 config.dst_maxburst = 32;
1523 ret = dmaengine_slave_config(info->dma_chan, &config);
1524 if (ret < 0) {
1525 dev_err(&info->pdev->dev,
1526 "dma channel configuration failed: %d\n",
1527 ret);
1528 return ret;
fe69af00 1529 }
1530
95b26563
EG
1531 /*
1532 * Now that DMA buffers are allocated we turn on
1533 * DMA proper for I/O operations.
1534 */
1535 info->use_dma = 1;
fe69af00 1536 return 0;
1537}
1538
498b6145
EG
/*
 * pxa3xx_nand_free_buff - undo pxa3xx_nand_init_buff(): tear down the
 * DMA channel (only if it was fully brought up, i.e. use_dma is set)
 * and free the data bounce buffer.
 */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}
498b6145 1547
43bcfd2b
EG
/*
 * pxa_ecc_init - select the hardware ECC engine configuration (Hamming
 * or BCH, chunk/spare layout) for the requested @strength/@ecc_stepsize
 * at the given @page_size.
 *
 * Only the combinations listed below are supported by the controller;
 * anything else returns -ENODEV. Note the engine may be run at a higher
 * strength than requested (e.g. 16-bit/2KB for a 4-bit/512B requirement).
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	/* 1-bit Hamming, 2KB page */
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/* 1-bit Hamming, 512B page */
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	/* BCH-4, 4KB page: two 2KB chunks */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		/* Four full 1KB chunks plus a spare-only trailing step. */
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
1628
/*
 * pxa3xx_nand_scan - per-chip-select probe: configure (or detect) the
 * controller, identify the flash, pick the ECC scheme, size the data
 * buffer and finish with nand_scan_tail().
 *
 * Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;
	uint16_t ecc_strength, ecc_step;

	/* Either trust the bootloader's setup or program our own. */
	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
	}

	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init(host);
		if (ret) {
			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
				ret);
			return ret;
		}
	}

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform-provided ECC settings win over the chip's datasheet. */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}
1737
d456882b 1738static int alloc_nand_resource(struct platform_device *pdev)
fe69af00 1739{
a61ae81a 1740 struct device_node *np = pdev->dev.of_node;
f3c8cfc2 1741 struct pxa3xx_nand_platform_data *pdata;
fe69af00 1742 struct pxa3xx_nand_info *info;
d456882b 1743 struct pxa3xx_nand_host *host;
6e308f87 1744 struct nand_chip *chip = NULL;
fe69af00 1745 struct mtd_info *mtd;
1746 struct resource *r;
f3c8cfc2 1747 int ret, irq, cs;
fe69af00 1748
453810b7 1749 pdata = dev_get_platdata(&pdev->dev);
e423c90a
RJ
1750 if (pdata->num_cs <= 0)
1751 return -ENODEV;
063294a3
BB
1752 info = devm_kzalloc(&pdev->dev,
1753 sizeof(*info) + sizeof(*host) * pdata->num_cs,
1754 GFP_KERNEL);
4c073cd2 1755 if (!info)
d456882b 1756 return -ENOMEM;
fe69af00 1757
fe69af00 1758 info->pdev = pdev;
c7e9c7e7 1759 info->variant = pxa3xx_nand_get_variant(pdev);
f3c8cfc2 1760 for (cs = 0; cs < pdata->num_cs; cs++) {
063294a3
BB
1761 host = (void *)&info[1] + sizeof(*host) * cs;
1762 chip = &host->chip;
d699ed25 1763 nand_set_controller_data(chip, host);
063294a3 1764 mtd = nand_to_mtd(chip);
f3c8cfc2 1765 info->host[cs] = host;
f3c8cfc2
LW
1766 host->cs = cs;
1767 host->info_data = info;
550dab5b 1768 mtd->dev.parent = &pdev->dev;
a61ae81a
BN
1769 /* FIXME: all chips use the same device tree partitions */
1770 nand_set_flash_node(chip, np);
f3c8cfc2 1771
d699ed25 1772 nand_set_controller_data(chip, host);
f3c8cfc2
LW
1773 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1774 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1775 chip->controller = &info->controller;
1776 chip->waitfunc = pxa3xx_nand_waitfunc;
1777 chip->select_chip = pxa3xx_nand_select_chip;
f3c8cfc2
LW
1778 chip->read_word = pxa3xx_nand_read_word;
1779 chip->read_byte = pxa3xx_nand_read_byte;
1780 chip->read_buf = pxa3xx_nand_read_buf;
1781 chip->write_buf = pxa3xx_nand_write_buf;
664c7f5e 1782 chip->options |= NAND_NO_SUBPAGE_WRITE;
5cbbdc6a 1783 chip->cmdfunc = nand_cmdfunc;
f3c8cfc2 1784 }
401e67e2
LW
1785
1786 spin_lock_init(&chip->controller->lock);
1787 init_waitqueue_head(&chip->controller->wq);
9ca7944d 1788 info->clk = devm_clk_get(&pdev->dev, NULL);
fe69af00 1789 if (IS_ERR(info->clk)) {
1790 dev_err(&pdev->dev, "failed to get nand clock\n");
4c073cd2 1791 return PTR_ERR(info->clk);
fe69af00 1792 }
1f8eaff2
EG
1793 ret = clk_prepare_enable(info->clk);
1794 if (ret < 0)
1795 return ret;
fe69af00 1796
9097103f 1797 if (!np && use_dma) {
8f5ba31a
RJ
1798 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1799 if (r == NULL) {
1800 dev_err(&pdev->dev,
1801 "no resource defined for data DMA\n");
1802 ret = -ENXIO;
1803 goto fail_disable_clk;
1e7ba630 1804 }
8f5ba31a 1805 info->drcmr_dat = r->start;
fe69af00 1806 }
fe69af00 1807
1808 irq = platform_get_irq(pdev, 0);
1809 if (irq < 0) {
1810 dev_err(&pdev->dev, "no IRQ resource defined\n");
1811 ret = -ENXIO;
9ca7944d 1812 goto fail_disable_clk;
fe69af00 1813 }
1814
1815 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0ddd846f
EG
1816 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1817 if (IS_ERR(info->mmio_base)) {
1818 ret = PTR_ERR(info->mmio_base);
9ca7944d 1819 goto fail_disable_clk;
fe69af00 1820 }
8638fac8 1821 info->mmio_phys = r->start;
fe69af00 1822
62e8b851
EG
1823 /* Allocate a buffer to allow flash detection */
1824 info->buf_size = INIT_BUFFER_SIZE;
1825 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1826 if (info->data_buff == NULL) {
1827 ret = -ENOMEM;
9ca7944d 1828 goto fail_disable_clk;
62e8b851 1829 }
fe69af00 1830
346e1259
HZ
1831 /* initialize all interrupts to be disabled */
1832 disable_int(info, NDSR_MASK);
1833
24542257
RJ
1834 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1835 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1836 pdev->name, info);
fe69af00 1837 if (ret < 0) {
1838 dev_err(&pdev->dev, "failed to request IRQ\n");
1839 goto fail_free_buf;
1840 }
1841
e353a20a 1842 platform_set_drvdata(pdev, info);
fe69af00 1843
d456882b 1844 return 0;
fe69af00 1845
fe69af00 1846fail_free_buf:
401e67e2 1847 free_irq(irq, info);
62e8b851 1848 kfree(info->data_buff);
9ca7944d 1849fail_disable_clk:
fb32061f 1850 clk_disable_unprepare(info->clk);
d456882b 1851 return ret;
fe69af00 1852}
1853
/*
 * pxa3xx_nand_remove - driver removal: free the IRQ and buffers/DMA,
 * hand the DFI bus back to the SMC, stop the clock and unregister every
 * per-chip-select MTD device.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	/* Also called from a failed probe, where drvdata may be unset. */
	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to SMC upon driver removal. This is done by
	 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
	 * access to the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(nand_to_mtd(&info->host[cs]->chip));
	return 0;
}
1886
1e7ba630
DM
/*
 * pxa3xx_nand_probe_dt - build platform data from the device tree.
 *
 * Returns 0 when there is no matching OF node (legacy platform-data
 * probe), 0 with pdev->dev.platform_data populated on success, or
 * -ENOMEM on allocation failure.
 */
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
		of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	/* Not probed via DT: leave any board-supplied pdata untouched. */
	if (!of_id)
		return 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);
	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);

	/* Negative means "not specified": fall back to chip defaults. */
	pdata->ecc_strength = of_get_nand_ecc_strength(np);
	if (pdata->ecc_strength < 0)
		pdata->ecc_strength = 0;

	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
	if (pdata->ecc_step_size < 0)
		pdata->ecc_step_size = 0;

	pdev->dev.platform_data = pdata;

	return 0;
}
1e7ba630 1920
e353a20a
LW
/*
 * pxa3xx_nand_probe - driver entry point: resolve platform data (DT or
 * board file), allocate controller resources, then scan and register an
 * MTD device per chip select. Succeeds if at least one CS probes.
 */
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

	/* DMA support is only wired up for ARM PXA/MMP platforms. */
	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		ret = mtd_device_register(mtd, pdata->parts[cs],
					  pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	/* No chip select came up: undo everything. */
	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}
1983
fe69af00 1984#ifdef CONFIG_PM
d3e94f3f 1985static int pxa3xx_nand_suspend(struct device *dev)
fe69af00 1986{
d3e94f3f 1987 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
fe69af00 1988
f8155a40 1989 if (info->state) {
d3e94f3f 1990 dev_err(dev, "driver busy, state = %d\n", info->state);
fe69af00 1991 return -EAGAIN;
1992 }
1993
d55d31a6 1994 clk_disable(info->clk);
fe69af00 1995 return 0;
1996}
1997
d3e94f3f 1998static int pxa3xx_nand_resume(struct device *dev)
fe69af00 1999{
d3e94f3f 2000 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
d55d31a6
EG
2001 int ret;
2002
2003 ret = clk_enable(info->clk);
2004 if (ret < 0)
2005 return ret;
051fc41c
LW
2006
2007 /* We don't want to handle interrupt without calling mtd routine */
2008 disable_int(info, NDCR_INT_MASK);
fe69af00 2009
f3c8cfc2
LW
2010 /*
2011 * Directly set the chip select to a invalid value,
2012 * then the driver would reset the timing according
2013 * to current chip select at the beginning of cmdfunc
2014 */
2015 info->cs = 0xff;
fe69af00 2016
051fc41c
LW
2017 /*
2018 * As the spec says, the NDSR would be updated to 0x1800 when
2019 * doing the nand_clk disable/enable.
2020 * To prevent it damaging state machine of the driver, clear
2021 * all status before resume
2022 */
2023 nand_writel(info, NDSR, NDSR_MASK);
f3c8cfc2 2024
18c81b18 2025 return 0;
fe69af00 2026}
2027#else
2028#define pxa3xx_nand_suspend NULL
2029#define pxa3xx_nand_resume NULL
2030#endif
2031
d3e94f3f
BN
2032static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
2033 .suspend = pxa3xx_nand_suspend,
2034 .resume = pxa3xx_nand_resume,
2035};
2036
fe69af00 2037static struct platform_driver pxa3xx_nand_driver = {
2038 .driver = {
2039 .name = "pxa3xx-nand",
5576bc7b 2040 .of_match_table = pxa3xx_nand_dt_ids,
d3e94f3f 2041 .pm = &pxa3xx_nand_pm_ops,
fe69af00 2042 },
2043 .probe = pxa3xx_nand_probe,
2044 .remove = pxa3xx_nand_remove,
fe69af00 2045};
2046
f99640de 2047module_platform_driver(pxa3xx_nand_driver);
fe69af00 2048
2049MODULE_LICENSE("GPL");
2050MODULE_DESCRIPTION("PXA3xx NAND controller driver");
This page took 0.610735 seconds and 5 git commands to generate.