Commit | Line | Data |
---|---|---|
10a2bcae HS |
1 | /* |
2 | * Freescale GPMI NAND Flash Driver | |
3 | * | |
4 | * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. | |
5 | * Copyright (C) 2008 Embedded Alley Solutions, Inc. | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License as published by | |
9 | * the Free Software Foundation; either version 2 of the License, or | |
10 | * (at your option) any later version. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along | |
18 | * with this program; if not, write to the Free Software Foundation, Inc., | |
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | |
20 | */ | |
3d10095a FE |
21 | |
22 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
23 | ||
10a2bcae HS |
24 | #include <linux/clk.h> |
25 | #include <linux/slab.h> | |
26 | #include <linux/interrupt.h> | |
df16c86a | 27 | #include <linux/module.h> |
10a2bcae | 28 | #include <linux/mtd/partitions.h> |
39febc01 | 29 | #include <linux/pinctrl/consumer.h> |
e10db1f0 HS |
30 | #include <linux/of.h> |
31 | #include <linux/of_device.h> | |
c50c6940 | 32 | #include <linux/of_mtd.h> |
10a2bcae HS |
33 | #include "gpmi-nand.h" |
34 | ||
5de0b52e HS |
/*
 * Resource names for the GPMI NAND driver. These must match the names
 * used when the platform resources (register blocks and IRQs) were
 * declared; they are looked up with platform_get_resource_byname().
 */
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "gpmi-dma"
40 | ||
10a2bcae HS |
/* add our owner bbt descriptor */
/* Bad-block table scan pattern: a single 0xff byte at OOB offset 0. */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.options = 0,
	.offs = 0,
	.len = 1,
	.pattern = scan_ff_pattern
};
49 | ||
/* We will use all the (page + OOB). */
/*
 * Deliberately "empty" layout: no ECC bytes and no free OOB bytes are
 * advertised to MTD, because the BCH engine manages the whole OOB area.
 */
static struct nand_ecclayout gpmi_hw_ecclayout = {
	.eccbytes = 0,
	.eccpos = { 0, },
	.oobfree = { {.offset = 0, .length = 0} }
};
56 | ||
57 | static irqreturn_t bch_irq(int irq, void *cookie) | |
58 | { | |
59 | struct gpmi_nand_data *this = cookie; | |
60 | ||
61 | gpmi_clear_bch(this); | |
62 | complete(&this->bch_done); | |
63 | return IRQ_HANDLED; | |
64 | } | |
65 | ||
66 | /* | |
67 | * Calculate the ECC strength by hand: | |
68 | * E : The ECC strength. | |
69 | * G : the length of Galois Field. | |
70 | * N : The chunk count of per page. | |
71 | * O : the oobsize of the NAND chip. | |
72 | * M : the metasize of per page. | |
73 | * | |
74 | * The formula is : | |
75 | * E * G * N | |
76 | * ------------ <= (O - M) | |
77 | * 8 | |
78 | * | |
79 | * So, we get E by: | |
80 | * (O - M) * 8 | |
81 | * E <= ------------- | |
82 | * G * N | |
83 | */ | |
84 | static inline int get_ecc_strength(struct gpmi_nand_data *this) | |
85 | { | |
86 | struct bch_geometry *geo = &this->bch_geometry; | |
87 | struct mtd_info *mtd = &this->mtd; | |
88 | int ecc_strength; | |
89 | ||
90 | ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8) | |
91 | / (geo->gf_len * geo->ecc_chunk_count); | |
92 | ||
93 | /* We need the minor even number. */ | |
94 | return round_down(ecc_strength, 2); | |
95 | } | |
96 | ||
/*
 * common_nfc_set_geometry - Compute the BCH geometry for the current chip.
 *
 * Fills in this->bch_geometry (metadata size, Galois field length, ECC
 * chunk size/count, ECC strength and the payload/auxiliary buffer layout)
 * from the MTD page and OOB sizes. When block mark swapping is enabled,
 * also computes the byte/bit position of the physical block mark within
 * the ECC-based view of the page.
 *
 * Returns 0 on success, -EINVAL if no usable ECC strength can be derived.
 */
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = &this->mtd;
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The size of the metadata can be changed, though we set it to 10
	 * bytes now. But it can't be too large, because we have to save
	 * enough space for BCH.
	 */
	geo->metadata_size = 10;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. There is no oobsize greater then 512. */
	geo->ecc_chunk_size = 512;
	while (geo->ecc_chunk_size < mtd->oobsize)
		geo->ecc_chunk_size *= 2; /* keep C >= O */

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!geo->ecc_strength) {
		pr_err("wrong ECC strength.\n");
		return -EINVAL;
	}

	geo->page_size = mtd->writesize + mtd->oobsize;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status. The
	 * metadata is padded to the nearest 32-bit boundary. The ECC status
	 * contains one byte for every ECC chunk, and is also padded to the
	 * nearest 32-bit boundary.
	 */
	metadata_size = ALIGN(geo->metadata_size, 4);
	status_size   = ALIGN(geo->ecc_chunk_count, 4);

	geo->auxiliary_size = metadata_size + status_size;
	geo->auxiliary_status_offset = metadata_size;

	/* Without block mark swapping, the geometry is complete. */
	if (!this->swap_block_mark)
		return 0;

	/*
	 * We need to compute the byte and bit offsets of
	 * the physical block mark within the ECC-based view of the page.
	 *
	 * NAND chip with 2K page shows below:
	 *                                             (Block Mark)
	 *                                                   |      |
	 *                                                   |  D   |
	 *                                                   |<---->|
	 *                                                   V      V
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *
	 * The position of block mark moves forward in the ECC-based view
	 * of page, and the delta is:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M)
	 *                          8
	 *
	 * With the formula to compute the ECC strength, and the condition
	 *       : C >= O         (C is the ecc chunk size)
	 *
	 * It's easy to deduce to the following result:
	 *
	 *         E * G       (O - M)      C - M         C - M
	 *      ----------- <= ------- <=  --------  <  ---------
	 *           8            N           N          (N - 1)
	 *
	 *  So, we get:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M) < C
	 *                          8
	 *
	 *  The above inequality means the position of block mark
	 *  within the ECC-based view of the page is still in the data chunk,
	 *  and it's NOT in the ECC bits of the chunk.
	 *
	 *  Use the following to compute the bit position of the
	 *  physical block mark within the ECC-based view of the page:
	 *          (page_size - D) * 8
	 *
	 *  --Huang Shijie
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
	return 0;
}
201 | ||
202 | struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) | |
203 | { | |
204 | int chipnr = this->current_chip; | |
205 | ||
206 | return this->dma_chans[chipnr]; | |
207 | } | |
208 | ||
/* Can we use the upper's buffer directly for DMA? */
/*
 * Sets up this->data_sgl for the pending data transfer. First tries to
 * DMA-map the caller's buffer (this->upper_buf) directly; on failure,
 * falls back to the driver's bounce buffer. this->direct_dma_map_ok
 * records which path was taken so the completion path knows whether a
 * copy-back is required.
 */
void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
{
	struct scatterlist *sgl = &this->data_sgl;
	int ret;

	/* Assume the direct mapping will succeed. */
	this->direct_dma_map_ok = true;

	/* first try to map the upper buffer directly */
	sg_init_one(sgl, this->upper_buf, this->upper_len);
	ret = dma_map_sg(this->dev, sgl, 1, dr);
	if (ret == 0) {
		/* We have to use our own DMA buffer. */
		sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);

		/* For a write, stage the caller's data into the bounce buffer. */
		if (dr == DMA_TO_DEVICE)
			memcpy(this->data_buffer_dma, this->upper_buf,
				this->upper_len);

		ret = dma_map_sg(this->dev, sgl, 1, dr);
		if (ret == 0)
			pr_err("DMA mapping failed.\n");

		/* A read must be copied back out of the bounce buffer later. */
		this->direct_dma_map_ok = false;
	}
}
235 | ||
236 | /* This will be called after the DMA operation is finished. */ | |
237 | static void dma_irq_callback(void *param) | |
238 | { | |
239 | struct gpmi_nand_data *this = param; | |
240 | struct completion *dma_c = &this->dma_done; | |
241 | ||
242 | complete(dma_c); | |
243 | ||
244 | switch (this->dma_type) { | |
245 | case DMA_FOR_COMMAND: | |
246 | dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE); | |
247 | break; | |
248 | ||
249 | case DMA_FOR_READ_DATA: | |
250 | dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE); | |
251 | if (this->direct_dma_map_ok == false) | |
252 | memcpy(this->upper_buf, this->data_buffer_dma, | |
253 | this->upper_len); | |
254 | break; | |
255 | ||
256 | case DMA_FOR_WRITE_DATA: | |
257 | dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE); | |
258 | break; | |
259 | ||
260 | case DMA_FOR_READ_ECC_PAGE: | |
261 | case DMA_FOR_WRITE_ECC_PAGE: | |
262 | /* We have to wait the BCH interrupt to finish. */ | |
263 | break; | |
264 | ||
265 | default: | |
266 | pr_err("in wrong DMA operation.\n"); | |
267 | } | |
268 | } | |
269 | ||
/*
 * start_dma_without_bch_irq - Submit a DMA descriptor and wait for it.
 *
 * Installs dma_irq_callback() as the completion callback, submits @desc
 * and waits up to one second for the DMA completion. Only the DMA
 * completion is waited for here; a BCH completion, if the operation
 * produces one, is the caller's concern.
 *
 * Returns 0 on success, -ETIMEDOUT if the DMA never completed.
 */
int start_dma_without_bch_irq(struct gpmi_nand_data *this,
				struct dma_async_tx_descriptor *desc)
{
	struct completion *dma_c = &this->dma_done;
	int err;

	init_completion(dma_c);

	desc->callback		= dma_irq_callback;
	desc->callback_param	= this;
	dmaengine_submit(desc);
	dma_async_issue_pending(get_dma_chan(this));

	/* Wait for the interrupt from the DMA block. */
	err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
	if (!err) {
		pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
		gpmi_dump_info(this);
		return -ETIMEDOUT;
	}
	return 0;
}
292 | ||
293 | /* | |
294 | * This function is used in BCH reading or BCH writing pages. | |
295 | * It will wait for the BCH interrupt as long as ONE second. | |
296 | * Actually, we must wait for two interrupts : | |
297 | * [1] firstly the DMA interrupt and | |
298 | * [2] secondly the BCH interrupt. | |
299 | */ | |
300 | int start_dma_with_bch_irq(struct gpmi_nand_data *this, | |
301 | struct dma_async_tx_descriptor *desc) | |
302 | { | |
303 | struct completion *bch_c = &this->bch_done; | |
304 | int err; | |
305 | ||
306 | /* Prepare to receive an interrupt from the BCH block. */ | |
307 | init_completion(bch_c); | |
308 | ||
309 | /* start the DMA */ | |
310 | start_dma_without_bch_irq(this, desc); | |
311 | ||
312 | /* Wait for the interrupt from the BCH block. */ | |
313 | err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000)); | |
314 | if (!err) { | |
315 | pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type); | |
316 | gpmi_dump_info(this); | |
317 | return -ETIMEDOUT; | |
318 | } | |
319 | return 0; | |
320 | } | |
321 | ||
06f25510 | 322 | static int |
10a2bcae HS |
323 | acquire_register_block(struct gpmi_nand_data *this, const char *res_name) |
324 | { | |
325 | struct platform_device *pdev = this->pdev; | |
326 | struct resources *res = &this->resources; | |
327 | struct resource *r; | |
513d57e1 | 328 | void __iomem *p; |
10a2bcae HS |
329 | |
330 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); | |
331 | if (!r) { | |
332 | pr_err("Can't get resource for %s\n", res_name); | |
333 | return -ENXIO; | |
334 | } | |
335 | ||
336 | p = ioremap(r->start, resource_size(r)); | |
337 | if (!p) { | |
338 | pr_err("Can't remap %s\n", res_name); | |
339 | return -ENOMEM; | |
340 | } | |
341 | ||
342 | if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME)) | |
343 | res->gpmi_regs = p; | |
344 | else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME)) | |
345 | res->bch_regs = p; | |
346 | else | |
347 | pr_err("unknown resource name : %s\n", res_name); | |
348 | ||
349 | return 0; | |
350 | } | |
351 | ||
352 | static void release_register_block(struct gpmi_nand_data *this) | |
353 | { | |
354 | struct resources *res = &this->resources; | |
355 | if (res->gpmi_regs) | |
356 | iounmap(res->gpmi_regs); | |
357 | if (res->bch_regs) | |
358 | iounmap(res->bch_regs); | |
359 | res->gpmi_regs = NULL; | |
360 | res->bch_regs = NULL; | |
361 | } | |
362 | ||
06f25510 | 363 | static int |
10a2bcae HS |
364 | acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h) |
365 | { | |
366 | struct platform_device *pdev = this->pdev; | |
367 | struct resources *res = &this->resources; | |
368 | const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME; | |
369 | struct resource *r; | |
370 | int err; | |
371 | ||
372 | r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name); | |
373 | if (!r) { | |
374 | pr_err("Can't get resource for %s\n", res_name); | |
375 | return -ENXIO; | |
376 | } | |
377 | ||
378 | err = request_irq(r->start, irq_h, 0, res_name, this); | |
379 | if (err) { | |
380 | pr_err("Can't own %s\n", res_name); | |
381 | return err; | |
382 | } | |
383 | ||
384 | res->bch_low_interrupt = r->start; | |
385 | res->bch_high_interrupt = r->end; | |
386 | return 0; | |
387 | } | |
388 | ||
389 | static void release_bch_irq(struct gpmi_nand_data *this) | |
390 | { | |
391 | struct resources *res = &this->resources; | |
392 | int i = res->bch_low_interrupt; | |
393 | ||
394 | for (; i <= res->bch_high_interrupt; i++) | |
395 | free_irq(i, this); | |
396 | } | |
397 | ||
/*
 * dmaengine filter callback: accept only the APBH DMA channel whose id
 * matches the channel number read from the device tree.
 */
static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
{
	struct gpmi_nand_data *this = param;
	/* this->private carries the DT channel number, stashed as a pointer. */
	int dma_channel = (int)this->private;

	if (!mxs_dma_is_apbh(chan))
		return false;
	/*
	 * only catch the GPMI dma channels :
	 * for mx23 :	MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3
	 *		(These four channels share the same IRQ!)
	 *
	 * for mx28 :	MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
	 *		(These eight channels share the same IRQ!)
	 */
	if (dma_channel == chan->chan_id) {
		/* Hand the channel our per-channel IRQ data. */
		chan->private = &this->dma_data;
		return true;
	}
	return false;
}
419 | ||
420 | static void release_dma_channels(struct gpmi_nand_data *this) | |
421 | { | |
422 | unsigned int i; | |
423 | for (i = 0; i < DMA_CHANS; i++) | |
424 | if (this->dma_chans[i]) { | |
425 | dma_release_channel(this->dma_chans[i]); | |
426 | this->dma_chans[i] = NULL; | |
427 | } | |
428 | } | |
429 | ||
06f25510 | 430 | static int acquire_dma_channels(struct gpmi_nand_data *this) |
10a2bcae HS |
431 | { |
432 | struct platform_device *pdev = this->pdev; | |
e10db1f0 HS |
433 | struct resource *r_dma; |
434 | struct device_node *dn; | |
513d57e1 HS |
435 | u32 dma_channel; |
436 | int ret; | |
e10db1f0 HS |
437 | struct dma_chan *dma_chan; |
438 | dma_cap_mask_t mask; | |
439 | ||
440 | /* dma channel, we only use the first one. */ | |
441 | dn = pdev->dev.of_node; | |
442 | ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel); | |
443 | if (ret) { | |
444 | pr_err("unable to get DMA channel from dt.\n"); | |
445 | goto acquire_err; | |
446 | } | |
447 | this->private = (void *)dma_channel; | |
10a2bcae | 448 | |
e10db1f0 | 449 | /* gpmi dma interrupt */ |
10a2bcae HS |
450 | r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ, |
451 | GPMI_NAND_DMA_INTERRUPT_RES_NAME); | |
e10db1f0 | 452 | if (!r_dma) { |
10a2bcae | 453 | pr_err("Can't get resource for DMA\n"); |
e10db1f0 | 454 | goto acquire_err; |
10a2bcae | 455 | } |
e10db1f0 | 456 | this->dma_data.chan_irq = r_dma->start; |
10a2bcae | 457 | |
e10db1f0 HS |
458 | /* request dma channel */ |
459 | dma_cap_zero(mask); | |
460 | dma_cap_set(DMA_SLAVE, mask); | |
10a2bcae | 461 | |
e10db1f0 HS |
462 | dma_chan = dma_request_channel(mask, gpmi_dma_filter, this); |
463 | if (!dma_chan) { | |
2d350e5a | 464 | pr_err("Failed to request DMA channel.\n"); |
e10db1f0 | 465 | goto acquire_err; |
10a2bcae HS |
466 | } |
467 | ||
e10db1f0 | 468 | this->dma_chans[0] = dma_chan; |
10a2bcae HS |
469 | return 0; |
470 | ||
471 | acquire_err: | |
10a2bcae HS |
472 | release_dma_channels(this); |
473 | return -EINVAL; | |
474 | } | |
475 | ||
ff506172 HS |
476 | static void gpmi_put_clks(struct gpmi_nand_data *this) |
477 | { | |
478 | struct resources *r = &this->resources; | |
479 | struct clk *clk; | |
480 | int i; | |
481 | ||
482 | for (i = 0; i < GPMI_CLK_MAX; i++) { | |
483 | clk = r->clock[i]; | |
484 | if (clk) { | |
485 | clk_put(clk); | |
486 | r->clock[i] = NULL; | |
487 | } | |
488 | } | |
489 | } | |
490 | ||
/* Extra clock names needed on mx6q, consumed in r->clock[1..] order. */
static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
	"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};
494 | ||
06f25510 | 495 | static int gpmi_get_clks(struct gpmi_nand_data *this) |
ff506172 HS |
496 | { |
497 | struct resources *r = &this->resources; | |
498 | char **extra_clks = NULL; | |
499 | struct clk *clk; | |
500 | int i; | |
501 | ||
502 | /* The main clock is stored in the first. */ | |
503 | r->clock[0] = clk_get(this->dev, "gpmi_io"); | |
504 | if (IS_ERR(r->clock[0])) | |
505 | goto err_clock; | |
506 | ||
507 | /* Get extra clocks */ | |
508 | if (GPMI_IS_MX6Q(this)) | |
509 | extra_clks = extra_clks_for_mx6q; | |
510 | if (!extra_clks) | |
511 | return 0; | |
512 | ||
513 | for (i = 1; i < GPMI_CLK_MAX; i++) { | |
514 | if (extra_clks[i - 1] == NULL) | |
515 | break; | |
516 | ||
517 | clk = clk_get(this->dev, extra_clks[i - 1]); | |
518 | if (IS_ERR(clk)) | |
519 | goto err_clock; | |
520 | ||
521 | r->clock[i] = clk; | |
522 | } | |
523 | ||
e1ca95e3 | 524 | if (GPMI_IS_MX6Q(this)) |
ff506172 | 525 | /* |
e1ca95e3 | 526 | * Set the default value for the gpmi clock in mx6q: |
ff506172 | 527 | * |
e1ca95e3 HS |
528 | * If you want to use the ONFI nand which is in the |
529 | * Synchronous Mode, you should change the clock as you need. | |
ff506172 HS |
530 | */ |
531 | clk_set_rate(r->clock[0], 22000000); | |
e1ca95e3 | 532 | |
ff506172 HS |
533 | return 0; |
534 | ||
535 | err_clock: | |
536 | dev_dbg(this->dev, "failed in finding the clocks.\n"); | |
537 | gpmi_put_clks(this); | |
538 | return -ENOMEM; | |
539 | } | |
540 | ||
/*
 * acquire_resources - Grab everything the driver needs to run.
 *
 * Acquires, in order: the GPMI and BCH register blocks, the BCH
 * interrupt, the DMA channel, the default pinctrl state, and the clocks.
 * On failure the goto chain releases whatever was acquired, in reverse
 * order (each release helper tolerates unset entries).
 */
static int acquire_resources(struct gpmi_nand_data *this)
{
	struct pinctrl *pinctrl;
	int ret;

	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_bch_irq(this, bch_irq);
	if (ret)
		goto exit_regs;

	ret = acquire_dma_channels(this);
	if (ret)
		goto exit_dma_channels;

	/* devm-managed: no explicit put is needed in the error paths. */
	pinctrl = devm_pinctrl_get_select_default(&this->pdev->dev);
	if (IS_ERR(pinctrl)) {
		ret = PTR_ERR(pinctrl);
		goto exit_pin;
	}

	ret = gpmi_get_clks(this);
	if (ret)
		goto exit_clock;
	return 0;

exit_clock:
exit_pin:
	release_dma_channels(this);
exit_dma_channels:
	release_bch_irq(this);
exit_regs:
	release_register_block(this);
	return ret;
}
582 | ||
/* Tear down everything acquired by acquire_resources(). */
static void release_resources(struct gpmi_nand_data *this)
{
	gpmi_put_clks(this);
	release_register_block(this);
	release_bch_irq(this);
	release_dma_channels(this);
}
590 | ||
06f25510 | 591 | static int init_hardware(struct gpmi_nand_data *this) |
10a2bcae HS |
592 | { |
593 | int ret; | |
594 | ||
595 | /* | |
596 | * This structure contains the "safe" GPMI timing that should succeed | |
597 | * with any NAND Flash device | |
598 | * (although, with less-than-optimal performance). | |
599 | */ | |
600 | struct nand_timing safe_timing = { | |
601 | .data_setup_in_ns = 80, | |
602 | .data_hold_in_ns = 60, | |
603 | .address_setup_in_ns = 25, | |
604 | .gpmi_sample_delay_in_ns = 6, | |
605 | .tREA_in_ns = -1, | |
606 | .tRLOH_in_ns = -1, | |
607 | .tRHOH_in_ns = -1, | |
608 | }; | |
609 | ||
610 | /* Initialize the hardwares. */ | |
611 | ret = gpmi_init(this); | |
612 | if (ret) | |
613 | return ret; | |
614 | ||
615 | this->timing = safe_timing; | |
616 | return 0; | |
617 | } | |
618 | ||
/*
 * read_page_prepare - Pick the buffer a page read will DMA into.
 *
 * Tries to DMA-map the caller's @destination directly; if @destination
 * is not a valid kernel virtual address, or the mapping fails, falls
 * back to the driver's alternate buffer @alt_virt/@alt_phys. The chosen
 * buffer is returned through @use_virt/@use_phys, and
 * this->direct_dma_map_ok records which path was taken.
 *
 * Returns 0 on success, -ENOMEM if the alternate buffer is too small.
 */
static int read_page_prepare(struct gpmi_nand_data *this,
			void *destination, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			void **use_virt, dma_addr_t *use_phys)
{
	struct device *dev = this->dev;

	if (virt_addr_valid(destination)) {
		dma_addr_t dest_phys;

		dest_phys = dma_map_single(dev, destination,
						length, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dest_phys)) {
			if (alt_size < length) {
				pr_err("%s, Alternate buffer is too small\n",
					__func__);
				return -ENOMEM;
			}
			goto map_failed;
		}
		*use_virt = destination;
		*use_phys = dest_phys;
		this->direct_dma_map_ok = true;
		return 0;
	}

	/* Reached by goto above, or by falling through when @destination
	 * is not directly mappable.
	 */
map_failed:
	*use_virt = alt_virt;
	*use_phys = alt_phys;
	this->direct_dma_map_ok = false;
	return 0;
}
651 | ||
652 | static inline void read_page_end(struct gpmi_nand_data *this, | |
653 | void *destination, unsigned length, | |
654 | void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, | |
655 | void *used_virt, dma_addr_t used_phys) | |
656 | { | |
657 | if (this->direct_dma_map_ok) | |
658 | dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE); | |
659 | } | |
660 | ||
661 | static inline void read_page_swap_end(struct gpmi_nand_data *this, | |
662 | void *destination, unsigned length, | |
663 | void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, | |
664 | void *used_virt, dma_addr_t used_phys) | |
665 | { | |
666 | if (!this->direct_dma_map_ok) | |
667 | memcpy(destination, alt_virt, length); | |
668 | } | |
669 | ||
/*
 * send_page_prepare - Pick the buffer a page write will DMA from.
 *
 * Tries to DMA-map the caller's @source directly; if @source is not a
 * valid kernel virtual address, or the mapping fails, the data is copied
 * into the alternate buffer @alt_virt/@alt_phys instead. The chosen
 * buffer is returned through @use_virt/@use_phys; send_page_end() uses
 * (used_virt == source) to tell the two cases apart.
 *
 * Returns 0 on success, -ENOMEM if the alternate buffer is too small.
 */
static int send_page_prepare(struct gpmi_nand_data *this,
			const void *source, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			const void **use_virt, dma_addr_t *use_phys)
{
	struct device *dev = this->dev;

	if (virt_addr_valid(source)) {
		dma_addr_t source_phys;

		source_phys = dma_map_single(dev, (void *)source, length,
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, source_phys)) {
			if (alt_size < length) {
				pr_err("%s, Alternate buffer is too small\n",
					__func__);
				return -ENOMEM;
			}
			goto map_failed;
		}
		*use_virt = source;
		*use_phys = source_phys;
		return 0;
	}
	/* Reached by goto above, or by falling through when @source is not
	 * directly mappable.
	 */
map_failed:
	/*
	 * Copy the content of the source buffer into the alternate
	 * buffer and set up the return values accordingly.
	 */
	memcpy(alt_virt, source, length);

	*use_virt = alt_virt;
	*use_phys = alt_phys;
	return 0;
}
705 | ||
706 | static void send_page_end(struct gpmi_nand_data *this, | |
707 | const void *source, unsigned length, | |
708 | void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, | |
709 | const void *used_virt, dma_addr_t used_phys) | |
710 | { | |
711 | struct device *dev = this->dev; | |
712 | if (used_virt == source) | |
713 | dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE); | |
714 | } | |
715 | ||
716 | static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) | |
717 | { | |
718 | struct device *dev = this->dev; | |
719 | ||
720 | if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt)) | |
721 | dma_free_coherent(dev, this->page_buffer_size, | |
722 | this->page_buffer_virt, | |
723 | this->page_buffer_phys); | |
724 | kfree(this->cmd_buffer); | |
725 | kfree(this->data_buffer_dma); | |
726 | ||
727 | this->cmd_buffer = NULL; | |
728 | this->data_buffer_dma = NULL; | |
729 | this->page_buffer_virt = NULL; | |
730 | this->page_buffer_size = 0; | |
731 | } | |
732 | ||
/* Allocate the DMA buffers */
/*
 * Allocates the command buffer, the bounce data buffer, and the coherent
 * page buffer (payload + auxiliary). On any failure, everything already
 * allocated is freed and -ENOMEM is returned.
 */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct device *dev = this->dev;

	/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
	this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
	if (this->cmd_buffer == NULL)
		goto error_alloc;

	/* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
	this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
	if (this->data_buffer_dma == NULL)
		goto error_alloc;

	/*
	 * [3] Allocate the page buffer.
	 *
	 * Both the payload buffer and the auxiliary buffer must appear on
	 * 32-bit boundaries. We presume the size of the payload buffer is a
	 * power of two and is much larger than four, which guarantees the
	 * auxiliary buffer will appear on a 32-bit boundary.
	 */
	this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
	this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
					&this->page_buffer_phys, GFP_DMA);
	if (!this->page_buffer_virt)
		goto error_alloc;


	/* Slice up the page buffer. */
	this->payload_virt = this->page_buffer_virt;
	this->payload_phys = this->page_buffer_phys;
	this->auxiliary_virt = this->payload_virt + geo->payload_size;
	this->auxiliary_phys = this->payload_phys + geo->payload_size;
	return 0;

error_alloc:
	gpmi_free_dma_buffer(this);
	pr_err("Error allocating DMA buffers!\n");
	return -ENOMEM;
}
776 | ||
/*
 * gpmi_cmd_ctrl - MTD cmd_ctrl hook: queue and flush command/address bytes.
 *
 * @mtd:  the owning MTD device
 * @data: a command/address byte, or NAND_CMD_NONE
 * @ctrl: NAND_ALE/NAND_CLE latch flags
 */
static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;
	struct gpmi_nand_data *this = chip->priv;
	int ret;

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will deassert
	 * both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
	 */
	if ((ctrl & (NAND_ALE | NAND_CLE))) {
		if (data != NAND_CMD_NONE)
			this->cmd_buffer[this->command_length++] = data;
		return;
	}

	/* Both latches deasserted: flush the queued bytes, if any. */
	if (!this->command_length)
		return;

	ret = gpmi_send_command(this);
	if (ret)
		pr_err("Chip: %u, Error %d\n", this->current_chip, ret);

	this->command_length = 0;
}
809 | ||
810 | static int gpmi_dev_ready(struct mtd_info *mtd) | |
811 | { | |
812 | struct nand_chip *chip = mtd->priv; | |
813 | struct gpmi_nand_data *this = chip->priv; | |
814 | ||
815 | return gpmi_is_ready(this, this->current_chip); | |
816 | } | |
817 | ||
818 | static void gpmi_select_chip(struct mtd_info *mtd, int chipnr) | |
819 | { | |
820 | struct nand_chip *chip = mtd->priv; | |
821 | struct gpmi_nand_data *this = chip->priv; | |
822 | ||
823 | if ((this->current_chip < 0) && (chipnr >= 0)) | |
824 | gpmi_begin(this); | |
825 | else if ((this->current_chip >= 0) && (chipnr < 0)) | |
826 | gpmi_end(this); | |
827 | ||
828 | this->current_chip = chipnr; | |
829 | } | |
830 | ||
831 | static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |
832 | { | |
833 | struct nand_chip *chip = mtd->priv; | |
834 | struct gpmi_nand_data *this = chip->priv; | |
835 | ||
836 | pr_debug("len is %d\n", len); | |
837 | this->upper_buf = buf; | |
838 | this->upper_len = len; | |
839 | ||
840 | gpmi_read_data(this); | |
841 | } | |
842 | ||
843 | static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | |
844 | { | |
845 | struct nand_chip *chip = mtd->priv; | |
846 | struct gpmi_nand_data *this = chip->priv; | |
847 | ||
848 | pr_debug("len is %d\n", len); | |
849 | this->upper_buf = (uint8_t *)buf; | |
850 | this->upper_len = len; | |
851 | ||
852 | gpmi_send_data(this); | |
853 | } | |
854 | ||
855 | static uint8_t gpmi_read_byte(struct mtd_info *mtd) | |
856 | { | |
857 | struct nand_chip *chip = mtd->priv; | |
858 | struct gpmi_nand_data *this = chip->priv; | |
859 | uint8_t *buf = this->data_buffer_dma; | |
860 | ||
861 | gpmi_read_buf(mtd, buf, 1); | |
862 | return buf[0]; | |
863 | } | |
864 | ||
865 | /* | |
866 | * Handles block mark swapping. | |
867 | * It can be called in swapping the block mark, or swapping it back, | |
868 | * because the the operations are the same. | |
869 | */ | |
870 | static void block_mark_swapping(struct gpmi_nand_data *this, | |
871 | void *payload, void *auxiliary) | |
872 | { | |
873 | struct bch_geometry *nfc_geo = &this->bch_geometry; | |
874 | unsigned char *p; | |
875 | unsigned char *a; | |
876 | unsigned int bit; | |
877 | unsigned char mask; | |
878 | unsigned char from_data; | |
879 | unsigned char from_oob; | |
880 | ||
881 | if (!this->swap_block_mark) | |
882 | return; | |
883 | ||
884 | /* | |
885 | * If control arrives here, we're swapping. Make some convenience | |
886 | * variables. | |
887 | */ | |
888 | bit = nfc_geo->block_mark_bit_offset; | |
889 | p = payload + nfc_geo->block_mark_byte_offset; | |
890 | a = auxiliary; | |
891 | ||
892 | /* | |
893 | * Get the byte from the data area that overlays the block mark. Since | |
894 | * the ECC engine applies its own view to the bits in the page, the | |
895 | * physical block mark won't (in general) appear on a byte boundary in | |
896 | * the data. | |
897 | */ | |
898 | from_data = (p[0] >> bit) | (p[1] << (8 - bit)); | |
899 | ||
900 | /* Get the byte from the OOB. */ | |
901 | from_oob = a[0]; | |
902 | ||
903 | /* Swap them. */ | |
904 | a[0] = from_data; | |
905 | ||
906 | mask = (0x1 << bit) - 1; | |
907 | p[0] = (p[0] & mask) | (from_oob << bit); | |
908 | ||
909 | mask = ~0 << bit; | |
910 | p[1] = (p[1] & mask) | (from_oob >> (8 - bit)); | |
911 | } | |
912 | ||
/*
 * gpmi_ecc_read_page - ECC-based page read (the chip->ecc.read_page hook).
 *
 * Reads one page through the BCH engine into @buf, undoes the block mark
 * swap, accumulates the per-chunk ECC status into mtd->ecc_stats and, when
 * @oob_required is set, delivers the OOB per the policy described above
 * gpmi_ecc_read_oob(). Returns 0 on success or a negative error code.
 */
static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct gpmi_nand_data *this = chip->priv;
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	void *payload_virt;
	dma_addr_t payload_phys;
	void *auxiliary_virt;
	dma_addr_t auxiliary_phys;
	unsigned int i;
	unsigned char *status;
	unsigned int failed;
	unsigned int corrected;
	int ret;

	pr_debug("page number is : %d\n", page);
	/*
	 * Arrange for a DMA-able payload buffer: either the caller's @buf
	 * directly or our own bounce buffer (see read_page_prepare()).
	 */
	ret = read_page_prepare(this, buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				&payload_virt, &payload_phys);
	if (ret) {
		pr_err("Inadequate DMA buffer\n");
		ret = -ENOMEM;
		return ret;
	}
	auxiliary_virt = this->auxiliary_virt;
	auxiliary_phys = this->auxiliary_phys;

	/* go! */
	ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
	/* Unmap/release the DMA view before touching the data with the CPU. */
	read_page_end(this, buf, mtd->writesize,
			this->payload_virt, this->payload_phys,
			nfc_geo->payload_size,
			payload_virt, payload_phys);
	if (ret) {
		pr_err("Error in ECC-based read: %d\n", ret);
		goto exit_nfc;
	}

	/* handle the block mark swapping */
	block_mark_swapping(this, payload_virt, auxiliary_virt);

	/*
	 * Loop over status bytes, accumulating ECC status. The BCH engine
	 * leaves one status byte per ECC chunk in the auxiliary buffer.
	 */
	failed = 0;
	corrected = 0;
	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;

	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
			continue;

		if (*status == STATUS_UNCORRECTABLE) {
			failed++;
			continue;
		}
		/* Otherwise the status byte is the number of corrected bits. */
		corrected += *status;
	}

	/*
	 * Propagate ECC status to the owning MTD only when failed or
	 * corrected times nearly reaches our ECC correction threshold.
	 */
	if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
		mtd->ecc_stats.failed	 += failed;
		mtd->ecc_stats.corrected += corrected;
	}

	if (oob_required) {
		/*
		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
		 * for details about our policy for delivering the OOB.
		 *
		 * We fill the caller's buffer with set bits, and then copy the
		 * block mark to the caller's buffer. Note that, if block mark
		 * swapping was necessary, it has already been done, so we can
		 * rely on the first byte of the auxiliary buffer to contain
		 * the block mark.
		 */
		memset(chip->oob_poi, ~0, mtd->oobsize);
		chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
	}

	/* Copy the payload out of the bounce buffer if one was used. */
	read_page_swap_end(this, buf, mtd->writesize,
			this->payload_virt, this->payload_phys,
			nfc_geo->payload_size,
			payload_virt, payload_phys);
exit_nfc:
	return ret;
}
1002 | ||
fdbad98d | 1003 | static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
1fbb938d | 1004 | const uint8_t *buf, int oob_required) |
10a2bcae HS |
1005 | { |
1006 | struct gpmi_nand_data *this = chip->priv; | |
1007 | struct bch_geometry *nfc_geo = &this->bch_geometry; | |
1008 | const void *payload_virt; | |
1009 | dma_addr_t payload_phys; | |
1010 | const void *auxiliary_virt; | |
1011 | dma_addr_t auxiliary_phys; | |
1012 | int ret; | |
1013 | ||
1014 | pr_debug("ecc write page.\n"); | |
1015 | if (this->swap_block_mark) { | |
1016 | /* | |
1017 | * If control arrives here, we're doing block mark swapping. | |
1018 | * Since we can't modify the caller's buffers, we must copy them | |
1019 | * into our own. | |
1020 | */ | |
1021 | memcpy(this->payload_virt, buf, mtd->writesize); | |
1022 | payload_virt = this->payload_virt; | |
1023 | payload_phys = this->payload_phys; | |
1024 | ||
1025 | memcpy(this->auxiliary_virt, chip->oob_poi, | |
1026 | nfc_geo->auxiliary_size); | |
1027 | auxiliary_virt = this->auxiliary_virt; | |
1028 | auxiliary_phys = this->auxiliary_phys; | |
1029 | ||
1030 | /* Handle block mark swapping. */ | |
1031 | block_mark_swapping(this, | |
1032 | (void *) payload_virt, (void *) auxiliary_virt); | |
1033 | } else { | |
1034 | /* | |
1035 | * If control arrives here, we're not doing block mark swapping, | |
1036 | * so we can to try and use the caller's buffers. | |
1037 | */ | |
1038 | ret = send_page_prepare(this, | |
1039 | buf, mtd->writesize, | |
1040 | this->payload_virt, this->payload_phys, | |
1041 | nfc_geo->payload_size, | |
1042 | &payload_virt, &payload_phys); | |
1043 | if (ret) { | |
1044 | pr_err("Inadequate payload DMA buffer\n"); | |
fdbad98d | 1045 | return 0; |
10a2bcae HS |
1046 | } |
1047 | ||
1048 | ret = send_page_prepare(this, | |
1049 | chip->oob_poi, mtd->oobsize, | |
1050 | this->auxiliary_virt, this->auxiliary_phys, | |
1051 | nfc_geo->auxiliary_size, | |
1052 | &auxiliary_virt, &auxiliary_phys); | |
1053 | if (ret) { | |
1054 | pr_err("Inadequate auxiliary DMA buffer\n"); | |
1055 | goto exit_auxiliary; | |
1056 | } | |
1057 | } | |
1058 | ||
1059 | /* Ask the NFC. */ | |
1060 | ret = gpmi_send_page(this, payload_phys, auxiliary_phys); | |
1061 | if (ret) | |
1062 | pr_err("Error in ECC-based write: %d\n", ret); | |
1063 | ||
1064 | if (!this->swap_block_mark) { | |
1065 | send_page_end(this, chip->oob_poi, mtd->oobsize, | |
1066 | this->auxiliary_virt, this->auxiliary_phys, | |
1067 | nfc_geo->auxiliary_size, | |
1068 | auxiliary_virt, auxiliary_phys); | |
1069 | exit_auxiliary: | |
1070 | send_page_end(this, buf, mtd->writesize, | |
1071 | this->payload_virt, this->payload_phys, | |
1072 | nfc_geo->payload_size, | |
1073 | payload_virt, payload_phys); | |
1074 | } | |
fdbad98d JW |
1075 | |
1076 | return 0; | |
10a2bcae HS |
1077 | } |
1078 | ||
1079 | /* | |
1080 | * There are several places in this driver where we have to handle the OOB and | |
1081 | * block marks. This is the function where things are the most complicated, so | |
1082 | * this is where we try to explain it all. All the other places refer back to | |
1083 | * here. | |
1084 | * | |
1085 | * These are the rules, in order of decreasing importance: | |
1086 | * | |
1087 | * 1) Nothing the caller does can be allowed to imperil the block mark. | |
1088 | * | |
1089 | * 2) In read operations, the first byte of the OOB we return must reflect the | |
1090 | * true state of the block mark, no matter where that block mark appears in | |
1091 | * the physical page. | |
1092 | * | |
1093 | * 3) ECC-based read operations return an OOB full of set bits (since we never | |
1094 | * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads | |
1095 | * return). | |
1096 | * | |
1097 | * 4) "Raw" read operations return a direct view of the physical bytes in the | |
1098 | * page, using the conventional definition of which bytes are data and which | |
1099 | * are OOB. This gives the caller a way to see the actual, physical bytes | |
1100 | * in the page, without the distortions applied by our ECC engine. | |
1101 | * | |
1102 | * | |
1103 | * What we do for this specific read operation depends on two questions: | |
1104 | * | |
1105 | * 1) Are we doing a "raw" read, or an ECC-based read? | |
1106 | * | |
1107 | * 2) Are we using block mark swapping or transcription? | |
1108 | * | |
1109 | * There are four cases, illustrated by the following Karnaugh map: | |
1110 | * | |
1111 | * | Raw | ECC-based | | |
1112 | * -------------+-------------------------+-------------------------+ | |
1113 | * | Read the conventional | | | |
1114 | * | OOB at the end of the | | | |
1115 | * Swapping | page and return it. It | | | |
1116 | * | contains exactly what | | | |
1117 | * | we want. | Read the block mark and | | |
1118 | * -------------+-------------------------+ return it in a buffer | | |
1119 | * | Read the conventional | full of set bits. | | |
1120 | * | OOB at the end of the | | | |
1121 | * | page and also the block | | | |
1122 | * Transcribing | mark in the metadata. | | | |
1123 | * | Copy the block mark | | | |
1124 | * | into the first byte of | | | |
1125 | * | the OOB. | | | |
1126 | * -------------+-------------------------+-------------------------+ | |
1127 | * | |
1128 | * Note that we break rule #4 in the Transcribing/Raw case because we're not | |
1129 | * giving an accurate view of the actual, physical bytes in the page (we're | |
1130 | * overwriting the block mark). That's OK because it's more important to follow | |
1131 | * rule #2. | |
1132 | * | |
1133 | * It turns out that knowing whether we want an "ECC-based" or "raw" read is not | |
1134 | * easy. When reading a page, for example, the NAND Flash MTD code calls our | |
1135 | * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an | |
1136 | * ECC-based or raw view of the page is implicit in which function it calls | |
1137 | * (there is a similar pair of ECC-based/raw functions for writing). | |
1138 | * | |
271b874b BN |
1139 | * FIXME: The following paragraph is incorrect, now that there exist |
1140 | * ecc.read_oob_raw and ecc.write_oob_raw functions. | |
1141 | * | |
10a2bcae HS |
1142 | * Since MTD assumes the OOB is not covered by ECC, there is no pair of |
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
1144 | * caller wants an ECC-based or raw view of the page is not propagated down to | |
1145 | * this driver. | |
1146 | */ | |
/*
 * gpmi_ecc_read_oob - the chip->ecc.read_oob hook.
 *
 * Implements the OOB delivery policy described in the comment above:
 * return an OOB full of set bits whose first byte reflects the true
 * block mark, wherever that mark physically lives.
 */
static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct gpmi_nand_data *this = chip->priv;

	pr_debug("page number is %d\n", page);
	/* clear the OOB buffer */
	memset(chip->oob_poi, ~0, mtd->oobsize);

	/* Read out the conventional OOB (it starts right after the data). */
	chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	/*
	 * Now, we want to make sure the block mark is correct. In the
	 * Swapping/Raw case, we already have it. Otherwise (transcription),
	 * the mark lives in byte 0 of the page, so we need to explicitly
	 * read it.
	 */
	if (!this->swap_block_mark) {
		/* Read the block mark into the first byte of the OOB buffer. */
		chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
		chip->oob_poi[0] = chip->read_byte(mtd);
	}

	return 0;
}
1173 | ||
1174 | static int | |
1175 | gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) | |
1176 | { | |
1177 | /* | |
1178 | * The BCH will use all the (page + oob). | |
1179 | * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob. | |
1180 | * But it can not stop some ioctls such MEMWRITEOOB which uses | |
0612b9dd | 1181 | * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit |
10a2bcae HS |
1182 | * these ioctls too. |
1183 | */ | |
1184 | return -EPERM; | |
1185 | } | |
1186 | ||
/*
 * gpmi_block_markbad - mark the block containing @ofs as bad.
 *
 * Updates the in-memory BBT (when present), then either refreshes the
 * flash-based BBT or writes a zero marker byte to the medium directly.
 * With block mark swapping the marker goes to the conventional OOB
 * location (column mtd->writesize); with transcription it goes to byte 0
 * of the page. Returns 0 on success or a negative error code.
 */
static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd->priv;
	struct gpmi_nand_data *this = chip->priv;
	int block, ret = 0;
	uint8_t *block_mark;
	int column, page, status, chipnr;

	/* Get block number */
	block = (int)(ofs >> chip->bbt_erase_shift);
	/* Flag the block in the in-memory BBT: two status bits per block. */
	if (chip->bbt)
		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);

	/* Do we have a flash based bad block table ? */
	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		ret = nand_update_bbt(mtd, ofs);
	else {
		chipnr = (int)(ofs >> chip->chip_shift);
		chip->select_chip(mtd, chipnr);

		/* Marker column depends on the block mark strategy. */
		column = this->swap_block_mark ? mtd->writesize : 0;

		/* Write the block mark. */
		block_mark = this->data_buffer_dma;
		block_mark[0] = 0; /* bad block marker */

		/* Shift to get page */
		page = (int)(ofs >> chip->page_shift);

		chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
		chip->write_buf(mtd, block_mark, 1);
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			ret = -EIO;

		chip->select_chip(mtd, -1);
	}
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
1231 | ||
a78da287 | 1232 | static int nand_boot_set_geometry(struct gpmi_nand_data *this) |
10a2bcae HS |
1233 | { |
1234 | struct boot_rom_geometry *geometry = &this->rom_geometry; | |
1235 | ||
1236 | /* | |
1237 | * Set the boot block stride size. | |
1238 | * | |
1239 | * In principle, we should be reading this from the OTP bits, since | |
1240 | * that's where the ROM is going to get it. In fact, we don't have any | |
1241 | * way to read the OTP bits, so we go with the default and hope for the | |
1242 | * best. | |
1243 | */ | |
1244 | geometry->stride_size_in_pages = 64; | |
1245 | ||
1246 | /* | |
1247 | * Set the search area stride exponent. | |
1248 | * | |
1249 | * In principle, we should be reading this from the OTP bits, since | |
1250 | * that's where the ROM is going to get it. In fact, we don't have any | |
1251 | * way to read the OTP bits, so we go with the default and hope for the | |
1252 | * best. | |
1253 | */ | |
1254 | geometry->search_area_stride_exponent = 2; | |
1255 | return 0; | |
1256 | } | |
1257 | ||
/* The NCB fingerprint the i.MX23 boot ROM looks for. */
static const char *fingerprint = "STMP";

/*
 * mx23_check_transcription_stamp - search for a transcription stamp.
 *
 * Scans the first search area on chip 0 for the "STMP" NCB fingerprint,
 * whose presence indicates the bad block marks have already been
 * transcribed. Returns true when the fingerprint is found, else false.
 */
static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct device *dev = this->dev;
	struct mtd_info *mtd = &this->mtd;
	struct nand_chip *chip = &this->nand;
	unsigned int search_area_size_in_strides;
	unsigned int stride;
	unsigned int page;
	uint8_t *buffer = chip->buffers->databuf;
	int saved_chip_number;
	int found_an_ncb_fingerprint = false;

	/* Compute the number of strides in a search area. */
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;

	/* Remember the current chip so we can restore the selection later. */
	saved_chip_number = this->current_chip;
	chip->select_chip(mtd, 0);

	/*
	 * Loop through the first search area, looking for the NCB fingerprint.
	 */
	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");

	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);

		/*
		 * Read the NCB fingerprint. The fingerprint is four bytes long
		 * and starts in the 12th byte of the page.
		 */
		chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
		chip->read_buf(mtd, buffer, strlen(fingerprint));

		/* Look for the fingerprint. */
		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
			found_an_ncb_fingerprint = true;
			break;
		}

	}

	chip->select_chip(mtd, saved_chip_number);

	if (found_an_ncb_fingerprint)
		dev_dbg(dev, "\tFound a fingerprint\n");
	else
		dev_dbg(dev, "\tNo fingerprint found\n");
	return found_an_ncb_fingerprint;
}
1312 | ||
/*
 * mx23_write_transcription_stamp - write a transcription stamp.
 *
 * Erases every block of the first search area on chip 0, then writes the
 * "STMP" NCB fingerprint into the first page of each stride so later boots
 * (and mx23_check_transcription_stamp()) know the block marks have been
 * transcribed. Erase/program failures are only logged, not propagated;
 * the function always returns 0.
 */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct mtd_info *mtd = &this->mtd;
	struct nand_chip *chip = &this->nand;
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	uint8_t *buffer = chip->buffers->databuf;
	int saved_chip_number;
	int status;

	/* Compute the search area geometry. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	/* Round up to whole erase blocks. */
	search_area_size_in_blocks =
		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
				    block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);

	/* Select chip 0. */
	saved_chip_number = this->current_chip;
	chip->select_chip(mtd, 0);

	/* Loop over blocks in the first search area, erasing them. */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Compute the page address. */
		page = block * block_size_in_pages;

		/* Erase this block. */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
		chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);

		/* Wait for the erase to finish. */
		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Write the NCB fingerprint into the page buffer (offset 12). */
	memset(buffer, ~0, mtd->writesize);
	memset(chip->oob_poi, ~0, mtd->oobsize);
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Loop through the first search area, writing NCB fingerprints. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		/* Write the first page of the current stride. */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
		chip->ecc.write_page_raw(mtd, chip, buffer, 0);
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

		/* Wait for the write to finish. */
		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	/* Deselect chip 0. */
	chip->select_chip(mtd, saved_chip_number);
	return 0;
}
1394 | ||
/*
 * mx23_boot_init - i.MX23-specific boot ROM initialization.
 *
 * The i.MX23 cannot use block mark swapping, so it relies on
 * transcription: every conventional bad block mark is re-written to the
 * location the ROM expects, and a stamp records that this has been done.
 * Always returns 0; individual transcription failures are only logged.
 */
static int mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = &this->mtd;
	unsigned int block_count;
	unsigned int block;
	int     chipnr;
	int     page;
	loff_t  byte;
	uint8_t block_mark;
	int     ret = 0;

	/*
	 * If control arrives here, we can't use block mark swapping, which
	 * means we're forced to use transcription. First, scan for the
	 * transcription stamp. If we find it, then we don't have to do
	 * anything -- the block marks are already transcribed.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * If control arrives here, we couldn't find a transcription stamp,
	 * so we presume the block marks are in the conventional location.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium. */
	block_count = chip->chipsize >> chip->phys_erase_shift;

	/*
	 * Loop over all the blocks in the medium, transcribing block marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this block's
		 * conventional mark.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block <<  chip->phys_erase_shift;

		/* Send the command to read the conventional block mark. */
		chip->select_chip(mtd, chipnr);
		chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		block_mark = chip->read_byte(mtd);
		chip->select_chip(mtd, -1);

		/*
		 * Check if the block is marked bad. If so, we need to mark it
		 * again, but this time the result will be a mark in the
		 * location where we transcribe block marks.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->block_markbad(mtd, byte);
			if (ret)
				dev_err(dev, "Failed to mark block bad with "
							"ret %d\n", ret);
		}
	}

	/* Write the stamp that indicates we've transcribed the block marks. */
	mx23_write_transcription_stamp(this);
	return 0;
}
1463 | ||
/*
 * nand_boot_init - boot ROM setup, run before the BBT scan.
 *
 * Fills in the ROM search geometry and performs any ROM arch-specific
 * initialization; only the i.MX23 needs extra work (bad block mark
 * transcription).
 */
static int nand_boot_init(struct gpmi_nand_data *this)
{
	nand_boot_set_geometry(this);

	return GPMI_IS_MX23(this) ? mx23_boot_init(this) : 0;
}
1473 | ||
/*
 * gpmi_set_geometry - compute the BCH geometry and size the DMA buffers.
 *
 * Frees the temporary ID-read DMA buffers, derives the NFC geometry used
 * by the BCH engine, and re-allocates DMA buffers matching the real page
 * and OOB sizes. Returns 0 on success or a negative error code.
 */
static int gpmi_set_geometry(struct gpmi_nand_data *this)
{
	int ret;

	/* Free the temporary DMA memory for reading ID. */
	gpmi_free_dma_buffer(this);

	/* Set up the NFC geometry which is used by BCH. */
	ret = bch_set_geometry(this);
	if (ret) {
		pr_err("Error setting BCH geometry : %d\n", ret);
		return ret;
	}

	/* Alloc the new DMA buffers according to the pagesize and oobsize */
	return gpmi_alloc_dma_buffer(this);
}
1491 | ||
1492 | static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this) | |
1493 | { | |
1494 | int ret; | |
1495 | ||
1496 | /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ | |
1497 | if (GPMI_IS_MX23(this)) | |
1498 | this->swap_block_mark = false; | |
1499 | else | |
1500 | this->swap_block_mark = true; | |
1501 | ||
1502 | /* Set up the medium geometry */ | |
1503 | ret = gpmi_set_geometry(this); | |
1504 | if (ret) | |
1505 | return ret; | |
1506 | ||
5636ce0f MV |
1507 | /* Adjust the ECC strength according to the chip. */ |
1508 | this->nand.ecc.strength = this->bch_geometry.ecc_strength; | |
1509 | this->mtd.ecc_strength = this->bch_geometry.ecc_strength; | |
e0dd89c5 | 1510 | this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength; |
5636ce0f | 1511 | |
10a2bcae HS |
1512 | /* NAND boot init, depends on the gpmi_set_geometry(). */ |
1513 | return nand_boot_init(this); | |
1514 | } | |
1515 | ||
/*
 * gpmi_scan_bbt - the chip->scan_bbt hook.
 *
 * Runs the pre-scan setup, opportunistically enables extra timing modes,
 * and falls through to the default BBT implementation.
 */
static int gpmi_scan_bbt(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	struct gpmi_nand_data *this = chip->priv;
	int ret;

	/* Prepare for the BBT scan. */
	ret = gpmi_pre_bbt_scan(this);
	if (ret)
		return ret;

	/*
	 * Try to enable the extra features, such as EDO or Sync mode.
	 *
	 * The return value is deliberately ignored: if enabling the extra
	 * features fails, we can still run in the normal way.
	 */
	gpmi_extra_init(this);

	/* use the default BBT implementation */
	return nand_default_bbt(mtd);
}
1538 | ||
/* Undo gpmi_nfc_init(): unregister the MTD and free the DMA buffers. */
static void gpmi_nfc_exit(struct gpmi_nand_data *this)
{
	nand_release(&this->mtd);
	gpmi_free_dma_buffer(this);
}
1544 | ||
/*
 * gpmi_nfc_init - wire up the MTD/NAND structures, scan the chip and
 * register the MTD device (including DT-parsed partitions).
 *
 * Returns 0 on success or a negative error code; on failure everything
 * set up so far is torn down via gpmi_nfc_exit().
 */
static int gpmi_nfc_init(struct gpmi_nand_data *this)
{
	struct mtd_info  *mtd = &this->mtd;
	struct nand_chip *chip = &this->nand;
	struct mtd_part_parser_data ppdata = {};
	int ret;

	/* init current chip */
	this->current_chip	= -1;

	/* init the MTD data structures */
	mtd->priv		= chip;
	mtd->name		= "gpmi-nand";
	mtd->owner		= THIS_MODULE;

	/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
	chip->priv		= this;
	chip->select_chip	= gpmi_select_chip;
	chip->cmd_ctrl		= gpmi_cmd_ctrl;
	chip->dev_ready		= gpmi_dev_ready;
	chip->read_byte		= gpmi_read_byte;
	chip->read_buf		= gpmi_read_buf;
	chip->write_buf		= gpmi_write_buf;
	chip->ecc.read_page	= gpmi_ecc_read_page;
	chip->ecc.write_page	= gpmi_ecc_write_page;
	chip->ecc.read_oob	= gpmi_ecc_read_oob;
	chip->ecc.write_oob	= gpmi_ecc_write_oob;
	chip->scan_bbt		= gpmi_scan_bbt;
	chip->badblock_pattern	= &gpmi_bbt_descr;
	chip->block_markbad	= gpmi_block_markbad;
	chip->options		|= NAND_NO_SUBPAGE_WRITE;
	chip->ecc.mode		= NAND_ECC_HW;
	chip->ecc.size		= 1;
	/* Placeholder; the real strength is set in gpmi_pre_bbt_scan(). */
	chip->ecc.strength	= 8;
	chip->ecc.layout	= &gpmi_hw_ecclayout;
	/* Honor the "nand-on-flash-bbt" device tree property. */
	if (of_get_nand_on_flash_bbt(this->dev->of_node))
		chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;

	/* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
	this->bch_geometry.payload_size = 1024;
	this->bch_geometry.auxiliary_size = 128;
	ret = gpmi_alloc_dma_buffer(this);
	if (ret)
		goto err_out;

	ret = nand_scan(mtd, 1);
	if (ret) {
		pr_err("Chip scan failed\n");
		goto err_out;
	}

	/* Register the MTD, letting the DT partition parser run. */
	ppdata.of_node = this->pdev->dev.of_node;
	ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
	if (ret)
		goto err_out;
	return 0;

err_out:
	gpmi_nfc_exit(this);
	return ret;
}
1606 | ||
/* Platform device IDs; driver_data selects the SoC variant. */
static const struct platform_device_id gpmi_ids[] = {
	{ .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
	{ .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
	{ .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
	{},
};

/*
 * Device tree match table. Each .data points at the corresponding
 * gpmi_ids[] entry — this relies on IS_MX23/IS_MX28/IS_MX6Q doubling as
 * valid indexes into gpmi_ids[]; NOTE(review): confirm the enum values
 * match the array order.
 */
static const struct of_device_id gpmi_nand_id_table[] = {
	{
		.compatible = "fsl,imx23-gpmi-nand",
		.data = (void *)&gpmi_ids[IS_MX23]
	}, {
		.compatible = "fsl,imx28-gpmi-nand",
		.data = (void *)&gpmi_ids[IS_MX28]
	}, {
		.compatible = "fsl,imx6q-gpmi-nand",
		.data = (void *)&gpmi_ids[IS_MX6Q]
	}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
1627 | ||
06f25510 | 1628 | static int gpmi_nand_probe(struct platform_device *pdev) |
10a2bcae | 1629 | { |
10a2bcae | 1630 | struct gpmi_nand_data *this; |
e10db1f0 | 1631 | const struct of_device_id *of_id; |
10a2bcae HS |
1632 | int ret; |
1633 | ||
e10db1f0 HS |
1634 | of_id = of_match_device(gpmi_nand_id_table, &pdev->dev); |
1635 | if (of_id) { | |
1636 | pdev->id_entry = of_id->data; | |
1637 | } else { | |
1638 | pr_err("Failed to find the right device id.\n"); | |
1639 | return -ENOMEM; | |
1640 | } | |
1641 | ||
10a2bcae HS |
1642 | this = kzalloc(sizeof(*this), GFP_KERNEL); |
1643 | if (!this) { | |
1644 | pr_err("Failed to allocate per-device memory\n"); | |
1645 | return -ENOMEM; | |
1646 | } | |
1647 | ||
1648 | platform_set_drvdata(pdev, this); | |
1649 | this->pdev = pdev; | |
1650 | this->dev = &pdev->dev; | |
10a2bcae HS |
1651 | |
1652 | ret = acquire_resources(this); | |
1653 | if (ret) | |
1654 | goto exit_acquire_resources; | |
1655 | ||
1656 | ret = init_hardware(this); | |
1657 | if (ret) | |
1658 | goto exit_nfc_init; | |
1659 | ||
1660 | ret = gpmi_nfc_init(this); | |
1661 | if (ret) | |
1662 | goto exit_nfc_init; | |
1663 | ||
490e280a FE |
1664 | dev_info(this->dev, "driver registered.\n"); |
1665 | ||
10a2bcae HS |
1666 | return 0; |
1667 | ||
1668 | exit_nfc_init: | |
1669 | release_resources(this); | |
10a2bcae HS |
1670 | exit_acquire_resources: |
1671 | platform_set_drvdata(pdev, NULL); | |
1672 | kfree(this); | |
490e280a FE |
1673 | dev_err(this->dev, "driver registration failed: %d\n", ret); |
1674 | ||
10a2bcae HS |
1675 | return ret; |
1676 | } | |
1677 | ||
/* Platform driver remove: undo everything gpmi_nand_probe() did. */
static int gpmi_nand_remove(struct platform_device *pdev)
{
	struct gpmi_nand_data *this = platform_get_drvdata(pdev);

	gpmi_nfc_exit(this);
	release_resources(this);
	platform_set_drvdata(pdev, NULL);
	kfree(this);
	return 0;
}
1688 | ||
/* Platform driver glue: binds by device tree match or platform id. */
static struct platform_driver gpmi_nand_driver = {
	.driver = {
		.name = "gpmi-nand",
		.of_match_table = gpmi_nand_id_table,
	},
	.probe   = gpmi_nand_probe,
	.remove  = gpmi_nand_remove,
	.id_table = gpmi_ids,
};
module_platform_driver(gpmi_nand_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
MODULE_LICENSE("GPL");