2 * Freescale GPMI NAND Flash Driver
4 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 #include <linux/clk.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/mtd/partitions.h>
29 #include <linux/pinctrl/consumer.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
33 #include "gpmi-nand.h"
35 /* Resource names for the GPMI NAND driver. */
36 #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
37 #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
38 #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
39 #define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma"
41 /* add our owner bbt descriptor */
42 static uint8_t scan_ff_pattern
[] = { 0xff };
43 static struct nand_bbt_descr gpmi_bbt_descr
= {
47 .pattern
= scan_ff_pattern
50 /* We will use all the (page + OOB). */
51 static struct nand_ecclayout gpmi_hw_ecclayout
= {
54 .oobfree
= { {.offset
= 0, .length
= 0} }
57 static irqreturn_t
bch_irq(int irq
, void *cookie
)
59 struct gpmi_nand_data
*this = cookie
;
62 complete(&this->bch_done
);
67 * Calculate the ECC strength by hand:
68 * E : The ECC strength.
69 * G : the length of Galois Field.
70 * N : The chunk count of per page.
71 * O : the oobsize of the NAND chip.
72 * M : the metasize of per page.
76 * ------------ <= (O - M)
84 static inline int get_ecc_strength(struct gpmi_nand_data
*this)
86 struct bch_geometry
*geo
= &this->bch_geometry
;
87 struct mtd_info
*mtd
= &this->mtd
;
90 ecc_strength
= ((mtd
->oobsize
- geo
->metadata_size
) * 8)
91 / (geo
->gf_len
* geo
->ecc_chunk_count
);
93 /* We need the minor even number. */
94 return round_down(ecc_strength
, 2);
97 int common_nfc_set_geometry(struct gpmi_nand_data
*this)
99 struct bch_geometry
*geo
= &this->bch_geometry
;
100 struct mtd_info
*mtd
= &this->mtd
;
101 unsigned int metadata_size
;
102 unsigned int status_size
;
103 unsigned int block_mark_bit_offset
;
106 * The size of the metadata can be changed, though we set it to 10
107 * bytes now. But it can't be too large, because we have to save
108 * enough space for BCH.
110 geo
->metadata_size
= 10;
112 /* The default for the length of Galois Field. */
115 /* The default for chunk size. There is no oobsize greater then 512. */
116 geo
->ecc_chunk_size
= 512;
117 while (geo
->ecc_chunk_size
< mtd
->oobsize
)
118 geo
->ecc_chunk_size
*= 2; /* keep C >= O */
120 geo
->ecc_chunk_count
= mtd
->writesize
/ geo
->ecc_chunk_size
;
122 /* We use the same ECC strength for all chunks. */
123 geo
->ecc_strength
= get_ecc_strength(this);
124 if (!geo
->ecc_strength
) {
125 pr_err("wrong ECC strength.\n");
129 geo
->page_size
= mtd
->writesize
+ mtd
->oobsize
;
130 geo
->payload_size
= mtd
->writesize
;
133 * The auxiliary buffer contains the metadata and the ECC status. The
134 * metadata is padded to the nearest 32-bit boundary. The ECC status
135 * contains one byte for every ECC chunk, and is also padded to the
136 * nearest 32-bit boundary.
138 metadata_size
= ALIGN(geo
->metadata_size
, 4);
139 status_size
= ALIGN(geo
->ecc_chunk_count
, 4);
141 geo
->auxiliary_size
= metadata_size
+ status_size
;
142 geo
->auxiliary_status_offset
= metadata_size
;
144 if (!this->swap_block_mark
)
148 * We need to compute the byte and bit offsets of
149 * the physical block mark within the ECC-based view of the page.
151 * NAND chip with 2K page shows below:
157 * +---+----------+-+----------+-+----------+-+----------+-+
158 * | M | data |E| data |E| data |E| data |E|
159 * +---+----------+-+----------+-+----------+-+----------+-+
161 * The position of block mark moves forward in the ECC-based view
162 * of page, and the delta is:
165 * D = (---------------- + M)
168 * With the formula to compute the ECC strength, and the condition
169 * : C >= O (C is the ecc chunk size)
171 * It's easy to deduce to the following result:
173 * E * G (O - M) C - M C - M
174 * ----------- <= ------- <= -------- < ---------
180 * D = (---------------- + M) < C
183 * The above inequality means the position of block mark
184 * within the ECC-based view of the page is still in the data chunk,
185 * and it's NOT in the ECC bits of the chunk.
187 * Use the following to compute the bit position of the
188 * physical block mark within the ECC-based view of the page:
189 * (page_size - D) * 8
193 block_mark_bit_offset
= mtd
->writesize
* 8 -
194 (geo
->ecc_strength
* geo
->gf_len
* (geo
->ecc_chunk_count
- 1)
195 + geo
->metadata_size
* 8);
197 geo
->block_mark_byte_offset
= block_mark_bit_offset
/ 8;
198 geo
->block_mark_bit_offset
= block_mark_bit_offset
% 8;
202 struct dma_chan
*get_dma_chan(struct gpmi_nand_data
*this)
204 int chipnr
= this->current_chip
;
206 return this->dma_chans
[chipnr
];
209 /* Can we use the upper's buffer directly for DMA? */
210 void prepare_data_dma(struct gpmi_nand_data
*this, enum dma_data_direction dr
)
212 struct scatterlist
*sgl
= &this->data_sgl
;
215 this->direct_dma_map_ok
= true;
217 /* first try to map the upper buffer directly */
218 sg_init_one(sgl
, this->upper_buf
, this->upper_len
);
219 ret
= dma_map_sg(this->dev
, sgl
, 1, dr
);
221 /* We have to use our own DMA buffer. */
222 sg_init_one(sgl
, this->data_buffer_dma
, PAGE_SIZE
);
224 if (dr
== DMA_TO_DEVICE
)
225 memcpy(this->data_buffer_dma
, this->upper_buf
,
228 ret
= dma_map_sg(this->dev
, sgl
, 1, dr
);
230 pr_err("DMA mapping failed.\n");
232 this->direct_dma_map_ok
= false;
236 /* This will be called after the DMA operation is finished. */
237 static void dma_irq_callback(void *param
)
239 struct gpmi_nand_data
*this = param
;
240 struct completion
*dma_c
= &this->dma_done
;
244 switch (this->dma_type
) {
245 case DMA_FOR_COMMAND
:
246 dma_unmap_sg(this->dev
, &this->cmd_sgl
, 1, DMA_TO_DEVICE
);
249 case DMA_FOR_READ_DATA
:
250 dma_unmap_sg(this->dev
, &this->data_sgl
, 1, DMA_FROM_DEVICE
);
251 if (this->direct_dma_map_ok
== false)
252 memcpy(this->upper_buf
, this->data_buffer_dma
,
256 case DMA_FOR_WRITE_DATA
:
257 dma_unmap_sg(this->dev
, &this->data_sgl
, 1, DMA_TO_DEVICE
);
260 case DMA_FOR_READ_ECC_PAGE
:
261 case DMA_FOR_WRITE_ECC_PAGE
:
262 /* We have to wait the BCH interrupt to finish. */
266 pr_err("in wrong DMA operation.\n");
270 int start_dma_without_bch_irq(struct gpmi_nand_data
*this,
271 struct dma_async_tx_descriptor
*desc
)
273 struct completion
*dma_c
= &this->dma_done
;
276 init_completion(dma_c
);
278 desc
->callback
= dma_irq_callback
;
279 desc
->callback_param
= this;
280 dmaengine_submit(desc
);
281 dma_async_issue_pending(get_dma_chan(this));
283 /* Wait for the interrupt from the DMA block. */
284 err
= wait_for_completion_timeout(dma_c
, msecs_to_jiffies(1000));
286 pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type
);
287 gpmi_dump_info(this);
294 * This function is used in BCH reading or BCH writing pages.
295 * It will wait for the BCH interrupt as long as ONE second.
296 * Actually, we must wait for two interrupts :
297 * [1] firstly the DMA interrupt and
298 * [2] secondly the BCH interrupt.
300 int start_dma_with_bch_irq(struct gpmi_nand_data
*this,
301 struct dma_async_tx_descriptor
*desc
)
303 struct completion
*bch_c
= &this->bch_done
;
306 /* Prepare to receive an interrupt from the BCH block. */
307 init_completion(bch_c
);
310 start_dma_without_bch_irq(this, desc
);
312 /* Wait for the interrupt from the BCH block. */
313 err
= wait_for_completion_timeout(bch_c
, msecs_to_jiffies(1000));
315 pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type
);
316 gpmi_dump_info(this);
323 acquire_register_block(struct gpmi_nand_data
*this, const char *res_name
)
325 struct platform_device
*pdev
= this->pdev
;
326 struct resources
*res
= &this->resources
;
330 r
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, res_name
);
332 pr_err("Can't get resource for %s\n", res_name
);
336 p
= ioremap(r
->start
, resource_size(r
));
338 pr_err("Can't remap %s\n", res_name
);
342 if (!strcmp(res_name
, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME
))
344 else if (!strcmp(res_name
, GPMI_NAND_BCH_REGS_ADDR_RES_NAME
))
347 pr_err("unknown resource name : %s\n", res_name
);
352 static void release_register_block(struct gpmi_nand_data
*this)
354 struct resources
*res
= &this->resources
;
356 iounmap(res
->gpmi_regs
);
358 iounmap(res
->bch_regs
);
359 res
->gpmi_regs
= NULL
;
360 res
->bch_regs
= NULL
;
364 acquire_bch_irq(struct gpmi_nand_data
*this, irq_handler_t irq_h
)
366 struct platform_device
*pdev
= this->pdev
;
367 struct resources
*res
= &this->resources
;
368 const char *res_name
= GPMI_NAND_BCH_INTERRUPT_RES_NAME
;
372 r
= platform_get_resource_byname(pdev
, IORESOURCE_IRQ
, res_name
);
374 pr_err("Can't get resource for %s\n", res_name
);
378 err
= request_irq(r
->start
, irq_h
, 0, res_name
, this);
380 pr_err("Can't own %s\n", res_name
);
384 res
->bch_low_interrupt
= r
->start
;
385 res
->bch_high_interrupt
= r
->end
;
389 static void release_bch_irq(struct gpmi_nand_data
*this)
391 struct resources
*res
= &this->resources
;
392 int i
= res
->bch_low_interrupt
;
394 for (; i
<= res
->bch_high_interrupt
; i
++)
398 static bool gpmi_dma_filter(struct dma_chan
*chan
, void *param
)
400 struct gpmi_nand_data
*this = param
;
401 int dma_channel
= (int)this->private;
403 if (!mxs_dma_is_apbh(chan
))
406 * only catch the GPMI dma channels :
407 * for mx23 : MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3
408 * (These four channels share the same IRQ!)
410 * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
411 * (These eight channels share the same IRQ!)
413 if (dma_channel
== chan
->chan_id
) {
414 chan
->private = &this->dma_data
;
420 static void release_dma_channels(struct gpmi_nand_data
*this)
423 for (i
= 0; i
< DMA_CHANS
; i
++)
424 if (this->dma_chans
[i
]) {
425 dma_release_channel(this->dma_chans
[i
]);
426 this->dma_chans
[i
] = NULL
;
430 static int acquire_dma_channels(struct gpmi_nand_data
*this)
432 struct platform_device
*pdev
= this->pdev
;
433 struct resource
*r_dma
;
434 struct device_node
*dn
;
437 struct dma_chan
*dma_chan
;
440 /* dma channel, we only use the first one. */
441 dn
= pdev
->dev
.of_node
;
442 ret
= of_property_read_u32(dn
, "fsl,gpmi-dma-channel", &dma_channel
);
444 pr_err("unable to get DMA channel from dt.\n");
447 this->private = (void *)dma_channel
;
449 /* gpmi dma interrupt */
450 r_dma
= platform_get_resource_byname(pdev
, IORESOURCE_IRQ
,
451 GPMI_NAND_DMA_INTERRUPT_RES_NAME
);
453 pr_err("Can't get resource for DMA\n");
456 this->dma_data
.chan_irq
= r_dma
->start
;
458 /* request dma channel */
460 dma_cap_set(DMA_SLAVE
, mask
);
462 dma_chan
= dma_request_channel(mask
, gpmi_dma_filter
, this);
464 pr_err("Failed to request DMA channel.\n");
468 this->dma_chans
[0] = dma_chan
;
472 release_dma_channels(this);
476 static void gpmi_put_clks(struct gpmi_nand_data
*this)
478 struct resources
*r
= &this->resources
;
482 for (i
= 0; i
< GPMI_CLK_MAX
; i
++) {
491 static char *extra_clks_for_mx6q
[GPMI_CLK_MAX
] = {
492 "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
495 static int gpmi_get_clks(struct gpmi_nand_data
*this)
497 struct resources
*r
= &this->resources
;
498 char **extra_clks
= NULL
;
502 /* The main clock is stored in the first. */
503 r
->clock
[0] = clk_get(this->dev
, "gpmi_io");
504 if (IS_ERR(r
->clock
[0]))
507 /* Get extra clocks */
508 if (GPMI_IS_MX6Q(this))
509 extra_clks
= extra_clks_for_mx6q
;
513 for (i
= 1; i
< GPMI_CLK_MAX
; i
++) {
514 if (extra_clks
[i
- 1] == NULL
)
517 clk
= clk_get(this->dev
, extra_clks
[i
- 1]);
524 if (GPMI_IS_MX6Q(this))
526 * Set the default value for the gpmi clock in mx6q:
528 * If you want to use the ONFI nand which is in the
529 * Synchronous Mode, you should change the clock as you need.
531 clk_set_rate(r
->clock
[0], 22000000);
536 dev_dbg(this->dev
, "failed in finding the clocks.\n");
541 static int acquire_resources(struct gpmi_nand_data
*this)
543 struct pinctrl
*pinctrl
;
546 ret
= acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME
);
550 ret
= acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME
);
554 ret
= acquire_bch_irq(this, bch_irq
);
558 ret
= acquire_dma_channels(this);
560 goto exit_dma_channels
;
562 pinctrl
= devm_pinctrl_get_select_default(&this->pdev
->dev
);
563 if (IS_ERR(pinctrl
)) {
564 ret
= PTR_ERR(pinctrl
);
568 ret
= gpmi_get_clks(this);
575 release_dma_channels(this);
577 release_bch_irq(this);
579 release_register_block(this);
/* Release everything acquired by acquire_resources(), in reverse order. */
static void release_resources(struct gpmi_nand_data *this)
{
	gpmi_put_clks(this);
	release_register_block(this);
	release_bch_irq(this);
	release_dma_channels(this);
}
591 static int init_hardware(struct gpmi_nand_data
*this)
596 * This structure contains the "safe" GPMI timing that should succeed
597 * with any NAND Flash device
598 * (although, with less-than-optimal performance).
600 struct nand_timing safe_timing
= {
601 .data_setup_in_ns
= 80,
602 .data_hold_in_ns
= 60,
603 .address_setup_in_ns
= 25,
604 .gpmi_sample_delay_in_ns
= 6,
610 /* Initialize the hardwares. */
611 ret
= gpmi_init(this);
615 this->timing
= safe_timing
;
619 static int read_page_prepare(struct gpmi_nand_data
*this,
620 void *destination
, unsigned length
,
621 void *alt_virt
, dma_addr_t alt_phys
, unsigned alt_size
,
622 void **use_virt
, dma_addr_t
*use_phys
)
624 struct device
*dev
= this->dev
;
626 if (virt_addr_valid(destination
)) {
627 dma_addr_t dest_phys
;
629 dest_phys
= dma_map_single(dev
, destination
,
630 length
, DMA_FROM_DEVICE
);
631 if (dma_mapping_error(dev
, dest_phys
)) {
632 if (alt_size
< length
) {
633 pr_err("%s, Alternate buffer is too small\n",
639 *use_virt
= destination
;
640 *use_phys
= dest_phys
;
641 this->direct_dma_map_ok
= true;
646 *use_virt
= alt_virt
;
647 *use_phys
= alt_phys
;
648 this->direct_dma_map_ok
= false;
652 static inline void read_page_end(struct gpmi_nand_data
*this,
653 void *destination
, unsigned length
,
654 void *alt_virt
, dma_addr_t alt_phys
, unsigned alt_size
,
655 void *used_virt
, dma_addr_t used_phys
)
657 if (this->direct_dma_map_ok
)
658 dma_unmap_single(this->dev
, used_phys
, length
, DMA_FROM_DEVICE
);
661 static inline void read_page_swap_end(struct gpmi_nand_data
*this,
662 void *destination
, unsigned length
,
663 void *alt_virt
, dma_addr_t alt_phys
, unsigned alt_size
,
664 void *used_virt
, dma_addr_t used_phys
)
666 if (!this->direct_dma_map_ok
)
667 memcpy(destination
, alt_virt
, length
);
670 static int send_page_prepare(struct gpmi_nand_data
*this,
671 const void *source
, unsigned length
,
672 void *alt_virt
, dma_addr_t alt_phys
, unsigned alt_size
,
673 const void **use_virt
, dma_addr_t
*use_phys
)
675 struct device
*dev
= this->dev
;
677 if (virt_addr_valid(source
)) {
678 dma_addr_t source_phys
;
680 source_phys
= dma_map_single(dev
, (void *)source
, length
,
682 if (dma_mapping_error(dev
, source_phys
)) {
683 if (alt_size
< length
) {
684 pr_err("%s, Alternate buffer is too small\n",
691 *use_phys
= source_phys
;
696 * Copy the content of the source buffer into the alternate
697 * buffer and set up the return values accordingly.
699 memcpy(alt_virt
, source
, length
);
701 *use_virt
= alt_virt
;
702 *use_phys
= alt_phys
;
706 static void send_page_end(struct gpmi_nand_data
*this,
707 const void *source
, unsigned length
,
708 void *alt_virt
, dma_addr_t alt_phys
, unsigned alt_size
,
709 const void *used_virt
, dma_addr_t used_phys
)
711 struct device
*dev
= this->dev
;
712 if (used_virt
== source
)
713 dma_unmap_single(dev
, used_phys
, length
, DMA_TO_DEVICE
);
716 static void gpmi_free_dma_buffer(struct gpmi_nand_data
*this)
718 struct device
*dev
= this->dev
;
720 if (this->page_buffer_virt
&& virt_addr_valid(this->page_buffer_virt
))
721 dma_free_coherent(dev
, this->page_buffer_size
,
722 this->page_buffer_virt
,
723 this->page_buffer_phys
);
724 kfree(this->cmd_buffer
);
725 kfree(this->data_buffer_dma
);
727 this->cmd_buffer
= NULL
;
728 this->data_buffer_dma
= NULL
;
729 this->page_buffer_virt
= NULL
;
730 this->page_buffer_size
= 0;
733 /* Allocate the DMA buffers */
734 static int gpmi_alloc_dma_buffer(struct gpmi_nand_data
*this)
736 struct bch_geometry
*geo
= &this->bch_geometry
;
737 struct device
*dev
= this->dev
;
739 /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
740 this->cmd_buffer
= kzalloc(PAGE_SIZE
, GFP_DMA
| GFP_KERNEL
);
741 if (this->cmd_buffer
== NULL
)
744 /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
745 this->data_buffer_dma
= kzalloc(PAGE_SIZE
, GFP_DMA
| GFP_KERNEL
);
746 if (this->data_buffer_dma
== NULL
)
750 * [3] Allocate the page buffer.
752 * Both the payload buffer and the auxiliary buffer must appear on
753 * 32-bit boundaries. We presume the size of the payload buffer is a
754 * power of two and is much larger than four, which guarantees the
755 * auxiliary buffer will appear on a 32-bit boundary.
757 this->page_buffer_size
= geo
->payload_size
+ geo
->auxiliary_size
;
758 this->page_buffer_virt
= dma_alloc_coherent(dev
, this->page_buffer_size
,
759 &this->page_buffer_phys
, GFP_DMA
);
760 if (!this->page_buffer_virt
)
764 /* Slice up the page buffer. */
765 this->payload_virt
= this->page_buffer_virt
;
766 this->payload_phys
= this->page_buffer_phys
;
767 this->auxiliary_virt
= this->payload_virt
+ geo
->payload_size
;
768 this->auxiliary_phys
= this->payload_phys
+ geo
->payload_size
;
772 gpmi_free_dma_buffer(this);
773 pr_err("Error allocating DMA buffers!\n");
777 static void gpmi_cmd_ctrl(struct mtd_info
*mtd
, int data
, unsigned int ctrl
)
779 struct nand_chip
*chip
= mtd
->priv
;
780 struct gpmi_nand_data
*this = chip
->priv
;
784 * Every operation begins with a command byte and a series of zero or
785 * more address bytes. These are distinguished by either the Address
786 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
787 * asserted. When MTD is ready to execute the command, it will deassert
788 * both latch enables.
790 * Rather than run a separate DMA operation for every single byte, we
791 * queue them up and run a single DMA operation for the entire series
792 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
794 if ((ctrl
& (NAND_ALE
| NAND_CLE
))) {
795 if (data
!= NAND_CMD_NONE
)
796 this->cmd_buffer
[this->command_length
++] = data
;
800 if (!this->command_length
)
803 ret
= gpmi_send_command(this);
805 pr_err("Chip: %u, Error %d\n", this->current_chip
, ret
);
807 this->command_length
= 0;
810 static int gpmi_dev_ready(struct mtd_info
*mtd
)
812 struct nand_chip
*chip
= mtd
->priv
;
813 struct gpmi_nand_data
*this = chip
->priv
;
815 return gpmi_is_ready(this, this->current_chip
);
818 static void gpmi_select_chip(struct mtd_info
*mtd
, int chipnr
)
820 struct nand_chip
*chip
= mtd
->priv
;
821 struct gpmi_nand_data
*this = chip
->priv
;
823 if ((this->current_chip
< 0) && (chipnr
>= 0))
825 else if ((this->current_chip
>= 0) && (chipnr
< 0))
828 this->current_chip
= chipnr
;
831 static void gpmi_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
833 struct nand_chip
*chip
= mtd
->priv
;
834 struct gpmi_nand_data
*this = chip
->priv
;
836 pr_debug("len is %d\n", len
);
837 this->upper_buf
= buf
;
838 this->upper_len
= len
;
840 gpmi_read_data(this);
843 static void gpmi_write_buf(struct mtd_info
*mtd
, const uint8_t *buf
, int len
)
845 struct nand_chip
*chip
= mtd
->priv
;
846 struct gpmi_nand_data
*this = chip
->priv
;
848 pr_debug("len is %d\n", len
);
849 this->upper_buf
= (uint8_t *)buf
;
850 this->upper_len
= len
;
852 gpmi_send_data(this);
855 static uint8_t gpmi_read_byte(struct mtd_info
*mtd
)
857 struct nand_chip
*chip
= mtd
->priv
;
858 struct gpmi_nand_data
*this = chip
->priv
;
859 uint8_t *buf
= this->data_buffer_dma
;
861 gpmi_read_buf(mtd
, buf
, 1);
866 * Handles block mark swapping.
867 * It can be called in swapping the block mark, or swapping it back,
868 * because the the operations are the same.
870 static void block_mark_swapping(struct gpmi_nand_data
*this,
871 void *payload
, void *auxiliary
)
873 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
878 unsigned char from_data
;
879 unsigned char from_oob
;
881 if (!this->swap_block_mark
)
885 * If control arrives here, we're swapping. Make some convenience
888 bit
= nfc_geo
->block_mark_bit_offset
;
889 p
= payload
+ nfc_geo
->block_mark_byte_offset
;
893 * Get the byte from the data area that overlays the block mark. Since
894 * the ECC engine applies its own view to the bits in the page, the
895 * physical block mark won't (in general) appear on a byte boundary in
898 from_data
= (p
[0] >> bit
) | (p
[1] << (8 - bit
));
900 /* Get the byte from the OOB. */
906 mask
= (0x1 << bit
) - 1;
907 p
[0] = (p
[0] & mask
) | (from_oob
<< bit
);
910 p
[1] = (p
[1] & mask
) | (from_oob
>> (8 - bit
));
913 static int gpmi_ecc_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
914 uint8_t *buf
, int oob_required
, int page
)
916 struct gpmi_nand_data
*this = chip
->priv
;
917 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
919 dma_addr_t payload_phys
;
920 void *auxiliary_virt
;
921 dma_addr_t auxiliary_phys
;
923 unsigned char *status
;
925 unsigned int corrected
;
928 pr_debug("page number is : %d\n", page
);
929 ret
= read_page_prepare(this, buf
, mtd
->writesize
,
930 this->payload_virt
, this->payload_phys
,
931 nfc_geo
->payload_size
,
932 &payload_virt
, &payload_phys
);
934 pr_err("Inadequate DMA buffer\n");
938 auxiliary_virt
= this->auxiliary_virt
;
939 auxiliary_phys
= this->auxiliary_phys
;
942 ret
= gpmi_read_page(this, payload_phys
, auxiliary_phys
);
943 read_page_end(this, buf
, mtd
->writesize
,
944 this->payload_virt
, this->payload_phys
,
945 nfc_geo
->payload_size
,
946 payload_virt
, payload_phys
);
948 pr_err("Error in ECC-based read: %d\n", ret
);
952 /* handle the block mark swapping */
953 block_mark_swapping(this, payload_virt
, auxiliary_virt
);
955 /* Loop over status bytes, accumulating ECC status. */
958 status
= auxiliary_virt
+ nfc_geo
->auxiliary_status_offset
;
960 for (i
= 0; i
< nfc_geo
->ecc_chunk_count
; i
++, status
++) {
961 if ((*status
== STATUS_GOOD
) || (*status
== STATUS_ERASED
))
964 if (*status
== STATUS_UNCORRECTABLE
) {
968 corrected
+= *status
;
972 * Propagate ECC status to the owning MTD only when failed or
973 * corrected times nearly reaches our ECC correction threshold.
975 if (failed
|| corrected
>= (nfc_geo
->ecc_strength
- 1)) {
976 mtd
->ecc_stats
.failed
+= failed
;
977 mtd
->ecc_stats
.corrected
+= corrected
;
982 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
983 * for details about our policy for delivering the OOB.
985 * We fill the caller's buffer with set bits, and then copy the
986 * block mark to th caller's buffer. Note that, if block mark
987 * swapping was necessary, it has already been done, so we can
988 * rely on the first byte of the auxiliary buffer to contain
991 memset(chip
->oob_poi
, ~0, mtd
->oobsize
);
992 chip
->oob_poi
[0] = ((uint8_t *) auxiliary_virt
)[0];
995 read_page_swap_end(this, buf
, mtd
->writesize
,
996 this->payload_virt
, this->payload_phys
,
997 nfc_geo
->payload_size
,
998 payload_virt
, payload_phys
);
1003 static int gpmi_ecc_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1004 const uint8_t *buf
, int oob_required
)
1006 struct gpmi_nand_data
*this = chip
->priv
;
1007 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
1008 const void *payload_virt
;
1009 dma_addr_t payload_phys
;
1010 const void *auxiliary_virt
;
1011 dma_addr_t auxiliary_phys
;
1014 pr_debug("ecc write page.\n");
1015 if (this->swap_block_mark
) {
1017 * If control arrives here, we're doing block mark swapping.
1018 * Since we can't modify the caller's buffers, we must copy them
1021 memcpy(this->payload_virt
, buf
, mtd
->writesize
);
1022 payload_virt
= this->payload_virt
;
1023 payload_phys
= this->payload_phys
;
1025 memcpy(this->auxiliary_virt
, chip
->oob_poi
,
1026 nfc_geo
->auxiliary_size
);
1027 auxiliary_virt
= this->auxiliary_virt
;
1028 auxiliary_phys
= this->auxiliary_phys
;
1030 /* Handle block mark swapping. */
1031 block_mark_swapping(this,
1032 (void *) payload_virt
, (void *) auxiliary_virt
);
1035 * If control arrives here, we're not doing block mark swapping,
1036 * so we can to try and use the caller's buffers.
1038 ret
= send_page_prepare(this,
1039 buf
, mtd
->writesize
,
1040 this->payload_virt
, this->payload_phys
,
1041 nfc_geo
->payload_size
,
1042 &payload_virt
, &payload_phys
);
1044 pr_err("Inadequate payload DMA buffer\n");
1048 ret
= send_page_prepare(this,
1049 chip
->oob_poi
, mtd
->oobsize
,
1050 this->auxiliary_virt
, this->auxiliary_phys
,
1051 nfc_geo
->auxiliary_size
,
1052 &auxiliary_virt
, &auxiliary_phys
);
1054 pr_err("Inadequate auxiliary DMA buffer\n");
1055 goto exit_auxiliary
;
1060 ret
= gpmi_send_page(this, payload_phys
, auxiliary_phys
);
1062 pr_err("Error in ECC-based write: %d\n", ret
);
1064 if (!this->swap_block_mark
) {
1065 send_page_end(this, chip
->oob_poi
, mtd
->oobsize
,
1066 this->auxiliary_virt
, this->auxiliary_phys
,
1067 nfc_geo
->auxiliary_size
,
1068 auxiliary_virt
, auxiliary_phys
);
1070 send_page_end(this, buf
, mtd
->writesize
,
1071 this->payload_virt
, this->payload_phys
,
1072 nfc_geo
->payload_size
,
1073 payload_virt
, payload_phys
);
1080 * There are several places in this driver where we have to handle the OOB and
1081 * block marks. This is the function where things are the most complicated, so
1082 * this is where we try to explain it all. All the other places refer back to
1085 * These are the rules, in order of decreasing importance:
1087 * 1) Nothing the caller does can be allowed to imperil the block mark.
1089 * 2) In read operations, the first byte of the OOB we return must reflect the
1090 * true state of the block mark, no matter where that block mark appears in
1091 * the physical page.
1093 * 3) ECC-based read operations return an OOB full of set bits (since we never
1094 * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1097 * 4) "Raw" read operations return a direct view of the physical bytes in the
1098 * page, using the conventional definition of which bytes are data and which
1099 * are OOB. This gives the caller a way to see the actual, physical bytes
1100 * in the page, without the distortions applied by our ECC engine.
1103 * What we do for this specific read operation depends on two questions:
1105 * 1) Are we doing a "raw" read, or an ECC-based read?
1107 * 2) Are we using block mark swapping or transcription?
1109 * There are four cases, illustrated by the following Karnaugh map:
1111 * | Raw | ECC-based |
1112 * -------------+-------------------------+-------------------------+
1113 * | Read the conventional | |
1114 * | OOB at the end of the | |
1115 * Swapping | page and return it. It | |
1116 * | contains exactly what | |
1117 * | we want. | Read the block mark and |
1118 * -------------+-------------------------+ return it in a buffer |
1119 * | Read the conventional | full of set bits. |
1120 * | OOB at the end of the | |
1121 * | page and also the block | |
1122 * Transcribing | mark in the metadata. | |
1123 * | Copy the block mark | |
1124 * | into the first byte of | |
1126 * -------------+-------------------------+-------------------------+
1128 * Note that we break rule #4 in the Transcribing/Raw case because we're not
1129 * giving an accurate view of the actual, physical bytes in the page (we're
1130 * overwriting the block mark). That's OK because it's more important to follow
1133 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1134 * easy. When reading a page, for example, the NAND Flash MTD code calls our
1135 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1136 * ECC-based or raw view of the page is implicit in which function it calls
1137 * (there is a similar pair of ECC-based/raw functions for writing).
1139 * FIXME: The following paragraph is incorrect, now that there exist
1140 * ecc.read_oob_raw and ecc.write_oob_raw functions.
1142 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
1143 * ECC-based/raw functions for reading or or writing the OOB. The fact that the
1144 * caller wants an ECC-based or raw view of the page is not propagated down to
1147 static int gpmi_ecc_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1150 struct gpmi_nand_data
*this = chip
->priv
;
1152 pr_debug("page number is %d\n", page
);
1153 /* clear the OOB buffer */
1154 memset(chip
->oob_poi
, ~0, mtd
->oobsize
);
1156 /* Read out the conventional OOB. */
1157 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, mtd
->writesize
, page
);
1158 chip
->read_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
1161 * Now, we want to make sure the block mark is correct. In the
1162 * Swapping/Raw case, we already have it. Otherwise, we need to
1163 * explicitly read it.
1165 if (!this->swap_block_mark
) {
1166 /* Read the block mark into the first byte of the OOB buffer. */
1167 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, 0, page
);
1168 chip
->oob_poi
[0] = chip
->read_byte(mtd
);
1175 gpmi_ecc_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
, int page
)
1178 * The BCH will use all the (page + oob).
1179 * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob.
1180 * But it can not stop some ioctls such MEMWRITEOOB which uses
1181 * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit
1187 static int gpmi_block_markbad(struct mtd_info
*mtd
, loff_t ofs
)
1189 struct nand_chip
*chip
= mtd
->priv
;
1190 struct gpmi_nand_data
*this = chip
->priv
;
1192 uint8_t *block_mark
;
1193 int column
, page
, status
, chipnr
;
1195 /* Get block number */
1196 block
= (int)(ofs
>> chip
->bbt_erase_shift
);
1198 chip
->bbt
[block
>> 2] |= 0x01 << ((block
& 0x03) << 1);
1200 /* Do we have a flash based bad block table ? */
1201 if (chip
->bbt_options
& NAND_BBT_USE_FLASH
)
1202 ret
= nand_update_bbt(mtd
, ofs
);
1204 chipnr
= (int)(ofs
>> chip
->chip_shift
);
1205 chip
->select_chip(mtd
, chipnr
);
1207 column
= this->swap_block_mark
? mtd
->writesize
: 0;
1209 /* Write the block mark. */
1210 block_mark
= this->data_buffer_dma
;
1211 block_mark
[0] = 0; /* bad block marker */
1213 /* Shift to get page */
1214 page
= (int)(ofs
>> chip
->page_shift
);
1216 chip
->cmdfunc(mtd
, NAND_CMD_SEQIN
, column
, page
);
1217 chip
->write_buf(mtd
, block_mark
, 1);
1218 chip
->cmdfunc(mtd
, NAND_CMD_PAGEPROG
, -1, -1);
1220 status
= chip
->waitfunc(mtd
, chip
);
1221 if (status
& NAND_STATUS_FAIL
)
1224 chip
->select_chip(mtd
, -1);
1227 mtd
->ecc_stats
.badblocks
++;
1232 static int nand_boot_set_geometry(struct gpmi_nand_data
*this)
1234 struct boot_rom_geometry
*geometry
= &this->rom_geometry
;
1237 * Set the boot block stride size.
1239 * In principle, we should be reading this from the OTP bits, since
1240 * that's where the ROM is going to get it. In fact, we don't have any
1241 * way to read the OTP bits, so we go with the default and hope for the
1244 geometry
->stride_size_in_pages
= 64;
1247 * Set the search area stride exponent.
1249 * In principle, we should be reading this from the OTP bits, since
1250 * that's where the ROM is going to get it. In fact, we don't have any
1251 * way to read the OTP bits, so we go with the default and hope for the
1254 geometry
->search_area_stride_exponent
= 2;
1258 static const char *fingerprint
= "STMP";
1259 static int mx23_check_transcription_stamp(struct gpmi_nand_data
*this)
1261 struct boot_rom_geometry
*rom_geo
= &this->rom_geometry
;
1262 struct device
*dev
= this->dev
;
1263 struct mtd_info
*mtd
= &this->mtd
;
1264 struct nand_chip
*chip
= &this->nand
;
1265 unsigned int search_area_size_in_strides
;
1266 unsigned int stride
;
1268 uint8_t *buffer
= chip
->buffers
->databuf
;
1269 int saved_chip_number
;
1270 int found_an_ncb_fingerprint
= false;
1272 /* Compute the number of strides in a search area. */
1273 search_area_size_in_strides
= 1 << rom_geo
->search_area_stride_exponent
;
1275 saved_chip_number
= this->current_chip
;
1276 chip
->select_chip(mtd
, 0);
1279 * Loop through the first search area, looking for the NCB fingerprint.
1281 dev_dbg(dev
, "Scanning for an NCB fingerprint...\n");
1283 for (stride
= 0; stride
< search_area_size_in_strides
; stride
++) {
1284 /* Compute the page addresses. */
1285 page
= stride
* rom_geo
->stride_size_in_pages
;
1287 dev_dbg(dev
, "Looking for a fingerprint in page 0x%x\n", page
);
1290 * Read the NCB fingerprint. The fingerprint is four bytes long
1291 * and starts in the 12th byte of the page.
1293 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, 12, page
);
1294 chip
->read_buf(mtd
, buffer
, strlen(fingerprint
));
1296 /* Look for the fingerprint. */
1297 if (!memcmp(buffer
, fingerprint
, strlen(fingerprint
))) {
1298 found_an_ncb_fingerprint
= true;
1304 chip
->select_chip(mtd
, saved_chip_number
);
1306 if (found_an_ncb_fingerprint
)
1307 dev_dbg(dev
, "\tFound a fingerprint\n");
1309 dev_dbg(dev
, "\tNo fingerprint found\n");
1310 return found_an_ncb_fingerprint
;
1313 /* Writes a transcription stamp. */
1314 static int mx23_write_transcription_stamp(struct gpmi_nand_data
*this)
1316 struct device
*dev
= this->dev
;
1317 struct boot_rom_geometry
*rom_geo
= &this->rom_geometry
;
1318 struct mtd_info
*mtd
= &this->mtd
;
1319 struct nand_chip
*chip
= &this->nand
;
1320 unsigned int block_size_in_pages
;
1321 unsigned int search_area_size_in_strides
;
1322 unsigned int search_area_size_in_pages
;
1323 unsigned int search_area_size_in_blocks
;
1325 unsigned int stride
;
1327 uint8_t *buffer
= chip
->buffers
->databuf
;
1328 int saved_chip_number
;
1331 /* Compute the search area geometry. */
1332 block_size_in_pages
= mtd
->erasesize
/ mtd
->writesize
;
1333 search_area_size_in_strides
= 1 << rom_geo
->search_area_stride_exponent
;
1334 search_area_size_in_pages
= search_area_size_in_strides
*
1335 rom_geo
->stride_size_in_pages
;
1336 search_area_size_in_blocks
=
1337 (search_area_size_in_pages
+ (block_size_in_pages
- 1)) /
1338 block_size_in_pages
;
1340 dev_dbg(dev
, "Search Area Geometry :\n");
1341 dev_dbg(dev
, "\tin Blocks : %u\n", search_area_size_in_blocks
);
1342 dev_dbg(dev
, "\tin Strides: %u\n", search_area_size_in_strides
);
1343 dev_dbg(dev
, "\tin Pages : %u\n", search_area_size_in_pages
);
1345 /* Select chip 0. */
1346 saved_chip_number
= this->current_chip
;
1347 chip
->select_chip(mtd
, 0);
1349 /* Loop over blocks in the first search area, erasing them. */
1350 dev_dbg(dev
, "Erasing the search area...\n");
1352 for (block
= 0; block
< search_area_size_in_blocks
; block
++) {
1353 /* Compute the page address. */
1354 page
= block
* block_size_in_pages
;
1356 /* Erase this block. */
1357 dev_dbg(dev
, "\tErasing block 0x%x\n", block
);
1358 chip
->cmdfunc(mtd
, NAND_CMD_ERASE1
, -1, page
);
1359 chip
->cmdfunc(mtd
, NAND_CMD_ERASE2
, -1, -1);
1361 /* Wait for the erase to finish. */
1362 status
= chip
->waitfunc(mtd
, chip
);
1363 if (status
& NAND_STATUS_FAIL
)
1364 dev_err(dev
, "[%s] Erase failed.\n", __func__
);
1367 /* Write the NCB fingerprint into the page buffer. */
1368 memset(buffer
, ~0, mtd
->writesize
);
1369 memset(chip
->oob_poi
, ~0, mtd
->oobsize
);
1370 memcpy(buffer
+ 12, fingerprint
, strlen(fingerprint
));
1372 /* Loop through the first search area, writing NCB fingerprints. */
1373 dev_dbg(dev
, "Writing NCB fingerprints...\n");
1374 for (stride
= 0; stride
< search_area_size_in_strides
; stride
++) {
1375 /* Compute the page addresses. */
1376 page
= stride
* rom_geo
->stride_size_in_pages
;
1378 /* Write the first page of the current stride. */
1379 dev_dbg(dev
, "Writing an NCB fingerprint in page 0x%x\n", page
);
1380 chip
->cmdfunc(mtd
, NAND_CMD_SEQIN
, 0x00, page
);
1381 chip
->ecc
.write_page_raw(mtd
, chip
, buffer
, 0);
1382 chip
->cmdfunc(mtd
, NAND_CMD_PAGEPROG
, -1, -1);
1384 /* Wait for the write to finish. */
1385 status
= chip
->waitfunc(mtd
, chip
);
1386 if (status
& NAND_STATUS_FAIL
)
1387 dev_err(dev
, "[%s] Write failed.\n", __func__
);
1390 /* Deselect chip 0. */
1391 chip
->select_chip(mtd
, saved_chip_number
);
1395 static int mx23_boot_init(struct gpmi_nand_data
*this)
1397 struct device
*dev
= this->dev
;
1398 struct nand_chip
*chip
= &this->nand
;
1399 struct mtd_info
*mtd
= &this->mtd
;
1400 unsigned int block_count
;
1409 * If control arrives here, we can't use block mark swapping, which
1410 * means we're forced to use transcription. First, scan for the
1411 * transcription stamp. If we find it, then we don't have to do
1412 * anything -- the block marks are already transcribed.
1414 if (mx23_check_transcription_stamp(this))
1418 * If control arrives here, we couldn't find a transcription stamp, so
1419 * so we presume the block marks are in the conventional location.
1421 dev_dbg(dev
, "Transcribing bad block marks...\n");
1423 /* Compute the number of blocks in the entire medium. */
1424 block_count
= chip
->chipsize
>> chip
->phys_erase_shift
;
1427 * Loop over all the blocks in the medium, transcribing block marks as
1430 for (block
= 0; block
< block_count
; block
++) {
1432 * Compute the chip, page and byte addresses for this block's
1433 * conventional mark.
1435 chipnr
= block
>> (chip
->chip_shift
- chip
->phys_erase_shift
);
1436 page
= block
<< (chip
->phys_erase_shift
- chip
->page_shift
);
1437 byte
= block
<< chip
->phys_erase_shift
;
1439 /* Send the command to read the conventional block mark. */
1440 chip
->select_chip(mtd
, chipnr
);
1441 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, mtd
->writesize
, page
);
1442 block_mark
= chip
->read_byte(mtd
);
1443 chip
->select_chip(mtd
, -1);
1446 * Check if the block is marked bad. If so, we need to mark it
1447 * again, but this time the result will be a mark in the
1448 * location where we transcribe block marks.
1450 if (block_mark
!= 0xff) {
1451 dev_dbg(dev
, "Transcribing mark in block %u\n", block
);
1452 ret
= chip
->block_markbad(mtd
, byte
);
1454 dev_err(dev
, "Failed to mark block bad with "
1459 /* Write the stamp that indicates we've transcribed the block marks. */
1460 mx23_write_transcription_stamp(this);
/*
 * Boot ROM helper initialization: set up the ROM search geometry and run any
 * SoC-specific boot setup before the BBT scan.
 */
static int nand_boot_init(struct gpmi_nand_data *this)
{
	nand_boot_set_geometry(this);

	/* This is ROM arch-specific initilization before the BBT scanning. */
	if (GPMI_IS_MX23(this))
		return mx23_boot_init(this);

	return 0;
}
/*
 * Configure the BCH geometry for the discovered chip and (re)allocate the
 * DMA buffers to match the real page/OOB sizes.
 */
static int gpmi_set_geometry(struct gpmi_nand_data *this)
{
	int ret;

	/* Free the temporary DMA memory for reading ID. */
	gpmi_free_dma_buffer(this);

	/* Set up the NFC geometry which is used by BCH. */
	ret = bch_set_geometry(this);
	if (ret) {
		pr_err("Error setting BCH geometry : %d\n", ret);
		return ret;
	}

	/* Alloc the new DMA buffers according to the pagesize and oobsize */
	return gpmi_alloc_dma_buffer(this);
}
1492 static int gpmi_pre_bbt_scan(struct gpmi_nand_data
*this)
1496 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
1497 if (GPMI_IS_MX23(this))
1498 this->swap_block_mark
= false;
1500 this->swap_block_mark
= true;
1502 /* Set up the medium geometry */
1503 ret
= gpmi_set_geometry(this);
1507 /* Adjust the ECC strength according to the chip. */
1508 this->nand
.ecc
.strength
= this->bch_geometry
.ecc_strength
;
1509 this->mtd
.ecc_strength
= this->bch_geometry
.ecc_strength
;
1510 this->mtd
.bitflip_threshold
= this->bch_geometry
.ecc_strength
;
1512 /* NAND boot init, depends on the gpmi_set_geometry(). */
1513 return nand_boot_init(this);
1516 static int gpmi_scan_bbt(struct mtd_info
*mtd
)
1518 struct nand_chip
*chip
= mtd
->priv
;
1519 struct gpmi_nand_data
*this = chip
->priv
;
1522 /* Prepare for the BBT scan. */
1523 ret
= gpmi_pre_bbt_scan(this);
1528 * Can we enable the extra features? such as EDO or Sync mode.
1530 * We do not check the return value now. That's means if we fail in
1531 * enable the extra features, we still can run in the normal way.
1533 gpmi_extra_init(this);
1535 /* use the default BBT implementation */
1536 return nand_default_bbt(mtd
);
1539 static void gpmi_nfc_exit(struct gpmi_nand_data
*this)
1541 nand_release(&this->mtd
);
1542 gpmi_free_dma_buffer(this);
1545 static int gpmi_nfc_init(struct gpmi_nand_data
*this)
1547 struct mtd_info
*mtd
= &this->mtd
;
1548 struct nand_chip
*chip
= &this->nand
;
1549 struct mtd_part_parser_data ppdata
= {};
1552 /* init current chip */
1553 this->current_chip
= -1;
1555 /* init the MTD data structures */
1557 mtd
->name
= "gpmi-nand";
1558 mtd
->owner
= THIS_MODULE
;
1560 /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
1562 chip
->select_chip
= gpmi_select_chip
;
1563 chip
->cmd_ctrl
= gpmi_cmd_ctrl
;
1564 chip
->dev_ready
= gpmi_dev_ready
;
1565 chip
->read_byte
= gpmi_read_byte
;
1566 chip
->read_buf
= gpmi_read_buf
;
1567 chip
->write_buf
= gpmi_write_buf
;
1568 chip
->ecc
.read_page
= gpmi_ecc_read_page
;
1569 chip
->ecc
.write_page
= gpmi_ecc_write_page
;
1570 chip
->ecc
.read_oob
= gpmi_ecc_read_oob
;
1571 chip
->ecc
.write_oob
= gpmi_ecc_write_oob
;
1572 chip
->scan_bbt
= gpmi_scan_bbt
;
1573 chip
->badblock_pattern
= &gpmi_bbt_descr
;
1574 chip
->block_markbad
= gpmi_block_markbad
;
1575 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
1576 chip
->ecc
.mode
= NAND_ECC_HW
;
1578 chip
->ecc
.strength
= 8;
1579 chip
->ecc
.layout
= &gpmi_hw_ecclayout
;
1580 if (of_get_nand_on_flash_bbt(this->dev
->of_node
))
1581 chip
->bbt_options
|= NAND_BBT_USE_FLASH
| NAND_BBT_NO_OOB
;
1583 /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
1584 this->bch_geometry
.payload_size
= 1024;
1585 this->bch_geometry
.auxiliary_size
= 128;
1586 ret
= gpmi_alloc_dma_buffer(this);
1590 ret
= nand_scan(mtd
, 1);
1592 pr_err("Chip scan failed\n");
1596 ppdata
.of_node
= this->pdev
->dev
.of_node
;
1597 ret
= mtd_device_parse_register(mtd
, NULL
, &ppdata
, NULL
, 0);
1603 gpmi_nfc_exit(this);
1607 static const struct platform_device_id gpmi_ids
[] = {
1608 { .name
= "imx23-gpmi-nand", .driver_data
= IS_MX23
, },
1609 { .name
= "imx28-gpmi-nand", .driver_data
= IS_MX28
, },
1610 { .name
= "imx6q-gpmi-nand", .driver_data
= IS_MX6Q
, },
1614 static const struct of_device_id gpmi_nand_id_table
[] = {
1616 .compatible
= "fsl,imx23-gpmi-nand",
1617 .data
= (void *)&gpmi_ids
[IS_MX23
]
1619 .compatible
= "fsl,imx28-gpmi-nand",
1620 .data
= (void *)&gpmi_ids
[IS_MX28
]
1622 .compatible
= "fsl,imx6q-gpmi-nand",
1623 .data
= (void *)&gpmi_ids
[IS_MX6Q
]
1626 MODULE_DEVICE_TABLE(of
, gpmi_nand_id_table
);
1628 static int gpmi_nand_probe(struct platform_device
*pdev
)
1630 struct gpmi_nand_data
*this;
1631 const struct of_device_id
*of_id
;
1634 of_id
= of_match_device(gpmi_nand_id_table
, &pdev
->dev
);
1636 pdev
->id_entry
= of_id
->data
;
1638 pr_err("Failed to find the right device id.\n");
1642 this = kzalloc(sizeof(*this), GFP_KERNEL
);
1644 pr_err("Failed to allocate per-device memory\n");
1648 platform_set_drvdata(pdev
, this);
1650 this->dev
= &pdev
->dev
;
1652 ret
= acquire_resources(this);
1654 goto exit_acquire_resources
;
1656 ret
= init_hardware(this);
1660 ret
= gpmi_nfc_init(this);
1664 dev_info(this->dev
, "driver registered.\n");
1669 release_resources(this);
1670 exit_acquire_resources
:
1671 platform_set_drvdata(pdev
, NULL
);
1673 dev_err(this->dev
, "driver registration failed: %d\n", ret
);
1678 static int gpmi_nand_remove(struct platform_device
*pdev
)
1680 struct gpmi_nand_data
*this = platform_get_drvdata(pdev
);
1682 gpmi_nfc_exit(this);
1683 release_resources(this);
1684 platform_set_drvdata(pdev
, NULL
);
1689 static struct platform_driver gpmi_nand_driver
= {
1691 .name
= "gpmi-nand",
1692 .of_match_table
= gpmi_nand_id_table
,
1694 .probe
= gpmi_nand_probe
,
1695 .remove
= gpmi_nand_remove
,
1696 .id_table
= gpmi_ids
,
1698 module_platform_driver(gpmi_nand_driver
);
1700 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1701 MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
1702 MODULE_LICENSE("GPL");