/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef __MTD_MTD_H__
#define __MTD_MTD_H__

#include <linux/types.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/device.h>

#include <mtd/mtd-abi.h>

#include <asm/div64.h>

#define MTD_ERASE_PENDING       0x01
#define MTD_ERASING             0x02
#define MTD_ERASE_SUSPEND       0x04
#define MTD_ERASE_DONE          0x08
#define MTD_ERASE_FAILED        0x10

#define MTD_FAIL_ADDR_UNKNOWN   -1LL

/*
 * If the erase fails, fail_addr might indicate exactly which block failed. If
 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
 * or was not specific to any particular block.
 */
struct erase_info {
        struct mtd_info *mtd;
        uint64_t addr;
        uint64_t len;
        uint64_t fail_addr;
        u_long time;
        u_long retries;
        unsigned dev;
        unsigned cell;
        void (*callback) (struct erase_info *self);
        u_long priv;
        u_char state;
        struct erase_info *next;
};
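
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): erasing a single erase block and checking fail_addr as described
 * above.  Identifiers such as my_mtd and block_ofs are placeholders.  The
 * sketch assumes the driver completes the erase before mtd_erase() returns,
 * as typical NAND/CFI drivers do; fully asynchronous callers would instead
 * set ->callback and wait for ->state to become MTD_ERASE_DONE or
 * MTD_ERASE_FAILED.
 *
 *      struct erase_info ei = { 0 };
 *      int ret;
 *
 *      ei.mtd  = my_mtd;
 *      ei.addr = block_ofs;                 // erasesize-aligned offset
 *      ei.len  = my_mtd->erasesize;         // erase exactly one block
 *
 *      ret = mtd_erase(my_mtd, &ei);
 *      if (ret) {
 *              if (ei.fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 *                      pr_err("erase failed at 0x%llx\n",
 *                             (unsigned long long)ei.fail_addr);
 *              else
 *                      pr_err("erase failed: %d\n", ret);
 *      }
 */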

struct mtd_erase_region_info {
        uint64_t offset;         /* At which this region starts, from the beginning of the MTD */
        uint32_t erasesize;      /* For this region */
        uint32_t numblocks;      /* Number of blocks of erasesize in this region */
        unsigned long *lockmap;  /* If keeping bitmap of locks */
};

/**
 * struct mtd_oob_ops - oob operation operands
 * @mode:       operation mode
 *
 * @len:        number of data bytes to write/read
 *
 * @retlen:     number of data bytes written/read
 *
 * @ooblen:     number of oob bytes to write/read
 * @oobretlen:  number of oob bytes written/read
 * @ooboffs:    offset of oob data in the oob area (only relevant when
 *              mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
 * @datbuf:     data buffer - if NULL only oob data are read/written
 * @oobbuf:     oob data buffer
 *
 * Note that it is allowed to read more than one OOB area in one go, but not
 * to write.  The interface assumes that an OOB write request programs only
 * one page's OOB area.
 */
struct mtd_oob_ops {
        unsigned int    mode;
        size_t          len;
        size_t          retlen;
        size_t          ooblen;
        size_t          oobretlen;
        uint32_t        ooboffs;
        uint8_t         *datbuf;
        uint8_t         *oobbuf;
};
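
/*
 * Illustrative usage sketch (editor's addition): reading one page of data
 * together with its OOB area via mtd_read_oob(), declared further below.
 * my_mtd, page_ofs, data and oob are placeholder names; the buffers are
 * assumed to hold at least writesize and oobavail bytes respectively.
 *
 *      struct mtd_oob_ops ops = { 0 };
 *      int ret;
 *
 *      ops.mode   = MTD_OPS_AUTO_OOB;          // only the ECC-free OOB bytes
 *      ops.datbuf = data;
 *      ops.len    = my_mtd->writesize;         // one page of main data
 *      ops.oobbuf = oob;
 *      ops.ooblen = my_mtd->oobavail;          // plus its free spare bytes
 *
 *      ret = mtd_read_oob(my_mtd, page_ofs, &ops);
 *      if (ret && !mtd_is_bitflip(ret))        // -EUCLEAN = corrected bitflips
 *              return ret;
 *      // ops.retlen and ops.oobretlen now hold the byte counts actually read
 */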

#define MTD_MAX_OOBFREE_ENTRIES_LARGE   32
#define MTD_MAX_ECCPOS_ENTRIES_LARGE    640
/*
 * Internal ECC layout control structure. For historical reasons, there is a
 * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
 * for export to user-space via the ECCGETLAYOUT ioctl.
 * nand_ecclayout should be expandable in the future simply by increasing the
 * macros above.
 */
struct nand_ecclayout {
        __u32 eccbytes;
        __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
        __u32 oobavail;
        struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
};

struct module;  /* only needed for owner field in mtd_info */

struct mtd_info {
        u_char type;
        uint32_t flags;
        uint64_t size;   // Total size of the MTD

        /* "Major" erase size for the device. Naïve users may take this
         * to be the only erase size available, or may use the more detailed
         * information below if they desire
         */
        uint32_t erasesize;
        /* Minimal writable flash unit size. In case of NOR flash it is 1 (even
         * though individual bits can be cleared), in case of NAND flash it is
         * one NAND page (or half, or one-fourth of it), in case of ECC-ed NOR
         * it is of ECC block size, etc. It is illegal to have writesize = 0.
         * Any driver registering a struct mtd_info must ensure a writesize of
         * 1 or larger.
         */
        uint32_t writesize;

        /*
         * Size of the write buffer used by the MTD. MTD devices having a write
         * buffer can write multiple writesize chunks at a time. E.g. while
         * writing 4 * writesize bytes to a device with 2 * writesize bytes
         * buffer the MTD driver can (but doesn't have to) do 2 writesize
         * operations, but not 4. Currently, all NANDs have writebufsize
         * equivalent to writesize (NAND page size). Some NOR flashes do have
         * writebufsize greater than writesize.
         */
        uint32_t writebufsize;

        uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
        uint32_t oobavail;  // Available OOB bytes per block

        /*
         * If erasesize is a power of 2 then the shift is stored in
         * erasesize_shift, otherwise erasesize_shift is zero. Ditto writesize.
         */
        unsigned int erasesize_shift;
        unsigned int writesize_shift;
        /* Masks based on erasesize_shift and writesize_shift */
        unsigned int erasesize_mask;
        unsigned int writesize_mask;

        /*
         * read ops return -EUCLEAN if max number of bitflips corrected on any
         * one region comprising an ecc step equals or exceeds this value.
         * Settable by driver, else defaults to ecc_strength. User can override
         * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed;
         * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
         */
        unsigned int bitflip_threshold;

        // Kernel-only stuff starts here.
        const char *name;
        int index;

        /* ECC layout structure pointer - read only! */
        struct nand_ecclayout *ecclayout;

        /* the ECC step size */
        unsigned int ecc_step_size;

        /* max number of correctable bit errors per ECC step */
        unsigned int ecc_strength;

        /* Data for variable erase regions. If numeraseregions is zero,
         * it means that the whole device has erasesize as given above.
         */
        int numeraseregions;
        struct mtd_erase_region_info *eraseregions;

        /*
         * Do not call via these pointers, use corresponding mtd_*()
         * wrappers instead.
         */
        int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
        int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
                       size_t *retlen, void **virt, resource_size_t *phys);
        int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
        unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
                                             unsigned long len,
                                             unsigned long offset,
                                             unsigned long flags);
        int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
                      size_t *retlen, u_char *buf);
        int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
                       size_t *retlen, const u_char *buf);
        int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
                             size_t *retlen, const u_char *buf);
        int (*_read_oob) (struct mtd_info *mtd, loff_t from,
                          struct mtd_oob_ops *ops);
        int (*_write_oob) (struct mtd_info *mtd, loff_t to,
                           struct mtd_oob_ops *ops);
        int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
                                    size_t *retlen, struct otp_info *buf);
        int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
                                    size_t len, size_t *retlen, u_char *buf);
        int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
                                    size_t *retlen, struct otp_info *buf);
        int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
                                    size_t len, size_t *retlen, u_char *buf);
        int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
                                     size_t len, size_t *retlen, u_char *buf);
        int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
                                    size_t len);
        int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
                        unsigned long count, loff_t to, size_t *retlen);
        void (*_sync) (struct mtd_info *mtd);
        int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
        int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
        int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
        int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
        int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
        int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
        int (*_suspend) (struct mtd_info *mtd);
        void (*_resume) (struct mtd_info *mtd);
        void (*_reboot) (struct mtd_info *mtd);
        /*
         * If the driver is something smart, like UBI, it may need to maintain
         * its own reference counting. The functions below are for the driver's
         * use only.
         */
        int (*_get_device) (struct mtd_info *mtd);
        void (*_put_device) (struct mtd_info *mtd);

        /* Backing device capabilities for this device
         * - provides mmap capabilities
         */
        struct backing_dev_info *backing_dev_info;

        struct notifier_block reboot_notifier;  /* default mode before reboot */

        /* ECC status information */
        struct mtd_ecc_stats ecc_stats;
        /* Subpage shift (NAND) */
        int subpage_sft;

        void *priv;

        struct module *owner;
        struct device dev;
        int usecount;
};

static inline void mtd_set_of_node(struct mtd_info *mtd,
                                   struct device_node *np)
{
        mtd->dev.of_node = np;
}

static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
{
        return mtd->dev.of_node;
}

int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
              void **virt, resource_size_t *phys);
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
                                    unsigned long offset, unsigned long flags);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
             u_char *buf);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
              const u_char *buf);
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
                    const u_char *buf);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);

static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
                                struct mtd_oob_ops *ops)
{
        ops->retlen = ops->oobretlen = 0;
        if (!mtd->_write_oob)
                return -EOPNOTSUPP;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        return mtd->_write_oob(mtd, to, ops);
}

int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
                           struct otp_info *buf);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
                           size_t *retlen, u_char *buf);
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
                           struct otp_info *buf);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
                           size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
                            size_t *retlen, u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);

int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
               unsigned long count, loff_t to, size_t *retlen);

static inline void mtd_sync(struct mtd_info *mtd)
{
        if (mtd->_sync)
                mtd->_sync(mtd);
}

int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
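
/*
 * Illustrative usage sketch (editor's addition): scanning a whole device for
 * factory- or runtime-marked bad blocks with mtd_block_isbad().  my_mtd is a
 * placeholder; mtd_div_by_eb(), used for the final count, is defined further
 * below in this header.
 *
 *      uint64_t ofs;
 *      uint32_t bad = 0;
 *      int ret;
 *
 *      for (ofs = 0; ofs < my_mtd->size; ofs += my_mtd->erasesize) {
 *              ret = mtd_block_isbad(my_mtd, ofs);
 *              if (ret < 0)
 *                      return ret;     // e.g. -EINVAL for an out-of-range offset
 *              if (ret)
 *                      bad++;          // non-zero means the block is marked bad
 *      }
 *      pr_info("%u of %u blocks are bad\n", bad,
 *              mtd_div_by_eb(my_mtd->size, my_mtd));
 */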

static inline int mtd_suspend(struct mtd_info *mtd)
{
        return mtd->_suspend ? mtd->_suspend(mtd) : 0;
}

static inline void mtd_resume(struct mtd_info *mtd)
{
        if (mtd->_resume)
                mtd->_resume(mtd);
}

static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
        if (mtd->erasesize_shift)
                return sz >> mtd->erasesize_shift;
        do_div(sz, mtd->erasesize);
        return sz;
}

static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
{
        if (mtd->erasesize_shift)
                return sz & mtd->erasesize_mask;
        return do_div(sz, mtd->erasesize);
}

static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
        if (mtd->writesize_shift)
                return sz >> mtd->writesize_shift;
        do_div(sz, mtd->writesize);
        return sz;
}

static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
        if (mtd->writesize_shift)
                return sz & mtd->writesize_mask;
        return do_div(sz, mtd->writesize);
}
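
/*
 * Illustrative usage sketch (editor's addition): the helpers above hide the
 * 64-bit division.  For example, rounding an arbitrary offset down to the
 * start of its erase block (my_mtd and ofs are placeholders):
 *
 *      uint32_t block = mtd_div_by_eb(ofs, my_mtd);    // erase block index
 *      uint32_t rem   = mtd_mod_by_eb(ofs, my_mtd);    // offset inside that block
 *      uint64_t start = ofs - rem;                     // block-aligned start address
 *
 * On devices with a power-of-two erasesize this reduces to shifts and masks;
 * otherwise do_div() is used on a local copy of the size argument.
 */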

static inline int mtd_has_oob(const struct mtd_info *mtd)
{
        return mtd->_read_oob && mtd->_write_oob;
}

static inline int mtd_type_is_nand(const struct mtd_info *mtd)
{
        return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
}

static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
        return !!mtd->_block_isbad;
}

/* Kernel-side ioctl definitions */

struct mtd_partition;
struct mtd_part_parser_data;

extern int mtd_device_parse_register(struct mtd_info *mtd,
                                     const char * const *part_probe_types,
                                     struct mtd_part_parser_data *parser_data,
                                     const struct mtd_partition *defparts,
                                     int defnr_parts);
#define mtd_device_register(master, parts, nr_parts) \
        mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
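
/*
 * Illustrative usage sketch (editor's addition): a flash driver typically
 * fills in a struct mtd_info in its probe routine and hands it to
 * mtd_device_parse_register(), optionally with a fallback partition table.
 * struct mtd_partition itself is defined in <linux/mtd/partitions.h>, and
 * my_probe_types, my_parts and my_mtd are placeholder names.
 *
 *      static const char * const my_probe_types[] = {
 *              "cmdlinepart", "ofpart", NULL
 *      };
 *      static const struct mtd_partition my_parts[] = {
 *              { .name = "boot", .offset = 0,                  .size = SZ_1M },
 *              { .name = "data", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
 *      };
 *
 *      // in the driver's probe():
 *      ret = mtd_device_parse_register(my_mtd, my_probe_types, NULL,
 *                                      my_parts, ARRAY_SIZE(my_parts));
 *
 *      // and in its remove():
 *      ret = mtd_device_unregister(my_mtd);
 */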


struct mtd_notifier {
        void (*add)(struct mtd_info *mtd);
        void (*remove)(struct mtd_info *mtd);
        struct list_head list;
};


extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
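
/*
 * Illustrative usage sketch (editor's addition): an MTD "user" (for example a
 * character or block interface layer) can be told about every MTD device that
 * comes and goes by registering a struct mtd_notifier.  The callback names
 * below are placeholders.
 *
 *      static void my_mtd_add(struct mtd_info *mtd)
 *      {
 *              pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *      }
 *
 *      static void my_mtd_remove(struct mtd_info *mtd)
 *      {
 *              pr_info("mtd%d is going away\n", mtd->index);
 *      }
 *
 *      static struct mtd_notifier my_notifier = {
 *              .add    = my_mtd_add,
 *              .remove = my_mtd_remove,
 *      };
 *
 *      register_mtd_user(&my_notifier);    // ->add() is replayed for existing devices
 *      ...
 *      unregister_mtd_user(&my_notifier);  // ->remove() is called for remaining devices
 */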
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);

void mtd_erase_callback(struct erase_info *instr);

static inline int mtd_is_bitflip(int err) {
        return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err) {
        return err == -EBADMSG;
}

static inline int mtd_is_bitflip_or_eccerr(int err) {
        return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}
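
/*
 * Illustrative usage sketch (editor's addition): interpreting the return
 * value of mtd_read() with the helpers above.  -EUCLEAN reports corrected
 * bitflips (the data is valid, but the caller may want to scrub the block;
 * whether it is raised at all depends on bitflip_threshold), while -EBADMSG
 * reports an uncorrectable ECC error.  my_mtd, ofs, len and buf are
 * placeholders.
 *
 *      size_t retlen;
 *      bool scrub_needed = false;
 *      int ret;
 *
 *      ret = mtd_read(my_mtd, ofs, len, &retlen, buf);
 *      if (mtd_is_eccerr(ret))
 *              return ret;                  // data in buf is unreliable
 *      if (mtd_is_bitflip(ret))
 *              scrub_needed = true;         // data is good, consider rewriting the block
 *      else if (ret)
 *              return ret;                  // some other I/O error
 */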

unsigned mtd_mmap_capabilities(struct mtd_info *mtd);

#endif /* __MTD_MTD_H__ */