/*
 * drivers/mtd/devices/goldfish_nand.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
18 | ||
19 | #include <linux/io.h> | |
20 | #include <linux/device.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/slab.h> | |
23 | #include <linux/ioport.h> | |
24 | #include <linux/vmalloc.h> | |
8e404fff AH |
25 | #include <linux/mtd/mtd.h> |
26 | #include <linux/platform_device.h> | |
67c20cfb | 27 | #include <linux/mutex.h> |
f6279717 | 28 | #include <linux/goldfish.h> |
8e404fff AH |
29 | #include <asm/div64.h> |
30 | ||
31 | #include "goldfish_nand_reg.h" | |
32 | ||
33 | struct goldfish_nand { | |
2c507415 | 34 | /* lock protects access to the device registers */ |
67c20cfb | 35 | struct mutex lock; |
8e404fff AH |
36 | unsigned char __iomem *base; |
37 | struct cmd_params *cmd_params; | |
38 | size_t mtd_count; | |
39 | struct mtd_info mtd[0]; | |
40 | }; | |
41 | ||
/*
 * Issue a NAND command through the shared cmd_params block instead of
 * the individual address/size registers.
 *
 * Returns 0 on success, storing the device's result in *rv, or -1 when
 * no parameter block is set up or @cmd has no parameterized variant
 * (the caller then falls back to the per-register path).
 *
 * Caller must hold nand->lock (see goldfish_nand_cmd()).
 */
static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd,
					 enum nand_cmd cmd, u64 addr, u32 len,
					 void *ptr, u32 *rv)
{
	u32 cmdp;
	struct goldfish_nand *nand = mtd->priv;
	struct cmd_params *cps = nand->cmd_params;
	unsigned char __iomem *base = nand->base;

	if (!cps)
		return -1;

	/* Map the plain command onto its WITH_PARAMS variant. */
	switch (cmd) {
	case NAND_CMD_ERASE:
		cmdp = NAND_CMD_ERASE_WITH_PARAMS;
		break;
	case NAND_CMD_READ:
		cmdp = NAND_CMD_READ_WITH_PARAMS;
		break;
	case NAND_CMD_WRITE:
		cmdp = NAND_CMD_WRITE_WITH_PARAMS;
		break;
	default:
		return -1;
	}
	/* Fill the parameter block before kicking the command register. */
	cps->dev = mtd - nand->mtd;	/* device index within nand->mtd[] */
	cps->addr_high = (u32)(addr >> 32);
	cps->addr_low = (u32)addr;
	cps->transfer_size = len;
	cps->data = (unsigned long)ptr;
	writel(cmdp, base + NAND_COMMAND);
	/* The device writes its result back into the parameter block. */
	*rv = cps->result;
	return 0;
}
76 | ||
/*
 * Issue a NAND command and return the device's result value.
 *
 * Tries the cmd_params fast path first; if that is unavailable it
 * programs the individual device/address/size/data registers and then
 * triggers execution by writing NAND_COMMAND.  All register access is
 * serialized by nand->lock (the register window is shared and banked
 * by the NAND_DEV selector).
 */
static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd,
			     u64 addr, u32 len, void *ptr)
{
	struct goldfish_nand *nand = mtd->priv;
	u32 rv;
	unsigned char __iomem *base = nand->base;

	mutex_lock(&nand->lock);
	if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) {
		/* Slow path: program each register individually. */
		writel(mtd - nand->mtd, base + NAND_DEV);
		writel((u32)(addr >> 32), base + NAND_ADDR_HIGH);
		writel((u32)addr, base + NAND_ADDR_LOW);
		writel(len, base + NAND_TRANSFER_SIZE);
		gf_write_ptr(ptr, base + NAND_DATA, base + NAND_DATA_HIGH);
		/* Writing NAND_COMMAND executes the command synchronously. */
		writel(cmd, base + NAND_COMMAND);
		rv = readl(base + NAND_RESULT);
	}
	mutex_unlock(&nand->lock);
	return rv;
}
97 | ||
98 | static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr) | |
99 | { | |
100 | loff_t ofs = instr->addr; | |
101 | u32 len = instr->len; | |
102 | u32 rem; | |
103 | ||
104 | if (ofs + len > mtd->size) | |
105 | goto invalid_arg; | |
106 | rem = do_div(ofs, mtd->writesize); | |
107 | if (rem) | |
108 | goto invalid_arg; | |
109 | ofs *= (mtd->writesize + mtd->oobsize); | |
110 | ||
111 | if (len % mtd->writesize) | |
112 | goto invalid_arg; | |
113 | len = len / mtd->writesize * (mtd->writesize + mtd->oobsize); | |
114 | ||
115 | if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) { | |
116 | pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n", | |
8f52e264 | 117 | ofs, len, mtd->size, mtd->erasesize); |
8e404fff AH |
118 | return -EIO; |
119 | } | |
120 | ||
121 | instr->state = MTD_ERASE_DONE; | |
122 | mtd_erase_callback(instr); | |
123 | ||
124 | return 0; | |
125 | ||
126 | invalid_arg: | |
127 | pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n", | |
8f52e264 | 128 | ofs, len, mtd->size, mtd->erasesize); |
8e404fff AH |
129 | return -EINVAL; |
130 | } | |
131 | ||
132 | static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs, | |
8f52e264 | 133 | struct mtd_oob_ops *ops) |
8e404fff AH |
134 | { |
135 | u32 rem; | |
136 | ||
137 | if (ofs + ops->len > mtd->size) | |
138 | goto invalid_arg; | |
139 | if (ops->datbuf && ops->len && ops->len != mtd->writesize) | |
140 | goto invalid_arg; | |
141 | if (ops->ooblen + ops->ooboffs > mtd->oobsize) | |
142 | goto invalid_arg; | |
143 | ||
144 | rem = do_div(ofs, mtd->writesize); | |
145 | if (rem) | |
146 | goto invalid_arg; | |
147 | ofs *= (mtd->writesize + mtd->oobsize); | |
148 | ||
149 | if (ops->datbuf) | |
150 | ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs, | |
151 | ops->len, ops->datbuf); | |
152 | ofs += mtd->writesize + ops->ooboffs; | |
153 | if (ops->oobbuf) | |
154 | ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs, | |
155 | ops->ooblen, ops->oobbuf); | |
156 | return 0; | |
157 | ||
158 | invalid_arg: | |
b4fcf48a | 159 | pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n", |
8f52e264 | 160 | ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize); |
8e404fff AH |
161 | return -EINVAL; |
162 | } | |
163 | ||
164 | static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs, | |
8f52e264 | 165 | struct mtd_oob_ops *ops) |
8e404fff AH |
166 | { |
167 | u32 rem; | |
168 | ||
169 | if (ofs + ops->len > mtd->size) | |
170 | goto invalid_arg; | |
171 | if (ops->len && ops->len != mtd->writesize) | |
172 | goto invalid_arg; | |
173 | if (ops->ooblen + ops->ooboffs > mtd->oobsize) | |
174 | goto invalid_arg; | |
175 | ||
176 | rem = do_div(ofs, mtd->writesize); | |
177 | if (rem) | |
178 | goto invalid_arg; | |
179 | ofs *= (mtd->writesize + mtd->oobsize); | |
180 | ||
181 | if (ops->datbuf) | |
182 | ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs, | |
183 | ops->len, ops->datbuf); | |
184 | ofs += mtd->writesize + ops->ooboffs; | |
185 | if (ops->oobbuf) | |
186 | ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs, | |
187 | ops->ooblen, ops->oobbuf); | |
188 | return 0; | |
189 | ||
190 | invalid_arg: | |
b4fcf48a | 191 | pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n", |
8f52e264 | 192 | ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize); |
8e404fff AH |
193 | return -EINVAL; |
194 | } | |
195 | ||
196 | static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len, | |
8f52e264 | 197 | size_t *retlen, u_char *buf) |
8e404fff AH |
198 | { |
199 | u32 rem; | |
200 | ||
201 | if (from + len > mtd->size) | |
202 | goto invalid_arg; | |
8e404fff AH |
203 | |
204 | rem = do_div(from, mtd->writesize); | |
205 | if (rem) | |
206 | goto invalid_arg; | |
207 | from *= (mtd->writesize + mtd->oobsize); | |
208 | ||
209 | *retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf); | |
210 | return 0; | |
211 | ||
212 | invalid_arg: | |
b4fcf48a | 213 | pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n", |
8f52e264 | 214 | from, len, mtd->size, mtd->writesize); |
8e404fff AH |
215 | return -EINVAL; |
216 | } | |
217 | ||
218 | static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len, | |
8f52e264 | 219 | size_t *retlen, const u_char *buf) |
8e404fff AH |
220 | { |
221 | u32 rem; | |
222 | ||
223 | if (to + len > mtd->size) | |
224 | goto invalid_arg; | |
8e404fff AH |
225 | |
226 | rem = do_div(to, mtd->writesize); | |
227 | if (rem) | |
228 | goto invalid_arg; | |
229 | to *= (mtd->writesize + mtd->oobsize); | |
230 | ||
231 | *retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf); | |
232 | return 0; | |
233 | ||
234 | invalid_arg: | |
b4fcf48a | 235 | pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n", |
8f52e264 | 236 | to, len, mtd->size, mtd->writesize); |
8e404fff AH |
237 | return -EINVAL; |
238 | } | |
239 | ||
240 | static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs) | |
241 | { | |
242 | u32 rem; | |
243 | ||
244 | if (ofs >= mtd->size) | |
245 | goto invalid_arg; | |
246 | ||
247 | rem = do_div(ofs, mtd->erasesize); | |
248 | if (rem) | |
249 | goto invalid_arg; | |
250 | ofs *= mtd->erasesize / mtd->writesize; | |
251 | ofs *= (mtd->writesize + mtd->oobsize); | |
252 | ||
253 | return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL); | |
254 | ||
255 | invalid_arg: | |
256 | pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n", | |
8f52e264 | 257 | ofs, mtd->size, mtd->writesize); |
8e404fff AH |
258 | return -EINVAL; |
259 | } | |
260 | ||
261 | static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) | |
262 | { | |
263 | u32 rem; | |
264 | ||
265 | if (ofs >= mtd->size) | |
266 | goto invalid_arg; | |
267 | ||
268 | rem = do_div(ofs, mtd->erasesize); | |
269 | if (rem) | |
270 | goto invalid_arg; | |
271 | ofs *= mtd->erasesize / mtd->writesize; | |
272 | ofs *= (mtd->writesize + mtd->oobsize); | |
273 | ||
274 | if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1) | |
275 | return -EIO; | |
276 | return 0; | |
277 | ||
278 | invalid_arg: | |
279 | pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n", | |
8f52e264 | 280 | ofs, mtd->size, mtd->writesize); |
8e404fff AH |
281 | return -EINVAL; |
282 | } | |
283 | ||
284 | static int nand_setup_cmd_params(struct platform_device *pdev, | |
8f52e264 | 285 | struct goldfish_nand *nand) |
8e404fff AH |
286 | { |
287 | u64 paddr; | |
288 | unsigned char __iomem *base = nand->base; | |
289 | ||
290 | nand->cmd_params = devm_kzalloc(&pdev->dev, | |
291 | sizeof(struct cmd_params), GFP_KERNEL); | |
292 | if (!nand->cmd_params) | |
293 | return -1; | |
294 | ||
295 | paddr = __pa(nand->cmd_params); | |
296 | writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH); | |
297 | writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW); | |
298 | return 0; | |
299 | } | |
300 | ||
/*
 * Probe backing device @id: read its geometry from the banked register
 * window, fetch its name, fill in the mtd_info and register it with
 * the MTD core.
 *
 * Returns 0 on success, -ENOMEM/-ENODEV/-EIO on failure.
 */
static int goldfish_nand_init_device(struct platform_device *pdev,
				     struct goldfish_nand *nand, int id)
{
	u32 name_len;
	u32 result;
	u32 flags;
	unsigned char __iomem *base = nand->base;
	struct mtd_info *mtd = &nand->mtd[id];
	char *name;

	/* Select device @id, then read its geometry registers. */
	mutex_lock(&nand->lock);
	writel(id, base + NAND_DEV);
	flags = readl(base + NAND_DEV_FLAGS);
	name_len = readl(base + NAND_DEV_NAME_LEN);
	mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
	mtd->size = readl(base + NAND_DEV_SIZE_LOW);
	mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32;
	mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
	mtd->oobavail = mtd->oobsize;
	/* Device sizes include OOB bytes; convert to main-data sizes. */
	mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
			(mtd->writesize + mtd->oobsize) * mtd->writesize;
	do_div(mtd->size, mtd->writesize + mtd->oobsize);
	mtd->size *= mtd->writesize;
	dev_dbg(&pdev->dev,
		"goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
		id, mtd->size, mtd->writesize,
		mtd->oobsize, mtd->erasesize);
	mutex_unlock(&nand->lock);

	mtd->priv = nand;

	/* +1 for the NUL terminator; kzalloc pre-zeroes the buffer. */
	name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	mtd->name = name;

	result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
				   name);
	if (result != name_len) {
		dev_err(&pdev->dev,
			"goldfish_nand_init_device failed to get dev name %d != %d\n",
			result, name_len);
		return -ENODEV;
	}
	/* mtd->name is const char *; cast to terminate the string. */
	((char *)mtd->name)[name_len] = '\0';

	/* Setup the MTD structure */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	if (flags & NAND_DEV_FLAG_READ_ONLY)
		mtd->flags &= ~MTD_WRITEABLE;
	/* Best effort: on failure the slower register path is used. */
	if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP)
		nand_setup_cmd_params(pdev, nand);

	mtd->owner = THIS_MODULE;
	mtd->_erase = goldfish_nand_erase;
	mtd->_read = goldfish_nand_read;
	mtd->_write = goldfish_nand_write;
	mtd->_read_oob = goldfish_nand_read_oob;
	mtd->_write_oob = goldfish_nand_write_oob;
	mtd->_block_isbad = goldfish_nand_block_isbad;
	mtd->_block_markbad = goldfish_nand_block_markbad;

	if (mtd_device_register(mtd, NULL, 0))
		return -EIO;

	return 0;
}
369 | ||
/*
 * Bind to the goldfish NAND controller: map its register window,
 * verify the interface version, then enumerate and register every
 * backing device it exposes.
 *
 * Returns 0 if at least one device registered successfully, otherwise
 * a negative errno.  All resources are devm-managed.
 */
static int goldfish_nand_probe(struct platform_device *pdev)
{
	u32 num_dev;
	int i;
	int err;
	u32 num_dev_working;
	u32 version;
	struct resource *r;
	struct goldfish_nand *nand;
	unsigned char __iomem *base;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;

	base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (!base)
		return -ENOMEM;

	/* Refuse a device speaking a different protocol version. */
	version = readl(base + NAND_VERSION);
	if (version != NAND_VERSION_CURRENT) {
		dev_err(&pdev->dev,
			"goldfish_nand_init: version mismatch, got %d, expected %d\n",
			version, NAND_VERSION_CURRENT);
		return -ENODEV;
	}
	num_dev = readl(base + NAND_NUM_DEV);
	if (num_dev == 0)
		return -ENODEV;

	/* One trailing mtd_info per backing device. */
	nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
			sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	mutex_init(&nand->lock);
	nand->base = base;
	nand->mtd_count = num_dev;
	platform_set_drvdata(pdev, nand);

	/* Register whatever devices initialize; fail only if none do. */
	num_dev_working = 0;
	for (i = 0; i < num_dev; i++) {
		err = goldfish_nand_init_device(pdev, nand, i);
		if (err == 0)
			num_dev_working++;
	}
	if (num_dev_working == 0)
		return -ENODEV;
	return 0;
}
420 | ||
421 | static int goldfish_nand_remove(struct platform_device *pdev) | |
422 | { | |
423 | struct goldfish_nand *nand = platform_get_drvdata(pdev); | |
424 | int i; | |
ef323812 | 425 | |
8e404fff AH |
426 | for (i = 0; i < nand->mtd_count; i++) { |
427 | if (nand->mtd[i].name) | |
428 | mtd_device_unregister(&nand->mtd[i]); | |
429 | } | |
430 | return 0; | |
431 | } | |
432 | ||
/* Binds to platform devices named "goldfish_nand". */
static struct platform_driver goldfish_nand_driver = {
	.probe = goldfish_nand_probe,
	.remove = goldfish_nand_remove,
	.driver = {
		.name = "goldfish_nand"
	}
};

module_platform_driver(goldfish_nand_driver);
MODULE_LICENSE("GPL");