Commit | Line | Data |
---|---|---|
83d290c5 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
e29c22f5 KP |
2 | /* |
3 | * Core registration and callback routines for MTD | |
4 | * drivers and users. | |
5 | * | |
ff94bc40 | 6 | * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> |
0a50b3c9 | 7 | * Copyright © 2006 Red Hat UK Limited |
ff94bc40 | 8 | * |
e29c22f5 KP |
9 | */ |
10 | ||
ff94bc40 HS |
11 | #ifndef __UBOOT__ |
12 | #include <linux/module.h> | |
13 | #include <linux/kernel.h> | |
14 | #include <linux/ptrace.h> | |
15 | #include <linux/seq_file.h> | |
16 | #include <linux/string.h> | |
17 | #include <linux/timer.h> | |
18 | #include <linux/major.h> | |
19 | #include <linux/fs.h> | |
20 | #include <linux/err.h> | |
21 | #include <linux/ioctl.h> | |
22 | #include <linux/init.h> | |
23 | #include <linux/proc_fs.h> | |
24 | #include <linux/idr.h> | |
25 | #include <linux/backing-dev.h> | |
26 | #include <linux/gfp.h> | |
27 | #include <linux/slab.h> | |
28 | #else | |
cd93d625 | 29 | #include <linux/bitops.h> |
eb41d8a1 | 30 | #include <linux/bug.h> |
ff94bc40 | 31 | #include <linux/err.h> |
e29c22f5 | 32 | #include <ubi_uboot.h> |
ff94bc40 HS |
33 | #endif |
34 | ||
f8fdb81f | 35 | #include <linux/log2.h> |
ff94bc40 HS |
36 | #include <linux/mtd/mtd.h> |
37 | #include <linux/mtd/partitions.h> | |
38 | ||
39 | #include "mtdcore.h" | |
40 | ||
41 | #ifndef __UBOOT__ | |
42 | /* | |
43 | * backing device capabilities for non-mappable devices (such as NAND flash) | |
44 | * - permits private mappings, copies are taken of the data | |
45 | */ | |
46 | static struct backing_dev_info mtd_bdi_unmappable = { | |
47 | .capabilities = BDI_CAP_MAP_COPY, | |
48 | }; | |
49 | ||
50 | /* | |
51 | * backing device capabilities for R/O mappable devices (such as ROM) | |
52 | * - permits private mappings, copies are taken of the data | |
53 | * - permits non-writable shared mappings | |
54 | */ | |
55 | static struct backing_dev_info mtd_bdi_ro_mappable = { | |
56 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | |
57 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), | |
58 | }; | |
59 | ||
60 | /* | |
61 | * backing device capabilities for writable mappable devices (such as RAM) | |
62 | * - permits private mappings, copies are taken of the data | |
63 | * - permits non-writable shared mappings | |
64 | */ | |
65 | static struct backing_dev_info mtd_bdi_rw_mappable = { | |
66 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | |
67 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | | |
68 | BDI_CAP_WRITE_MAP), | |
69 | }; | |
70 | ||
71 | static int mtd_cls_suspend(struct device *dev, pm_message_t state); | |
72 | static int mtd_cls_resume(struct device *dev); | |
e29c22f5 | 73 | |
ff94bc40 HS |
74 | static struct class mtd_class = { |
75 | .name = "mtd", | |
76 | .owner = THIS_MODULE, | |
77 | .suspend = mtd_cls_suspend, | |
78 | .resume = mtd_cls_resume, | |
79 | }; | |
80 | #else | |
ff94bc40 HS |
81 | #define MAX_IDR_ID 64 |
82 | ||
83 | struct idr_layer { | |
84 | int used; | |
85 | void *ptr; | |
86 | }; | |
87 | ||
88 | struct idr { | |
89 | struct idr_layer id[MAX_IDR_ID]; | |
4c47fd0b | 90 | bool updated; |
ff94bc40 HS |
91 | }; |
92 | ||
93 | #define DEFINE_IDR(name) struct idr name; | |
94 | ||
95 | void idr_remove(struct idr *idp, int id) | |
96 | { | |
4c47fd0b | 97 | if (idp->id[id].used) { |
ff94bc40 | 98 | idp->id[id].used = 0; |
4c47fd0b BB |
99 | idp->updated = true; |
100 | } | |
ff94bc40 HS |
101 | |
102 | return; | |
103 | } | |
104 | void *idr_find(struct idr *idp, int id) | |
105 | { | |
106 | if (idp->id[id].used) | |
107 | return idp->id[id].ptr; | |
108 | ||
109 | return NULL; | |
110 | } | |
111 | ||
112 | void *idr_get_next(struct idr *idp, int *next) | |
113 | { | |
114 | void *ret; | |
115 | int id = *next; | |
116 | ||
117 | ret = idr_find(idp, id); | |
118 | if (ret) { | |
119 | id++; | |
120 | if (!idp->id[id].used) | |
121 | id = 0; | |
122 | *next = id; | |
123 | } else { | |
124 | *next = 0; | |
125 | } | |
0a50b3c9 | 126 | |
ff94bc40 HS |
127 | return ret; |
128 | } | |
129 | ||
130 | int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask) | |
131 | { | |
132 | struct idr_layer *idl; | |
133 | int i = 0; | |
134 | ||
135 | while (i < MAX_IDR_ID) { | |
136 | idl = &idp->id[i]; | |
137 | if (idl->used == 0) { | |
138 | idl->used = 1; | |
139 | idl->ptr = ptr; | |
4c47fd0b | 140 | idp->updated = true; |
ff94bc40 HS |
141 | return i; |
142 | } | |
143 | i++; | |
144 | } | |
145 | return -ENOSPC; | |
146 | } | |
147 | #endif | |
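
The U-Boot build above replaces the kernel IDR with a fixed-size, 64-slot table. A minimal usage sketch of those three helpers follows; the `example_` names are hypothetical and only `idr_alloc()`, `idr_find()` and `idr_remove()` come from this file.

```c
static DEFINE_IDR(example_idr);

static int example_idr_usage(void *obj)
{
        int id;

        /* start/end/gfp_mask are ignored by this shim; it scans id[0..63] */
        id = idr_alloc(&example_idr, obj, 0, 0, GFP_KERNEL);
        if (id < 0)
                return id;              /* -ENOSPC once all 64 slots are used */

        if (idr_find(&example_idr, id) != obj)
                return -EINVAL;         /* should not happen */

        idr_remove(&example_idr, id);   /* frees the slot, marks ->updated */
        return 0;
}
```
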
148 | ||
149 | static DEFINE_IDR(mtd_idr); | |
150 | ||
151 | /* These are exported solely for the purpose of mtd_blkdevs.c. You | |
152 | should not use them for _anything_ else */ | |
153 | DEFINE_MUTEX(mtd_table_mutex); | |
154 | EXPORT_SYMBOL_GPL(mtd_table_mutex); | |
155 | ||
156 | struct mtd_info *__mtd_next_device(int i) | |
157 | { | |
158 | return idr_get_next(&mtd_idr, &i); | |
159 | } | |
160 | EXPORT_SYMBOL_GPL(__mtd_next_device); | |
161 | ||
4c47fd0b BB |
162 | bool mtd_dev_list_updated(void) |
163 | { | |
164 | if (mtd_idr.updated) { | |
165 | mtd_idr.updated = false; | |
166 | return true; | |
167 | } | |
168 | ||
169 | return false; | |
170 | } | |
171 | ||
ff94bc40 HS |
172 | #ifndef __UBOOT__ |
173 | static LIST_HEAD(mtd_notifiers); | |
174 | ||
ff94bc40 HS |
175 | #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2) |
176 | ||
177 | /* REVISIT once MTD uses the driver model better, whoever allocates | |
178 | * the mtd_info will probably want to use the release() hook... | |
179 | */ | |
180 | static void mtd_release(struct device *dev) | |
181 | { | |
182 | struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev); | |
183 | dev_t index = MTD_DEVT(mtd->index); | |
184 | ||
185 | /* remove /dev/mtdXro node if needed */ | |
186 | if (index) | |
187 | device_destroy(&mtd_class, index + 1); | |
188 | } | |
189 | ||
190 | static int mtd_cls_suspend(struct device *dev, pm_message_t state) | |
191 | { | |
192 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
193 | ||
194 | return mtd ? mtd_suspend(mtd) : 0; | |
195 | } | |
196 | ||
197 | static int mtd_cls_resume(struct device *dev) | |
198 | { | |
199 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
200 | ||
201 | if (mtd) | |
202 | mtd_resume(mtd); | |
203 | return 0; | |
204 | } | |
205 | ||
206 | static ssize_t mtd_type_show(struct device *dev, | |
207 | struct device_attribute *attr, char *buf) | |
208 | { | |
209 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
210 | char *type; | |
211 | ||
212 | switch (mtd->type) { | |
213 | case MTD_ABSENT: | |
214 | type = "absent"; | |
215 | break; | |
216 | case MTD_RAM: | |
217 | type = "ram"; | |
218 | break; | |
219 | case MTD_ROM: | |
220 | type = "rom"; | |
221 | break; | |
222 | case MTD_NORFLASH: | |
223 | type = "nor"; | |
224 | break; | |
225 | case MTD_NANDFLASH: | |
226 | type = "nand"; | |
227 | break; | |
228 | case MTD_DATAFLASH: | |
229 | type = "dataflash"; | |
230 | break; | |
231 | case MTD_UBIVOLUME: | |
232 | type = "ubi"; | |
233 | break; | |
234 | case MTD_MLCNANDFLASH: | |
235 | type = "mlc-nand"; | |
236 | break; | |
237 | default: | |
238 | type = "unknown"; | |
239 | } | |
240 | ||
241 | return snprintf(buf, PAGE_SIZE, "%s\n", type); | |
242 | } | |
243 | static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL); | |
244 | ||
245 | static ssize_t mtd_flags_show(struct device *dev, | |
246 | struct device_attribute *attr, char *buf) | |
247 | { | |
248 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
249 | ||
250 | return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags); | |
251 | ||
252 | } | |
253 | static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL); | |
254 | ||
255 | static ssize_t mtd_size_show(struct device *dev, | |
256 | struct device_attribute *attr, char *buf) | |
257 | { | |
258 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
259 | ||
260 | return snprintf(buf, PAGE_SIZE, "%llu\n", | |
261 | (unsigned long long)mtd->size); | |
262 | ||
263 | } | |
264 | static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL); | |
265 | ||
266 | static ssize_t mtd_erasesize_show(struct device *dev, | |
267 | struct device_attribute *attr, char *buf) | |
268 | { | |
269 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
270 | ||
271 | return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize); | |
272 | ||
273 | } | |
274 | static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL); | |
275 | ||
276 | static ssize_t mtd_writesize_show(struct device *dev, | |
277 | struct device_attribute *attr, char *buf) | |
278 | { | |
279 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
280 | ||
281 | return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize); | |
282 | ||
283 | } | |
284 | static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL); | |
285 | ||
286 | static ssize_t mtd_subpagesize_show(struct device *dev, | |
287 | struct device_attribute *attr, char *buf) | |
288 | { | |
289 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
290 | unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft; | |
291 | ||
292 | return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize); | |
293 | ||
294 | } | |
295 | static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL); | |
296 | ||
297 | static ssize_t mtd_oobsize_show(struct device *dev, | |
298 | struct device_attribute *attr, char *buf) | |
299 | { | |
300 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
301 | ||
302 | return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize); | |
303 | ||
304 | } | |
305 | static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL); | |
306 | ||
307 | static ssize_t mtd_numeraseregions_show(struct device *dev, | |
308 | struct device_attribute *attr, char *buf) | |
309 | { | |
310 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
311 | ||
312 | return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions); | |
313 | ||
314 | } | |
315 | static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show, | |
316 | NULL); | |
317 | ||
318 | static ssize_t mtd_name_show(struct device *dev, | |
319 | struct device_attribute *attr, char *buf) | |
320 | { | |
321 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
322 | ||
323 | return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name); | |
324 | ||
325 | } | |
326 | static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL); | |
327 | ||
328 | static ssize_t mtd_ecc_strength_show(struct device *dev, | |
329 | struct device_attribute *attr, char *buf) | |
330 | { | |
331 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
332 | ||
333 | return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength); | |
334 | } | |
335 | static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL); | |
336 | ||
337 | static ssize_t mtd_bitflip_threshold_show(struct device *dev, | |
338 | struct device_attribute *attr, | |
339 | char *buf) | |
340 | { | |
341 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
342 | ||
343 | return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold); | |
344 | } | |
345 | ||
346 | static ssize_t mtd_bitflip_threshold_store(struct device *dev, | |
347 | struct device_attribute *attr, | |
348 | const char *buf, size_t count) | |
349 | { | |
350 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
351 | unsigned int bitflip_threshold; | |
352 | int retval; | |
353 | ||
354 | retval = kstrtouint(buf, 0, &bitflip_threshold); | |
355 | if (retval) | |
356 | return retval; | |
357 | ||
358 | mtd->bitflip_threshold = bitflip_threshold; | |
359 | return count; | |
360 | } | |
361 | static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR, | |
362 | mtd_bitflip_threshold_show, | |
363 | mtd_bitflip_threshold_store); | |
364 | ||
365 | static ssize_t mtd_ecc_step_size_show(struct device *dev, | |
366 | struct device_attribute *attr, char *buf) | |
367 | { | |
368 | struct mtd_info *mtd = dev_get_drvdata(dev); | |
369 | ||
370 | return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size); | |
371 | ||
372 | } | |
373 | static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL); | |
374 | ||
375 | static struct attribute *mtd_attrs[] = { | |
376 | &dev_attr_type.attr, | |
377 | &dev_attr_flags.attr, | |
378 | &dev_attr_size.attr, | |
379 | &dev_attr_erasesize.attr, | |
380 | &dev_attr_writesize.attr, | |
381 | &dev_attr_subpagesize.attr, | |
382 | &dev_attr_oobsize.attr, | |
383 | &dev_attr_numeraseregions.attr, | |
384 | &dev_attr_name.attr, | |
385 | &dev_attr_ecc_strength.attr, | |
386 | &dev_attr_ecc_step_size.attr, | |
387 | &dev_attr_bitflip_threshold.attr, | |
388 | NULL, | |
389 | }; | |
390 | ATTRIBUTE_GROUPS(mtd); | |
391 | ||
392 | static struct device_type mtd_devtype = { | |
393 | .name = "mtd", | |
394 | .groups = mtd_groups, | |
395 | .release = mtd_release, | |
396 | }; | |
397 | #endif | |
398 | ||
399 | /** | |
400 | * add_mtd_device - register an MTD device | |
401 | * @mtd: pointer to new MTD device info structure | |
402 | * | |
403 | * Add a device to the list of MTD devices present in the system, and | |
404 | * notify each currently active MTD 'user' of its arrival. Returns | |
405 | * zero on success or 1 on failure, which currently will only happen | |
406 | * if there is insufficient memory or a sysfs error. | |
407 | */ | |
408 | ||
e29c22f5 KP |
409 | int add_mtd_device(struct mtd_info *mtd) |
410 | { | |
ff94bc40 HS |
411 | #ifndef __UBOOT__ |
412 | struct mtd_notifier *not; | |
413 | #endif | |
414 | int i, error; | |
415 | ||
416 | #ifndef __UBOOT__ | |
417 | if (!mtd->backing_dev_info) { | |
418 | switch (mtd->type) { | |
419 | case MTD_RAM: | |
420 | mtd->backing_dev_info = &mtd_bdi_rw_mappable; | |
421 | break; | |
422 | case MTD_ROM: | |
423 | mtd->backing_dev_info = &mtd_bdi_ro_mappable; | |
424 | break; | |
425 | default: | |
426 | mtd->backing_dev_info = &mtd_bdi_unmappable; | |
427 | break; | |
428 | } | |
429 | } | |
430 | #endif | |
e29c22f5 KP |
431 | |
432 | BUG_ON(mtd->writesize == 0); | |
ff94bc40 | 433 | mutex_lock(&mtd_table_mutex); |
e29c22f5 | 434 | |
ff94bc40 HS |
435 | i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); |
436 | if (i < 0) | |
437 | goto fail_locked; | |
e29c22f5 | 438 | |
ff94bc40 HS |
439 | mtd->index = i; |
440 | mtd->usecount = 0; | |
dfe64e2c | 441 | |
2a74930d MR |
442 | INIT_LIST_HEAD(&mtd->partitions); |
443 | ||
ff94bc40 HS |
444 | /* default value if not set by driver */ |
445 | if (mtd->bitflip_threshold == 0) | |
446 | mtd->bitflip_threshold = mtd->ecc_strength; | |
dfe64e2c | 447 | |
ff94bc40 HS |
448 | if (is_power_of_2(mtd->erasesize)) |
449 | mtd->erasesize_shift = ffs(mtd->erasesize) - 1; | |
450 | else | |
451 | mtd->erasesize_shift = 0; | |
e29c22f5 | 452 | |
ff94bc40 HS |
453 | if (is_power_of_2(mtd->writesize)) |
454 | mtd->writesize_shift = ffs(mtd->writesize) - 1; | |
455 | else | |
456 | mtd->writesize_shift = 0; | |
457 | ||
458 | mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; | |
459 | mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; | |
460 | ||
461 | /* Some chips always power up locked. Unlock them now */ | |
462 | if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) { | |
463 | error = mtd_unlock(mtd, 0, mtd->size); | |
464 | if (error && error != -EOPNOTSUPP) | |
465 | printk(KERN_WARNING | |
466 | "%s: unlock failed, writes may not work\n", | |
467 | mtd->name); | |
468 | } | |
469 | ||
470 | #ifndef __UBOOT__ | |
471 | /* Caller should have set dev.parent to match the | |
472 | * physical device. | |
473 | */ | |
474 | mtd->dev.type = &mtd_devtype; | |
475 | mtd->dev.class = &mtd_class; | |
476 | mtd->dev.devt = MTD_DEVT(i); | |
477 | dev_set_name(&mtd->dev, "mtd%d", i); | |
478 | dev_set_drvdata(&mtd->dev, mtd); | |
479 | if (device_register(&mtd->dev) != 0) | |
480 | goto fail_added; | |
e29c22f5 | 481 | |
ff94bc40 HS |
482 | if (MTD_DEVT(i)) |
483 | device_create(&mtd_class, mtd->dev.parent, | |
484 | MTD_DEVT(i) + 1, | |
485 | NULL, "mtd%dro", i); | |
486 | ||
487 | pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name); | |
488 | /* No need to get a refcount on the module containing | |
489 | the notifier, since we hold the mtd_table_mutex */ | |
490 | list_for_each_entry(not, &mtd_notifiers, list) | |
491 | not->add(mtd); | |
ddf7bcfa HS |
492 | #else |
493 | pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name); | |
ff94bc40 HS |
494 | #endif |
495 | ||
496 | mutex_unlock(&mtd_table_mutex); | |
497 | /* We _know_ we aren't being removed, because | |
498 | our caller is still holding us here. So none | |
499 | of this try_ nonsense, and no bitching about it | |
500 | either. :) */ | |
501 | __module_get(THIS_MODULE); | |
502 | return 0; | |
503 | ||
504 | #ifndef __UBOOT__ | |
505 | fail_added: | |
506 | idr_remove(&mtd_idr, i); | |
507 | #endif | |
508 | fail_locked: | |
509 | mutex_unlock(&mtd_table_mutex); | |
e29c22f5 KP |
510 | return 1; |
511 | } | |
512 | ||
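
A hedged sketch of how a flash driver might hand a populated mtd_info to add_mtd_device(). The device name, geometry and probe function are placeholders, not a real chip; the read/write/erase hooks are omitted for brevity.

```c
static struct mtd_info example_mtd;

static int example_flash_probe(void)
{
        example_mtd.name      = "example-nor";
        example_mtd.type      = MTD_NORFLASH;
        example_mtd.flags     = MTD_CAP_NORFLASH;
        example_mtd.size      = 16 << 20;       /* 16 MiB */
        example_mtd.erasesize = 64 << 10;       /* 64 KiB sectors */
        example_mtd.writesize = 1;              /* must be non-zero, see BUG_ON above */
        /* the driver's _read/_write/_erase hooks would be set here as well */

        /* add_mtd_device() returns 0 on success, 1 on failure */
        return add_mtd_device(&example_mtd) ? -ENODEV : 0;
}
```
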
513 | /** | |
ff94bc40 HS |
514 | * del_mtd_device - unregister an MTD device |
515 | * @mtd: pointer to MTD device info structure | |
e29c22f5 | 516 | * |
ff94bc40 HS |
517 | * Remove a device from the list of MTD devices present in the system, |
518 | * and notify each currently active MTD 'user' of its departure. | |
519 | * Returns zero on success or 1 on failure, which currently will happen | |
520 | * if the requested device does not appear to be present in the list. | |
e29c22f5 | 521 | */ |
ff94bc40 | 522 | |
e29c22f5 KP |
523 | int del_mtd_device(struct mtd_info *mtd) |
524 | { | |
525 | int ret; | |
ff94bc40 HS |
526 | #ifndef __UBOOT__ |
527 | struct mtd_notifier *not; | |
528 | #endif | |
529 | ||
a02820fc BB |
530 | ret = del_mtd_partitions(mtd); |
531 | if (ret) { | |
532 | debug("Failed to delete MTD partitions attached to %s (err %d)\n", | |
533 | mtd->name, ret); | |
534 | return ret; | |
535 | } | |
536 | ||
ff94bc40 | 537 | mutex_lock(&mtd_table_mutex); |
e29c22f5 | 538 | |
ff94bc40 | 539 | if (idr_find(&mtd_idr, mtd->index) != mtd) { |
e29c22f5 | 540 | ret = -ENODEV; |
ff94bc40 HS |
541 | goto out_error; |
542 | } | |
543 | ||
544 | #ifndef __UBOOT__ | |
545 | /* No need to get a refcount on the module containing | |
546 | the notifier, since we hold the mtd_table_mutex */ | |
547 | list_for_each_entry(not, &mtd_notifiers, list) | |
548 | not->remove(mtd); | |
549 | #endif | |
550 | ||
551 | if (mtd->usecount) { | |
552 | printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n", | |
553 | mtd->index, mtd->name, mtd->usecount); | |
e29c22f5 KP |
554 | ret = -EBUSY; |
555 | } else { | |
ff94bc40 HS |
556 | #ifndef __UBOOT__ |
557 | device_unregister(&mtd->dev); | |
558 | #endif | |
559 | ||
560 | idr_remove(&mtd_idr, mtd->index); | |
e29c22f5 | 561 | |
ff94bc40 | 562 | module_put(THIS_MODULE); |
e29c22f5 KP |
563 | ret = 0; |
564 | } | |
565 | ||
ff94bc40 HS |
566 | out_error: |
567 | mutex_unlock(&mtd_table_mutex); | |
e29c22f5 KP |
568 | return ret; |
569 | } | |
570 | ||
ff94bc40 HS |
571 | #ifndef __UBOOT__ |
572 | /** | |
573 | * mtd_device_parse_register - parse partitions and register an MTD device. | |
574 | * | |
575 | * @mtd: the MTD device to register | |
576 | * @types: the list of MTD partition probes to try, see | |
577 | * 'parse_mtd_partitions()' for more information | |
578 | * @parser_data: MTD partition parser-specific data | |
579 | * @parts: fallback partition information to register, if parsing fails; | |
580 | * only valid if %nr_parts > %0 | |
581 | * @nr_parts: the number of partitions in parts, if zero then the full | |
582 | * MTD device is registered if no partition info is found | |
583 | * | |
584 | * This function aggregates MTD partitions parsing (done by | |
585 | * 'parse_mtd_partitions()') and MTD device and partitions registering. It | |
586 | * basically follows the most common pattern found in many MTD drivers: | |
587 | * | |
588 | * * It first tries to probe partitions on MTD device @mtd using parsers | |
589 | * specified in @types (if @types is %NULL, then the default list of parsers | |
590 | * is used, see 'parse_mtd_partitions()' for more information). If none are | |
591 | * found this functions tries to fallback to information specified in | |
592 | * @parts/@nr_parts. | |
593 | * * If any partitioning info was found, this function registers the found | |
594 | * partitions. | |
595 | * * If no partitions were found this function just registers the MTD device | |
596 | * @mtd and exits. | |
597 | * | |
598 | * Returns zero in case of success and a negative error code in case of failure. | |
599 | */ | |
600 | int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, | |
601 | struct mtd_part_parser_data *parser_data, | |
602 | const struct mtd_partition *parts, | |
603 | int nr_parts) | |
604 | { | |
605 | int err; | |
606 | struct mtd_partition *real_parts; | |
607 | ||
608 | err = parse_mtd_partitions(mtd, types, &real_parts, parser_data); | |
609 | if (err <= 0 && nr_parts && parts) { | |
610 | real_parts = kmemdup(parts, sizeof(*parts) * nr_parts, | |
611 | GFP_KERNEL); | |
612 | if (!real_parts) | |
613 | err = -ENOMEM; | |
614 | else | |
615 | err = nr_parts; | |
616 | } | |
617 | ||
618 | if (err > 0) { | |
619 | err = add_mtd_partitions(mtd, real_parts, err); | |
620 | kfree(real_parts); | |
621 | } else if (err == 0) { | |
622 | err = add_mtd_device(mtd); | |
623 | if (err == 1) | |
624 | err = -ENODEV; | |
625 | } | |
626 | ||
627 | return err; | |
628 | } | |
629 | EXPORT_SYMBOL_GPL(mtd_device_parse_register); | |
630 | ||
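
An illustrative call of the registration helper above (Linux-side path only, since it sits inside the !__UBOOT__ guard): probe the default parsers and fall back to a fixed two-partition layout if nothing is found. Partition names and sizes are made up for the example.

```c
static const struct mtd_partition example_parts[] = {
        { .name = "u-boot", .offset = 0,     .size = SZ_1M },
        { .name = "rootfs", .offset = SZ_1M, .size = MTDPART_SIZ_FULL },
};

static int example_register(struct mtd_info *mtd)
{
        /* NULL types/parser_data: use the default parser list */
        return mtd_device_parse_register(mtd, NULL, NULL,
                                         example_parts,
                                         ARRAY_SIZE(example_parts));
}
```
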
631 | /** | |
632 | * mtd_device_unregister - unregister an existing MTD device. | |
633 | * | |
634 | * @master: the MTD device to unregister. This will unregister both the master | |
635 | * and any partitions if registered. | |
636 | */ | |
637 | int mtd_device_unregister(struct mtd_info *master) | |
638 | { | |
639 | int err; | |
640 | ||
641 | err = del_mtd_partitions(master); | |
642 | if (err) | |
643 | return err; | |
644 | ||
645 | if (!device_is_registered(&master->dev)) | |
646 | return 0; | |
647 | ||
648 | return del_mtd_device(master); | |
649 | } | |
650 | EXPORT_SYMBOL_GPL(mtd_device_unregister); | |
651 | ||
652 | /** | |
653 | * register_mtd_user - register a 'user' of MTD devices. | |
654 | * @new: pointer to notifier info structure | |
655 | * | |
656 | * Registers a pair of callbacks function to be called upon addition | |
657 | * or removal of MTD devices. Causes the 'add' callback to be immediately | |
658 | * invoked for each MTD device currently present in the system. | |
659 | */ | |
660 | void register_mtd_user (struct mtd_notifier *new) | |
661 | { | |
662 | struct mtd_info *mtd; | |
663 | ||
664 | mutex_lock(&mtd_table_mutex); | |
665 | ||
666 | list_add(&new->list, &mtd_notifiers); | |
667 | ||
668 | __module_get(THIS_MODULE); | |
669 | ||
670 | mtd_for_each_device(mtd) | |
671 | new->add(mtd); | |
672 | ||
673 | mutex_unlock(&mtd_table_mutex); | |
674 | } | |
675 | EXPORT_SYMBOL_GPL(register_mtd_user); | |
676 | ||
677 | /** | |
678 | * unregister_mtd_user - unregister a 'user' of MTD devices. | |
679 | * @old: pointer to notifier info structure | |
680 | * | |
681 | * Removes a callback function pair from the list of 'users' to be | |
682 | * notified upon addition or removal of MTD devices. Causes the | |
683 | * 'remove' callback to be immediately invoked for each MTD device | |
684 | * currently present in the system. | |
685 | */ | |
686 | int unregister_mtd_user (struct mtd_notifier *old) | |
687 | { | |
688 | struct mtd_info *mtd; | |
689 | ||
690 | mutex_lock(&mtd_table_mutex); | |
691 | ||
692 | module_put(THIS_MODULE); | |
693 | ||
694 | mtd_for_each_device(mtd) | |
695 | old->remove(mtd); | |
696 | ||
697 | list_del(&old->list); | |
698 | mutex_unlock(&mtd_table_mutex); | |
699 | return 0; | |
700 | } | |
701 | EXPORT_SYMBOL_GPL(unregister_mtd_user); | |
702 | #endif | |
703 | ||
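
A sketch of an MTD 'user' built on the notifier API above: the add() callback runs immediately for every device already present, then again for each later arrival, all under mtd_table_mutex. Callback bodies here are placeholders.

```c
static void example_notify_add(struct mtd_info *mtd)
{
        pr_debug("example: mtd%d (%s) appeared\n", mtd->index, mtd->name);
}

static void example_notify_remove(struct mtd_info *mtd)
{
        pr_debug("example: mtd%d (%s) going away\n", mtd->index, mtd->name);
}

static struct mtd_notifier example_notifier = {
        .add    = example_notify_add,
        .remove = example_notify_remove,
};

/* register_mtd_user(&example_notifier); ... unregister_mtd_user(&example_notifier); */
```
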
e29c22f5 KP |
704 | /** |
705 | * get_mtd_device - obtain a validated handle for an MTD device | |
706 | * @mtd: last known address of the required MTD device | |
707 | * @num: internal device number of the required MTD device | |
708 | * | |
709 | * Given a number and NULL address, return the num'th entry in the device | |
ff94bc40 HS |
710 | * table, if any. Given an address and num == -1, search the device table |
711 | * for a device with that address and return if it's still present. Given | |
712 | * both, return the num'th driver only if its address matches. Return | |
713 | * error code if not. | |
e29c22f5 KP |
714 | */ |
715 | struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) | |
716 | { | |
ff94bc40 HS |
717 | struct mtd_info *ret = NULL, *other; |
718 | int err = -ENODEV; | |
719 | ||
720 | mutex_lock(&mtd_table_mutex); | |
e29c22f5 KP |
721 | |
722 | if (num == -1) { | |
ff94bc40 HS |
723 | mtd_for_each_device(other) { |
724 | if (other == mtd) { | |
725 | ret = mtd; | |
726 | break; | |
727 | } | |
728 | } | |
729 | } else if (num >= 0) { | |
730 | ret = idr_find(&mtd_idr, num); | |
e29c22f5 KP |
731 | if (mtd && mtd != ret) |
732 | ret = NULL; | |
733 | } | |
734 | ||
ff94bc40 HS |
735 | if (!ret) { |
736 | ret = ERR_PTR(err); | |
737 | goto out; | |
738 | } | |
e29c22f5 | 739 | |
ff94bc40 HS |
740 | err = __get_mtd_device(ret); |
741 | if (err) | |
742 | ret = ERR_PTR(err); | |
743 | out: | |
744 | mutex_unlock(&mtd_table_mutex); | |
e29c22f5 | 745 | return ret; |
ff94bc40 HS |
746 | } |
747 | EXPORT_SYMBOL_GPL(get_mtd_device); | |
e29c22f5 | 748 | |
ff94bc40 HS |
749 | int __get_mtd_device(struct mtd_info *mtd) |
750 | { | |
751 | int err; | |
752 | ||
753 | if (!try_module_get(mtd->owner)) | |
754 | return -ENODEV; | |
755 | ||
756 | if (mtd->_get_device) { | |
757 | err = mtd->_get_device(mtd); | |
758 | ||
759 | if (err) { | |
760 | module_put(mtd->owner); | |
761 | return err; | |
762 | } | |
763 | } | |
764 | mtd->usecount++; | |
765 | return 0; | |
e29c22f5 | 766 | } |
ff94bc40 | 767 | EXPORT_SYMBOL_GPL(__get_mtd_device); |
e29c22f5 | 768 | |
dcb9a803 MB |
769 | #if CONFIG_IS_ENABLED(DM) && CONFIG_IS_ENABLED(OF_CONTROL) |
770 | static bool mtd_device_matches_name(struct mtd_info *mtd, const char *name) | |
771 | { | |
772 | struct udevice *dev = NULL; | |
773 | bool is_part; | |
774 | ||
775 | /* | |
776 | * If the first character of mtd name is '/', try interpreting as OF | |
777 | * path. Otherwise try comparing by mtd->name and mtd->dev->name. | |
778 | */ | |
779 | if (*name == '/') | |
780 | device_get_global_by_ofnode(ofnode_path(name), &dev); | |
781 | ||
782 | is_part = mtd_is_partition(mtd); | |
783 | ||
784 | return (!is_part && dev && mtd->dev == dev) || | |
785 | !strcmp(name, mtd->name) || | |
786 | (is_part && mtd->dev && !strcmp(name, mtd->dev->name)); | |
787 | } | |
788 | #else | |
789 | static bool mtd_device_matches_name(struct mtd_info *mtd, const char *name) | |
790 | { | |
791 | return !strcmp(name, mtd->name); | |
792 | } | |
793 | #endif | |
794 | ||
e29c22f5 | 795 | /** |
ff94bc40 HS |
796 | * get_mtd_device_nm - obtain a validated handle for an MTD device by |
797 | * device name | |
798 | * @name: MTD device name to open | |
e29c22f5 | 799 | * |
0cf207ec WD |
800 | * This function returns MTD device description structure in case of |
801 | * success and an error code in case of failure. | |
e29c22f5 KP |
802 | */ |
803 | struct mtd_info *get_mtd_device_nm(const char *name) | |
804 | { | |
ff94bc40 HS |
805 | int err = -ENODEV; |
806 | struct mtd_info *mtd = NULL, *other; | |
807 | ||
808 | mutex_lock(&mtd_table_mutex); | |
e29c22f5 | 809 | |
ff94bc40 | 810 | mtd_for_each_device(other) { |
dcb9a803 MB |
811 | #ifdef __UBOOT__ |
812 | if (mtd_device_matches_name(other, name)) { | |
813 | if (mtd) | |
814 | printf("\nWarning: MTD name \"%s\" is not unique!\n\n", | |
815 | name); | |
816 | mtd = other; | |
817 | } | |
818 | #else /* !__UBOOT__ */ | |
ff94bc40 HS |
819 | if (!strcmp(name, other->name)) { |
820 | mtd = other; | |
e29c22f5 KP |
821 | break; |
822 | } | |
dcb9a803 | 823 | #endif /* !__UBOOT__ */ |
e29c22f5 KP |
824 | } |
825 | ||
826 | if (!mtd) | |
827 | goto out_unlock; | |
828 | ||
ff94bc40 HS |
829 | err = __get_mtd_device(mtd); |
830 | if (err) | |
831 | goto out_unlock; | |
832 | ||
833 | mutex_unlock(&mtd_table_mutex); | |
e29c22f5 KP |
834 | return mtd; |
835 | ||
836 | out_unlock: | |
ff94bc40 | 837 | mutex_unlock(&mtd_table_mutex); |
e29c22f5 KP |
838 | return ERR_PTR(err); |
839 | } | |
ff94bc40 | 840 | EXPORT_SYMBOL_GPL(get_mtd_device_nm); |
4ba692fb BG |
841 | |
842 | #if defined(CONFIG_CMD_MTDPARTS_SPREAD) | |
843 | /** | |
844 | * mtd_get_len_incl_bad | |
845 | * | |
846 | * Check if length including bad blocks fits into device. | |
847 | * | |
848 | * @param mtd an MTD device | |
849 | * @param offset offset in flash | |
850 | * @param length image length | |
185f812c | 851 | * Return: image length including bad blocks in *len_incl_bad and whether or not |
4ba692fb BG |
852 | * the length returned was truncated in *truncated |
853 | */ | |
854 | void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset, | |
855 | const uint64_t length, uint64_t *len_incl_bad, | |
856 | int *truncated) | |
857 | { | |
858 | *truncated = 0; | |
859 | *len_incl_bad = 0; | |
860 | ||
5da163d6 | 861 | if (!mtd->_block_isbad) { |
4ba692fb BG |
862 | *len_incl_bad = length; |
863 | return; | |
864 | } | |
865 | ||
866 | uint64_t len_excl_bad = 0; | |
867 | uint64_t block_len; | |
868 | ||
869 | while (len_excl_bad < length) { | |
36650ca9 SW |
870 | if (offset >= mtd->size) { |
871 | *truncated = 1; | |
872 | return; | |
873 | } | |
874 | ||
4ba692fb BG |
875 | block_len = mtd->erasesize - (offset & (mtd->erasesize - 1)); |
876 | ||
5da163d6 | 877 | if (!mtd->_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) |
4ba692fb BG |
878 | len_excl_bad += block_len; |
879 | ||
880 | *len_incl_bad += block_len; | |
881 | offset += block_len; | |
4ba692fb BG |
882 | } |
883 | } | |
884 | #endif /* defined(CONFIG_CMD_MTDPARTS_SPREAD) */ | |
dfe64e2c | 885 | |
ff94bc40 HS |
886 | void put_mtd_device(struct mtd_info *mtd) |
887 | { | |
888 | mutex_lock(&mtd_table_mutex); | |
889 | __put_mtd_device(mtd); | |
890 | mutex_unlock(&mtd_table_mutex); | |
891 | ||
892 | } | |
893 | EXPORT_SYMBOL_GPL(put_mtd_device); | |
894 | ||
895 | void __put_mtd_device(struct mtd_info *mtd) | |
896 | { | |
897 | --mtd->usecount; | |
898 | BUG_ON(mtd->usecount < 0); | |
899 | ||
900 | if (mtd->_put_device) | |
901 | mtd->_put_device(mtd); | |
902 | ||
903 | module_put(mtd->owner); | |
904 | } | |
905 | EXPORT_SYMBOL_GPL(__put_mtd_device); | |
906 | ||
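
The typical lookup/use/release pattern for the get/put helpers above, assuming a device named "example-nor" was registered earlier (the name is hypothetical).

```c
static int example_lookup(void)
{
        struct mtd_info *mtd;

        mtd = get_mtd_device_nm("example-nor");   /* or get_mtd_device(NULL, 0) */
        if (IS_ERR(mtd))
                return PTR_ERR(mtd);

        /* ... mtd_read()/mtd_write()/mtd_erase() calls go here ... */

        put_mtd_device(mtd);    /* drops the usecount taken by the lookup */
        return 0;
}
```
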
dfe64e2c SL |
907 | int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) |
908 | { | |
909 | if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr) | |
910 | return -EINVAL; | |
911 | if (!(mtd->flags & MTD_WRITEABLE)) | |
912 | return -EROFS; | |
913 | instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; | |
914 | if (!instr->len) { | |
915 | instr->state = MTD_ERASE_DONE; | |
dfe64e2c SL |
916 | return 0; |
917 | } | |
918 | return mtd->_erase(mtd, instr); | |
919 | } | |
ff94bc40 HS |
920 | EXPORT_SYMBOL_GPL(mtd_erase); |
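
A hedged sketch of erasing one block through the wrapper above. It assumes the older struct erase_info layout used by this code base (with an mtd back-pointer); mtd_erase() itself only validates the request, and completion semantics depend on the driver's _erase() hook.

```c
static int example_erase_block(struct mtd_info *mtd, loff_t offs)
{
        struct erase_info instr;

        memset(&instr, 0, sizeof(instr));
        instr.mtd  = mtd;               /* assumed field; dropped in newer kernels */
        instr.addr = offs;
        instr.len  = mtd->erasesize;

        return mtd_erase(mtd, &instr);
}
```
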
921 | ||
922 | #ifndef __UBOOT__ | |
923 | /* | |
924 | * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL. | |
925 | */ | |
926 | int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, | |
927 | void **virt, resource_size_t *phys) | |
928 | { | |
929 | *retlen = 0; | |
930 | *virt = NULL; | |
931 | if (phys) | |
932 | *phys = 0; | |
933 | if (!mtd->_point) | |
934 | return -EOPNOTSUPP; | |
935 | if (from < 0 || from > mtd->size || len > mtd->size - from) | |
936 | return -EINVAL; | |
937 | if (!len) | |
938 | return 0; | |
939 | return mtd->_point(mtd, from, len, retlen, virt, phys); | |
940 | } | |
941 | EXPORT_SYMBOL_GPL(mtd_point); | |
942 | ||
943 | /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ | |
944 | int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | |
945 | { | |
946 | if (!mtd->_point) | |
947 | return -EOPNOTSUPP; | |
948 | if (from < 0 || from > mtd->size || len > mtd->size - from) | |
949 | return -EINVAL; | |
950 | if (!len) | |
951 | return 0; | |
952 | return mtd->_unpoint(mtd, from, len); | |
953 | } | |
954 | EXPORT_SYMBOL_GPL(mtd_unpoint); | |
955 | #endif | |
956 | ||
957 | /* | |
958 | * Allow NOMMU mmap() to directly map the device (if not NULL) | |
959 | * - return the address to which the offset maps | |
960 | * - return -ENOSYS to indicate refusal to do the mapping | |
961 | */ | |
962 | unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, | |
963 | unsigned long offset, unsigned long flags) | |
964 | { | |
965 | if (!mtd->_get_unmapped_area) | |
966 | return -EOPNOTSUPP; | |
967 | if (offset > mtd->size || len > mtd->size - offset) | |
968 | return -EINVAL; | |
969 | return mtd->_get_unmapped_area(mtd, len, offset, flags); | |
970 | } | |
971 | EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); | |
dfe64e2c SL |
972 | |
973 | int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, | |
974 | u_char *buf) | |
975 | { | |
40462e54 | 976 | int ret_code; |
ff94bc40 | 977 | *retlen = 0; |
dfe64e2c SL |
978 | if (from < 0 || from > mtd->size || len > mtd->size - from) |
979 | return -EINVAL; | |
980 | if (!len) | |
981 | return 0; | |
40462e54 PB |
982 | |
983 | /* | |
984 | * In the absence of an error, drivers return a non-negative integer | |
985 | * representing the maximum number of bitflips that were corrected on | |
986 | * any one ecc region (if applicable; zero otherwise). | |
987 | */ | |
596cf083 BB |
988 | if (mtd->_read) { |
989 | ret_code = mtd->_read(mtd, from, len, retlen, buf); | |
990 | } else if (mtd->_read_oob) { | |
991 | struct mtd_oob_ops ops = { | |
992 | .len = len, | |
993 | .datbuf = buf, | |
994 | }; | |
995 | ||
996 | ret_code = mtd->_read_oob(mtd, from, &ops); | |
997 | *retlen = ops.retlen; | |
998 | } else { | |
999 | return -ENOTSUPP; | |
1000 | } | |
1001 | ||
40462e54 PB |
1002 | if (unlikely(ret_code < 0)) |
1003 | return ret_code; | |
1004 | if (mtd->ecc_strength == 0) | |
1005 | return 0; /* device lacks ecc */ | |
1006 | return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0; | |
dfe64e2c | 1007 | } |
ff94bc40 | 1008 | EXPORT_SYMBOL_GPL(mtd_read); |
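
A sketch of a caller honouring the -EUCLEAN convention described in the comments above: -EUCLEAN means the data was corrected but the bitflip count reached mtd->bitflip_threshold, so the data is usable and the block is merely a scrubbing candidate. The function name is illustrative.

```c
static int example_read(struct mtd_info *mtd, loff_t from,
                        size_t len, u_char *buf)
{
        size_t retlen;
        int ret;

        ret = mtd_read(mtd, from, len, &retlen, buf);
        if (ret == -EUCLEAN)
                return 0;       /* data is valid; block is wearing out */
        if (ret < 0)
                return ret;     /* genuine read/ECC failure */

        return retlen == len ? 0 : -EIO;
}
```
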
dfe64e2c SL |
1009 | |
1010 | int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, | |
1011 | const u_char *buf) | |
1012 | { | |
1013 | *retlen = 0; | |
1014 | if (to < 0 || to > mtd->size || len > mtd->size - to) | |
1015 | return -EINVAL; | |
596cf083 BB |
1016 | if ((!mtd->_write && !mtd->_write_oob) || |
1017 | !(mtd->flags & MTD_WRITEABLE)) | |
dfe64e2c SL |
1018 | return -EROFS; |
1019 | if (!len) | |
1020 | return 0; | |
596cf083 BB |
1021 | |
1022 | if (!mtd->_write) { | |
1023 | struct mtd_oob_ops ops = { | |
1024 | .len = len, | |
1025 | .datbuf = (u8 *)buf, | |
1026 | }; | |
1027 | int ret; | |
1028 | ||
1029 | ret = mtd->_write_oob(mtd, to, &ops); | |
1030 | *retlen = ops.retlen; | |
1031 | return ret; | |
1032 | } | |
1033 | ||
dfe64e2c SL |
1034 | return mtd->_write(mtd, to, len, retlen, buf); |
1035 | } | |
ff94bc40 | 1036 | EXPORT_SYMBOL_GPL(mtd_write); |
dfe64e2c SL |
1037 | |
1038 | /* | |
1039 | * In blackbox flight recorder like scenarios we want to make successful writes | |
1040 | * in interrupt context. panic_write() is only intended to be called when its | |
1041 | * known the kernel is about to panic and we need the write to succeed. Since | |
1042 | * the kernel is not going to be running for much longer, this function can | |
1043 | * break locks and delay to ensure the write succeeds (but not sleep). | |
1044 | */ | |
1045 | int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, | |
1046 | const u_char *buf) | |
1047 | { | |
1048 | *retlen = 0; | |
1049 | if (!mtd->_panic_write) | |
1050 | return -EOPNOTSUPP; | |
1051 | if (to < 0 || to > mtd->size || len > mtd->size - to) | |
1052 | return -EINVAL; | |
1053 | if (!(mtd->flags & MTD_WRITEABLE)) | |
1054 | return -EROFS; | |
1055 | if (!len) | |
1056 | return 0; | |
1057 | return mtd->_panic_write(mtd, to, len, retlen, buf); | |
1058 | } | |
ff94bc40 | 1059 | EXPORT_SYMBOL_GPL(mtd_panic_write); |
dfe64e2c | 1060 | |
8fad769f BB |
1061 | static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs, |
1062 | struct mtd_oob_ops *ops) | |
1063 | { | |
1064 | /* | |
1065 | * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving | |
1066 | * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in | |
1067 | * this case. | |
1068 | */ | |
1069 | if (!ops->datbuf) | |
1070 | ops->len = 0; | |
1071 | ||
1072 | if (!ops->oobbuf) | |
1073 | ops->ooblen = 0; | |
1074 | ||
1075 | if (offs < 0 || offs + ops->len > mtd->size) | |
1076 | return -EINVAL; | |
1077 | ||
1078 | if (ops->ooblen) { | |
3f3aef4b | 1079 | size_t maxooblen; |
8fad769f BB |
1080 | |
1081 | if (ops->ooboffs >= mtd_oobavail(mtd, ops)) | |
1082 | return -EINVAL; | |
1083 | ||
3f3aef4b MR |
1084 | maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) - |
1085 | mtd_div_by_ws(offs, mtd)) * | |
8fad769f BB |
1086 | mtd_oobavail(mtd, ops)) - ops->ooboffs; |
1087 | if (ops->ooblen > maxooblen) | |
1088 | return -EINVAL; | |
1089 | } | |
1090 | ||
1091 | return 0; | |
1092 | } | |
1093 | ||
dfe64e2c SL |
1094 | int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) |
1095 | { | |
ff94bc40 | 1096 | int ret_code; |
dfe64e2c | 1097 | ops->retlen = ops->oobretlen = 0; |
8fad769f BB |
1098 | |
1099 | ret_code = mtd_check_oob_ops(mtd, from, ops); | |
1100 | if (ret_code) | |
1101 | return ret_code; | |
1102 | ||
ca040d85 MR |
1103 | /* Check the validity of a potential fallback on mtd->_read */ |
1104 | if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf)) | |
1105 | return -EOPNOTSUPP; | |
1106 | ||
1107 | if (mtd->_read_oob) | |
1108 | ret_code = mtd->_read_oob(mtd, from, ops); | |
1109 | else | |
1110 | ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen, | |
1111 | ops->datbuf); | |
1112 | ||
ff94bc40 HS |
1113 | /* |
1114 | * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics | |
1115 | * similar to mtd->_read(), returning a non-negative integer | |
1116 | * representing max bitflips. In other cases, mtd->_read_oob() may | |
1117 | * return -EUCLEAN. In all cases, perform similar logic to mtd_read(). | |
1118 | */ | |
ff94bc40 HS |
1119 | if (unlikely(ret_code < 0)) |
1120 | return ret_code; | |
1121 | if (mtd->ecc_strength == 0) | |
1122 | return 0; /* device lacks ecc */ | |
1123 | return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0; | |
dfe64e2c | 1124 | } |
ff94bc40 | 1125 | EXPORT_SYMBOL_GPL(mtd_read_oob); |
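
An illustrative mtd_oob_ops setup for the OOB path above, reading one page of data plus its free OOB bytes. MTD_OPS_AUTO_OOB and the oobavail field are part of the MTD API; the function name and buffers are placeholders.

```c
static int example_read_page_oob(struct mtd_info *mtd, loff_t page_addr,
                                 u8 *data, u8 *oob)
{
        struct mtd_oob_ops ops = {
                .mode   = MTD_OPS_AUTO_OOB,     /* map through the free OOB layout */
                .datbuf = data,
                .len    = mtd->writesize,
                .oobbuf = oob,
                .ooblen = mtd->oobavail,
        };

        return mtd_read_oob(mtd, page_addr, &ops);
}
```
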
dfe64e2c | 1126 | |
1fac5772 MT |
1127 | /* This is a bare copy of mtd_read_oob returning the actual number of bitflips */ |
1128 | int mtd_read_oob_bf(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) | |
1129 | { | |
1130 | int ret_code; | |
1131 | ops->retlen = ops->oobretlen = 0; | |
1132 | if (!mtd->_read_oob) | |
1133 | return -EOPNOTSUPP; | |
1134 | /* | |
1135 | * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics | |
1136 | * similar to mtd->_read(), returning a non-negative integer | |
1137 | * representing max bitflips. In other cases, mtd->_read_oob() may | |
1138 | * return -EUCLEAN. In all cases, perform similar logic to mtd_read(). | |
1139 | */ | |
1140 | ret_code = mtd->_read_oob(mtd, from, ops); | |
1141 | if (unlikely(ret_code < 0)) | |
1142 | return ret_code; | |
1143 | if (mtd->ecc_strength == 0) | |
1144 | return 0; /* device lacks ecc */ | |
1145 | return ret_code; | |
1146 | } | |
1147 | EXPORT_SYMBOL_GPL(mtd_read_oob_bf); | |
1148 | ||
5f50d82d EG |
1149 | int mtd_write_oob(struct mtd_info *mtd, loff_t to, |
1150 | struct mtd_oob_ops *ops) | |
1151 | { | |
8fad769f BB |
1152 | int ret; |
1153 | ||
5f50d82d | 1154 | ops->retlen = ops->oobretlen = 0; |
ca040d85 | 1155 | |
5f50d82d EG |
1156 | if (!(mtd->flags & MTD_WRITEABLE)) |
1157 | return -EROFS; | |
8fad769f BB |
1158 | |
1159 | ret = mtd_check_oob_ops(mtd, to, ops); | |
1160 | if (ret) | |
1161 | return ret; | |
1162 | ||
ca040d85 MR |
1163 | /* Check the validity of a potential fallback on mtd->_write */ |
1164 | if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf)) | |
1165 | return -EOPNOTSUPP; | |
1166 | ||
1167 | if (mtd->_write_oob) | |
1168 | return mtd->_write_oob(mtd, to, ops); | |
1169 | else | |
1170 | return mtd->_write(mtd, to, ops->len, &ops->retlen, | |
1171 | ops->datbuf); | |
5f50d82d EG |
1172 | } |
1173 | EXPORT_SYMBOL_GPL(mtd_write_oob); | |
1174 | ||
13f3b04f BB |
1175 | /** |
1176 | * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section | |
1177 | * @mtd: MTD device structure | |
1178 | * @section: ECC section. Depending on the layout you may have all the ECC | |
1179 | * bytes stored in a single contiguous section, or one section | |
1180 | * per ECC chunk (and sometimes several sections for a single | |
1181 | * ECC chunk) | |
1182 | * @oobecc: OOB region struct filled with the appropriate ECC position | |
1183 | * information | |
1184 | * | |
1185 | * This function returns ECC section information in the OOB area. If you want | |
1186 | * to get all the ECC bytes information, then you should call | |
1187 | * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE. | |
1188 | * | |
1189 | * Returns zero on success, a negative error code otherwise. | |
1190 | */ | |
1191 | int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, | |
1192 | struct mtd_oob_region *oobecc) | |
1193 | { | |
1194 | memset(oobecc, 0, sizeof(*oobecc)); | |
1195 | ||
1196 | if (!mtd || section < 0) | |
1197 | return -EINVAL; | |
1198 | ||
1199 | if (!mtd->ooblayout || !mtd->ooblayout->ecc) | |
1200 | return -ENOTSUPP; | |
1201 | ||
1202 | return mtd->ooblayout->ecc(mtd, section, oobecc); | |
1203 | } | |
1204 | EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc); | |
1205 | ||
1206 | /** | |
1207 | * mtd_ooblayout_free - Get the OOB region definition of a specific free | |
1208 | * section | |
1209 | * @mtd: MTD device structure | |
1210 | * @section: Free section you are interested in. Depending on the layout | |
1211 | * you may have all the free bytes stored in a single contiguous | |
1212 | * section, or one section per ECC chunk plus an extra section | |
1213 | * for the remaining bytes (or other funky layout). | |
1214 | * @oobfree: OOB region struct filled with the appropriate free position | |
1215 | * information | |
1216 | * | |
1217 | * This function returns free bytes position in the OOB area. If you want | |
1218 | * to get all the free bytes information, then you should call | |
1219 | * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE. | |
1220 | * | |
1221 | * Returns zero on success, a negative error code otherwise. | |
1222 | */ | |
1223 | int mtd_ooblayout_free(struct mtd_info *mtd, int section, | |
1224 | struct mtd_oob_region *oobfree) | |
1225 | { | |
1226 | memset(oobfree, 0, sizeof(*oobfree)); | |
1227 | ||
1228 | if (!mtd || section < 0) | |
1229 | return -EINVAL; | |
1230 | ||
8d38a845 | 1231 | if (!mtd->ooblayout || !mtd->ooblayout->rfree) |
13f3b04f BB |
1232 | return -ENOTSUPP; |
1233 | ||
8d38a845 | 1234 | return mtd->ooblayout->rfree(mtd, section, oobfree); |
13f3b04f BB |
1235 | } |
1236 | EXPORT_SYMBOL_GPL(mtd_ooblayout_free); | |
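
The "call with section++ until -ERANGE" pattern that the two kernel-doc comments above describe, shown here summing the ECC bytes by hand (equivalent to mtd_ooblayout_count_eccbytes() further below). The function name is illustrative.

```c
static int example_sum_ecc_bytes(struct mtd_info *mtd)
{
        struct mtd_oob_region region;
        int section = 0, total = 0, ret;

        while (!(ret = mtd_ooblayout_ecc(mtd, section++, &region)))
                total += region.length;

        return ret == -ERANGE ? total : ret;    /* -ERANGE ends the iteration */
}
```
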
1237 | ||
1238 | /** | |
1239 | * mtd_ooblayout_find_region - Find the region attached to a specific byte | |
1240 | * @mtd: mtd info structure | |
1241 | * @byte: the byte we are searching for | |
1242 | * @sectionp: pointer where the section id will be stored | |
1243 | * @oobregion: used to retrieve the ECC position | |
1244 | * @iter: iterator function. Should be either mtd_ooblayout_free or | |
1245 | * mtd_ooblayout_ecc depending on the region type you're searching for | |
1246 | * | |
1247 | * This function returns the section id and oobregion information of a | |
1248 | * specific byte. For example, say you want to know where the 4th ECC byte is | |
1249 | * stored, you'll use: | |
1250 | * | |
1251 | * mtd_ooblayout_find_region(mtd, 3, §ion, &oobregion, mtd_ooblayout_ecc); | |
1252 | * | |
1253 | * Returns zero on success, a negative error code otherwise. | |
1254 | */ | |
1255 | static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte, | |
1256 | int *sectionp, struct mtd_oob_region *oobregion, | |
1257 | int (*iter)(struct mtd_info *, | |
1258 | int section, | |
1259 | struct mtd_oob_region *oobregion)) | |
1260 | { | |
1261 | int pos = 0, ret, section = 0; | |
1262 | ||
1263 | memset(oobregion, 0, sizeof(*oobregion)); | |
1264 | ||
1265 | while (1) { | |
1266 | ret = iter(mtd, section, oobregion); | |
1267 | if (ret) | |
1268 | return ret; | |
1269 | ||
1270 | if (pos + oobregion->length > byte) | |
1271 | break; | |
1272 | ||
1273 | pos += oobregion->length; | |
1274 | section++; | |
1275 | } | |
1276 | ||
1277 | /* | |
1278 | * Adjust region info to make it start at the beginning of the | |
1279 | * 'start' ECC byte. | |
1280 | */ | |
1281 | oobregion->offset += byte - pos; | |
1282 | oobregion->length -= byte - pos; | |
1283 | *sectionp = section; | |
1284 | ||
1285 | return 0; | |
1286 | } | |
1287 | ||
1288 | /** | |
1289 | * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific | |
1290 | * ECC byte | |
1291 | * @mtd: mtd info structure | |
1292 | * @eccbyte: the byte we are searching for | |
1293 | * @sectionp: pointer where the section id will be stored | |
1294 | * @oobregion: OOB region information | |
1295 | * | |
1296 | * Works like mtd_ooblayout_find_region() except it searches for a specific ECC | |
1297 | * byte. | |
1298 | * | |
1299 | * Returns zero on success, a negative error code otherwise. | |
1300 | */ | |
1301 | int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, | |
1302 | int *section, | |
1303 | struct mtd_oob_region *oobregion) | |
1304 | { | |
1305 | return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion, | |
1306 | mtd_ooblayout_ecc); | |
1307 | } | |
1308 | EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion); | |
1309 | ||
1310 | /** | |
1311 | * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer | |
1312 | * @mtd: mtd info structure | |
1313 | * @buf: destination buffer to store OOB bytes | |
1314 | * @oobbuf: OOB buffer | |
1315 | * @start: first byte to retrieve | |
1316 | * @nbytes: number of bytes to retrieve | |
1317 | * @iter: section iterator | |
1318 | * | |
1319 | * Extract bytes attached to a specific category (ECC or free) | |
1320 | * from the OOB buffer and copy them into buf. | |
1321 | * | |
1322 | * Returns zero on success, a negative error code otherwise. | |
1323 | */ | |
1324 | static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf, | |
1325 | const u8 *oobbuf, int start, int nbytes, | |
1326 | int (*iter)(struct mtd_info *, | |
1327 | int section, | |
1328 | struct mtd_oob_region *oobregion)) | |
1329 | { | |
1330 | struct mtd_oob_region oobregion; | |
1331 | int section, ret; | |
1332 | ||
1333 | ret = mtd_ooblayout_find_region(mtd, start, §ion, | |
1334 | &oobregion, iter); | |
1335 | ||
1336 | while (!ret) { | |
1337 | int cnt; | |
1338 | ||
1339 | cnt = min_t(int, nbytes, oobregion.length); | |
1340 | memcpy(buf, oobbuf + oobregion.offset, cnt); | |
1341 | buf += cnt; | |
1342 | nbytes -= cnt; | |
1343 | ||
1344 | if (!nbytes) | |
1345 | break; | |
1346 | ||
1347 | ret = iter(mtd, ++section, &oobregion); | |
1348 | } | |
1349 | ||
1350 | return ret; | |
1351 | } | |
1352 | ||
1353 | /** | |
1354 | * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer | |
1355 | * @mtd: mtd info structure | |
1356 | * @buf: source buffer to get OOB bytes from | |
1357 | * @oobbuf: OOB buffer | |
1358 | * @start: first OOB byte to set | |
1359 | * @nbytes: number of OOB bytes to set | |
1360 | * @iter: section iterator | |
1361 | * | |
1362 | * Fill the OOB buffer with data provided in buf. The category (ECC or free) | |
1363 | * is selected by passing the appropriate iterator. | |
1364 | * | |
1365 | * Returns zero on success, a negative error code otherwise. | |
1366 | */ | |
1367 | static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf, | |
1368 | u8 *oobbuf, int start, int nbytes, | |
1369 | int (*iter)(struct mtd_info *, | |
1370 | int section, | |
1371 | struct mtd_oob_region *oobregion)) | |
1372 | { | |
1373 | struct mtd_oob_region oobregion; | |
1374 | int section, ret; | |
1375 | ||
1376 | ret = mtd_ooblayout_find_region(mtd, start, §ion, | |
1377 | &oobregion, iter); | |
1378 | ||
1379 | while (!ret) { | |
1380 | int cnt; | |
1381 | ||
1382 | cnt = min_t(int, nbytes, oobregion.length); | |
1383 | memcpy(oobbuf + oobregion.offset, buf, cnt); | |
1384 | buf += cnt; | |
1385 | nbytes -= cnt; | |
1386 | ||
1387 | if (!nbytes) | |
1388 | break; | |
1389 | ||
1390 | ret = iter(mtd, ++section, &oobregion); | |
1391 | } | |
1392 | ||
1393 | return ret; | |
1394 | } | |
1395 | ||
1396 | /** | |
1397 | * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category | |
1398 | * @mtd: mtd info structure | |
1399 | * @iter: category iterator | |
1400 | * | |
1401 | * Count the number of bytes in a given category. | |
1402 | * | |
1403 | * Returns a positive value on success, a negative error code otherwise. | |
1404 | */ | |
1405 | static int mtd_ooblayout_count_bytes(struct mtd_info *mtd, | |
1406 | int (*iter)(struct mtd_info *, | |
1407 | int section, | |
1408 | struct mtd_oob_region *oobregion)) | |
1409 | { | |
1410 | struct mtd_oob_region oobregion; | |
1411 | int section = 0, ret, nbytes = 0; | |
1412 | ||
1413 | while (1) { | |
1414 | ret = iter(mtd, section++, &oobregion); | |
1415 | if (ret) { | |
1416 | if (ret == -ERANGE) | |
1417 | ret = nbytes; | |
1418 | break; | |
1419 | } | |
1420 | ||
1421 | nbytes += oobregion.length; | |
1422 | } | |
1423 | ||
1424 | return ret; | |
1425 | } | |
1426 | ||
1427 | /** | |
1428 | * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer | |
1429 | * @mtd: mtd info structure | |
1430 | * @eccbuf: destination buffer to store ECC bytes | |
1431 | * @oobbuf: OOB buffer | |
1432 | * @start: first ECC byte to retrieve | |
1433 | * @nbytes: number of ECC bytes to retrieve | |
1434 | * | |
1435 | * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes. | |
1436 | * | |
1437 | * Returns zero on success, a negative error code otherwise. | |
1438 | */ | |
1439 | int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, | |
1440 | const u8 *oobbuf, int start, int nbytes) | |
1441 | { | |
1442 | return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes, | |
1443 | mtd_ooblayout_ecc); | |
1444 | } | |
1445 | EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes); | |
1446 | ||
1447 | /** | |
1448 | * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer | |
1449 | * @mtd: mtd info structure | |
1450 | * @eccbuf: source buffer to get ECC bytes from | |
1451 | * @oobbuf: OOB buffer | |
1452 | * @start: first ECC byte to set | |
1453 | * @nbytes: number of ECC bytes to set | |
1454 | * | |
1455 | * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes. | |
1456 | * | |
1457 | * Returns zero on success, a negative error code otherwise. | |
1458 | */ | |
1459 | int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, | |
1460 | u8 *oobbuf, int start, int nbytes) | |
1461 | { | |
1462 | return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes, | |
1463 | mtd_ooblayout_ecc); | |
1464 | } | |
1465 | EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes); | |
1466 | ||
1467 | /** | |
1468 | * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer | |
1469 | * @mtd: mtd info structure | |
1470 | * @databuf: destination buffer to store data bytes | |
1471 | * @oobbuf: OOB buffer | |
1472 | * @start: first data byte to retrieve | |
1473 | * @nbytes: number of data bytes to retrieve | |
1474 | * | |
1475 | * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes. | |
1476 | * | |
1477 | * Returns zero on success, a negative error code otherwise. | |
1478 | */ | |
1479 | int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, | |
1480 | const u8 *oobbuf, int start, int nbytes) | |
1481 | { | |
1482 | return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes, | |
1483 | mtd_ooblayout_free); | |
1484 | } | |
1485 | EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes); | |
1486 | ||
1487 | /** | |
1488 | * mtd_ooblayout_set_databytes - set data bytes into the oob buffer | |
1489 | * @mtd: mtd info structure | |
1490 | * @databuf: source buffer to get data bytes from | |
1491 | * @oobbuf: OOB buffer | |
1492 | * @start: first data byte to set | |
1493 | * @nbytes: number of data bytes to set | |
1494 | * | |
1495 | * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes. | |
1496 | * | |
1497 | * Returns zero on success, a negative error code otherwise. | |
1498 | */ | |
1499 | int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, | |
1500 | u8 *oobbuf, int start, int nbytes) | |
1501 | { | |
1502 | return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes, | |
1503 | mtd_ooblayout_free); | |
1504 | } | |
1505 | EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes); | |
1506 | ||
1507 | /** | |
1508 | * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB | |
1509 | * @mtd: mtd info structure | |
1510 | * | |
1511 | * Works like mtd_ooblayout_count_bytes(), except it counts free bytes. | |
1512 | * | |
1513 | * Returns the number of free bytes on success, a negative error code otherwise. | |
1514 | */ | |
1515 | int mtd_ooblayout_count_freebytes(struct mtd_info *mtd) | |
1516 | { | |
1517 | return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free); | |
1518 | } | |
1519 | EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes); | |
1520 | ||
1521 | /** | |
1522 | * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB | |
1523 | * @mtd: mtd info structure | |
1524 | * | |
1525 | * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes. | |
1526 | * | |
1527 | * Returns the number of ECC bytes on success, a negative error code otherwise. | |
1528 | */ | |
1529 | int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd) | |
1530 | { | |
1531 | return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc); | |
1532 | } | |
1533 | EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes); | |
1534 | ||
dfe64e2c SL |
1535 | /* |
1536 | * Method to access the protection register area, present in some flash | |
1537 | * devices. The user data is one time programmable but the factory data is read | |
1538 | * only. | |
1539 | */ | |
4e67c571 HS |
1540 | int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, |
1541 | struct otp_info *buf) | |
dfe64e2c SL |
1542 | { |
1543 | if (!mtd->_get_fact_prot_info) | |
1544 | return -EOPNOTSUPP; | |
1545 | if (!len) | |
1546 | return 0; | |
4e67c571 | 1547 | return mtd->_get_fact_prot_info(mtd, len, retlen, buf); |
dfe64e2c | 1548 | } |
ff94bc40 | 1549 | EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info); |
dfe64e2c SL |
1550 | |
1551 | int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, | |
1552 | size_t *retlen, u_char *buf) | |
1553 | { | |
1554 | *retlen = 0; | |
1555 | if (!mtd->_read_fact_prot_reg) | |
1556 | return -EOPNOTSUPP; | |
1557 | if (!len) | |
1558 | return 0; | |
1559 | return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf); | |
1560 | } | |
ff94bc40 | 1561 | EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg); |
dfe64e2c | 1562 | |
4e67c571 HS |
1563 | int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, |
1564 | struct otp_info *buf) | |
dfe64e2c SL |
1565 | { |
1566 | if (!mtd->_get_user_prot_info) | |
1567 | return -EOPNOTSUPP; | |
1568 | if (!len) | |
1569 | return 0; | |
4e67c571 | 1570 | return mtd->_get_user_prot_info(mtd, len, retlen, buf); |
dfe64e2c | 1571 | } |
ff94bc40 | 1572 | EXPORT_SYMBOL_GPL(mtd_get_user_prot_info); |
dfe64e2c SL |
1573 | |
1574 | int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, | |
1575 | size_t *retlen, u_char *buf) | |
1576 | { | |
1577 | *retlen = 0; | |
1578 | if (!mtd->_read_user_prot_reg) | |
1579 | return -EOPNOTSUPP; | |
1580 | if (!len) | |
1581 | return 0; | |
1582 | return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf); | |
1583 | } | |
ff94bc40 | 1584 | EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg); |
dfe64e2c SL |
1585 | |
1586 | int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, | |
1587 | size_t *retlen, u_char *buf) | |
1588 | { | |
4e67c571 HS |
1589 | int ret; |
1590 | ||
dfe64e2c SL |
1591 | *retlen = 0; |
1592 | if (!mtd->_write_user_prot_reg) | |
1593 | return -EOPNOTSUPP; | |
1594 | if (!len) | |
1595 | return 0; | |
4e67c571 HS |
1596 | ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf); |
1597 | if (ret) | |
1598 | return ret; | |
1599 | ||
1600 | /* | |
1601 | * If no data could be written at all, the OTP area is exhausted and | 
1602 | * we must return -ENOSPC. | 
1603 | */ | |
1604 | return (*retlen) ? 0 : -ENOSPC; | |
dfe64e2c | 1605 | } |
ff94bc40 | 1606 | EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg); |
dfe64e2c SL |
1607 | |
1608 | int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) | |
1609 | { | |
1610 | if (!mtd->_lock_user_prot_reg) | |
1611 | return -EOPNOTSUPP; | |
1612 | if (!len) | |
1613 | return 0; | |
1614 | return mtd->_lock_user_prot_reg(mtd, from, len); | |
1615 | } | |
ff94bc40 | 1616 | EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg); |
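The -ENOSPC rule above is easiest to see from the caller's side. Below is a hedged sketch of programming and then locking a value in the user OTP area; the helper name burn_serial() and the offset 0 are illustrative assumptions.

#include <linux/mtd/mtd.h>

/* Illustrative sketch: program a value into user OTP, then lock it for good. */
static int burn_serial(struct mtd_info *mtd, u_char *serial, size_t len)
{
	size_t retlen;
	int ret;

	ret = mtd_write_user_prot_reg(mtd, 0, len, &retlen, serial);
	if (ret == -ENOSPC)
		return ret;			/* nothing at all could be written */
	if (ret || retlen != len)
		return ret ? ret : -EIO;	/* treat partial writes as fatal */

	/* Optional and irreversible on real hardware. */
	return mtd_lock_user_prot_reg(mtd, 0, len);
}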
dfe64e2c SL |
1617 | |
1618 | /* Chip-supported device locking */ | |
1619 | int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |
1620 | { | |
1621 | if (!mtd->_lock) | |
1622 | return -EOPNOTSUPP; | |
1623 | if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs) | |
1624 | return -EINVAL; | |
1625 | if (!len) | |
1626 | return 0; | |
1627 | return mtd->_lock(mtd, ofs, len); | |
1628 | } | |
ff94bc40 | 1629 | EXPORT_SYMBOL_GPL(mtd_lock); |
dfe64e2c SL |
1630 | |
1631 | int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |
1632 | { | |
1633 | if (!mtd->_unlock) | |
1634 | return -EOPNOTSUPP; | |
1635 | if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs) | |
1636 | return -EINVAL; | |
1637 | if (!len) | |
1638 | return 0; | |
1639 | return mtd->_unlock(mtd, ofs, len); | |
1640 | } | |
ff94bc40 HS |
1641 | EXPORT_SYMBOL_GPL(mtd_unlock); |
1642 | ||
1643 | int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |
1644 | { | |
1645 | if (!mtd->_is_locked) | |
1646 | return -EOPNOTSUPP; | |
1647 | if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs) | |
1648 | return -EINVAL; | |
1649 | if (!len) | |
1650 | return 0; | |
1651 | return mtd->_is_locked(mtd, ofs, len); | |
1652 | } | |
1653 | EXPORT_SYMBOL_GPL(mtd_is_locked); | |
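A short sketch of how these wrappers are commonly combined before programming a region: chips without locking support report -EOPNOTSUPP and can simply be treated as always writable. The helper name ensure_unlocked() is hypothetical.

#include <linux/mtd/mtd.h>

/* Illustrative sketch: make sure [ofs, ofs + len) is writable, if lockable. */
static int ensure_unlocked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret = mtd_is_locked(mtd, ofs, len);

	if (ret < 0 && ret != -EOPNOTSUPP)
		return ret;		/* range invalid or query failed */
	if (ret <= 0)
		return 0;		/* already unlocked, or chip cannot lock */

	return mtd_unlock(mtd, ofs, len);
}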
dfe64e2c | 1654 | |
86a720aa | 1655 | int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs) |
dfe64e2c | 1656 | { |
86a720aa EG |
1657 | if (ofs < 0 || ofs > mtd->size) |
1658 | return -EINVAL; | |
1659 | if (!mtd->_block_isreserved) | |
dfe64e2c | 1660 | return 0; |
86a720aa EG |
1661 | return mtd->_block_isreserved(mtd, ofs); |
1662 | } | |
1663 | EXPORT_SYMBOL_GPL(mtd_block_isreserved); | |
1664 | ||
1665 | int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) | |
1666 | { | |
dfe64e2c SL |
1667 | if (ofs < 0 || ofs > mtd->size) |
1668 | return -EINVAL; | |
86a720aa EG |
1669 | if (!mtd->_block_isbad) |
1670 | return 0; | |
dfe64e2c SL |
1671 | return mtd->_block_isbad(mtd, ofs); |
1672 | } | |
ff94bc40 | 1673 | EXPORT_SYMBOL_GPL(mtd_block_isbad); |
dfe64e2c SL |
1674 | |
1675 | int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) | |
1676 | { | |
1677 | if (!mtd->_block_markbad) | |
1678 | return -EOPNOTSUPP; | |
1679 | if (ofs < 0 || ofs > mtd->size) | |
1680 | return -EINVAL; | |
1681 | if (!(mtd->flags & MTD_WRITEABLE)) | |
1682 | return -EROFS; | |
1683 | return mtd->_block_markbad(mtd, ofs); | |
1684 | } | |
ff94bc40 HS |
1685 | EXPORT_SYMBOL_GPL(mtd_block_markbad); |
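These block helpers are typically used together when scanning a NAND-style device. The sketch below counts the erase blocks that are neither reserved (e.g. blocks holding a bad-block table) nor marked bad; the helper name count_good_blocks() is illustrative.

#include <linux/mtd/mtd.h>

/* Illustrative sketch: count usable erase blocks on a NAND-style device. */
static int count_good_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int good = 0;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		if (mtd_block_isreserved(mtd, ofs))
			continue;	/* e.g. blocks used by the BBT */
		if (mtd_block_isbad(mtd, ofs))
			continue;	/* factory or runtime bad block */
		good++;
	}

	return good;
}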
1686 | ||
1687 | #ifndef __UBOOT__ | |
1688 | /* | |
1689 | * default_mtd_writev - the default writev method | |
1690 | * @mtd: mtd device description object pointer | |
1691 | * @vecs: the vectors to write | |
1692 | * @count: count of vectors in @vecs | |
1693 | * @to: the MTD device offset to write to | |
1694 | * @retlen: on exit contains the count of bytes written to the MTD device. | |
1695 | * | |
1696 | * This function returns zero in case of success and a negative error code in | |
1697 | * case of failure. | |
1698 | */ | |
1699 | static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, | |
1700 | unsigned long count, loff_t to, size_t *retlen) | |
1701 | { | |
1702 | unsigned long i; | |
1703 | size_t totlen = 0, thislen; | |
1704 | int ret = 0; | |
1705 | ||
1706 | for (i = 0; i < count; i++) { | |
1707 | if (!vecs[i].iov_len) | |
1708 | continue; | |
1709 | ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen, | |
1710 | vecs[i].iov_base); | |
1711 | totlen += thislen; | |
1712 | if (ret || thislen != vecs[i].iov_len) | |
1713 | break; | |
1714 | to += vecs[i].iov_len; | |
1715 | } | |
1716 | *retlen = totlen; | |
1717 | return ret; | |
1718 | } | |
1719 | ||
1720 | /* | |
1721 | * mtd_writev - the vector-based MTD write method | |
1722 | * @mtd: mtd device description object pointer | |
1723 | * @vecs: the vectors to write | |
1724 | * @count: count of vectors in @vecs | |
1725 | * @to: the MTD device offset to write to | |
1726 | * @retlen: on exit contains the count of bytes written to the MTD device. | |
1727 | * | |
1728 | * This function returns zero in case of success and a negative error code in | |
1729 | * case of failure. | |
1730 | */ | |
1731 | int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, | |
1732 | unsigned long count, loff_t to, size_t *retlen) | |
1733 | { | |
1734 | *retlen = 0; | |
1735 | if (!(mtd->flags & MTD_WRITEABLE)) | |
1736 | return -EROFS; | |
1737 | if (!mtd->_writev) | |
1738 | return default_mtd_writev(mtd, vecs, count, to, retlen); | |
1739 | return mtd->_writev(mtd, vecs, count, to, retlen); | |
1740 | } | |
1741 | EXPORT_SYMBOL_GPL(mtd_writev); | |
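The calling convention is easiest to show with a small example: a hedged sketch that gathers a header and a payload into a single mtd_writev() call using two kvec entries. The helper name and its parameters are illustrative.

#include <linux/kernel.h>
#include <linux/uio.h>
#include <linux/mtd/mtd.h>

/* Illustrative sketch: gather-write a header and payload in one call. */
static int write_header_and_body(struct mtd_info *mtd, loff_t to,
				 void *hdr, size_t hdr_len,
				 void *body, size_t body_len)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,  .iov_len = hdr_len  },
		{ .iov_base = body, .iov_len = body_len },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (!ret && retlen != hdr_len + body_len)
		ret = -EIO;	/* short write without an error code */

	return ret;
}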
1742 | ||
1743 | /** | |
1744 | * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size | |
1745 | * @mtd: mtd device description object pointer | |
1746 | * @size: a pointer to the ideal or maximum size of the allocation, points | |
1747 | * to the actual allocation size on success. | |
1748 | * | |
1749 | * This routine attempts to allocate a contiguous kernel buffer up to | |
1750 | * the specified size, backing off the size of the request exponentially | |
1751 | * until the request succeeds or until the allocation size falls below | |
1752 | * the system page size. This attempts to make sure it does not adversely | |
1753 | * impact system performance, so when allocating more than one page, we | |
1754 | * ask the memory allocator to avoid re-trying, swapping, writing back | |
1755 | * or performing I/O. | |
1756 | * | |
1757 | * Note, this function also makes sure that the allocated buffer is aligned to | |
1758 | * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value. | |
1759 | * | |
1760 | * This is called, for example, by mtd_{read,write} and jffs2_scan_medium, | 
1761 | * to handle smaller (i.e. degraded) buffer allocations under low- or | |
1762 | * fragmented-memory situations where such reduced allocations, from a | |
1763 | * requested ideal, are allowed. | |
1764 | * | |
1765 | * Returns a pointer to the allocated buffer on success; otherwise, NULL. | |
1766 | */ | |
1767 | void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size) | |
1768 | { | |
1769 | gfp_t flags = __GFP_NOWARN | __GFP_WAIT | | |
1770 | __GFP_NORETRY | __GFP_NO_KSWAPD; | |
1771 | size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE); | |
1772 | void *kbuf; | |
1773 | ||
1774 | *size = min_t(size_t, *size, KMALLOC_MAX_SIZE); | |
1775 | ||
1776 | while (*size > min_alloc) { | |
1777 | kbuf = kmalloc(*size, flags); | |
1778 | if (kbuf) | |
1779 | return kbuf; | |
1780 | ||
1781 | *size >>= 1; | |
1782 | *size = ALIGN(*size, mtd->writesize); | |
1783 | } | |
1784 | ||
1785 | /* | |
1786 | * For the last resort allocation allow 'kmalloc()' to do all sorts of | |
1787 | * things (write-back, dropping caches, etc) by using GFP_KERNEL. | |
1788 | */ | |
1789 | return kmalloc(*size, GFP_KERNEL); | |
1790 | } | |
1791 | EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to); | |
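A brief sketch of the intended use: request the whole range as the ideal size, accept whatever smaller buffer the helper managed to allocate, and process the range in chunks of that size. The helper read_range_chunked() and its consumption step are illustrative assumptions.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>

/* Illustrative sketch: read a large range with a best-effort buffer. */
static int read_range_chunked(struct mtd_info *mtd, loff_t from, size_t total)
{
	size_t chunk = total;	/* ideal: one buffer for the whole range */
	size_t retlen, done = 0;
	void *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &chunk);	/* may shrink 'chunk' */
	if (!buf)
		return -ENOMEM;

	while (done < total && !ret) {
		size_t len = min(chunk, total - done);

		ret = mtd_read(mtd, from + done, len, &retlen, buf);
		if (mtd_is_bitflip(ret))
			ret = 0;	/* correctable bitflips: data still valid */
		/* ... consume retlen bytes of buf here ... */
		done += retlen;
	}

	kfree(buf);
	return ret;
}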
1792 | #endif | |
1793 | ||
1794 | #ifdef CONFIG_PROC_FS | |
1795 | ||
1796 | /*====================================================================*/ | |
1797 | /* Support for /proc/mtd */ | |
1798 | ||
1799 | static int mtd_proc_show(struct seq_file *m, void *v) | |
1800 | { | |
1801 | struct mtd_info *mtd; | |
1802 | ||
1803 | seq_puts(m, "dev: size erasesize name\n"); | |
1804 | mutex_lock(&mtd_table_mutex); | |
1805 | mtd_for_each_device(mtd) { | |
1806 | seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n", | |
1807 | mtd->index, (unsigned long long)mtd->size, | |
1808 | mtd->erasesize, mtd->name); | |
1809 | } | |
1810 | mutex_unlock(&mtd_table_mutex); | |
1811 | return 0; | |
1812 | } | |
1813 | ||
1814 | static int mtd_proc_open(struct inode *inode, struct file *file) | |
1815 | { | |
1816 | return single_open(file, mtd_proc_show, NULL); | |
1817 | } | |
1818 | ||
1819 | static const struct file_operations mtd_proc_ops = { | |
1820 | .open = mtd_proc_open, | |
1821 | .read = seq_read, | |
1822 | .llseek = seq_lseek, | |
1823 | .release = single_release, | |
1824 | }; | |
1825 | #endif /* CONFIG_PROC_FS */ | |
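For reference, mtd_proc_show() above emits one line per registered device in the format of its seq_printf() call. On a hypothetical system with one NAND and one NOR device the file might read roughly as follows (device names, sizes and column spacing are illustrative, not taken from a real board):

dev: size erasesize name
mtd0: 10000000 00020000 "nand0"
mtd1: 01000000 00010000 "nor0"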
1826 | ||
1827 | /*====================================================================*/ | |
1828 | /* Init code */ | |
1829 | ||
1830 | #ifndef __UBOOT__ | |
1831 | static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name) | |
1832 | { | |
1833 | int ret; | |
1834 | ||
1835 | ret = bdi_init(bdi); | |
1836 | if (!ret) | |
1837 | ret = bdi_register(bdi, NULL, "%s", name); | |
1838 | ||
1839 | if (ret) | |
1840 | bdi_destroy(bdi); | |
1841 | ||
1842 | return ret; | |
1843 | } | |
1844 | ||
1845 | static struct proc_dir_entry *proc_mtd; | |
1846 | ||
1847 | static int __init init_mtd(void) | |
1848 | { | |
1849 | int ret; | |
1850 | ||
1851 | ret = class_register(&mtd_class); | |
1852 | if (ret) | |
1853 | goto err_reg; | |
1854 | ||
1855 | ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap"); | |
1856 | if (ret) | |
1857 | goto err_bdi1; | |
1858 | ||
1859 | ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap"); | |
1860 | if (ret) | |
1861 | goto err_bdi2; | |
1862 | ||
1863 | ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap"); | |
1864 | if (ret) | |
1865 | goto err_bdi3; | |
1866 | ||
1867 | proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops); | |
1868 | ||
1869 | ret = init_mtdchar(); | |
1870 | if (ret) | |
1871 | goto out_procfs; | |
1872 | ||
1873 | return 0; | |
1874 | ||
1875 | out_procfs: | |
1876 | if (proc_mtd) | |
1877 | remove_proc_entry("mtd", NULL); | |
1878 | err_bdi3: | |
1879 | bdi_destroy(&mtd_bdi_ro_mappable); | |
1880 | err_bdi2: | |
1881 | bdi_destroy(&mtd_bdi_unmappable); | |
1882 | err_bdi1: | |
1883 | class_unregister(&mtd_class); | |
1884 | err_reg: | |
1885 | pr_err("Error registering mtd class or bdi: %d\n", ret); | |
1886 | return ret; | |
1887 | } | |
1888 | ||
1889 | static void __exit cleanup_mtd(void) | |
1890 | { | |
1891 | cleanup_mtdchar(); | |
1892 | if (proc_mtd) | |
1893 | remove_proc_entry("mtd", NULL); | |
1894 | class_unregister(&mtd_class); | |
1895 | bdi_destroy(&mtd_bdi_unmappable); | |
1896 | bdi_destroy(&mtd_bdi_ro_mappable); | |
1897 | bdi_destroy(&mtd_bdi_rw_mappable); | |
1898 | } | |
1899 | ||
1900 | module_init(init_mtd); | |
1901 | module_exit(cleanup_mtd); | |
1902 | #endif | |
1903 | ||
1904 | MODULE_LICENSE("GPL"); | |
1905 | MODULE_AUTHOR("David Woodhouse <[email protected]>"); | |
1906 | MODULE_DESCRIPTION("Core MTD registration and access routines"); |