// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>
#include <linux/error-injection.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/concat.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	idr_remove(&mtd_idr, mtd->index);
	of_node_put(mtd_get_of_node(mtd));

	if (mtd_is_partition(mtd))
		release_mtd_partition(mtd);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static void mtd_device_release(struct kref *kref)
{
	struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
	bool is_partition = mtd_is_partition(mtd);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	/* Try to remove the NVMEM provider */
	nvmem_unregister(mtd->nvmem);

	device_unregister(&mtd->dev);
	/*
	 * Clear dev so mtd can be safely re-registered later if desired.
	 * Should not be done for partitions, as their device was already
	 * destroyed in device_unregister().
	 */
	if (!is_partition)
		memset(&mtd->dev, 0, sizeof(mtd->dev));

	module_put(THIS_MODULE);
}

#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
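/*
 * For reference, MTD_DEVICE_ATTR_RO(type) expands to the usual device
 * attribute boilerplate (a sketch of the generated code, not an extra
 * definition):
 *
 *	static struct device_attribute dev_attr_type =
 *		__ATTR(type, 0444, mtd_type_show, NULL);
 */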

static ssize_t mtd_type_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);

static ssize_t mtd_flags_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
					struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};

static bool mtd_expert_analysis_mode;

#ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
{
	const char *mtd_expert_analysis_warning =
		"Bad block checks have been entirely disabled.\n"
		"This is only reserved for post-mortem forensics and debug purposes.\n"
		"Never enable this mode if you do not know what you are doing!\n";

	return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
#endif

static struct dentry *dfs_dir_mtd;

static int mtd_ooblayout_show(struct seq_file *s, void *p,
			      int (*iter)(struct mtd_info *, int section,
					  struct mtd_oob_region *region))
{
	struct mtd_info *mtd = s->private;
	int section;

	for (section = 0;; section++) {
		struct mtd_oob_region region;
		int err;

		err = iter(mtd, section, &region);
		if (err) {
			if (err == -ERANGE)
				break;

			return err;
		}

		seq_printf(s, "%-3d %4u %4u\n", section, region.offset,
			   region.length);
	}

	return 0;
}

static int mtd_ooblayout_ecc_show(struct seq_file *s, void *p)
{
	return mtd_ooblayout_show(s, p, mtd_ooblayout_ecc);
}
DEFINE_SHOW_ATTRIBUTE(mtd_ooblayout_ecc);

static int mtd_ooblayout_free_show(struct seq_file *s, void *p)
{
	return mtd_ooblayout_show(s, p, mtd_ooblayout_free);
}
DEFINE_SHOW_ATTRIBUTE(mtd_ooblayout_free);

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct device *dev = &mtd->dev;
	struct mtd_oob_region region;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
	if (IS_ERR_OR_NULL(mtd->dbg.dfs_dir))
		return;

	/* Create ooblayout files only if at least one region is present. */
	if (mtd_ooblayout_ecc(mtd, 0, &region) == 0)
		debugfs_create_file("ooblayout_ecc", 0444, mtd->dbg.dfs_dir,
				    mtd, &mtd_ooblayout_ecc_fops);

	if (mtd_ooblayout_free(mtd, 0, &region) == 0)
		debugfs_create_file("ooblayout_free", 0444, mtd->dbg.dfs_dir,
				    mtd, &mtd_ooblayout_free_fops);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.pair = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
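
/*
 * Illustrative sketch (not kernel API): program only the group-0 page of
 * each pair, as described above. The @mtd handle, @blkoffs base offset and
 * @buf buffer are assumed to be provided by the caller.
 *
 *	static int example_write_slc_like(struct mtd_info *mtd, loff_t blkoffs,
 *					  const u8 *buf)
 *	{
 *		struct mtd_pairing_info info = { .group = 0 };
 *		int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 *		size_t retlen;
 *		int i, wunit, err;
 *
 *		for (i = 0; i < npairs; i++) {
 *			info.pair = i;
 *			wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *			if (wunit < 0)
 *				return wunit;
 *
 *			err = mtd_write(mtd,
 *					mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *					mtd->writesize, &retlen,
 *					buf + (i * mtd->writesize));
 *			if (err)
 *				return err;
 *		}
 *		return 0;
 *	}
 */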

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = NVMEM_DEVID_NONE;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.ignore_wp = true;
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
			mtd->nvmem = NULL;
		else
			return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
					     "Failed to register NVMEM device\n");
	}

	return 0;
}

static void mtd_check_of_node(struct mtd_info *mtd)
{
	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
	const char *pname, *prefix = "partition-";
	int plen, mtd_name_len, offset, prefix_len;

	/* Check if MTD already has a device node */
	if (mtd_get_of_node(mtd))
		return;

	if (!mtd_is_partition(mtd))
		return;

	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
	if (!parent_dn)
		return;

	if (mtd_is_partition(mtd->parent))
		partitions = of_node_get(parent_dn);
	else
		partitions = of_get_child_by_name(parent_dn, "partitions");
	if (!partitions)
		goto exit_parent;

	prefix_len = strlen(prefix);
	mtd_name_len = strlen(mtd->name);

	/* Search if a partition is defined with the same name */
	for_each_child_of_node(partitions, mtd_dn) {
		/* Skip partition with no/wrong prefix */
		if (!of_node_name_prefix(mtd_dn, prefix))
			continue;

		/* Labels have priority. Check them first */
		if (!of_property_read_string(mtd_dn, "label", &pname)) {
			offset = 0;
		} else {
			pname = mtd_dn->name;
			offset = prefix_len;
		}

		plen = strlen(pname) - offset;
		if (plen == mtd_name_len &&
		    !strncmp(mtd->name, pname + offset, plen)) {
			mtd_set_of_node(mtd, mtd_dn);
			of_node_put(mtd_dn);
			break;
		}
	}

	of_node_put(partitions);
exit_parent:
	of_node_put(parent_dn);
}

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct device_node *np = mtd_get_of_node(mtd);
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error, ofidx;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	ofidx = -1;
	if (np)
		ofidx = of_alias_get_id(np, "mtd");
	if (ofidx >= 0)
		i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
	else
		i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	kref_init(&mtd->refcnt);

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	error = dev_set_name(&mtd->dev, "mtd%d", i);
	if (error)
		goto fail_devname;
	dev_set_drvdata(&mtd->dev, mtd);
	mtd_check_of_node(mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error) {
		put_device(&mtd->dev);
		goto fail_added;
	}

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);

	if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
		if (IS_BUILTIN(CONFIG_MTD)) {
			pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
		} else {
			pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
				mtd->index, mtd->name);
		}
	}

	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
fail_devname:
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or a negative error code on failure, which
 * currently will happen if the requested device is not present in the list.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	kref_put(&mtd->refcnt, mtd_device_release);
	ret = 0;

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}

static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
	struct otp_info *info;
	ssize_t size = 0;
	unsigned int i;
	size_t retlen;
	int ret;

	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (is_user)
		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
	else
		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
	if (ret)
		goto err;

	for (i = 0; i < retlen / sizeof(*info); i++)
		size += info[i].length;

	kfree(info);
	return size;

err:
	kfree(info);

	/* ENODATA means there is no OTP region. */
	return ret == -ENODATA ? 0 : ret;
}

static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
						   const char *compatible,
						   int size,
						   nvmem_reg_read_t reg_read)
{
	struct nvmem_device *nvmem = NULL;
	struct nvmem_config config = {};
	struct device_node *np;

	/* DT binding is optional */
	np = of_get_compatible_child(mtd->dev.of_node, compatible);

	/* OTP nvmem will be registered on the physical device */
	config.dev = mtd->dev.parent;
	config.name = compatible;
	config.id = NVMEM_DEVID_AUTO;
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
	config.type = NVMEM_TYPE_OTP;
	config.root_only = true;
	config.ignore_wp = true;
	config.reg_read = reg_read;
	config.size = size;
	config.of_node = np;
	config.priv = mtd;

	nvmem = nvmem_register(&config);
	/* Just ignore if there is no NVMEM support in the kernel */
	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
		nvmem = NULL;

	of_node_put(np);

	return nvmem;
}

static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
	struct device *dev = mtd->dev.parent;
	struct nvmem_device *nvmem;
	ssize_t size;
	int err;

	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
		size = mtd_otp_size(mtd, true);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
						       mtd_nvmem_user_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_user_nvmem = nvmem;
		}
	}

	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
		size = mtd_otp_size(mtd, false);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			/*
			 * The factory OTP contains things such as a unique serial
			 * number and is small, so let's read it out and put it
			 * into the entropy pool.
			 */
			void *otp;

			otp = kmalloc(size, GFP_KERNEL);
			if (!otp) {
				err = -ENOMEM;
				goto err;
			}
			err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
			if (err < 0) {
				kfree(otp);
				goto err;
			}
			add_device_randomness(otp, err);
			kfree(otp);

			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
						       mtd_nvmem_fact_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_factory_nvmem = nvmem;
		}
	}

	return 0;

err:
	nvmem_unregister(mtd->otp_user_nvmem);
	/* Don't report error if OTP is not supported. */
	if (err == -EOPNOTSUPP)
		return 0;
	return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret, err;

	mtd_set_dev_defaults(mtd);

	ret = mtd_otp_nvmem_add(mtd);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			goto out;
	}

	if (IS_REACHABLE(CONFIG_MTD_VIRT_CONCAT)) {
		ret = mtd_virt_concat_node_create();
		if (ret < 0)
			goto out;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	if (IS_REACHABLE(CONFIG_MTD_VIRT_CONCAT)) {
		ret = mtd_virt_concat_create_join();
		if (ret < 0)
			goto out;
	}

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	if (ret) {
		nvmem_unregister(mtd->otp_user_nvmem);
		nvmem_unregister(mtd->otp_factory_nvmem);
	}

	if (ret && device_is_registered(&mtd->dev)) {
		err = del_mtd_device(mtd);
		if (err)
			pr_err("Error when deleting MTD device (%d)\n", err);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
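
/*
 * Typical use from a driver's probe path (illustrative sketch only; the
 * example_* names and the "fallback_parts" table are hypothetical):
 *
 *	static const struct mtd_partition fallback_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_1M },
 *		{ .name = "data", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = example_setup_chip(pdev);
 *
 *		if (IS_ERR(mtd))
 *			return PTR_ERR(mtd);
 *
 *		mtd->dev.parent = &pdev->dev;
 *		return mtd_device_parse_register(mtd, NULL, NULL,
 *						 fallback_parts,
 *						 ARRAY_SIZE(fallback_parts));
 *	}
 */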

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister. This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot) {
		unregister_reboot_notifier(&master->reboot_notifier);
		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
	}

	nvmem_unregister(master->otp_user_nvmem);
	nvmem_unregister(master->otp_factory_nvmem);

	if (IS_REACHABLE(CONFIG_MTD_VIRT_CONCAT)) {
		err = mtd_virt_concat_destroy(master);
		if (err)
			return err;
	}

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user(struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
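
/*
 * A minimal MTD user (illustrative sketch; the example_* names are
 * hypothetical). The add() callback runs immediately for every MTD device
 * already present, then again for each later arrival:
 *
 *	static void example_add_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void example_remove_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) is going away\n", mtd->index, mtd->name);
 *	}
 *
 *	static struct mtd_notifier example_notifier = {
 *		.add = example_add_mtd,
 *		.remove = example_remove_mtd,
 *	};
 *
 *	register_mtd_user(&example_notifier);
 */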

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user(struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present. Given
 * both, return the num'th device only if its address matches. Return
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (master->_get_device) {
		err = master->_get_device(mtd);
		if (err)
			return err;
	}

	if (!try_module_get(master->owner)) {
		if (master->_put_device)
			master->_put_device(master);
		return -ENODEV;
	}

	while (mtd) {
		if (mtd != master)
			kref_get(&mtd->refcnt);
		mtd = mtd->parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_get(&master->refcnt);

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
 *
 * @np: device tree node
 */
struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
{
	struct mtd_info *mtd = NULL;
	struct mtd_info *tmp;
	int err;

	mutex_lock(&mtd_table_mutex);

	err = -EPROBE_DEFER;
	mtd_for_each_device(tmp) {
		if (mtd_get_of_node(tmp) == np) {
			mtd = tmp;
			err = __get_mtd_device(mtd);
			break;
		}
	}

	mutex_unlock(&mtd_table_mutex);

	return err ? ERR_PTR(err) : mtd;
}
EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 * device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
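
/*
 * Typical open/use/release pattern (illustrative sketch; the "firmware"
 * partition name is an assumption):
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("firmware");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *
 *	... use mtd_read()/mtd_write() here ...
 *
 *	put_mtd_device(mtd);
 */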

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd) {
		/* kref_put() can release mtd, so keep a reference to mtd->parent */
		struct mtd_info *parent = mtd->parent;

		if (mtd != master)
			kref_put(&mtd->refcnt, mtd_device_release);
		mtd = parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_put(&master->refcnt, mtd_device_release);

	module_put(master->owner);

	/* must be the last as master can be freed in the _put_device */
	if (master->_put_device)
		master->_put_device(master);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and to update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
ALLOW_ERROR_INJECTION(mtd_erase, ERRNO);
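
/*
 * Erasing a single block before rewriting it (illustrative sketch; @mtd and
 * @offs come from the caller and @offs is assumed to be block-aligned):
 *
 *	struct erase_info ei = {
 *		.addr = offs,
 *		.len = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &ei);
 *
 *	if (err)
 *		pr_err("erase failed at 0x%llx (%d)\n",
 *		       (unsigned long long)ei.fail_addr, err);
 */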

/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);
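
/*
 * Typical point/unpoint usage for a direct-mapped read (illustrative sketch;
 * @mtd, @from, @count and @dst are assumed to be provided and validated by
 * the caller):
 *
 *	size_t retlen;
 *	void *virt;
 *	int err;
 *
 *	err = mtd_point(mtd, from, count, &retlen, &virt, NULL);
 *	if (!err) {
 *		memcpy(dst, virt, retlen);
 *		mtd_unpoint(mtd, from, retlen);
 *	}
 */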

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret));

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
ALLOW_ERROR_INJECTION(mtd_read, ERRNO);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
ALLOW_ERROR_INJECTION(mtd_write, ERRNO);
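
/*
 * Reading with bitflip awareness (illustrative sketch; @mtd, @from, @len and
 * @buf are the caller's). -EUCLEAN means the data was corrected and is valid,
 * but the block may want scrubbing:
 *
 *	size_t retlen;
 *	int err = mtd_read(mtd, from, len, &retlen, buf);
 *
 *	if (err == -EUCLEAN)
 *		pr_warn("correctable bitflips, consider scrubbing\n");
 *	else if (err < 0)
 *		return err;
 */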

/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it's
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (ops->stats)
		memset(ops->stats, 0, sizeof(*ops->stats));

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	if (ops->stats)
		ops->stats->max_bitflips = ret_code;
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
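
/*
 * Reading a page together with its free OOB bytes (illustrative sketch;
 * @mtd, @from, @databuf and @oobbuf are the caller's, sizes assumed valid):
 *
 *	struct mtd_oob_ops ops = {
 *		.mode = MTD_OPS_AUTO_OOB,
 *		.len = mtd->writesize,
 *		.datbuf = databuf,
 *		.ooblen = mtd->oobavail,
 *		.oobbuf = oobbuf,
 *	};
 *	int err = mtd_read_oob(mtd, from, &ops);
 */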

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
		  struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single
 *	     ECC chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobecc, 0, sizeof(*oobecc));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->ecc)
		return -ENOTSUPP;

	return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);

/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobfree, 0, sizeof(*oobfree));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->free)
		return -ENOTSUPP;

	return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
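
/*
 * Walking all sections of a given OOB category (illustrative sketch; works
 * the same with mtd_ooblayout_free()):
 *
 *	struct mtd_oob_region region;
 *	int section = 0, err;
 *
 *	while (!(err = mtd_ooblayout_ecc(mtd, section++, &region)))
 *		pr_info("ECC bytes at %u, length %u\n",
 *			region.offset, region.length);
 *
 *	if (err != -ERANGE)
 *		return err;
 */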

/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				     int *sectionp, struct mtd_oob_region *oobregion,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust region info to make it start at the beginning of the
	 * 'start' ECC byte.
	 */
1953 oobregion->offset += byte - pos;
1954 oobregion->length -= byte - pos;
1955 *sectionp = section;
1956
1957 return 0;
1958}
1959
1960/**
1961 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1962 * ECC byte
1963 * @mtd: mtd info structure
1964 * @eccbyte: the byte we are searching for
1965 * @section: pointer where the section id will be stored
1966 * @oobregion: OOB region information
1967 *
1968 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1969 * byte.
1970 *
1971 * Returns zero on success, a negative error code otherwise.
1972 */
1973int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1974 int *section,
1975 struct mtd_oob_region *oobregion)
1976{
1977 return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1978 mtd_ooblayout_ecc);
1979}
1980EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1981
1982/**
1983 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1984 * @mtd: mtd info structure
1985 * @buf: destination buffer to store OOB bytes
1986 * @oobbuf: OOB buffer
1987 * @start: first byte to retrieve
1988 * @nbytes: number of bytes to retrieve
1989 * @iter: section iterator
1990 *
1991 * Extract bytes attached to a specific category (ECC or free)
1992 * from the OOB buffer and copy them into buf.
1993 *
1994 * Returns zero on success, a negative error code otherwise.
1995 */
1996static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1997 const u8 *oobbuf, int start, int nbytes,
1998 int (*iter)(struct mtd_info *,
1999 int section,
2000 struct mtd_oob_region *oobregion))
2001{
2002 struct mtd_oob_region oobregion;
2003 int section, ret;
2004
2005	ret = mtd_ooblayout_find_region(mtd, start, &section,
2006 &oobregion, iter);
2007
2008 while (!ret) {
2009 int cnt;
2010
2011 cnt = min_t(int, nbytes, oobregion.length);
2012 memcpy(buf, oobbuf + oobregion.offset, cnt);
2013 buf += cnt;
2014 nbytes -= cnt;
2015
2016 if (!nbytes)
2017 break;
2018
2019 ret = iter(mtd, ++section, &oobregion);
2020 }
2021
2022 return ret;
2023}
2024
2025/**
2026 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
2027 * @mtd: mtd info structure
2028 * @buf: source buffer to get OOB bytes from
2029 * @oobbuf: OOB buffer
2030 * @start: first OOB byte to set
2031 * @nbytes: number of OOB bytes to set
2032 * @iter: section iterator
2033 *
2034 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
2035 * is selected by passing the appropriate iterator.
2036 *
2037 * Returns zero on success, a negative error code otherwise.
2038 */
2039static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
2040 u8 *oobbuf, int start, int nbytes,
2041 int (*iter)(struct mtd_info *,
2042 int section,
2043 struct mtd_oob_region *oobregion))
2044{
2045 struct mtd_oob_region oobregion;
2046 int section, ret;
2047
2048	ret = mtd_ooblayout_find_region(mtd, start, &section,
2049 &oobregion, iter);
2050
2051 while (!ret) {
2052 int cnt;
2053
2054 cnt = min_t(int, nbytes, oobregion.length);
2055 memcpy(oobbuf + oobregion.offset, buf, cnt);
2056 buf += cnt;
2057 nbytes -= cnt;
2058
2059 if (!nbytes)
2060 break;
2061
2062 ret = iter(mtd, ++section, &oobregion);
2063 }
2064
2065 return ret;
2066}
2067
2068/**
2069 * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
2070 * @mtd: mtd info structure
2071 * @iter: category iterator
2072 *
2073 * Count the number of bytes in a given category.
2074 *
2075 * Returns the byte count on success, a negative error code otherwise.
2076 */
2077static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
2078 int (*iter)(struct mtd_info *,
2079 int section,
2080 struct mtd_oob_region *oobregion))
2081{
2082 struct mtd_oob_region oobregion;
2083 int section = 0, ret, nbytes = 0;
2084
2085 while (1) {
2086 ret = iter(mtd, section++, &oobregion);
2087 if (ret) {
2088 if (ret == -ERANGE)
2089 ret = nbytes;
2090 break;
2091 }
2092
2093 nbytes += oobregion.length;
2094 }
2095
2096 return ret;
2097}
2098
2099/**
2100 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
2101 * @mtd: mtd info structure
2102 * @eccbuf: destination buffer to store ECC bytes
2103 * @oobbuf: OOB buffer
2104 * @start: first ECC byte to retrieve
2105 * @nbytes: number of ECC bytes to retrieve
2106 *
2107 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
2108 *
2109 * Returns zero on success, a negative error code otherwise.
2110 */
2111int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
2112 const u8 *oobbuf, int start, int nbytes)
2113{
2114 return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2115 mtd_ooblayout_ecc);
2116}
2117EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
2118
2119/**
2120 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
2121 * @mtd: mtd info structure
2122 * @eccbuf: source buffer to get ECC bytes from
2123 * @oobbuf: OOB buffer
2124 * @start: first ECC byte to set
2125 * @nbytes: number of ECC bytes to set
2126 *
2127 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
2128 *
2129 * Returns zero on success, a negative error code otherwise.
2130 */
2131int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
2132 u8 *oobbuf, int start, int nbytes)
2133{
2134 return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2135 mtd_ooblayout_ecc);
2136}
2137EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
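
/*
 * Editor's note: an illustrative sketch (not part of the original file)
 * pairing the two helpers above: gather the ECC bytes from a raw OOB
 * image, then scatter them back. The 16-byte ECC size and the function
 * name are assumptions made for the example.
 */
static int __maybe_unused example_ecc_roundtrip(struct mtd_info *mtd,
						u8 *oobbuf)
{
	u8 ecc[16];	/* assumed: layout exposes at least 16 ECC bytes */
	int ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc, oobbuf, 0, sizeof(ecc));
	if (ret)
		return ret;

	/* ... verify or recompute the ECC here ... */

	return mtd_ooblayout_set_eccbytes(mtd, ecc, oobbuf, 0, sizeof(ecc));
}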
2138
2139/**
2140 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
2141 * @mtd: mtd info structure
2142 * @databuf: destination buffer to store data bytes
2143 * @oobbuf: OOB buffer
2144 * @start: first data byte to retrieve
2145 * @nbytes: number of data bytes to retrieve
2146 *
2147 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
2148 *
2149 * Returns zero on success, a negative error code otherwise.
2150 */
2151int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
2152 const u8 *oobbuf, int start, int nbytes)
2153{
2154 return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
2155 mtd_ooblayout_free);
2156}
2157EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
2158
2159/**
2160 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
2161 * @mtd: mtd info structure
2162 * @databuf: source buffer to get data bytes from
2163 * @oobbuf: OOB buffer
2164 * @start: first data byte to set
2165 * @nbytes: number of data bytes to set
2166 *
2167 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
2168 *
2169 * Returns zero on success, a negative error code otherwise.
2170 */
2171int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
2172 u8 *oobbuf, int start, int nbytes)
2173{
2174 return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
2175 mtd_ooblayout_free);
2176}
2177EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
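
/*
 * Editor's note: a sketch (editorial addition) of how the data-byte
 * helpers are commonly combined with an OOB write: scatter user bytes
 * into the free positions of an erased OOB image, then issue a placed
 * OOB write. The names and the page offset handling are assumptions.
 */
static int __maybe_unused example_write_free_oob(struct mtd_info *mtd,
						 loff_t page_ofs,
						 const u8 *data, int nbytes)
{
	struct mtd_oob_ops ops = { };
	u8 *oobbuf;
	int ret;

	oobbuf = kmalloc(mtd->oobsize, GFP_KERNEL);
	if (!oobbuf)
		return -ENOMEM;

	/* untouched OOB positions must stay erased (0xff) on NAND */
	memset(oobbuf, 0xff, mtd->oobsize);

	ret = mtd_ooblayout_set_databytes(mtd, data, oobbuf, 0, nbytes);
	if (ret)
		goto out;

	ops.mode = MTD_OPS_PLACE_OOB;
	ops.oobbuf = oobbuf;
	ops.ooblen = mtd->oobsize;
	ret = mtd_write_oob(mtd, page_ofs, &ops);
out:
	kfree(oobbuf);
	return ret;
}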
2178
2179/**
2180 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
2181 * @mtd: mtd info structure
2182 *
2183 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
2184 *
2185 * Returns the free byte count on success, a negative error code otherwise.
2186 */
2187int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
2188{
2189 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
2190}
2191EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
2192
2193/**
2194 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
2195 * @mtd: mtd info structure
2196 *
2197 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
2198 *
2199 * Returns the ECC byte count on success, a negative error code otherwise.
2200 */
2201int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
2202{
2203 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
2204}
2205EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
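
/*
 * Editor's note: a small illustrative sketch, not upstream code. At
 * registration time mtdcore derives mtd->oobavail from
 * mtd_ooblayout_count_freebytes(); a driver test might cross-check the
 * layout the same way. The function name is hypothetical.
 */
static void __maybe_unused example_report_oob_budget(struct mtd_info *mtd)
{
	int free_bytes = mtd_ooblayout_count_freebytes(mtd);
	int ecc_bytes = mtd_ooblayout_count_eccbytes(mtd);

	if (free_bytes < 0 || ecc_bytes < 0)
		return;	/* no layout registered, or iterator error */

	pr_info("OOB budget: %u total, %d free, %d ECC\n",
		mtd->oobsize, free_bytes, ecc_bytes);
}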
2206
2207/*
2208 * Method to access the protection register area, present in some flash
2209 * devices. The user data is one time programmable but the factory data is read
2210 * only.
2211 */
2212int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2213 struct otp_info *buf)
2214{
2215 struct mtd_info *master = mtd_get_master(mtd);
2216
2217 if (!master->_get_fact_prot_info)
2218 return -EOPNOTSUPP;
2219 if (!len)
2220 return 0;
2221 return master->_get_fact_prot_info(master, len, retlen, buf);
2222}
2223EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2224
2225int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2226 size_t *retlen, u_char *buf)
2227{
2228 struct mtd_info *master = mtd_get_master(mtd);
2229
2230 *retlen = 0;
2231 if (!master->_read_fact_prot_reg)
2232 return -EOPNOTSUPP;
2233 if (!len)
2234 return 0;
2235 return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2236}
2237EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2238
2239int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2240 struct otp_info *buf)
2241{
2242 struct mtd_info *master = mtd_get_master(mtd);
2243
2244 if (!master->_get_user_prot_info)
2245 return -EOPNOTSUPP;
2246 if (!len)
2247 return 0;
2248 return master->_get_user_prot_info(master, len, retlen, buf);
2249}
2250EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2251
2252int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2253 size_t *retlen, u_char *buf)
2254{
2255 struct mtd_info *master = mtd_get_master(mtd);
2256
2257 *retlen = 0;
2258 if (!master->_read_user_prot_reg)
2259 return -EOPNOTSUPP;
2260 if (!len)
2261 return 0;
2262 return master->_read_user_prot_reg(master, from, len, retlen, buf);
2263}
2264EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2265
2266int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2267 size_t *retlen, const u_char *buf)
2268{
2269 struct mtd_info *master = mtd_get_master(mtd);
2270 int ret;
2271
2272 *retlen = 0;
2273 if (!master->_write_user_prot_reg)
2274 return -EOPNOTSUPP;
2275 if (!len)
2276 return 0;
2277 ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2278 if (ret)
2279 return ret;
2280
2281 /*
2282	 * If no data could be written at all, the OTP area is already
2283	 * fully programmed and we must return -ENOSPC.
2284 */
2285 return (*retlen) ? 0 : -ENOSPC;
2286}
2287EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
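
/*
 * Editor's note: a hedged usage sketch (editorial addition) for the OTP
 * accessors above. A write only sticks while the target bits are still
 * unprogrammed, and a zero *retlen is surfaced as -ENOSPC as the comment
 * above explains. The offset 0 and 8-byte payload are illustrative.
 */
static int __maybe_unused example_program_user_otp(struct mtd_info *mtd)
{
	u8 serial[8] = "EXAMPLE";
	u8 readback[8];
	size_t retlen;
	int ret;

	ret = mtd_write_user_prot_reg(mtd, 0, sizeof(serial), &retlen,
				      serial);
	if (ret)
		return ret;

	ret = mtd_read_user_prot_reg(mtd, 0, sizeof(readback), &retlen,
				     readback);
	if (ret)
		return ret;

	return memcmp(serial, readback, retlen) ? -EIO : 0;
}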
2288
2289int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2290{
2291 struct mtd_info *master = mtd_get_master(mtd);
2292
2293 if (!master->_lock_user_prot_reg)
2294 return -EOPNOTSUPP;
2295 if (!len)
2296 return 0;
2297 return master->_lock_user_prot_reg(master, from, len);
2298}
2299EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2300
2301int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2302{
2303 struct mtd_info *master = mtd_get_master(mtd);
2304
2305 if (!master->_erase_user_prot_reg)
2306 return -EOPNOTSUPP;
2307 if (!len)
2308 return 0;
2309 return master->_erase_user_prot_reg(master, from, len);
2310}
2311EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2312
2313/* Chip-supported device locking */
2314int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2315{
2316 struct mtd_info *master = mtd_get_master(mtd);
2317
2318 if (!master->_lock)
2319 return -EOPNOTSUPP;
2320 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2321 return -EINVAL;
2322 if (!len)
2323 return 0;
2324
2325 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2326 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2327 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2328 }
2329
2330 return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2331}
2332EXPORT_SYMBOL_GPL(mtd_lock);
2333
2334int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2335{
2336 struct mtd_info *master = mtd_get_master(mtd);
2337
2338 if (!master->_unlock)
2339 return -EOPNOTSUPP;
2340 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2341 return -EINVAL;
2342 if (!len)
2343 return 0;
2344
2345 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2346 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2347 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2348 }
2349
2350 return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2351}
2352EXPORT_SYMBOL_GPL(mtd_unlock);
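
/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * common unlock-write-relock pattern built on the helpers above. A
 * driver without an _unlock hook returns -EOPNOTSUPP, which callers
 * often treat as "nothing to unlock".
 */
static int __maybe_unused example_unlock_and_write(struct mtd_info *mtd,
						   loff_t ofs, size_t len,
						   const u_char *buf)
{
	size_t retlen;
	int ret;

	ret = mtd_unlock(mtd, ofs, len);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	ret = mtd_write(mtd, ofs, len, &retlen, buf);
	if (ret)
		return ret;

	/* best effort: restore protection where the chip supports it */
	mtd_lock(mtd, ofs, len);
	return 0;
}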
2353
2354int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2355{
2356 struct mtd_info *master = mtd_get_master(mtd);
2357
2358 if (!master->_is_locked)
2359 return -EOPNOTSUPP;
2360 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2361 return -EINVAL;
2362 if (!len)
2363 return 0;
2364
2365 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2366 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2367 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2368 }
2369
2370 return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2371}
2372EXPORT_SYMBOL_GPL(mtd_is_locked);
2373
2374int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2375{
2376 struct mtd_info *master = mtd_get_master(mtd);
2377
2378 if (ofs < 0 || ofs >= mtd->size)
2379 return -EINVAL;
2380 if (!master->_block_isreserved)
2381 return 0;
2382
2383 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2384 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2385
2386 return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2387}
2388EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2389
2390int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2391{
2392 struct mtd_info *master = mtd_get_master(mtd);
2393
2394 if (ofs < 0 || ofs >= mtd->size)
2395 return -EINVAL;
2396 if (!master->_block_isbad)
2397 return 0;
2398
2399 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2400 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2401
2402 return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2403}
2404EXPORT_SYMBOL_GPL(mtd_block_isbad);
2405
2406int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2407{
2408 struct mtd_info *master = mtd_get_master(mtd);
2409 loff_t moffs;
2410 int ret;
2411
2412 if (!master->_block_markbad)
2413 return -EOPNOTSUPP;
2414 if (ofs < 0 || ofs >= mtd->size)
2415 return -EINVAL;
2416 if (!(mtd->flags & MTD_WRITEABLE))
2417 return -EROFS;
2418
2419 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2420 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2421
2422 moffs = mtd_get_master_ofs(mtd, ofs);
2423
2424 if (master->_block_isbad) {
2425 ret = master->_block_isbad(master, moffs);
2426 if (ret > 0)
2427 return 0;
2428 }
2429
2430 ret = master->_block_markbad(master, moffs);
2431 if (ret)
2432 return ret;
2433
2434 while (mtd->parent) {
2435 mtd->ecc_stats.badblocks++;
2436 mtd = mtd->parent;
2437 }
2438
2439 return 0;
2440}
2441EXPORT_SYMBOL_GPL(mtd_block_markbad);
2442ALLOW_ERROR_INJECTION(mtd_block_markbad, ERRNO);
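
/*
 * Editor's note: an illustrative sketch (editorial addition) combining
 * the bad-block helpers above the way flash users typically do: skip
 * blocks that are already bad, and mark a block bad when erasing it
 * fails. The function name is hypothetical.
 */
static int __maybe_unused example_erase_block(struct mtd_info *mtd,
					      loff_t ofs)
{
	struct erase_info ei = {
		.addr = ofs,
		.len = mtd->erasesize,
	};
	int ret;

	ret = mtd_block_isbad(mtd, ofs);
	if (ret)
		return ret < 0 ? ret : -EIO;	/* error, or already bad */

	ret = mtd_erase(mtd, &ei);
	if (ret == -EIO)
		mtd_block_markbad(mtd, ofs);	/* best effort */

	return ret;
}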
2443
2444/*
2445 * default_mtd_writev - the default writev method
2446 * @mtd: mtd device description object pointer
2447 * @vecs: the vectors to write
2448 * @count: count of vectors in @vecs
2449 * @to: the MTD device offset to write to
2450 * @retlen: on exit contains the count of bytes written to the MTD device.
2451 *
2452 * This function returns zero in case of success and a negative error code in
2453 * case of failure.
2454 */
2455static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2456 unsigned long count, loff_t to, size_t *retlen)
2457{
2458 unsigned long i;
2459 size_t totlen = 0, thislen;
2460 int ret = 0;
2461
2462 for (i = 0; i < count; i++) {
2463 if (!vecs[i].iov_len)
2464 continue;
2465 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2466 vecs[i].iov_base);
2467 totlen += thislen;
2468 if (ret || thislen != vecs[i].iov_len)
2469 break;
2470 to += vecs[i].iov_len;
2471 }
2472 *retlen = totlen;
2473 return ret;
2474}
2475
2476/*
2477 * mtd_writev - the vector-based MTD write method
2478 * @mtd: mtd device description object pointer
2479 * @vecs: the vectors to write
2480 * @count: count of vectors in @vecs
2481 * @to: the MTD device offset to write to
2482 * @retlen: on exit contains the count of bytes written to the MTD device.
2483 *
2484 * This function returns zero in case of success and a negative error code in
2485 * case of failure.
2486 */
2487int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2488 unsigned long count, loff_t to, size_t *retlen)
2489{
2490 struct mtd_info *master = mtd_get_master(mtd);
2491
2492 *retlen = 0;
2493 if (!(mtd->flags & MTD_WRITEABLE))
2494 return -EROFS;
2495
2496 if (!master->_writev)
2497 return default_mtd_writev(mtd, vecs, count, to, retlen);
2498
2499 return master->_writev(master, vecs, count,
2500 mtd_get_master_ofs(mtd, to), retlen);
2501}
2502EXPORT_SYMBOL_GPL(mtd_writev);
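
/*
 * Editor's note: a short usage sketch, not upstream code, for the vector
 * write above: a header and a payload go out as one contiguous write
 * without an intermediate copy. Parameter names are assumptions.
 */
static int __maybe_unused example_write_two_buffers(struct mtd_info *mtd,
						    loff_t to,
						    void *hdr, size_t hdrlen,
						    void *body, size_t bodylen)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,  .iov_len = hdrlen },
		{ .iov_base = body, .iov_len = bodylen },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (!ret && retlen != hdrlen + bodylen)
		ret = -EIO;	/* short write */
	return ret;
}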
2503
2504/**
2505 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2506 * @mtd: mtd device description object pointer
2507 * @size: a pointer to the ideal or maximum size of the allocation, points
2508 * to the actual allocation size on success.
2509 *
2510 * This routine attempts to allocate a contiguous kernel buffer up to
2511 * the specified size, backing off the size of the request exponentially
2512 * until the request succeeds or until the allocation size falls below
2513 * the system page size. This attempts to make sure it does not adversely
2514 * impact system performance, so when allocating more than one page, we
2515 * ask the memory allocator to avoid re-trying, swapping, writing back
2516 * or performing I/O.
2517 *
2518 * Note, this function also makes sure that the allocated buffer is aligned to
2519 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2520 *
2521 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2522 * to handle smaller (i.e. degraded) buffer allocations under low- or
2523 * fragmented-memory situations where such reduced allocations, from a
2524 * requested ideal, are allowed.
2525 *
2526 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2527 */
2528void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2529{
2530 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2531 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2532 void *kbuf;
2533
2534 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2535
2536 while (*size > min_alloc) {
2537 kbuf = kmalloc(*size, flags);
2538 if (kbuf)
2539 return kbuf;
2540
2541 *size >>= 1;
2542 *size = ALIGN(*size, mtd->writesize);
2543 }
2544
2545 /*
2546 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2547 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2548 */
2549 return kmalloc(*size, GFP_KERNEL);
2550}
2551EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
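
/*
 * Editor's note: a hedged sketch (editorial addition) of the degraded-
 * allocation pattern the kernel-doc above describes: ask for a whole
 * eraseblock, accept whatever contiguous size comes back, and read the
 * block in chunks of that size. The function name is hypothetical.
 */
static int __maybe_unused example_read_eraseblock(struct mtd_info *mtd,
						  loff_t ofs)
{
	size_t size = mtd->erasesize, retlen = 0, done = 0;
	void *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &size);
	if (!buf)
		return -ENOMEM;

	while (done < mtd->erasesize && !ret) {
		size_t chunk = min_t(size_t, size, mtd->erasesize - done);

		ret = mtd_read(mtd, ofs + done, chunk, &retlen, buf);
		/* ... consume buf[0..retlen) here ... */
		done += retlen;
	}

	kfree(buf);
	return ret;
}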
2552
2553#ifdef CONFIG_PROC_FS
2554
2555/*====================================================================*/
2556/* Support for /proc/mtd */
2557
2558static int mtd_proc_show(struct seq_file *m, void *v)
2559{
2560 struct mtd_info *mtd;
2561
2562	seq_puts(m, "dev:    size   erasesize  name\n");
2563 mutex_lock(&mtd_table_mutex);
2564 mtd_for_each_device(mtd) {
2565 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2566 mtd->index, (unsigned long long)mtd->size,
2567 mtd->erasesize, mtd->name);
2568 }
2569 mutex_unlock(&mtd_table_mutex);
2570 return 0;
2571}
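
/*
 * Editor's note: with the header and format string above, /proc/mtd
 * output takes the following shape (device names and sizes below are
 * purely illustrative):
 *
 *	dev:    size   erasesize  name
 *	mtd0: 01000000 00010000 "spi0.0"
 *	mtd1: 00f00000 00020000 "rootfs"
 */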
2572#endif /* CONFIG_PROC_FS */
2573
2574/*====================================================================*/
2575/* Init code */
2576
2577static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2578{
2579 struct backing_dev_info *bdi;
2580 int ret;
2581
2582 bdi = bdi_alloc(NUMA_NO_NODE);
2583 if (!bdi)
2584 return ERR_PTR(-ENOMEM);
2585 bdi->ra_pages = 0;
2586 bdi->io_pages = 0;
2587
2588 /*
2589	 * We append a '-0' suffix to the name to keep the name format we
2590	 * used to get. Since this is called only once, the name is unique.
2591 */
2592 ret = bdi_register(bdi, "%.28s-0", name);
2593 if (ret)
2594 bdi_put(bdi);
2595
2596 return ret ? ERR_PTR(ret) : bdi;
2597}
2598
2599static struct proc_dir_entry *proc_mtd;
2600
2601static int __init init_mtd(void)
2602{
2603 int ret;
2604
2605 ret = class_register(&mtd_class);
2606 if (ret)
2607 goto err_reg;
2608
2609 mtd_bdi = mtd_bdi_init("mtd");
2610 if (IS_ERR(mtd_bdi)) {
2611 ret = PTR_ERR(mtd_bdi);
2612 goto err_bdi;
2613 }
2614
2615 proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2616
2617 ret = init_mtdchar();
2618 if (ret)
2619 goto out_procfs;
2620
2621 dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2622 debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2623 &mtd_expert_analysis_mode);
2624
2625 return 0;
2626
2627out_procfs:
2628 if (proc_mtd)
2629 remove_proc_entry("mtd", NULL);
2630 bdi_unregister(mtd_bdi);
2631 bdi_put(mtd_bdi);
2632err_bdi:
2633 class_unregister(&mtd_class);
2634err_reg:
2635 pr_err("Error registering mtd class or bdi: %d\n", ret);
2636 return ret;
2637}
2638
2639static void __exit cleanup_mtd(void)
2640{
2641 if (IS_REACHABLE(CONFIG_MTD_VIRT_CONCAT)) {
2642 mtd_virt_concat_destroy_joins();
2643 mtd_virt_concat_destroy_items();
2644 }
2645 debugfs_remove_recursive(dfs_dir_mtd);
2646 cleanup_mtdchar();
2647 if (proc_mtd)
2648 remove_proc_entry("mtd", NULL);
2649 class_unregister(&mtd_class);
2650 bdi_unregister(mtd_bdi);
2651 bdi_put(mtd_bdi);
2652 idr_destroy(&mtd_idr);
2653}
2654
2655module_init(init_mtd);
2656module_exit(cleanup_mtd);
2657
2658MODULE_LICENSE("GPL");
2659MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2660MODULE_DESCRIPTION("Core MTD registration and access routines");