/*
 * Simple MTD partitioning layer
 *
 * (C) 2000 Nicolas Pitre <[email protected]>
 *
 * This code is GPL
 *
 * 02-21-2002 Thomas Gleixner <[email protected]>
 *            added support for read_oob, write_oob
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/compatmac.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);

/* Our partition node structure */
struct mtd_part {
        struct mtd_info mtd;
        struct mtd_info *master;
        uint64_t offset;
        struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))
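
/*
 * Note: this cast is only valid because 'mtd' is the first member of
 * struct mtd_part, so a pointer to the embedded mtd_info is also a pointer
 * to the enclosing mtd_part.  The layout-independent form would be
 * container_of(x, struct mtd_part, mtd).
 */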

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
                size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        struct mtd_ecc_stats stats;
        int res;

        stats = part->master->ecc_stats;

        if (from >= mtd->size)
                len = 0;
        else if (from + len > mtd->size)
                len = mtd->size - from;
        res = part->master->read(part->master, from + part->offset,
                                 len, retlen, buf);
        if (unlikely(res)) {
                if (res == -EUCLEAN)
                        mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
                if (res == -EBADMSG)
                        mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
        }
        return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
                size_t *retlen, void **virt, resource_size_t *phys)
{
        struct mtd_part *part = PART(mtd);
        if (from >= mtd->size)
                len = 0;
        else if (from + len > mtd->size)
                len = mtd->size - from;
        return part->master->point(part->master, from + part->offset,
                                   len, retlen, virt, phys);
}

static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
        struct mtd_part *part = PART(mtd);

        part->master->unpoint(part->master, from + part->offset, len);
}

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
                                            unsigned long len,
                                            unsigned long offset,
                                            unsigned long flags)
{
        struct mtd_part *part = PART(mtd);

        offset += part->offset;
        return part->master->get_unmapped_area(part->master, len, offset,
                                               flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
                struct mtd_oob_ops *ops)
{
        struct mtd_part *part = PART(mtd);
        int res;

        if (from >= mtd->size)
                return -EINVAL;
        if (ops->datbuf && from + ops->len > mtd->size)
                return -EINVAL;
        res = part->master->read_oob(part->master, from + part->offset, ops);

        if (unlikely(res)) {
                if (res == -EUCLEAN)
                        mtd->ecc_stats.corrected++;
                if (res == -EBADMSG)
                        mtd->ecc_stats.failed++;
        }
        return res;
}

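/*
 * Illustrative sketch, not part of the original file: how a caller might fill
 * struct mtd_oob_ops before invoking the read_oob method that part_read_oob()
 * forwards to the master.  The buffer name 'oob_buf' and the offset are
 * assumptions for the example.
 *
 *      struct mtd_oob_ops ops;
 *
 *      memset(&ops, 0, sizeof(ops));
 *      ops.mode    = MTD_OOB_AUTO;     // only the free OOB bytes
 *      ops.datbuf  = NULL;             // OOB only, no main-area data
 *      ops.oobbuf  = oob_buf;          // assumed caller-provided buffer
 *      ops.ooblen  = mtd->oobavail;
 *      ops.ooboffs = 0;
 *
 *      err = mtd->read_oob(mtd, offset, &ops);
 *      // on success, ops.oobretlen holds the number of OOB bytes copied
 */
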
static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->read_user_prot_reg(part->master, from,
                                        len, retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd,
                struct otp_info *buf, size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->get_user_prot_info(part->master, buf, len);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->read_fact_prot_reg(part->master, from,
                                        len, retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
                size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->get_fact_prot_info(part->master, buf, len);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
                size_t *retlen, const u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (to >= mtd->size)
                len = 0;
        else if (to + len > mtd->size)
                len = mtd->size - to;
        return part->master->write(part->master, to + part->offset,
                                   len, retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
                size_t *retlen, const u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (to >= mtd->size)
                len = 0;
        else if (to + len > mtd->size)
                len = mtd->size - to;
        return part->master->panic_write(part->master, to + part->offset,
                                         len, retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
                struct mtd_oob_ops *ops)
{
        struct mtd_part *part = PART(mtd);

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        if (to >= mtd->size)
                return -EINVAL;
        if (ops->datbuf && to + ops->len > mtd->size)
                return -EINVAL;
        return part->master->write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->write_user_prot_reg(part->master, from,
                                        len, retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->lock_user_prot_reg(part->master, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
                unsigned long count, loff_t to, size_t *retlen)
{
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        return part->master->writev(part->master, vecs, count,
                                    to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct mtd_part *part = PART(mtd);
        int ret;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (instr->addr >= mtd->size)
                return -EINVAL;
        instr->addr += part->offset;
        ret = part->master->erase(part->master, instr);
        if (ret) {
                if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
                        instr->fail_addr -= part->offset;
                instr->addr -= part->offset;
        }
        return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
        if (instr->mtd->erase == part_erase) {
                struct mtd_part *part = PART(instr->mtd);

                if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
                        instr->fail_addr -= part->offset;
                instr->addr -= part->offset;
        }
        if (instr->callback)
                instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

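/*
 * Illustrative sketch, not from this file: a typical synchronous erase through
 * a partition's erase method.  mtd_erase_callback() above is what rebases the
 * partition-relative addresses before the caller's callback runs.  The helper
 * names and the wait-queue plumbing are assumptions modelled on common in-tree
 * callers.
 *
 *      static void erase_done(struct erase_info *instr)
 *      {
 *              wake_up((wait_queue_head_t *)instr->priv);
 *      }
 *
 *      static int erase_one_block(struct mtd_info *mtd, loff_t ofs)
 *      {
 *              struct erase_info ei;
 *              wait_queue_head_t waitq;
 *              int ret;
 *
 *              init_waitqueue_head(&waitq);
 *              memset(&ei, 0, sizeof(ei));
 *              ei.mtd      = mtd;
 *              ei.addr     = ofs;              // partition-relative offset
 *              ei.len      = mtd->erasesize;
 *              ei.callback = erase_done;
 *              ei.priv     = (u_long)&waitq;
 *
 *              ret = mtd->erase(mtd, &ei);
 *              if (ret)
 *                      return ret;
 *
 *              wait_event(waitq, ei.state == MTD_ERASE_DONE ||
 *                                ei.state == MTD_ERASE_FAILED);
 *              return ei.state == MTD_ERASE_DONE ? 0 : -EIO;
 *      }
 */
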
static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_part *part = PART(mtd);
        if ((len + ofs) > mtd->size)
                return -EINVAL;
        return part->master->lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_part *part = PART(mtd);
        if ((len + ofs) > mtd->size)
                return -EINVAL;
        return part->master->unlock(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
        struct mtd_part *part = PART(mtd);
        part->master->sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
        struct mtd_part *part = PART(mtd);
        return part->master->suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
        struct mtd_part *part = PART(mtd);
        part->master->resume(part->master);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_part *part = PART(mtd);
        if (ofs >= mtd->size)
                return -EINVAL;
        ofs += part->offset;
        return part->master->block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_part *part = PART(mtd);
        int res;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (ofs >= mtd->size)
                return -EINVAL;
        ofs += part->offset;
        res = part->master->block_markbad(part->master, ofs);
        if (!res)
                mtd->ecc_stats.badblocks++;
        return res;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
        struct mtd_part *slave, *next;

        list_for_each_entry_safe(slave, next, &mtd_partitions, list)
                if (slave->master == master) {
                        list_del(&slave->list);
                        del_mtd_device(&slave->mtd);
                        kfree(slave);
                }

        return 0;
}
EXPORT_SYMBOL(del_mtd_partitions);

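/*
 * Illustrative sketch, an assumption rather than code from this file: a
 * driver's remove path mirrors whichever registration its probe performed;
 * 'info' and its fields are hypothetical.
 *
 *      if (info->nr_parts)
 *              del_mtd_partitions(info->mtd);  // drops the slave devices
 *      else
 *              del_mtd_device(info->mtd);      // master was registered directly
 */
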
static struct mtd_part *add_one_partition(struct mtd_info *master,
                const struct mtd_partition *part, int partno,
                uint64_t cur_offset)
{
        struct mtd_part *slave;

        /* allocate the partition structure */
        slave = kzalloc(sizeof(*slave), GFP_KERNEL);
        if (!slave) {
                printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
                        master->name);
                del_mtd_partitions(master);
                return NULL;
        }
        list_add(&slave->list, &mtd_partitions);

        /* set up the MTD object for this partition */
        slave->mtd.type = master->type;
        slave->mtd.flags = master->flags & ~part->mask_flags;
        slave->mtd.size = part->size;
        slave->mtd.writesize = master->writesize;
        slave->mtd.oobsize = master->oobsize;
        slave->mtd.oobavail = master->oobavail;
        slave->mtd.subpage_sft = master->subpage_sft;

        slave->mtd.name = part->name;
        slave->mtd.owner = master->owner;
        slave->mtd.backing_dev_info = master->backing_dev_info;

        /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
         * to have the same data be in two different partitions.
         */
        slave->mtd.dev.parent = master->dev.parent;

        slave->mtd.read = part_read;
        slave->mtd.write = part_write;

        if (master->panic_write)
                slave->mtd.panic_write = part_panic_write;

        if (master->point && master->unpoint) {
                slave->mtd.point = part_point;
                slave->mtd.unpoint = part_unpoint;
        }

        if (master->get_unmapped_area)
                slave->mtd.get_unmapped_area = part_get_unmapped_area;
        if (master->read_oob)
                slave->mtd.read_oob = part_read_oob;
        if (master->write_oob)
                slave->mtd.write_oob = part_write_oob;
        if (master->read_user_prot_reg)
                slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
        if (master->read_fact_prot_reg)
                slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
        if (master->write_user_prot_reg)
                slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
        if (master->lock_user_prot_reg)
                slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
        if (master->get_user_prot_info)
                slave->mtd.get_user_prot_info = part_get_user_prot_info;
        if (master->get_fact_prot_info)
                slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
        if (master->sync)
                slave->mtd.sync = part_sync;
        if (!partno && !master->dev.class && master->suspend && master->resume) {
                slave->mtd.suspend = part_suspend;
                slave->mtd.resume = part_resume;
        }
        if (master->writev)
                slave->mtd.writev = part_writev;
        if (master->lock)
                slave->mtd.lock = part_lock;
        if (master->unlock)
                slave->mtd.unlock = part_unlock;
        if (master->block_isbad)
                slave->mtd.block_isbad = part_block_isbad;
        if (master->block_markbad)
                slave->mtd.block_markbad = part_block_markbad;
        slave->mtd.erase = part_erase;
        slave->master = master;
        slave->offset = part->offset;

        if (slave->offset == MTDPART_OFS_APPEND)
                slave->offset = cur_offset;
        if (slave->offset == MTDPART_OFS_NXTBLK) {
                slave->offset = cur_offset;
                if (mtd_mod_by_eb(cur_offset, master) != 0) {
                        /* Round up to next erasesize */
                        slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
                        printk(KERN_NOTICE "Moving partition %d: "
                               "0x%012llx -> 0x%012llx\n", partno,
                               (unsigned long long)cur_offset, (unsigned long long)slave->offset);
                }
        }
        if (slave->mtd.size == MTDPART_SIZ_FULL)
                slave->mtd.size = master->size - slave->offset;

        printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
                (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

        /* let's do some sanity checks */
        if (slave->offset >= master->size) {
                /* let's register it anyway to preserve ordering */
                slave->offset = 0;
                slave->mtd.size = 0;
                printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
                        part->name);
                goto out_register;
        }
        if (slave->offset + slave->mtd.size > master->size) {
                slave->mtd.size = master->size - slave->offset;
                printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
                        part->name, master->name, (unsigned long long)slave->mtd.size);
        }
        if (master->numeraseregions > 1) {
                /* Deal with variable erase size stuff */
                int i, max = master->numeraseregions;
                u64 end = slave->offset + slave->mtd.size;
                struct mtd_erase_region_info *regions = master->eraseregions;

                /* Find the first erase region which is part of this
                 * partition. */
                for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
                        ;
                /* The loop stopped one region past the first one we need,
                 * so step back. */
                if (i > 0)
                        i--;

                /* Pick biggest erasesize */
                for (; i < max && regions[i].offset < end; i++) {
                        if (slave->mtd.erasesize < regions[i].erasesize) {
                                slave->mtd.erasesize = regions[i].erasesize;
                        }
                }
                BUG_ON(slave->mtd.erasesize == 0);
        } else {
                /* Single erase size */
                slave->mtd.erasesize = master->erasesize;
        }

        if ((slave->mtd.flags & MTD_WRITEABLE) &&
            mtd_mod_by_eb(slave->offset, &slave->mtd)) {
                /* Doesn't start on a boundary of major erase size */
                /* FIXME: Let it be writable if it is on a boundary of
                 * _minor_ erase size though */
                slave->mtd.flags &= ~MTD_WRITEABLE;
                printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
                        part->name);
        }
        if ((slave->mtd.flags & MTD_WRITEABLE) &&
            mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
                slave->mtd.flags &= ~MTD_WRITEABLE;
                printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
                        part->name);
        }

        slave->mtd.ecclayout = master->ecclayout;
        if (master->block_isbad) {
                uint64_t offs = 0;

                while (offs < slave->mtd.size) {
                        if (master->block_isbad(master,
                                                offs + slave->offset))
                                slave->mtd.ecc_stats.badblocks++;
                        offs += slave->mtd.erasesize;
                }
        }

out_register:
        /* register our partition */
        add_mtd_device(&slave->mtd);

        return slave;
}

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
                       const struct mtd_partition *parts,
                       int nbparts)
{
        struct mtd_part *slave;
        uint64_t cur_offset = 0;
        int i;

        printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

        for (i = 0; i < nbparts; i++) {
                slave = add_one_partition(master, parts + i, i, cur_offset);
                if (!slave)
                        return -ENOMEM;
                cur_offset = slave->offset + slave->mtd.size;
        }

        return 0;
}
EXPORT_SYMBOL(add_mtd_partitions);

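/*
 * Illustrative sketch, not part of this file: a board or map driver usually
 * describes its layout with a static table and hands it to
 * add_mtd_partitions().  The partition names, sizes and the 'master' pointer
 * are assumptions for the example.
 *
 *      static struct mtd_partition board_parts[] = {
 *              {
 *                      .name       = "bootloader",
 *                      .offset     = 0,
 *                      .size       = 256 * 1024,
 *                      .mask_flags = MTD_WRITEABLE,    // force read-only
 *              }, {
 *                      .name       = "kernel",
 *                      .offset     = MTDPART_OFS_APPEND,
 *                      .size       = 4 * 1024 * 1024,
 *              }, {
 *                      .name       = "rootfs",
 *                      .offset     = MTDPART_OFS_APPEND,
 *                      .size       = MTDPART_SIZ_FULL, // rest of the device
 *              },
 *      };
 *
 *      err = add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
 */
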
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
        struct mtd_part_parser *p, *ret = NULL;

        spin_lock(&part_parser_lock);

        list_for_each_entry(p, &part_parsers, list)
                if (!strcmp(p->name, name) && try_module_get(p->owner)) {
                        ret = p;
                        break;
                }

        spin_unlock(&part_parser_lock);

        return ret;
}

int register_mtd_parser(struct mtd_part_parser *p)
{
        spin_lock(&part_parser_lock);
        list_add(&p->list, &part_parsers);
        spin_unlock(&part_parser_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

int deregister_mtd_parser(struct mtd_part_parser *p)
{
        spin_lock(&part_parser_lock);
        list_del(&p->list);
        spin_unlock(&part_parser_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

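/*
 * Illustrative sketch, an assumption rather than code from this file: the
 * minimal shape of a partition parser that plugs into the registry above.
 * The parse function typically allocates the partition array it returns
 * through *pparts and reports how many entries it filled in.
 *
 *      static int example_parse_fn(struct mtd_info *master,
 *                                  struct mtd_partition **pparts,
 *                                  unsigned long origin)
 *      {
 *              struct mtd_partition *parts;
 *
 *              parts = kzalloc(sizeof(*parts), GFP_KERNEL);
 *              if (!parts)
 *                      return -ENOMEM;
 *              parts[0].name   = "whole-device";
 *              parts[0].offset = 0;
 *              parts[0].size   = MTDPART_SIZ_FULL;
 *              *pparts = parts;
 *              return 1;               // number of partitions found
 *      }
 *
 *      static struct mtd_part_parser example_parser = {
 *              .owner    = THIS_MODULE,
 *              .parse_fn = example_parse_fn,
 *              .name     = "example",
 *      };
 *
 *      // register_mtd_parser(&example_parser) from module init,
 *      // deregister_mtd_parser(&example_parser) from module exit.
 */
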
int parse_mtd_partitions(struct mtd_info *master, const char **types,
                         struct mtd_partition **pparts, unsigned long origin)
{
        struct mtd_part_parser *parser;
        int ret = 0;

        for ( ; ret <= 0 && *types; types++) {
                parser = get_partition_parser(*types);
                if (!parser && !request_module("%s", *types))
                        parser = get_partition_parser(*types);
                if (!parser) {
                        printk(KERN_NOTICE "%s partition parsing not available\n",
                               *types);
                        continue;
                }
                ret = (*parser->parse_fn)(master, pparts, origin);
                if (ret > 0) {
                        printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
                               ret, parser->name, master->name);
                }
                put_partition_parser(parser);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
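
/*
 * Illustrative sketch, not from this file: how a flash driver commonly
 * combines parse_mtd_partitions() with a static fallback table.  The probe
 * list and the fallback variables are assumptions.
 *
 *      static const char *probe_types[] = { "cmdlinepart", "RedBoot", NULL };
 *      struct mtd_partition *parts;
 *      int nr_parts;
 *
 *      nr_parts = parse_mtd_partitions(master, probe_types, &parts, 0);
 *      if (nr_parts > 0)
 *              add_mtd_partitions(master, parts, nr_parts);
 *      else if (board_nr_parts)                // hypothetical static table
 *              add_mtd_partitions(master, board_parts, board_nr_parts);
 *      else
 *              add_mtd_device(master);         // expose the whole device
 */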