]>
Commit | Line | Data |
---|---|---|
e29c22f5 KP |
1 | /* |
2 | * Simple MTD partitioning layer | |
3 | * | |
4 | * (C) 2000 Nicolas Pitre <[email protected]> | |
5 | * | |
6 | * This code is GPL | |
7 | * | |
8 | * 02-21-2002 Thomas Gleixner <[email protected]> | |
9 | * added support for read_oob, write_oob | |
10 | */ | |
11 | ||
12 | #include <common.h> | |
13 | #include <malloc.h> | |
14 | #include <asm/errno.h> | |
15 | ||
16 | #include <linux/types.h> | |
17 | #include <linux/list.h> | |
18 | #include <linux/mtd/mtd.h> | |
19 | #include <linux/mtd/partitions.h> | |
7b15e2bb | 20 | #include <linux/compat.h> |
e29c22f5 KP |
21 | |
/* Our partition linked list */
struct list_head mtd_partitions;

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;		/* slave MTD object handed to users; kept
					 * first so PART() can cast mtd_info* back
					 * to mtd_part* */
	struct mtd_info *master;	/* whole device this partition is carved from */
	uint64_t offset;		/* byte offset of the partition on the master */
	int index;			/* partition number within the master */
	struct list_head list;		/* link in the global mtd_partitions list */
	int registered;			/* non-zero if add_mtd_device() was called
					 * for this slave (see add_one_partition) */
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x) ((struct mtd_part *)(x))
40 | ||
41 | ||
42 | /* | |
43 | * MTD methods which simply translate the effective address and pass through | |
44 | * to the _real_ device. | |
45 | */ | |
46 | ||
8d2effea SR |
/*
 * Read from a partition: translate the partition-relative offset into a
 * master-device offset and delegate to the master, then fold the change
 * in the master's ECC statistics into this partition's counters.
 */
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;	/* master's counters before the read */
	int res;

	stats = part->master->ecc_stats;
	res = mtd_read(part->master, from + part->offset, len, retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}
64 | ||
/*
 * OOB-capable read on a partition: bounds-check the request against the
 * partition size, then delegate to the master at the translated offset.
 * Bitflip/ECC outcomes are accounted in the partition's ecc_stats.
 *
 * NOTE(review): only ops->len (the data length) is range-checked here;
 * an OOB-only request's ooblen is not validated against the partition
 * end — confirm whether callers guarantee this.
 */
static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;
	res = mtd_read_oob(part->master, from + part->offset, ops);

	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}
85 | ||
8d2effea SR |
86 | static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from, |
87 | size_t len, size_t *retlen, u_char *buf) | |
e29c22f5 KP |
88 | { |
89 | struct mtd_part *part = PART(mtd); | |
dfe64e2c | 90 | return mtd_read_user_prot_reg(part->master, from, len, retlen, buf); |
e29c22f5 KP |
91 | } |
92 | ||
8d2effea SR |
93 | static int part_get_user_prot_info(struct mtd_info *mtd, |
94 | struct otp_info *buf, size_t len) | |
e29c22f5 KP |
95 | { |
96 | struct mtd_part *part = PART(mtd); | |
dfe64e2c | 97 | return mtd_get_user_prot_info(part->master, buf, len); |
e29c22f5 KP |
98 | } |
99 | ||
8d2effea SR |
100 | static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, |
101 | size_t len, size_t *retlen, u_char *buf) | |
e29c22f5 KP |
102 | { |
103 | struct mtd_part *part = PART(mtd); | |
dfe64e2c | 104 | return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf); |
e29c22f5 KP |
105 | } |
106 | ||
8d2effea SR |
107 | static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, |
108 | size_t len) | |
e29c22f5 KP |
109 | { |
110 | struct mtd_part *part = PART(mtd); | |
dfe64e2c | 111 | return mtd_get_fact_prot_info(part->master, buf, len); |
e29c22f5 KP |
112 | } |
113 | ||
8d2effea SR |
114 | static int part_write(struct mtd_info *mtd, loff_t to, size_t len, |
115 | size_t *retlen, const u_char *buf) | |
e29c22f5 KP |
116 | { |
117 | struct mtd_part *part = PART(mtd); | |
dfe64e2c | 118 | return mtd_write(part->master, to + part->offset, len, retlen, buf); |
e29c22f5 | 119 | } |
e29c22f5 KP |
120 | |
/*
 * OOB-capable write on a partition: validate the request against the
 * partition size, then forward to the master at the translated offset.
 * Mirrors the checks in part_read_oob().
 */
static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return mtd_write_oob(part->master, to + part->offset, ops);
}
132 | ||
8d2effea SR |
133 | static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, |
134 | size_t len, size_t *retlen, u_char *buf) | |
e29c22f5 KP |
135 | { |
136 | struct mtd_part *part = PART(mtd); | |
dfe64e2c | 137 | return mtd_write_user_prot_reg(part->master, from, len, retlen, buf); |
e29c22f5 KP |
138 | } |
139 | ||
8d2effea SR |
140 | static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, |
141 | size_t len) | |
e29c22f5 KP |
142 | { |
143 | struct mtd_part *part = PART(mtd); | |
dfe64e2c | 144 | return mtd_lock_user_prot_reg(part->master, from, len); |
e29c22f5 | 145 | } |
e29c22f5 | 146 | |
/*
 * Erase a region of the partition.  The erase_info is adjusted in place
 * to master coordinates before the call; on synchronous failure the
 * addresses are translated back here, on success they are translated
 * back in mtd_erase_callback().
 */
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->addr += part->offset;
	ret = mtd_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}
161 | ||
/*
 * Completion hook for erase requests.  If the request was issued through
 * a partition (detected by its _erase pointing at part_erase), translate
 * the addresses back into partition-relative form before invoking the
 * caller's callback.
 */
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
e29c22f5 | 174 | |
8d2effea | 175 | static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
e29c22f5 KP |
176 | { |
177 | struct mtd_part *part = PART(mtd); | |
dfe64e2c | 178 | return mtd_lock(part->master, ofs + part->offset, len); |
e29c22f5 KP |
179 | } |
180 | ||
8d2effea | 181 | static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
e29c22f5 KP |
182 | { |
183 | struct mtd_part *part = PART(mtd); | |
dfe64e2c | 184 | return mtd_unlock(part->master, ofs + part->offset, len); |
e29c22f5 | 185 | } |
e29c22f5 KP |
186 | |
187 | static void part_sync(struct mtd_info *mtd) | |
188 | { | |
189 | struct mtd_part *part = PART(mtd); | |
dfe64e2c | 190 | mtd_sync(part->master); |
e29c22f5 KP |
191 | } |
192 | ||
8d2effea | 193 | static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) |
e29c22f5 KP |
194 | { |
195 | struct mtd_part *part = PART(mtd); | |
e29c22f5 | 196 | ofs += part->offset; |
dfe64e2c | 197 | return mtd_block_isbad(part->master, ofs); |
e29c22f5 KP |
198 | } |
199 | ||
/*
 * Mark a block bad at a partition-relative offset, and account the new
 * bad block in this partition's ECC statistics on success.
 */
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = mtd_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}
211 | ||
/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;

	/* _safe variant: entries are deleted and freed while walking */
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			list_del(&slave->list);
			/* only slaves without an mtdp were registered */
			if (slave->registered)
				del_mtd_device(&slave->mtd);
			kfree(slave);
		}

	return 0;
}
231 | ||
8d2effea SR |
/*
 * Create one slave mtd_part for @part (partition number @partno) on top
 * of @master and add it to the global partition list.  @cur_offset is
 * where the previous partition ended, used to resolve the symbolic
 * MTDPART_OFS_APPEND / MTDPART_OFS_NXTBLK placements.  Returns the new
 * slave, or NULL on allocation failure (in which case all of @master's
 * partitions are torn down again via del_mtd_partitions()).
 */
static struct mtd_part *add_one_partition(struct mtd_info *master,
		const struct mtd_partition *part, int partno,
		uint64_t cur_offset)
{
	struct mtd_part *slave;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
			master->name);
		del_mtd_partitions(master);
		return NULL;
	}
	list_add(&slave->list, &mtd_partitions);

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = part->name;
	slave->mtd.owner = master->owner;

	/* hook up only the operations the master actually implements */
	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;
	slave->index = partno;

	/* resolve symbolic offsets relative to the previous partition's end */
	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: 0x%012llx -> 0x%012llx\n",
			      partno, (unsigned long long)cur_offset,
			      (unsigned long long)slave->offset);
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n",
	      (unsigned long long)slave->offset,
	      (unsigned long long)(slave->offset + slave->mtd.size),
	      slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase regions which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	/* pre-count bad blocks that fall inside this partition */
	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	if (part->mtdp) {
		/* store the object pointer (the caller may or may not register it) */
		*part->mtdp = &slave->mtd;
		slave->registered = 0;
	} else {
		/* register our partition */
		add_mtd_device(&slave->mtd);
		slave->registered = 1;
	}
	return slave;
}
392 | ||
e29c22f5 KP |
/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 *
 * Returns 0 on success, or -ENOMEM if any slave allocation failed.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	/*
	 * Need to init the list here, since LIST_INIT() does not
	 * work on platforms where relocation has problems (like MIPS
	 * & PPC).
	 */
	if (mtd_partitions.next == NULL)
		INIT_LIST_HEAD(&mtd_partitions);

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = add_one_partition(master, parts + i, i, cur_offset);
		if (!slave)
			return -ENOMEM;
		/* next APPEND/NXTBLK partition starts after this one's end */
		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}