/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <[email protected]>
 *
 * NAND support by Christian Gan <[email protected]>
 *
 * This code is GPL
 *
 * $Id: mtdconcat.c,v 1.11 2005/11/07 11:14:20 gleixner Exp $
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))
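
/*
 * Layout sketch (illustrative): the pointer array lives in the same
 * allocation, immediately behind the structure, which is exactly how
 * mtd_concat_create() below sets it up:
 *
 *	concat = kmalloc(SIZEOF_STRUCT_MTD_CONCAT(num_devs), GFP_KERNEL);
 *	concat->subdev = (struct mtd_info **) (concat + 1);
 */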

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = subdev->read(subdev, from, size, &retsize, buf);

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		from = 0;
	}
	return err;
}
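
/*
 * A minimal userspace sketch of the split/translate arithmetic used by
 * the methods in this file (illustrative only, not part of the driver;
 * the function name split_example is made up for this example):
 */
#if 0
#include <stdio.h>

static void split_example(const unsigned long *sizes, int n,
			  unsigned long from, unsigned long len)
{
	int i;

	for (i = 0; i < n && len; i++) {
		unsigned long size;

		if (from >= sizes[i]) {
			/* not destined for this subdevice */
			from -= sizes[i];
			continue;
		}
		/* clip the chunk at the subdevice boundary */
		size = (from + len > sizes[i]) ? sizes[i] - from : len;
		printf("subdev %d: offset 0x%lx, length 0x%lx\n",
		       i, from, size);
		len -= size;
		from = 0;	/* later chunks start at subdevice offset 0 */
	}
}
#endif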

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->write(subdev, to, size, &retsize, buf);

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
		size_t * retlen, u_char * buf, u_char * eccbuf,
		struct nand_oobinfo *oobsel)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}

		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		if (subdev->read_ecc)
			err = subdev->read_ecc(subdev, from, size,
					       &retsize, buf, eccbuf, oobsel);
		else
			err = -EINVAL;

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		if (eccbuf) {
			eccbuf += subdev->oobsize;
			/* in nand.c at least, eccbufs are
			   tagged with 2 (int)eccstatus'; we
			   must account for these */
			eccbuf += 2 * (sizeof (int));
		}
		from = 0;
	}
	return err;
}

static int
concat_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
		 size_t * retlen, const u_char * buf, u_char * eccbuf,
		 struct nand_oobinfo *oobsel)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else if (subdev->write_ecc)
			err = subdev->write_ecc(subdev, to, size,
						&retsize, buf, eccbuf, oobsel);
		else
			err = -EINVAL;

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		if (eccbuf)
			eccbuf += subdev->oobsize;
		to = 0;
	}
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
		size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		if (subdev->read_oob)
			err = subdev->read_oob(subdev, from, size,
					       &retsize, buf);
		else
			err = -EINVAL;

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		from = 0;
	}
	return err;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
		 size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else if (subdev->write_oob)
			err = subdev->write_oob(subdev, to, size, &retsize,
						buf);
		else
			err = -EINVAL;

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd->erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	u_int32_t length, offset = 0;
	struct erase_info *erase;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (instr->addr > concat->mtd.size)
		return -EINVAL;

	if (instr->len + instr->addr > concat->mtd.size)
		return -EINVAL;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if ((instr->addr + instr->len) & (erase_regions[i].erasesize - 1))
			return -EINVAL;
	}

	instr->fail_addr = 0xffffffff;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		if (!(subdev->flags & MTD_WRITEABLE)) {
			err = -EROFS;
			break;
		}
		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != 0xffffffff)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}
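
/*
 * Worked example (illustrative): concatenating a 1 MiB chip with 64 KiB
 * blocks and a 1 MiB chip with 128 KiB blocks yields two erase regions.
 * An erase of 192 KiB at offset 0xf0000 crosses the subdevice boundary:
 * concat_erase() verifies that 0xf0000 is 64 KiB aligned (start region)
 * and that 0x120000 is 128 KiB aligned (end region), then issues one
 * erase per affected subdevice, rebasing erase->addr into each
 * subdevice's own address space (64 KiB at 0xf0000 of the first chip,
 * 128 KiB at offset 0 of the second).
 */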

static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->lock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->unlock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = subdev->suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->resume(subdev);
	}
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success. This function does _not_ register any
 * devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
				   char *name)	/* name for the new device */
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	u_int32_t max_erasesize, curr_erasesize;
	int num_erase_region;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kmalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	memset(concat, 0, size);
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.oobblock = subdev[0]->oobblock;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.ecctype = subdev[0]->ecctype;
	concat->mtd.eccsize = subdev[0]->eccsize;
	if (subdev[0]->read_ecc)
		concat->mtd.read_ecc = concat_read_ecc;
	if (subdev[0]->write_ecc)
		concat->mtd.write_ecc = concat_write_ecc;
	if (subdev[0]->read_oob)
		concat->mtd.read_oob = concat_read_oob;
	if (subdev[0]->write_oob)
		concat->mtd.write_oob = concat_write_oob;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}
		concat->mtd.size += subdev[i]->size;
		if (concat->mtd.oobblock != subdev[i]->oobblock ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    concat->mtd.ecctype != subdev[i]->ecctype ||
		    concat->mtd.eccsize != subdev[i]->eccsize ||
		    !concat->mtd.read_ecc != !subdev[i]->read_ecc ||
		    !concat->mtd.write_ecc != !subdev[i]->write_ecc ||
		    !concat->mtd.read_oob != !subdev[i]->read_oob ||
		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	/*
	 * NOTE: for now, we do not provide any readv()/writev() methods
	 * because they are messy to implement and they are not
	 * used to a great extent anyway.
	 */
	concat->mtd.erase = concat_erase;
	concat->mtd.read = concat_read;
	concat->mtd.write = concat_write;
	concat->mtd.sync = concat_sync;
	concat->mtd.lock = concat_lock;
	concat->mtd.unlock = concat_unlock;
	concat->mtd.suspend = concat_suspend;
	concat->mtd.resume = concat_resume;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
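	/*
	 * Example (illustrative): three subdevices with erase sizes
	 * 64 KiB, 64 KiB and 128 KiB produce one size change during
	 * this walk, so num_erase_region ends up as 2 and
	 * max_erasesize as 128 KiB.
	 */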
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		u_int32_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill
		 * in erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					erase_region_p->numblocks =
					    (position - begin) / curr_erasesize;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, record any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						erase_region_p->numblocks =
						    (position -
						     begin) / curr_erasesize;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		erase_region_p->numblocks = (position - begin) / curr_erasesize;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}
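
/*
 * Typical usage (a sketch, not part of this driver): a board driver
 * concatenates its probed chips and registers the result. The names
 * board_flash_init/board_flash_exit and my_chips[] are assumptions for
 * illustration; registration uses the usual add_mtd_device() /
 * del_mtd_device() calls.
 */
#if 0
static struct mtd_info *my_chips[2];	/* filled in by map/NAND probing */
static struct mtd_info *merged_mtd;

static int __init board_flash_init(void)
{
	merged_mtd = mtd_concat_create(my_chips, 2, "board-flash");
	if (!merged_mtd)
		return -ENXIO;
	return add_mtd_device(merged_mtd);
}

static void __exit board_flash_exit(void)
{
	del_mtd_device(merged_mtd);
	mtd_concat_destroy(merged_mtd);
}
#endif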

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <[email protected]>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");