// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter
 * header. The rest of the physical eraseblock contains only %0xFF bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As it was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the protection queue. Eraseblocks are put to the tail of this queue when
 * they are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed
 * from the head of the queue on each erase operation (for any eraseblock). So
 * the length of the queue defines how many (global) erase cycles PEBs are
 * protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-work of the WL sub-system.
 */
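
/*
 * A minimal sketch (illustrative only, not part of the driver) of the typical
 * PEB life cycle from this sub-system's point of view, using the entry points
 * mentioned above; error handling is omitted:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *		- taken from @wl->free, parked in the protection queue @wl->pq
 *	... the EBA sub-system writes user data to PEB pnum ...
 *	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 *		- scheduled for erasure; once the erase work completes, the
 *		  PEB re-appears in @wl->free with an incremented erase counter
 */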

#ifndef __UBOOT__
#include <log.h>
#include <dm/devres.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#else
#include <ubi_uboot.h>
#endif

#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
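
/*
 * For example, with the typical CONFIG_MTD_UBI_WL_THRESHOLD of 4096 this
 * yields WL_FREE_MAX_DIFF = 8192: a free PEB whose erase counter exceeds the
 * lowest free erase counter by more than that is never picked as a
 * wear-leveling target.
 */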

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
                                 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
                            struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node **p, *parent = NULL;

        p = &root->rb_node;
        while (*p) {
                struct ubi_wl_entry *e1;

                parent = *p;
                e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

                if (e->ec < e1->ec)
                        p = &(*p)->rb_left;
                else if (e->ec > e1->ec)
                        p = &(*p)->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;
                }
        }

        rb_link_node(&e->u.rb, parent, p);
        rb_insert_color(&e->u.rb, root);
}
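
/*
 * For example, entries with (EC, pnum) pairs (10, 3), (10, 7) and (12, 1) are
 * ordered (10, 3) < (10, 7) < (12, 1): the erase counter is the primary key
 * and the PEB number only breaks ties, so rb_first() always yields the
 * least-worn entry in the tree.
 */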

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        ubi->lookuptbl[e->pnum] = NULL;
        kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
        int err;
        struct ubi_work *wrk;

        cond_resched();

        /*
         * @ubi->work_sem is used to synchronize with the workers. Workers take
         * it in read mode, so many of them may be doing works at a time. But
         * the queue flush code has to be sure the whole queue of works is
         * done, and it takes the mutex in write mode.
         */
        down_read(&ubi->work_sem);
        spin_lock(&ubi->wl_lock);
        if (list_empty(&ubi->works)) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->work_sem);
                return 0;
        }

        wrk = list_entry(ubi->works.next, struct ubi_work, list);
        list_del(&wrk->list);
        ubi->works_count -= 1;
        ubi_assert(ubi->works_count >= 0);
        spin_unlock(&ubi->wl_lock);

        /*
         * Call the worker function. Do not touch the work structure
         * after this call as it will have been freed or reused by that
         * time by the worker function.
         */
        err = wrk->func(ubi, wrk, 0);
        if (err)
                ubi_err(ubi, "work failed with error code %d", err);
        up_read(&ubi->work_sem);

        return err;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node *p;

        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

                if (e->pnum == e1->pnum) {
                        ubi_assert(e == e1);
                        return 1;
                }

                if (e->ec < e1->ec)
                        p = p->rb_left;
                else if (e->ec > e1->ec)
                        p = p->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = p->rb_left;
                        else
                                p = p->rb_right;
                }
        }

        return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
 * be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        int pq_tail = ubi->pq_head - 1;

        if (pq_tail < 0)
                pq_tail = UBI_PROT_QUEUE_LEN - 1;
        ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
        list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
        dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
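
/*
 * For example, with UBI_PROT_QUEUE_LEN = 10 (the usual value) and
 * @pq_head = 0 at the time of the call, a new entry goes to list 9, the one
 * served last; serve_prot_queue() then empties lists 0, 1, ... on successive
 * erase operations, so a PEB added now stays protected for the next 10
 * (global) erases.
 */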

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
                                          struct rb_root *root, int diff)
{
        struct rb_node *p;
        struct ubi_wl_entry *e, *prev_e = NULL;
        int max;

        e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
        max = e->ec + diff;

        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
                if (e1->ec >= max)
                        p = p->rb_left;
                else {
                        p = p->rb_right;
                        prev_e = e;
                        e = e1;
                }
        }

        /* If no fastmap has been written and this WL entry can be used
         * as anchor PEB, hold it back and return the second best WL entry
         * such that fastmap can use the anchor PEB later. */
        if (prev_e && !ubi->fm_disabled &&
            !ubi->fm && e->pnum < UBI_FM_MAX_START)
                return prev_e;

        return e;
}
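
/*
 * For example, if the free tree holds entries with ECs 100, 150, 210 and 400,
 * and @diff is 200, then @max is 300 and the function returns the EC-210
 * entry - the largest erase counter still below min + @diff (fastmap anchor
 * reservation aside).
 */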

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but not greater than or equal to the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
                                               struct rb_root *root)
{
        struct ubi_wl_entry *e, *first, *last;

        first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
        last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

        if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
                e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

                /* If no fastmap has been written and this WL entry can be used
                 * as anchor PEB, hold it back and return the second best
                 * WL entry such that fastmap can use the anchor PEB later. */
                e = may_reserve_for_fm(ubi, e, root);
        } else
                e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

        return e;
}

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e;

        e = find_mean_wl_entry(ubi, &ubi->free);
        if (!e) {
                ubi_err(ubi, "no free eraseblocks");
                return NULL;
        }

        self_check_in_wl_tree(ubi, e, &ubi->free);

        /*
         * Move the physical eraseblock to the protection queue where it will
         * be protected from being moved for some time.
         */
        rb_erase(&e->u.rb, &ubi->free);
        ubi->free_count--;
        dbg_wl("PEB %d EC %d", e->pnum, e->ec);

        return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
        struct ubi_wl_entry *e;

        e = ubi->lookuptbl[pnum];
        if (!e)
                return -ENODEV;

        if (self_check_in_pq(ubi, e))
                return -ENODEV;

        list_del(&e->u.list);
        dbg_wl("deleted PEB %d from the protection queue", e->pnum);
        return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                      int torture)
{
        int err;
        struct ubi_ec_hdr *ec_hdr;
        unsigned long long ec = e->ec;

        dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

        err = self_check_ec(ubi, e->pnum, e->ec);
        if (err)
                return -EINVAL;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;

        err = ubi_io_sync_erase(ubi, e->pnum, torture);
        if (err < 0)
                goto out_free;

        ec += err;
        if (ec > UBI_MAX_ERASECOUNTER) {
                /*
                 * Erase counter overflow. Upgrade UBI and use 64-bit
                 * erase counters internally.
                 */
                ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
                        e->pnum, ec);
                err = -EINVAL;
                goto out_free;
        }

        dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

        ec_hdr->ec = cpu_to_be64(ec);

        err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
        if (err)
                goto out_free;

        e->ec = ec;
        spin_lock(&ubi->wl_lock);
        if (e->ec > ubi->max_ec)
                ubi->max_ec = e->ec;
        spin_unlock(&ubi->wl_lock);

out_free:
        kfree(ec_hdr);
        return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e, *tmp;
        int count;

        /*
         * There may be several protected physical eraseblocks to remove,
         * process them all.
         */
repeat:
        count = 0;
        spin_lock(&ubi->wl_lock);
        list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
                dbg_wl("PEB %d EC %d protection over, move to used tree",
                        e->pnum, e->ec);

                list_del(&e->u.list);
                wl_tree_add(e, &ubi->used);
                if (count++ > 32) {
                        /*
                         * Let's be nice and avoid holding the spinlock for
                         * too long.
                         */
                        spin_unlock(&ubi->wl_lock);
                        cond_resched();
                        goto repeat;
                }
        }

        ubi->pq_head += 1;
        if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
                ubi->pq_head = 0;
        ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
        spin_unlock(&ubi->wl_lock);
}

#ifdef __UBOOT__
void ubi_do_worker(struct ubi_device *ubi)
{
        int err;

        if (list_empty(&ubi->works) || ubi->ro_mode ||
            !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi))
                return;

        spin_lock(&ubi->wl_lock);
        while (!list_empty(&ubi->works)) {
                /*
                 * Call do_work(), which executes exactly one work from the
                 * queue, including removing it from the work queue.
                 */
                spin_unlock(&ubi->wl_lock);
                err = do_work(ubi);
                spin_lock(&ubi->wl_lock);
                if (err) {
                        ubi_err(ubi, "%s: work failed with error code %d",
                                ubi->bgt_name, err);
                }
        }
        spin_unlock(&ubi->wl_lock);
}
#endif

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        spin_lock(&ubi->wl_lock);
        list_add_tail(&wrk->list, &ubi->works);
        ubi_assert(ubi->works_count >= 0);
        ubi->works_count += 1;
#ifndef __UBOOT__
        if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
                wake_up_process(ubi->bgt_thread);
#endif
        spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        down_read(&ubi->work_sem);
        __schedule_ubi_work(ubi, wrk);
        up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                          int vol_id, int lnum, int torture)
{
        struct ubi_work *wl_wrk;

        ubi_assert(e);

        dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
               e->pnum, e->ec, torture);

        wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wl_wrk)
                return -ENOMEM;

        wl_wrk->func = &erase_worker;
        wl_wrk->e = e;
        wl_wrk->vol_id = vol_id;
        wl_wrk->lnum = lnum;
        wl_wrk->torture = torture;

        schedule_ubi_work(ubi, wl_wrk);

#ifdef __UBOOT__
        ubi_do_worker(ubi);
#endif
        return 0;
}

/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                         int vol_id, int lnum, int torture)
{
        struct ubi_work *wl_wrk;

        dbg_wl("sync erase of PEB %i", e->pnum);

        wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wl_wrk)
                return -ENOMEM;

        wl_wrk->e = e;
        wl_wrk->vol_id = vol_id;
        wl_wrk->lnum = lnum;
        wl_wrk->torture = torture;

        return erase_worker(ubi, wl_wrk, 0);
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function copies the contents of a less worn out physical eraseblock to
 * a more worn out one. Returns zero in case of success and a negative error
 * code in case of failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                int shutdown)
{
        int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
        int vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
        int anchor = wrk->anchor;
#endif
        struct ubi_wl_entry *e1, *e2;
        struct ubi_vid_hdr *vid_hdr;

        kfree(wrk);
        if (shutdown)
                return 0;

        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
        if (!vid_hdr)
                return -ENOMEM;

        mutex_lock(&ubi->move_mutex);
        spin_lock(&ubi->wl_lock);
        ubi_assert(!ubi->move_from && !ubi->move_to);
        ubi_assert(!ubi->move_to_put);

        if (!ubi->free.rb_node ||
            (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
                /*
                 * No free physical eraseblocks? Well, they must be waiting in
                 * the queue to be erased. Cancel movement - it will be
                 * triggered again when a free physical eraseblock appears.
                 *
                 * No used physical eraseblocks? They must be temporarily
                 * protected from being moved. They will be moved to the
                 * @ubi->used tree later and the wear-leveling will be
                 * triggered again.
                 */
                dbg_wl("cancel WL, a list is empty: free %d, used %d",
                       !ubi->free.rb_node, !ubi->used.rb_node);
                goto out_cancel;
        }

#ifdef CONFIG_MTD_UBI_FASTMAP
        /* Check whether we need to produce an anchor PEB */
        if (!anchor)
                anchor = !anchor_pebs_avalible(&ubi->free);

        if (anchor) {
                e1 = find_anchor_wl_entry(&ubi->used);
                if (!e1)
                        goto out_cancel;
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
        } else if (!ubi->scrub.rb_node) {
#else
        if (!ubi->scrub.rb_node) {
#endif
                /*
                 * Now pick the least worn-out used physical eraseblock and a
                 * highly worn-out free physical eraseblock. If the erase
                 * counters differ enough, start wear-leveling.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
                        dbg_wl("no WL needed: min used EC %d, max free EC %d",
                               e1->ec, e2->ec);

                        /* Give the unused PEB back */
                        wl_tree_add(e2, &ubi->free);
                        ubi->free_count++;
                        goto out_cancel;
                }
                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("move PEB %d EC %d to PEB %d EC %d",
                       e1->pnum, e1->ec, e2->pnum, e2->ec);
        } else {
                /* Perform scrubbing */
                scrubbing = 1;
                e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                self_check_in_wl_tree(ubi, e1, &ubi->scrub);
                rb_erase(&e1->u.rb, &ubi->scrub);
                dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
        }

        ubi->move_from = e1;
        ubi->move_to = e2;
        spin_unlock(&ubi->wl_lock);

        /*
         * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
         * We so far do not know which logical eraseblock our physical
         * eraseblock (@e1) belongs to. We have to read the volume identifier
         * header first.
         *
         * Note, we are protected from this PEB being unmapped and erased. The
         * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
         * which is being moved was unmapped.
         */

        err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
        if (err && err != UBI_IO_BITFLIPS) {
                if (err == UBI_IO_FF) {
                        /*
                         * We are trying to move a PEB without a VID header.
                         * UBI always writes VID headers shortly after the PEB
                         * was given, so we have a situation when it has not
                         * yet had a chance to write it, because it was
                         * preempted. So add this PEB to the protection queue
                         * for now, because presumably more data will be
                         * written there (including the missing VID header),
                         * and then we'll move it.
                         */
                        dbg_wl("PEB %d has no VID header", e1->pnum);
                        protect = 1;
                        goto out_not_moved;
                } else if (err == UBI_IO_FF_BITFLIPS) {
                        /*
                         * The same situation as %UBI_IO_FF, but bit-flips were
                         * detected. It is better to schedule this PEB for
                         * scrubbing.
                         */
                        dbg_wl("PEB %d has no VID header but has bit-flips",
                               e1->pnum);
                        scrubbing = 1;
                        goto out_not_moved;
                }

                ubi_err(ubi, "error %d while reading VID header from PEB %d",
                        err, e1->pnum);
                goto out_error;
        }

        vol_id = be32_to_cpu(vid_hdr->vol_id);
        lnum = be32_to_cpu(vid_hdr->lnum);

        err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
        if (err) {
                if (err == MOVE_CANCEL_RACE) {
                        /*
                         * The LEB has not been moved because the volume is
                         * being deleted or the PEB has been put meanwhile. We
                         * should prevent this PEB from being selected for
                         * wear-leveling movement again, so put it to the
                         * protection queue.
                         */
                        protect = 1;
                        goto out_not_moved;
                }
                if (err == MOVE_RETRY) {
                        scrubbing = 1;
                        goto out_not_moved;
                }
                if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
                    err == MOVE_TARGET_RD_ERR) {
                        /*
                         * Target PEB had bit-flips or write error - torture it.
                         */
                        torture = 1;
                        goto out_not_moved;
                }

                if (err == MOVE_SOURCE_RD_ERR) {
                        /*
                         * An error happened while reading the source PEB. Do
                         * not switch to R/O mode in this case, and give the
                         * upper layers a possibility to recover from this,
                         * e.g. by unmapping corresponding LEB. Instead, just
                         * put this PEB to the @ubi->erroneous list to prevent
                         * UBI from trying to move it over and over again.
                         */
                        if (ubi->erroneous_peb_count > ubi->max_erroneous) {
                                ubi_err(ubi, "too many erroneous eraseblocks (%d)",
                                        ubi->erroneous_peb_count);
                                goto out_error;
                        }
                        erroneous = 1;
                        goto out_not_moved;
                }

                if (err < 0)
                        goto out_error;

                ubi_assert(0);
        }

        /* The PEB has been successfully moved */
        if (scrubbing)
                ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
                        e1->pnum, vol_id, lnum, e2->pnum);
        ubi_free_vid_hdr(ubi, vid_hdr);

        spin_lock(&ubi->wl_lock);
        if (!ubi->move_to_put) {
                wl_tree_add(e2, &ubi->used);
                e2 = NULL;
        }
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
        if (err) {
                if (e2)
                        wl_entry_destroy(ubi, e2);
                goto out_ro;
        }

        if (e2) {
                /*
                 * Well, the target PEB was put meanwhile, schedule it for
                 * erasure.
                 */
                dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
                       e2->pnum, vol_id, lnum);
                err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
                if (err)
                        goto out_ro;
        }

        dbg_wl("done");
        mutex_unlock(&ubi->move_mutex);
        return 0;

        /*
         * For some reason the LEB was not moved; it might be an error, or it
         * might be something else. @e1 was not changed, so return it back.
         * @e2 might have been changed, schedule it for erasure.
         */
out_not_moved:
        if (vol_id != -1)
                dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
                       e1->pnum, vol_id, lnum, e2->pnum, err);
        else
                dbg_wl("cancel moving PEB %d to PEB %d (%d)",
                       e1->pnum, e2->pnum, err);
        spin_lock(&ubi->wl_lock);
        if (protect)
                prot_queue_add(ubi, e1);
        else if (erroneous) {
                wl_tree_add(e1, &ubi->erroneous);
                ubi->erroneous_peb_count += 1;
        } else if (scrubbing)
                wl_tree_add(e1, &ubi->scrub);
        else
                wl_tree_add(e1, &ubi->used);
        ubi_assert(!ubi->move_to_put);
        ubi->move_from = ubi->move_to = NULL;
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        ubi_free_vid_hdr(ubi, vid_hdr);
        err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
        if (err)
                goto out_ro;

        mutex_unlock(&ubi->move_mutex);
        return 0;

out_error:
        if (vol_id != -1)
                ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
                        err, e1->pnum, vol_id, lnum, e2->pnum);
        else
                ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
                        err, e1->pnum, e2->pnum);
        spin_lock(&ubi->wl_lock);
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        ubi_free_vid_hdr(ubi, vid_hdr);
        wl_entry_destroy(ubi, e1);
        wl_entry_destroy(ubi, e2);

out_ro:
        ubi_ro_mode(ubi);
        mutex_unlock(&ubi->move_mutex);
        ubi_assert(err != 0);
        return err < 0 ? err : -EIO;

out_cancel:
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
        mutex_unlock(&ubi->move_mutex);
        ubi_free_vid_hdr(ubi, vid_hdr);
        return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
        int err = 0;
        struct ubi_wl_entry *e1;
        struct ubi_wl_entry *e2;
        struct ubi_work *wrk;

        spin_lock(&ubi->wl_lock);
        if (ubi->wl_scheduled)
                /* Wear-leveling is already in the work queue */
                goto out_unlock;

        /*
         * If the ubi->scrub tree is not empty, scrubbing is needed, and the
         * WL worker has to be scheduled anyway.
         */
        if (!ubi->scrub.rb_node) {
                if (!ubi->used.rb_node || !ubi->free.rb_node)
                        /* No physical eraseblocks - no deal */
                        goto out_unlock;

                /*
                 * We schedule wear-leveling only if the difference between the
                 * lowest erase counter of used physical eraseblocks and a high
                 * erase counter of free physical eraseblocks is greater than
                 * %UBI_WL_THRESHOLD.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
                e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
                        goto out_unlock;
                dbg_wl("schedule wear-leveling");
        } else
                dbg_wl("schedule scrubbing");

        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                err = -ENOMEM;
                goto out_cancel;
        }

        wrk->anchor = 0;
        wrk->func = &wear_leveling_worker;
        if (nested)
                __schedule_ubi_work(ubi, wrk);
#ifndef __UBOOT__
        else
                schedule_ubi_work(ubi, wrk);
#else
        else {
                schedule_ubi_work(ubi, wrk);
                ubi_do_worker(ubi);
        }
#endif
        return err;

out_cancel:
        spin_lock(&ubi->wl_lock);
        ubi->wl_scheduled = 0;
out_unlock:
        spin_unlock(&ubi->wl_lock);
        return err;
}
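
/*
 * For example, with the typical UBI_WL_THRESHOLD of 4096, a least-worn used
 * PEB with EC 100 and a picked free PEB with EC 5000 (a difference of 4900)
 * causes wear-leveling to be scheduled, while a free pick with EC 4000 does
 * not.
 */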

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL sub-system is shutting down
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int shutdown)
{
        struct ubi_wl_entry *e = wl_wrk->e;
        int pnum = e->pnum;
        int vol_id = wl_wrk->vol_id;
        int lnum = wl_wrk->lnum;
        int err, available_consumed = 0;

        if (shutdown) {
                dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
                kfree(wl_wrk);
                wl_entry_destroy(ubi, e);
                return 0;
        }

        dbg_wl("erase PEB %d EC %d LEB %d:%d",
               pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

        err = sync_erase(ubi, e, wl_wrk->torture);
        if (!err) {
                /* Fine, we've erased it successfully */
                kfree(wl_wrk);

                spin_lock(&ubi->wl_lock);
                wl_tree_add(e, &ubi->free);
                ubi->free_count++;
                spin_unlock(&ubi->wl_lock);

                /*
                 * One more erase operation has happened, take care of
                 * protected physical eraseblocks.
                 */
                serve_prot_queue(ubi);

                /* And take care of wear-leveling */
                err = ensure_wear_leveling(ubi, 1);
                return err;
        }

        ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
        kfree(wl_wrk);

        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
            err == -EBUSY) {
                int err1;

                /* Re-schedule the PEB for erasure */
                err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
                if (err1) {
                        err = err1;
                        goto out_ro;
                }
                return err;
        }

        wl_entry_destroy(ubi, e);
        if (err != -EIO)
                /*
                 * If this is not %-EIO, we have no idea what to do. Scheduling
                 * this physical eraseblock for erasure again would cause
                 * errors again and again. Well, let's switch to R/O mode.
                 */
                goto out_ro;

        /* It is %-EIO, the PEB went bad */

        if (!ubi->bad_allowed) {
                ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
                goto out_ro;
        }

        spin_lock(&ubi->volumes_lock);
        if (ubi->beb_rsvd_pebs == 0) {
                if (ubi->avail_pebs == 0) {
                        spin_unlock(&ubi->volumes_lock);
                        ubi_err(ubi, "no reserved/available physical eraseblocks");
                        goto out_ro;
                }
                ubi->avail_pebs -= 1;
                available_consumed = 1;
        }
        spin_unlock(&ubi->volumes_lock);

        ubi_msg(ubi, "mark PEB %d as bad", pnum);
        err = ubi_io_mark_bad(ubi, pnum);
        if (err)
                goto out_ro;

        spin_lock(&ubi->volumes_lock);
        if (ubi->beb_rsvd_pebs > 0) {
                if (available_consumed) {
                        /*
                         * The amount of reserved PEBs increased since we last
                         * checked.
                         */
                        ubi->avail_pebs += 1;
                        available_consumed = 0;
                }
                ubi->beb_rsvd_pebs -= 1;
        }
        ubi->bad_peb_count += 1;
        ubi->good_peb_count -= 1;
        ubi_calculate_reserved(ubi);
        if (available_consumed)
                ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
        else if (ubi->beb_rsvd_pebs)
                ubi_msg(ubi, "%d PEBs left in the reserve",
                        ubi->beb_rsvd_pebs);
        else
                ubi_warn(ubi, "last PEB from the reserve was used");
        spin_unlock(&ubi->volumes_lock);

        return err;

out_ro:
        if (available_consumed) {
                spin_lock(&ubi->volumes_lock);
                ubi->avail_pebs += 1;
                spin_unlock(&ubi->volumes_lock);
        }
        ubi_ro_mode(ubi);
        return err;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred on this PEB and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
                   int pnum, int torture)
{
        int err;
        struct ubi_wl_entry *e;

        dbg_wl("PEB %d", pnum);
        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);

        down_read(&ubi->fm_protect);

retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from) {
                /*
                 * User is putting the physical eraseblock which was selected to
                 * be moved. It will be scheduled for erasure in the
                 * wear-leveling worker.
                 */
                dbg_wl("PEB %d is being moved, wait", pnum);
                spin_unlock(&ubi->wl_lock);

                /* Wait for the WL worker by taking the @ubi->move_mutex */
                mutex_lock(&ubi->move_mutex);
                mutex_unlock(&ubi->move_mutex);
                goto retry;
        } else if (e == ubi->move_to) {
                /*
                 * User is putting the physical eraseblock which was selected
                 * as the target the data is moved to. It may happen if the EBA
                 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
                 * but the WL sub-system has not put the PEB to the "used" tree
                 * yet, but it is about to do this. So we just set a flag which
                 * will tell the WL worker that the PEB is not needed anymore
                 * and should be scheduled for erasure.
                 */
                dbg_wl("PEB %d is the target of data moving", pnum);
                ubi_assert(!ubi->move_to_put);
                ubi->move_to_put = 1;
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->fm_protect);
                return 0;
        } else {
                if (in_wl_tree(e, &ubi->used)) {
                        self_check_in_wl_tree(ubi, e, &ubi->used);
                        rb_erase(&e->u.rb, &ubi->used);
                } else if (in_wl_tree(e, &ubi->scrub)) {
                        self_check_in_wl_tree(ubi, e, &ubi->scrub);
                        rb_erase(&e->u.rb, &ubi->scrub);
                } else if (in_wl_tree(e, &ubi->erroneous)) {
                        self_check_in_wl_tree(ubi, e, &ubi->erroneous);
                        rb_erase(&e->u.rb, &ubi->erroneous);
                        ubi->erroneous_peb_count -= 1;
                        ubi_assert(ubi->erroneous_peb_count >= 0);
                        /* Erroneous PEBs should be tortured */
                        torture = 1;
                } else {
                        err = prot_queue_del(ubi, e->pnum);
                        if (err) {
                                ubi_err(ubi, "PEB %d not found", pnum);
                                ubi_ro_mode(ubi);
                                spin_unlock(&ubi->wl_lock);
                                up_read(&ubi->fm_protect);
                                return err;
                        }
                }
        }
        spin_unlock(&ubi->wl_lock);

        err = schedule_erase(ubi, e, vol_id, lnum, torture);
        if (err) {
                spin_lock(&ubi->wl_lock);
                wl_tree_add(e, &ubi->used);
                spin_unlock(&ubi->wl_lock);
        }

        up_read(&ubi->fm_protect);
        return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in background. This function returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
        struct ubi_wl_entry *e;

        ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);

retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
                                   in_wl_tree(e, &ubi->erroneous)) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }

        if (e == ubi->move_to) {
                /*
                 * This physical eraseblock was used to move data to. The data
                 * was moved but the PEB was not yet inserted to the proper
                 * tree. We should just wait a little and let the WL worker
                 * proceed.
                 */
                spin_unlock(&ubi->wl_lock);
                dbg_wl("the PEB %d is not in proper tree, retry", pnum);
                yield();
                goto retry;
        }

        if (in_wl_tree(e, &ubi->used)) {
                self_check_in_wl_tree(ubi, e, &ubi->used);
                rb_erase(&e->u.rb, &ubi->used);
        } else {
                int err;

                err = prot_queue_del(ubi, e->pnum);
                if (err) {
                        ubi_err(ubi, "PEB %d not found", pnum);
                        ubi_ro_mode(ubi);
                        spin_unlock(&ubi->wl_lock);
                        return err;
                }
        }

        wl_tree_add(e, &ubi->scrub);
        spin_unlock(&ubi->wl_lock);

        /*
         * Technically scrubbing is the same as wear-leveling, so it is done
         * by the WL worker.
         */
        return ensure_wear_leveling(ubi, 0);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
 * acts as a wildcard for all of the corresponding volume numbers or logical
 * eraseblock numbers. It returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
        int err = 0;
        int found = 1;

        /*
         * Erase while the pending works queue is not empty, but not more than
         * the number of currently pending works.
         */
        dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
               vol_id, lnum, ubi->works_count);

        while (found) {
                struct ubi_work *wrk, *tmp;
                found = 0;

                down_read(&ubi->work_sem);
                spin_lock(&ubi->wl_lock);
                list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
                        if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
                            (lnum == UBI_ALL || wrk->lnum == lnum)) {
                                list_del(&wrk->list);
                                ubi->works_count -= 1;
                                ubi_assert(ubi->works_count >= 0);
                                spin_unlock(&ubi->wl_lock);

                                err = wrk->func(ubi, wrk, 0);
                                if (err) {
                                        up_read(&ubi->work_sem);
                                        return err;
                                }

                                spin_lock(&ubi->wl_lock);
                                found = 1;
                                break;
                        }
                }
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->work_sem);
        }

        /*
         * Make sure all the works which have been done in parallel are
         * finished.
         */
        down_write(&ubi->work_sem);
        up_write(&ubi->work_sem);

        return err;
}
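
/*
 * For example, ubi_wl_flush(ubi, UBI_ALL, UBI_ALL) drains the whole work
 * queue, while ubi_wl_flush(ubi, vol_id, lnum) only forces out the works
 * affecting that particular LEB.
 */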

/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			wl_entry_destroy(ubi, e);
		}
	}
}
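
/*
 * For orientation only: tree_destroy() above is an iterative post-order
 * walk that severs each leaf from its parent before freeing it, so it
 * needs no recursion and no extra memory. A recursive equivalent (a
 * sketch, assuming the same helpers) would be:
 *
 *	static void tree_destroy_rec(struct ubi_device *ubi,
 *				     struct rb_node *rb)
 *	{
 *		if (!rb)
 *			return;
 *		tree_destroy_rec(ubi, rb->rb_left);
 *		tree_destroy_rec(ubi, rb->rb_right);
 *		wl_entry_destroy(ubi, rb_entry(rb, struct ubi_wl_entry,
 *					       u.rb));
 *	}
 *
 * The iterative form is preferred because recursion is generally avoided
 * in kernel and U-Boot code to keep stack usage bounded.
 */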

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg(ubi, "background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err(ubi, "%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg(ubi, "%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}
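
/*
 * A minimal sketch of the thread's life cycle as seen from the rest of
 * UBI (the real calls live in build.c, not here):
 *
 *	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s",
 *					 ubi->bgt_name);
 *	...
 *	wake_up_process(ubi->bgt_thread);
 *	...
 *	kthread_stop(ubi->bgt_thread);
 *
 * kthread_stop() makes kthread_should_stop() above return true, which is
 * the only way the for (;;) loop terminates normally.
 */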

/**
 * shutdown_work - shut down all pending works.
 * @ubi: UBI device description object
 */
static void shutdown_work(struct ubi_device *ubi)
{
#ifdef CONFIG_MTD_UBI_FASTMAP
#ifndef __UBOOT__
	flush_work(&ubi->fm_work);
#else
	/* In U-Boot all work is done synchronously, so nothing to flush */
#endif
#endif
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}
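
/*
 * Note the third argument to wrk->func() above: shutdown_work() calls
 * each handler with shutdown set to 1, telling it to free its resources
 * without touching the flash, whereas ubi_wl_flush() passes 0 and
 * actually executes the work.
 */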

/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err, i, reserved_pebs, found_pebs = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = ai->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	ubi->free_count = 0;
	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
			wl_entry_destroy(ubi, e);
			goto out_free;
		}

		found_pebs++;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi_assert(e->ec >= 0);

		wl_tree_add(e, &ubi->free);
		ubi->free_count++;

		ubi->lookuptbl[e->pnum] = e;

		found_pebs++;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = aeb->pnum;
			e->ec = aeb->ec;
			ubi->lookuptbl[e->pnum] = e;

			if (!aeb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}

			found_pebs++;
		}
	}

	dbg_wl("found %i PEBs", found_pebs);

	if (ubi->fm) {
		ubi_assert(ubi->good_peb_count ==
			   found_pebs + ubi->fm->used_blocks);

		for (i = 0; i < ubi->fm->used_blocks; i++) {
			e = ubi->fm->e[i];
			ubi->lookuptbl[e->pnum] = e;
		}
	} else
		ubi_assert(ubi->good_peb_count == found_pebs);

	reserved_pebs = WL_RESERVED_PEBS;
	ubi_fastmap_init(ubi, &reserved_pebs);

	if (ubi->avail_pebs < reserved_pebs) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, reserved_pebs);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		goto out_free;
	}
	ubi->avail_pebs -= reserved_pebs;
	ubi->rsvd_pebs += reserved_pebs;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi, 0);
	if (err)
		goto out_free;

	return 0;

out_free:
	shutdown_work(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}
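
/*
 * Rough shape of the attach-time sequence that leads here, for
 * orientation only (the real flow lives in attach.c and build.c):
 *
 *	scan the flash and build the &struct ubi_attach_info @ai;
 *	err = ubi_wl_init(ubi, ai);	populate free/used/scrub trees
 *	err = ubi_eba_init(ubi, ai);	build the EBA tables on top
 */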

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			wl_entry_destroy(ubi, e);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	ubi_fastmap_close(ubi);
	shutdown_work(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->erroneous);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
}

/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock @pnum
 * matches @ec, %1 if it does not, and a negative error code if an error
 * occurred.
 */
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
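	/*
	 * A difference of 1 is tolerated below: the on-flash counter may
	 * already account for an erasure that the caller's @ec does not.
	 */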
	if (ec != read_ec && read_ec - ec > 1) {
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
		dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root)
{
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	if (in_wl_tree(e, root))
		return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	dump_stack();
	return -EINVAL;
}

/**
 * self_check_in_pq - check if wear-leveling entry is in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	dump_stack();
	return -EINVAL;
}

#ifndef CONFIG_MTD_UBI_FASTMAP
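/**
 * get_peb_for_wl - get a free physical eraseblock for wear-leveling.
 * @ubi: UBI device description object
 *
 * Picks a free PEB whose erase counter is within %WL_FREE_MAX_DIFF of the
 * smallest one and removes it from the @ubi->free tree. Must be called
 * with @ubi->wl_lock held.
 */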
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
	self_check_in_wl_tree(ubi, e, &ubi->free);
	ubi->free_count--;
	ubi_assert(ubi->free_count >= 0);
	rb_erase(&e->u.rb, &ubi->free);

	return e;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		spin_lock(&ubi->wl_lock);
		if (err)
			return err;
	}

	return 0;
}
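
/*
 * Caller-side note, for illustration only (the real caller is
 * ubi_wl_get_peb() below): produce_free_peb() must be entered with
 * @ubi->wl_lock held. Because the lock is dropped around each work, the
 * free tree may have changed by the time it returns, which is why
 * ubi_wl_get_peb() restarts from "retry" instead of assuming a free PEB
 * now exists.
 */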

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int err;
	struct ubi_wl_entry *e;

retry:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_err(ubi, "no free eraseblocks");
			ubi_assert(list_empty(&ubi->works));
			spin_unlock(&ubi->wl_lock);
			return -ENOSPC;
		}

		err = produce_free_peb(ubi);
		if (err < 0) {
			spin_unlock(&ubi->wl_lock);
			return err;
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		goto retry;
	}
	e = wl_get_wle(ubi);
	prot_queue_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
				    ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
		return err;
	}

	return e->pnum;
}
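
/*
 * Caller-side sketch (illustrative; the real users live in eba.c). The
 * read lock on @ubi->fm_eba_sem taken above is intentionally leaked to
 * the caller, which must drop it once the returned PEB number has been
 * recorded, on the error path as well:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	... use pnum, e.g. write the VID header and update the EBA ...
 *	up_read(&ubi->fm_eba_sem);
 */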
#else
#include "fastmap-wl.c"
#endif