*/
/*
- * UBI wear-leveling unit.
+ * UBI wear-leveling sub-system.
*
- * This unit is responsible for wear-leveling. It works in terms of physical
- * eraseblocks and erase counters and knows nothing about logical eraseblocks,
- * volumes, etc. From this unit's perspective all physical eraseblocks are of
- * two types - used and free. Used physical eraseblocks are those that were
- * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
- * those that were put by the 'ubi_wl_put_peb()' function.
+ * This sub-system is responsible for wear-leveling. It works in terms of
+ * physical eraseblocks and erase counters and knows nothing about logical
+ * eraseblocks, volumes, etc. From this sub-system's perspective all physical
+ * eraseblocks are of two types - used and free. Used physical eraseblocks are
+ * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
+ * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
*
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
- * counter header. The rest of the physical eraseblock contains only 0xFF
- * bytes.
+ * counter header. The rest of the physical eraseblock contains only %0xFF
+ * bytes.
*
- * When physical eraseblocks are returned to the WL unit by means of the
+ * When physical eraseblocks are returned to the WL sub-system by means of the
* 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
* done asynchronously in context of the per-UBI device background thread,
- * which is also managed by the WL unit.
+ * which is also managed by the WL sub-system.
*
* The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
* The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
* an "optimal" physical eraseblock. For example, when it is known that the
* physical eraseblock will be "put" soon because it contains short-term data,
- * the WL unit may pick a free physical eraseblock with low erase counter, and
- * so forth.
+ * the WL sub-system may pick a free physical eraseblock with low erase
+ * counter, and so forth.
*
- * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
+ * If the WL sub-system fails to erase a physical eraseblock, it marks it as
+ * bad.
*
- * This unit is also responsible for scrubbing. If a bit-flip is detected in a
- * physical eraseblock, it has to be moved. Technically this is the same as
- * moving it for wear-leveling reasons.
+ * This sub-system is also responsible for scrubbing. If a bit-flip is detected
+ * in a physical eraseblock, it has to be moved. Technically this is the same
+ * as moving it for wear-leveling reasons.
*
- * As it was said, for the UBI unit all physical eraseblocks are either "free"
- * or "used". Free eraseblock are kept in the @wl->free RB-tree, while used
- * eraseblocks are kept in a set of different RB-trees: @wl->used,
+ * As stated above, for the UBI sub-system all physical eraseblocks are either
+ * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
+ * used eraseblocks are kept in a set of different RB-trees: @wl->used,
* @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
*
* Note, in this implementation, we keep a small in-RAM object for each physical
* eraseblock. This is surely not a scalable solution. But it appears to be good
* enough for moderately large flashes and it is simple. In future, one may
- * re-work this unit and make it more scalable.
+ * re-work this sub-system and make it more scalable.
*
- * At the moment this unit does not utilize the sequence number, which was
- * introduced relatively recently. But it would be wise to do this because the
- * sequence number of a logical eraseblock characterizes how old is it. For
+ * At the moment this sub-system does not utilize the sequence number, which
+ * was introduced relatively recently. But it would be wise to do this because
+ * the sequence number of a logical eraseblock characterizes how old it is. For
* example, when we move a PEB with low erase counter, and we need to pick the
* target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick target PEB with an average EC if our PEB is not very "old". This leaves
- * room for future re-works of the WL unit.
+ * room for future re-works of the WL sub-system.
*
- * FIXME: looks too complex, should be simplified (later).
+ * Note: the stuff with protection trees looks too complex and is difficult to
+ * understand. Should be fixed.
*/
#include <linux/slab.h>
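
/*
 * Illustrative sketch (not part of the original file): the get/put
 * round-trip described in the header comment above, as seen by a caller.
 * It assumes the declarations from ubi.h ('struct ubi_device',
 * 'ubi_wl_get_peb()', 'ubi_wl_put_peb()', %UBI_SHORTTERM); the function
 * name is made up and error handling is abbreviated.
 */
static int wl_roundtrip_sketch(struct ubi_device *ubi)
{
	int pnum;

	/* Pick an "optimal" free PEB, hinting that the data is short-term */
	pnum = ubi_wl_get_peb(ubi, UBI_SHORTTERM);
	if (pnum < 0)
		return pnum;

	/* ... write user data after the EC header ... */

	/* Return the PEB; it gets scheduled for asynchronous erasure */
	return ubi_wl_put_peb(ubi, pnum, 0);
}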
/*
* Maximum difference between two erase counters. If this threshold is
- * exceeded, the WL unit starts moving data from used physical eraseblocks with
- * low erase counter to free physical eraseblocks with high erase counter.
+ * exceeded, the WL sub-system starts moving data from used physical
+ * eraseblocks with low erase counter to free physical eraseblocks with high
+ * erase counter.
*/
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
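
/*
 * For illustration only (hypothetical helper, not in the driver): the
 * trigger condition this threshold implies, as evaluated in
 * 'wear_leveling_worker()' below, where @e1 is the least worn-out used PEB
 * and @e2 the most worn-out free PEB.
 */
static inline int wl_threshold_exceeded(const struct ubi_wl_entry *e1,
					const struct ubi_wl_entry *e2)
{
	return e2->ec - e1->ec >= UBI_WL_THRESHOLD;
}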
/*
- * When a physical eraseblock is moved, the WL unit has to pick the target
+ * When a physical eraseblock is moved, the WL sub-system has to pick the target
* physical eraseblock to move to. The simplest way would be just to pick the
* one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
* situation when the picked physical eraseblock is constantly erased after the
* data is written to it. So, we have a constant which limits the highest erase
- * counter of the free physical eraseblock to pick. Namely, the WL unit does
- * not pick eraseblocks with erase counter greater then the lowest erase
+ * counter of the free physical eraseblock to pick. Namely, the WL sub-system
+ * does not pick eraseblocks with erase counter greater than the lowest erase
* counter plus %WL_FREE_MAX_DIFF.
*/
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
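
/*
 * A sketch of the bounded free-PEB lookup the comment above implies. It
 * follows the shape of the driver's 'find_wl_entry()' (used below, body not
 * visible in this excerpt): walk the EC-ordered RB-tree and keep the
 * rightmost entry whose erase counter stays below the lowest EC plus @max.
 */
static struct ubi_wl_entry *find_wl_entry_sketch(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}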

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32
-/**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
- struct rb_node rb;
- int ec;
- int pnum;
-};
-
/**
* struct ubi_wl_prot_entry - PEB protection entry.
* @rb_pnum: link in the @wl->prot.pnum RB-tree
* @abs_ec: the absolute erase counter value when the protection ends
* @e: the wear-leveling entry of the physical eraseblock under protection
*
- * When the WL unit returns a physical eraseblock, the physical eraseblock is
- * protected from being moved for some "time". For this reason, the physical
- * eraseblock is not directly moved from the @wl->free tree to the @wl->used
- * tree. There is one more tree in between where this physical eraseblock is
- * temporarily stored (@wl->prot).
+ * When the WL sub-system returns a physical eraseblock, the physical
+ * eraseblock is protected from being moved for some "time". For this reason,
+ * the physical eraseblock is not directly moved from the @wl->free tree to the
+ * @wl->used tree. There is one more tree in between where this physical
+ * eraseblock is temporarily stored (@wl->prot).
*
* All this protection stuff is needed because:
* o we don't want to move physical eraseblocks just after we have given them
* @list: a link in the list of pending works
* @func: worker function
* @priv: private data of the worker function
- *
* @e: physical eraseblock to erase
* @torture: if the physical eraseblock has to be tortured
*
};
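
/*
 * For orientation, a reconstruction of the work item from the kernel-doc
 * above; the actual 'struct ubi_work' body sits outside this excerpt, so
 * treat this as a sketch rather than the authoritative definition.
 */
struct ubi_work_sketch {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	void *priv;
	/* The fields below are only relevant to erase works */
	struct ubi_wl_entry *e;
	int torture;
};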
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec);
+static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
struct rb_root *root);
#else
#define paranoid_check_in_wl_tree(e, root)
#endif
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
-/**
- * tree_empty - a helper function to check if an RB-tree is empty.
- * @root: the root of the tree
- *
- * This function returns non-zero if the RB-tree is empty and zero if not.
- */
-static inline int tree_empty(struct rb_root *root)
-{
- return root->rb_node == NULL;
-}
-
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
* @e: the wear-leveling entry to add
rb_insert_color(&e->rb, root);
}
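
/*
 * A condensed sketch of the ordering 'wl_tree_add()' uses (the top of the
 * function is outside this excerpt): entries are keyed by erase counter,
 * with the PEB number breaking ties, so rb_first() always yields the least
 * worn-out entry.
 */
static void wl_tree_add_sketch(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, rb);

		if (e->ec < e1->ec)
			p = &parent->rb_left;
		else if (e->ec > e1->ec)
			p = &parent->rb_right;
		else if (e->pnum < e1->pnum)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, root);
}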
-
-/*
- * Helper functions to add and delete wear-leveling entries from different
- * trees.
- */
-
-static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
-{
- wl_tree_add(e, &ubi->free);
-}
-static inline void used_tree_add(struct ubi_device *ubi,
- struct ubi_wl_entry *e)
-{
- wl_tree_add(e, &ubi->used);
-}
-static inline void scrub_tree_add(struct ubi_device *ubi,
- struct ubi_wl_entry *e)
-{
- wl_tree_add(e, &ubi->scrub);
-}
-static inline void free_tree_del(struct ubi_device *ubi,
- struct ubi_wl_entry *e)
-{
- paranoid_check_in_wl_tree(e, &ubi->free);
- rb_erase(&e->rb, &ubi->free);
-}
-static inline void used_tree_del(struct ubi_device *ubi,
- struct ubi_wl_entry *e)
-{
- paranoid_check_in_wl_tree(e, &ubi->used);
- rb_erase(&e->rb, &ubi->used);
-}
-static inline void scrub_tree_del(struct ubi_device *ubi,
- struct ubi_wl_entry *e)
-{
- paranoid_check_in_wl_tree(e, &ubi->scrub);
- rb_erase(&e->rb, &ubi->scrub);
-}
-
/**
* do_work - do one pending work.
* @ubi: UBI device description object
int err;
struct ubi_work *wrk;
- spin_lock(&ubi->wl_lock);
+ cond_resched();
+ /*
+ * @ubi->work_sem is used to synchronize with the workers. Workers take
+ * it in read mode, so many of them may be doing works at a time. But
+ * the queue flush code has to be sure the whole queue of works is
+ * done, and it takes the mutex in write mode.
+ */
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works)) {
spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
return 0;
}
wrk = list_entry(ubi->works.next, struct ubi_work, list);
list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
spin_unlock(&ubi->wl_lock);
	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
err = wrk->func(ubi, wrk, 0);
if (err)
ubi_err("work failed with error code %d", err);
+ up_read(&ubi->work_sem);
- spin_lock(&ubi->wl_lock);
- ubi->works_count -= 1;
- ubi_assert(ubi->works_count >= 0);
- spin_unlock(&ubi->wl_lock);
return err;
}
int err;
spin_lock(&ubi->wl_lock);
- while (tree_empty(&ubi->free)) {
+ while (!ubi->free.rb_node) {
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
* @ubi: UBI device description object
* @e: the physical eraseblock to add
* @pe: protection entry object to use
- * @abs_ec: absolute erase counter value when this physical eraseblock has
- * to be removed from the protection trees.
+ * @ec: for how many erase operations this PEB should be protected
*
* @wl->lock has to be locked.
*/
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
- struct ubi_wl_prot_entry *pe, int abs_ec)
+ struct ubi_wl_prot_entry *pe, int ec)
{
struct rb_node **p, *parent = NULL;
struct ubi_wl_prot_entry *pe1;
pe->e = e;
- pe->abs_ec = ubi->abs_ec + abs_ec;
+ pe->abs_ec = ubi->abs_ec + ec;
p = &ubi->prot.pnum.rb_node;
while (*p) {
ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
dtype == UBI_UNKNOWN);
- pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL);
+ pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
if (!pe)
return -ENOMEM;
retry:
spin_lock(&ubi->wl_lock);
- if (tree_empty(&ubi->free)) {
+ if (!ubi->free.rb_node) {
if (ubi->works_count == 0) {
ubi_assert(list_empty(&ubi->works));
ubi_err("no free eraseblocks");
}
switch (dtype) {
- case UBI_LONGTERM:
- /*
- * For long term data we pick a physical eraseblock
- * with high erase counter. But the highest erase
- * counter we can pick is bounded by the the lowest
- * erase counter plus %WL_FREE_MAX_DIFF.
- */
- e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- protect = LT_PROTECTION;
- break;
- case UBI_UNKNOWN:
- /*
- * For unknown data we pick a physical eraseblock with
- * medium erase counter. But we by no means can pick a
- * physical eraseblock with erase counter greater or
- * equivalent than the lowest erase counter plus
- * %WL_FREE_MAX_DIFF.
- */
- first = rb_entry(rb_first(&ubi->free),
- struct ubi_wl_entry, rb);
- last = rb_entry(rb_last(&ubi->free),
- struct ubi_wl_entry, rb);
+ case UBI_LONGTERM:
+ /*
+ * For long term data we pick a physical eraseblock with high
+ * erase counter. But the highest erase counter we can pick is
+ * bounded by the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF.
+ */
+ e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ protect = LT_PROTECTION;
+ break;
+ case UBI_UNKNOWN:
+ /*
+ * For unknown data we pick a physical eraseblock with medium
+ * erase counter. But we by no means can pick a physical
+ * eraseblock with erase counter greater than or equal to the
+ * lowest erase counter plus %WL_FREE_MAX_DIFF.
+ */
+ first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
+ last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb);
- if (last->ec - first->ec < WL_FREE_MAX_DIFF)
- e = rb_entry(ubi->free.rb_node,
- struct ubi_wl_entry, rb);
- else {
- medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
- e = find_wl_entry(&ubi->free, medium_ec);
- }
- protect = U_PROTECTION;
- break;
- case UBI_SHORTTERM:
- /*
- * For short term data we pick a physical eraseblock
- * with the lowest erase counter as we expect it will
- * be erased soon.
- */
- e = rb_entry(rb_first(&ubi->free),
- struct ubi_wl_entry, rb);
- protect = ST_PROTECTION;
- break;
- default:
- protect = 0;
- e = NULL;
- BUG();
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF)
+ e = rb_entry(ubi->free.rb_node,
+ struct ubi_wl_entry, rb);
+ else {
+ medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
+ e = find_wl_entry(&ubi->free, medium_ec);
+ }
+ protect = U_PROTECTION;
+ break;
+ case UBI_SHORTTERM:
+ /*
+ * For short term data we pick a physical eraseblock with the
+ * lowest erase counter as we expect it will be erased soon.
+ */
+ e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
+ protect = ST_PROTECTION;
+ break;
+ default:
+ protect = 0;
+ e = NULL;
+ BUG();
}
/*
* Move the physical eraseblock to the protection trees where it will
* be protected from being moved for some time.
*/
- free_tree_del(ubi, e);
+ paranoid_check_in_wl_tree(e, &ubi->free);
+ rb_erase(&e->rb, &ubi->free);
prot_tree_add(ubi, e, pe, protect);
dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
* prot_tree_del - remove a physical eraseblock from the protection trees
* @ubi: UBI device description object
* @pnum: the physical eraseblock to remove
+ *
+ * This function removes PEB @pnum from the protection trees and returns zero
+ * in case of success and %-ENODEV if the PEB was not found in the protection
+ * trees.
*/
-static void prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
struct rb_node *p;
struct ubi_wl_prot_entry *pe = NULL;
pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
if (pnum == pe->e->pnum)
- break;
+ goto found;
if (pnum < pe->e->pnum)
p = p->rb_left;
p = p->rb_right;
}
+ return -ENODEV;
+
+found:
ubi_assert(pe->e->pnum == pnum);
rb_erase(&pe->rb_aec, &ubi->prot.aec);
rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
kfree(pe);
+ return 0;
}
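
/*
 * Hypothetical caller sketch (the helper name is made up) showing how the
 * new return value of 'prot_tree_del()' is meant to be handled; it mirrors
 * the pattern 'ubi_wl_put_peb()' and 'ubi_wl_scrub_peb()' use further
 * below. As with 'prot_tree_del()', @ubi->wl_lock must be held.
 */
static int prot_tree_del_checked(struct ubi_device *ubi, int pnum)
{
	int err;

	err = prot_tree_del(ubi, pnum);
	if (err) {
		/* The PEB was in none of the protection trees - bad news */
		ubi_err("PEB %d not found", pnum);
		ubi_ro_mode(ubi);
	}
	return err;
}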
/**
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
+static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int torture)
{
int err;
struct ubi_ec_hdr *ec_hdr;
if (err > 0)
return -EINVAL;
- ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
}
/**
- * check_protection_over - check if it is time to stop protecting some
- * physical eraseblocks.
+ * check_protection_over - check if it is time to stop protecting some PEBs.
* @ubi: UBI device description object
*
* This function is called after each erase operation, when the absolute erase
*/
while (1) {
spin_lock(&ubi->wl_lock);
- if (tree_empty(&ubi->prot.aec)) {
+ if (!ubi->prot.aec.rb_node) {
spin_unlock(&ubi->wl_lock);
break;
}
pe->e->pnum, ubi->abs_ec, pe->abs_ec);
rb_erase(&pe->rb_aec, &ubi->prot.aec);
rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
- used_tree_add(ubi, pe->e);
+ wl_tree_add(pe->e, &ubi->used);
spin_unlock(&ubi->wl_lock);
kfree(pe);
dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
e->pnum, e->ec, torture);
- wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wl_wrk)
return -ENOMEM;
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int cancel)
{
- int err, put = 0;
+ int err, put = 0, scrubbing = 0;
+ struct ubi_wl_prot_entry *uninitialized_var(pe);
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
if (cancel)
return 0;
- vid_hdr = ubi_zalloc_vid_hdr(ubi);
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
+ mutex_lock(&ubi->move_mutex);
spin_lock(&ubi->wl_lock);
+ ubi_assert(!ubi->move_from && !ubi->move_to);
+ ubi_assert(!ubi->move_to_put);
- /*
- * Only one WL worker at a time is supported at this implementation, so
- * make sure a PEB is not being moved already.
- */
- if (ubi->move_to || tree_empty(&ubi->free) ||
- (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) {
+ if (!ubi->free.rb_node ||
+ (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
/*
- * Only one WL worker at a time is supported at this
- * implementation, so if a LEB is already being moved, cancel.
- *
- * No free physical eraseblocks? Well, we cancel wear-leveling
- * then. It will be triggered again when a free physical
- * eraseblock appears.
+ * No free physical eraseblocks? Well, they must be waiting in
+ * the queue to be erased. Cancel movement - it will be
+ * triggered again when a free physical eraseblock appears.
*
* No used physical eraseblocks? They must be temporarily
* protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
*/
dbg_wl("cancel WL, a list is empty: free %d, used %d",
- tree_empty(&ubi->free), tree_empty(&ubi->used));
- ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return 0;
+ !ubi->free.rb_node, !ubi->used.rb_node);
+ goto out_cancel;
}
- if (tree_empty(&ubi->scrub)) {
+ if (!ubi->scrub.rb_node) {
/*
* Now pick the least worn-out used physical eraseblock and a
* highly worn-out free physical eraseblock. If the erase
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
- ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return 0;
+ goto out_cancel;
}
- used_tree_del(ubi, e1);
+ paranoid_check_in_wl_tree(e1, &ubi->used);
+ rb_erase(&e1->rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
} else {
+ /* Perform scrubbing */
+ scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- scrub_tree_del(ubi, e1);
+ paranoid_check_in_wl_tree(e1, &ubi->scrub);
+ rb_erase(&e1->rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
- free_tree_del(ubi, e2);
- ubi_assert(!ubi->move_from && !ubi->move_to);
- ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
+ paranoid_check_in_wl_tree(e2, &ubi->free);
+ rb_erase(&e2->rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
* We so far do not know which logical eraseblock our physical
* eraseblock (@e1) belongs to. We have to read the volume identifier
* header first.
+ *
+ * Note, we are protected from this PEB being unmapped and erased. The
+ * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
+ * which is being moved was unmapped.
*/
err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
* likely have the VID header in place.
*/
dbg_wl("PEB %d has no VID header", e1->pnum);
- err = 0;
- } else {
- ubi_err("error %d while reading VID header from PEB %d",
- err, e1->pnum);
- if (err > 0)
- err = -EIO;
+ goto out_not_moved;
}
- goto error;
+
+ ubi_err("error %d while reading VID header from PEB %d",
+ err, e1->pnum);
+ if (err > 0)
+ err = -EIO;
+ goto out_error;
}
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
if (err) {
- if (err == UBI_IO_BITFLIPS)
- err = 0;
- goto error;
+
+ if (err < 0)
+ goto out_error;
+ if (err == 1)
+ goto out_not_moved;
+
+ /*
+ * For some reason the LEB was not moved - it might be because
+ * the volume is being deleted. We should prevent this PEB from
+		 * being selected for wear-leveling movement for some "time",
+		 * so we put it into the protection tree.
+ */
+
+ dbg_wl("cancelled moving PEB %d", e1->pnum);
+ pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
+ if (!pe) {
+ err = -ENOMEM;
+ goto out_error;
+ }
+
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ spin_lock(&ubi->wl_lock);
+ prot_tree_add(ubi, e1, pe, U_PROTECTION);
+ ubi_assert(!ubi->move_to_put);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ err = schedule_erase(ubi, e2, 0);
+ if (err)
+ goto out_error;
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
}
+ /* The PEB has been successfully moved */
ubi_free_vid_hdr(ubi, vid_hdr);
+ if (scrubbing)
+ ubi_msg("scrubbed PEB %d, data moved to PEB %d",
+ e1->pnum, e2->pnum);
+
spin_lock(&ubi->wl_lock);
if (!ubi->move_to_put)
- used_tree_add(ubi, e2);
+ wl_tree_add(e2, &ubi->used);
else
put = 1;
ubi->move_from = ubi->move_to = NULL;
- ubi->move_from_put = ubi->move_to_put = 0;
- ubi->wl_scheduled = 0;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
+ err = schedule_erase(ubi, e1, 0);
+ if (err)
+ goto out_error;
+
if (put) {
/*
* Well, the target PEB was put meanwhile, schedule it for
*/
dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
err = schedule_erase(ubi, e2, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e2);
- ubi_ro_mode(ubi);
- }
- }
-
- err = schedule_erase(ubi, e1, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e1);
- ubi_ro_mode(ubi);
+ if (err)
+ goto out_error;
}
dbg_wl("done");
- return err;
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
/*
- * Some error occurred. @e1 was not changed, so return it back. @e2
- * might be changed, schedule it for erasure.
+ * For some reason the LEB was not moved; it might be an error or
+ * something else. @e1 was not changed, so return it back. @e2 might
+ * be changed, schedule it for erasure.
*/
-error:
- if (err)
- dbg_wl("error %d occurred, cancel operation", err);
- ubi_assert(err <= 0);
-
+out_not_moved:
ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
- ubi->wl_scheduled = 0;
- if (ubi->move_from_put)
- put = 1;
+ if (scrubbing)
+ wl_tree_add(e1, &ubi->scrub);
else
- used_tree_add(ubi, e1);
+ wl_tree_add(e1, &ubi->used);
ubi->move_from = ubi->move_to = NULL;
- ubi->move_from_put = ubi->move_to_put = 0;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- if (put) {
- /*
- * Well, the target PEB was put meanwhile, schedule it for
- * erasure.
- */
- dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
- err = schedule_erase(ubi, e1, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e1);
- ubi_ro_mode(ubi);
- }
- }
-
err = schedule_erase(ubi, e2, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e2);
- ubi_ro_mode(ubi);
- }
+ if (err)
+ goto out_error;
+
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
- yield();
+out_error:
+ ubi_err("error %d while moving PEB %d to PEB %d",
+ err, e1->pnum, e2->pnum);
+
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ spin_lock(&ubi->wl_lock);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ kmem_cache_free(ubi_wl_entry_slab, e1);
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+ ubi_ro_mode(ubi);
+
+ mutex_unlock(&ubi->move_mutex);
return err;
+
+out_cancel:
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ mutex_unlock(&ubi->move_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
}
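
/*
 * Informational summary, inferred from the code above rather than taken
 * from the original sources, of the synchronization objects the mover
 * relies on:
 *
 * o @ubi->move_mutex - serializes LEB movement; 'ubi_wl_put_peb()' takes
 *   and releases it to wait for an in-flight move to finish;
 * o @ubi->wl_lock - spinlock protecting the RB-trees and the @move_from,
 *   @move_to and @move_to_put fields;
 * o @ubi->work_sem - taken in read mode by workers and in write mode by
 *   'ubi_wl_flush()' to wait for works running in parallel.
 */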
/**
* If the ubi->scrub tree is not empty, scrubbing is needed, and the
 * WL worker has to be scheduled anyway.
*/
- if (tree_empty(&ubi->scrub)) {
- if (tree_empty(&ubi->used) || tree_empty(&ubi->free))
+ if (!ubi->scrub.rb_node) {
+ if (!ubi->used.rb_node || !ubi->free.rb_node)
/* No physical eraseblocks - no deal */
goto out_unlock;
ubi->wl_scheduled = 1;
spin_unlock(&ubi->wl_lock);
- wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wrk) {
err = -ENOMEM;
goto out_cancel;
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
kfree(wl_wrk);
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
return 0;
}
spin_lock(&ubi->wl_lock);
ubi->abs_ec += 1;
- free_tree_add(ubi, e);
+ wl_tree_add(e, &ubi->free);
spin_unlock(&ubi->wl_lock);
/*
- * One more erase operation has happened, take care about protected
- * physical eraseblocks.
+ * One more erase operation has happened, take care about
+ * protected physical eraseblocks.
*/
check_protection_over(ubi);
return err;
}
+ ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
}
/**
- * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling
- * unit.
+ * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
* @ubi: UBI device description object
* @pnum: physical eraseblock to return
* @torture: if this physical eraseblock has to be tortured
* This function is called to return physical eraseblock @pnum to the pool of
* free physical eraseblocks. The @torture flag has to be set if an I/O error
* occurred to this @pnum and it has to be tested. This function returns zero
- * in case of success and a negative error code in case of failure.
+ * in case of success, and a negative error code in case of failure.
*/
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
ubi_assert(pnum >= 0);
ubi_assert(pnum < ubi->peb_count);
+retry:
spin_lock(&ubi->wl_lock);
-
e = ubi->lookuptbl[pnum];
if (e == ubi->move_from) {
/*
* be moved. It will be scheduled for erasure in the
* wear-leveling worker.
*/
- dbg_wl("PEB %d is being moved", pnum);
- ubi_assert(!ubi->move_from_put);
- ubi->move_from_put = 1;
+ dbg_wl("PEB %d is being moved, wait", pnum);
spin_unlock(&ubi->wl_lock);
- return 0;
+
+ /* Wait for the WL worker by taking the @ubi->move_mutex */
+ mutex_lock(&ubi->move_mutex);
+ mutex_unlock(&ubi->move_mutex);
+ goto retry;
} else if (e == ubi->move_to) {
/*
* User is putting the physical eraseblock which was selected
* as the target the data is moved to. It may happen if the EBA
- * unit already re-mapped the LEB but the WL unit did has not
- * put the PEB to the "used" tree.
+ * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
+ * but the WL sub-system has not put the PEB to the "used" tree
+ * yet, but it is about to do this. So we just set a flag which
+ * will tell the WL worker that the PEB is not needed anymore
+ * and should be scheduled for erasure.
*/
dbg_wl("PEB %d is the target of data moving", pnum);
ubi_assert(!ubi->move_to_put);
spin_unlock(&ubi->wl_lock);
return 0;
} else {
- if (in_wl_tree(e, &ubi->used))
- used_tree_del(ubi, e);
- else if (in_wl_tree(e, &ubi->scrub))
- scrub_tree_del(ubi, e);
- else
- prot_tree_del(ubi, e->pnum);
+ if (in_wl_tree(e, &ubi->used)) {
+ paranoid_check_in_wl_tree(e, &ubi->used);
+ rb_erase(&e->rb, &ubi->used);
+ } else if (in_wl_tree(e, &ubi->scrub)) {
+ paranoid_check_in_wl_tree(e, &ubi->scrub);
+ rb_erase(&e->rb, &ubi->scrub);
+ } else {
+ err = prot_tree_del(ubi, e->pnum);
+ if (err) {
+ ubi_err("PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
}
spin_unlock(&ubi->wl_lock);
err = schedule_erase(ubi, e, torture);
if (err) {
spin_lock(&ubi->wl_lock);
- used_tree_add(ubi, e);
+ wl_tree_add(e, &ubi->used);
spin_unlock(&ubi->wl_lock);
}
{
struct ubi_wl_entry *e;
- ubi_msg("schedule PEB %d for scrubbing", pnum);
+ dbg_msg("schedule PEB %d for scrubbing", pnum);
retry:
spin_lock(&ubi->wl_lock);
goto retry;
}
- if (in_wl_tree(e, &ubi->used))
- used_tree_del(ubi, e);
- else
- prot_tree_del(ubi, pnum);
+ if (in_wl_tree(e, &ubi->used)) {
+ paranoid_check_in_wl_tree(e, &ubi->used);
+ rb_erase(&e->rb, &ubi->used);
+ } else {
+ int err;
+
+ err = prot_tree_del(ubi, e->pnum);
+ if (err) {
+ ubi_err("PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
- scrub_tree_add(ubi, e);
+ wl_tree_add(e, &ubi->scrub);
spin_unlock(&ubi->wl_lock);
/*
*/
int ubi_wl_flush(struct ubi_device *ubi)
{
- int err, pending_count;
-
- pending_count = ubi->works_count;
-
- dbg_wl("flush (%d pending works)", pending_count);
+ int err;
/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
*/
- while (pending_count-- > 0) {
+ dbg_wl("flush (%d pending works)", ubi->works_count);
+ while (ubi->works_count) {
+ err = do_work(ubi);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Make sure all the works which have been done in parallel are
+ * finished.
+ */
+ down_write(&ubi->work_sem);
+ up_write(&ubi->work_sem);
+
+ /*
+	 * And in case the last one was the WL worker which cancelled the LEB
+ * movement, flush again.
+ */
+ while (ubi->works_count) {
+ dbg_wl("flush more (%d pending works)", ubi->works_count);
err = do_work(ubi);
if (err)
return err;
rb->rb_right = NULL;
}
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
}
}
}
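
/*
 * The hunk above shows only the tail of 'tree_destroy()'. For orientation,
 * a sketch of the whole destructor: an iterative post-order walk which
 * detaches each leaf from its parent and frees it, avoiding recursion.
 */
static void tree_destroy_sketch(struct rb_root *root)
{
	struct rb_node *rb = root->rb_node;
	struct ubi_wl_entry *e;

	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}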
/**
 * ubi_thread - UBI background thread.
* @u: the UBI device description object pointer
*/
-static int ubi_thread(void *u)
+int ubi_thread(void *u)
{
int failures = 0;
struct ubi_device *ubi = u;
+#ifdef CONFIG_POLLUX_KERNEL_BOOT_MESSAGE_ENABLE
ubi_msg("background thread \"%s\" started, PID %d",
- ubi->bgt_name, current->pid);
+ ubi->bgt_name, task_pid_nr(current));
+#endif
+
set_freezable();
for (;;) {
int err;
if (kthread_should_stop())
- goto out;
+ break;
if (try_to_freeze())
continue;
ubi_msg("%s: %d consecutive failures",
ubi->bgt_name, WL_MAX_FAILURES);
ubi_ro_mode(ubi);
- break;
+ ubi->thread_enabled = 0;
+ continue;
}
} else
failures = 0;
cond_resched();
}
-out:
dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
return 0;
}
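
/*
 * Note: with 'ubi_thread()' no longer static, thread creation is expected
 * to move to the caller; the lines removed from 'ubi_wl_init_scan()' below
 * show what it used to look like here. A minimal caller-side sketch based
 * on those removed lines (the helper name is made up):
 */
static int wl_spawn_bgt_sketch(struct ubi_device *ubi)
{
	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		int err = PTR_ERR(ubi->bgt_thread);

		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, err);
		return err;
	}
	return 0;
}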
}
/**
- * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
- * information.
+ * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
* @ubi: UBI device description object
* @si: scanning information
*
ubi->used = ubi->free = ubi->scrub = RB_ROOT;
ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
+ mutex_init(&ubi->move_mutex);
+ init_rwsem(&ubi->work_sem);
ubi->max_ec = si->max_ec;
INIT_LIST_HEAD(&ubi->works);
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
- ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
- if (IS_ERR(ubi->bgt_thread)) {
- err = PTR_ERR(ubi->bgt_thread);
- ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
- err);
- return err;
- }
-
- if (ubi_devices_cnt == 0) {
- wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
- sizeof(struct ubi_wl_entry),
- 0, 0, NULL);
- if (!wl_entries_slab)
- return -ENOMEM;
- }
-
err = -ENOMEM;
ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
if (!ubi->lookuptbl)
- goto out_free;
+ return err;
list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
e->ec = seb->ec;
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, 0)) {
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
}
list_for_each_entry(seb, &si->free, u.list) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
e->pnum = seb->pnum;
e->ec = seb->ec;
ubi_assert(e->ec >= 0);
- free_tree_add(ubi, e);
+ wl_tree_add(e, &ubi->free);
ubi->lookuptbl[e->pnum] = e;
}
list_for_each_entry(seb, &si->corr, u.list) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
e->ec = seb->ec;
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, 0)) {
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
}
ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
if (!seb->scrub) {
dbg_wl("add PEB %d EC %d to the used tree",
e->pnum, e->ec);
- used_tree_add(ubi, e);
+ wl_tree_add(e, &ubi->used);
} else {
dbg_wl("add PEB %d EC %d to the scrub tree",
e->pnum, e->ec);
- scrub_tree_add(ubi, e);
+ wl_tree_add(e, &ubi->scrub);
}
}
}
- if (WL_RESERVED_PEBS > ubi->avail_pebs) {
+ if (ubi->avail_pebs < WL_RESERVED_PEBS) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, WL_RESERVED_PEBS);
goto out_free;
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);
- if (ubi_devices_cnt == 0)
- kmem_cache_destroy(wl_entries_slab);
return err;
}
rb->rb_right = NULL;
}
- kmem_cache_free(wl_entries_slab, pe->e);
+ kmem_cache_free(ubi_wl_entry_slab, pe->e);
kfree(pe);
}
}
}
/**
- * ubi_wl_close - close the wear-leveling unit.
+ * ubi_wl_close - close the wear-leveling sub-system.
* @ubi: UBI device description object
*/
void ubi_wl_close(struct ubi_device *ubi)
{
- dbg_wl("disable \"%s\"", ubi->bgt_name);
- if (ubi->bgt_thread)
- kthread_stop(ubi->bgt_thread);
-
- dbg_wl("close the UBI wear-leveling unit");
-
+ dbg_wl("close the WL sub-system");
cancel_pending(ubi);
protection_trees_destroy(ubi);
tree_destroy(&ubi->used);
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);
- if (ubi_devices_cnt == 1)
- kmem_cache_destroy(wl_entries_slab);
}
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
/**
- * paranoid_check_ec - make sure that the erase counter of a physical eraseblock
- * is correct.
+ * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @ec: the erase counter to check
* is equivalent to @ec, %1 if not, and a negative error code if an error
* occurred.
*/
-static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
+static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
int err;
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
- ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
}
/**
- * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
- * in a WL RB-tree.
+ * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
* @e: the wear-leveling entry to check
* @root: the root of the tree
*
- * This function returns zero if @e is in the @root RB-tree and %1 if it
- * is not.
+ * This function returns zero if @e is in the @root RB-tree and %1 if it is
+ * not.
*/
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
struct rb_root *root)