#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
+#include "blk.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;
return tg;
}
-/*
- * This function returns with queue lock unlocked in case of error, like
- * request queue is no more
- */
static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
{
struct throtl_grp *tg = NULL, *__tg = NULL;
struct blkio_cgroup *blkcg;
struct request_queue *q = td->queue;
+ /* no throttling for dead queue */
+ if (unlikely(blk_queue_dead(q)))
+ return NULL;
+
rcu_read_lock();
blkcg = task_blkio_cgroup(current);
tg = throtl_find_tg(td, blkcg);
/*
* Need to allocate a group. Allocation of group also needs allocation
* of per cpu stats which in-turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc
- *
- * Take the request queue reference to make sure queue does not
- * go away once we return from allocation.
+ * we need to drop rcu lock and queue_lock before we call alloc.
*/
- blk_get_queue(q);
rcu_read_unlock();
spin_unlock_irq(q->queue_lock);
tg = throtl_alloc_tg(td);
- /*
- * We might have slept in group allocation. Make sure queue is not
- * dead
- */
- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
- blk_put_queue(q);
- if (tg)
- kfree(tg);
-
- return ERR_PTR(-ENODEV);
- }
- blk_put_queue(q);
/* Group allocated and queue is still alive. take the lock */
spin_lock_irq(q->queue_lock);
+ /* Make sure @q is still alive */
+ if (unlikely(blk_queue_dead(q))) {
+ kfree(tg);
+ return NULL;
+ }
+
/*
* Initialize the new group. After sleeping, read the blkcg again.
*/
}
}
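The hunk above also changes throtl_get_tg()'s error contract: instead of returning ERR_PTR(-ENODEV) with the queue lock already dropped, it now returns NULL while still holding q->queue_lock, and the dead-queue check is done both on entry and again after re-taking the lock. The comment about dropping the rcu lock and queue_lock before the allocation describes a general pattern; a minimal standalone sketch of that pattern, with hypothetical names (get_or_alloc_item, struct my_item) that are not part of this patch:

	/*
	 * Sketch only: drop the spinlock around a blocking allocation,
	 * re-take it, and re-validate state that may have changed while
	 * the lock was not held.
	 */
	struct my_item { int dummy; };

	static struct my_item *get_or_alloc_item(struct request_queue *q)
	{
		struct my_item *item;

		/* queue_lock is held on entry */
		spin_unlock_irq(q->queue_lock);
		item = kzalloc(sizeof(*item), GFP_KERNEL);	/* may sleep */
		spin_lock_irq(q->queue_lock);

		/* the queue may have died while we slept */
		if (unlikely(blk_queue_dead(q))) {
			kfree(item);		/* kfree(NULL) is a no-op */
			return NULL;		/* caller still holds queue_lock */
		}
		return item;
	}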
-static void throtl_td_free(struct throtl_data *td)
-{
- kfree(td);
-}
-
/*
* Blk cgroup controller notification saying that blkio_group object is being
* delinked as associated cgroup object is going away. That also means that
.plid = BLKIO_POLICY_THROTL,
};
-int blk_throtl_bio(struct request_queue *q, struct bio **biop)
+bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
struct throtl_data *td = q->td;
struct throtl_grp *tg;
- struct bio *bio = *biop;
bool rw = bio_data_dir(bio), update_disptime = true;
struct blkio_cgroup *blkcg;
+ bool throttled = false;
if (bio->bi_rw & REQ_THROTTLED) {
bio->bi_rw &= ~REQ_THROTTLED;
- return 0;
+ goto out;
}
/*
blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
rw, rw_is_sync(bio->bi_rw));
rcu_read_unlock();
- return 0;
+ goto out;
}
}
rcu_read_unlock();
* Either group has not been allocated yet or it is not an unlimited
* IO group
*/
-
spin_lock_irq(q->queue_lock);
tg = throtl_get_tg(td);
-
- if (IS_ERR(tg)) {
- if (PTR_ERR(tg) == -ENODEV) {
- /*
- * Queue is gone. No queue lock held here.
- */
- return -ENODEV;
- }
- }
+ if (unlikely(!tg))
+ goto out_unlock;
if (tg->nr_queued[rw]) {
/*
* So keep on trimming slice even if bio is not queued.
*/
throtl_trim_slice(td, tg, rw);
- goto out;
+ goto out_unlock;
}
queue_bio:
tg->nr_queued[READ], tg->nr_queued[WRITE]);
throtl_add_bio_tg(q->td, tg, bio);
- *biop = NULL;
+ throttled = true;
if (update_disptime) {
tg_update_disptime(td, tg);
throtl_schedule_next_dispatch(td);
}
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
out:
+ return throttled;
+}
+
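blk_throtl_bio() now takes the bio directly and returns a bool instead of clearing *biop or returning -ENODEV: true means the throttling code queued the bio and the caller must not issue it any further, false means submission should continue normally. A rough illustration of the new calling convention (the real call site is outside this hunk; the helper name below is made up):

	/* Illustrative only, not part of this patch */
	static bool my_submit_checks(struct request_queue *q, struct bio *bio)
	{
		if (blk_throtl_bio(q, bio))
			return false;	/* throttled; the throttle code will reissue it later */

		return true;		/* not throttled; ok to issue the bio now */
	}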
+/**
+ * blk_throtl_drain - drain throttled bios
+ * @q: request_queue to drain throttled bios for
+ *
+ * Dispatch all currently throttled bios on @q through ->make_request_fn().
+ */
+void blk_throtl_drain(struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
+{
+ struct throtl_data *td = q->td;
+ struct throtl_rb_root *st = &td->tg_service_tree;
+ struct throtl_grp *tg;
+ struct bio_list bl;
+ struct bio *bio;
+
+ WARN_ON_ONCE(!queue_is_locked(q));
+
+ bio_list_init(&bl);
+
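+	/* move every throttled group's queued bios onto the local list */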
+ while ((tg = throtl_rb_first(st))) {
+ throtl_dequeue_tg(td, tg);
+
+ while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+ tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+ while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+ tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+ }
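+	/* all throttled bios are now on bl; drop the lock and reissue them */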
spin_unlock_irq(q->queue_lock);
- return 0;
+
+ while ((bio = bio_list_pop(&bl)))
+ generic_make_request(bio);
+
+ spin_lock_irq(q->queue_lock);
}
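blk_throtl_drain() must be entered with q->queue_lock held (hence the WARN_ON_ONCE() and the __releases/__acquires annotations); it drops the lock only around the generic_make_request() loop and re-takes it before returning. A hedged sketch of how a queue-drain path might invoke it (the caller name and surrounding steps are illustrative, not part of this patch):

	/* Illustrative only, not part of this patch */
	static void my_drain_queue(struct request_queue *q)
	{
		spin_lock_irq(q->queue_lock);

		/* reissue every bio the throttle layer is currently holding back */
		blk_throtl_drain(q);

		/* ... drain elevator / flush state here as well ... */

		spin_unlock_irq(q->queue_lock);
	}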
int blk_throtl_init(struct request_queue *q)
* it.
*/
throtl_shutdown_wq(q);
- throtl_td_free(td);
+}
+
+void blk_throtl_release(struct request_queue *q)
+{
+ kfree(q->td);
}
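With throtl_td_free() gone, teardown is split in two: blk_throtl_exit() only stops the machinery (throtl_shutdown_wq()), while freeing q->td is deferred to blk_throtl_release(). Presumably the release step runs from the final queue release path once no bio can reach the throttle code any more; a hedged sketch (function name illustrative):

	/* Illustrative only, not part of this patch */
	static void my_release_queue(struct request_queue *q)
	{
		/* no new I/O can reach q at this point */
		blk_throtl_release(q);		/* frees q->td */

		/* ... release remaining per-queue resources ... */
	}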
static int __init throtl_init(void)