+/*
+ * This is necessitated by the fact that blk_cleanup_queue does not
+ * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
+ * Since our blk_init_queue() passes a spinlock common with ub_dev,
+ * we have lifetime issues when ub_cleanup frees ub_dev, so queue locks
+ * are taken from this small static vector instead, round-robin.
+ */
+static spinlock_t *ub_next_lock(void)
+{
+ unsigned long flags;
+ spinlock_t *ret;
+
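+ /* Hand out pool slots round-robin; ub_lock serializes the cursor. */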
+ spin_lock_irqsave(&ub_lock, flags);
+ ret = &ub_qlockv[ub_qlock_next];
+ ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
+ spin_unlock_irqrestore(&ub_lock, flags);
+ return ret;
+}
+
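
For context, here is a minimal sketch of the caller side under the scheme above: the queue borrows one of the pooled locks instead of embedding a spinlock in ub_dev, so freeing ub_dev can never leave the queue with a dangling lock pointer. The names ub_dev, ub_request_fn, ub_sketch_init_queue and the field layout are assumptions for illustration, not part of this hunk.

#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct ub_dev {				/* assumed minimal shape, for illustration */
	spinlock_t *lock;		/* borrowed from ub_qlockv; never freed */
	struct request_queue *queue;
};

static void ub_request_fn(struct request_queue *q);	/* assumed request handler */

static int ub_sketch_init_queue(struct ub_dev *sc)
{
	struct request_queue *q;

	/*
	 * Take a lock that outlives ub_dev: if blk_cleanup_queue() only
	 * drops q->refcnt, the queue may still take this spinlock after
	 * ub_cleanup() has freed ub_dev.
	 */
	sc->lock = ub_next_lock();

	q = blk_init_queue(ub_request_fn, sc->lock);
	if (q == NULL)
		return -ENOMEM;

	sc->queue = q;			/* later attached to the gendisk */
	return 0;
}

The worst case of this design is that unrelated devices end up sharing one of the UB_QLOCK_NUM pooled locks, which may cost some contention but is always safe: the pool is static and outlives every queue.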