/*
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
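
/*
 * Per-translation-layer block core state: the request queue, its lock,
 * and the kernel thread that services requests for this major.
 */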
struct mtd_blkcore_priv {
	struct task_struct *thread;
	struct request_queue *rq;
	spinlock_t queue_lock;
};
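
/*
 * Flag a discard request so that do_blktrans_request() can route it to
 * the translation layer's ->discard() method.
 */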
static int blktrans_discard_request(struct request_queue *q,
				    struct request *req)
{
	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
	req->cmd[0] = REQ_LB_OP_DISCARD;
	return 0;
}
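
/*
 * Translate one block-layer request into readsect/writesect calls on the
 * translation layer, one tr->blksize block at a time. Returns 0 on
 * success, -EIO on any failure.
 */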
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	buf = req->buffer;

	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
	    req->cmd[0] == REQ_LB_OP_DISCARD)
		return tr->discard(dev, block, nsect);

	if (!blk_fs_request(req))
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return -EIO;

	switch(rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return -EIO;
		return 0;

	case WRITE:
		if (!tr->writesect)
			return -EIO;

		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return -EIO;
		return 0;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}
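
/*
 * Per-major worker thread: pulls requests off the queue, hands them to
 * do_blktrans_request() under the device lock, and completes them.
 * Sleeps when the queue is empty; woken by mtd_blktrans_request().
 */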
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res;

		req = elv_next_request(rq);

		if (!req) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		__blk_end_request_cur(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	return 0;
}
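
/*
 * Block-layer request function: all real work is done in the worker
 * thread, so just wake it up.
 */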
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;
	wake_up_process(tr->blkcore_priv->thread);
}
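
/*
 * Open/release bump and drop the MTD device's use count and the module
 * references, so the underlying driver cannot vanish while the block
 * device is held open.
 */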
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;
	int ret = -ENODEV;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}

static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;
	int ret = 0;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (dev->tr->getgeo)
		return dev->tr->getgeo(dev, geo);
	return -ENOTTY;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;
	default:
		return -ENOTTY;
	}
}

static struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.locked_ioctl	= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};
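
/*
 * Create the gendisk for a new translation-layer device: pick (or verify)
 * a device number, name the disk, set its capacity in 512-byte sectors,
 * and attach it to the translation layer's shared request queue.
 * Must be called with mtd_table_mutex held.
 */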
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	if ((new->devnum << tr->part_bits) > 256) {
		return -EBUSY;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_init(&new->lock);
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * tr->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;
	gd->driverfs_dev = new->mtd->dev.parent;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}
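
/*
 * Tear down a translation-layer device: unlink it and remove its gendisk.
 * Also requires mtd_table_mutex to be held by the caller.
 */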
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}
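
/*
 * MTD notifier hooks: propagate addition/removal of raw MTD devices to
 * every registered translation layer.
 */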
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
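
/*
 * Register a translation layer: allocate its block core state, claim the
 * block major, set up the request queue and worker thread, then offer it
 * every MTD device already present in mtd_table.
 */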
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;
	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
	if (tr->discard)
		blk_queue_set_discard(tr->blkcore_priv->rq,
				      blktrans_discard_request);

	tr->blkshift = ffs(tr->blksize) - 1;

	tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
			"%sd", tr->name);
	if (IS_ERR(tr->blkcore_priv->thread)) {
		int ret = PTR_ERR(tr->blkcore_priv->thread);
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	mutex_unlock(&mtd_table_mutex);

	return 0;
}
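
/*
 * Unregister a translation layer: stop the worker thread, remove all of
 * its devices, and release the request queue and the major number.
 */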
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Clean up the kernel thread */
	kthread_stop(tr->blkcore_priv->thread);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	mutex_unlock(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
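
/*
 * Usage sketch (illustrative only, not part of this driver): the skeleton
 * of a minimal read-only translation layer built on the exported API above.
 * The "exrom" name, the major number and all helpers are hypothetical;
 * real users of this interface are mtdblock, mtdblock_ro, FTL, NFTL, etc.
 *
 *	static int exrom_readsect(struct mtd_blktrans_dev *dev,
 *				  unsigned long block, char *buf)
 *	{
 *		size_t retlen;
 *		loff_t off = (loff_t)block << dev->tr->blkshift;
 *
 *		// Read one 512-byte block straight from the raw MTD device.
 *		if (dev->mtd->read(dev->mtd, off, dev->tr->blksize,
 *				   &retlen, buf))
 *			return 1;
 *		return 0;
 *	}
 *
 *	static void exrom_add_mtd(struct mtd_blktrans_ops *tr,
 *				  struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return;
 *		dev->mtd = mtd;
 *		dev->devnum = mtd->index;
 *		dev->size = mtd->size >> 9;	// capacity in 512-byte sectors
 *		dev->tr = tr;
 *		dev->readonly = 1;
 *		add_mtd_blktrans_dev(dev);
 *	}
 *
 *	static void exrom_remove_dev(struct mtd_blktrans_dev *dev)
 *	{
 *		del_mtd_blktrans_dev(dev);
 *		kfree(dev);
 *	}
 *
 *	static struct mtd_blktrans_ops exrom_tr = {
 *		.name		= "exrom",
 *		.major		= 240,		// hypothetical, locally assigned
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= exrom_readsect,
 *		.add_mtd	= exrom_add_mtd,
 *		.remove_dev	= exrom_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * The layer's module_init() would then call register_mtd_blktrans(&exrom_tr)
 * and its module_exit() deregister_mtd_blktrans(&exrom_tr).
 */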