/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <asm/uaccess.h>

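/*
 * A block device and the inode that backs it in the "bdev" pseudo
 * filesystem are allocated as one object, so each can always be
 * recovered from the other via container_of().
 */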
struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
        return &BDEV_I(inode)->bdev;
}

EXPORT_SYMBOL(I_BDEV);

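/*
 * Number of addressable blocks on the device at its current block size,
 * or the largest possible sector_t if the size is not yet known.
 */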
static sector_t max_block(struct block_device *bdev)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);

        if (sz) {
                unsigned int size = block_size(bdev);
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}

/* Kill _all_ buffers, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
        invalidate_bdev(bdev, 1);
        truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}

int set_blocksize(struct block_device *bdev, int size)
{
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_hardsect_size(bdev))
                return -EINVAL;

        /* Don't change the size if it is the same as the current one */
        if (bdev->bd_block_size != size) {
                sync_blockdev(bdev);
                bdev->bd_block_size = size;
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
        return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
        int bits = 9; /* 2^9 = 512 */

        if (set_blocksize(sb->s_bdev, size))
                return 0;
        /* If we get here, we know size is a power of two
         * and its value is between 512 and PAGE_SIZE */
        sb->s_blocksize = size;
        for (size >>= 10; size; size >>= 1)
                ++bits;
        sb->s_blocksize_bits = bits;
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_hardsect_size(sb->s_bdev);
        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

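/*
 * Map a single buffer_head for buffered I/O.  Writes beyond the end of
 * the device fail with -EIO; reads past the end come back as a hole.
 */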
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        if (iblock >= max_block(I_BDEV(inode))) {
                if (create)
                        return -EIO;

                /*
                 * for reads, we're just trying to fill a partial page.
                 * return a hole, they will have to call get_block again
                 * before they can fill it, and they will get -EIO at that
                 * time
                 */
                return 0;
        }
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

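/*
 * Multi-block variant used by direct I/O: map up to max_blocks
 * contiguous blocks, trimming the mapping at the end of the device.
 */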
static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
                unsigned long max_blocks, struct buffer_head *bh, int create)
{
        sector_t end_block = max_block(I_BDEV(inode));

        if ((iblock + max_blocks) > end_block) {
                max_blocks = end_block - iblock;
                if ((long)max_blocks <= 0) {
                        if (create)
                                return -EIO;    /* write fully beyond EOF */
                        /*
                         * It is a read which is fully beyond EOF.  We return
                         * a !buffer_mapped buffer
                         */
                        max_blocks = 0;
                }
        }

        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        bh->b_size = max_blocks << inode->i_blkbits;
        if (max_blocks)
                set_buffer_mapped(bh);
        return 0;
}

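/*
 * O_DIRECT on a block device goes straight to the generic direct-I/O
 * code, using blkdev_get_blocks() for the mapping; the no-locking
 * variant suffices since a block device inode is never truncated the
 * way a regular file can be.
 */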
static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;

        return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
                                iov, offset, nr_segs, blkdev_get_blocks, NULL);
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        return block_prepare_write(page, from, to, blkdev_get_block);
}

static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        return block_commit_write(page, from, to);
}

/*
 * private llseek:
 * for a block special file, file->f_dentry->d_inode->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *bd_inode = file->f_mapping->host;
        loff_t size;
        loff_t retval;

        down(&bd_inode->i_sem);
        size = i_size_read(bd_inode);

        switch (origin) {
                case 2:
                        offset += size;
                        break;
                case 1:
                        offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= size) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                }
                retval = offset;
        }
        up(&bd_inode->i_sem);
        return retval;
}

/*
 *      Filp is never NULL; the only case when ->fsync() is called with
 *      NULL first argument is nfsd_sync_dir() and that's not a directory.
 */

static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
        return sync_blockdev(I_BDEV(filp->f_mapping->host));
}

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static kmem_cache_t *bdev_cachep;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void bdev_destroy_inode(struct inode *inode)
{
        struct bdev_inode *bdi = BDEV_I(inode);

        bdi->bdev.bd_inode_backing_dev_info = NULL;
        kmem_cache_free(bdev_cachep, bdi);
}

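/*
 * Slab constructor: runs once when a slab object is first created, so
 * the embedded block_device and inode only need this initialisation the
 * first time the object is handed out.
 */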
static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
        struct bdev_inode *ei = (struct bdev_inode *)foo;
        struct block_device *bdev = &ei->bdev;

        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR)
        {
                memset(bdev, 0, sizeof(*bdev));
                sema_init(&bdev->bd_sem, 1);
                sema_init(&bdev->bd_mount_sem, 1);
                INIT_LIST_HEAD(&bdev->bd_inodes);
                INIT_LIST_HEAD(&bdev->bd_list);
                inode_init_once(&ei->vfs_inode);
        }
}

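/* Detach one filesystem inode from its bdev.  Caller holds bdev_lock. */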
static inline void __bd_forget(struct inode *inode)
{
        list_del_init(&inode->i_devices);
        inode->i_bdev = NULL;
        inode->i_mapping = &inode->i_data;
}

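/*
 * Called when the bdev inode is evicted: forget every device inode that
 * still points here and take the bdev off the global all_bdevs list.
 */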
static void bdev_clear_inode(struct inode *inode)
{
        struct block_device *bdev = &BDEV_I(inode)->bdev;
        struct list_head *p;
        spin_lock(&bdev_lock);
        while ((p = bdev->bd_inodes.next) != &bdev->bd_inodes) {
                __bd_forget(list_entry(p, struct inode, i_devices));
        }
        list_del_init(&bdev->bd_list);
        spin_unlock(&bdev_lock);
}

static struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .destroy_inode = bdev_destroy_inode,
        .drop_inode = generic_delete_inode,
        .clear_inode = bdev_clear_inode,
};

static struct super_block *bd_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
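        /* the magic number 0x62646576 spells "bdev" in ASCII */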
        return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
}

static struct file_system_type bd_type = {
        .name           = "bdev",
        .get_sb         = bd_get_sb,
        .kill_sb        = kill_anon_super,
};

static struct vfsmount *bd_mnt;
struct super_block *blockdev_superblock;

void __init bdev_cache_init(void)
{
        int err;
        bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
                        0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
                        init_once, NULL);
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        err = PTR_ERR(bd_mnt);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

/*
 * Most likely a _very_ bad hash - but then it's hardly critical for a
 * small /dev, and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through the icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
        return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
        return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
        BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
        return 0;
}

static LIST_HEAD(all_bdevs);

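/*
 * Look up (or create and initialise) the block_device for a device
 * number.  The inode cache does the lookup; a freshly created inode
 * comes back with I_NEW set and is initialised here.
 */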
struct block_device *bdget(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = iget5_locked(bd_mnt->mnt_sb, hash(dev),
                        bdev_test, bdev_set, &dev);

        if (!inode)
                return NULL;

        bdev = &BDEV_I(inode)->bdev;

        if (inode->i_state & I_NEW) {
                bdev->bd_contains = NULL;
                bdev->bd_inode = inode;
                bdev->bd_block_size = (1 << inode->i_blkbits);
                bdev->bd_part_count = 0;
                bdev->bd_invalidated = 0;
                inode->i_mode = S_IFBLK;
                inode->i_rdev = dev;
                inode->i_bdev = bdev;
                inode->i_data.a_ops = &def_blk_aops;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                inode->i_data.backing_dev_info = &default_backing_dev_info;
                spin_lock(&bdev_lock);
                list_add(&bdev->bd_list, &all_bdevs);
                spin_unlock(&bdev_lock);
                unlock_new_inode(inode);
        }
        return bdev;
}

EXPORT_SYMBOL(bdget);

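/* Sum of the pagecache pages currently attached to block device inodes. */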
long nr_blockdev_pages(void)
{
        struct list_head *p;
        long ret = 0;
        spin_lock(&bdev_lock);
        list_for_each(p, &all_bdevs) {
                struct block_device *bdev;
                bdev = list_entry(p, struct block_device, bd_list);
                ret += bdev->bd_inode->i_mapping->nrpages;
        }
        spin_unlock(&bdev_lock);
        return ret;
}

void bdput(struct block_device *bdev)
{
        iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

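/*
 * Resolve a device inode to its block_device, caching the result in
 * inode->i_bdev so later opens can take the fast path.
 */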
static struct block_device *bd_acquire(struct inode *inode)
{
        struct block_device *bdev;
        spin_lock(&bdev_lock);
        bdev = inode->i_bdev;
        if (bdev && igrab(bdev->bd_inode)) {
                spin_unlock(&bdev_lock);
                return bdev;
        }
        spin_unlock(&bdev_lock);
        bdev = bdget(inode->i_rdev);
        if (bdev) {
                spin_lock(&bdev_lock);
                if (inode->i_bdev)
                        __bd_forget(inode);
                inode->i_bdev = bdev;
                inode->i_mapping = bdev->bd_inode->i_mapping;
                list_add(&inode->i_devices, &bdev->bd_inodes);
                spin_unlock(&bdev_lock);
        }
        return bdev;
}

/* Call when you free an inode */

void bd_forget(struct inode *inode)
{
        spin_lock(&bdev_lock);
        if (inode->i_bdev)
                __bd_forget(inode);
        spin_unlock(&bdev_lock);
}

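/*
 * Attach a holder to the device.  Claiming a partition also marks the
 * containing whole device as held (with the address of bd_claim itself
 * used as a cookie), so exclusive opens of a partition and of the whole
 * device exclude each other.  Returns -EBUSY if someone else already
 * holds the device.
 */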
int bd_claim(struct block_device *bdev, void *holder)
{
        int res;
        spin_lock(&bdev_lock);

        /* first decide result */
        if (bdev->bd_holder == holder)
                res = 0;         /* already a holder */
        else if (bdev->bd_holder != NULL)
                res = -EBUSY;    /* held by someone else */
        else if (bdev->bd_contains == bdev)
                res = 0;         /* is a whole device which isn't held */

        else if (bdev->bd_contains->bd_holder == bd_claim)
                res = 0;         /* is a partition of a device that is being partitioned */
        else if (bdev->bd_contains->bd_holder != NULL)
                res = -EBUSY;    /* is a partition of a held device */
        else
                res = 0;         /* is a partition of an un-held device */

        /* now impose change */
        if (res == 0) {
                /* note that for a whole device bd_holders
                 * will be incremented twice, and bd_holder will
                 * be set to bd_claim before being set to holder
                 */
                bdev->bd_contains->bd_holders++;
                bdev->bd_contains->bd_holder = bd_claim;
                bdev->bd_holders++;
                bdev->bd_holder = holder;
        }
        spin_unlock(&bdev_lock);
        return res;
}

EXPORT_SYMBOL(bd_claim);

void bd_release(struct block_device *bdev)
{
        spin_lock(&bdev_lock);
        if (!--bdev->bd_contains->bd_holders)
                bdev->bd_contains->bd_holder = NULL;
        if (!--bdev->bd_holders)
                bdev->bd_holder = NULL;
        spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);

/*
 * Tries to open a block device by device number.  Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
 * truly sucky interface and all you are given is a device number.  _Never_
 * to be used for internal purposes.  If you ever need it - reconsider
 * your API.
 */
struct block_device *open_by_devnum(dev_t dev, unsigned mode)
{
        struct block_device *bdev = bdget(dev);
        int err = -ENOMEM;
        int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
        if (bdev)
                err = blkdev_get(bdev, mode, flags);
        return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);

/*
 * This routine checks whether removable media have been changed,
 * and invalidates all buffer-cache entries in that case.  This
 * is a relatively slow routine, so we have to try to minimize using
 * it.  Thus it is called only upon a 'mount' or 'open'.  This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
        struct gendisk *disk = bdev->bd_disk;
        struct block_device_operations *bdops = disk->fops;

        if (!bdops->media_changed)
                return 0;
        if (!bdops->media_changed(bdev->bd_disk))
                return 0;

        if (__invalidate_device(bdev))
                printk("VFS: busy inodes on changed media.\n");

        if (bdops->revalidate_disk)
                bdops->revalidate_disk(bdev->bd_disk);
        if (bdev->bd_disk->minors > 1)
                bdev->bd_invalidated = 1;
        return 1;
}

EXPORT_SYMBOL(check_disk_change);

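/*
 * Record the device size and pick the largest block size (up to
 * PAGE_CACHE_SIZE) that still divides it evenly.
 */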
void bd_set_size(struct block_device *bdev, loff_t size)
{
        unsigned bsize = bdev_hardsect_size(bdev);

        bdev->bd_inode->i_size = size;
        while (bsize < PAGE_CACHE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_block_size = bsize;
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);

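/*
 * Common open path.  Three cases are handled under bd_sem: the first
 * open of a whole device (size the bdev, maybe rescan partitions), the
 * first open of a partition (which implicitly opens the whole device
 * first), and any later open of an already-open device.
 */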
static int do_open(struct block_device *bdev, struct file *file)
{
        struct module *owner = NULL;
        struct gendisk *disk;
        int ret = -ENXIO;
        int part;

        file->f_mapping = bdev->bd_inode->i_mapping;
        lock_kernel();
        disk = get_gendisk(bdev->bd_dev, &part);
        if (!disk) {
                unlock_kernel();
                bdput(bdev);
                return ret;
        }
        owner = disk->fops->owner;

        down(&bdev->bd_sem);
        if (!bdev->bd_openers) {
                bdev->bd_disk = disk;
                bdev->bd_contains = bdev;
                if (!part) {
                        struct backing_dev_info *bdi;
                        if (disk->fops->open) {
                                ret = disk->fops->open(bdev->bd_inode, file);
                                if (ret)
                                        goto out_first;
                        }
                        if (!bdev->bd_openers) {
                                bd_set_size(bdev, (loff_t)get_capacity(disk) << 9);
                                bdi = blk_get_backing_dev_info(bdev);
                                if (bdi == NULL)
                                        bdi = &default_backing_dev_info;
                                bdev->bd_inode->i_data.backing_dev_info = bdi;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(disk, bdev);
                } else {
                        struct hd_struct *p;
                        struct block_device *whole;
                        whole = bdget_disk(disk, 0);
                        ret = -ENOMEM;
                        if (!whole)
                                goto out_first;
                        ret = blkdev_get(whole, file->f_mode, file->f_flags);
                        if (ret)
                                goto out_first;
                        bdev->bd_contains = whole;
                        down(&whole->bd_sem);
                        whole->bd_part_count++;
                        p = disk->part[part - 1];
                        bdev->bd_inode->i_data.backing_dev_info =
                           whole->bd_inode->i_data.backing_dev_info;
                        if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
                                whole->bd_part_count--;
                                up(&whole->bd_sem);
                                ret = -ENXIO;
                                goto out_first;
                        }
                        kobject_get(&p->kobj);
                        bdev->bd_part = p;
                        bd_set_size(bdev, (loff_t) p->nr_sects << 9);
                        up(&whole->bd_sem);
                }
        } else {
                put_disk(disk);
                module_put(owner);
                if (bdev->bd_contains == bdev) {
                        if (bdev->bd_disk->fops->open) {
                                ret = bdev->bd_disk->fops->open(bdev->bd_inode, file);
                                if (ret)
                                        goto out;
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(bdev->bd_disk, bdev);
                } else {
                        down(&bdev->bd_contains->bd_sem);
                        bdev->bd_contains->bd_part_count++;
                        up(&bdev->bd_contains->bd_sem);
                }
        }
        bdev->bd_openers++;
        up(&bdev->bd_sem);
        unlock_kernel();
        return 0;

out_first:
        bdev->bd_disk = NULL;
        bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
        if (bdev != bdev->bd_contains)
                blkdev_put(bdev->bd_contains);
        bdev->bd_contains = NULL;
        put_disk(disk);
        module_put(owner);
out:
        up(&bdev->bd_sem);
        unlock_kernel();
        if (ret)
                bdput(bdev);
        return ret;
}

int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
{
        /*
         * This crockload is due to bad choice of ->open() type.
         * It will go away.
         * For now, block device ->open() routine must _not_
         * examine anything in 'inode' argument except ->i_rdev.
         */
        struct file fake_file = {};
        struct dentry fake_dentry = {};
        fake_file.f_mode = mode;
        fake_file.f_flags = flags;
        fake_file.f_dentry = &fake_dentry;
        fake_dentry.d_inode = bdev->bd_inode;

        return do_open(bdev, &fake_file);
}

EXPORT_SYMBOL(blkdev_get);

static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;
        int res;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly.  Some mkfs
         * binaries need it.  We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;

        bdev = bd_acquire(inode);

        res = do_open(bdev, filp);
        if (res)
                return res;

        if (!(filp->f_flags & O_EXCL))
                return 0;

        if (!(res = bd_claim(bdev, filp)))
                return 0;

        blkdev_put(bdev);
        return res;
}

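/*
 * Drop one opener.  ->release() is called on every close of a whole
 * device; a partition only drops its container's part count.  The last
 * close also syncs and kills the page cache, drops the disk reference
 * and, for a partition, releases the containing whole device.
 */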
int blkdev_put(struct block_device *bdev)
{
        int ret = 0;
        struct inode *bd_inode = bdev->bd_inode;
        struct gendisk *disk = bdev->bd_disk;

        down(&bdev->bd_sem);
        lock_kernel();
        if (!--bdev->bd_openers) {
                sync_blockdev(bdev);
                kill_bdev(bdev);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
                        ret = disk->fops->release(bd_inode, NULL);
        } else {
                down(&bdev->bd_contains->bd_sem);
                bdev->bd_contains->bd_part_count--;
                up(&bdev->bd_contains->bd_sem);
        }
        if (!bdev->bd_openers) {
                struct module *owner = disk->fops->owner;

                put_disk(disk);
                module_put(owner);

                if (bdev->bd_contains != bdev) {
                        kobject_put(&bdev->bd_part->kobj);
                        bdev->bd_part = NULL;
                }
                bdev->bd_disk = NULL;
                bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
                if (bdev != bdev->bd_contains) {
                        blkdev_put(bdev->bd_contains);
                }
                bdev->bd_contains = NULL;
        }
        unlock_kernel();
        up(&bdev->bd_sem);
        bdput(bdev);
        return ret;
}

EXPORT_SYMBOL(blkdev_put);

static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = I_BDEV(filp->f_mapping->host);
        if (bdev->bd_holder == filp)
                bd_release(bdev);
        return blkdev_put(bdev);
}

static ssize_t blkdev_file_write(struct file *file, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };

        return generic_file_write_nolock(file, &local_iov, 1, ppos);
}

static ssize_t blkdev_file_aio_write(struct kiocb *iocb, const char __user *buf,
                                   size_t count, loff_t pos)
{
        struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };

        return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
        return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
}

struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
        .writepage      = blkdev_writepage,
        .sync_page      = block_sync_page,
        .prepare_write  = blkdev_prepare_write,
        .commit_write   = blkdev_commit_write,
        .writepages     = generic_writepages,
        .direct_IO      = blkdev_direct_IO,
};

struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = block_llseek,
        .read           = generic_file_read,
        .write          = blkdev_file_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = blkdev_file_aio_write,
        .mmap           = generic_file_mmap,
        .fsync          = block_fsync,
        .unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .readv          = generic_file_readv,
        .writev         = generic_file_write_nolock,
        .sendfile       = generic_file_sendfile,
};

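/*
 * Issue an ioctl on behalf of the kernel: the argument lives in kernel
 * space, so the user-address limit is widened around the call.
 */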
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
        int res;
        mm_segment_t old_fs = get_fs();
        set_fs(KERNEL_DS);
        res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg);
        set_fs(old_fs);
        return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);

/**
 * lookup_bdev  - lookup a struct block_device by name
 *
 * @path:       special file representing the block device
 *
 * Get a reference to the blockdevice at @path in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *path)
{
        struct block_device *bdev;
        struct inode *inode;
        struct nameidata nd;
        int error;

        if (!path || !*path)
                return ERR_PTR(-EINVAL);

        error = path_lookup(path, LOOKUP_FOLLOW, &nd);
        if (error)
                return ERR_PTR(error);

        inode = nd.dentry->d_inode;
        error = -ENOTBLK;
        if (!S_ISBLK(inode->i_mode))
                goto fail;
        error = -EACCES;
        if (nd.mnt->mnt_flags & MNT_NODEV)
                goto fail;
        error = -ENOMEM;
        bdev = bd_acquire(inode);
        if (!bdev)
                goto fail;
out:
        path_release(&nd);
        return bdev;
fail:
        bdev = ERR_PTR(error);
        goto out;
}

/**
 * open_bdev_excl  -  open a block device by name and set it up for use
 *
 * @path:       special file representing the block device
 * @flags:      %MS_RDONLY for opening read-only
 * @holder:     owner for exclusion
 *
 * Open the blockdevice described by the special file at @path, claim it
 * for the @holder.
 */
struct block_device *open_bdev_excl(const char *path, int flags, void *holder)
{
        struct block_device *bdev;
        mode_t mode = FMODE_READ;
        int error = 0;

        bdev = lookup_bdev(path);
        if (IS_ERR(bdev))
                return bdev;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;
        error = blkdev_get(bdev, mode, 0);
        if (error)
                return ERR_PTR(error);
        error = -EACCES;
        if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
                goto blkdev_put;
        error = bd_claim(bdev, holder);
        if (error)
                goto blkdev_put;

        return bdev;

blkdev_put:
        blkdev_put(bdev);
        return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_excl);

/**
 * close_bdev_excl  -  release a blockdevice opened by open_bdev_excl()
 *
 * @bdev:       blockdevice to close
 *
 * This is the counterpart to open_bdev_excl().
 */
void close_bdev_excl(struct block_device *bdev)
{
        bd_release(bdev);
        blkdev_put(bdev);
}

EXPORT_SYMBOL(close_bdev_excl);