block/blk-lib.c
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	/* Wake up a waiter, if any, and drop the payload page. */
	if (bio->bi_private)
		complete(bio->bi_private);
	__free_page(bio_page(bio));

	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = flags & BLKDEV_IFL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	struct bio *bio;
	struct page *page;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	while (nr_sects && !ret) {
		unsigned int sector_size = q->limits.logical_block_size;
		unsigned int max_discard_sectors =
			min(q->limits.max_discard_sectors, UINT_MAX >> 9);

		bio = bio_alloc(gfp_mask, 1);
		if (!bio)
			goto out;
		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &wait;

		/*
		 * Add a zeroed one-sector payload as that's what
		 * our current implementations need.  If we ever need
		 * more, the interface will need revisiting.
		 */
		page = alloc_page(gfp_mask | __GFP_ZERO);
		if (!page)
			goto out_free_bio;
		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
			goto out_free_page;

		/*
		 * Override the bio size - a discard touches many more
		 * blocks on disk than the actual payload length.
		 */
		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		bio_get(bio);
		submit_bio(type, bio);

		if (flags & BLKDEV_IFL_WAIT)
			wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}
	return ret;
out_free_page:
	__free_page(page);
out_free_bio:
	bio_put(bio);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(blkdev_issue_discard);
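
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * caller of blkdev_issue_discard().  The sector range and the decision to
 * treat -EOPNOTSUPP as non-fatal are assumptions, not kernel policy.
 */
static int __maybe_unused blk_lib_discard_example(struct block_device *bdev)
{
	/* Discard 1MiB (2048 sectors of 512 bytes) starting at sector 2048,
	 * waiting for the request to complete before returning. */
	int ret = blkdev_issue_discard(bdev, 2048, 2048, GFP_KERNEL,
				       BLKDEV_IFL_WAIT);

	if (ret == -EOPNOTSUPP)
		ret = 0;	/* assumed: lack of discard support is not fatal */
	return ret;
}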

struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
	bio_end_io_t		*end_io;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	/*
	 * bi_private is only set up when the submitter asked to wait, so
	 * check it before recording the completion status in the batch.
	 */
	if (bb) {
		if (err) {
			if (err == -EOPNOTSUPP)
				set_bit(BIO_EOPNOTSUPP, &bb->flags);
			else
				clear_bit(BIO_UPTODATE, &bb->flags);
		}
		if (bb->end_io)
			bb->end_io(bio, err);
		atomic_inc(&bb->done);
		complete(bb->wait);
	}
	bio_put(bio);
}

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the writes to
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 *  Send a barrier at the beginning and at the end if requested.  This
 *  guarantees correct request ordering.  The empty barrier allows us to
 *  avoid a post-queue flush.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int ret = 0;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	bb.end_io = NULL;

	if (flags & BLKDEV_IFL_BARRIER) {
		/* issue async barrier before the data */
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
		if (ret)
			return ret;
	}
submit:
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio)
			break;

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		issued++;
		submit_bio(WRITE, bio);
	}
	/*
	 * When all data bios are in flight, send the final barrier if
	 * requested.
	 */
	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
					flags & BLKDEV_IFL_WAIT);

	if (flags & BLKDEV_IFL_WAIT)
		/* Wait for bios in-flight */
		while (issued != atomic_read(&bb.done))
			wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
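
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * caller of blkdev_issue_zeroout().  The device, range, and flag choice are
 * assumptions made for illustration only.
 */
static int __maybe_unused blk_lib_zeroout_example(struct block_device *bdev)
{
	/* Synchronously write zeroes over 8 sectors (4KiB with 512-byte
	 * sectors) starting at sector 0, without the surrounding barriers. */
	return blkdev_issue_zeroout(bdev, 0, 8, GFP_KERNEL, BLKDEV_IFL_WAIT);
}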