1 /*
2  * Copyright (C) 2005, 2006
3  * Avishay Traeger (avishay@gmail.com)
4  * Copyright (C) 2008, 2009
5  * Boaz Harrosh <bharrosh@panasas.com>
6  *
7  * This file is part of exofs.
8  *
9  * exofs is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation.  Since it is based on ext2, and the only
12  * valid version of GPL for the Linux kernel is version 2, the only valid
13  * version of GPL for exofs is version 2.
14  *
15  * exofs is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with exofs; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
23  */
24
25 #include <linux/slab.h>
26 #include <asm/div64.h>
27 #include <linux/lcm.h>
28
29 #include "ore_raid.h"
30
31 MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
32 MODULE_DESCRIPTION("Objects Raid Engine ore.ko");
33 MODULE_LICENSE("GPL");
34
35 /* ore_verify_layout does a few things:
36  * 1. Given a minimum number of needed parameters, fixes up the rest of the
37  *    members to be operational for the ore. The needed parameters are those
38  *    that are defined by the pnfs-objects layout standard.
39  * 2. Checks that the current ore code actually supports these parameters,
40  *    for example stripe_unit must be a multiple of the system PAGE_SIZE,
41  *    etc.
42  * 3. Caches some heavily used calculations that will be needed by users.
43  */
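/*
 * For example (hypothetical numbers): total_comps = 6, mirrors_p1 = 2 and
 * group_width = 0 are fixed up to group_width = 3, group_depth = -1,
 * group_count = 1.  max_io_length is cached so that, roughly, each device's
 * share of a maximal IO still fits in one kmalloc'ed bio of
 * BIO_MAX_PAGES_KMALLOC bio_vecs, even when the IO does not start on a
 * stripe_unit boundary.
 */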
44
45 enum { BIO_MAX_PAGES_KMALLOC =
46                 (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),};
47
48 int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
49 {
50         u64 stripe_length;
51
52 /* FIXME: Only raid0 is supported for now. */
53         if (layout->raid_algorithm != PNFS_OSD_RAID_0) {
54                 ORE_ERR("Only RAID_0 for now\n");
55                 return -EINVAL;
56         }
57         if (0 != (layout->stripe_unit & ~PAGE_MASK)) {
58                 ORE_ERR("Stripe Unit(0x%llx)"
59                           " must be a multiple of PAGE_SIZE(0x%lx)\n",
60                           _LLU(layout->stripe_unit), PAGE_SIZE);
61                 return -EINVAL;
62         }
63         if (layout->group_width) {
64                 if (!layout->group_depth) {
65                         ORE_ERR("group_depth == 0 && group_width != 0\n");
66                         return -EINVAL;
67                 }
68                 if (total_comps < (layout->group_width * layout->mirrors_p1)) {
69                         ORE_ERR("Data Map wrong, "
70                                 "numdevs=%d < group_width=%d * mirrors=%d\n",
71                                 total_comps, layout->group_width,
72                                 layout->mirrors_p1);
73                         return -EINVAL;
74                 }
75                 layout->group_count = total_comps / layout->mirrors_p1 /
76                                                 layout->group_width;
77         } else {
78                 if (layout->group_depth) {
79                         printk(KERN_NOTICE "Warning: group_depth ignored "
80                                 "group_width == 0 && group_depth == %lld\n",
81                                 _LLU(layout->group_depth));
82                 }
83                 layout->group_width = total_comps / layout->mirrors_p1;
84                 layout->group_depth = -1;
85                 layout->group_count = 1;
86         }
87
88         stripe_length = (u64)layout->group_width * layout->stripe_unit;
89         if (stripe_length >= (1ULL << 32)) {
90                 ORE_ERR("Stripe_length(0x%llx) >= 32bit is not supported\n",
91                         _LLU(stripe_length));
92                 return -EINVAL;
93         }
94
95         layout->max_io_length =
96                 (BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
97                                                         layout->group_width;
98         return 0;
99 }
100 EXPORT_SYMBOL(ore_verify_layout);
101
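/* oc->single_comp is used below as an index mask: it is expected to be 0 when
 * the component array really holds a single component (so every index maps to
 * comps[0]) and all-ones otherwise, letting the index pass through unchanged.
 */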
102 static u8 *_ios_cred(struct ore_io_state *ios, unsigned index)
103 {
104         return ios->oc->comps[index & ios->oc->single_comp].cred;
105 }
106
107 static struct osd_obj_id *_ios_obj(struct ore_io_state *ios, unsigned index)
108 {
109         return &ios->oc->comps[index & ios->oc->single_comp].obj;
110 }
111
112 static struct osd_dev *_ios_od(struct ore_io_state *ios, unsigned index)
113 {
114         ORE_DBGMSG2("oc->first_dev=%d oc->numdevs=%d i=%d oc->ods=%p\n",
115                     ios->oc->first_dev, ios->oc->numdevs, index,
116                     ios->oc->ods);
117
118         return ore_comp_dev(ios->oc, index);
119 }
120
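/* Allocate the io_state together with its per-device states and, when needed,
 * the read sg-lists or the write parity-pages pointer array.  When everything
 * fits in one page a single kzalloc is used (__alloc_all_io_state); otherwise
 * the sg/pages part is allocated separately and extra_part_alloc is set so the
 * tear-down path knows there is a second buffer to free (per_dev[0].sglist
 * then holds that pointer).  As a rough illustration, numdevs = 4 with
 * sgs_per_dev = 2 needs one ore_io_state, four ore_per_dev_state entries and
 * eight osd_sg_entry slots, which normally fits in a single page.
 */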
121 static int  _ore_get_io_state(struct ore_layout *layout,
122                         struct ore_components *oc, unsigned numdevs,
123                         unsigned sgs_per_dev, unsigned num_par_pages,
124                         struct ore_io_state **pios)
125 {
126         struct ore_io_state *ios;
127         struct page **pages;
128         struct osd_sg_entry *sgilist;
129         struct __alloc_all_io_state {
130                 struct ore_io_state ios;
131                 struct ore_per_dev_state per_dev[numdevs];
132                 union {
133                         struct osd_sg_entry sglist[sgs_per_dev * numdevs];
134                         struct page *pages[num_par_pages];
135                 };
136         } *_aios;
137
138         if (likely(sizeof(*_aios) <= PAGE_SIZE)) {
139                 _aios = kzalloc(sizeof(*_aios), GFP_KERNEL);
140                 if (unlikely(!_aios)) {
141                         ORE_DBGMSG("Failed kzalloc bytes=%zd\n",
142                                    sizeof(*_aios));
143                         *pios = NULL;
144                         return -ENOMEM;
145                 }
146                 pages = num_par_pages ? _aios->pages : NULL;
147                 sgilist = sgs_per_dev ? _aios->sglist : NULL;
148                 ios = &_aios->ios;
149         } else {
150                 struct __alloc_small_io_state {
151                         struct ore_io_state ios;
152                         struct ore_per_dev_state per_dev[numdevs];
153                 } *_aio_small;
154                 union __extra_part {
155                         struct osd_sg_entry sglist[sgs_per_dev * numdevs];
156                         struct page *pages[num_par_pages];
157                 } *extra_part;
158
159                 _aio_small = kzalloc(sizeof(*_aio_small), GFP_KERNEL);
160                 if (unlikely(!_aio_small)) {
161                         ORE_DBGMSG("Failed alloc first part bytes=%zd\n",
162                                    sizeof(*_aio_small));
163                         *pios = NULL;
164                         return -ENOMEM;
165                 }
166                 extra_part = kzalloc(sizeof(*extra_part), GFP_KERNEL);
167                 if (unlikely(!extra_part)) {
168                         ORE_DBGMSG("Failed alloc second part bytes=%zd\n",
169                                    sizeof(*extra_part));
170                         kfree(_aio_small);
171                         *pios = NULL;
172                         return -ENOMEM;
173                 }
174
175                 pages = num_par_pages ? extra_part->pages : NULL;
176                 sgilist = sgs_per_dev ? extra_part->sglist : NULL;
177                 /* In this case the per_dev[0].sglist holds the pointer to
178                  * be freed
179                  */
180                 ios = &_aio_small->ios;
181                 ios->extra_part_alloc = true;
182         }
183
184         if (pages) {
185                 ios->parity_pages = pages;
186                 ios->max_par_pages = num_par_pages;
187         }
188         if (sgilist) {
189                 unsigned d;
190
191                 for (d = 0; d < numdevs; ++d) {
192                         ios->per_dev[d].sglist = sgilist;
193                         sgilist += sgs_per_dev;
194                 }
195                 ios->sgs_per_dev = sgs_per_dev;
196         }
197
198         ios->layout = layout;
199         ios->oc = oc;
200         *pios = ios;
201         return 0;
202 }
203
204 /* Allocate an io_state for only a single group of devices
205  *
206  * If a user needs to call ore_read/write() this version must be used because
207  * it allocates the extra state needed for striping and raid.
208  * The ore might decide to IO less than @length bytes due to alignments
209  * and constraints as follows:
210  * - The IO cannot cross a group boundary.
211  * - In raid5/6 the end of the IO must align with the end of a stripe, e.g.
212  *   (@offset + @length) % stripe_size == 0, or the complete range must lie
213  *   within a single stripe.
214  * - Memory conditions may only permit a shorter IO. (A user can pass
215  *   @length=~0 and check the returned ios->length for max_io_size.)
216  *
217  * The caller must check the returned ios->length (and/or ios->nr_pages) and
218  * re-issue those pages that fall outside of ios->length.
219  */
220 int  ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
221                       bool is_reading, u64 offset, u64 length,
222                       struct ore_io_state **pios)
223 {
224         struct ore_io_state *ios;
225         unsigned numdevs = layout->group_width * layout->mirrors_p1;
226         unsigned sgs_per_dev = 0, max_par_pages = 0;
227         int ret;
228
229         if (layout->parity && length) {
230                 unsigned data_devs = layout->group_width - layout->parity;
231                 unsigned stripe_size = layout->stripe_unit * data_devs;
232                 unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
233                 u32 remainder;
234                 u64 num_stripes;
235                 u64 num_raid_units;
236
237                 num_stripes = div_u64_rem(length, stripe_size, &remainder);
238                 if (remainder)
239                         ++num_stripes;
240
241                 num_raid_units =  num_stripes * layout->parity;
242
243                 if (is_reading) {
244                         /* For reads add per_dev sglist array */
245                         /* TODO: For RAID-6 we need twice as many. Actually:
246                         *         num_stripes / LCMdP(W,P);
247                         *         if (W%P != 0) num_stripes *= parity;
248                         */
249
250                         /* first/last seg is split */
251                         num_raid_units += layout->group_width;
252                         sgs_per_dev = div_u64(num_raid_units, data_devs);
253                 } else {
254                         /* For Writes add parity pages array. */
255                         max_par_pages = num_raid_units * pages_in_unit *
256                                                 sizeof(struct page *);
257                 }
258         }
259
260         ret = _ore_get_io_state(layout, oc, numdevs, sgs_per_dev, max_par_pages,
261                                 pios);
262         if (unlikely(ret))
263                 return ret;
264
265         ios = *pios;
266         ios->reading = is_reading;
267         ios->offset = offset;
268
269         if (length) {
270                 ore_calc_stripe_info(layout, offset, length, &ios->si);
271                 ios->length = ios->si.length;
272                 ios->nr_pages = (ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
273                 if (layout->parity)
274                         _ore_post_alloc_raid_stuff(ios);
275         }
276
277         return 0;
278 }
279 EXPORT_SYMBOL(ore_get_rw_state);
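
/*
 * A minimal caller sketch (illustration only; my_pages and my_resubmit() are
 * hypothetical helpers of the caller, not part of the ore API):
 *
 *	struct ore_io_state *ios;
 *	int ret = ore_get_rw_state(layout, oc, true, offset, length, &ios);
 *
 *	if (unlikely(ret))
 *		return ret;
 *	ios->pages = my_pages;
 *	ret = ore_read(ios);
 *	ore_put_io_state(ios);
 *
 * If ore trimmed the IO (ios->length < length) the pages past ios->length
 * must be re-issued, e.g. my_resubmit(offset + ios->length,
 * length - ios->length).
 */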
280
281 /* Allocate an io_state for all the devices in the comps array
282  *
283  * This version of io_state allocation is used mostly by create/remove
284  * and trunc where we currently need all the devices. The only wasteful
285  * bit is the read/write_attributes with no IO. Those sites should
286  * be converted to use ore_get_rw_state() with length=0.
287  */
288 int  ore_get_io_state(struct ore_layout *layout, struct ore_components *oc,
289                       struct ore_io_state **pios)
290 {
291         return _ore_get_io_state(layout, oc, oc->numdevs, 0, 0, pios);
292 }
293 EXPORT_SYMBOL(ore_get_io_state);
294
295 void ore_put_io_state(struct ore_io_state *ios)
296 {
297         if (ios) {
298                 unsigned i;
299
300                 for (i = 0; i < ios->numdevs; i++) {
301                         struct ore_per_dev_state *per_dev = &ios->per_dev[i];
302
303                         if (per_dev->or)
304                                 osd_end_request(per_dev->or);
305                         if (per_dev->bio)
306                                 bio_put(per_dev->bio);
307                 }
308
309                 _ore_free_raid_stuff(ios);
310                 kfree(ios);
311         }
312 }
313 EXPORT_SYMBOL(ore_put_io_state);
314
315 static void _sync_done(struct ore_io_state *ios, void *p)
316 {
317         struct completion *waiting = p;
318
319         complete(waiting);
320 }
321
322 static void _last_io(struct kref *kref)
323 {
324         struct ore_io_state *ios = container_of(
325                                         kref, struct ore_io_state, kref);
326
327         ios->done(ios, ios->private);
328 }
329
330 static void _done_io(struct osd_request *or, void *p)
331 {
332         struct ore_io_state *ios = p;
333
334         kref_put(&ios->kref, _last_io);
335 }
336
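/* Execute all prepared requests, asynchronously unless ios->done is NULL (in
 * which case we wait here).  The kref counts in-flight requests plus one for
 * this function: it starts at 1, each submitted request takes a reference that
 * _done_io() drops, and the final kref_put() below lets the last completion
 * (or this function itself, if nothing was submitted) call ios->done().
 */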
337 static int ore_io_execute(struct ore_io_state *ios)
338 {
339         DECLARE_COMPLETION_ONSTACK(wait);
340         bool sync = (ios->done == NULL);
341         int i, ret;
342
343         if (sync) {
344                 ios->done = _sync_done;
345                 ios->private = &wait;
346         }
347
348         for (i = 0; i < ios->numdevs; i++) {
349                 struct osd_request *or = ios->per_dev[i].or;
350                 if (unlikely(!or))
351                         continue;
352
353                 ret = osd_finalize_request(or, 0, _ios_cred(ios, i), NULL);
354                 if (unlikely(ret)) {
355                         ORE_DBGMSG("Failed to osd_finalize_request() => %d\n",
356                                      ret);
357                         return ret;
358                 }
359         }
360
361         kref_init(&ios->kref);
362
363         for (i = 0; i < ios->numdevs; i++) {
364                 struct osd_request *or = ios->per_dev[i].or;
365                 if (unlikely(!or))
366                         continue;
367
368                 kref_get(&ios->kref);
369                 osd_execute_request_async(or, _done_io, ios);
370         }
371
372         kref_put(&ios->kref, _last_io);
373         ret = 0;
374
375         if (sync) {
376                 wait_for_completion(&wait);
377                 ret = ore_check_io(ios, NULL);
378         }
379         return ret;
380 }
381
382 static void _clear_bio(struct bio *bio)
383 {
384         struct bio_vec *bv;
385         unsigned i;
386
387         __bio_for_each_segment(bv, bio, i, 0) {
388                 unsigned this_count = bv->bv_len;
389
390                 if (likely(PAGE_SIZE == this_count))
391                         clear_highpage(bv->bv_page);
392                 else
393                         zero_user(bv->bv_page, bv->bv_offset, this_count);
394         }
395 }
396
397 int ore_check_io(struct ore_io_state *ios, ore_on_dev_error on_dev_error)
398 {
399         enum osd_err_priority accumulated_osd_err = 0;
400         int accumulated_lin_err = 0;
401         int i;
402
403         for (i = 0; i < ios->numdevs; i++) {
404                 struct osd_sense_info osi;
405                 struct ore_per_dev_state *per_dev = &ios->per_dev[i];
406                 struct osd_request *or = per_dev->or;
407                 int ret;
408
409                 if (unlikely(!or))
410                         continue;
411
412                 ret = osd_req_decode_sense(or, &osi);
413                 if (likely(!ret))
414                         continue;
415
416                 if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
417                         /* start read offset is past the end of the file */
418                         _clear_bio(per_dev->bio);
419                         ORE_DBGMSG("start read offset past end of file "
420                                 "offset=0x%llx, length=0x%llx\n",
421                                 _LLU(per_dev->offset),
422                                 _LLU(per_dev->length));
423
424                         continue; /* we recovered */
425                 }
426
427                 if (on_dev_error) {
428                         u64 residual = ios->reading ?
429                                         or->in.residual : or->out.residual;
430                         u64 offset = (ios->offset + ios->length) - residual;
431                         struct ore_dev *od = ios->oc->ods[
432                                         per_dev->dev - ios->oc->first_dev];
433
434                         on_dev_error(ios, od, per_dev->dev, osi.osd_err_pri,
435                                      offset, residual);
436                 }
437                 if (osi.osd_err_pri >= accumulated_osd_err) {
438                         accumulated_osd_err = osi.osd_err_pri;
439                         accumulated_lin_err = ret;
440                 }
441         }
442
443         return accumulated_lin_err;
444 }
445 EXPORT_SYMBOL(ore_check_io);
446
447 /*
448  * L - logical offset into the file
449  *
450  * D - number of Data devices
451  *      D = group_width - parity
452  *
453  * U - The number of bytes in a stripe within a group
454  *      U =  stripe_unit * D
455  *
456  * T - The number of bytes striped within a group of component objects
457  *     (before advancing to the next group)
458  *      T = U * group_depth
459  *
460  * S - The number of bytes striped across all component objects
461  *     before the pattern repeats
462  *      S = T * group_count
463  *
464  * M - The "major" (i.e., across all components) cycle number
465  *      M = L / S
466  *
467  * G - Counts the groups from the beginning of the major cycle
468  *      G = (L - (M * S)) / T   [or (L % S) / T]
469  *
470  * H - The byte offset within the group
471  *      H = (L - (M * S)) % T   [or (L % S) % T]
472  *
473  * N - The "minor" (i.e., across the group) stripe number
474  *      N = H / U
475  *
476  * C - The component index corresponding to L
477  *
478  *      C = (H - (N * U)) / stripe_unit + G * D
479  *      [or (L % U) / stripe_unit + G * D]
480  *
481  * O - The component offset corresponding to L
482  *      O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
483  *
484  * LCMdP - Parity cycle: Lowest Common Multiple of group_width and parity,
485  *          divided by parity
486  *      LCMdP = lcm(group_width, parity) / parity
487  *
488  * R - The parity Rotation stripe
489  *     (Note parity cycle always starts at a group's boundary)
490  *      R = N % LCMdP
491  *
492  * I = the first parity device index
493  *      I = (group_width + group_width - R*parity - parity) % group_width
494  *
495  * Craid - The component index Rotated
496  *      Craid = (group_width + C - R*parity) % group_width
497  *      (We add group_width to avoid negative numbers in the modulo math)
498  */
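/*
 * Worked example with illustrative numbers (parity = 0, mirrors_p1 = 1):
 * stripe_unit = 4096, group_width = D = 3, group_depth = 2, group_count = 2,
 * so U = 12288, T = 24576, S = 49152.  For L = 30000:
 *	M = 30000 / 49152 = 0
 *	G = 30000 / 24576 = 1,  H = 30000 - 24576 = 5424
 *	N = 5424 / 12288 = 0
 *	C = (5424 - 0) / 4096 + 1 * 3 = 4
 *	unit_off = 30000 % 4096 = 1328,  obj_offset = 1328
 * So the IO starts at byte 1328 of component 4, and si->length is capped at
 * T - H = 19152 bytes (or at the caller's @length if that is smaller).
 */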
499 void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
500                           u64 length, struct ore_striping_info *si)
501 {
502         u32     stripe_unit = layout->stripe_unit;
503         u32     group_width = layout->group_width;
504         u64     group_depth = layout->group_depth;
505         u32     parity      = layout->parity;
506
507         u32     D = group_width - parity;
508         u32     U = D * stripe_unit;
509         u64     T = U * group_depth;
510         u64     S = T * layout->group_count;
511         u64     M = div64_u64(file_offset, S);
512
513         /*
514         G = (L - (M * S)) / T
515         H = (L - (M * S)) % T
516         */
517         u64     LmodS = file_offset - M * S;
518         u32     G = div64_u64(LmodS, T);
519         u64     H = LmodS - G * T;
520
521         u32     N = div_u64(H, U);
522
523         /* "H - (N * U)" is just "H % U" so it fits in a u32 */
524         u32     C = (u32)(H - (N * U)) / stripe_unit + G * group_width;
525
526         div_u64_rem(file_offset, stripe_unit, &si->unit_off);
527
528         si->obj_offset = si->unit_off + (N * stripe_unit) +
529                                   (M * group_depth * stripe_unit);
530
531         if (parity) {
532                 u32 LCMdP = lcm(group_width, parity) / parity;
533                 /* R     = N % LCMdP; */
534                 u32 RxP   = (N % LCMdP) * parity;
535                 u32 first_dev = C - C % group_width;
536
537                 si->par_dev = (group_width + group_width - parity - RxP) %
538                               group_width + first_dev;
539                 si->dev = (group_width + C - RxP) % group_width + first_dev;
540                 si->bytes_in_stripe = U;
541                 si->first_stripe_start = M * S + G * T + N * U;
542         } else {
543                 /* Make the math correct; see _prepare_one_group */
544                 si->par_dev = group_width;
545                 si->dev = C;
546         }
547
548         si->dev *= layout->mirrors_p1;
549         si->par_dev *= layout->mirrors_p1;
550         si->offset = file_offset;
551         si->length = T - H;
552         if (si->length > length)
553                 si->length = length;
554         si->M = M;
555 }
556 EXPORT_SYMBOL(ore_calc_stripe_info);
557
558 int _ore_add_stripe_unit(struct ore_io_state *ios,  unsigned *cur_pg,
559                          unsigned pgbase, struct page **pages,
560                          struct ore_per_dev_state *per_dev, int cur_len)
561 {
562         unsigned pg = *cur_pg;
563         struct request_queue *q =
564                         osd_request_queue(_ios_od(ios, per_dev->dev));
565         unsigned len = cur_len;
566         int ret;
567
568         if (per_dev->bio == NULL) {
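                /* Size the bio for this device's share of the IO: nr_pages is
                 * scaled up to account for parity units and padded by one full
                 * stripe worth of pages before dividing by group_width.  With
                 * illustrative numbers group_width = 4, parity = 1,
                 * stripe_unit = 16 pages and ios->nr_pages = 96 this gives
                 * (96 * 4 / 3 + 4 * 16) / 4 = 48 bio_vecs.
                 */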
569                 unsigned pages_in_stripe = ios->layout->group_width *
570                                         (ios->layout->stripe_unit / PAGE_SIZE);
571                 unsigned nr_pages = ios->nr_pages * ios->layout->group_width /
572                                         (ios->layout->group_width -
573                                          ios->layout->parity);
574                 unsigned bio_size = (nr_pages + pages_in_stripe) /
575                                         ios->layout->group_width;
576
577                 per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
578                 if (unlikely(!per_dev->bio)) {
579                         ORE_DBGMSG("Failed to allocate BIO size=%u\n",
580                                      bio_size);
581                         ret = -ENOMEM;
582                         goto out;
583                 }
584         }
585
586         while (cur_len > 0) {
587                 unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
588                 unsigned added_len;
589
590                 cur_len -= pglen;
591
592                 added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
593                                             pglen, pgbase);
594                 if (unlikely(pglen != added_len)) {
595                         ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=%u\n",
596                                    per_dev->bio->bi_vcnt);
597                         ret = -ENOMEM;
598                         goto out;
599                 }
600                 pgbase = 0;
601                 ++pg;
602         }
603         BUG_ON(cur_len);
604
605         per_dev->length += len;
606         *cur_pg = pg;
607         ret = 0;
608 out:    /* We fail the complete unit on an error, i.e. don't advance
609          * per_dev->length and cur_pg. This means that we might have a bigger
610          * bio than the CDB requested length (per_dev->length). That's fine,
611          * only the opposite is fatal.
612          */
613         return ret;
614 }
615
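/* Split ios->pages over the devices of the group that ios->si falls in.  The
 * first chunk starts at si->dev at si->unit_off within its stripe unit; every
 * following stripe_unit sized chunk moves mirrors_p1 devices forward, wrapping
 * within the group.  When the walk reaches si->par_dev, or the data runs out
 * on a write that has parity_pages, a parity unit is added and par_dev is
 * rotated backwards for the next stripe.
 */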
616 static int _prepare_for_striping(struct ore_io_state *ios)
617 {
618         struct ore_striping_info *si = &ios->si;
619         unsigned stripe_unit = ios->layout->stripe_unit;
620         unsigned mirrors_p1 = ios->layout->mirrors_p1;
621         unsigned group_width = ios->layout->group_width;
622         unsigned devs_in_group = group_width * mirrors_p1;
623         unsigned dev = si->dev;
624         unsigned first_dev = dev - (dev % devs_in_group);
625         unsigned dev_order;
626         unsigned cur_pg = ios->pages_consumed;
627         u64 length = ios->length;
628         int ret = 0;
629
630         if (!ios->pages) {
631                 ios->numdevs = ios->layout->mirrors_p1;
632                 return 0;
633         }
634
635         BUG_ON(length > si->length);
636
637         dev_order = _dev_order(devs_in_group, mirrors_p1, si->par_dev, dev);
638         si->cur_comp = dev_order;
639
640         while (length) {
641                 unsigned comp = dev - first_dev;
642                 struct ore_per_dev_state *per_dev = &ios->per_dev[comp];
643                 unsigned cur_len, page_off = 0;
644
645                 if (!per_dev->length) {
646                         per_dev->dev = dev;
647                         if (dev == si->dev) {
648                                 WARN_ON(dev == si->par_dev);
649                                 per_dev->offset = si->obj_offset;
650                                 cur_len = stripe_unit - si->unit_off;
651                                 page_off = si->unit_off & ~PAGE_MASK;
652                                 BUG_ON(page_off && (page_off != ios->pgbase));
653                         } else {
654                                 if (si->cur_comp > dev_order)
655                                         per_dev->offset =
656                                                 si->obj_offset - si->unit_off;
657                                 else /* si->cur_comp < dev_order */
658                                         per_dev->offset =
659                                                 si->obj_offset + stripe_unit -
660                                                                    si->unit_off;
661                                 cur_len = stripe_unit;
662                         }
663                 } else {
664                         cur_len = stripe_unit;
665                 }
666                 if (cur_len >= length)
667                         cur_len = length;
668
669                 ret = _ore_add_stripe_unit(ios, &cur_pg, page_off, ios->pages,
670                                            per_dev, cur_len);
671                 if (unlikely(ret))
672                         goto out;
673
674                 dev += mirrors_p1;
675                 dev = (dev % devs_in_group) + first_dev;
676
677                 length -= cur_len;
678
679                 si->cur_comp = (si->cur_comp + 1) % group_width;
680                 if (unlikely((dev == si->par_dev) ||
681                              (!length && ios->parity_pages))) {
682                         if (!length)
683                                 /* If we are writing and this is the very last
684                                  * stripe, then operate on the parity dev.
685                                  */
686                                 dev = si->par_dev;
687                         if (ios->reading)
688                                 /* For writes cur_len just indicates whether
689                                  * this is the last one. See _ore_add_parity_unit.
690                                  */
691                                 cur_len = length;
692                         per_dev = &ios->per_dev[dev - first_dev];
693                         if (!per_dev->length) {
694                                 /* Only/always the parity unit of the first
695                                  * stripe will be empty. So this is a chance to
696                                  * initialize the per_dev info.
697                                  */
698                                 per_dev->dev = dev;
699                                 per_dev->offset = si->obj_offset - si->unit_off;
700                         }
701
702                         ret = _ore_add_parity_unit(ios, si, per_dev, cur_len);
703                         if (unlikely(ret))
704                                         goto out;
705
706                         /* Rotate next par_dev backwards with wrapping */
707                         si->par_dev = (devs_in_group + si->par_dev -
708                                        ios->layout->parity * mirrors_p1) %
709                                       devs_in_group + first_dev;
710                         /* Next stripe, start fresh */
711                         si->cur_comp = 0;
712                 }
713         }
714 out:
715         ios->numdevs = devs_in_group;
716         ios->pages_consumed = cur_pg;
717         if (unlikely(ret)) {
718                 if (length == ios->length)
719                         return ret;
720                 else
721                         ios->length -= length;
722         }
723         return 0;
724 }
725
726 int ore_create(struct ore_io_state *ios)
727 {
728         int i, ret;
729
730         for (i = 0; i < ios->oc->numdevs; i++) {
731                 struct osd_request *or;
732
733                 or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
734                 if (unlikely(!or)) {
735                         ORE_ERR("%s: osd_start_request failed\n", __func__);
736                         ret = -ENOMEM;
737                         goto out;
738                 }
739                 ios->per_dev[i].or = or;
740                 ios->numdevs++;
741
742                 osd_req_create_object(or, _ios_obj(ios, i));
743         }
744         ret = ore_io_execute(ios);
745
746 out:
747         return ret;
748 }
749 EXPORT_SYMBOL(ore_create);
750
751 int ore_remove(struct ore_io_state *ios)
752 {
753         int i, ret;
754
755         for (i = 0; i < ios->oc->numdevs; i++) {
756                 struct osd_request *or;
757
758                 or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
759                 if (unlikely(!or)) {
760                         ORE_ERR("%s: osd_start_request failed\n", __func__);
761                         ret = -ENOMEM;
762                         goto out;
763                 }
764                 ios->per_dev[i].or = or;
765                 ios->numdevs++;
766
767                 osd_req_remove_object(or, _ios_obj(ios, i));
768         }
769         ret = ore_io_execute(ios);
770
771 out:
772         return ret;
773 }
774 EXPORT_SYMBOL(ore_remove);
775
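/* Prepare the write request(s) for one component and all of its mirrors.  The
 * "master" component already carries the bio built by _prepare_for_striping;
 * every mirror gets its own osd_request with a clone of that bio (sharing the
 * same pages) and the same offset/length.  Without a page array this instead
 * issues a write_kern of ios->kern_buff, or a plain set_attributes request.
 */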
776 static int _write_mirror(struct ore_io_state *ios, int cur_comp)
777 {
778         struct ore_per_dev_state *master_dev = &ios->per_dev[cur_comp];
779         unsigned dev = ios->per_dev[cur_comp].dev;
780         unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
781         int ret = 0;
782
783         if (ios->pages && !master_dev->length)
784                 return 0; /* Just an empty slot */
785
786         for (; cur_comp < last_comp; ++cur_comp, ++dev) {
787                 struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
788                 struct osd_request *or;
789
790                 or = osd_start_request(_ios_od(ios, dev), GFP_KERNEL);
791                 if (unlikely(!or)) {
792                         ORE_ERR("%s: osd_start_request failed\n", __func__);
793                         ret = -ENOMEM;
794                         goto out;
795                 }
796                 per_dev->or = or;
797
798                 if (ios->pages) {
799                         struct bio *bio;
800
801                         if (per_dev != master_dev) {
802                                 bio = bio_kmalloc(GFP_KERNEL,
803                                                   master_dev->bio->bi_max_vecs);
804                                 if (unlikely(!bio)) {
805                                         ORE_DBGMSG(
806                                               "Failed to allocate BIO size=%u\n",
807                                               master_dev->bio->bi_max_vecs);
808                                         ret = -ENOMEM;
809                                         goto out;
810                                 }
811
812                                 __bio_clone(bio, master_dev->bio);
813                                 bio->bi_bdev = NULL;
814                                 bio->bi_next = NULL;
815                                 per_dev->offset = master_dev->offset;
816                                 per_dev->length = master_dev->length;
817                                 per_dev->bio =  bio;
818                                 per_dev->dev = dev;
819                         } else {
820                                 bio = master_dev->bio;
821                                 /* FIXME: bio_set_dir() */
822                                 bio->bi_rw |= REQ_WRITE;
823                         }
824
825                         osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
826                                       bio, per_dev->length);
827                         ORE_DBGMSG("write(0x%llx) offset=0x%llx "
828                                       "length=0x%llx dev=%d\n",
829                                      _LLU(_ios_obj(ios, dev)->id),
830                                      _LLU(per_dev->offset),
831                                      _LLU(per_dev->length), dev);
832                 } else if (ios->kern_buff) {
833                         per_dev->offset = ios->si.obj_offset;
834                         per_dev->dev = ios->si.dev + dev;
835
836                         /* no cross device without page array */
837                         BUG_ON((ios->layout->group_width > 1) &&
838                                (ios->si.unit_off + ios->length >
839                                 ios->layout->stripe_unit));
840
841                         ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
842                                                  per_dev->offset,
843                                                  ios->kern_buff, ios->length);
844                         if (unlikely(ret))
845                                 goto out;
846                         ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
847                                       "length=0x%llx dev=%d\n",
848                                      _LLU(_ios_obj(ios, dev)->id),
849                                      _LLU(per_dev->offset),
850                                      _LLU(ios->length), per_dev->dev);
851                 } else {
852                         osd_req_set_attributes(or, _ios_obj(ios, dev));
853                         ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
854                                      _LLU(_ios_obj(ios, dev)->id),
855                                      ios->out_attr_len, dev);
856                 }
857
858                 if (ios->out_attr)
859                         osd_req_add_set_attr_list(or, ios->out_attr,
860                                                   ios->out_attr_len);
861
862                 if (ios->in_attr)
863                         osd_req_add_get_attr_list(or, ios->in_attr,
864                                                   ios->in_attr_len);
865         }
866
867 out:
868         return ret;
869 }
870
871 int ore_write(struct ore_io_state *ios)
872 {
873         int i;
874         int ret;
875
876         ret = _prepare_for_striping(ios);
877         if (unlikely(ret))
878                 return ret;
879
880         for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
881                 ret = _write_mirror(ios, i);
882                 if (unlikely(ret))
883                         return ret;
884         }
885
886         ret = ore_io_execute(ios);
887         return ret;
888 }
889 EXPORT_SYMBOL(ore_write);
890
891 static int _read_mirror(struct ore_io_state *ios, unsigned cur_comp)
892 {
893         struct osd_request *or;
894         struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
895         struct osd_obj_id *obj = _ios_obj(ios, cur_comp);
896         unsigned first_dev = (unsigned)obj->id;
897
898         if (ios->pages && !per_dev->length)
899                 return 0; /* Just an empty slot */
900
901         first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
902         or = osd_start_request(_ios_od(ios, first_dev), GFP_KERNEL);
903         if (unlikely(!or)) {
904                 ORE_ERR("%s: osd_start_request failed\n", __func__);
905                 return -ENOMEM;
906         }
907         per_dev->or = or;
908
909         if (ios->pages) {
910                 if (per_dev->cur_sg) {
911                         /* finalize the last sg_entry */
912                         _ore_add_sg_seg(per_dev, 0, false);
913                         if (unlikely(!per_dev->cur_sg))
914                                 return 0; /* Skip parity only device */
915
916                         osd_req_read_sg(or, obj, per_dev->bio,
917                                         per_dev->sglist, per_dev->cur_sg);
918                 } else {
919                         /* The no raid case */
920                         osd_req_read(or, obj, per_dev->offset,
921                                      per_dev->bio, per_dev->length);
922                 }
923
924                 ORE_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
925                              " dev=%d sg_len=%d\n", _LLU(obj->id),
926                              _LLU(per_dev->offset), _LLU(per_dev->length),
927                              first_dev, per_dev->cur_sg);
928         } else {
929                 BUG_ON(ios->kern_buff);
930
931                 osd_req_get_attributes(or, obj);
932                 ORE_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
933                               _LLU(obj->id),
934                               ios->in_attr_len, first_dev);
935         }
936         if (ios->out_attr)
937                 osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);
938
939         if (ios->in_attr)
940                 osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);
941
942         return 0;
943 }
944
945 int ore_read(struct ore_io_state *ios)
946 {
947         int i;
948         int ret;
949
950         ret = _prepare_for_striping(ios);
951         if (unlikely(ret))
952                 return ret;
953
954         for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
955                 ret = _read_mirror(ios, i);
956                 if (unlikely(ret))
957                         return ret;
958         }
959
960         ret = ore_io_execute(ios);
961         return ret;
962 }
963 EXPORT_SYMBOL(ore_read);
964
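/* Scan the get-attributes reply of per_dev[0] for @attr, matching on attribute
 * page and id, and point @attr->val_ptr / @attr->len at the returned value.  A
 * typical caller executes an ios with ios->in_attr set and then pulls single
 * values out of the reply with this helper; -EIO means the attribute was not
 * returned by the target.
 */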
965 int extract_attr_from_ios(struct ore_io_state *ios, struct osd_attr *attr)
966 {
967         struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
968         void *iter = NULL;
969         int nelem;
970
971         do {
972                 nelem = 1;
973                 osd_req_decode_get_attr_list(ios->per_dev[0].or,
974                                              &cur_attr, &nelem, &iter);
975                 if ((cur_attr.attr_page == attr->attr_page) &&
976                     (cur_attr.attr_id == attr->attr_id)) {
977                         attr->len = cur_attr.len;
978                         attr->val_ptr = cur_attr.val_ptr;
979                         return 0;
980                 }
981         } while (iter);
982
983         return -EIO;
984 }
985 EXPORT_SYMBOL(extract_attr_from_ios);
986
987 static int _truncate_mirrors(struct ore_io_state *ios, unsigned cur_comp,
988                              struct osd_attr *attr)
989 {
990         int last_comp = cur_comp + ios->layout->mirrors_p1;
991
992         for (; cur_comp < last_comp; ++cur_comp) {
993                 struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
994                 struct osd_request *or;
995
996                 or = osd_start_request(_ios_od(ios, cur_comp), GFP_KERNEL);
997                 if (unlikely(!or)) {
998                         ORE_ERR("%s: osd_start_request failed\n", __func__);
999                         return -ENOMEM;
1000                 }
1001                 per_dev->or = or;
1002
1003                 osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
1004                 osd_req_add_set_attr_list(or, attr, 1);
1005         }
1006
1007         return 0;
1008 }
1009
1010 struct _trunc_info {
1011         struct ore_striping_info si;
1012         u64 prev_group_obj_off;
1013         u64 next_group_obj_off;
1014
1015         unsigned first_group_dev;
1016         unsigned next_group_dev;
1017 };
1018
1019 static void _calc_trunc_info(struct ore_layout *layout, u64 file_offset,
1020                              struct _trunc_info *ti)
1021 {
1022         unsigned stripe_unit = layout->stripe_unit;
1023
1024         ore_calc_stripe_info(layout, file_offset, 0, &ti->si);
1025
1026         ti->prev_group_obj_off = ti->si.M * stripe_unit;
1027         ti->next_group_obj_off = ti->si.M ? (ti->si.M - 1) * stripe_unit : 0;
1028
1029         ti->first_group_dev = ti->si.dev - (ti->si.dev % layout->group_width);
1030         ti->next_group_dev = ti->first_group_dev + layout->group_width;
1031 }
1032
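/* Truncate every component object so that together they represent a file of
 * @size bytes.  Devices in groups before the one holding @size are cut at
 * prev_group_obj_off and devices in later groups at next_group_obj_off; within
 * the group itself, devices before si.dev are cut at the end of the stripe
 * unit that holds @size, si.dev at si.obj_offset, and devices after it at the
 * start of that stripe unit (si.obj_offset - si.unit_off).
 */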
1033 int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
1034                    u64 size)
1035 {
1036         struct ore_io_state *ios;
1037         struct exofs_trunc_attr {
1038                 struct osd_attr attr;
1039                 __be64 newsize;
1040         } *size_attrs;
1041         struct _trunc_info ti;
1042         int i, ret;
1043
1044         ret = ore_get_io_state(layout, oc, &ios);
1045         if (unlikely(ret))
1046                 return ret;
1047
1048         _calc_trunc_info(ios->layout, size, &ti);
1049
1050         size_attrs = kcalloc(ios->oc->numdevs, sizeof(*size_attrs),
1051                              GFP_KERNEL);
1052         if (unlikely(!size_attrs)) {
1053                 ret = -ENOMEM;
1054                 goto out;
1055         }
1056
1057         ios->numdevs = ios->oc->numdevs;
1058
1059         for (i = 0; i < ios->numdevs; ++i) {
1060                 struct exofs_trunc_attr *size_attr = &size_attrs[i];
1061                 u64 obj_size;
1062
1063                 if (i < ti.first_group_dev)
1064                         obj_size = ti.prev_group_obj_off;
1065                 else if (i >= ti.next_group_dev)
1066                         obj_size = ti.next_group_obj_off;
1067                 else if (i < ti.si.dev) /* dev within this group */
1068                         obj_size = ti.si.obj_offset +
1069                                       ios->layout->stripe_unit - ti.si.unit_off;
1070                 else if (i == ti.si.dev)
1071                         obj_size = ti.si.obj_offset;
1072                 else /* i > ti.si.dev */
1073                         obj_size = ti.si.obj_offset - ti.si.unit_off;
1074
1075                 size_attr->newsize = cpu_to_be64(obj_size);
1076                 size_attr->attr = g_attr_logical_length;
1077                 size_attr->attr.val_ptr = &size_attr->newsize;
1078
1079                 ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
1080                              _LLU(oc->comps->obj.id), _LLU(obj_size), i);
1081                 ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
1082                                         &size_attr->attr);
1083                 if (unlikely(ret))
1084                         goto out;
1085         }
1086         ret = ore_io_execute(ios);
1087
1088 out:
1089         kfree(size_attrs);
1090         ore_put_io_state(ios);
1091         return ret;
1092 }
1093 EXPORT_SYMBOL(ore_truncate);
1094
1095 const struct osd_attr g_attr_logical_length = ATTR_DEF(
1096         OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
1097 EXPORT_SYMBOL(g_attr_logical_length);