dm space map metadata: fix sm_bootstrap_get_nr_blocks()
drivers/md/persistent-data/dm-space-map-metadata.c
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map.h"
#include "dm-space-map-common.h"
#include "dm-space-map-metadata.h"

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map metadata"

/*----------------------------------------------------------------*/

/*
 * Space map interface.
 *
 * The low level disk format is written using the standard btree and
 * transaction manager.  This means that performing disk operations may
 * cause us to recurse into the space map in order to allocate new blocks.
 * For this reason we have a pool of pre-allocated blocks large enough to
 * service any metadata_ll_disk operation.
 */

/*
 * FIXME: we should calculate this based on the size of the device.
 * Only the metadata space map needs this functionality.
 */
#define MAX_RECURSIVE_ALLOCATIONS 1024
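
/*
 * An illustrative example of the recursion this guards against, assuming
 * the dm_sm_* wrappers from dm-space-map.h that call through the ops
 * table below:
 *
 *   dm_sm_inc_block(sm, b)
 *     -> sm_ll_inc() updates the reference count btree
 *        -> the btree needs a fresh metadata block
 *           -> sm_metadata_new_block() is re-entered
 *
 * The inner operation must not modify structures the outer one is still
 * updating, so it is recorded in smm->uncommitted[] via add_bop() and
 * replayed by out() when the outermost operation unwinds.
 */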

enum block_op_type {
        BOP_INC,
        BOP_DEC
};

struct block_op {
        enum block_op_type type;
        dm_block_t block;
};

struct sm_metadata {
        struct dm_space_map sm;

        struct ll_disk ll;
        struct ll_disk old_ll;

        dm_block_t begin;

        unsigned recursion_count;
        unsigned allocated_this_transaction;
        unsigned nr_uncommitted;
        struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
};

static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
        struct block_op *op;

        if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
                DMERR("too many recursive allocations");
                return -ENOMEM;
        }

        op = smm->uncommitted + smm->nr_uncommitted++;
        op->type = type;
        op->block = b;

        return 0;
}

static int commit_bop(struct sm_metadata *smm, struct block_op *op)
{
        int r = 0;
        enum allocation_event ev;

        switch (op->type) {
        case BOP_INC:
                r = sm_ll_inc(&smm->ll, op->block, &ev);
                break;

        case BOP_DEC:
                r = sm_ll_dec(&smm->ll, op->block, &ev);
                break;
        }

        return r;
}

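/*
 * in()/out() bracket any operation that may recurse back into this space
 * map.  out() replays the block ops queued by add_bop() once the outermost
 * level (recursion_count == 1) is unwinding.
 */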
static void in(struct sm_metadata *smm)
{
        smm->recursion_count++;
}

static int out(struct sm_metadata *smm)
{
        int r = 0;

        /*
         * If we're not recursing then very bad things are happening.
         */
        if (!smm->recursion_count) {
                DMERR("lost track of recursion depth");
                return -ENOMEM;
        }

        if (smm->recursion_count == 1 && smm->nr_uncommitted) {
                while (smm->nr_uncommitted && !r) {
                        smm->nr_uncommitted--;
                        r = commit_bop(smm, smm->uncommitted +
                                       smm->nr_uncommitted);
                        if (r)
                                break;
                }
        }

        smm->recursion_count--;

        return r;
}

/*
 * When using the out() function above, we often want to combine an error
 * code for the operation run in the recursive context with that from
 * out().
 */
static int combine_errors(int r1, int r2)
{
        return r1 ? r1 : r2;
}

static int recursing(struct sm_metadata *smm)
{
        return smm->recursion_count;
}

static void sm_metadata_destroy(struct dm_space_map *sm)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        kfree(smm);
}

static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
        DMERR("doesn't support extend");
        return -EINVAL;
}

static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->ll.nr_blocks;

        return 0;
}

static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->old_ll.nr_blocks - smm->old_ll.nr_allocated -
                 smm->allocated_this_transaction;

        return 0;
}

static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
                                 uint32_t *result)
{
        int r, i;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        unsigned adjustment = 0;

        /*
         * We may have some uncommitted adjustments to add.  This list
         * should always be really short.
         */
        for (i = 0; i < smm->nr_uncommitted; i++) {
                struct block_op *op = smm->uncommitted + i;

                if (op->block != b)
                        continue;

                switch (op->type) {
                case BOP_INC:
                        adjustment++;
                        break;

                case BOP_DEC:
                        adjustment--;
                        break;
                }
        }

        r = sm_ll_lookup(&smm->ll, b, result);
        if (r)
                return r;

        *result += adjustment;

        return 0;
}

static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
                                              dm_block_t b, int *result)
{
        int r, i, adjustment = 0;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        uint32_t rc;

        /*
         * We may have some uncommitted adjustments to add.  This list
         * should always be really short.
         */
        for (i = 0; i < smm->nr_uncommitted; i++) {
                struct block_op *op = smm->uncommitted + i;

                if (op->block != b)
                        continue;

                switch (op->type) {
                case BOP_INC:
                        adjustment++;
                        break;

                case BOP_DEC:
                        adjustment--;
                        break;
                }
        }

        if (adjustment > 1) {
                *result = 1;
                return 0;
        }

        r = sm_ll_lookup_bitmap(&smm->ll, b, &rc);
        if (r)
                return r;

        if (rc == 3)
                /*
                 * We err on the side of caution, and always return true.
                 */
                *result = 1;
        else
                *result = rc + adjustment > 1;

        return 0;
}

static int sm_metadata_set_count(struct dm_space_map *sm, dm_block_t b,
                                 uint32_t count)
{
        int r, r2;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        if (smm->recursion_count) {
                DMERR("cannot recurse set_count()");
                return -EINVAL;
        }

        in(smm);
        r = sm_ll_insert(&smm->ll, b, count, &ev);
        r2 = out(smm);

        return combine_errors(r, r2);
}

static int sm_metadata_inc_block(struct dm_space_map *sm, dm_block_t b)
{
        int r, r2 = 0;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        if (recursing(smm))
                r = add_bop(smm, BOP_INC, b);
        else {
                in(smm);
                r = sm_ll_inc(&smm->ll, b, &ev);
                r2 = out(smm);
        }

        return combine_errors(r, r2);
}

static int sm_metadata_dec_block(struct dm_space_map *sm, dm_block_t b)
{
        int r, r2 = 0;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        if (recursing(smm))
                r = add_bop(smm, BOP_DEC, b);
        else {
                in(smm);
                r = sm_ll_dec(&smm->ll, b, &ev);
                r2 = out(smm);
        }

        return combine_errors(r, r2);
}

static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
{
        int r, r2 = 0;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
        if (r)
                return r;

        smm->begin = *b + 1;

        if (recursing(smm))
                r = add_bop(smm, BOP_INC, *b);
        else {
                in(smm);
                r = sm_ll_inc(&smm->ll, *b, &ev);
                r2 = out(smm);
        }

        if (!r)
                smm->allocated_this_transaction++;

        return combine_errors(r, r2);
}

static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
        int r = sm_metadata_new_block_(sm, b);
        if (r)
                DMERR("out of metadata space");
        return r;
}

static int sm_metadata_commit(struct dm_space_map *sm)
{
        int r;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        r = sm_ll_commit(&smm->ll);
        if (r)
                return r;

        memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
        smm->begin = 0;
        smm->allocated_this_transaction = 0;

        return 0;
}

static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
{
        *result = sizeof(struct disk_sm_root);

        return 0;
}

static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        struct disk_sm_root root_le;

        root_le.nr_blocks = cpu_to_le64(smm->ll.nr_blocks);
        root_le.nr_allocated = cpu_to_le64(smm->ll.nr_allocated);
        root_le.bitmap_root = cpu_to_le64(smm->ll.bitmap_root);
        root_le.ref_count_root = cpu_to_le64(smm->ll.ref_count_root);

        if (max < sizeof(root_le))
                return -ENOSPC;

        memcpy(where_le, &root_le, sizeof(root_le));

        return 0;
}

static struct dm_space_map ops = {
        .destroy = sm_metadata_destroy,
        .extend = sm_metadata_extend,
        .get_nr_blocks = sm_metadata_get_nr_blocks,
        .get_nr_free = sm_metadata_get_nr_free,
        .get_count = sm_metadata_get_count,
        .count_is_more_than_one = sm_metadata_count_is_more_than_one,
        .set_count = sm_metadata_set_count,
        .inc_block = sm_metadata_inc_block,
        .dec_block = sm_metadata_dec_block,
        .new_block = sm_metadata_new_block,
        .commit = sm_metadata_commit,
        .root_size = sm_metadata_root_size,
        .copy_root = sm_metadata_copy_root
};
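
/*
 * A minimal usage sketch, assuming the generic dm_sm_* inline wrappers
 * from dm-space-map.h that call through the ops table above.  The helper
 * name is hypothetical and error handling is abbreviated; kept under
 * "#if 0" so it is not built.
 */
#if 0
static int example_alloc_and_ref(struct dm_space_map *sm)
{
        dm_block_t b;
        int r;

        r = dm_sm_new_block(sm, &b);    /* ends up in sm_metadata_new_block() */
        if (r)
                return r;

        r = dm_sm_inc_block(sm, b);     /* extra reference, e.g. a shared btree node */
        if (r)
                return r;

        r = dm_sm_dec_block(sm, b);     /* drop it again */
        if (r)
                return r;

        return dm_sm_commit(sm);        /* make the new reference counts durable */
}
#endif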

/*----------------------------------------------------------------*/

/*
 * When a new space map is created that manages its own space, we use this
 * tiny bootstrap allocator.
 */
static void sm_bootstrap_destroy(struct dm_space_map *sm)
{
}

static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
        DMERR("bootstrap doesn't support extend");

        return -EINVAL;
}

static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->ll.nr_blocks;

        return 0;
}

static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->ll.nr_blocks - smm->begin;

        return 0;
}

static int sm_bootstrap_get_count(struct dm_space_map *sm, dm_block_t b,
                                  uint32_t *result)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *result = (b < smm->begin) ? 1 : 0;

        return 0;
}

static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
                                               dm_block_t b, int *result)
{
        *result = 0;

        return 0;
}

static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
                                  uint32_t count)
{
        DMERR("bootstrap doesn't support set_count");

        return -EINVAL;
}

static int sm_bootstrap_new_block(struct dm_space_map *sm, dm_block_t *b)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        /*
         * We know the entire device is unused.
         */
        if (smm->begin == smm->ll.nr_blocks)
                return -ENOSPC;

        *b = smm->begin++;

        return 0;
}

static int sm_bootstrap_inc_block(struct dm_space_map *sm, dm_block_t b)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        return add_bop(smm, BOP_INC, b);
}

static int sm_bootstrap_dec_block(struct dm_space_map *sm, dm_block_t b)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        return add_bop(smm, BOP_DEC, b);
}

static int sm_bootstrap_commit(struct dm_space_map *sm)
{
        return 0;
}

static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
{
        DMERR("bootstrap doesn't support root_size");

        return -EINVAL;
}

static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
                                  size_t max)
{
        DMERR("bootstrap doesn't support copy_root");

        return -EINVAL;
}

static struct dm_space_map bootstrap_ops = {
        .destroy = sm_bootstrap_destroy,
        .extend = sm_bootstrap_extend,
        .get_nr_blocks = sm_bootstrap_get_nr_blocks,
        .get_nr_free = sm_bootstrap_get_nr_free,
        .get_count = sm_bootstrap_get_count,
        .count_is_more_than_one = sm_bootstrap_count_is_more_than_one,
        .set_count = sm_bootstrap_set_count,
        .inc_block = sm_bootstrap_inc_block,
        .dec_block = sm_bootstrap_dec_block,
        .new_block = sm_bootstrap_new_block,
        .commit = sm_bootstrap_commit,
        .root_size = sm_bootstrap_root_size,
        .copy_root = sm_bootstrap_copy_root
};

/*----------------------------------------------------------------*/

struct dm_space_map *dm_sm_metadata_init(void)
{
        struct sm_metadata *smm;

        smm = kmalloc(sizeof(*smm), GFP_KERNEL);
        if (!smm)
                return ERR_PTR(-ENOMEM);

        memcpy(&smm->sm, &ops, sizeof(smm->sm));

        return &smm->sm;
}

int dm_sm_metadata_create(struct dm_space_map *sm,
                          struct dm_transaction_manager *tm,
                          dm_block_t nr_blocks,
                          dm_block_t superblock)
{
        int r;
        dm_block_t i;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        smm->begin = superblock + 1;
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        smm->nr_uncommitted = 0;

        memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));

        r = sm_ll_new_metadata(&smm->ll, tm);
        if (r)
                return r;

        r = sm_ll_extend(&smm->ll, nr_blocks);
        if (r)
                return r;

        memcpy(&smm->sm, &ops, sizeof(smm->sm));

        /*
         * Now we need to update the newly created data structures with the
         * allocated blocks that they were built from.
         */
        for (i = superblock; !r && i < smm->begin; i++)
                r = sm_ll_inc(&smm->ll, i, &ev);

        if (r)
                return r;

        return sm_metadata_commit(sm);
}

int dm_sm_metadata_open(struct dm_space_map *sm,
                        struct dm_transaction_manager *tm,
                        void *root_le, size_t len)
{
        int r;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        r = sm_ll_open_metadata(&smm->ll, tm, root_le, len);
        if (r)
                return r;

        smm->begin = 0;
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        smm->nr_uncommitted = 0;

        memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
        return 0;
}
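
/*
 * A minimal bring-up sketch, assuming a transaction manager obtained from
 * dm-transaction-manager.c and the dm_sm_* wrappers from dm-space-map.h.
 * The helper name is hypothetical and error handling is abbreviated; kept
 * under "#if 0" so it is not built.
 */
#if 0
static struct dm_space_map *example_bring_up(struct dm_transaction_manager *tm,
                                             dm_block_t nr_blocks,
                                             dm_block_t superblock)
{
        int r;
        struct dm_space_map *sm = dm_sm_metadata_init();

        if (IS_ERR(sm))
                return sm;

        /*
         * Formatting a fresh device: the bootstrap ops hand out blocks
         * linearly until the on-disk btrees exist, then the full ops table
         * takes over and the first transaction is committed.
         */
        r = dm_sm_metadata_create(sm, tm, nr_blocks, superblock);
        if (r) {
                dm_sm_destroy(sm);
                return ERR_PTR(r);
        }

        /*
         * Re-opening an existing device would instead pass the root saved
         * by dm_sm_copy_root() to dm_sm_metadata_open(sm, tm, root, len).
         */
        return sm;
}
#endif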