pandora-kernel.git: drivers/md/persistent-data/dm-btree-remove.c
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree.h"
#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>

/*
 * Removing an entry from a btree
 * ==============================
 *
 * A very important constraint for our btree is that no node, except the
 * root, may have fewer than a certain number of entries.
 * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
 *
 * Ensuring this is complicated by the way we want to only ever hold the
 * locks on 2 nodes concurrently, and only change nodes in a top to bottom
 * fashion.
 *
 * Each node may have a left or right sibling.  When descending the spine,
 * if a node contains only MIN_ENTRIES then we try to increase this to at
 * least MIN_ENTRIES + 1.  We do this in the following ways:
 *
 * [A] No siblings => this can only happen if the node is the root, in which
 *     case we copy the child's contents over the root.
 *
 * [B] No left sibling
 *     ==> rebalance(node, right sibling)
 *
 * [C] No right sibling
 *     ==> rebalance(left sibling, node)
 *
 * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
 *     ==> delete the node, adding its contents to left and right
 *
 * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
 *     ==> rebalance(left, node, right)
 *
 * After these operations it's possible that our original node no longer
 * contains the desired subtree.  For this reason the rebalancing is
 * performed on the children of the current node.  This also avoids having
 * a special case for the root.
 *
 * Once this rebalancing has occurred we can then step into the child node
 * (for internal nodes), or delete the entry (for leaf nodes).
 */

/*
 * Some little utilities for moving node data around.
 */
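/*
 * Shifts the entries within a single node.  A negative shift drops the
 * first -shift entries by moving the rest to the front of the node; a
 * positive shift moves everything up to leave room for shift new entries
 * at the front.  The caller is responsible for updating nr_entries.
 */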
static void node_shift(struct btree_node *n, int shift)
{
        uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
        uint32_t value_size = le32_to_cpu(n->header.value_size);

        if (shift < 0) {
                shift = -shift;
                BUG_ON(shift > nr_entries);
                BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift, value_size));
                memmove(key_ptr(n, 0),
                        key_ptr(n, shift),
                        (nr_entries - shift) * sizeof(__le64));
                memmove(value_ptr(n, 0, value_size),
                        value_ptr(n, shift, value_size),
                        (nr_entries - shift) * value_size);
        } else {
                BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
                memmove(key_ptr(n, shift),
                        key_ptr(n, 0),
                        nr_entries * sizeof(__le64));
                memmove(value_ptr(n, shift, value_size),
                        value_ptr(n, 0, value_size),
                        nr_entries * value_size);
        }
}

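/*
 * Copies entries between two sibling nodes.  A negative shift appends the
 * first -shift entries of right onto the end of left; a positive shift
 * copies the last shift entries of left into the (already shifted) front
 * of right.  Again, the caller updates the entry counts.
 */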
static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
{
        uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
        uint32_t value_size = le32_to_cpu(left->header.value_size);
        BUG_ON(value_size != le32_to_cpu(right->header.value_size));

        if (shift < 0) {
                shift = -shift;
                BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
                memcpy(key_ptr(left, nr_left),
                       key_ptr(right, 0),
                       shift * sizeof(__le64));
                memcpy(value_ptr(left, nr_left, value_size),
                       value_ptr(right, 0, value_size),
                       shift * value_size);
        } else {
                BUG_ON(shift > le32_to_cpu(right->header.max_entries));
                memcpy(key_ptr(right, 0),
                       key_ptr(left, nr_left - shift),
                       shift * sizeof(__le64));
                memcpy(value_ptr(right, 0, value_size),
                       value_ptr(left, nr_left - shift, value_size),
                       shift * value_size);
        }
}

/*
 * Delete a specific entry from a node (used on both leaf and internal
 * nodes), closing the gap by moving later entries down.
 */
static void delete_at(struct btree_node *n, unsigned index)
{
        unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
        unsigned nr_to_copy = nr_entries - (index + 1);
        uint32_t value_size = le32_to_cpu(n->header.value_size);
        BUG_ON(index >= nr_entries);

        if (nr_to_copy) {
                memmove(key_ptr(n, index),
                        key_ptr(n, index + 1),
                        nr_to_copy * sizeof(__le64));

                memmove(value_ptr(n, index, value_size),
                        value_ptr(n, index + 1, value_size),
                        nr_to_copy * value_size);
        }

        n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}

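/*
 * A node whose entry count has dropped to around a third of max_entries
 * is a candidate for merging with a sibling rather than rebalancing.
 */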
static unsigned merge_threshold(struct btree_node *n)
{
        return le32_to_cpu(n->header.max_entries) / 3;
}

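/*
 * Everything we need to manipulate a child node during rebalancing: its
 * index within the parent, its (shadowed) block and a pointer to the
 * node data.
 */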
struct child {
        unsigned index;
        struct dm_block *block;
        struct btree_node *n;
};

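/*
 * Shadows the child at @index and repoints the parent's value at the
 * shadow's new location.  If the block had to be copied, the reference
 * counts of its children are incremented.
 */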
static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
                      struct btree_node *parent,
                      unsigned index, struct child *result)
{
        int r, inc;
        dm_block_t root;

        result->index = index;
        root = value64(parent, index);

        r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
                               &result->block, &inc);
        if (r)
                return r;

        result->n = dm_block_data(result->block);

        if (inc)
                inc_children(info->tm, result->n, vt);

        *((__le64 *) value_ptr(parent, index, sizeof(__le64))) =
                cpu_to_le64(dm_block_location(result->block));

        return 0;
}

static int exit_child(struct dm_btree_info *info, struct child *c)
{
        return dm_tm_unlock(info->tm, c->block);
}

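/*
 * Moves entries between two adjacent siblings: a positive count moves
 * count entries from the back of left to the front of right, a negative
 * count moves -count entries from the front of right to the back of
 * left.  Both entry counts are updated.
 */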
static void shift(struct btree_node *left, struct btree_node *right, int count)
{
        uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
        uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
        uint32_t max_entries = le32_to_cpu(left->header.max_entries);
        uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);

        BUG_ON(max_entries != r_max_entries);
        BUG_ON(nr_left - count > max_entries);
        BUG_ON(nr_right + count > max_entries);

        if (!count)
                return;

        if (count > 0) {
                node_shift(right, count);
                node_copy(left, right, count);
        } else {
                node_copy(left, right, count);
                node_shift(right, count);
        }

        left->header.nr_entries = cpu_to_le32(nr_left - count);
        right->header.nr_entries = cpu_to_le32(nr_right + count);
}

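/*
 * Rebalances a pair of adjacent children.  If their combined entry count
 * is below the merge threshold, right is folded into left and removed
 * from the parent; otherwise the entries are shared out evenly and the
 * separating key in the parent is updated.
 */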
static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
                         struct child *l, struct child *r)
{
        struct btree_node *left = l->n;
        struct btree_node *right = r->n;
        uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
        uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
        unsigned threshold = 2 * merge_threshold(left) + 1;

        if (nr_left + nr_right < threshold) {
                /*
                 * Merge
                 */
                node_copy(left, right, -nr_right);
                left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
                delete_at(parent, r->index);

                /*
                 * We need to decrement the right block, but not its
                 * children, since they're still referenced by left.
                 */
                dm_tm_dec(info->tm, dm_block_location(r->block));
        } else {
                /*
                 * Rebalance.
                 */
                unsigned target_left = (nr_left + nr_right) / 2;
                shift(left, right, nr_left - target_left);
                *key_ptr(parent, r->index) = right->keys[0];
        }
}

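/*
 * Shadows the child at left_index and its right-hand sibling, rebalances
 * the pair with __rebalance2() and drops both locks.
 */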
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
                      struct dm_btree_value_type *vt, unsigned left_index)
{
        int r;
        struct btree_node *parent;
        struct child left, right;

        parent = dm_block_data(shadow_current(s));

        r = init_child(info, vt, parent, left_index, &left);
        if (r)
                return r;

        r = init_child(info, vt, parent, left_index + 1, &right);
        if (r) {
                exit_child(info, &left);
                return r;
        }

        __rebalance2(info, parent, &left, &right);

        r = exit_child(info, &left);
        if (r) {
                exit_child(info, &right);
                return r;
        }

        return exit_child(info, &right);
}

/*
 * We dump as many entries from the center node as possible into left,
 * then the rest into right, then call rebalance2.  This wastes some CPU,
 * but keeps things simple for now.
 */
static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
                               struct child *l, struct child *c, struct child *r,
                               struct btree_node *left, struct btree_node *center, struct btree_node *right,
                               uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
        uint32_t max_entries = le32_to_cpu(left->header.max_entries);
        unsigned shift = min(max_entries - nr_left, nr_center);

        BUG_ON(nr_left + shift > max_entries);
        node_copy(left, center, -shift);
        left->header.nr_entries = cpu_to_le32(nr_left + shift);

        if (shift != nr_center) {
                shift = nr_center - shift;
                BUG_ON((nr_right + shift) > max_entries);
                node_shift(right, shift);
                node_copy(center, right, shift);
                right->header.nr_entries = cpu_to_le32(nr_right + shift);
        }
        *key_ptr(parent, r->index) = right->keys[0];

        delete_at(parent, c->index);
        r->index--;

        dm_tm_dec(info->tm, dm_block_location(c->block));
        __rebalance2(info, parent, l, r);
}

/*
 * Redistributes entries among 3 sibling nodes.
 */
static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
                          struct child *l, struct child *c, struct child *r,
                          struct btree_node *left, struct btree_node *center, struct btree_node *right,
                          uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
        int s;
        uint32_t max_entries = le32_to_cpu(left->header.max_entries);
        unsigned total = nr_left + nr_center + nr_right;
        unsigned target_right = total / 3;
        unsigned remainder = (target_right * 3) != total;
        unsigned target_left = target_right + remainder;

        BUG_ON(target_left > max_entries);
        BUG_ON(target_right > max_entries);

        if (nr_left < nr_right) {
                s = nr_left - target_left;

                if (s < 0 && nr_center < -s) {
                        /* not enough in central node */
                        shift(left, center, -nr_center);
                        s += nr_center;
                        shift(left, right, s);
                        nr_right += s;
                } else
                        shift(left, center, s);

                shift(center, right, target_right - nr_right);

        } else {
                s = target_right - nr_right;
                if (s > 0 && nr_center < s) {
                        /* not enough in central node */
                        shift(center, right, nr_center);
                        s -= nr_center;
                        shift(left, right, s);
                        nr_left -= s;
                } else
                        shift(center, right, s);

                shift(left, center, nr_left - target_left);
        }

        *key_ptr(parent, c->index) = center->keys[0];
        *key_ptr(parent, r->index) = right->keys[0];
}

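/*
 * Rebalances three adjacent children.  If their combined entry count is
 * below the merge threshold the center node is emptied into its siblings
 * and deleted; otherwise the entries are redistributed across all three.
 */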
static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
                         struct child *l, struct child *c, struct child *r)
{
        struct btree_node *left = l->n;
        struct btree_node *center = c->n;
        struct btree_node *right = r->n;

        uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
        uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
        uint32_t nr_right = le32_to_cpu(right->header.nr_entries);

        unsigned threshold = merge_threshold(left) * 4 + 1;

        BUG_ON(left->header.max_entries != center->header.max_entries);
        BUG_ON(center->header.max_entries != right->header.max_entries);

        if ((nr_left + nr_center + nr_right) < threshold)
                delete_center_node(info, parent, l, c, r, left, center, right,
                                   nr_left, nr_center, nr_right);
        else
                redistribute3(info, parent, l, c, r, left, center, right,
                              nr_left, nr_center, nr_right);
}

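/*
 * Shadows three adjacent children starting at left_index, rebalances them
 * with __rebalance3() and drops the locks.
 */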
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
                      struct dm_btree_value_type *vt, unsigned left_index)
{
        int r;
        struct btree_node *parent = dm_block_data(shadow_current(s));
        struct child left, center, right;

        /*
         * FIXME: fill out an array?
         */
        r = init_child(info, vt, parent, left_index, &left);
        if (r)
                return r;

        r = init_child(info, vt, parent, left_index + 1, &center);
        if (r) {
                exit_child(info, &left);
                return r;
        }

        r = init_child(info, vt, parent, left_index + 2, &right);
        if (r) {
                exit_child(info, &left);
                exit_child(info, &center);
                return r;
        }

        __rebalance3(info, parent, &left, &center, &right);

        r = exit_child(info, &left);
        if (r) {
                exit_child(info, &center);
                exit_child(info, &right);
                return r;
        }

        r = exit_child(info, &center);
        if (r) {
                exit_child(info, &right);
                return r;
        }

        r = exit_child(info, &right);
        if (r)
                return r;

        return 0;
}

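/*
 * Reads a node purely to find out how many entries it currently holds.
 */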
static int get_nr_entries(struct dm_transaction_manager *tm,
                          dm_block_t b, uint32_t *result)
{
        int r;
        struct dm_block *block;
        struct btree_node *n;

        r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
        if (r)
                return r;

        n = dm_block_data(block);
        *result = le32_to_cpu(n->header.nr_entries);

        return dm_tm_unlock(tm, block);
}

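/*
 * Makes sure the child covering @key has more than the minimum number of
 * entries before we descend into it.  A node left with a single child
 * (only possible for the root) is overwritten with that child's contents;
 * otherwise the child is rebalanced with whichever siblings it has.
 */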
static int rebalance_children(struct shadow_spine *s,
                              struct dm_btree_info *info,
                              struct dm_btree_value_type *vt, uint64_t key)
{
        int i, r, has_left_sibling, has_right_sibling;
        uint32_t child_entries;
        struct btree_node *n;

        n = dm_block_data(shadow_current(s));

        if (le32_to_cpu(n->header.nr_entries) == 1) {
                struct dm_block *child;
                dm_block_t b = value64(n, 0);

                r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
                if (r)
                        return r;

                memcpy(n, dm_block_data(child),
                       dm_bm_block_size(dm_tm_get_bm(info->tm)));
                r = dm_tm_unlock(info->tm, child);
                if (r)
                        return r;

                dm_tm_dec(info->tm, dm_block_location(child));
                return 0;
        }

        i = lower_bound(n, key);
        if (i < 0)
                return -ENODATA;

        r = get_nr_entries(info->tm, value64(n, i), &child_entries);
        if (r)
                return r;

        has_left_sibling = i > 0;
        has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);

        if (!has_left_sibling)
                r = rebalance2(s, info, vt, i);

        else if (!has_right_sibling)
                r = rebalance2(s, info, vt, i - 1);

        else
                r = rebalance3(s, info, vt, i - 1);

        return r;
}

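/*
 * Looks up @key in a leaf node, returning its index via @index, or
 * -ENODATA if the key is not present.
 */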
static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
{
        int i = lower_bound(n, key);

        if ((i < 0) ||
            (i >= le32_to_cpu(n->header.nr_entries)) ||
            (le64_to_cpu(n->keys[i]) != key))
                return -ENODATA;

        *index = i;

        return 0;
}

/*
 * Prepares for removal from one level of the hierarchy.  The caller must
 * call delete_at() to remove the entry at index.
 */
static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
                      struct dm_btree_value_type *vt, dm_block_t root,
                      uint64_t key, unsigned *index)
{
        int i = *index, r;
        struct btree_node *n;

        for (;;) {
                r = shadow_step(s, root, vt);
                if (r < 0)
                        break;

                /*
                 * We have to patch up the parent node.  It's ugly, but I
                 * don't see a way to do this automatically as part of the
                 * spine op.
                 */
                if (shadow_has_parent(s)) {
                        __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
                        memcpy(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(__le64)),
                               &location, sizeof(__le64));
                }

                n = dm_block_data(shadow_current(s));

                if (le32_to_cpu(n->header.flags) & LEAF_NODE)
                        return do_leaf(n, key, index);

                r = rebalance_children(s, info, vt, key);
                if (r)
                        break;

                n = dm_block_data(shadow_current(s));
                if (le32_to_cpu(n->header.flags) & LEAF_NODE)
                        return do_leaf(n, key, index);

                i = lower_bound(n, key);

                /*
                 * We know the key is present, or else
                 * rebalance_children would have returned
                 * -ENODATA.
                 */
                root = value64(n, i);
        }

        return r;
}

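/*
 * Removes one entry, identified by one key per btree level.  Each level
 * is walked with remove_raw(), which rebalances as it descends; at the
 * final level the value is passed to the value type's dec method (if
 * any) and the entry is deleted from the leaf.
 */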
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
                    uint64_t *keys, dm_block_t *new_root)
{
        unsigned level, last_level = info->levels - 1;
        int index = 0, r = 0;
        struct shadow_spine spine;
        struct btree_node *n;
        struct dm_btree_value_type le64_vt;

        init_le64_type(info->tm, &le64_vt);
        init_shadow_spine(&spine, info);
        for (level = 0; level < info->levels; level++) {
                r = remove_raw(&spine, info,
                               (level == last_level ?
                                &info->value_type : &le64_vt),
                               root, keys[level], (unsigned *)&index);
                if (r < 0)
                        break;

                n = dm_block_data(shadow_current(&spine));
                if (level != last_level) {
                        root = value64(n, index);
                        continue;
                }

                BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));

                if (info->value_type.dec)
                        info->value_type.dec(info->value_type.context,
                                             value_ptr(n, index, info->value_type.size));

                delete_at(n, index);
        }

        *new_root = shadow_root(&spine);
        exit_shadow_spine(&spine);

        return r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove);