/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"


/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
        struct hfs_btree *tree;
        struct hfs_btree_header_rec *head;
        struct address_space *mapping;
        struct inode *inode;
        struct page *page;
        unsigned int size;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree)
                return NULL;

        mutex_init(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        tree->sb = sb;
        tree->cnid = id;
        inode = hfsplus_iget(sb, id);
        if (IS_ERR(inode))
                goto free_tree;
        tree->inode = inode;

        if (!HFSPLUS_I(tree->inode)->first_blocks) {
                printk(KERN_ERR
                       "hfs: invalid btree extent records (0 size).\n");
                goto free_inode;
        }

        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
                goto free_inode;

        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) +
                sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
        tree->leaf_tail = be32_to_cpu(head->leaf_tail);
        tree->node_count = be32_to_cpu(head->node_count);
        tree->free_nodes = be32_to_cpu(head->free_nodes);
        tree->attributes = be32_to_cpu(head->attributes);
        tree->node_size = be16_to_cpu(head->node_size);
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);

        /* Verify the tree and set the correct compare function */
        switch (id) {
        case HFSPLUS_EXT_CNID:
                if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
                        printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
                                tree->max_key_len);
                        goto fail_page;
                }
                if (tree->attributes & HFS_TREE_VARIDXKEYS) {
                        printk(KERN_ERR "hfs: invalid extent btree flag\n");
                        goto fail_page;
                }

                tree->keycmp = hfsplus_ext_cmp_key;
                break;
        case HFSPLUS_CAT_CNID:
                if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
                        printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
                                tree->max_key_len);
                        goto fail_page;
                }
                if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
                        printk(KERN_ERR "hfs: invalid catalog btree flag\n");
                        goto fail_page;
                }

                if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
                    (head->key_type == HFSPLUS_KEY_BINARY))
                        tree->keycmp = hfsplus_cat_bin_cmp_key;
                else {
                        tree->keycmp = hfsplus_cat_case_cmp_key;
                        set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
                }
                break;
        default:
                printk(KERN_ERR "hfs: unknown B*Tree requested\n");
                goto fail_page;
        }

        if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
                printk(KERN_ERR "hfs: invalid btree flag\n");
                goto fail_page;
        }

        size = tree->node_size;
        if (!is_power_of_2(size))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;

        tree->node_size_shift = ffs(size) - 1;

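        /* number of page cache pages needed to back one btree node */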
        tree->pages_per_bnode =
                (tree->node_size + PAGE_CACHE_SIZE - 1) >>
                PAGE_CACHE_SHIFT;

        kunmap(page);
        page_cache_release(page);
        return tree;

 fail_page:
        kunmap(page);
        page_cache_release(page);
 free_inode:
        tree->inode->i_mapping->a_ops = &hfsplus_aops;
        iput(tree->inode);
 free_tree:
        kfree(tree);
        return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
        struct hfs_bnode *node;
        int i;

        if (!tree)
                return;

        for (i = 0; i < NODE_HASH_SIZE; i++) {
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
                                printk(KERN_CRIT "hfs: node %d:%d "
                                                "still has %d user(s)!\n",
                                        node->tree->cnid, node->this,
                                        atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
                }
        }
        iput(tree->inode);
        kfree(tree);
}

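/* Write the in-memory B*Tree header fields back to the header node (node 0) */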
void hfs_btree_write(struct hfs_btree *tree)
{
        struct hfs_btree_header_rec *head;
        struct hfs_bnode *node;
        struct page *page;

        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                /* panic? */
                return;
        /* Load the header */
        page = node->page[0];
        head = (struct hfs_btree_header_rec *)(kmap(page) +
                sizeof(struct hfs_bnode_desc));

        head->root = cpu_to_be32(tree->root);
        head->leaf_count = cpu_to_be32(tree->leaf_count);
        head->leaf_head = cpu_to_be32(tree->leaf_head);
        head->leaf_tail = cpu_to_be32(tree->leaf_tail);
        head->node_count = cpu_to_be32(tree->node_count);
        head->free_nodes = cpu_to_be32(tree->free_nodes);
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);

        kunmap(page);
        set_page_dirty(page);
        hfs_bnode_put(node);
}

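/* Create a new map node at @idx and link it after @prev in the map-node chain */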
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));
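        /*
         * Record 0 of a map node is the allocation bitmap itself; it starts
         * right after the 14-byte node descriptor, and its offset and the
         * free-space offset are stored in the offset table at the end of the
         * node.  Setting 0x8000 marks the first node covered by this map
         * (the map node itself) as allocated.
         */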
        hfs_bnode_write_u16(node, 14, 0x8000);
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}

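/*
 * Allocate a node from the B*Tree allocation bitmap, growing the tree file
 * first if no free nodes are left.
 */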
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        unsigned off;
        u16 off16;
        u16 len;
        u8 *data, byte, m;
        int i;

        while (!tree->free_nodes) {
                struct inode *inode = tree->inode;
                struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
                u32 count;
                int res;

                res = hfsplus_file_extend(inode);
                if (res)
                        return ERR_PTR(res);
                hip->phys_size = inode->i_size =
                        (loff_t)hip->alloc_blocks <<
                                HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
                hip->fs_blocks =
                        hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes = count - tree->node_count;
                tree->node_count = count;
        }

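        /*
         * Scan the allocation bitmap for the first clear bit, starting with
         * the map record (record 2) in the header node and following the
         * chain of map nodes from there.
         */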
        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
        len = hfs_brec_lenoff(node, 2, &off16);
        off = off16;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_CACHE_MASK;
        idx = 0;

        for (;;) {
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                return hfs_bnode_create(tree,
                                                        idx);
                                        }
                                }
                        }
                        if (++off >= PAGE_CACHE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
                        dprint(DBG_BNODE_MOD, "hfs: create new bmap node.\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                len = hfs_brec_lenoff(node, 0, &off16);
                off = off16;
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_CACHE_MASK;
        }
}

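/* Clear the allocation bitmap bit for @node, returning it to the free pool */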
void hfs_bmap_free(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct page *page;
        u16 off, len;
        u32 nidx;
        u8 *data, byte, m;

        dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
        BUG_ON(!node->this);
        tree = node->tree;
        nidx = node->this;
        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                return;
        len = hfs_brec_lenoff(node, 2, &off);
        while (nidx >= len * 8) {
                u32 i;

                nidx -= len * 8;
                i = node->next;
                if (!i) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: unable to free bnode %u. "
                                        "bmap not found!\n",
                                node->this);
                        hfs_bnode_put(node);
                        return;
                }
                hfs_bnode_put(node);
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: invalid bmap found! "
                                        "(%u,%d)\n",
                                node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
                len = hfs_brec_lenoff(node, 0, &off);
        }
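        /*
         * nidx is now the bit number within this map node's bitmap; locate
         * the byte that holds it and build the mask (the most significant
         * bit of each byte maps the lowest-numbered node).
         */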
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_CACHE_SHIFT];
        data = kmap(page);
        off &= ~PAGE_CACHE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                printk(KERN_CRIT "hfs: trying to free free bnode "
                                "%u(%d)\n",
                        node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
        kunmap(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
}