[PATCH] ntfs build fix
[pandora-kernel.git] / fs / ntfs / aops.c
1 /**
2  * aops.c - NTFS kernel address space operations and page cache handling.
3  *          Part of the Linux-NTFS project.
4  *
5  * Copyright (c) 2001-2005 Anton Altaparmakov
6  * Copyright (c) 2002 Richard Russon
7  *
8  * This program/include file is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License as published
10  * by the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program/include file is distributed in the hope that it will be
14  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program (in the main directory of the Linux-NTFS
20  * distribution in the file COPYING); if not, write to the Free Software
21  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
22  */
23
24 #include <linux/errno.h>
25 #include <linux/mm.h>
26 #include <linux/pagemap.h>
27 #include <linux/swap.h>
28 #include <linux/buffer_head.h>
29 #include <linux/writeback.h>
30 #include <linux/bit_spinlock.h>
31
32 #include "aops.h"
33 #include "attrib.h"
34 #include "debug.h"
35 #include "inode.h"
36 #include "mft.h"
37 #include "runlist.h"
38 #include "types.h"
39 #include "ntfs.h"
40
41 /**
42  * ntfs_end_buffer_async_read - async io completion for reading attributes
43  * @bh:         buffer head on which io is completed
44  * @uptodate:   whether @bh is now uptodate or not
45  *
46  * Asynchronous I/O completion handler for reading pages belonging to the
47  * attribute address space of an inode.  The inodes can be files,
48  * directories, or fake inodes describing some attribute.
49  *
50  * If NInoMstProtected(), perform the post read mst fixups when all IO on the
51  * page has been completed and mark the page uptodate or set the error bit on
52  * the page.  To determine the size of the records that need fixing up, we
53  * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs
54  * record size, and index_block_size_bits, to the log(base 2) of the ntfs
55  * record size.
56  */
57 static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
58 {
59         unsigned long flags;
60         struct buffer_head *first, *tmp;
61         struct page *page;
62         ntfs_inode *ni;
63         int page_uptodate = 1;
64
65         page = bh->b_page;
66         ni = NTFS_I(page->mapping->host);
67
68         if (likely(uptodate)) {
69                 s64 file_ofs, initialized_size;
70
71                 set_buffer_uptodate(bh);
72
73                 file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
74                                 bh_offset(bh);
75                 read_lock_irqsave(&ni->size_lock, flags);
76                 initialized_size = ni->initialized_size;
77                 read_unlock_irqrestore(&ni->size_lock, flags);
78                 /* Check for the current buffer head overflowing. */
79                 if (file_ofs + bh->b_size > initialized_size) {
80                         char *addr;
81                         int ofs = 0;
82
83                         if (file_ofs < initialized_size)
84                                 ofs = initialized_size - file_ofs;
85                         addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
86                         memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
87                         flush_dcache_page(page);
88                         kunmap_atomic(addr, KM_BIO_SRC_IRQ);
89                 }
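                /*
                 * Illustrative example, not from the original source (the
                 * figures are made up): with a 4096-byte block size,
                 * initialized_size == 12800 and file_ofs == 12288, the check
                 * above computes ofs == 512 and zeroes bytes 512..4095 of
                 * the buffer; a buffer lying wholly beyond initialized_size
                 * keeps ofs == 0 and is zeroed completely.
                 */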
90         } else {
91                 clear_buffer_uptodate(bh);
92                 SetPageError(page);
93                 ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.",
94                                 (unsigned long long)bh->b_blocknr);
95         }
96         first = page_buffers(page);
97         local_irq_save(flags);
98         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
99         clear_buffer_async_read(bh);
100         unlock_buffer(bh);
101         tmp = bh;
102         do {
103                 if (!buffer_uptodate(tmp))
104                         page_uptodate = 0;
105                 if (buffer_async_read(tmp)) {
106                         if (likely(buffer_locked(tmp)))
107                                 goto still_busy;
108                         /* Async buffers must be locked. */
109                         BUG();
110                 }
111                 tmp = tmp->b_this_page;
112         } while (tmp != bh);
113         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
114         local_irq_restore(flags);
115         /*
116          * If none of the buffers had errors then we can set the page uptodate,
117          * but we first have to perform the post read mst fixups, if the
118          * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
119          * Note we ignore fixup errors as those are detected when
120          * map_mft_record() is called which gives us per record granularity
121          * rather than per page granularity.
122          */
123         if (!NInoMstProtected(ni)) {
124                 if (likely(page_uptodate && !PageError(page)))
125                         SetPageUptodate(page);
126         } else {
127                 char *addr;
128                 unsigned int i, recs;
129                 u32 rec_size;
130
131                 rec_size = ni->itype.index.block_size;
132                 recs = PAGE_CACHE_SIZE / rec_size;
133                 /* Should have been verified before we got here... */
134                 BUG_ON(!recs);
135                 addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
136                 for (i = 0; i < recs; i++)
137                         post_read_mst_fixup((NTFS_RECORD*)(addr +
138                                         i * rec_size), rec_size);
139                 flush_dcache_page(page);
140                 kunmap_atomic(addr, KM_BIO_SRC_IRQ);
141                 if (likely(page_uptodate && !PageError(page)))
142                         SetPageUptodate(page);
143         }
144         unlock_page(page);
145         return;
146 still_busy:
147         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
148         local_irq_restore(flags);
149         return;
150 }
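
/*
 * Conceptual sketch of the multi sector transfer (mst) fixups referred to
 * above; this is not code from the driver (the real helpers live in
 * fs/ntfs/mst.c) but an illustration of the idea, assuming the usual NTFS
 * update sequence array layout.  On disk, the last two bytes of every
 * NTFS_BLOCK_SIZE (512 byte) chunk of a protected record are replaced with
 * the update sequence number (USN) and the displaced values are stored in
 * the record's update sequence array.  post_read_mst_fixup() roughly does:
 *
 *	le16 *usa = (le16*)((u8*)rec + le16_to_cpu(rec->usa_ofs));
 *	le16 usn = usa[0];
 *	u16 i;
 *
 *	for (i = 1; i < le16_to_cpu(rec->usa_count); i++) {
 *		le16 *p = (le16*)((u8*)rec + i * NTFS_BLOCK_SIZE) - 1;
 *		if (*p != usn)
 *			return -EINVAL;   (incomplete multi sector transfer)
 *		*p = usa[i];              (restore the displaced bytes)
 *	}
 *
 * pre_write_mst_fixup() applies the protection before a record is written
 * out and post_write_mst_fixup() undoes it again in memory once the write
 * has been submitted.
 */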
151
152 /**
153  * ntfs_read_block - fill a @page of an address space with data
154  * @page:       page cache page to fill with data
155  *
156  * Fill the page @page of the address space of the @page->mapping->host inode.
157  * We read each buffer asynchronously and when all buffers are read in, our io
158  * completion handler ntfs_end_buffer_async_read(), if required, automatically
159  * applies the mst fixups to the page before finally marking it uptodate and
160  * unlocking it.
161  *
162  * We only enforce the allocated_size limit because i_size is checked for in
163  * generic_file_read().
164  *
165  * Return 0 on success and -errno on error.
166  *
167  * Contains an adapted version of fs/buffer.c::block_read_full_page().
168  */
169 static int ntfs_read_block(struct page *page)
170 {
171         VCN vcn;
172         LCN lcn;
173         ntfs_inode *ni;
174         ntfs_volume *vol;
175         runlist_element *rl;
176         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
177         sector_t iblock, lblock, zblock;
178         unsigned long flags;
179         unsigned int blocksize, vcn_ofs;
180         int i, nr;
181         unsigned char blocksize_bits;
182
183         ni = NTFS_I(page->mapping->host);
184         vol = ni->vol;
185
186         /* $MFT/$DATA must have its complete runlist in memory at all times. */
187         BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
188
189         blocksize_bits = VFS_I(ni)->i_blkbits;
190         blocksize = 1 << blocksize_bits;
191
192         if (!page_has_buffers(page)) {
193                 create_empty_buffers(page, blocksize, 0);
194                 if (unlikely(!page_has_buffers(page))) {
195                         unlock_page(page);
196                         return -ENOMEM;
197                 }
198         }
199         bh = head = page_buffers(page);
200         BUG_ON(!bh);
201
202         iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
203         read_lock_irqsave(&ni->size_lock, flags);
204         lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
205         zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
206         read_unlock_irqrestore(&ni->size_lock, flags);
207
208         /* Loop through all the buffers in the page. */
209         rl = NULL;
210         nr = i = 0;
211         do {
212                 u8 *kaddr;
213                 int err;
214
215                 if (unlikely(buffer_uptodate(bh)))
216                         continue;
217                 if (unlikely(buffer_mapped(bh))) {
218                         arr[nr++] = bh;
219                         continue;
220                 }
221                 err = 0;
222                 bh->b_bdev = vol->sb->s_bdev;
223                 /* Is the block within the allowed limits? */
224                 if (iblock < lblock) {
225                         BOOL is_retry = FALSE;
226
227                         /* Convert iblock into corresponding vcn and offset. */
228                         vcn = (VCN)iblock << blocksize_bits >>
229                                         vol->cluster_size_bits;
230                         vcn_ofs = ((VCN)iblock << blocksize_bits) &
231                                         vol->cluster_size_mask;
232                         if (!rl) {
233 lock_retry_remap:
234                                 down_read(&ni->runlist.lock);
235                                 rl = ni->runlist.rl;
236                         }
237                         if (likely(rl != NULL)) {
238                                 /* Seek to element containing target vcn. */
239                                 while (rl->length && rl[1].vcn <= vcn)
240                                         rl++;
241                                 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
242                         } else
243                                 lcn = LCN_RL_NOT_MAPPED;
244                         /* Successful remap. */
245                         if (lcn >= 0) {
246                                 /* Setup buffer head to correct block. */
247                                 bh->b_blocknr = ((lcn << vol->cluster_size_bits)
248                                                 + vcn_ofs) >> blocksize_bits;
249                                 set_buffer_mapped(bh);
250                                 /* Only read initialized data blocks. */
251                                 if (iblock < zblock) {
252                                         arr[nr++] = bh;
253                                         continue;
254                                 }
255                                 /* Fully non-initialized data block, zero it. */
256                                 goto handle_zblock;
257                         }
258                         /* It is a hole, need to zero it. */
259                         if (lcn == LCN_HOLE)
260                                 goto handle_hole;
261                         /* If first try and runlist unmapped, map and retry. */
262                         if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
263                                 is_retry = TRUE;
264                                 /*
265                                  * Attempt to map runlist, dropping lock for
266                                  * the duration.
267                                  */
268                                 up_read(&ni->runlist.lock);
269                                 err = ntfs_map_runlist(ni, vcn);
270                                 if (likely(!err))
271                                         goto lock_retry_remap;
272                                 rl = NULL;
273                         } else if (!rl)
274                                 up_read(&ni->runlist.lock);
275                         /*
276                          * If buffer is outside the runlist, treat it as a
277                          * hole.  This can happen due to concurrent truncate
278                          * for example.
279                          */
280                         if (err == -ENOENT || lcn == LCN_ENOENT) {
281                                 err = 0;
282                                 goto handle_hole;
283                         }
284                         /* Hard error, zero out region. */
285                         if (!err)
286                                 err = -EIO;
287                         bh->b_blocknr = -1;
288                         SetPageError(page);
289                         ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
290                                         "attribute type 0x%x, vcn 0x%llx, "
291                                         "offset 0x%x because its location on "
292                                         "disk could not be determined%s "
293                                         "(error code %i).", ni->mft_no,
294                                         ni->type, (unsigned long long)vcn,
295                                         vcn_ofs, is_retry ? " even after "
296                                         "retrying" : "", err);
297                 }
298                 /*
299                  * Either iblock was outside lblock limits or
300                  * ntfs_rl_vcn_to_lcn() returned an error.  Just zero that portion
301                  * of the page and set the buffer uptodate.
302                  */
303 handle_hole:
304                 bh->b_blocknr = -1UL;
305                 clear_buffer_mapped(bh);
306 handle_zblock:
307                 kaddr = kmap_atomic(page, KM_USER0);
308                 memset(kaddr + i * blocksize, 0, blocksize);
309                 kunmap_atomic(kaddr, KM_USER0);
310                 flush_dcache_page(page);
311                 if (likely(!err))
312                         set_buffer_uptodate(bh);
313         } while (i++, iblock++, (bh = bh->b_this_page) != head);
314
315         /* Release the lock if we took it. */
316         if (rl)
317                 up_read(&ni->runlist.lock);
318
319         /* Check we have at least one buffer ready for i/o. */
320         if (nr) {
321                 struct buffer_head *tbh;
322
323                 /* Lock the buffers. */
324                 for (i = 0; i < nr; i++) {
325                         tbh = arr[i];
326                         lock_buffer(tbh);
327                         tbh->b_end_io = ntfs_end_buffer_async_read;
328                         set_buffer_async_read(tbh);
329                 }
330                 /* Finally, start i/o on the buffers. */
331                 for (i = 0; i < nr; i++) {
332                         tbh = arr[i];
333                         if (likely(!buffer_uptodate(tbh)))
334                                 submit_bh(READ, tbh);
335                         else
336                                 ntfs_end_buffer_async_read(tbh, 1);
337                 }
338                 return 0;
339         }
340         /* No i/o was scheduled on any of the buffers. */
341         if (likely(!PageError(page)))
342                 SetPageUptodate(page);
343         else /* Signal synchronous i/o error. */
344                 nr = -EIO;
345         unlock_page(page);
346         return nr;
347 }
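
/*
 * Worked example for the runlist lookup and block arithmetic above; the
 * numbers are hypothetical and purely illustrative.  A runlist is an array
 * of runlist_element entries { vcn, lcn, length } terminated by an element
 * with zero length, e.g.:
 *
 *	{ vcn 0, lcn 100, length 4 }, { vcn 4, lcn 250, length 2 }, terminator
 *
 * With 4096-byte clusters and 512-byte blocks, iblock 35 gives
 * vcn = (35 << 9) >> 12 = 4 and vcn_ofs = (35 << 9) & 4095 = 1536.  The seek
 * loop stops at the second element, ntfs_rl_vcn_to_lcn() returns lcn 250,
 * and the buffer is mapped to device block
 * ((250 << 12) + 1536) >> 9 = 2003.
 */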
348
349 /**
350  * ntfs_readpage - fill a @page of a @file with data from the device
351  * @file:       open file to which the page @page belongs or NULL
352  * @page:       page cache page to fill with data
353  *
354  * For non-resident attributes, ntfs_readpage() fills the @page of the open
355  * file @file by calling the ntfs version of the generic block_read_full_page()
356  * function, ntfs_read_block(), which in turn creates and reads in the buffers
357  * associated with the page asynchronously.
358  *
359  * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
360  * data from the mft record (which at this stage is most likely in memory) and
361  * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
362  * even if the mft record is not cached at this point in time, we need to wait
363  * for it to be read in before we can do the copy.
364  *
365  * Return 0 on success and -errno on error.
366  */
367 static int ntfs_readpage(struct file *file, struct page *page)
368 {
369         ntfs_inode *ni, *base_ni;
370         u8 *kaddr;
371         ntfs_attr_search_ctx *ctx;
372         MFT_RECORD *mrec;
373         unsigned long flags;
374         u32 attr_len;
375         int err = 0;
376
377 retry_readpage:
378         BUG_ON(!PageLocked(page));
379         /*
380          * This can potentially happen because we clear PageUptodate() during
381          * ntfs_writepage() of MstProtected() attributes.
382          */
383         if (PageUptodate(page)) {
384                 unlock_page(page);
385                 return 0;
386         }
387         ni = NTFS_I(page->mapping->host);
388         /*
389          * Only $DATA attributes can be encrypted and only unnamed $DATA
390          * attributes can be compressed.  Index root can have the flags set but
391          * this means to create compressed/encrypted files, not that the
392          * attribute is compressed/encrypted.
393          */
394         if (ni->type != AT_INDEX_ROOT) {
395                 /* If attribute is encrypted, deny access, just like NT4. */
396                 if (NInoEncrypted(ni)) {
397                         BUG_ON(ni->type != AT_DATA);
398                         err = -EACCES;
399                         goto err_out;
400                 }
401                 /* Compressed data streams are handled in compress.c. */
402                 if (NInoNonResident(ni) && NInoCompressed(ni)) {
403                         BUG_ON(ni->type != AT_DATA);
404                         BUG_ON(ni->name_len);
405                         return ntfs_read_compressed_block(page);
406                 }
407         }
408         /* NInoNonResident() == NInoIndexAllocPresent() */
409         if (NInoNonResident(ni)) {
410                 /* Normal, non-resident data stream. */
411                 return ntfs_read_block(page);
412         }
413         /*
414          * Attribute is resident, implying it is not compressed or encrypted.
415          * This also means the attribute is smaller than an mft record and
416          * hence smaller than a page, so we can simply zero out any pages with
417          * index above 0.  Note the attribute can actually be marked compressed
418          * but if it is resident the actual data is not compressed so we are
419          * ok to ignore the compressed flag here.
420          */
421         if (unlikely(page->index > 0)) {
422                 kaddr = kmap_atomic(page, KM_USER0);
423                 memset(kaddr, 0, PAGE_CACHE_SIZE);
424                 flush_dcache_page(page);
425                 kunmap_atomic(kaddr, KM_USER0);
426                 goto done;
427         }
428         if (!NInoAttr(ni))
429                 base_ni = ni;
430         else
431                 base_ni = ni->ext.base_ntfs_ino;
432         /* Map, pin, and lock the mft record. */
433         mrec = map_mft_record(base_ni);
434         if (IS_ERR(mrec)) {
435                 err = PTR_ERR(mrec);
436                 goto err_out;
437         }
438         /*
439          * If a parallel write made the attribute non-resident, drop the mft
440          * record and retry the readpage.
441          */
442         if (unlikely(NInoNonResident(ni))) {
443                 unmap_mft_record(base_ni);
444                 goto retry_readpage;
445         }
446         ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
447         if (unlikely(!ctx)) {
448                 err = -ENOMEM;
449                 goto unm_err_out;
450         }
451         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
452                         CASE_SENSITIVE, 0, NULL, 0, ctx);
453         if (unlikely(err))
454                 goto put_unm_err_out;
455         attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
456         read_lock_irqsave(&ni->size_lock, flags);
457         if (unlikely(attr_len > ni->initialized_size))
458                 attr_len = ni->initialized_size;
459         read_unlock_irqrestore(&ni->size_lock, flags);
460         kaddr = kmap_atomic(page, KM_USER0);
461         /* Copy the data to the page. */
462         memcpy(kaddr, (u8*)ctx->attr +
463                         le16_to_cpu(ctx->attr->data.resident.value_offset),
464                         attr_len);
465         /* Zero the remainder of the page. */
466         memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
467         flush_dcache_page(page);
468         kunmap_atomic(kaddr, KM_USER0);
469 put_unm_err_out:
470         ntfs_attr_put_search_ctx(ctx);
471 unm_err_out:
472         unmap_mft_record(base_ni);
473 done:
474         SetPageUptodate(page);
475 err_out:
476         unlock_page(page);
477         return err;
478 }
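
/*
 * Recap of the resident attribute copy above, for illustration only (this
 * is not additional driver code).  A resident attribute keeps its value
 * inside the attribute record in the mft record and the copy amounts to:
 *
 *	len = le32_to_cpu(ctx->attr->data.resident.value_length);
 *	src = (u8*)ctx->attr +
 *			le16_to_cpu(ctx->attr->data.resident.value_offset);
 *	memcpy(page_address, src, len);   (then zero the rest of the page)
 *
 * Since the value is smaller than an mft record, and hence smaller than a
 * page, only page index 0 can ever contain data for a resident attribute,
 * which is why higher page indices are simply zeroed.
 */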
479
480 #ifdef NTFS_RW
481
482 /**
483  * ntfs_write_block - write a @page to the backing store
484  * @page:       page cache page to write out
485  * @wbc:        writeback control structure
486  *
487  * This function is for writing pages belonging to non-resident, non-mst
488  * protected attributes to their backing store.
489  *
490  * For a page with buffers, map and write the dirty buffers asynchronously
491  * under page writeback. For a page without buffers, create buffers for the
492  * page, then proceed as above.
493  *
494  * If a page doesn't have buffers the page dirty state is definitive. If a page
495  * does have buffers, the page dirty state is just a hint, and the buffer dirty
496  * state is definitive. (A hint which has rules: dirty buffers against a clean
497  * page is illegal. Other combinations are legal and need to be handled; in
498  * particular, a dirty page may contain clean buffers.)
499  *
500  * Return 0 on success and -errno on error.
501  *
502  * Based on ntfs_read_block() and __block_write_full_page().
503  */
504 static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
505 {
506         VCN vcn;
507         LCN lcn;
508         s64 initialized_size;
509         loff_t i_size;
510         sector_t block, dblock, iblock;
511         struct inode *vi;
512         ntfs_inode *ni;
513         ntfs_volume *vol;
514         runlist_element *rl;
515         struct buffer_head *bh, *head;
516         unsigned long flags;
517         unsigned int blocksize, vcn_ofs;
518         int err;
519         BOOL need_end_writeback;
520         unsigned char blocksize_bits;
521
522         vi = page->mapping->host;
523         ni = NTFS_I(vi);
524         vol = ni->vol;
525
526         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
527                         "0x%lx.", ni->mft_no, ni->type, page->index);
528
529         BUG_ON(!NInoNonResident(ni));
530         BUG_ON(NInoMstProtected(ni));
531
532         blocksize_bits = vi->i_blkbits;
533         blocksize = 1 << blocksize_bits;
534
535         if (!page_has_buffers(page)) {
536                 BUG_ON(!PageUptodate(page));
537                 create_empty_buffers(page, blocksize,
538                                 (1 << BH_Uptodate) | (1 << BH_Dirty));
539                 if (unlikely(!page_has_buffers(page))) {
540                         ntfs_warning(vol->sb, "Error allocating page "
541                                         "buffers.  Redirtying page so we try "
542                                         "again later.");
543                         /*
544                          * Put the page back on mapping->dirty_pages, but leave
545                          * its buffers' dirty state as-is.
546                          */
547                         redirty_page_for_writepage(wbc, page);
548                         unlock_page(page);
549                         return 0;
550                 }
551         }
552         bh = head = page_buffers(page);
553         BUG_ON(!bh);
554
555         /* NOTE: Different naming scheme to ntfs_read_block()! */
556
557         /* The first block in the page. */
558         block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
559
560         read_lock_irqsave(&ni->size_lock, flags);
561         i_size = i_size_read(vi);
562         initialized_size = ni->initialized_size;
563         read_unlock_irqrestore(&ni->size_lock, flags);
564
565         /* The first out of bounds block for the data size. */
566         dblock = (i_size + blocksize - 1) >> blocksize_bits;
567
568         /* The last (fully or partially) initialized block. */
569         iblock = initialized_size >> blocksize_bits;
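        /*
         * Illustrative figures (hypothetical, not from the driver): with a
         * 512-byte block size, i_size == 10000 gives dblock == 20, the first
         * block wholly beyond the data, and initialized_size == 8000 gives
         * iblock == 15, i.e. block 15 is only partially initialized and
         * everything from block 16 onwards is not initialized at all.
         */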
570
571         /*
572          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
573          * here, and the (potentially unmapped) buffers may become dirty at
574          * any time.  If a buffer becomes dirty here after we've inspected it
575          * then we just miss that fact, and the page stays dirty.
576          *
577          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
578          * handle that here by just cleaning them.
579          */
580
581         /*
582          * Loop through all the buffers in the page, mapping all the dirty
583          * buffers to disk addresses and handling any aliases from the
584          * underlying block device's mapping.
585          */
586         rl = NULL;
587         err = 0;
588         do {
589                 BOOL is_retry = FALSE;
590
591                 if (unlikely(block >= dblock)) {
592                         /*
593                          * Mapped buffers outside i_size will occur, because
594                          * this page can be outside i_size when there is a
595                          * truncate in progress. The contents of such buffers
596                          * were zeroed by ntfs_writepage().
597                          *
598                          * FIXME: What about the small race window where
599                          * ntfs_writepage() has not done any clearing because
600                          * the page was within i_size but before we get here,
601                          * vmtruncate() modifies i_size?
602                          */
603                         clear_buffer_dirty(bh);
604                         set_buffer_uptodate(bh);
605                         continue;
606                 }
607
608                 /* Clean buffers are not written out, so no need to map them. */
609                 if (!buffer_dirty(bh))
610                         continue;
611
612                 /* Make sure we have enough initialized size. */
613                 if (unlikely((block >= iblock) &&
614                                 (initialized_size < i_size))) {
615                         /*
616                          * If this page is fully outside initialized size, zero
617                          * out all pages between the current initialized size
618                          * and the current page. Just use ntfs_readpage() to do
619                          * the zeroing transparently.
620                          */
621                         if (block > iblock) {
622                                 // TODO:
623                                 // For each page do:
624                                 // - read_cache_page()
625                                 // Again for each page do:
626                                 // - wait_on_page_locked()
627                                 // - Check (PageUptodate(page) &&
628                                 //                      !PageError(page))
629                                 // Update initialized size in the attribute and
630                                 // in the inode.
631                                 // Again, for each page do:
632                                 //      __set_page_dirty_buffers();
633                                 // page_cache_release()
634                                 // We don't need to wait on the writes.
635                                 // Update iblock.
636                         }
637                         /*
638                          * The current page straddles initialized size. Zero
639                          * all non-uptodate buffers and set them uptodate (and
640                          * dirty?). Note, there aren't any non-uptodate buffers
641                          * if the page is uptodate.
642                          * FIXME: For an uptodate page, the buffers may need to
643                          * be written out because they were not initialized on
644                          * disk before.
645                          */
646                         if (!PageUptodate(page)) {
647                                 // TODO:
648                                 // Zero any non-uptodate buffers up to i_size.
649                                 // Set them uptodate and dirty.
650                         }
651                         // TODO:
652                         // Update initialized size in the attribute and in the
653                         // inode (up to i_size).
654                         // Update iblock.
655                         // FIXME: This is inefficient. Try to batch the two
656                         // size changes to happen in one go.
657                         ntfs_error(vol->sb, "Writing beyond initialized size "
658                                         "is not supported yet. Sorry.");
659                         err = -EOPNOTSUPP;
660                         break;
661                         // Do NOT set_buffer_new() BUT DO clear buffer range
662                         // outside write request range.
663                         // set_buffer_uptodate() on complete buffers as well as
664                         // set_buffer_dirty().
665                 }
666
667                 /* No need to map buffers that are already mapped. */
668                 if (buffer_mapped(bh))
669                         continue;
670
671                 /* Unmapped, dirty buffer. Need to map it. */
672                 bh->b_bdev = vol->sb->s_bdev;
673
674                 /* Convert block into corresponding vcn and offset. */
675                 vcn = (VCN)block << blocksize_bits;
676                 vcn_ofs = vcn & vol->cluster_size_mask;
677                 vcn >>= vol->cluster_size_bits;
678                 if (!rl) {
679 lock_retry_remap:
680                         down_read(&ni->runlist.lock);
681                         rl = ni->runlist.rl;
682                 }
683                 if (likely(rl != NULL)) {
684                         /* Seek to element containing target vcn. */
685                         while (rl->length && rl[1].vcn <= vcn)
686                                 rl++;
687                         lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
688                 } else
689                         lcn = LCN_RL_NOT_MAPPED;
690                 /* Successful remap. */
691                 if (lcn >= 0) {
692                         /* Setup buffer head to point to correct block. */
693                         bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
694                                         vcn_ofs) >> blocksize_bits;
695                         set_buffer_mapped(bh);
696                         continue;
697                 }
698                 /* It is a hole, need to instantiate it. */
699                 if (lcn == LCN_HOLE) {
700                         u8 *kaddr;
701                         unsigned long *bpos, *bend;
702
703                         /* Check if the buffer is zero. */
704                         kaddr = kmap_atomic(page, KM_USER0);
705                         bpos = (unsigned long *)(kaddr + bh_offset(bh));
706                         bend = (unsigned long *)((u8*)bpos + blocksize);
707                         do {
708                                 if (unlikely(*bpos))
709                                         break;
710                         } while (likely(++bpos < bend));
711                         kunmap_atomic(kaddr, KM_USER0);
712                         if (bpos == bend) {
713                                 /*
714                                  * Buffer is zero and sparse, no need to write
715                                  * it.
716                                  */
717                                 bh->b_blocknr = -1;
718                                 clear_buffer_dirty(bh);
719                                 continue;
720                         }
721                         // TODO: Instantiate the hole.
722                         // clear_buffer_new(bh);
723                         // unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
724                         ntfs_error(vol->sb, "Writing into sparse regions is "
725                                         "not supported yet. Sorry.");
726                         err = -EOPNOTSUPP;
727                         break;
728                 }
729                 /* If first try and runlist unmapped, map and retry. */
730                 if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
731                         is_retry = TRUE;
732                         /*
733                          * Attempt to map runlist, dropping lock for
734                          * the duration.
735                          */
736                         up_read(&ni->runlist.lock);
737                         err = ntfs_map_runlist(ni, vcn);
738                         if (likely(!err))
739                                 goto lock_retry_remap;
740                         rl = NULL;
741                 } else if (!rl)
742                         up_read(&ni->runlist.lock);
743                 /*
744                  * If buffer is outside the runlist, truncate has cut it out
745                  * of the runlist.  Just clean and clear the buffer and set it
746                  * uptodate so it can get discarded by the VM.
747                  */
748                 if (err == -ENOENT || lcn == LCN_ENOENT) {
749                         u8 *kaddr;
750
751                         bh->b_blocknr = -1;
752                         clear_buffer_dirty(bh);
753                         kaddr = kmap_atomic(page, KM_USER0);
754                         memset(kaddr + bh_offset(bh), 0, blocksize);
755                         kunmap_atomic(kaddr, KM_USER0);
756                         flush_dcache_page(page);
757                         set_buffer_uptodate(bh);
758                         err = 0;
759                         continue;
760                 }
761                 /* Failed to map the buffer, even after retrying. */
762                 if (!err)
763                         err = -EIO;
764                 bh->b_blocknr = -1;
765                 ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
766                                 "attribute type 0x%x, vcn 0x%llx, offset 0x%x "
767                                 "because its location on disk could not be "
768                                 "determined%s (error code %i).", ni->mft_no,
769                                 ni->type, (unsigned long long)vcn,
770                                 vcn_ofs, is_retry ? " even after "
771                                 "retrying" : "", err);
772                 break;
773         } while (block++, (bh = bh->b_this_page) != head);
774
775         /* Release the lock if we took it. */
776         if (rl)
777                 up_read(&ni->runlist.lock);
778
779         /* For the error case, need to reset bh to the beginning. */
780         bh = head;
781
782         /* Just an optimization, so ->readpage() is not called later. */
783         if (unlikely(!PageUptodate(page))) {
784                 int uptodate = 1;
785                 do {
786                         if (!buffer_uptodate(bh)) {
787                                 uptodate = 0;
788                                 bh = head;
789                                 break;
790                         }
791                 } while ((bh = bh->b_this_page) != head);
792                 if (uptodate)
793                         SetPageUptodate(page);
794         }
795
796         /* Setup all mapped, dirty buffers for async write i/o. */
797         do {
798                 if (buffer_mapped(bh) && buffer_dirty(bh)) {
799                         lock_buffer(bh);
800                         if (test_clear_buffer_dirty(bh)) {
801                                 BUG_ON(!buffer_uptodate(bh));
802                                 mark_buffer_async_write(bh);
803                         } else
804                                 unlock_buffer(bh);
805                 } else if (unlikely(err)) {
806                         /*
807                          * For the error case: the buffer may have been set
808                          * dirty during attachment to a dirty page.
809                          */
810                         if (err != -ENOMEM)
811                                 clear_buffer_dirty(bh);
812                 }
813         } while ((bh = bh->b_this_page) != head);
814
815         if (unlikely(err)) {
816                 // TODO: Remove the -EOPNOTSUPP check later on...
817                 if (unlikely(err == -EOPNOTSUPP))
818                         err = 0;
819                 else if (err == -ENOMEM) {
820                         ntfs_warning(vol->sb, "Error allocating memory. "
821                                         "Redirtying page so we try again "
822                                         "later.");
823                         /*
824                          * Put the page back on mapping->dirty_pages, but
825                          * leave its buffer's dirty state as-is.
826                          */
827                         redirty_page_for_writepage(wbc, page);
828                         err = 0;
829                 } else
830                         SetPageError(page);
831         }
832
833         BUG_ON(PageWriteback(page));
834         set_page_writeback(page);       /* Keeps try_to_free_buffers() away. */
835
836         /* Submit the prepared buffers for i/o. */
837         need_end_writeback = TRUE;
838         do {
839                 struct buffer_head *next = bh->b_this_page;
840                 if (buffer_async_write(bh)) {
841                         submit_bh(WRITE, bh);
842                         need_end_writeback = FALSE;
843                 }
844                 bh = next;
845         } while (bh != head);
846         unlock_page(page);
847
848         /* If no i/o was started, need to end_page_writeback(). */
849         if (unlikely(need_end_writeback))
850                 end_page_writeback(page);
851
852         ntfs_debug("Done.");
853         return err;
854 }
855
856 /**
857  * ntfs_write_mst_block - write a @page to the backing store
858  * @page:       page cache page to write out
859  * @wbc:        writeback control structure
860  *
861  * This function is for writing pages belonging to non-resident, mst protected
862  * attributes to their backing store.  The only supported attributes are index
863  * allocation and $MFT/$DATA.  Both directory inodes and index inodes are
864  * supported for the index allocation case.
865  *
866  * The page must remain locked for the duration of the write because we apply
867  * the mst fixups, write, and then undo the fixups, so if we were to unlock the
868  * page before undoing the fixups, any other user of the page will see the
869  * page contents as corrupt.
870  *
871  * We clear the page uptodate flag for the duration of the function to ensure
872  * exclusion for the $MFT/$DATA case against someone mapping an mft record we
873  * are about to apply the mst fixups to.
874  *
875  * Return 0 on success and -errno on error.
876  *
877  * Based on ntfs_write_block(), ntfs_mft_writepage(), and
878  * write_mft_record_nolock().
879  */
880 static int ntfs_write_mst_block(struct page *page,
881                 struct writeback_control *wbc)
882 {
883         sector_t block, dblock, rec_block;
884         struct inode *vi = page->mapping->host;
885         ntfs_inode *ni = NTFS_I(vi);
886         ntfs_volume *vol = ni->vol;
887         u8 *kaddr;
888         unsigned int rec_size = ni->itype.index.block_size;
889         ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
890         struct buffer_head *bh, *head, *tbh, *rec_start_bh;
891         struct buffer_head *bhs[MAX_BUF_PER_PAGE];
892         runlist_element *rl;
893         int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
894         unsigned bh_size, rec_size_bits;
895         BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
896         unsigned char bh_size_bits;
897
898         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
899                         "0x%lx.", vi->i_ino, ni->type, page->index);
900         BUG_ON(!NInoNonResident(ni));
901         BUG_ON(!NInoMstProtected(ni));
902         is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
903         /*
904          * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
905          * in its page cache were to be marked dirty.  However, this should
906          * never happen with the current driver and, since we do not handle
907          * this case here, we do want to BUG(), at least for now.
908          */
909         BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
910                         (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
911         bh_size_bits = vi->i_blkbits;
912         bh_size = 1 << bh_size_bits;
913         max_bhs = PAGE_CACHE_SIZE / bh_size;
914         BUG_ON(!max_bhs);
915         BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
916
917         /* Were we called for sync purposes? */
918         sync = (wbc->sync_mode == WB_SYNC_ALL);
919
920         /* Make sure we have mapped buffers. */
921         bh = head = page_buffers(page);
922         BUG_ON(!bh);
923
924         rec_size_bits = ni->itype.index.block_size_bits;
925         BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
926         bhs_per_rec = rec_size >> bh_size_bits;
927         BUG_ON(!bhs_per_rec);
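        /*
         * Example geometry (hypothetical figures): with PAGE_CACHE_SIZE of
         * 4096, a 512-byte block size and 1024-byte mft records, max_bhs is
         * 8, bhs_per_rec is 2 and the page holds four records of two buffer
         * heads each.
         */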
928
929         /* The first block in the page. */
930         rec_block = block = (sector_t)page->index <<
931                         (PAGE_CACHE_SHIFT - bh_size_bits);
932
933         /* The first out of bounds block for the data size. */
934         dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
935
936         rl = NULL;
937         err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
938         page_is_dirty = rec_is_dirty = FALSE;
939         rec_start_bh = NULL;
940         do {
941                 BOOL is_retry = FALSE;
942
943                 if (likely(block < rec_block)) {
944                         if (unlikely(block >= dblock)) {
945                                 clear_buffer_dirty(bh);
946                                 set_buffer_uptodate(bh);
947                                 continue;
948                         }
949                         /*
950                          * This block is not the first one in the record.  We
951                          * ignore the buffer's dirty state because we could
952                          * have raced with a parallel mark_ntfs_record_dirty().
953                          */
954                         if (!rec_is_dirty)
955                                 continue;
956                         if (unlikely(err2)) {
957                                 if (err2 != -ENOMEM)
958                                         clear_buffer_dirty(bh);
959                                 continue;
960                         }
961                 } else /* if (block == rec_block) */ {
962                         BUG_ON(block > rec_block);
963                         /* This block is the first one in the record. */
964                         rec_block += bhs_per_rec;
965                         err2 = 0;
966                         if (unlikely(block >= dblock)) {
967                                 clear_buffer_dirty(bh);
968                                 continue;
969                         }
970                         if (!buffer_dirty(bh)) {
971                                 /* Clean records are not written out. */
972                                 rec_is_dirty = FALSE;
973                                 continue;
974                         }
975                         rec_is_dirty = TRUE;
976                         rec_start_bh = bh;
977                 }
978                 /* Need to map the buffer if it is not mapped already. */
979                 if (unlikely(!buffer_mapped(bh))) {
980                         VCN vcn;
981                         LCN lcn;
982                         unsigned int vcn_ofs;
983
984                         bh->b_bdev = vol->sb->s_bdev;
985                         /* Obtain the vcn and offset of the current block. */
986                         vcn = (VCN)block << bh_size_bits;
987                         vcn_ofs = vcn & vol->cluster_size_mask;
988                         vcn >>= vol->cluster_size_bits;
989                         if (!rl) {
990 lock_retry_remap:
991                                 down_read(&ni->runlist.lock);
992                                 rl = ni->runlist.rl;
993                         }
994                         if (likely(rl != NULL)) {
995                                 /* Seek to element containing target vcn. */
996                                 while (rl->length && rl[1].vcn <= vcn)
997                                         rl++;
998                                 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
999                         } else
1000                                 lcn = LCN_RL_NOT_MAPPED;
1001                         /* Successful remap. */
1002                         if (likely(lcn >= 0)) {
1003                                 /* Setup buffer head to correct block. */
1004                                 bh->b_blocknr = ((lcn <<
1005                                                 vol->cluster_size_bits) +
1006                                                 vcn_ofs) >> bh_size_bits;
1007                                 set_buffer_mapped(bh);
1008                         } else {
1009                                 /*
1010                                  * Remap failed.  Retry to map the runlist once
1011                                  * unless we are working on $MFT which always
1012                                  * has the whole of its runlist in memory.
1013                                  */
1014                                 if (!is_mft && !is_retry &&
1015                                                 lcn == LCN_RL_NOT_MAPPED) {
1016                                         is_retry = TRUE;
1017                                         /*
1018                                          * Attempt to map runlist, dropping
1019                                          * lock for the duration.
1020                                          */
1021                                         up_read(&ni->runlist.lock);
1022                                         err2 = ntfs_map_runlist(ni, vcn);
1023                                         if (likely(!err2))
1024                                                 goto lock_retry_remap;
1025                                         if (err2 == -ENOMEM)
1026                                                 page_is_dirty = TRUE;
1027                                         lcn = err2;
1028                                 } else {
1029                                         err2 = -EIO;
1030                                         if (!rl)
1031                                                 up_read(&ni->runlist.lock);
1032                                 }
1033                                 /* Hard error.  Abort writing this record. */
1034                                 if (!err || err == -ENOMEM)
1035                                         err = err2;
1036                                 bh->b_blocknr = -1;
1037                                 ntfs_error(vol->sb, "Cannot write ntfs record "
1038                                                 "0x%llx (inode 0x%lx, "
1039                                                 "attribute type 0x%x) because "
1040                                                 "its location on disk could "
1041                                                 "not be determined (error "
1042                                                 "code %lli).",
1043                                                 (long long)block <<
1044                                                 bh_size_bits >>
1045                                                 vol->mft_record_size_bits,
1046                                                 ni->mft_no, ni->type,
1047                                                 (long long)lcn);
1048                                 /*
1049                                  * If this is not the first buffer, remove the
1050                                  * buffers in this record from the list of
1051                                  * buffers to write and clear their dirty bit
1052                                  * if not error -ENOMEM.
1053                                  */
1054                                 if (rec_start_bh != bh) {
1055                                         while (bhs[--nr_bhs] != rec_start_bh)
1056                                                 ;
1057                                         if (err2 != -ENOMEM) {
1058                                                 do {
1059                                                         clear_buffer_dirty(
1060                                                                 rec_start_bh);
1061                                                 } while ((rec_start_bh =
1062                                                                 rec_start_bh->
1063                                                                 b_this_page) !=
1064                                                                 bh);
1065                                         }
1066                                 }
1067                                 continue;
1068                         }
1069                 }
1070                 BUG_ON(!buffer_uptodate(bh));
1071                 BUG_ON(nr_bhs >= max_bhs);
1072                 bhs[nr_bhs++] = bh;
1073         } while (block++, (bh = bh->b_this_page) != head);
1074         if (unlikely(rl))
1075                 up_read(&ni->runlist.lock);
1076         /* If there were no dirty buffers, we are done. */
1077         if (!nr_bhs)
1078                 goto done;
1079         /* Map the page so we can access its contents. */
1080         kaddr = kmap(page);
1081         /* Clear the page uptodate flag whilst the mst fixups are applied. */
1082         BUG_ON(!PageUptodate(page));
1083         ClearPageUptodate(page);
1084         for (i = 0; i < nr_bhs; i++) {
1085                 unsigned int ofs;
1086
1087                 /* Skip buffers which are not at the beginning of records. */
1088                 if (i % bhs_per_rec)
1089                         continue;
1090                 tbh = bhs[i];
1091                 ofs = bh_offset(tbh);
1092                 if (is_mft) {
1093                         ntfs_inode *tni;
1094                         unsigned long mft_no;
1095
1096                         /* Get the mft record number. */
1097                         mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
1098                                         >> rec_size_bits;
1099                         /* Check whether to write this mft record. */
1100                         tni = NULL;
1101                         if (!ntfs_may_write_mft_record(vol, mft_no,
1102                                         (MFT_RECORD*)(kaddr + ofs), &tni)) {
1103                                 /*
1104                                  * The record should not be written.  This
1105                                  * means we need to redirty the page before
1106                                  * returning.
1107                                  */
1108                                 page_is_dirty = TRUE;
1109                                 /*
1110                                  * Remove the buffers in this mft record from
1111                                  * the list of buffers to write.
1112                                  */
1113                                 do {
1114                                         bhs[i] = NULL;
1115                                 } while (++i % bhs_per_rec);
1116                                 continue;
1117                         }
1118                         /*
1119                          * The record should be written.  If a locked ntfs
1120                          * inode was returned, add it to the array of locked
1121                          * ntfs inodes.
1122                          */
1123                         if (tni)
1124                                 locked_nis[nr_locked_nis++] = tni;
1125                 }
1126                 /* Apply the mst protection fixups. */
1127                 err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
1128                                 rec_size);
1129                 if (unlikely(err2)) {
1130                         if (!err || err == -ENOMEM)
1131                                 err = -EIO;
1132                         ntfs_error(vol->sb, "Failed to apply mst fixups "
1133                                         "(inode 0x%lx, attribute type 0x%x, "
1134                                         "page index 0x%lx, page offset 0x%x)!"
1135                                         "  Unmount and run chkdsk.", vi->i_ino,
1136                                         ni->type, page->index, ofs);
1137                         /*
1138                          * Mark all the buffers in this record clean as we do
1139                          * not want to write corrupt data to disk.
1140                          */
1141                         do {
1142                                 clear_buffer_dirty(bhs[i]);
1143                                 bhs[i] = NULL;
1144                         } while (++i % bhs_per_rec);
1145                         continue;
1146                 }
1147                 nr_recs++;
1148         }
1149         /* If no records are to be written out, we are done. */
1150         if (!nr_recs)
1151                 goto unm_done;
1152         flush_dcache_page(page);
1153         /* Lock buffers and start synchronous write i/o on them. */
1154         for (i = 0; i < nr_bhs; i++) {
1155                 tbh = bhs[i];
1156                 if (!tbh)
1157                         continue;
1158                 if (unlikely(test_set_buffer_locked(tbh)))
1159                         BUG();
1160                 /* The buffer dirty state is now irrelevant, just clean it. */
1161                 clear_buffer_dirty(tbh);
1162                 BUG_ON(!buffer_uptodate(tbh));
1163                 BUG_ON(!buffer_mapped(tbh));
1164                 get_bh(tbh);
1165                 tbh->b_end_io = end_buffer_write_sync;
1166                 submit_bh(WRITE, tbh);
1167         }
1168         /* Synchronize the mft mirror now if not @sync. */
1169         if (is_mft && !sync)
1170                 goto do_mirror;
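        /*
         * Note on the goto flow: in the asynchronous case we jump forward to
         * do_mirror, copy the affected records into the mft mirror and then
         * jump back to do_wait to wait for the buffer i/o.  In the
         * synchronous case we fall through, wait for the buffer i/o first
         * and only then synchronize the mft mirror.
         */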
1171 do_wait:
1172         /* Wait on i/o completion of buffers. */
1173         for (i = 0; i < nr_bhs; i++) {
1174                 tbh = bhs[i];
1175                 if (!tbh)
1176                         continue;
1177                 wait_on_buffer(tbh);
1178                 if (unlikely(!buffer_uptodate(tbh))) {
1179                         ntfs_error(vol->sb, "I/O error while writing ntfs "
1180                                         "record buffer (inode 0x%lx, "
1181                                         "attribute type 0x%x, page index "
1182                                         "0x%lx, page offset 0x%lx)!  Unmount "
1183                                         "and run chkdsk.", vi->i_ino, ni->type,
1184                                         page->index, bh_offset(tbh));
1185                         if (!err || err == -ENOMEM)
1186                                 err = -EIO;
1187                         /*
1188                          * Set the buffer uptodate so the page and buffer
1189                          * states do not become out of sync.
1190                          */
1191                         set_buffer_uptodate(tbh);
1192                 }
1193         }
1194         /* If @sync, now synchronize the mft mirror. */
1195         if (is_mft && sync) {
1196 do_mirror:
1197                 for (i = 0; i < nr_bhs; i++) {
1198                         unsigned long mft_no;
1199                         unsigned int ofs;
1200
1201                         /*
1202                          * Skip buffers which are not at the beginning of
1203                          * records.
1204                          */
1205                         if (i % bhs_per_rec)
1206                                 continue;
1207                         tbh = bhs[i];
1208                         /* Skip removed buffers (and hence records). */
1209                         if (!tbh)
1210                                 continue;
1211                         ofs = bh_offset(tbh);
1212                         /* Get the mft record number. */
1213                         mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
1214                                         >> rec_size_bits;
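                             /*
                              * Only the first vol->mftmirr_size mft records
                              * are mirrored, so only those need to be copied
                              * to the mft mirror.
                              */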
1215                         if (mft_no < vol->mftmirr_size)
1216                                 ntfs_sync_mft_mirror(vol, mft_no,
1217                                                 (MFT_RECORD*)(kaddr + ofs),
1218                                                 sync);
1219                 }
1220                 if (!sync)
1221                         goto do_wait;
1222         }
1223         /* Remove the mst protection fixups again. */
1224         for (i = 0; i < nr_bhs; i++) {
1225                 if (!(i % bhs_per_rec)) {
1226                         tbh = bhs[i];
1227                         if (!tbh)
1228                                 continue;
1229                         post_write_mst_fixup((NTFS_RECORD*)(kaddr +
1230                                         bh_offset(tbh)));
1231                 }
1232         }
1233         flush_dcache_page(page);
1234 unm_done:
1235         /* Unlock any locked inodes. */
1236         while (nr_locked_nis-- > 0) {
1237                 ntfs_inode *tni, *base_tni;
1238                 
1239                 tni = locked_nis[nr_locked_nis];
1240                 /* Get the base inode. */
1241                 down(&tni->extent_lock);
1242                 if (tni->nr_extents >= 0)
1243                         base_tni = tni;
1244                 else {
1245                         base_tni = tni->ext.base_ntfs_ino;
1246                         BUG_ON(!base_tni);
1247                 }
1248                 up(&tni->extent_lock);
1249                 ntfs_debug("Unlocking %s inode 0x%lx.",
1250                                 tni == base_tni ? "base" : "extent",
1251                                 tni->mft_no);
1252                 up(&tni->mrec_lock);
1253                 atomic_dec(&tni->count);
1254                 iput(VFS_I(base_tni));
1255         }
1256         SetPageUptodate(page);
1257         kunmap(page);
1258 done:
1259         if (unlikely(err && err != -ENOMEM)) {
1260                 /*
1261                  * Set page error if there is only one ntfs record in the page.
1262                  * Otherwise we would lose per-record granularity.
1263                  */
1264                 if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
1265                         SetPageError(page);
1266                 NVolSetErrors(vol);
1267         }
1268         if (page_is_dirty) {
1269                 ntfs_debug("Page still contains one or more dirty ntfs "
1270                                 "records.  Redirtying the page starting at "
1271                                 "record 0x%lx.", page->index <<
1272                                 (PAGE_CACHE_SHIFT - rec_size_bits));
1273                 redirty_page_for_writepage(wbc, page);
1274                 unlock_page(page);
1275         } else {
1276                 /*
1277                  * Keep the VM happy.  This must be done otherwise the
1278                  * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
1279                  * the page is clean.
1280                  */
1281                 BUG_ON(PageWriteback(page));
1282                 set_page_writeback(page);
1283                 unlock_page(page);
1284                 end_page_writeback(page);
1285         }
1286         if (likely(!err))
1287                 ntfs_debug("Done.");
1288         return err;
1289 }
1290
1291 /**
1292  * ntfs_writepage - write a @page to the backing store
1293  * @page:       page cache page to write out
1294  * @wbc:        writeback control structure
1295  *
1296  * This is called from the VM when it wants to have a dirty ntfs page cache
1297  * page cleaned.  The VM has already locked the page and marked it clean.
1298  *
1299  * For non-resident attributes, ntfs_writepage() writes the @page by calling
1300  * the ntfs version of the generic block_write_full_page() function,
1301  * ntfs_write_block(), which in turn if necessary creates and writes the
1302  * buffers associated with the page asynchronously.
1303  *
1304  * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
1305  * the data to the mft record (which at this stage is most likely in memory).
1306  * The mft record is then marked dirty and written out asynchronously via the
1307  * vfs inode dirty code path for the inode the mft record belongs to or via the
1308  * vm page dirty code path for the page the mft record is in.
1309  *
1310  * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
1311  *
1312  * Return 0 on success and -errno on error.
1313  */
1314 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
1315 {
1316         loff_t i_size;
1317         struct inode *vi = page->mapping->host;
1318         ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
1319         char *kaddr;
1320         ntfs_attr_search_ctx *ctx = NULL;
1321         MFT_RECORD *m = NULL;
1322         u32 attr_len;
1323         int err;
1324
1325 retry_writepage:
1326         BUG_ON(!PageLocked(page));
1327         i_size = i_size_read(vi);
1328         /* Is the page fully outside i_size? (truncate in progress) */
1329         if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
1330                         PAGE_CACHE_SHIFT)) {
1331                 /*
1332                  * The page may have dirty, unmapped buffers.  Make them
1333                  * freeable here, so the page does not leak.
1334                  */
1335                 block_invalidatepage(page, 0);
1336                 unlock_page(page);
1337                 ntfs_debug("Write outside i_size - truncated?");
1338                 return 0;
1339         }
1340         /*
1341          * Only $DATA attributes can be encrypted and only unnamed $DATA
1342          * attributes can be compressed.  Index root can have the flags set, but
1343          * this only means that newly created files are to be compressed/encrypted,
1344          * not that the attribute itself is compressed/encrypted.
1345          */
1346         if (ni->type != AT_INDEX_ROOT) {
1347                 /* If file is encrypted, deny access, just like NT4. */
1348                 if (NInoEncrypted(ni)) {
1349                         unlock_page(page);
1350                         BUG_ON(ni->type != AT_DATA);
1351                         ntfs_debug("Denying write access to encrypted "
1352                                         "file.");
1353                         return -EACCES;
1354                 }
1355                 /* Compressed data streams are handled in compress.c. */
1356                 if (NInoNonResident(ni) && NInoCompressed(ni)) {
1357                         BUG_ON(ni->type != AT_DATA);
1358                         BUG_ON(ni->name_len);
1359                         // TODO: Implement and replace this with
1360                         // return ntfs_write_compressed_block(page);
1361                         unlock_page(page);
1362                         ntfs_error(vi->i_sb, "Writing to compressed files is "
1363                                         "not supported yet.  Sorry.");
1364                         return -EOPNOTSUPP;
1365                 }
1366                 // TODO: Implement and remove this check.
1367                 if (NInoNonResident(ni) && NInoSparse(ni)) {
1368                         unlock_page(page);
1369                         ntfs_error(vi->i_sb, "Writing to sparse files is not "
1370                                         "supported yet.  Sorry.");
1371                         return -EOPNOTSUPP;
1372                 }
1373         }
1374         /* NInoNonResident() == NInoIndexAllocPresent() */
1375         if (NInoNonResident(ni)) {
1376                 /* We have to zero every time due to mmap-at-end-of-file. */
1377                 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
1378                         /* The page straddles i_size. */
1379                         unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
1380                         kaddr = kmap_atomic(page, KM_USER0);
1381                         memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
1382                         flush_dcache_page(page);
1383                         kunmap_atomic(kaddr, KM_USER0);
1384                 }
1385                 /* Handle mst protected attributes. */
1386                 if (NInoMstProtected(ni))
1387                         return ntfs_write_mst_block(page, wbc);
1388                 /* Normal, non-resident data stream. */
1389                 return ntfs_write_block(page, wbc);
1390         }
1391         /*
1392          * Attribute is resident, implying it is not compressed, encrypted, or
1393          * mst protected.  This also means the attribute is smaller than an mft
1394          * record and hence smaller than a page, so can simply return error on
1395          * record and hence smaller than a page, so we can simply return an error
1396          * on any page with index above 0.  Note the attribute can actually be
1397          * compressed so we are ok to ignore the compressed flag here.
1398          */
1399         BUG_ON(page_has_buffers(page));
1400         BUG_ON(!PageUptodate(page));
1401         if (unlikely(page->index > 0)) {
1402                 ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0.  "
1403                                 "Aborting write.", page->index);
1404                 BUG_ON(PageWriteback(page));
1405                 set_page_writeback(page);
1406                 unlock_page(page);
1407                 end_page_writeback(page);
1408                 return -EIO;
1409         }
1410         if (!NInoAttr(ni))
1411                 base_ni = ni;
1412         else
1413                 base_ni = ni->ext.base_ntfs_ino;
1414         /* Map, pin, and lock the mft record. */
1415         m = map_mft_record(base_ni);
1416         if (IS_ERR(m)) {
1417                 err = PTR_ERR(m);
1418                 m = NULL;
1419                 ctx = NULL;
1420                 goto err_out;
1421         }
1422         /*
1423          * If a parallel write made the attribute non-resident, drop the mft
1424          * record and retry the writepage.
1425          */
1426         if (unlikely(NInoNonResident(ni))) {
1427                 unmap_mft_record(base_ni);
1428                 goto retry_writepage;
1429         }
1430         ctx = ntfs_attr_get_search_ctx(base_ni, m);
1431         if (unlikely(!ctx)) {
1432                 err = -ENOMEM;
1433                 goto err_out;
1434         }
1435         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1436                         CASE_SENSITIVE, 0, NULL, 0, ctx);
1437         if (unlikely(err))
1438                 goto err_out;
1439         /*
1440          * Keep the VM happy.  This must be done otherwise the radix-tree tag
1441          * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
1442          */
1443         BUG_ON(PageWriteback(page));
1444         set_page_writeback(page);
1445         unlock_page(page);
1446         /*
1447          * Here, we do not need to zero the out of bounds area every time
1448          * because the below memcpy() already takes care of the
1449          * mmap-at-end-of-file requirements.  If the file is converted to a
1450          * non-resident one, then the code path in use is switched to the
1451          * non-resident one where the zeroing happens on each ntfs_writepage()
1452          * invocation.
1453          */
1454         attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
1455         i_size = i_size_read(vi);
1456         if (unlikely(attr_len > i_size)) {
1457                 attr_len = i_size;
1458                 ctx->attr->data.resident.value_length = cpu_to_le32(attr_len);
1459         }
1460         kaddr = kmap_atomic(page, KM_USER0);
1461         /* Copy the data from the page to the mft record. */
1462         memcpy((u8*)ctx->attr +
1463                         le16_to_cpu(ctx->attr->data.resident.value_offset),
1464                         kaddr, attr_len);
1465         flush_dcache_mft_record_page(ctx->ntfs_ino);
1466         /* Zero out of bounds area in the page cache page. */
1467         memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
1468         flush_dcache_page(page);
1469         kunmap_atomic(kaddr, KM_USER0);
1470
1471         end_page_writeback(page);
1472
1473         /* Mark the mft record dirty, so it gets written back. */
1474         mark_mft_record_dirty(ctx->ntfs_ino);
1475         ntfs_attr_put_search_ctx(ctx);
1476         unmap_mft_record(base_ni);
1477         return 0;
1478 err_out:
1479         if (err == -ENOMEM) {
1480                 ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
1481                                 "page so we try again later.");
1482                 /*
1483                  * Put the page back on mapping->dirty_pages, but leave its
1484                  * buffers' dirty state as-is.
1485                  */
1486                 redirty_page_for_writepage(wbc, page);
1487                 err = 0;
1488         } else {
1489                 ntfs_error(vi->i_sb, "Resident attribute write failed with "
1490                                 "error %i.", err);
1491                 SetPageError(page);
1492                 NVolSetErrors(ni->vol);
1493                 make_bad_inode(vi);
1494         }
1495         unlock_page(page);
1496         if (ctx)
1497                 ntfs_attr_put_search_ctx(ctx);
1498         if (m)
1499                 unmap_mft_record(base_ni);
1500         return err;
1501 }
1502
1503 /**
1504  * ntfs_prepare_nonresident_write - prepare a non-resident page for a write
1505  *
1506  */
1507 static int ntfs_prepare_nonresident_write(struct page *page,
1508                 unsigned from, unsigned to)
1509 {
1510         VCN vcn;
1511         LCN lcn;
1512         s64 initialized_size;
1513         loff_t i_size;
1514         sector_t block, ablock, iblock;
1515         struct inode *vi;
1516         ntfs_inode *ni;
1517         ntfs_volume *vol;
1518         runlist_element *rl;
1519         struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
1520         unsigned long flags;
1521         unsigned int vcn_ofs, block_start, block_end, blocksize;
1522         int err;
1523         BOOL is_retry;
1524         unsigned char blocksize_bits;
1525
1526         vi = page->mapping->host;
1527         ni = NTFS_I(vi);
1528         vol = ni->vol;
1529
1530         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
1531                         "0x%lx, from = %u, to = %u.", ni->mft_no, ni->type,
1532                         page->index, from, to);
1533
1534         BUG_ON(!NInoNonResident(ni));
1535
1536         blocksize_bits = vi->i_blkbits;
1537         blocksize = 1 << blocksize_bits;
1538
1539         /*
1540          * create_empty_buffers() will create uptodate/dirty buffers if the
1541          * page is uptodate/dirty.
1542          */
1543         if (!page_has_buffers(page))
1544                 create_empty_buffers(page, blocksize, 0);
1545         bh = head = page_buffers(page);
1546         if (unlikely(!bh))
1547                 return -ENOMEM;
1548
1549         /* The first block in the page. */
1550         block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
1551
1552         read_lock_irqsave(&ni->size_lock, flags);
1553         /*
1554          * The first out of bounds block for the allocated size.  No need to
1555          * round up as allocated_size is in multiples of cluster size and the
1556          * minimum cluster size is 512 bytes, which is equal to the smallest
1557          * blocksize.
1558          */
1559         ablock = ni->allocated_size >> blocksize_bits;
1560         i_size = i_size_read(vi);
1561         initialized_size = ni->initialized_size;
1562         read_unlock_irqrestore(&ni->size_lock, flags);
1563
1564         /* The last (fully or partially) initialized block. */
1565         iblock = initialized_size >> blocksize_bits;
1566
1567         /* Loop through all the buffers in the page. */
1568         block_start = 0;
1569         rl = NULL;
1570         err = 0;
1571         do {
1572                 block_end = block_start + blocksize;
1573                 /*
1574                  * If buffer @bh is outside the write, just mark it uptodate
1575                  * if the page is uptodate and continue with the next buffer.
1576                  */
1577                 if (block_end <= from || block_start >= to) {
1578                         if (PageUptodate(page)) {
1579                                 if (!buffer_uptodate(bh))
1580                                         set_buffer_uptodate(bh);
1581                         }
1582                         continue;
1583                 }
1584                 /*
1585                  * @bh is at least partially being written to.
1586                  * Make sure it is not marked as new.
1587                  */
1588                 //if (buffer_new(bh))
1589                 //      clear_buffer_new(bh);
1590
1591                 if (block >= ablock) {
1592                         // TODO: block is above allocated_size, need to
1593                         // allocate it. Best done in one go to accommodate not
1594                         // only block but all above blocks up to and including:
1595                         // ((page->index << PAGE_CACHE_SHIFT) + to + blocksize
1596                         // - 1) >> blocksize_bits. Obviously will need to round
1597                         // up to next cluster boundary, too. This should be
1598                         // done with a helper function, so it can be reused.
1599                         ntfs_error(vol->sb, "Writing beyond allocated size "
1600                                         "is not supported yet. Sorry.");
1601                         err = -EOPNOTSUPP;
1602                         goto err_out;
1603                         // Need to update ablock.
1604                         // Need to set_buffer_new() on all block bhs that are
1605                         // newly allocated.
1606                 }
1607                 /*
1608                  * Now we have enough allocated size to fulfill the whole
1609                  * request, i.e. block < ablock is true.
1610                  */
1611                 if (unlikely((block >= iblock) &&
1612                                 (initialized_size < i_size))) {
1613                         /*
1614                          * If this page is fully outside initialized size, zero
1615                          * out all pages between the current initialized size
1616                          * and the current page. Just use ntfs_readpage() to do
1617                          * the zeroing transparently.
1618                          */
1619                         if (block > iblock) {
1620                                 // TODO:
1621                                 // For each page do:
1622                                 // - read_cache_page()
1623                                 // Again for each page do:
1624                                 // - wait_on_page_locked()
1625                                 // - Check (PageUptodate(page) &&
1626                                 //                      !PageError(page))
1627                                 // Update initialized size in the attribute and
1628                                 // in the inode.
1629                                 // Again, for each page do:
1630                                 //      __set_page_dirty_buffers();
1631                                 // page_cache_release()
1632                                 // We don't need to wait on the writes.
1633                                 // Update iblock.
1634                         }
1635                         /*
1636                          * The current page straddles initialized size. Zero
1637                          * all non-uptodate buffers and set them uptodate (and
1638                          * dirty?). Note, there aren't any non-uptodate buffers
1639                          * if the page is uptodate.
1640                          * FIXME: For an uptodate page, the buffers may need to
1641                          * be written out because they were not initialized on
1642                          * disk before.
1643                          */
1644                         if (!PageUptodate(page)) {
1645                                 // TODO:
1646                                 // Zero any non-uptodate buffers up to i_size.
1647                                 // Set them uptodate and dirty.
1648                         }
1649                         // TODO:
1650                         // Update initialized size in the attribute and in the
1651                         // inode (up to i_size).
1652                         // Update iblock.
1653                         // FIXME: This is inefficient. Try to batch the two
1654                         // size changes to happen in one go.
1655                         ntfs_error(vol->sb, "Writing beyond initialized size "
1656                                         "is not supported yet. Sorry.");
1657                         err = -EOPNOTSUPP;
1658                         goto err_out;
1659                         // Do NOT set_buffer_new() BUT DO clear buffer range
1660                         // outside write request range.
1661                         // set_buffer_uptodate() on complete buffers as well as
1662                         // set_buffer_dirty().
1663                 }
1664
1665                 /* Need to map unmapped buffers. */
1666                 if (!buffer_mapped(bh)) {
1667                         /* Unmapped buffer. Need to map it. */
1668                         bh->b_bdev = vol->sb->s_bdev;
1669
1670                         /* Convert block into corresponding vcn and offset. */
1671                         vcn = (VCN)block << blocksize_bits >>
1672                                         vol->cluster_size_bits;
1673                         vcn_ofs = ((VCN)block << blocksize_bits) &
1674                                         vol->cluster_size_mask;
1675
1676                         is_retry = FALSE;
1677                         if (!rl) {
1678 lock_retry_remap:
1679                                 down_read(&ni->runlist.lock);
1680                                 rl = ni->runlist.rl;
1681                         }
1682                         if (likely(rl != NULL)) {
1683                                 /* Seek to element containing target vcn. */
1684                                 while (rl->length && rl[1].vcn <= vcn)
1685                                         rl++;
1686                                 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
1687                         } else
1688                                 lcn = LCN_RL_NOT_MAPPED;
1689                         if (unlikely(lcn < 0)) {
1690                                 /*
1691                                  * We extended the attribute allocation above.
1692                                  * If we hit an ENOENT here it means that the
1693                                  * allocation was insufficient, which is a bug.
1694                                  */
1695                                 BUG_ON(lcn == LCN_ENOENT);
1696
1697                                 /* It is a hole, need to instantiate it. */
1698                                 if (lcn == LCN_HOLE) {
1699                                         // TODO: Instantiate the hole.
1700                                         // clear_buffer_new(bh);
1701                                         // unmap_underlying_metadata(bh->b_bdev,
1702                                         //              bh->b_blocknr);
1703                                         // For non-uptodate buffers, need to
1704                                         // zero out the region outside the
1705                                         // request in this bh or all bhs,
1706                                         // depending on what we implemented
1707                                         // above.
1708                                         // Need to flush_dcache_page().
1709                                         // Or could use set_buffer_new()
1710                                         // instead?
1711                                         ntfs_error(vol->sb, "Writing into "
1712                                                         "sparse regions is "
1713                                                         "not supported yet. "
1714                                                         "Sorry.");
1715                                         err = -EOPNOTSUPP;
1716                                         if (!rl)
1717                                                 up_read(&ni->runlist.lock);
1718                                         goto err_out;
1719                                 } else if (!is_retry &&
1720                                                 lcn == LCN_RL_NOT_MAPPED) {
1721                                         is_retry = TRUE;
1722                                         /*
1723                                          * Attempt to map runlist, dropping
1724                                          * lock for the duration.
1725                                          */
1726                                         up_read(&ni->runlist.lock);
1727                                         err = ntfs_map_runlist(ni, vcn);
1728                                         if (likely(!err))
1729                                                 goto lock_retry_remap;
1730                                         rl = NULL;
1731                                 } else if (!rl)
1732                                         up_read(&ni->runlist.lock);
1733                                 /*
1734                                  * Failed to map the buffer, even after
1735                                  * retrying.
1736                                  */
1737                                 if (!err)
1738                                         err = -EIO;
1739                                 bh->b_blocknr = -1;
1740                                 ntfs_error(vol->sb, "Failed to write to inode "
1741                                                 "0x%lx, attribute type 0x%x, "
1742                                                 "vcn 0x%llx, offset 0x%x "
1743                                                 "because its location on disk "
1744                                                 "could not be determined%s "
1745                                                 "(error code %i).",
1746                                                 ni->mft_no, ni->type,
1747                                                 (unsigned long long)vcn,
1748                                                 vcn_ofs, is_retry ? " even "
1749                                                 "after retrying" : "", err);
1750                                 goto err_out;
1751                         }
1752                         /* We now have a successful remap, i.e. lcn >= 0. */
1753
1754                         /* Setup buffer head to correct block. */
1755                         bh->b_blocknr = ((lcn << vol->cluster_size_bits)
1756                                         + vcn_ofs) >> blocksize_bits;
1757                         set_buffer_mapped(bh);
1758
1759                         // FIXME: Something analogous to this is needed for
1760                         // each newly allocated block, i.e. BH_New.
1761                         // FIXME: Might need to take this out of the
1762                         // if (!buffer_mapped(bh)) {}, depending on how we
1763                         // implement things during the allocated_size and
1764                         // initialized_size extension code above.
1765                         if (buffer_new(bh)) {
1766                                 clear_buffer_new(bh);
1767                                 unmap_underlying_metadata(bh->b_bdev,
1768                                                 bh->b_blocknr);
1769                                 if (PageUptodate(page)) {
1770                                         set_buffer_uptodate(bh);
1771                                         continue;
1772                                 }
1773                                 /*
1774                                  * Page is _not_ uptodate, zero surrounding
1775                                  * region.  NOTE: This is how we decide whether
1776                                  * to zero or not!
1777                                  */
1778                                 if (block_end > to || block_start < from) {
1779                                         void *kaddr;
1780
1781                                         kaddr = kmap_atomic(page, KM_USER0);
1782                                         if (block_end > to)
1783                                                 memset(kaddr + to, 0,
1784                                                                 block_end - to);
1785                                         if (block_start < from)
1786                                                 memset(kaddr + block_start, 0,
1787                                                                 from -
1788                                                                 block_start);
1789                                         flush_dcache_page(page);
1790                                         kunmap_atomic(kaddr, KM_USER0);
1791                                 }
1792                                 continue;
1793                         }
1794                 }
1795                 /* @bh is mapped, set it uptodate if the page is uptodate. */
1796                 if (PageUptodate(page)) {
1797                         if (!buffer_uptodate(bh))
1798                                 set_buffer_uptodate(bh);
1799                         continue;
1800                 }
1801                 /*
1802                  * The page is not uptodate. The buffer is mapped. If it is not
1803                  * uptodate, and it is only partially being written to, we need
1804                  * to read the buffer in before the write, i.e. right now.
1805                  */
1806                 if (!buffer_uptodate(bh) &&
1807                                 (block_start < from || block_end > to)) {
1808                         ll_rw_block(READ, 1, &bh);
1809                         *wait_bh++ = bh;
1810                 }
1811         } while (block++, block_start = block_end,
1812                         (bh = bh->b_this_page) != head);
1813
1814         /* Release the lock if we took it. */
1815         if (rl) {
1816                 up_read(&ni->runlist.lock);
1817                 rl = NULL;
1818         }
1819
1820         /* If we issued read requests, let them complete. */
1821         while (wait_bh > wait) {
1822                 wait_on_buffer(*--wait_bh);
1823                 if (!buffer_uptodate(*wait_bh))
1824                         return -EIO;
1825         }
1826
1827         ntfs_debug("Done.");
1828         return 0;
1829 err_out:
1830         /*
1831          * Zero out any newly allocated blocks to avoid exposing stale data.
1832          * If BH_New is set, we know that the block was newly allocated in the
1833          * above loop.
1834          * FIXME: What about initialized_size increments? Have we done all the
1835          * required zeroing above?  If not, this error handling is broken, and
1836          * in particular the if (block_end <= from) check is completely bogus.
1837          */
1838         bh = head;
1839         block_start = 0;
1840         is_retry = FALSE;
1841         do {
1842                 block_end = block_start + blocksize;
1843                 if (block_end <= from)
1844                         continue;
1845                 if (block_start >= to)
1846                         break;
1847                 if (buffer_new(bh)) {
1848                         void *kaddr;
1849
1850                         clear_buffer_new(bh);
1851                         kaddr = kmap_atomic(page, KM_USER0);
1852                         memset(kaddr + block_start, 0, bh->b_size);
1853                         kunmap_atomic(kaddr, KM_USER0);
1854                         set_buffer_uptodate(bh);
1855                         mark_buffer_dirty(bh);
1856                         is_retry = TRUE;
1857                 }
1858         } while (block_start = block_end, (bh = bh->b_this_page) != head);
1859         if (is_retry)
1860                 flush_dcache_page(page);
1861         if (rl)
1862                 up_read(&ni->runlist.lock);
1863         return err;
1864 }
1865
1866 /**
1867  * ntfs_prepare_write - prepare a page for receiving data
1868  *
1869  * This is called from generic_file_write() with i_sem held on the inode
1870  * (@page->mapping->host).  The @page is locked but not kmap()ped.  The source
1871  * data has not yet been copied into the @page.
1872  *
1873  * Need to extend the attribute/fill in holes if necessary, create blocks and
1874  * make partially overwritten blocks uptodate.
1875  *
1876  * i_size is not to be modified yet.
1877  *
1878  * Return 0 on success or -errno on error.
1879  *
1880  * Should be using block_prepare_write() [support for sparse files] or
1881  * cont_prepare_write() [no support for sparse files].  Cannot do that due to
1882  * ntfs specifics but can look at them for implementation guidance.
1883  *
1884  * Note: In the range, @from is inclusive and @to is exclusive, i.e. @from is
1885  * the first byte in the page that will be written to and @to is the first byte
1886  * after the last byte that will be written to.
1887  */
1888 static int ntfs_prepare_write(struct file *file, struct page *page,
1889                 unsigned from, unsigned to)
1890 {
1891         s64 new_size;
1892         loff_t i_size;
1893         struct inode *vi = page->mapping->host;
1894         ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
1895         ntfs_volume *vol = ni->vol;
1896         ntfs_attr_search_ctx *ctx = NULL;
1897         MFT_RECORD *m = NULL;
1898         ATTR_RECORD *a;
1899         u8 *kaddr;
1900         u32 attr_len;
1901         int err;
1902
1903         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
1904                         "0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
1905                         page->index, from, to);
1906         BUG_ON(!PageLocked(page));
1907         BUG_ON(from > PAGE_CACHE_SIZE);
1908         BUG_ON(to > PAGE_CACHE_SIZE);
1909         BUG_ON(from > to);
1910         BUG_ON(NInoMstProtected(ni));
1911         /*
1912          * If a previous ntfs_truncate() failed, repeat it and abort if it
1913          * fails again.
1914          */
1915         if (unlikely(NInoTruncateFailed(ni))) {
1916                 down_write(&vi->i_alloc_sem);
1917                 err = ntfs_truncate(vi);
1918                 up_write(&vi->i_alloc_sem);
1919                 if (err || NInoTruncateFailed(ni)) {
1920                         if (!err)
1921                                 err = -EIO;
1922                         goto err_out;
1923                 }
1924         }
1925         /* If the attribute is not resident, deal with it elsewhere. */
1926         if (NInoNonResident(ni)) {
1927                 /*
1928                  * Only unnamed $DATA attributes can be compressed, encrypted,
1929                  * and/or sparse.
1930                  */
1931                 if (ni->type == AT_DATA && !ni->name_len) {
1932                         /* If file is encrypted, deny access, just like NT4. */
1933                         if (NInoEncrypted(ni)) {
1934                                 ntfs_debug("Denying write access to encrypted "
1935                                                 "file.");
1936                                 return -EACCES;
1937                         }
1938                         /* Compressed data streams are handled in compress.c. */
1939                         if (NInoCompressed(ni)) {
1940                                 // TODO: Implement and replace this check with
1941                                 // return ntfs_write_compressed_block(page);
1942                                 ntfs_error(vi->i_sb, "Writing to compressed "
1943                                                 "files is not supported yet. "
1944                                                 "Sorry.");
1945                                 return -EOPNOTSUPP;
1946                         }
1947                         // TODO: Implement and remove this check.
1948                         if (NInoSparse(ni)) {
1949                                 ntfs_error(vi->i_sb, "Writing to sparse files "
1950                                                 "is not supported yet. Sorry.");
1951                                 return -EOPNOTSUPP;
1952                         }
1953                 }
1954                 /* Normal data stream. */
1955                 return ntfs_prepare_nonresident_write(page, from, to);
1956         }
1957         /*
1958          * Attribute is resident, implying it is not compressed, encrypted, or
1959          * sparse.
1960          */
1961         BUG_ON(page_has_buffers(page));
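             /*
              * The byte offset in the attribute of the end of this write,
              * i.e. the size the attribute value needs to grow to in order
              * to hold it.
              */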
1962         new_size = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
1963         /* If we do not need to resize the attribute allocation we are done. */
1964         if (new_size <= i_size_read(vi))
1965                 goto done;
1966         /* Map, pin, and lock the (base) mft record. */
1967         if (!NInoAttr(ni))
1968                 base_ni = ni;
1969         else
1970                 base_ni = ni->ext.base_ntfs_ino;
1971         m = map_mft_record(base_ni);
1972         if (IS_ERR(m)) {
1973                 err = PTR_ERR(m);
1974                 m = NULL;
1975                 ctx = NULL;
1976                 goto err_out;
1977         }
1978         ctx = ntfs_attr_get_search_ctx(base_ni, m);
1979         if (unlikely(!ctx)) {
1980                 err = -ENOMEM;
1981                 goto err_out;
1982         }
1983         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1984                         CASE_SENSITIVE, 0, NULL, 0, ctx);
1985         if (unlikely(err)) {
1986                 if (err == -ENOENT)
1987                         err = -EIO;
1988                 goto err_out;
1989         }
1990         m = ctx->mrec;
1991         a = ctx->attr;
1992         /* The total length of the attribute value. */
1993         attr_len = le32_to_cpu(a->data.resident.value_length);
1994         /* Fix up a possible previous failure of ntfs_commit_write(). */
1995         i_size = i_size_read(vi);
1996         if (unlikely(attr_len > i_size)) {
1997                 attr_len = i_size;
1998                 a->data.resident.value_length = cpu_to_le32(attr_len);
1999         }
2000         /* If we do not need to resize the attribute allocation we are done. */
2001         if (new_size <= attr_len)
2002                 goto done_unm;
2003         /* Check if new size is allowed in $AttrDef. */
2004         err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
2005         if (unlikely(err)) {
2006                 if (err == -ERANGE) {
2007                         ntfs_error(vol->sb, "Write would cause the inode "
2008                                         "0x%lx to exceed the maximum size for "
2009                                         "its attribute type (0x%x).  Aborting "
2010                                         "write.", vi->i_ino,
2011                                         le32_to_cpu(ni->type));
2012                 } else {
2013                         ntfs_error(vol->sb, "Inode 0x%lx has unknown "
2014                                         "attribute type 0x%x.  Aborting "
2015                                         "write.", vi->i_ino,
2016                                         le32_to_cpu(ni->type));
2017                         err = -EIO;
2018                 }
2019                 goto err_out2;
2020         }
2021         /*
2022          * Extend the attribute record to be able to store the new attribute
2023          * size.
2024          */
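             /*
              * Note that a resident attribute value can never be larger than
              * the mft record itself, so reject such a size straight away.
              */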
2025         if (new_size >= vol->mft_record_size || ntfs_attr_record_resize(m, a,
2026                         le16_to_cpu(a->data.resident.value_offset) +
2027                         new_size)) {
2028                 /* Not enough space in the mft record. */
2029                 ntfs_error(vol->sb, "Not enough space in the mft record for "
2030                                 "the resized attribute value.  This is not "
2031                                 "supported yet.  Aborting write.");
2032                 err = -EOPNOTSUPP;
2033                 goto err_out2;
2034         }
2035         /*
2036          * We have enough space in the mft record to fit the write.  This
2037          * implies the attribute is smaller than the mft record and hence the
2038          * attribute must be in a single page and hence page->index must be 0.
2039          */
2040         BUG_ON(page->index);
2041         /*
2042          * If the beginning of the write is past the old size, enlarge the
2043          * attribute value up to the beginning of the write and fill it with
2044          * zeroes.
2045          */
2046         if (from > attr_len) {
2047                 memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
2048                                 attr_len, 0, from - attr_len);
2049                 a->data.resident.value_length = cpu_to_le32(from);
2050                 /* Zero the corresponding area in the page as well. */
2051                 if (PageUptodate(page)) {
2052                         kaddr = kmap_atomic(page, KM_USER0);
2053                         memset(kaddr + attr_len, 0, from - attr_len);
2054                         kunmap_atomic(kaddr, KM_USER0);
2055                         flush_dcache_page(page);
2056                 }
2057         }
2058         flush_dcache_mft_record_page(ctx->ntfs_ino);
2059         mark_mft_record_dirty(ctx->ntfs_ino);
2060 done_unm:
2061         ntfs_attr_put_search_ctx(ctx);
2062         unmap_mft_record(base_ni);
2063         /*
2064          * Because resident attributes are handled by memcpy() to/from the
2065          * corresponding MFT record, and because this form of i/o is byte
2066          * aligned rather than block aligned, there is no need to bring the
2067          * page uptodate here as in the non-resident case where we need to
2068          * bring the buffers straddled by the write uptodate before
2069          * generic_file_write() does the copying from userspace.
2070          *
2071  * We thus defer bringing the page region outside the written-to region
2072  * uptodate until ntfs_commit_write(), which makes the code simpler and
2073  * saves one atomic kmap.
2074          */
2075 done:
2076         ntfs_debug("Done.");
2077         return 0;
2078 err_out:
2079         if (err == -ENOMEM)
2080                 ntfs_warning(vi->i_sb, "Error allocating memory required to "
2081                                 "prepare the write.");
2082         else {
2083                 ntfs_error(vi->i_sb, "Resident attribute prepare write failed "
2084                                 "with error %i.", err);
2085                 NVolSetErrors(vol);
2086                 make_bad_inode(vi);
2087         }
2088 err_out2:
2089         if (ctx)
2090                 ntfs_attr_put_search_ctx(ctx);
2091         if (m)
2092                 unmap_mft_record(base_ni);
2093         return err;
2094 }
2095
2096 /**
2097  * ntfs_commit_nonresident_write - commit a write to a non-resident page
2098  *
2099  */
2100 static int ntfs_commit_nonresident_write(struct page *page,
2101                 unsigned from, unsigned to)
2102 {
2103         s64 pos = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
2104         struct inode *vi = page->mapping->host;
2105         struct buffer_head *bh, *head;
2106         unsigned int block_start, block_end, blocksize;
2107         BOOL partial;
2108
2109         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
2110                         "0x%lx, from = %u, to = %u.", vi->i_ino,
2111                         NTFS_I(vi)->type, page->index, from, to);
2112         blocksize = 1 << vi->i_blkbits;
2113
2114         // FIXME: We need a whole slew of special cases in here for compressed
2115         // files for example...
2116         // For now, we know ntfs_prepare_write() would have failed, so we cannot
2117         // get here in any of the cases we would have to special case; thus this
2118         // is effectively an unrolled copy of generic_commit_write().
2119
2120         bh = head = page_buffers(page);
2121         block_start = 0;
2122         partial = FALSE;
2123         do {
2124                 block_end = block_start + blocksize;
2125                 if (block_end <= from || block_start >= to) {
2126                         if (!buffer_uptodate(bh))
2127                                 partial = TRUE;
2128                 } else {
2129                         set_buffer_uptodate(bh);
2130                         mark_buffer_dirty(bh);
2131                 }
2132         } while (block_start = block_end, (bh = bh->b_this_page) != head);
2133         /*
2134          * If this is a partial write which happened to make all buffers
2135          * uptodate then we can optimize away a bogus ->readpage() for the next
2136          * read().  Here we 'discover' whether the page went uptodate as a
2137          * result of this (potentially partial) write.
2138          */
2139         if (!partial)
2140                 SetPageUptodate(page);
2141         /*
2142          * Not convinced about this at all.  See disparity comment above.  For
2143          * now we know ntfs_prepare_write() would have failed in the write
2144          * exceeds i_size case, so this will never trigger which is fine.
2145          */
2146         if (pos > i_size_read(vi)) {
2147                 ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
2148                                 "not supported yet.  Sorry.");
2149                 return -EOPNOTSUPP;
2150                 // vi->i_size = pos;
2151                 // mark_inode_dirty(vi);
2152         }
2153         ntfs_debug("Done.");
2154         return 0;
2155 }
2156
2157 /**
2158  * ntfs_commit_write - commit the received data
2159  *
2160  * This is called from generic_file_write() with i_sem held on the inode
2161  * (@page->mapping->host).  The @page is locked but not kmap()ped.  The source
2162  * data has already been copied into the @page.  ntfs_prepare_write() has been
2163  * called before the data copied and it returned success so we can take the
2164  * results of various BUG checks and some error handling for granted.
2165  *
2166  * Need to mark modified blocks dirty so they get written out later when
2167  * ntfs_writepage() is invoked by the VM.
2168  *
2169  * Return 0 on success or -errno on error.
2170  *
2171  * Should be using generic_commit_write().  This marks buffers uptodate and
2172  * dirty, sets the page uptodate if all buffers in the page are uptodate, and
2173  * updates i_size if the end of io is beyond i_size.  In that case, it also
2174  * marks the inode dirty.
2175  *
2176  * Cannot use generic_commit_write() due to ntfs specifics but can look at
2177  * it for implementation guidance.
2178  *
2179  * If things have gone as outlined in ntfs_prepare_write(), then we do not
2180  * need to do any page content modifications here at all, except in the write
2181  * to resident attribute case, where we need to do the uptodate bringing here
2182  * which we combine with the copying into the mft record which means we save
2183  * one atomic kmap.
2184  */
2185 static int ntfs_commit_write(struct file *file, struct page *page,
2186                 unsigned from, unsigned to)
2187 {
2188         struct inode *vi = page->mapping->host;
2189         ntfs_inode *base_ni, *ni = NTFS_I(vi);
2190         char *kaddr, *kattr;
2191         ntfs_attr_search_ctx *ctx;
2192         MFT_RECORD *m;
2193         ATTR_RECORD *a;
2194         u32 attr_len;
2195         int err;
2196
2197         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
2198                         "0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
2199                         page->index, from, to);
2200         /* If the attribute is not resident, deal with it elsewhere. */
2201         if (NInoNonResident(ni)) {
2202                 /* Only unnamed $DATA attributes can be compressed/encrypted. */
2203                 if (ni->type == AT_DATA && !ni->name_len) {
2204                         /* Encrypted files need separate handling. */
2205                         if (NInoEncrypted(ni)) {
2206                                 // We never get here at present!
2207                                 BUG();
2208                         }
2209                         /* Compressed data streams are handled in compress.c. */
2210                         if (NInoCompressed(ni)) {
2211                                 // TODO: Implement this!
2212                                 // return ntfs_write_compressed_block(page);
2213                                 // We never get here at present!
2214                                 BUG();
2215                         }
2216                 }
2217                 /* Normal data stream. */
2218                 return ntfs_commit_nonresident_write(page, from, to);
2219         }
2220         /*
2221          * Attribute is resident, implying it is not compressed, encrypted, or
2222          * sparse.
2223          */
2224         if (!NInoAttr(ni))
2225                 base_ni = ni;
2226         else
2227                 base_ni = ni->ext.base_ntfs_ino;
2228         /* Map, pin, and lock the mft record. */
2229         m = map_mft_record(base_ni);
2230         if (IS_ERR(m)) {
2231                 err = PTR_ERR(m);
2232                 m = NULL;
2233                 ctx = NULL;
2234                 goto err_out;
2235         }
2236         ctx = ntfs_attr_get_search_ctx(base_ni, m);
2237         if (unlikely(!ctx)) {
2238                 err = -ENOMEM;
2239                 goto err_out;
2240         }
2241         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2242                         CASE_SENSITIVE, 0, NULL, 0, ctx);
2243         if (unlikely(err)) {
2244                 if (err == -ENOENT)
2245                         err = -EIO;
2246                 goto err_out;
2247         }
2248         a = ctx->attr;
2249         /* The total length of the attribute value. */
2250         attr_len = le32_to_cpu(a->data.resident.value_length);
2251         BUG_ON(from > attr_len);
2252         kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
2253         kaddr = kmap_atomic(page, KM_USER0);
2254         /* Copy the received data from the page to the mft record. */
2255         memcpy(kattr + from, kaddr + from, to - from);
2256         /* Update the attribute length if necessary. */
2257         if (to > attr_len) {
2258                 attr_len = to;
2259                 a->data.resident.value_length = cpu_to_le32(attr_len);
2260         }
2261         /*
2262          * If the page is not uptodate, bring the out of bounds area(s)
2263          * uptodate by copying data from the mft record to the page.
2264          */
2265         if (!PageUptodate(page)) {
2266                 if (from > 0)
2267                         memcpy(kaddr, kattr, from);
2268                 if (to < attr_len)
2269                         memcpy(kaddr + to, kattr + to, attr_len - to);
2270                 /* Zero the region outside the end of the attribute value. */
2271                 if (attr_len < PAGE_CACHE_SIZE)
2272                         memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
2273                 /*
2274                  * The probability of not having done any of the above is
2275                  * extremely small, so we just flush unconditionally.
2276                  */
2277                 flush_dcache_page(page);
2278                 SetPageUptodate(page);
2279         }
2280         kunmap_atomic(kaddr, KM_USER0);
2281         /* Update i_size if necessary. */
2282         if (i_size_read(vi) < attr_len) {
2283                 unsigned long flags;
2284
2285                 write_lock_irqsave(&ni->size_lock, flags);
2286                 ni->allocated_size = ni->initialized_size = attr_len;
2287                 i_size_write(vi, attr_len);
2288                 write_unlock_irqrestore(&ni->size_lock, flags);
2289         }
2290         /* Mark the mft record dirty, so it gets written back. */
2291         flush_dcache_mft_record_page(ctx->ntfs_ino);
2292         mark_mft_record_dirty(ctx->ntfs_ino);
2293         ntfs_attr_put_search_ctx(ctx);
2294         unmap_mft_record(base_ni);
2295         ntfs_debug("Done.");
2296         return 0;
2297 err_out:
2298         if (err == -ENOMEM) {
2299                 ntfs_warning(vi->i_sb, "Error allocating memory required to "
2300                                 "commit the write.");
2301                 if (PageUptodate(page)) {
2302                         ntfs_warning(vi->i_sb, "Page is uptodate, setting "
2303                                         "dirty so the write will be retried "
2304                                         "later on by the VM.");
2305                         /*
2306                          * Put the page on mapping->dirty_pages, but leave its
2307                          * buffers' dirty state as-is.
2308                          */
2309                         __set_page_dirty_nobuffers(page);
2310                         err = 0;
2311                 } else
2312                         ntfs_error(vi->i_sb, "Page is not uptodate.  Written "
2313                                         "data has been lost.");
2314         } else {
2315                 ntfs_error(vi->i_sb, "Resident attribute commit write failed "
2316                                 "with error %i.", err);
2317                 NVolSetErrors(ni->vol);
2318                 make_bad_inode(vi);
2319         }
2320         if (ctx)
2321                 ntfs_attr_put_search_ctx(ctx);
2322         if (m)
2323                 unmap_mft_record(base_ni);
2324         return err;
2325 }
2326
2327 #endif  /* NTFS_RW */
2328
2329 /**
2330  * ntfs_aops - general address space operations for inodes and attributes
2331  */
2332 struct address_space_operations ntfs_aops = {
2333         .readpage       = ntfs_readpage,        /* Fill page with data. */
2334         .sync_page      = block_sync_page,      /* Currently, just unplugs the
2335                                                    disk request queue. */
2336 #ifdef NTFS_RW
2337         .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
2338         .prepare_write  = ntfs_prepare_write,   /* Prepare page and buffers
2339                                                    ready to receive data. */
2340         .commit_write   = ntfs_commit_write,    /* Commit received data. */
2341 #endif /* NTFS_RW */
2342 };
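/*
 * Illustrative sketch, not part of the original file: roughly how the
 * generic VFS paths of this kernel era are expected to invoke the
 * operations above.  The call sites and the local variables (mapping,
 * file, page, from, to, err) are assumptions for illustration only.
 */
#if 0	/* example only, never compiled */
        /* Read path: do_generic_mapping_read() fills in a missing page. */
        err = mapping->a_ops->readpage(file, page);

        /* Write path: prepare the page, copy in the user data, commit it. */
        err = mapping->a_ops->prepare_write(file, page, from, to);
        if (!err) {
                /* ...the caller copies the user buffer into the page here... */
                err = mapping->a_ops->commit_write(file, page, from, to);
        }
#endif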
2343
2344 /**
2345  * ntfs_mst_aops - general address space operations for mst protected inodes
2346  *                 and attributes
2347  */
2348 struct address_space_operations ntfs_mst_aops = {
2349         .readpage       = ntfs_readpage,        /* Fill page with data. */
2350         .sync_page      = block_sync_page,      /* Currently, just unplugs the
2351                                                    disk request queue. */
2352 #ifdef NTFS_RW
2353         .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
2354         .set_page_dirty = __set_page_dirty_nobuffers,   /* Set the page dirty
2355                                                    without touching the buffers
2356                                                    belonging to the page. */
2357 #endif /* NTFS_RW */
2358 };
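/*
 * Illustrative sketch, not part of the original file: the ntfs inode
 * setup code (fs/ntfs/inode.c) is expected to pick one of the two tables
 * above depending on whether the attribute is mst protected.  The exact
 * call site shown here is an assumption for illustration only.
 */
#if 0	/* example only, never compiled */
        if (NInoMstProtected(ni))
                vi->i_mapping->a_ops = &ntfs_mst_aops;
        else
                vi->i_mapping->a_ops = &ntfs_aops;
#endif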
2359
2360 #ifdef NTFS_RW
2361
2362 /**
2363  * mark_ntfs_record_dirty - mark an ntfs record dirty
2364  * @page:       page containing the ntfs record to mark dirty
2365  * @ofs:        byte offset within @page at which the ntfs record begins
2366  *
2367  * Set the buffers and the page in which the ntfs record is located dirty.
2368  *
2369  * The latter also marks the vfs inode the ntfs record belongs to dirty
2370  * (I_DIRTY_PAGES only).
2371  *
2372  * If the page does not have buffers, we create them and set them uptodate.
2373  * The page may not be locked, which is why we need to handle the buffers under
2374  * the mapping->private_lock.  Once the buffers are marked dirty we no longer
2375  * need the lock since try_to_free_buffers() does not free dirty buffers.
2376  */
2377 void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
2378         struct address_space *mapping = page->mapping;
2379         ntfs_inode *ni = NTFS_I(mapping->host);
2380         struct buffer_head *bh, *head, *buffers_to_free = NULL;
2381         unsigned int end, bh_size, bh_ofs;
2382
2383         BUG_ON(!PageUptodate(page));
2384         end = ofs + ni->itype.index.block_size;
2385         bh_size = 1 << VFS_I(ni)->i_blkbits;
2386         spin_lock(&mapping->private_lock);
2387         if (unlikely(!page_has_buffers(page))) {
2388                 spin_unlock(&mapping->private_lock);
2389                 bh = head = alloc_page_buffers(page, bh_size, 1);
2390                 spin_lock(&mapping->private_lock);
2391                 if (likely(!page_has_buffers(page))) {
2392                         struct buffer_head *tail;
2393
2394                         do {
2395                                 set_buffer_uptodate(bh);
2396                                 tail = bh;
2397                                 bh = bh->b_this_page;
2398                         } while (bh);
2399                         tail->b_this_page = head;
2400                         attach_page_buffers(page, head);
2401                 } else
2402                         buffers_to_free = bh;
2403         }
2404         bh = head = page_buffers(page);
2405         BUG_ON(!bh);
2406         do {
2407                 bh_ofs = bh_offset(bh);
2408                 if (bh_ofs + bh_size <= ofs)
2409                         continue;       /* Wholly before the record. */
2410                 if (unlikely(bh_ofs >= end))
2411                         break;          /* Past the end of the record. */
2412                 set_buffer_dirty(bh);   /* Overlaps the record. */
2413         } while ((bh = bh->b_this_page) != head);
2414         spin_unlock(&mapping->private_lock);
2415         __set_page_dirty_nobuffers(page);
2416         if (unlikely(buffers_to_free)) {
2417                 do {
2418                         bh = buffers_to_free->b_this_page;
2419                         free_buffer_head(buffers_to_free);
2420                         buffers_to_free = bh;
2421                 } while (buffers_to_free);
2422         }
2423 }
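/*
 * Illustrative sketch, not part of the original file: a caller that has
 * just modified an mst protected record mapped into @page (e.g. an index
 * allocation block) dirties only the buffers spanning that record rather
 * than the whole page.  The pointer ia to the record inside the kmapped
 * page is a hypothetical variable for illustration only.
 */
#if 0	/* example only, never compiled */
        mark_ntfs_record_dirty(page, (u8*)ia - (u8*)page_address(page));
#endif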
2424
2425 #endif /* NTFS_RW */