/**
 * aops.c - NTFS kernel address space operations and page cache handling.
 *          Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2005 Anton Altaparmakov
 * Copyright (c) 2002 Richard Russon
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "aops.h"
#include "attrib.h"
#include "debug.h"
#include "inode.h"
#include "mft.h"
#include "runlist.h"
#include "types.h"
#include "ntfs.h"

/**
 * ntfs_end_buffer_async_read - async io completion for reading attributes
 * @bh:         buffer head on which io is completed
 * @uptodate:   whether @bh is now uptodate or not
 *
 * Asynchronous I/O completion handler for reading pages belonging to the
 * attribute address space of an inode.  The inodes can either be files or
 * directories or they can be fake inodes describing some attribute.
 *
 * If NInoMstProtected(), perform the post read mst fixups when all IO on the
 * page has been completed and mark the page uptodate or set the error bit on
 * the page.  To determine the size of the records that need fixing up, we
 * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs
 * record size, and index_block_size_bits, to the log(base 2) of the ntfs
 * record size.
 */
static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        static DEFINE_SPINLOCK(page_uptodate_lock);
        unsigned long flags;
        struct buffer_head *tmp;
        struct page *page;
        ntfs_inode *ni;
        int page_uptodate = 1;

        page = bh->b_page;
        ni = NTFS_I(page->mapping->host);

        if (likely(uptodate)) {
                s64 file_ofs, initialized_size;

                set_buffer_uptodate(bh);

                file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
                                bh_offset(bh);
                read_lock_irqsave(&ni->size_lock, flags);
                initialized_size = ni->initialized_size;
                read_unlock_irqrestore(&ni->size_lock, flags);
                /* Check for the current buffer head overflowing. */
                if (file_ofs + bh->b_size > initialized_size) {
                        char *addr;
                        int ofs = 0;

                        if (file_ofs < initialized_size)
                                ofs = initialized_size - file_ofs;
                        addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
                        memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
                        flush_dcache_page(page);
                        kunmap_atomic(addr, KM_BIO_SRC_IRQ);
                }
        } else {
                clear_buffer_uptodate(bh);
                ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.",
                                (unsigned long long)bh->b_blocknr);
                SetPageError(page);
        }
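        /*
         * All buffers of the page funnel through this handler; the static
         * spinlock serializes the circular b_this_page walk below so that
         * exactly one completion observes all buffers finished and gets to
         * finalize the page.
         */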
        spin_lock_irqsave(&page_uptodate_lock, flags);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        if (likely(buffer_locked(tmp)))
                                goto still_busy;
                        /* Async buffers must be locked. */
                        BUG();
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        spin_unlock_irqrestore(&page_uptodate_lock, flags);
        /*
         * If none of the buffers had errors then we can set the page uptodate,
         * but we first have to perform the post read mst fixups, if the
         * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
         * Note we ignore fixup errors as those are detected when
         * map_mft_record() is called which gives us per record granularity
         * rather than per page granularity.
         */
        if (!NInoMstProtected(ni)) {
                if (likely(page_uptodate && !PageError(page)))
                        SetPageUptodate(page);
        } else {
                char *addr;
                unsigned int i, recs;
                u32 rec_size;

                rec_size = ni->itype.index.block_size;
                recs = PAGE_CACHE_SIZE / rec_size;
                /* Should have been verified before we got here... */
                BUG_ON(!recs);
                addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
                for (i = 0; i < recs; i++)
                        post_read_mst_fixup((NTFS_RECORD*)(addr +
                                        i * rec_size), rec_size);
                flush_dcache_page(page);
                kunmap_atomic(addr, KM_BIO_SRC_IRQ);
                if (likely(page_uptodate && !PageError(page)))
                        SetPageUptodate(page);
        }
        unlock_page(page);
        return;
still_busy:
        spin_unlock_irqrestore(&page_uptodate_lock, flags);
        return;
}
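
/*
 * A minimal illustrative sketch, not part of the driver (the helper name is
 * hypothetical): the per-page record arithmetic ntfs_end_buffer_async_read()
 * relies on.  With the common 1024-byte mft records and 4096-byte pages,
 * each page holds four records and the fixups above run at page offsets
 * 0x0, 0x400, 0x800 and 0xc00.
 */
static inline unsigned int ntfs_example_recs_per_page(const ntfs_inode *ni)
{
        /* For mst protected inodes, block_size holds the ntfs record size. */
        return PAGE_CACHE_SIZE / ni->itype.index.block_size;
}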

/**
 * ntfs_read_block - fill a @page of an address space with data
 * @page:       page cache page to fill with data
 *
 * Fill the page @page of the address space belonging to the @page->host inode.
 * We read each buffer asynchronously and when all buffers are read in, our io
 * completion handler ntfs_end_buffer_async_read(), if required, automatically
 * applies the mst fixups to the page before finally marking it uptodate and
 * unlocking it.
 *
 * We only enforce the allocated_size limit because i_size is checked for in
 * generic_file_read().
 *
 * Return 0 on success and -errno on error.
 *
 * Contains an adapted version of fs/buffer.c::block_read_full_page().
 */
static int ntfs_read_block(struct page *page)
{
        VCN vcn;
        LCN lcn;
        ntfs_inode *ni;
        ntfs_volume *vol;
        runlist_element *rl;
        struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
        sector_t iblock, lblock, zblock;
        unsigned long flags;
        unsigned int blocksize, vcn_ofs;
        int i, nr;
        unsigned char blocksize_bits;

        ni = NTFS_I(page->mapping->host);
        vol = ni->vol;

        /* $MFT/$DATA must have its complete runlist in memory at all times. */
        BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));

        blocksize_bits = VFS_I(ni)->i_blkbits;
        blocksize = 1 << blocksize_bits;

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
        bh = head = page_buffers(page);
        if (unlikely(!bh)) {
                unlock_page(page);
                return -ENOMEM;
        }

        iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
        read_lock_irqsave(&ni->size_lock, flags);
        lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
        zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
        read_unlock_irqrestore(&ni->size_lock, flags);

        /* Loop through all the buffers in the page. */
        rl = NULL;
        nr = i = 0;
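        /*
         * Buffers that need i/o are collected in arr[] so they can be locked
         * and submitted in one batch once the whole page has been mapped.
         */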
        do {
                u8 *kaddr;

                if (unlikely(buffer_uptodate(bh)))
                        continue;
                if (unlikely(buffer_mapped(bh))) {
                        arr[nr++] = bh;
                        continue;
                }
                bh->b_bdev = vol->sb->s_bdev;
                /* Is the block within the allowed limits? */
                if (iblock < lblock) {
                        BOOL is_retry = FALSE;

                        /* Convert iblock into corresponding vcn and offset. */
                        vcn = (VCN)iblock << blocksize_bits >>
                                        vol->cluster_size_bits;
                        vcn_ofs = ((VCN)iblock << blocksize_bits) &
                                        vol->cluster_size_mask;
                        if (!rl) {
lock_retry_remap:
                                down_read(&ni->runlist.lock);
                                rl = ni->runlist.rl;
                        }
                        if (likely(rl != NULL)) {
                                /* Seek to element containing target vcn. */
                                while (rl->length && rl[1].vcn <= vcn)
                                        rl++;
                                lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
                        } else
                                lcn = LCN_RL_NOT_MAPPED;
                        /* Successful remap. */
                        if (lcn >= 0) {
                                /* Setup buffer head to correct block. */
                                bh->b_blocknr = ((lcn << vol->cluster_size_bits)
                                                + vcn_ofs) >> blocksize_bits;
                                set_buffer_mapped(bh);
                                /* Only read initialized data blocks. */
                                if (iblock < zblock) {
                                        arr[nr++] = bh;
                                        continue;
                                }
                                /* Fully non-initialized data block, zero it. */
                                goto handle_zblock;
                        }
                        /* It is a hole, need to zero it. */
                        if (lcn == LCN_HOLE)
                                goto handle_hole;
                        /* If first try and runlist unmapped, map and retry. */
                        if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
                                int err;
                                is_retry = TRUE;
                                /*
                                 * Attempt to map runlist, dropping lock for
                                 * the duration.
                                 */
                                up_read(&ni->runlist.lock);
                                err = ntfs_map_runlist(ni, vcn);
                                if (likely(!err))
                                        goto lock_retry_remap;
                                rl = NULL;
                                lcn = err;
                        }
                        /* Hard error, zero out region. */
                        bh->b_blocknr = -1;
                        SetPageError(page);
                        ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
                                        "attribute type 0x%x, vcn 0x%llx, "
                                        "offset 0x%x because its location on "
                                        "disk could not be determined%s "
                                        "(error code %lli).", ni->mft_no,
                                        ni->type, (unsigned long long)vcn,
                                        vcn_ofs, is_retry ? " even after "
                                        "retrying" : "", (long long)lcn);
                }
                /*
                 * Either iblock was outside lblock limits or
                 * ntfs_rl_vcn_to_lcn() returned error.  Just zero that portion
                 * of the page and set the buffer uptodate.
                 */
handle_hole:
                bh->b_blocknr = -1UL;
                clear_buffer_mapped(bh);
handle_zblock:
                kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + i * blocksize, 0, blocksize);
                flush_dcache_page(page);
                kunmap_atomic(kaddr, KM_USER0);
                set_buffer_uptodate(bh);
        } while (i++, iblock++, (bh = bh->b_this_page) != head);

        /* Release the lock if we took it. */
        if (rl)
                up_read(&ni->runlist.lock);

        /* Check we have at least one buffer ready for i/o. */
        if (nr) {
                struct buffer_head *tbh;

                /* Lock the buffers. */
                for (i = 0; i < nr; i++) {
                        tbh = arr[i];
                        lock_buffer(tbh);
                        tbh->b_end_io = ntfs_end_buffer_async_read;
                        set_buffer_async_read(tbh);
                }
                /* Finally, start i/o on the buffers. */
                for (i = 0; i < nr; i++) {
                        tbh = arr[i];
                        if (likely(!buffer_uptodate(tbh)))
                                submit_bh(READ, tbh);
                        else
                                ntfs_end_buffer_async_read(tbh, 1);
                }
                return 0;
        }
        /* No i/o was scheduled on any of the buffers. */
        if (likely(!PageError(page)))
                SetPageUptodate(page);
        else /* Signal synchronous i/o error. */
                nr = -EIO;
        unlock_page(page);
        return nr;
}
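
/*
 * Illustrative only, with a hypothetical helper name: the block-to-cluster
 * translation done inside the mapping loop above.  For example, with
 * 512-byte blocks (blocksize_bits == 9) and 4096-byte clusters
 * (cluster_size_bits == 12), block 100 lies at byte 51200, i.e. vcn 12 at
 * offset 0x800 within that cluster.
 */
static inline VCN ntfs_example_block_to_vcn(sector_t iblock,
                unsigned char blocksize_bits, const ntfs_volume *vol,
                unsigned int *vcn_ofs)
{
        *vcn_ofs = ((VCN)iblock << blocksize_bits) & vol->cluster_size_mask;
        return (VCN)iblock << blocksize_bits >> vol->cluster_size_bits;
}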

/**
 * ntfs_readpage - fill a @page of a @file with data from the device
 * @file:       open file to which the page @page belongs or NULL
 * @page:       page cache page to fill with data
 *
 * For non-resident attributes, ntfs_readpage() fills the @page of the open
 * file @file by calling the ntfs version of the generic block_read_full_page()
 * function, ntfs_read_block(), which in turn creates and reads in the buffers
 * associated with the page asynchronously.
 *
 * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
 * data from the mft record (which at this stage is most likely in memory) and
 * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
 * even if the mft record is not cached at this point in time, we need to wait
 * for it to be read in before we can do the copy.
 *
 * Return 0 on success and -errno on error.
 */
static int ntfs_readpage(struct file *file, struct page *page)
{
        ntfs_inode *ni, *base_ni;
        u8 *kaddr;
        ntfs_attr_search_ctx *ctx;
        MFT_RECORD *mrec;
        unsigned long flags;
        u32 attr_len;
        int err = 0;

        BUG_ON(!PageLocked(page));
        /*
         * This can potentially happen because we clear PageUptodate() during
         * ntfs_writepage() of MstProtected() attributes.
         */
        if (PageUptodate(page)) {
                unlock_page(page);
                return 0;
        }
        ni = NTFS_I(page->mapping->host);

        /* NInoNonResident() == NInoIndexAllocPresent() */
        if (NInoNonResident(ni)) {
                /*
                 * Only unnamed $DATA attributes can be compressed or
                 * encrypted.
                 */
                if (ni->type == AT_DATA && !ni->name_len) {
                        /* If file is encrypted, deny access, just like NT4. */
                        if (NInoEncrypted(ni)) {
                                err = -EACCES;
                                goto err_out;
                        }
                        /* Compressed data streams are handled in compress.c. */
                        if (NInoCompressed(ni))
                                return ntfs_read_compressed_block(page);
                }
                /* Normal data stream. */
                return ntfs_read_block(page);
        }
        /*
         * Attribute is resident, implying it is not compressed or encrypted.
         * This also means the attribute is smaller than an mft record and
         * hence smaller than a page, so we can simply zero out any pages with
         * index above 0.
         */
        if (unlikely(page->index > 0)) {
                kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr, 0, PAGE_CACHE_SIZE);
                flush_dcache_page(page);
                kunmap_atomic(kaddr, KM_USER0);
                goto done;
        }
        if (!NInoAttr(ni))
                base_ni = ni;
        else
                base_ni = ni->ext.base_ntfs_ino;
        /* Map, pin, and lock the mft record. */
        mrec = map_mft_record(base_ni);
        if (IS_ERR(mrec)) {
                err = PTR_ERR(mrec);
                goto err_out;
        }
        ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
        if (unlikely(!ctx)) {
                err = -ENOMEM;
                goto unm_err_out;
        }
        err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
                        CASE_SENSITIVE, 0, NULL, 0, ctx);
        if (unlikely(err))
                goto put_unm_err_out;
        attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
        read_lock_irqsave(&ni->size_lock, flags);
        if (unlikely(attr_len > ni->initialized_size))
                attr_len = ni->initialized_size;
        read_unlock_irqrestore(&ni->size_lock, flags);
        kaddr = kmap_atomic(page, KM_USER0);
        /* Copy the data to the page. */
        memcpy(kaddr, (u8*)ctx->attr +
                        le16_to_cpu(ctx->attr->data.resident.value_offset),
                        attr_len);
        /* Zero the remainder of the page. */
        memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
put_unm_err_out:
        ntfs_attr_put_search_ctx(ctx);
unm_err_out:
        unmap_mft_record(base_ni);
done:
        SetPageUptodate(page);
err_out:
        unlock_page(page);
        return err;
}
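
/*
 * Editorial note (not original driver commentary): the resident path above
 * relies on the attribute value living inside the mft record, so attr_len is
 * bounded by the mft record size and hence by PAGE_CACHE_SIZE.  Everything
 * past attr_len in page 0, and every page with index > 0, therefore reads as
 * zeroes.
 */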

#ifdef NTFS_RW

/**
 * ntfs_write_block - write a @page to the backing store
 * @page:       page cache page to write out
 * @wbc:        writeback control structure
 *
 * This function is for writing pages belonging to non-resident, non-mst
 * protected attributes to their backing store.
 *
 * For a page with buffers, map and write the dirty buffers asynchronously
 * under page writeback. For a page without buffers, create buffers for the
 * page, then proceed as above.
 *
 * If a page doesn't have buffers then the page dirty state is definitive. If
 * a page does have buffers, the page dirty state is just a hint, and the
 * buffer dirty state is definitive. (A hint with rules: dirty buffers
 * against a clean page are illegal; other combinations are legal and need to
 * be handled, in particular a dirty page containing clean buffers.)
 *
 * Return 0 on success and -errno on error.
 *
 * Based on ntfs_read_block() and __block_write_full_page().
 */
static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
{
        VCN vcn;
        LCN lcn;
        s64 initialized_size;
        loff_t i_size;
        sector_t block, dblock, iblock;
        struct inode *vi;
        ntfs_inode *ni;
        ntfs_volume *vol;
        runlist_element *rl;
        struct buffer_head *bh, *head;
        unsigned long flags;
        unsigned int blocksize, vcn_ofs;
        int err;
        BOOL need_end_writeback;
        unsigned char blocksize_bits;

        vi = page->mapping->host;
        ni = NTFS_I(vi);
        vol = ni->vol;

        ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
                        "0x%lx.", ni->mft_no, ni->type, page->index);

        BUG_ON(!NInoNonResident(ni));
        BUG_ON(NInoMstProtected(ni));

        blocksize_bits = vi->i_blkbits;
        blocksize = 1 << blocksize_bits;

        if (!page_has_buffers(page)) {
                BUG_ON(!PageUptodate(page));
                create_empty_buffers(page, blocksize,
                                (1 << BH_Uptodate) | (1 << BH_Dirty));
        }
        bh = head = page_buffers(page);
        if (unlikely(!bh)) {
                ntfs_warning(vol->sb, "Error allocating page buffers. "
                                "Redirtying page so we try again later.");
                /*
                 * Put the page back on mapping->dirty_pages, but leave its
                 * buffers' dirty state as-is.
                 */
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
                return 0;
        }

        /* NOTE: Different naming scheme to ntfs_read_block()! */

        /* The first block in the page. */
        block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);

        read_lock_irqsave(&ni->size_lock, flags);
        i_size = i_size_read(vi);
        initialized_size = ni->initialized_size;
        read_unlock_irqrestore(&ni->size_lock, flags);

        /* The first out of bounds block for the data size. */
        dblock = (i_size + blocksize - 1) >> blocksize_bits;

        /* The last (fully or partially) initialized block. */
        iblock = initialized_size >> blocksize_bits;

        /*
         * Be very careful.  We have no exclusion from __set_page_dirty_buffers
         * here, and the (potentially unmapped) buffers may become dirty at
         * any time.  If a buffer becomes dirty here after we've inspected it
         * then we just miss that fact, and the page stays dirty.
         *
         * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
         * handle that here by just cleaning them.
         */

        /*
         * Loop through all the buffers in the page, mapping all the dirty
         * buffers to disk addresses and handling any aliases from the
         * underlying block device's mapping.
         */
        rl = NULL;
        err = 0;
        do {
                BOOL is_retry = FALSE;

                if (unlikely(block >= dblock)) {
                        /*
                         * Mapped buffers outside i_size will occur, because
                         * this page can be outside i_size when there is a
                         * truncate in progress. The contents of such buffers
                         * were zeroed by ntfs_writepage().
                         *
                         * FIXME: What about the small race window where
                         * ntfs_writepage() has not done any clearing because
                         * the page was within i_size but before we get here,
                         * vmtruncate() modifies i_size?
                         */
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }

                /* Clean buffers are not written out, so no need to map them. */
                if (!buffer_dirty(bh))
                        continue;

                /* Make sure we have enough initialized size. */
                if (unlikely((block >= iblock) &&
                                (initialized_size < i_size))) {
                        /*
                         * If this page is fully outside initialized size, zero
                         * out all pages between the current initialized size
                         * and the current page. Just use ntfs_readpage() to do
                         * the zeroing transparently.
                         */
                        if (block > iblock) {
                                // TODO:
                                // For each page do:
                                // - read_cache_page()
                                // Again for each page do:
                                // - wait_on_page_locked()
                                // - Check (PageUptodate(page) &&
                                //                      !PageError(page))
                                // Update initialized size in the attribute and
                                // in the inode.
                                // Again, for each page do:
                                //      __set_page_dirty_buffers();
                                // page_cache_release()
                                // We don't need to wait on the writes.
                                // Update iblock.
                        }
                        /*
                         * The current page straddles initialized size. Zero
                         * all non-uptodate buffers and set them uptodate (and
                         * dirty?). Note, there aren't any non-uptodate buffers
                         * if the page is uptodate.
                         * FIXME: For an uptodate page, the buffers may need to
                         * be written out because they were not initialized on
                         * disk before.
                         */
                        if (!PageUptodate(page)) {
                                // TODO:
                                // Zero any non-uptodate buffers up to i_size.
                                // Set them uptodate and dirty.
                        }
                        // TODO:
                        // Update initialized size in the attribute and in the
                        // inode (up to i_size).
                        // Update iblock.
                        // FIXME: This is inefficient. Try to batch the two
                        // size changes to happen in one go.
                        ntfs_error(vol->sb, "Writing beyond initialized size "
                                        "is not supported yet. Sorry.");
                        err = -EOPNOTSUPP;
                        break;
                        // Do NOT set_buffer_new() BUT DO clear buffer range
                        // outside write request range.
                        // set_buffer_uptodate() on complete buffers as well as
                        // set_buffer_dirty().
                }

                /* No need to map buffers that are already mapped. */
                if (buffer_mapped(bh))
                        continue;

                /* Unmapped, dirty buffer. Need to map it. */
                bh->b_bdev = vol->sb->s_bdev;

                /* Convert block into corresponding vcn and offset. */
                vcn = (VCN)block << blocksize_bits;
                vcn_ofs = vcn & vol->cluster_size_mask;
                vcn >>= vol->cluster_size_bits;
                if (!rl) {
lock_retry_remap:
                        down_read(&ni->runlist.lock);
                        rl = ni->runlist.rl;
                }
                if (likely(rl != NULL)) {
                        /* Seek to element containing target vcn. */
                        while (rl->length && rl[1].vcn <= vcn)
                                rl++;
                        lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
                } else
                        lcn = LCN_RL_NOT_MAPPED;
                /* Successful remap. */
                if (lcn >= 0) {
                        /* Setup buffer head to point to correct block. */
                        bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
                                        vcn_ofs) >> blocksize_bits;
                        set_buffer_mapped(bh);
                        continue;
                }
                /* It is a hole, need to instantiate it. */
                if (lcn == LCN_HOLE) {
                        // TODO: Instantiate the hole.
                        // clear_buffer_new(bh);
                        // unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
                        ntfs_error(vol->sb, "Writing into sparse regions is "
                                        "not supported yet. Sorry.");
                        err = -EOPNOTSUPP;
                        break;
                }
                /* If first try and runlist unmapped, map and retry. */
                if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
                        is_retry = TRUE;
                        /*
                         * Attempt to map runlist, dropping lock for
                         * the duration.
                         */
                        up_read(&ni->runlist.lock);
                        err = ntfs_map_runlist(ni, vcn);
                        if (likely(!err))
                                goto lock_retry_remap;
                        rl = NULL;
                        lcn = err;
                }
                /* Failed to map the buffer, even after retrying. */
                bh->b_blocknr = -1;
                ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
                                "attribute type 0x%x, vcn 0x%llx, offset 0x%x "
                                "because its location on disk could not be "
                                "determined%s (error code %lli).", ni->mft_no,
                                ni->type, (unsigned long long)vcn,
                                vcn_ofs, is_retry ? " even after "
                                "retrying" : "", (long long)lcn);
                if (!err)
                        err = -EIO;
                break;
        } while (block++, (bh = bh->b_this_page) != head);

        /* Release the lock if we took it. */
        if (rl)
                up_read(&ni->runlist.lock);

        /* For the error case, need to reset bh to the beginning. */
        bh = head;

        /* Just an optimization, so ->readpage() isn't called later. */
        if (unlikely(!PageUptodate(page))) {
                int uptodate = 1;
                do {
                        if (!buffer_uptodate(bh)) {
                                uptodate = 0;
                                bh = head;
                                break;
                        }
                } while ((bh = bh->b_this_page) != head);
                if (uptodate)
                        SetPageUptodate(page);
        }

        /* Setup all mapped, dirty buffers for async write i/o. */
        do {
                get_bh(bh);
                if (buffer_mapped(bh) && buffer_dirty(bh)) {
                        lock_buffer(bh);
                        if (test_clear_buffer_dirty(bh)) {
                                BUG_ON(!buffer_uptodate(bh));
                                mark_buffer_async_write(bh);
                        } else
                                unlock_buffer(bh);
                } else if (unlikely(err)) {
                        /*
                         * For the error case: the buffer may have been set
                         * dirty during attachment to a dirty page.
                         */
                        if (err != -ENOMEM)
                                clear_buffer_dirty(bh);
                }
        } while ((bh = bh->b_this_page) != head);

        if (unlikely(err)) {
                // TODO: Remove the -EOPNOTSUPP check later on...
                if (unlikely(err == -EOPNOTSUPP))
                        err = 0;
                else if (err == -ENOMEM) {
                        ntfs_warning(vol->sb, "Error allocating memory. "
                                        "Redirtying page so we try again "
                                        "later.");
                        /*
                         * Put the page back on mapping->dirty_pages, but
                         * leave its buffers' dirty state as-is.
                         */
                        redirty_page_for_writepage(wbc, page);
                        err = 0;
                } else
                        SetPageError(page);
        }

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);       /* Keeps try_to_free_buffers() away. */
        unlock_page(page);

        /*
         * Submit the prepared buffers for i/o. Note the page is unlocked,
         * and the async write i/o completion handler can end_page_writeback()
         * at any time after the *first* submit_bh(). So the buffers can then
         * disappear...
         */
        need_end_writeback = TRUE;
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh(WRITE, bh);
                        need_end_writeback = FALSE;
                }
                put_bh(bh);
                bh = next;
        } while (bh != head);

        /* If no i/o was started, need to end_page_writeback(). */
        if (unlikely(need_end_writeback))
                end_page_writeback(page);

        ntfs_debug("Done.");
        return err;
}
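
/*
 * Worked example for the block bounds above (illustrative numbers): with
 * i_size == 10000, initialized_size == 8192 and 512-byte blocks, dblock ==
 * (10000 + 511) >> 9 == 20, the first block beyond the data, and iblock ==
 * 8192 >> 9 == 16, the block in which the initialized size boundary falls.
 * Dirty blocks 16-19 would therefore trip the "beyond initialized size"
 * branch.
 */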

/**
 * ntfs_write_mst_block - write a @page to the backing store
 * @page:       page cache page to write out
 * @wbc:        writeback control structure
 *
 * This function is for writing pages belonging to non-resident, mst protected
 * attributes to their backing store.  The only supported attributes are index
 * allocation and $MFT/$DATA.  Both directory inodes and index inodes are
 * supported for the index allocation case.
 *
 * The page must remain locked for the duration of the write because we apply
 * the mst fixups, write, and then undo the fixups, so if we were to unlock the
 * page before undoing the fixups, any other user of the page will see the
 * page contents as corrupt.
 *
 * We clear the page uptodate flag for the duration of the function to ensure
 * exclusion for the $MFT/$DATA case against someone mapping an mft record we
 * are about to apply the mst fixups to.
 *
 * Return 0 on success and -errno on error.
 *
 * Based on ntfs_write_block(), ntfs_mft_writepage(), and
 * write_mft_record_nolock().
 */
static int ntfs_write_mst_block(struct page *page,
                struct writeback_control *wbc)
{
        sector_t block, dblock, rec_block;
        struct inode *vi = page->mapping->host;
        ntfs_inode *ni = NTFS_I(vi);
        ntfs_volume *vol = ni->vol;
        u8 *kaddr;
        unsigned char bh_size_bits = vi->i_blkbits;
        unsigned int bh_size = 1 << bh_size_bits;
        unsigned int rec_size = ni->itype.index.block_size;
        ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
        struct buffer_head *bh, *head, *tbh, *rec_start_bh;
        int max_bhs = PAGE_CACHE_SIZE / bh_size;
        struct buffer_head *bhs[max_bhs];
        runlist_element *rl;
        int i, nr_locked_nis, nr_recs, nr_bhs, bhs_per_rec, err, err2;
        unsigned rec_size_bits;
        BOOL sync, is_mft, page_is_dirty, rec_is_dirty;

        ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
                        "0x%lx.", vi->i_ino, ni->type, page->index);
        BUG_ON(!NInoNonResident(ni));
        BUG_ON(!NInoMstProtected(ni));
        is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
        /*
         * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
         * in its page cache were to be marked dirty.  However, this should
         * never happen with the current driver and, considering we do not
         * handle this case here, we do want to BUG(), at least for now.
         */
        BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
                        (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
        BUG_ON(!max_bhs);

        /* Were we called for sync purposes? */
        sync = (wbc->sync_mode == WB_SYNC_ALL);

        /* Make sure we have mapped buffers. */
        BUG_ON(!page_has_buffers(page));
        bh = head = page_buffers(page);
        BUG_ON(!bh);

        rec_size_bits = ni->itype.index.block_size_bits;
        BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
        bhs_per_rec = rec_size >> bh_size_bits;
        BUG_ON(!bhs_per_rec);

        /* The first block in the page. */
        rec_block = block = (sector_t)page->index <<
                        (PAGE_CACHE_SHIFT - bh_size_bits);

        /* The first out of bounds block for the data size. */
        dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;

        rl = NULL;
        err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
        page_is_dirty = rec_is_dirty = FALSE;
        rec_start_bh = NULL;
        do {
                BOOL is_retry = FALSE;

                if (likely(block < rec_block)) {
                        if (unlikely(block >= dblock)) {
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                continue;
                        }
                        /*
                         * This block is not the first one in the record.  We
                         * ignore the buffer's dirty state because we could
                         * have raced with a parallel mark_ntfs_record_dirty().
                         */
                        if (!rec_is_dirty)
                                continue;
                        if (unlikely(err2)) {
                                if (err2 != -ENOMEM)
                                        clear_buffer_dirty(bh);
                                continue;
                        }
                } else /* if (block == rec_block) */ {
                        BUG_ON(block > rec_block);
                        /* This block is the first one in the record. */
                        rec_block += bhs_per_rec;
                        err2 = 0;
                        if (unlikely(block >= dblock)) {
                                clear_buffer_dirty(bh);
                                continue;
                        }
                        if (!buffer_dirty(bh)) {
                                /* Clean records are not written out. */
                                rec_is_dirty = FALSE;
                                continue;
                        }
                        rec_is_dirty = TRUE;
                        rec_start_bh = bh;
                }
                /* Need to map the buffer if it is not mapped already. */
                if (unlikely(!buffer_mapped(bh))) {
                        VCN vcn;
                        LCN lcn;
                        unsigned int vcn_ofs;

                        /* Obtain the vcn and offset of the current block. */
                        vcn = (VCN)block << bh_size_bits;
                        vcn_ofs = vcn & vol->cluster_size_mask;
                        vcn >>= vol->cluster_size_bits;
                        if (!rl) {
lock_retry_remap:
                                down_read(&ni->runlist.lock);
                                rl = ni->runlist.rl;
                        }
                        if (likely(rl != NULL)) {
                                /* Seek to element containing target vcn. */
                                while (rl->length && rl[1].vcn <= vcn)
                                        rl++;
                                lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
                        } else
                                lcn = LCN_RL_NOT_MAPPED;
                        /* Successful remap. */
                        if (likely(lcn >= 0)) {
                                /* Setup buffer head to correct block. */
                                bh->b_blocknr = ((lcn <<
                                                vol->cluster_size_bits) +
                                                vcn_ofs) >> bh_size_bits;
                                set_buffer_mapped(bh);
                        } else {
                                /*
                                 * Remap failed.  Retry to map the runlist once
                                 * unless we are working on $MFT which always
                                 * has the whole of its runlist in memory.
                                 */
                                if (!is_mft && !is_retry &&
                                                lcn == LCN_RL_NOT_MAPPED) {
                                        is_retry = TRUE;
                                        /*
                                         * Attempt to map runlist, dropping
                                         * lock for the duration.
                                         */
                                        up_read(&ni->runlist.lock);
                                        err2 = ntfs_map_runlist(ni, vcn);
                                        if (likely(!err2))
                                                goto lock_retry_remap;
                                        if (err2 == -ENOMEM)
                                                page_is_dirty = TRUE;
                                        lcn = err2;
                                } else
                                        err2 = -EIO;
                                /* Hard error.  Abort writing this record. */
                                if (!err || err == -ENOMEM)
                                        err = err2;
                                bh->b_blocknr = -1;
                                ntfs_error(vol->sb, "Cannot write ntfs record "
                                                "0x%llx (inode 0x%lx, "
                                                "attribute type 0x%x) because "
                                                "its location on disk could "
                                                "not be determined (error "
                                                "code %lli).",
                                                (long long)block <<
                                                bh_size_bits >>
                                                vol->mft_record_size_bits,
                                                ni->mft_no, ni->type,
                                                (long long)lcn);
                                /*
                                 * If this is not the first buffer, remove the
                                 * buffers in this record from the list of
                                 * buffers to write and clear their dirty bit
                                 * if not error -ENOMEM.
                                 */
                                if (rec_start_bh != bh) {
                                        while (bhs[--nr_bhs] != rec_start_bh)
                                                ;
                                        if (err2 != -ENOMEM) {
                                                do {
                                                        clear_buffer_dirty(
                                                                rec_start_bh);
                                                } while ((rec_start_bh =
                                                                rec_start_bh->
                                                                b_this_page) !=
                                                                bh);
                                        }
                                }
                                continue;
                        }
                }
                BUG_ON(!buffer_uptodate(bh));
                BUG_ON(nr_bhs >= max_bhs);
                bhs[nr_bhs++] = bh;
        } while (block++, (bh = bh->b_this_page) != head);
        if (unlikely(rl))
                up_read(&ni->runlist.lock);
        /* If there were no dirty buffers, we are done. */
        if (!nr_bhs)
                goto done;
        /* Map the page so we can access its contents. */
        kaddr = kmap(page);
        /* Clear the page uptodate flag whilst the mst fixups are applied. */
        BUG_ON(!PageUptodate(page));
        ClearPageUptodate(page);
        for (i = 0; i < nr_bhs; i++) {
                unsigned int ofs;

                /* Skip buffers which are not at the beginning of records. */
                if (i % bhs_per_rec)
                        continue;
                tbh = bhs[i];
                ofs = bh_offset(tbh);
                if (is_mft) {
                        ntfs_inode *tni;
                        unsigned long mft_no;

                        /* Get the mft record number. */
                        mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
                                        >> rec_size_bits;
                        /* Check whether to write this mft record. */
                        tni = NULL;
                        if (!ntfs_may_write_mft_record(vol, mft_no,
                                        (MFT_RECORD*)(kaddr + ofs), &tni)) {
                                /*
                                 * The record should not be written.  This
                                 * means we need to redirty the page before
                                 * returning.
                                 */
                                page_is_dirty = TRUE;
                                /*
                                 * Remove the buffers in this mft record from
                                 * the list of buffers to write.
                                 */
                                do {
                                        bhs[i] = NULL;
                                } while (++i % bhs_per_rec);
                                continue;
                        }
                        /*
                         * The record should be written.  If a locked ntfs
                         * inode was returned, add it to the array of locked
                         * ntfs inodes.
                         */
                        if (tni)
                                locked_nis[nr_locked_nis++] = tni;
                }
                /* Apply the mst protection fixups. */
                err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
                                rec_size);
                if (unlikely(err2)) {
                        if (!err || err == -ENOMEM)
                                err = -EIO;
                        ntfs_error(vol->sb, "Failed to apply mst fixups "
                                        "(inode 0x%lx, attribute type 0x%x, "
                                        "page index 0x%lx, page offset 0x%x)!"
                                        "  Unmount and run chkdsk.", vi->i_ino,
                                        ni->type, page->index, ofs);
                        /*
                         * Mark all the buffers in this record clean as we do
                         * not want to write corrupt data to disk.
                         */
                        do {
                                clear_buffer_dirty(bhs[i]);
                                bhs[i] = NULL;
                        } while (++i % bhs_per_rec);
                        continue;
                }
                nr_recs++;
        }
        /* If no records are to be written out, we are done. */
        if (!nr_recs)
                goto unm_done;
        flush_dcache_page(page);
        /* Lock buffers and start synchronous write i/o on them. */
        for (i = 0; i < nr_bhs; i++) {
                tbh = bhs[i];
                if (!tbh)
                        continue;
                if (unlikely(test_set_buffer_locked(tbh)))
                        BUG();
                /* The buffer dirty state is now irrelevant, just clean it. */
                clear_buffer_dirty(tbh);
                BUG_ON(!buffer_uptodate(tbh));
                BUG_ON(!buffer_mapped(tbh));
                get_bh(tbh);
                tbh->b_end_io = end_buffer_write_sync;
                submit_bh(WRITE, tbh);
        }
        /* Synchronize the mft mirror now if not @sync. */
        if (is_mft && !sync)
                goto do_mirror;
do_wait:
        /* Wait on i/o completion of buffers. */
        for (i = 0; i < nr_bhs; i++) {
                tbh = bhs[i];
                if (!tbh)
                        continue;
                wait_on_buffer(tbh);
                if (unlikely(!buffer_uptodate(tbh))) {
                        ntfs_error(vol->sb, "I/O error while writing ntfs "
                                        "record buffer (inode 0x%lx, "
                                        "attribute type 0x%x, page index "
                                        "0x%lx, page offset 0x%lx)!  Unmount "
                                        "and run chkdsk.", vi->i_ino, ni->type,
                                        page->index, bh_offset(tbh));
                        if (!err || err == -ENOMEM)
                                err = -EIO;
                        /*
                         * Set the buffer uptodate so the page and buffer
                         * states do not become out of sync.
                         */
                        set_buffer_uptodate(tbh);
                }
        }
        /* If @sync, now synchronize the mft mirror. */
        if (is_mft && sync) {
do_mirror:
                for (i = 0; i < nr_bhs; i++) {
                        unsigned long mft_no;
                        unsigned int ofs;

                        /*
                         * Skip buffers which are not at the beginning of
                         * records.
                         */
                        if (i % bhs_per_rec)
                                continue;
                        tbh = bhs[i];
                        /* Skip removed buffers (and hence records). */
                        if (!tbh)
                                continue;
                        ofs = bh_offset(tbh);
                        /* Get the mft record number. */
                        mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
                                        >> rec_size_bits;
                        if (mft_no < vol->mftmirr_size)
                                ntfs_sync_mft_mirror(vol, mft_no,
                                                (MFT_RECORD*)(kaddr + ofs),
                                                sync);
                }
                if (!sync)
                        goto do_wait;
        }
        /* Remove the mst protection fixups again. */
        for (i = 0; i < nr_bhs; i++) {
                if (!(i % bhs_per_rec)) {
                        tbh = bhs[i];
                        if (!tbh)
                                continue;
                        post_write_mst_fixup((NTFS_RECORD*)(kaddr +
                                        bh_offset(tbh)));
                }
        }
        flush_dcache_page(page);
unm_done:
        /* Unlock any locked inodes. */
        while (nr_locked_nis-- > 0) {
                ntfs_inode *tni, *base_tni;

                tni = locked_nis[nr_locked_nis];
                /* Get the base inode. */
                down(&tni->extent_lock);
                if (tni->nr_extents >= 0)
                        base_tni = tni;
                else {
                        base_tni = tni->ext.base_ntfs_ino;
                        BUG_ON(!base_tni);
                }
                up(&tni->extent_lock);
                ntfs_debug("Unlocking %s inode 0x%lx.",
                                tni == base_tni ? "base" : "extent",
                                tni->mft_no);
                up(&tni->mrec_lock);
                atomic_dec(&tni->count);
                iput(VFS_I(base_tni));
        }
        SetPageUptodate(page);
        kunmap(page);
done:
        if (unlikely(err && err != -ENOMEM)) {
                /*
                 * Set page error if there is only one ntfs record in the page.
                 * Otherwise we would lose per-record granularity.
                 */
                if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
                        SetPageError(page);
                NVolSetErrors(vol);
        }
        if (page_is_dirty) {
                ntfs_debug("Page still contains one or more dirty ntfs "
                                "records.  Redirtying the page starting at "
                                "record 0x%lx.", page->index <<
                                (PAGE_CACHE_SHIFT - rec_size_bits));
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
        } else {
                /*
                 * Keep the VM happy.  This must be done otherwise the
                 * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
                 * the page is clean.
                 */
                BUG_ON(PageWriteback(page));
                set_page_writeback(page);
                unlock_page(page);
                end_page_writeback(page);
        }
        if (likely(!err))
                ntfs_debug("Done.");
        return err;
}
1216
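/*
 * Illustrative sketch, not part of the build: how the mirror sync loop in
 * ntfs_write_mst_block() above derives an mft record number from a page
 * index and an intra-page byte offset.  The helper name is hypothetical;
 * the arithmetic mirrors the mft_no computation in the loop.
 */
#if 0
static unsigned long example_mft_no(unsigned long page_index, unsigned int ofs,
                unsigned int rec_size_bits)
{
        /* Absolute byte offset of the record, scaled down to record units. */
        return (unsigned long)((((s64)page_index << PAGE_CACHE_SHIFT) + ofs) >>
                        rec_size_bits);
}
#endif
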
1217 /**
1218  * ntfs_writepage - write a @page to the backing store
1219  * @page:       page cache page to write out
1220  * @wbc:        writeback control structure
1221  *
1222  * This is called from the VM when it wants to have a dirty ntfs page cache
1223  * page cleaned.  The VM has already locked the page and marked it clean.
1224  *
1225  * For non-resident attributes, ntfs_writepage() writes the @page by calling
1226  * the ntfs version of the generic block_write_full_page() function,
1227  * ntfs_write_block(), which in turn if necessary creates and writes the
1228  * buffers associated with the page asynchronously.
1229  *
1230  * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
1231  * the data to the mft record (which at this stage is most likely in memory).
1232  * The mft record is then marked dirty and written out asynchronously via the
1233  * vfs inode dirty code path for the inode the mft record belongs to or via the
1234  * vm page dirty code path for the page the mft record is in.
1235  *
1236  * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
1237  *
1238  * Return 0 on success and -errno on error.
1239  */
1240 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
1241 {
1242         loff_t i_size;
1243         struct inode *vi = page->mapping->host;
1244         ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
1245         char *kaddr;
1246         ntfs_attr_search_ctx *ctx = NULL;
1247         MFT_RECORD *m = NULL;
1248         u32 attr_len;
1249         int err;
1250
1251         BUG_ON(!PageLocked(page));
1252         i_size = i_size_read(vi);
1253         /* Is the page fully outside i_size? (truncate in progress) */
1254         if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
1255                         PAGE_CACHE_SHIFT)) {
1256                 /*
1257                  * The page may have dirty, unmapped buffers.  Make them
1258                  * freeable here, so the page does not leak.
1259                  */
1260                 block_invalidatepage(page, 0);
1261                 unlock_page(page);
1262                 ntfs_debug("Write outside i_size - truncated?");
1263                 return 0;
1264         }
1265         /* NInoNonResident() == NInoIndexAllocPresent() */
1266         if (NInoNonResident(ni)) {
1267                 /*
1268                  * Only unnamed $DATA attributes can be compressed, encrypted,
1269                  * and/or sparse.
1270                  */
1271                 if (ni->type == AT_DATA && !ni->name_len) {
1272                         /* If file is encrypted, deny access, just like NT4. */
1273                         if (NInoEncrypted(ni)) {
1274                                 unlock_page(page);
1275                                 ntfs_debug("Denying write access to encrypted "
1276                                                 "file.");
1277                                 return -EACCES;
1278                         }
1279                         /* Compressed data streams are handled in compress.c. */
1280                         if (NInoCompressed(ni)) {
1281                                 // TODO: Implement and replace this check with
1282                                 // return ntfs_write_compressed_block(page);
1283                                 unlock_page(page);
1284                                 ntfs_error(vi->i_sb, "Writing to compressed "
1285                                                 "files is not supported yet. "
1286                                                 "Sorry.");
1287                                 return -EOPNOTSUPP;
1288                         }
1289                         // TODO: Implement and remove this check.
1290                         if (NInoSparse(ni)) {
1291                                 unlock_page(page);
1292                                 ntfs_error(vi->i_sb, "Writing to sparse files "
1293                                                 "is not supported yet. Sorry.");
1294                                 return -EOPNOTSUPP;
1295                         }
1296                 }
1297                 /* We have to zero every time due to mmap-at-end-of-file. */
1298                 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
1299                         /* The page straddles i_size. */
1300                         unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
1301                         kaddr = kmap_atomic(page, KM_USER0);
1302                         memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
1303                         flush_dcache_page(page);
1304                         kunmap_atomic(kaddr, KM_USER0);
1305                 }
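                /*
                 * Worked example (assumed numbers, for illustration only):
                 * with 4096-byte pages and i_size = 5000, page 1 straddles
                 * i_size, so ofs = 5000 & ~PAGE_CACHE_MASK = 904 and the
                 * final 4096 - 904 = 3192 bytes of the page are zeroed.
                 */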
1306                 /* Handle mst protected attributes. */
1307                 if (NInoMstProtected(ni))
1308                         return ntfs_write_mst_block(page, wbc);
1309                 /* Normal data stream. */
1310                 return ntfs_write_block(page, wbc);
1311         }
1312         /*
1313          * Attribute is resident, implying it is not compressed, encrypted,
1314          * sparse, or mst protected.  This also means the attribute is smaller
1315          * than an mft record and hence smaller than a page, so we can
1316          * simply return an error for any page with index above 0.
1317          */
1318         BUG_ON(page_has_buffers(page));
1319         BUG_ON(!PageUptodate(page));
1320         if (unlikely(page->index > 0)) {
1321                 ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0.  "
1322                                 "Aborting write.", page->index);
1323                 BUG_ON(PageWriteback(page));
1324                 set_page_writeback(page);
1325                 unlock_page(page);
1326                 end_page_writeback(page);
1327                 return -EIO;
1328         }
1329         if (!NInoAttr(ni))
1330                 base_ni = ni;
1331         else
1332                 base_ni = ni->ext.base_ntfs_ino;
1333         /* Map, pin, and lock the mft record. */
1334         m = map_mft_record(base_ni);
1335         if (IS_ERR(m)) {
1336                 err = PTR_ERR(m);
1337                 m = NULL;
1338                 ctx = NULL;
1339                 goto err_out;
1340         }
1341         ctx = ntfs_attr_get_search_ctx(base_ni, m);
1342         if (unlikely(!ctx)) {
1343                 err = -ENOMEM;
1344                 goto err_out;
1345         }
1346         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1347                         CASE_SENSITIVE, 0, NULL, 0, ctx);
1348         if (unlikely(err))
1349                 goto err_out;
1350         /*
1351          * Keep the VM happy.  This must be done otherwise the radix-tree tag
1352          * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
1353          */
1354         BUG_ON(PageWriteback(page));
1355         set_page_writeback(page);
1356         unlock_page(page);
1357
1358         /*
1359          * Here, we do not need to zero the out of bounds area every time
1360          * because the memcpy() below already takes care of the
1361          * mmap-at-end-of-file requirements.  If the file is converted to a
1362          * non-resident one, the code path used switches to the non-resident
1363          * one, where the zeroing happens on each ntfs_writepage() invocation.
1364          *
1365          * The above also applies nicely when i_size is decreased.
1366          *
1367          * When i_size is increased, the memory between the old and new i_size
1368          * _must_ be zeroed (or overwritten with new data). Otherwise we will
1369          * expose data to userspace/disk which should never have been exposed.
1370          *
1371          * FIXME: Ensure that i_size increases do the zeroing/overwriting and
1372          * if we cannot guarantee that, then enable the zeroing below.  If the
1373          * zeroing below is enabled, we MUST move the unlock_page() from above
1374          * to after the kunmap_atomic(), i.e. just before the
1375          * end_page_writeback().
1376          * UPDATE: ntfs_prepare/commit_write() do the zeroing on i_size
1377          * increases for resident attributes so those are ok.
1378          * TODO: ntfs_truncate(), others?
1379          */
1380
1381         attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
1382         i_size = i_size_read(vi);
1383         if (unlikely(attr_len > i_size)) {
1384                 attr_len = i_size;
1385                 ctx->attr->data.resident.value_length = cpu_to_le32(attr_len);
1386         }
1387         kaddr = kmap_atomic(page, KM_USER0);
1388         /* Copy the data from the page to the mft record. */
1389         memcpy((u8*)ctx->attr +
1390                         le16_to_cpu(ctx->attr->data.resident.value_offset),
1391                         kaddr, attr_len);
1392         flush_dcache_mft_record_page(ctx->ntfs_ino);
1393         /* Zero out of bounds area in the page cache page. */
1394         memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
1395         flush_dcache_page(page);
1396         kunmap_atomic(kaddr, KM_USER0);
1397
1398         end_page_writeback(page);
1399
1400         /* Mark the mft record dirty, so it gets written back. */
1401         mark_mft_record_dirty(ctx->ntfs_ino);
1402         ntfs_attr_put_search_ctx(ctx);
1403         unmap_mft_record(base_ni);
1404         return 0;
1405 err_out:
1406         if (err == -ENOMEM) {
1407                 ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
1408                                 "page so we try again later.");
1409                 /*
1410                  * Put the page back on mapping->dirty_pages, but leave its
1411                  * buffers' dirty state as-is.
1412                  */
1413                 redirty_page_for_writepage(wbc, page);
1414                 err = 0;
1415         } else {
1416                 ntfs_error(vi->i_sb, "Resident attribute write failed with "
1417                                 "error %i.", err);
1418                 SetPageError(page);
1419                 NVolSetErrors(ni->vol);
1420                 make_bad_inode(vi);
1421         }
1422         unlock_page(page);
1423         if (ctx)
1424                 ntfs_attr_put_search_ctx(ctx);
1425         if (m)
1426                 unmap_mft_record(base_ni);
1427         return err;
1428 }
1429
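/*
 * Illustrative sketch, not part of the build: the data movement at the core
 * of the resident write path in ntfs_writepage() above.  Assumes @a and
 * @attr_len have been obtained via an attribute lookup as in the function;
 * the helper name is hypothetical.
 */
#if 0
static void example_copy_resident(struct page *page, ATTR_RECORD *a,
                u32 attr_len)
{
        char *kaddr = kmap_atomic(page, KM_USER0);

        /* The page data becomes the attribute value inside the mft record. */
        memcpy((u8*)a + le16_to_cpu(a->data.resident.value_offset), kaddr,
                        attr_len);
        /* Everything in the page beyond the attribute value must be zero. */
        memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
        /* The real path also flushes and dirties the mft record afterwards. */
}
#endif
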
1430 /**
1431  * ntfs_prepare_nonresident_write - prepare a non-resident page for a write
1432  * Map, and if need be read in, the buffers straddled by [@from, @to).
1433  */
1434 static int ntfs_prepare_nonresident_write(struct page *page,
1435                 unsigned from, unsigned to)
1436 {
1437         VCN vcn;
1438         LCN lcn;
1439         s64 initialized_size;
1440         loff_t i_size;
1441         sector_t block, ablock, iblock;
1442         struct inode *vi;
1443         ntfs_inode *ni;
1444         ntfs_volume *vol;
1445         runlist_element *rl;
1446         struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
1447         unsigned long flags;
1448         unsigned int vcn_ofs, block_start, block_end, blocksize;
1449         int err;
1450         BOOL is_retry;
1451         unsigned char blocksize_bits;
1452
1453         vi = page->mapping->host;
1454         ni = NTFS_I(vi);
1455         vol = ni->vol;
1456
1457         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
1458                         "0x%lx, from = %u, to = %u.", ni->mft_no, ni->type,
1459                         page->index, from, to);
1460
1461         BUG_ON(!NInoNonResident(ni));
1462
1463         blocksize_bits = vi->i_blkbits;
1464         blocksize = 1 << blocksize_bits;
1465
1466         /*
1467          * create_empty_buffers() will create uptodate/dirty buffers if the
1468          * page is uptodate/dirty.
1469          */
1470         if (!page_has_buffers(page))
1471                 create_empty_buffers(page, blocksize, 0);
1472         bh = head = page_buffers(page);
1473         if (unlikely(!bh))
1474                 return -ENOMEM;
1475
1476         /* The first block in the page. */
1477         block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
1478
1479         read_lock_irqsave(&ni->size_lock, flags);
1480         /*
1481          * The first out of bounds block for the allocated size.  No need to
1482          * round up as allocated_size is in multiples of cluster size and the
1483          * minimum cluster size is 512 bytes, which is equal to the smallest
1484          * blocksize.
1485          */
1486         ablock = ni->allocated_size >> blocksize_bits;
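        /*
         * Worked example (assumed numbers, for illustration only): an
         * allocated_size of 65536 bytes with a 512-byte blocksize
         * (blocksize_bits = 9) yields ablock = 128, i.e. block 128 is the
         * first block with no backing allocation.
         */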
1487         i_size = i_size_read(vi);
1488         initialized_size = ni->initialized_size;
1489         read_unlock_irqrestore(&ni->size_lock, flags);
1490
1491         /* The last (fully or partially) initialized block. */
1492         iblock = initialized_size >> blocksize_bits;
1493
1494         /* Loop through all the buffers in the page. */
1495         block_start = 0;
1496         rl = NULL;
1497         err = 0;
1498         do {
1499                 block_end = block_start + blocksize;
1500                 /*
1501                  * If buffer @bh is outside the write, just mark it uptodate
1502                  * if the page is uptodate and continue with the next buffer.
1503                  */
1504                 if (block_end <= from || block_start >= to) {
1505                         if (PageUptodate(page)) {
1506                                 if (!buffer_uptodate(bh))
1507                                         set_buffer_uptodate(bh);
1508                         }
1509                         continue;
1510                 }
1511                 /*
1512                  * @bh is at least partially being written to.
1513                  * Make sure it is not marked as new.
1514                  */
1515                 //if (buffer_new(bh))
1516                 //      clear_buffer_new(bh);
1517
1518                 if (block >= ablock) {
1519                         // TODO: block is above allocated_size, need to
1520                         // allocate it. Best done in one go to accommodate not
1521                         // only block but all above blocks up to and including:
1522                         // ((page->index << PAGE_CACHE_SHIFT) + to + blocksize
1523                         // - 1) >> blobksize_bits. Obviously will need to round
1524                         // - 1) >> blocksize_bits. Obviously will need to round
1525                         // done with a helper function, so it can be reused.
1526                         ntfs_error(vol->sb, "Writing beyond allocated size "
1527                                         "is not supported yet. Sorry.");
1528                         err = -EOPNOTSUPP;
1529                         goto err_out;
1530                         // Need to update ablock.
1531                         // Need to set_buffer_new() on all block bhs that are
1532                         // newly allocated.
1533                 }
1534                 /*
1535                  * Now we have enough allocated size to fulfill the whole
1536                  * request, i.e. block < ablock is true.
1537                  */
1538                 if (unlikely((block >= iblock) &&
1539                                 (initialized_size < i_size))) {
1540                         /*
1541                          * If this page is fully outside initialized size, zero
1542                          * out all pages between the current initialized size
1543                          * and the current page. Just use ntfs_readpage() to do
1544                          * the zeroing transparently.
1545                          */
1546                         if (block > iblock) {
1547                                 // TODO:
1548                                 // For each page do:
1549                                 // - read_cache_page()
1550                                 // Again for each page do:
1551                                 // - wait_on_page_locked()
1552                                 // - Check (PageUptodate(page) &&
1553                                 //                      !PageError(page))
1554                                 // Update initialized size in the attribute and
1555                                 // in the inode.
1556                                 // Again, for each page do:
1557                                 //      __set_page_dirty_buffers();
1558                                 // page_cache_release()
1559                                 // We don't need to wait on the writes.
1560                                 // Update iblock.
1561                         }
1562                         /*
1563                          * The current page straddles initialized size. Zero
1564                          * all non-uptodate buffers and set them uptodate (and
1565                          * dirty?). Note, there aren't any non-uptodate buffers
1566                          * if the page is uptodate.
1567                          * FIXME: For an uptodate page, the buffers may need to
1568                          * be written out because they were not initialized on
1569                          * disk before.
1570                          */
1571                         if (!PageUptodate(page)) {
1572                                 // TODO:
1573                                 // Zero any non-uptodate buffers up to i_size.
1574                                 // Set them uptodate and dirty.
1575                         }
1576                         // TODO:
1577                         // Update initialized size in the attribute and in the
1578                         // inode (up to i_size).
1579                         // Update iblock.
1580                         // FIXME: This is inefficient. Try to batch the two
1581                         // size changes to happen in one go.
1582                         ntfs_error(vol->sb, "Writing beyond initialized size "
1583                                         "is not supported yet. Sorry.");
1584                         err = -EOPNOTSUPP;
1585                         goto err_out;
1586                         // Do NOT set_buffer_new() BUT DO clear buffer range
1587                         // outside write request range.
1588                         // set_buffer_uptodate() on complete buffers as well as
1589                         // set_buffer_dirty().
1590                 }
1591
1592                 /* Need to map unmapped buffers. */
1593                 if (!buffer_mapped(bh)) {
1594                         /* Unmapped buffer. Need to map it. */
1595                         bh->b_bdev = vol->sb->s_bdev;
1596
1597                         /* Convert block into corresponding vcn and offset. */
1598                         vcn = (VCN)block << blocksize_bits >>
1599                                         vol->cluster_size_bits;
1600                         vcn_ofs = ((VCN)block << blocksize_bits) &
1601                                         vol->cluster_size_mask;
1602
1603                         is_retry = FALSE;
1604                         if (!rl) {
1605 lock_retry_remap:
1606                                 down_read(&ni->runlist.lock);
1607                                 rl = ni->runlist.rl;
1608                         }
1609                         if (likely(rl != NULL)) {
1610                                 /* Seek to element containing target vcn. */
1611                                 while (rl->length && rl[1].vcn <= vcn)
1612                                         rl++;
1613                                 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
1614                         } else
1615                                 lcn = LCN_RL_NOT_MAPPED;
1616                         if (unlikely(lcn < 0)) {
1617                                 /*
1618                                  * We extended the attribute allocation above.
1619                                  * If we hit an ENOENT here it means that the
1620                                  * allocation was insufficient, which is a bug.
1621                                  */
1622                                 BUG_ON(lcn == LCN_ENOENT);
1623
1624                                 /* It is a hole, need to instantiate it. */
1625                                 if (lcn == LCN_HOLE) {
1626                                         // TODO: Instantiate the hole.
1627                                         // clear_buffer_new(bh);
1628                                         // unmap_underlying_metadata(bh->b_bdev,
1629                                         //              bh->b_blocknr);
1630                                         // For non-uptodate buffers, need to
1631                                         // zero out the region outside the
1632                                         // request in this bh or all bhs,
1633                                         // depending on what we implemented
1634                                         // above.
1635                                         // Need to flush_dcache_page().
1636                                         // Or could use set_buffer_new()
1637                                         // instead?
1638                                         ntfs_error(vol->sb, "Writing into "
1639                                                         "sparse regions is "
1640                                                         "not supported yet. "
1641                                                         "Sorry.");
1642                                         err = -EOPNOTSUPP;
1643                                         goto err_out;
1644                                 } else if (!is_retry &&
1645                                                 lcn == LCN_RL_NOT_MAPPED) {
1646                                         is_retry = TRUE;
1647                                         /*
1648                                          * Attempt to map runlist, dropping
1649                                          * lock for the duration.
1650                                          */
1651                                         up_read(&ni->runlist.lock);
1652                                         err = ntfs_map_runlist(ni, vcn);
1653                                         if (likely(!err))
1654                                                 goto lock_retry_remap;
1655                                         rl = NULL;
1656                                         lcn = err;
1657                                 }
1658                                 /*
1659                                  * Failed to map the buffer, even after
1660                                  * retrying.
1661                                  */
1662                                 bh->b_blocknr = -1;
1663                                 ntfs_error(vol->sb, "Failed to write to inode "
1664                                                 "0x%lx, attribute type 0x%x, "
1665                                                 "vcn 0x%llx, offset 0x%x "
1666                                                 "because its location on disk "
1667                                                 "could not be determined%s "
1668                                                 "(error code %lli).",
1669                                                 ni->mft_no, ni->type,
1670                                                 (unsigned long long)vcn,
1671                                                 vcn_ofs, is_retry ? " even "
1672                                                 "after retrying" : "",
1673                                                 (long long)lcn);
1674                                 if (!err)
1675                                         err = -EIO;
1676                                 goto err_out;
1677                         }
1678                         /* We now have a successful remap, i.e. lcn >= 0. */
1679
1680                         /* Setup buffer head to correct block. */
1681                         bh->b_blocknr = ((lcn << vol->cluster_size_bits)
1682                                         + vcn_ofs) >> blocksize_bits;
1683                         set_buffer_mapped(bh);
1684
1685                         // FIXME: Something analogous to this is needed for
1686                         // each newly allocated block, i.e. BH_New.
1687                         // FIXME: Might need to take this out of the
1688                         // if (!buffer_mapped(bh)) {}, depending on how we
1689                         // implement things during the allocated_size and
1690                         // initialized_size extension code above.
1691                         if (buffer_new(bh)) {
1692                                 clear_buffer_new(bh);
1693                                 unmap_underlying_metadata(bh->b_bdev,
1694                                                 bh->b_blocknr);
1695                                 if (PageUptodate(page)) {
1696                                         set_buffer_uptodate(bh);
1697                                         continue;
1698                                 }
1699                                 /*
1700                                  * Page is _not_ uptodate, zero surrounding
1701                                  * region.  NOTE: This is how we decide
1702                                  * whether to zero or not!
1703                                  */
1704                                 if (block_end > to || block_start < from) {
1705                                         void *kaddr;
1706
1707                                         kaddr = kmap_atomic(page, KM_USER0);
1708                                         if (block_end > to)
1709                                                 memset(kaddr + to, 0,
1710                                                                 block_end - to);
1711                                         if (block_start < from)
1712                                                 memset(kaddr + block_start, 0,
1713                                                                 from -
1714                                                                 block_start);
1715                                         flush_dcache_page(page);
1716                                         kunmap_atomic(kaddr, KM_USER0);
1717                                 }
1718                                 continue;
1719                         }
1720                 }
1721                 /* @bh is mapped, set it uptodate if the page is uptodate. */
1722                 if (PageUptodate(page)) {
1723                         if (!buffer_uptodate(bh))
1724                                 set_buffer_uptodate(bh);
1725                         continue;
1726                 }
1727                 /*
1728                  * The page is not uptodate. The buffer is mapped. If it is not
1729                  * uptodate, and it is only partially being written to, we need
1730                  * to read the buffer in before the write, i.e. right now.
1731                  */
1732                 if (!buffer_uptodate(bh) &&
1733                                 (block_start < from || block_end > to)) {
1734                         ll_rw_block(READ, 1, &bh);
1735                         *wait_bh++ = bh;
1736                 }
1737         } while (block++, block_start = block_end,
1738                         (bh = bh->b_this_page) != head);
1739
1740         /* Release the lock if we took it. */
1741         if (rl) {
1742                 up_read(&ni->runlist.lock);
1743                 rl = NULL;
1744         }
1745
1746         /* If we issued read requests, let them complete. */
1747         while (wait_bh > wait) {
1748                 wait_on_buffer(*--wait_bh);
1749                 if (!buffer_uptodate(*wait_bh))
1750                         return -EIO;
1751         }
1752
1753         ntfs_debug("Done.");
1754         return 0;
1755 err_out:
1756         /*
1757          * Zero out any newly allocated blocks to avoid exposing stale data.
1758          * If BH_New is set, we know that the block was newly allocated in the
1759          * above loop.
1760          * FIXME: What about initialized_size increments? Have we done all the
1761          * required zeroing above? If not, this error handling is broken, and
1762          * in particular the if (block_end <= from) check is completely bogus.
1763          */
1764         bh = head;
1765         block_start = 0;
1766         is_retry = FALSE;
1767         do {
1768                 block_end = block_start + blocksize;
1769                 if (block_end <= from)
1770                         continue;
1771                 if (block_start >= to)
1772                         break;
1773                 if (buffer_new(bh)) {
1774                         void *kaddr;
1775
1776                         clear_buffer_new(bh);
1777                         kaddr = kmap_atomic(page, KM_USER0);
1778                         memset(kaddr + block_start, 0, bh->b_size);
1779                         kunmap_atomic(kaddr, KM_USER0);
1780                         set_buffer_uptodate(bh);
1781                         mark_buffer_dirty(bh);
1782                         is_retry = TRUE;
1783                 }
1784         } while (block_start = block_end, (bh = bh->b_this_page) != head);
1785         if (is_retry)
1786                 flush_dcache_page(page);
1787         if (rl)
1788                 up_read(&ni->runlist.lock);
1789         return err;
1790 }
1791
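/*
 * Illustrative sketch, not part of the build: the block remapping arithmetic
 * used in ntfs_prepare_nonresident_write() above, pulled out for clarity.
 * Given a logical block and an already resolved lcn (>= 0), it returns the
 * backing device block.  The helper name is hypothetical; the shifts match
 * the ones in the function.
 */
#if 0
static sector_t example_remap_block(ntfs_volume *vol, sector_t block,
                unsigned char blocksize_bits, LCN lcn)
{
        /* Byte offset of the block inside its cluster. */
        unsigned int vcn_ofs = ((VCN)block << blocksize_bits) &
                        vol->cluster_size_mask;

        /* Device block = cluster start plus intra-cluster offset. */
        return ((lcn << vol->cluster_size_bits) + vcn_ofs) >> blocksize_bits;
}
#endif
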
1792 /**
1793  * ntfs_prepare_write - prepare a page for receiving data
1794  *
1795  * This is called from generic_file_write() with i_sem held on the inode
1796  * (@page->mapping->host).  The @page is locked but not kmap()ped.  The source
1797  * data has not yet been copied into the @page.
1798  *
1799  * Need to extend the attribute/fill in holes if necessary, create blocks,
1800  * and make partially overwritten blocks uptodate.
1801  *
1802  * i_size is not to be modified yet.
1803  *
1804  * Return 0 on success or -errno on error.
1805  *
1806  * Should be using block_prepare_write() [support for sparse files] or
1807  * cont_prepare_write() [no support for sparse files].  Cannot do that due to
1808  * ntfs specifics but can look at them for implementation guidance.
1809  *
1810  * Note: In the range, @from is inclusive and @to is exclusive, i.e. @from is
1811  * the first byte in the page that will be written to and @to is the first byte
1812  * after the last byte that will be written to.
1813  */
1814 static int ntfs_prepare_write(struct file *file, struct page *page,
1815                 unsigned from, unsigned to)
1816 {
1817         s64 new_size;
1818         loff_t i_size;
1819         struct inode *vi = page->mapping->host;
1820         ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
1821         ntfs_volume *vol = ni->vol;
1822         ntfs_attr_search_ctx *ctx = NULL;
1823         MFT_RECORD *m = NULL;
1824         ATTR_RECORD *a;
1825         u8 *kaddr;
1826         u32 attr_len;
1827         int err;
1828
1829         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
1830                         "0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
1831                         page->index, from, to);
1832         BUG_ON(!PageLocked(page));
1833         BUG_ON(from > PAGE_CACHE_SIZE);
1834         BUG_ON(to > PAGE_CACHE_SIZE);
1835         BUG_ON(from > to);
1836         BUG_ON(NInoMstProtected(ni));
1837         /*
1838          * If a previous ntfs_truncate() failed, repeat it and abort if it
1839          * fails again.
1840          */
1841         if (unlikely(NInoTruncateFailed(ni))) {
1842                 down_write(&vi->i_alloc_sem);
1843                 err = ntfs_truncate(vi);
1844                 up_write(&vi->i_alloc_sem);
1845                 if (err || NInoTruncateFailed(ni)) {
1846                         if (!err)
1847                                 err = -EIO;
1848                         goto err_out;
1849                 }
1850         }
1851         /* If the attribute is not resident, deal with it elsewhere. */
1852         if (NInoNonResident(ni)) {
1853                 /*
1854                  * Only unnamed $DATA attributes can be compressed, encrypted,
1855                  * and/or sparse.
1856                  */
1857                 if (ni->type == AT_DATA && !ni->name_len) {
1858                         /* If file is encrypted, deny access, just like NT4. */
1859                         if (NInoEncrypted(ni)) {
1860                                 ntfs_debug("Denying write access to encrypted "
1861                                                 "file.");
1862                                 return -EACCES;
1863                         }
1864                         /* Compressed data streams are handled in compress.c. */
1865                         if (NInoCompressed(ni)) {
1866                                 // TODO: Implement and replace this check with
1867                                 // return ntfs_write_compressed_block(page);
1868                                 ntfs_error(vi->i_sb, "Writing to compressed "
1869                                                 "files is not supported yet. "
1870                                                 "Sorry.");
1871                                 return -EOPNOTSUPP;
1872                         }
1873                         // TODO: Implement and remove this check.
1874                         if (NInoSparse(ni)) {
1875                                 ntfs_error(vi->i_sb, "Writing to sparse files "
1876                                                 "is not supported yet. Sorry.");
1877                                 return -EOPNOTSUPP;
1878                         }
1879                 }
1880                 /* Normal data stream. */
1881                 return ntfs_prepare_nonresident_write(page, from, to);
1882         }
1883         /*
1884          * Attribute is resident, implying it is not compressed, encrypted, or
1885          * sparse.
1886          */
1887         BUG_ON(page_has_buffers(page));
1888         new_size = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
1889         /* If we do not need to resize the attribute allocation we are done. */
1890         if (new_size <= i_size_read(vi))
1891                 goto done;
1892         /* Map, pin, and lock the (base) mft record. */
1893         if (!NInoAttr(ni))
1894                 base_ni = ni;
1895         else
1896                 base_ni = ni->ext.base_ntfs_ino;
1897         m = map_mft_record(base_ni);
1898         if (IS_ERR(m)) {
1899                 err = PTR_ERR(m);
1900                 m = NULL;
1901                 ctx = NULL;
1902                 goto err_out;
1903         }
1904         ctx = ntfs_attr_get_search_ctx(base_ni, m);
1905         if (unlikely(!ctx)) {
1906                 err = -ENOMEM;
1907                 goto err_out;
1908         }
1909         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1910                         CASE_SENSITIVE, 0, NULL, 0, ctx);
1911         if (unlikely(err)) {
1912                 if (err == -ENOENT)
1913                         err = -EIO;
1914                 goto err_out;
1915         }
1916         m = ctx->mrec;
1917         a = ctx->attr;
1918         /* The total length of the attribute value. */
1919         attr_len = le32_to_cpu(a->data.resident.value_length);
1920         /* Fix a possible earlier failure of ntfs_commit_write(). */
1921         i_size = i_size_read(vi);
1922         if (unlikely(attr_len > i_size)) {
1923                 attr_len = i_size;
1924                 a->data.resident.value_length = cpu_to_le32(attr_len);
1925         }
1926         /* If we do not need to resize the attribute allocation we are done. */
1927         if (new_size <= attr_len)
1928                 goto done_unm;
1929         /* Check if new size is allowed in $AttrDef. */
1930         err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
1931         if (unlikely(err)) {
1932                 if (err == -ERANGE) {
1933                         ntfs_error(vol->sb, "Write would cause the inode "
1934                                         "0x%lx to exceed the maximum size for "
1935                                         "its attribute type (0x%x).  Aborting "
1936                                         "write.", vi->i_ino,
1937                                         le32_to_cpu(ni->type));
1938                 } else {
1939                         ntfs_error(vol->sb, "Inode 0x%lx has unknown "
1940                                         "attribute type 0x%x.  Aborting "
1941                                         "write.", vi->i_ino,
1942                                         le32_to_cpu(ni->type));
1943                         err = -EIO;
1944                 }
1945                 goto err_out2;
1946         }
1947         /*
1948          * Extend the attribute record to be able to store the new attribute
1949          * size.
1950          */
1951         if (new_size >= vol->mft_record_size || ntfs_attr_record_resize(m, a,
1952                         le16_to_cpu(a->data.resident.value_offset) +
1953                         new_size)) {
1954                 /* Not enough space in the mft record. */
1955                 ntfs_error(vol->sb, "Not enough space in the mft record for "
1956                                 "the resized attribute value.  This is not "
1957                                 "supported yet.  Aborting write.");
1958                 err = -EOPNOTSUPP;
1959                 goto err_out2;
1960         }
1961         /*
1962          * We have enough space in the mft record to fit the write.  This
1963          * implies the attribute is smaller than the mft record and hence the
1964          * attribute must be in a single page and hence page->index must be 0.
1965          */
1966         BUG_ON(page->index);
1967         /*
1968          * If the beginning of the write is past the old size, enlarge the
1969          * attribute value up to the beginning of the write and fill it with
1970          * zeroes.
1971          */
1972         if (from > attr_len) {
1973                 memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
1974                                 attr_len, 0, from - attr_len);
1975                 a->data.resident.value_length = cpu_to_le32(from);
1976                 /* Zero the corresponding area in the page as well. */
1977                 if (PageUptodate(page)) {
1978                         kaddr = kmap_atomic(page, KM_USER0);
1979                         memset(kaddr + attr_len, 0, from - attr_len);
1980                         kunmap_atomic(kaddr, KM_USER0);
1981                         flush_dcache_page(page);
1982                 }
1983         }
1984         flush_dcache_mft_record_page(ctx->ntfs_ino);
1985         mark_mft_record_dirty(ctx->ntfs_ino);
1986 done_unm:
1987         ntfs_attr_put_search_ctx(ctx);
1988         unmap_mft_record(base_ni);
1989         /*
1990          * Because resident attributes are handled by memcpy() to/from the
1991          * corresponding MFT record, and because this form of i/o is byte
1992          * aligned rather than block aligned, there is no need to bring the
1993          * page uptodate here as in the non-resident case where we need to
1994          * bring the buffers straddled by the write uptodate before
1995          * generic_file_write() does the copying from userspace.
1996          *
1997          * We thus defer bringing the page region outside the written-to
1998          * region uptodate until ntfs_commit_write(), which makes the code
1999          * simpler and saves one atomic kmap, which is good.
2000          */
2001 done:
2002         ntfs_debug("Done.");
2003         return 0;
2004 err_out:
2005         if (err == -ENOMEM)
2006                 ntfs_warning(vi->i_sb, "Error allocating memory required to "
2007                                 "prepare the write.");
2008         else {
2009                 ntfs_error(vi->i_sb, "Resident attribute prepare write failed "
2010                                 "with error %i.", err);
2011                 NVolSetErrors(vol);
2012                 make_bad_inode(vi);
2013         }
2014 err_out2:
2015         if (ctx)
2016                 ntfs_attr_put_search_ctx(ctx);
2017         if (m)
2018                 unmap_mft_record(base_ni);
2019         return err;
2020 }
2021
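/*
 * Worked example (assumed numbers, for illustration only) of the [@from, @to)
 * convention used by ntfs_prepare_write() above: with 4096-byte pages, a
 * write of 512 bytes at file offset 4196 lands in page 1 with from = 100 and
 * to = 612, so new_size = (1 << PAGE_CACHE_SHIFT) + 612 = 4708.
 */
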
2022 /**
2023  * ntfs_commit_nonresident_write - commit a write to a non-resident page
2024  * Mark the buffers straddled by [@from, @to) uptodate and dirty.
2025  */
2026 static int ntfs_commit_nonresident_write(struct page *page,
2027                 unsigned from, unsigned to)
2028 {
2029         s64 pos = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
2030         struct inode *vi = page->mapping->host;
2031         struct buffer_head *bh, *head;
2032         unsigned int block_start, block_end, blocksize;
2033         BOOL partial;
2034
2035         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
2036                         "0x%lx, from = %u, to = %u.", vi->i_ino,
2037                         NTFS_I(vi)->type, page->index, from, to);
2038         blocksize = 1 << vi->i_blkbits;
2039
2040         // FIXME: We need a whole slew of special cases in here for compressed
2041         // files for example...
2042         // For now, we know ntfs_prepare_write() would have failed so we can't
2043         // get here in any of the cases which we have to special case, so this
2044         // is just an unrolled copy of generic_commit_write().
2045
2046         bh = head = page_buffers(page);
2047         block_start = 0;
2048         partial = FALSE;
2049         do {
2050                 block_end = block_start + blocksize;
2051                 if (block_end <= from || block_start >= to) {
2052                         if (!buffer_uptodate(bh))
2053                                 partial = TRUE;
2054                 } else {
2055                         set_buffer_uptodate(bh);
2056                         mark_buffer_dirty(bh);
2057                 }
2058         } while (block_start = block_end, (bh = bh->b_this_page) != head);
2059         /*
2060          * If this is a partial write which happened to make all buffers
2061          * uptodate then we can optimize away a bogus ->readpage() for the next
2062          * read().  Here we 'discover' whether the page went uptodate as a
2063          * result of this (potentially partial) write.
2064          */
2065         if (!partial)
2066                 SetPageUptodate(page);
2067         /*
2068          * Not convinced about this at all.  See disparity comment above.  For
2069          * now we know ntfs_prepare_write() would have failed in the write
2070          * exceeds i_size case, so this will never trigger which is fine.
2071          */
2072         if (pos > i_size_read(vi)) {
2073                 ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
2074                                 "not supported yet.  Sorry.");
2075                 return -EOPNOTSUPP;
2076                 // vi->i_size = pos;
2077                 // mark_inode_dirty(vi);
2078         }
2079         ntfs_debug("Done.");
2080         return 0;
2081 }
2082
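/*
 * Worked example (assumed numbers, for illustration only): with 512-byte
 * blocks and a write with from = 100, to = 612, the loop above dirties the
 * first two buffers (byte ranges [0, 512) and [512, 1024)) and leaves the
 * remaining ones alone; the page only becomes uptodate if all untouched
 * buffers were already uptodate.
 */
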
2083 /**
2084  * ntfs_commit_write - commit the received data
2085  *
2086  * This is called from generic_file_write() with i_sem held on the inode
2087  * (@page->mapping->host).  The @page is locked but not kmap()ped.  The source
2088  * data has already been copied into the @page.  ntfs_prepare_write() has been
2089  * called before the data copied and it returned success so we can take the
2090  * results of various BUG checks and some error handling for granted.
2091  *
2092  * Need to mark modified blocks dirty so they get written out later when
2093  * ntfs_writepage() is invoked by the VM.
2094  *
2095  * Return 0 on success or -errno on error.
2096  *
2097  * Should be using generic_commit_write().  This marks buffers uptodate and
2098  * dirty, sets the page uptodate if all buffers in the page are uptodate, and
2099  * updates i_size if the end of io is beyond i_size.  In that case, it also
2100  * marks the inode dirty.
2101  *
2102  * Cannot use generic_commit_write() due to ntfs specialities but can look at
2103  * it for implementation guidance.
2104  *
2105  * If things have gone as outlined in ntfs_prepare_write(), then we do not
2106  * need to do any page content modifications here at all, except in the write
2107  * to resident attribute case, where we need to do the uptodate bringing here
2108  * which we combine with the copying into the mft record which means we save
2109  * one atomic kmap.
2110  */
2111 static int ntfs_commit_write(struct file *file, struct page *page,
2112                 unsigned from, unsigned to)
2113 {
2114         struct inode *vi = page->mapping->host;
2115         ntfs_inode *base_ni, *ni = NTFS_I(vi);
2116         char *kaddr, *kattr;
2117         ntfs_attr_search_ctx *ctx;
2118         MFT_RECORD *m;
2119         ATTR_RECORD *a;
2120         u32 attr_len;
2121         int err;
2122
2123         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
2124                         "0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
2125                         page->index, from, to);
2126         /* If the attribute is not resident, deal with it elsewhere. */
2127         if (NInoNonResident(ni)) {
2128                 /* Only unnamed $DATA attributes can be compressed/encrypted. */
2129                 if (ni->type == AT_DATA && !ni->name_len) {
2130                         /* Encrypted files need separate handling. */
2131                         if (NInoEncrypted(ni)) {
2132                                 // We never get here at present!
2133                                 BUG();
2134                         }
2135                         /* Compressed data streams are handled in compress.c. */
2136                         if (NInoCompressed(ni)) {
2137                                 // TODO: Implement this!
2138                                 // return ntfs_write_compressed_block(page);
2139                                 // We never get here at present!
2140                                 BUG();
2141                         }
2142                 }
2143                 /* Normal data stream. */
2144                 return ntfs_commit_nonresident_write(page, from, to);
2145         }
2146         /*
2147          * Attribute is resident, implying it is not compressed, encrypted, or
2148          * sparse.
2149          */
2150         if (!NInoAttr(ni))
2151                 base_ni = ni;
2152         else
2153                 base_ni = ni->ext.base_ntfs_ino;
2154         /* Map, pin, and lock the mft record. */
2155         m = map_mft_record(base_ni);
2156         if (IS_ERR(m)) {
2157                 err = PTR_ERR(m);
2158                 m = NULL;
2159                 ctx = NULL;
2160                 goto err_out;
2161         }
2162         ctx = ntfs_attr_get_search_ctx(base_ni, m);
2163         if (unlikely(!ctx)) {
2164                 err = -ENOMEM;
2165                 goto err_out;
2166         }
2167         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2168                         CASE_SENSITIVE, 0, NULL, 0, ctx);
2169         if (unlikely(err)) {
2170                 if (err == -ENOENT)
2171                         err = -EIO;
2172                 goto err_out;
2173         }
2174         a = ctx->attr;
2175         /* The total length of the attribute value. */
2176         attr_len = le32_to_cpu(a->data.resident.value_length);
2177         BUG_ON(from > attr_len);
2178         kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
2179         kaddr = kmap_atomic(page, KM_USER0);
2180         /* Copy the received data from the page to the mft record. */
2181         memcpy(kattr + from, kaddr + from, to - from);
2182         /* Update the attribute length if necessary. */
2183         if (to > attr_len) {
2184                 attr_len = to;
2185                 a->data.resident.value_length = cpu_to_le32(attr_len);
2186         }
2187         /*
2188          * If the page is not uptodate, bring the out of bounds area(s)
2189          * uptodate by copying data from the mft record to the page.
2190          */
2191         if (!PageUptodate(page)) {
2192                 if (from > 0)
2193                         memcpy(kaddr, kattr, from);
2194                 if (to < attr_len)
2195                         memcpy(kaddr + to, kattr + to, attr_len - to);
2196                 /* Zero the region outside the end of the attribute value. */
2197                 if (attr_len < PAGE_CACHE_SIZE)
2198                         memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
2199                 /*
2200                  * The probability of not having done any of the above is
2201                  * extremely small, so we just flush unconditionally.
2202                  */
2203                 flush_dcache_page(page);
2204                 SetPageUptodate(page);
2205         }
2206         kunmap_atomic(kaddr, KM_USER0);
2207         /* Update i_size if necessary. */
2208         if (i_size_read(vi) < attr_len) {
2209                 unsigned long flags;
2210
2211                 write_lock_irqsave(&ni->size_lock, flags);
2212                 ni->allocated_size = ni->initialized_size = attr_len;
2213                 i_size_write(vi, attr_len);
2214                 write_unlock_irqrestore(&ni->size_lock, flags);
2215         }
2216         /* Mark the mft record dirty, so it gets written back. */
2217         flush_dcache_mft_record_page(ctx->ntfs_ino);
2218         mark_mft_record_dirty(ctx->ntfs_ino);
2219         ntfs_attr_put_search_ctx(ctx);
2220         unmap_mft_record(base_ni);
2221         ntfs_debug("Done.");
2222         return 0;
2223 err_out:
2224         if (err == -ENOMEM) {
2225                 ntfs_warning(vi->i_sb, "Error allocating memory required to "
2226                                 "commit the write.");
2227                 if (PageUptodate(page)) {
2228                         ntfs_warning(vi->i_sb, "Page is uptodate, setting "
2229                                         "dirty so the write will be retried "
2230                                         "later on by the VM.");
2231                         /*
2232                          * Put the page on mapping->dirty_pages, but leave its
2233                          * buffers' dirty state as-is.
2234                          */
2235                         __set_page_dirty_nobuffers(page);
2236                         err = 0;
2237                 } else
2238                         ntfs_error(vi->i_sb, "Page is not uptodate.  Written "
2239                                         "data has been lost.");
2240         } else {
2241                 ntfs_error(vi->i_sb, "Resident attribute commit write failed "
2242                                 "with error %i.", err);
2243                 NVolSetErrors(ni->vol);
2244                 make_bad_inode(vi);
2245         }
2246         if (ctx)
2247                 ntfs_attr_put_search_ctx(ctx);
2248         if (m)
2249                 unmap_mft_record(base_ni);
2250         return err;
2251 }
2252
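/*
 * Illustrative sketch, not part of the build: the size update pattern used
 * at the end of ntfs_commit_write() above.  All three sizes are protected by
 * ni->size_lock; the helper name is hypothetical.
 */
#if 0
static void example_grow_resident_sizes(struct inode *vi, ntfs_inode *ni,
                u32 attr_len)
{
        unsigned long flags;

        write_lock_irqsave(&ni->size_lock, flags);
        ni->allocated_size = ni->initialized_size = attr_len;
        i_size_write(vi, attr_len);
        write_unlock_irqrestore(&ni->size_lock, flags);
}
#endif
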
2253 #endif  /* NTFS_RW */
2254
2255 /**
2256  * ntfs_aops - general address space operations for inodes and attributes
2257  */
2258 struct address_space_operations ntfs_aops = {
2259         .readpage       = ntfs_readpage,        /* Fill page with data. */
2260         .sync_page      = block_sync_page,      /* Currently, just unplugs the
2261                                                    disk request queue. */
2262 #ifdef NTFS_RW
2263         .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
2264         .prepare_write  = ntfs_prepare_write,   /* Prepare page and buffers
2265                                                    ready to receive data. */
2266         .commit_write   = ntfs_commit_write,    /* Commit received data. */
2267 #endif /* NTFS_RW */
2268 };
2269
2270 /**
2271  * ntfs_mst_aops - general address space operations for mst protected inodes
2272  *                 and attributes
2273  */
2274 struct address_space_operations ntfs_mst_aops = {
2275         .readpage       = ntfs_readpage,        /* Fill page with data. */
2276         .sync_page      = block_sync_page,      /* Currently, just unplugs the
2277                                                    disk request queue. */
2278 #ifdef NTFS_RW
2279         .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
2280         .set_page_dirty = __set_page_dirty_nobuffers,   /* Set the page dirty
2281                                                    without touching the buffers
2282                                                    belonging to the page. */
2283 #endif /* NTFS_RW */
2284 };
2285
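/*
 * Illustrative sketch, not part of the build: roughly how these tables get
 * hooked up when an inode is read in (the real assignments live in
 * fs/ntfs/inode.c).
 */
#if 0
        if (NInoMstProtected(ni))
                vi->i_mapping->a_ops = &ntfs_mst_aops;
        else
                vi->i_mapping->a_ops = &ntfs_aops;
#endif
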
2286 #ifdef NTFS_RW
2287
2288 /**
2289  * mark_ntfs_record_dirty - mark an ntfs record dirty
2290  * @page:       page containing the ntfs record to mark dirty
2291  * @ofs:        byte offset within @page at which the ntfs record begins
2292  *
2293  * Set the buffers and the page in which the ntfs record is located dirty.
2294  *
2295  * The latter also marks the vfs inode the ntfs record belongs to dirty
2296  * (I_DIRTY_PAGES only).
2297  *
2298  * If the page does not have buffers, we create them and set them uptodate.
2299  * The page may not be locked which is why we need to handle the buffers under
2300  * the mapping->private_lock.  Once the buffers are marked dirty we no longer
2301  * need the lock since try_to_free_buffers() does not free dirty buffers.
2302  */
2303 void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs)
{
2304         struct address_space *mapping = page->mapping;
2305         ntfs_inode *ni = NTFS_I(mapping->host);
2306         struct buffer_head *bh, *head, *buffers_to_free = NULL;
2307         unsigned int end, bh_size, bh_ofs;
2308
2309         BUG_ON(!PageUptodate(page));
2310         end = ofs + ni->itype.index.block_size;
2311         bh_size = 1 << VFS_I(ni)->i_blkbits;
2312         spin_lock(&mapping->private_lock);
2313         if (unlikely(!page_has_buffers(page))) {
2314                 spin_unlock(&mapping->private_lock);
2315                 bh = head = alloc_page_buffers(page, bh_size, 1);
2316                 spin_lock(&mapping->private_lock);
2317                 if (likely(!page_has_buffers(page))) {
2318                         struct buffer_head *tail;
2319
2320                         do {
2321                                 set_buffer_uptodate(bh);
2322                                 tail = bh;
2323                                 bh = bh->b_this_page;
2324                         } while (bh);
2325                         tail->b_this_page = head;
2326                         attach_page_buffers(page, head);
2327                 } else
2328                         buffers_to_free = bh;
2329         }
2330         bh = head = page_buffers(page);
2331         do {
2332                 bh_ofs = bh_offset(bh);
2333                 if (bh_ofs + bh_size <= ofs)
2334                         continue;
2335                 if (unlikely(bh_ofs >= end))
2336                         break;
2337                 set_buffer_dirty(bh);
2338         } while ((bh = bh->b_this_page) != head);
2339         spin_unlock(&mapping->private_lock);
2340         __set_page_dirty_nobuffers(page);
2341         if (unlikely(buffers_to_free)) {
2342                 do {
2343                         bh = buffers_to_free->b_this_page;
2344                         free_buffer_head(buffers_to_free);
2345                         buffers_to_free = bh;
2346                 } while (buffers_to_free);
2347         }
2348 }
2349
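/*
 * Illustrative sketch, not part of the build: a typical caller dirties one
 * mst protected record inside a mapped page.  @rec_no, @ni and @page are
 * assumed context; the offset arithmetic is hypothetical but follows the
 * record layout used throughout this file.
 */
#if 0
        /* Byte offset of record @rec_no within its page cache page. */
        unsigned int ofs = (rec_no << ni->itype.index.block_size_bits) &
                        ~PAGE_CACHE_MASK;

        mark_ntfs_record_dirty(page, ofs);
#endif
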
2350 #endif /* NTFS_RW */