xfs: introduce uncached buffer read primitive
[pandora-kernel.git] / fs / xfs / linux-2.6 / xfs_buf.c
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36 #include <linux/list_sort.h>
37
38 #include "xfs_sb.h"
39 #include "xfs_inum.h"
40 #include "xfs_log.h"
41 #include "xfs_ag.h"
42 #include "xfs_mount.h"
43 #include "xfs_trace.h"
44
45 static kmem_zone_t *xfs_buf_zone;
46 STATIC int xfsbufd(void *);
47 STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
48 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
49 static struct shrinker xfs_buf_shake = {
50         .shrink = xfsbufd_wakeup,
51         .seeks = DEFAULT_SEEKS,
52 };
53
54 static struct workqueue_struct *xfslogd_workqueue;
55 struct workqueue_struct *xfsdatad_workqueue;
56 struct workqueue_struct *xfsconvertd_workqueue;
57
58 #ifdef XFS_BUF_LOCK_TRACKING
59 # define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
60 # define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
61 # define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
62 #else
63 # define XB_SET_OWNER(bp)       do { } while (0)
64 # define XB_CLEAR_OWNER(bp)     do { } while (0)
65 # define XB_GET_OWNER(bp)       do { } while (0)
66 #endif
67
68 #define xb_to_gfp(flags) \
69         ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
70           ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
71
72 #define xb_to_km(flags) \
73          (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
74
75 #define xfs_buf_allocate(flags) \
76         kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
77 #define xfs_buf_deallocate(bp) \
78         kmem_zone_free(xfs_buf_zone, (bp));
79
80 static inline int
81 xfs_buf_is_vmapped(
82         struct xfs_buf  *bp)
83 {
84         /*
85          * Return true if the buffer is vmapped.
86          *
87          * The XBF_MAPPED flag is set if the buffer should be mapped, but the
88          * code is clever enough to know it doesn't have to map a single page,
89          * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
90          */
91         return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
92 }
93
94 static inline int
95 xfs_buf_vmap_len(
96         struct xfs_buf  *bp)
97 {
98         return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
99 }
100
101 /*
102  *      Page Region interfaces.
103  *
104  *      For pages in filesystems where the blocksize is smaller than the
105  *      pagesize, we use the page->private field (long) to hold a bitmap
106  *      of uptodate regions within the page.
107  *
108  *      Each such region is "bytes per page / bits per long" bytes long.
109  *
110  *      NBPPR == number-of-bytes-per-page-region
111  *      BTOPR == bytes-to-page-region (rounded up)
112  *      BTOPRT == bytes-to-page-region-truncated (rounded down)
113  */
114 #if (BITS_PER_LONG == 32)
115 #define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
116 #elif (BITS_PER_LONG == 64)
117 #define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
118 #else
119 #error BITS_PER_LONG must be 32 or 64
120 #endif
121 #define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
122 #define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
123 #define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))
124
125 STATIC unsigned long
126 page_region_mask(
127         size_t          offset,
128         size_t          length)
129 {
130         unsigned long   mask;
131         int             first, final;
132
133         first = BTOPR(offset);
134         final = BTOPRT(offset + length - 1);
135         first = min(first, final);
136
137         mask = ~0UL;
138         mask <<= BITS_PER_LONG - (final - first);
139         mask >>= BITS_PER_LONG - (final);
140
141         ASSERT(offset + length <= PAGE_CACHE_SIZE);
142         ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
143
144         return mask;
145 }
146
147 STATIC void
148 set_page_region(
149         struct page     *page,
150         size_t          offset,
151         size_t          length)
152 {
153         set_page_private(page,
154                 page_private(page) | page_region_mask(offset, length));
155         if (page_private(page) == ~0UL)
156                 SetPageUptodate(page);
157 }
158
159 STATIC int
160 test_page_region(
161         struct page     *page,
162         size_t          offset,
163         size_t          length)
164 {
165         unsigned long   mask = page_region_mask(offset, length);
166
167         return (mask && (page_private(page) & mask) == mask);
168 }
169
170 /*
171  *      Internal xfs_buf_t object manipulation
172  */
173
174 STATIC void
175 _xfs_buf_initialize(
176         xfs_buf_t               *bp,
177         xfs_buftarg_t           *target,
178         xfs_off_t               range_base,
179         size_t                  range_length,
180         xfs_buf_flags_t         flags)
181 {
182         /*
183          * We don't want certain flags to appear in b_flags.
184          */
185         flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
186
187         memset(bp, 0, sizeof(xfs_buf_t));
188         atomic_set(&bp->b_hold, 1);
189         init_completion(&bp->b_iowait);
190         INIT_LIST_HEAD(&bp->b_list);
191         INIT_LIST_HEAD(&bp->b_hash_list);
192         init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
193         XB_SET_OWNER(bp);
194         bp->b_target = target;
195         bp->b_file_offset = range_base;
196         /*
197          * Set buffer_length and count_desired to the same value initially.
198          * I/O routines should use count_desired, which will be the same in
199          * most cases but may be reset (e.g. XFS recovery).
200          */
201         bp->b_buffer_length = bp->b_count_desired = range_length;
202         bp->b_flags = flags;
203         bp->b_bn = XFS_BUF_DADDR_NULL;
204         atomic_set(&bp->b_pin_count, 0);
205         init_waitqueue_head(&bp->b_waiters);
206
207         XFS_STATS_INC(xb_create);
208
209         trace_xfs_buf_init(bp, _RET_IP_);
210 }
211
212 /*
213  *      Allocate a page array capable of holding a specified number
214  *      of pages, and point the page buf at it.
215  */
216 STATIC int
217 _xfs_buf_get_pages(
218         xfs_buf_t               *bp,
219         int                     page_count,
220         xfs_buf_flags_t         flags)
221 {
222         /* Make sure that we have a page list */
223         if (bp->b_pages == NULL) {
224                 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
225                 bp->b_page_count = page_count;
226                 if (page_count <= XB_PAGES) {
227                         bp->b_pages = bp->b_page_array;
228                 } else {
229                         bp->b_pages = kmem_alloc(sizeof(struct page *) *
230                                         page_count, xb_to_km(flags));
231                         if (bp->b_pages == NULL)
232                                 return -ENOMEM;
233                 }
234                 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
235         }
236         return 0;
237 }
238
239 /*
240  *      Frees b_pages if it was allocated.
241  */
242 STATIC void
243 _xfs_buf_free_pages(
244         xfs_buf_t       *bp)
245 {
246         if (bp->b_pages != bp->b_page_array) {
247                 kmem_free(bp->b_pages);
248                 bp->b_pages = NULL;
249         }
250 }
251
252 /*
253  *      Releases the specified buffer.
254  *
255  *      The modification state of any associated pages is left unchanged.
256  *      The buffer must not be on any hash - use xfs_buf_rele instead for
257  *      hashed and refcounted buffers.
258  */
259 void
260 xfs_buf_free(
261         xfs_buf_t               *bp)
262 {
263         trace_xfs_buf_free(bp, _RET_IP_);
264
265         ASSERT(list_empty(&bp->b_hash_list));
266
267         if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
268                 uint            i;
269
270                 if (xfs_buf_is_vmapped(bp))
271                         vm_unmap_ram(bp->b_addr - bp->b_offset,
272                                         bp->b_page_count);
273
274                 for (i = 0; i < bp->b_page_count; i++) {
275                         struct page     *page = bp->b_pages[i];
276
277                         if (bp->b_flags & _XBF_PAGE_CACHE)
278                                 ASSERT(!PagePrivate(page));
279                         page_cache_release(page);
280                 }
281         }
282         _xfs_buf_free_pages(bp);
283         xfs_buf_deallocate(bp);
284 }
285
286 /*
287  *      Finds all pages for the buffer in question and builds its page list.
288  */
289 STATIC int
290 _xfs_buf_lookup_pages(
291         xfs_buf_t               *bp,
292         uint                    flags)
293 {
294         struct address_space    *mapping = bp->b_target->bt_mapping;
295         size_t                  blocksize = bp->b_target->bt_bsize;
296         size_t                  size = bp->b_count_desired;
297         size_t                  nbytes, offset;
298         gfp_t                   gfp_mask = xb_to_gfp(flags);
299         unsigned short          page_count, i;
300         pgoff_t                 first;
301         xfs_off_t               end;
302         int                     error;
303
304         end = bp->b_file_offset + bp->b_buffer_length;
305         page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
306
307         error = _xfs_buf_get_pages(bp, page_count, flags);
308         if (unlikely(error))
309                 return error;
310         bp->b_flags |= _XBF_PAGE_CACHE;
311
312         offset = bp->b_offset;
313         first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
314
315         for (i = 0; i < bp->b_page_count; i++) {
316                 struct page     *page;
317                 uint            retries = 0;
318
319               retry:
320                 page = find_or_create_page(mapping, first + i, gfp_mask);
321                 if (unlikely(page == NULL)) {
322                         if (flags & XBF_READ_AHEAD) {
323                                 bp->b_page_count = i;
324                                 for (i = 0; i < bp->b_page_count; i++)
325                                         unlock_page(bp->b_pages[i]);
326                                 return -ENOMEM;
327                         }
328
329                         /*
330                          * This could deadlock.
331                          *
332                          * But until all the XFS lowlevel code is revamped to
333                          * handle buffer allocation failures we can't do much.
334                          */
335                         if (!(++retries % 100))
336                                 printk(KERN_ERR
337                                         "XFS: possible memory allocation "
338                                         "deadlock in %s (mode:0x%x)\n",
339                                         __func__, gfp_mask);
340
341                         XFS_STATS_INC(xb_page_retries);
342                         xfsbufd_wakeup(NULL, 0, gfp_mask);
343                         congestion_wait(BLK_RW_ASYNC, HZ/50);
344                         goto retry;
345                 }
346
347                 XFS_STATS_INC(xb_page_found);
348
349                 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
350                 size -= nbytes;
351
352                 ASSERT(!PagePrivate(page));
353                 if (!PageUptodate(page)) {
354                         page_count--;
355                         if (blocksize >= PAGE_CACHE_SIZE) {
356                                 if (flags & XBF_READ)
357                                         bp->b_flags |= _XBF_PAGE_LOCKED;
358                         } else if (!PagePrivate(page)) {
359                                 if (test_page_region(page, offset, nbytes))
360                                         page_count++;
361                         }
362                 }
363
364                 bp->b_pages[i] = page;
365                 offset = 0;
366         }
367
368         if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
369                 for (i = 0; i < bp->b_page_count; i++)
370                         unlock_page(bp->b_pages[i]);
371         }
372
373         if (page_count == bp->b_page_count)
374                 bp->b_flags |= XBF_DONE;
375
376         return error;
377 }
378
379 /*
380  *      Map buffer into kernel address-space if necessary.
381  */
382 STATIC int
383 _xfs_buf_map_pages(
384         xfs_buf_t               *bp,
385         uint                    flags)
386 {
387         /* A single page buffer is always mappable */
388         if (bp->b_page_count == 1) {
389                 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
390                 bp->b_flags |= XBF_MAPPED;
391         } else if (flags & XBF_MAPPED) {
392                 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
393                                         -1, PAGE_KERNEL);
394                 if (unlikely(bp->b_addr == NULL))
395                         return -ENOMEM;
396                 bp->b_addr += bp->b_offset;
397                 bp->b_flags |= XBF_MAPPED;
398         }
399
400         return 0;
401 }
402
403 /*
404  *      Finding and Reading Buffers
405  */
406
407 /*
408  *      Looks up, and creates if absent, a lockable buffer for
409  *      a given range of an inode.  The buffer is returned
410  *      locked.  If other overlapping buffers exist, they are
411  *      released before the new buffer is created and locked,
412  *      which may imply that this call will block until those buffers
413  *      are unlocked.  No I/O is implied by this call.
414  */
415 xfs_buf_t *
416 _xfs_buf_find(
417         xfs_buftarg_t           *btp,   /* block device target          */
418         xfs_off_t               ioff,   /* starting offset of range     */
419         size_t                  isize,  /* length of range              */
420         xfs_buf_flags_t         flags,
421         xfs_buf_t               *new_bp)
422 {
423         xfs_off_t               range_base;
424         size_t                  range_length;
425         xfs_bufhash_t           *hash;
426         xfs_buf_t               *bp, *n;
427
428         range_base = (ioff << BBSHIFT);
429         range_length = (isize << BBSHIFT);
430
431         /* Check for IOs smaller than the sector size / not sector aligned */
432         ASSERT(!(range_length < (1 << btp->bt_sshift)));
433         ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
434
435         hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
436
437         spin_lock(&hash->bh_lock);
438
439         list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
440                 ASSERT(btp == bp->b_target);
441                 if (bp->b_file_offset == range_base &&
442                     bp->b_buffer_length == range_length) {
443                         atomic_inc(&bp->b_hold);
444                         goto found;
445                 }
446         }
447
448         /* No match found */
449         if (new_bp) {
450                 _xfs_buf_initialize(new_bp, btp, range_base,
451                                 range_length, flags);
452                 new_bp->b_hash = hash;
453                 list_add(&new_bp->b_hash_list, &hash->bh_list);
454         } else {
455                 XFS_STATS_INC(xb_miss_locked);
456         }
457
458         spin_unlock(&hash->bh_lock);
459         return new_bp;
460
461 found:
462         spin_unlock(&hash->bh_lock);
463
464         /* Attempt to get the semaphore without sleeping,
465          * if this does not work then we need to drop the
466          * spinlock and do a hard attempt on the semaphore.
467          */
468         if (down_trylock(&bp->b_sema)) {
469                 if (!(flags & XBF_TRYLOCK)) {
470                         /* wait for buffer ownership */
471                         xfs_buf_lock(bp);
472                         XFS_STATS_INC(xb_get_locked_waited);
473                 } else {
474                         /* We asked for a trylock and failed, no need
475                          * to look at file offset and length here, we
476                          * know that this buffer at least overlaps our
477                          * buffer and is locked, therefore our buffer
478                          * either does not exist, or is this buffer.
479                          */
480                         xfs_buf_rele(bp);
481                         XFS_STATS_INC(xb_busy_locked);
482                         return NULL;
483                 }
484         } else {
485                 /* trylock worked */
486                 XB_SET_OWNER(bp);
487         }
488
489         if (bp->b_flags & XBF_STALE) {
490                 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
491                 bp->b_flags &= XBF_MAPPED;
492         }
493
494         trace_xfs_buf_find(bp, flags, _RET_IP_);
495         XFS_STATS_INC(xb_get_locked);
496         return bp;
497 }
498
499 /*
500  *      Assembles a buffer covering the specified range.
501  *      Storage in memory for all portions of the buffer will be allocated,
502  *      although backing storage may not be.
503  */
504 xfs_buf_t *
505 xfs_buf_get(
506         xfs_buftarg_t           *target,/* target for buffer            */
507         xfs_off_t               ioff,   /* starting offset of range     */
508         size_t                  isize,  /* length of range              */
509         xfs_buf_flags_t         flags)
510 {
511         xfs_buf_t               *bp, *new_bp;
512         int                     error = 0, i;
513
514         new_bp = xfs_buf_allocate(flags);
515         if (unlikely(!new_bp))
516                 return NULL;
517
518         bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
519         if (bp == new_bp) {
520                 error = _xfs_buf_lookup_pages(bp, flags);
521                 if (error)
522                         goto no_buffer;
523         } else {
524                 xfs_buf_deallocate(new_bp);
525                 if (unlikely(bp == NULL))
526                         return NULL;
527         }
528
529         for (i = 0; i < bp->b_page_count; i++)
530                 mark_page_accessed(bp->b_pages[i]);
531
532         if (!(bp->b_flags & XBF_MAPPED)) {
533                 error = _xfs_buf_map_pages(bp, flags);
534                 if (unlikely(error)) {
535                         printk(KERN_WARNING "%s: failed to map pages\n",
536                                         __func__);
537                         goto no_buffer;
538                 }
539         }
540
541         XFS_STATS_INC(xb_get);
542
543         /*
544          * Always fill in the block number now, the mapped cases can do
545          * their own overlay of this later.
546          */
547         bp->b_bn = ioff;
548         bp->b_count_desired = bp->b_buffer_length;
549
550         trace_xfs_buf_get(bp, flags, _RET_IP_);
551         return bp;
552
553  no_buffer:
554         if (flags & (XBF_LOCK | XBF_TRYLOCK))
555                 xfs_buf_unlock(bp);
556         xfs_buf_rele(bp);
557         return NULL;
558 }
559
560 STATIC int
561 _xfs_buf_read(
562         xfs_buf_t               *bp,
563         xfs_buf_flags_t         flags)
564 {
565         int                     status;
566
567         ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
568         ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
569
570         bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
571                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
572         bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
573                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
574
575         status = xfs_buf_iorequest(bp);
576         if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
577                 return status;
578         return xfs_buf_iowait(bp);
579 }
580
581 xfs_buf_t *
582 xfs_buf_read(
583         xfs_buftarg_t           *target,
584         xfs_off_t               ioff,
585         size_t                  isize,
586         xfs_buf_flags_t         flags)
587 {
588         xfs_buf_t               *bp;
589
590         flags |= XBF_READ;
591
592         bp = xfs_buf_get(target, ioff, isize, flags);
593         if (bp) {
594                 trace_xfs_buf_read(bp, flags, _RET_IP_);
595
596                 if (!XFS_BUF_ISDONE(bp)) {
597                         XFS_STATS_INC(xb_get_read);
598                         _xfs_buf_read(bp, flags);
599                 } else if (flags & XBF_ASYNC) {
600                         /*
601                          * Read ahead call which is already satisfied,
602                          * drop the buffer
603                          */
604                         goto no_buffer;
605                 } else {
606                         /* We do not want read in the flags */
607                         bp->b_flags &= ~XBF_READ;
608                 }
609         }
610
611         return bp;
612
613  no_buffer:
614         if (flags & (XBF_LOCK | XBF_TRYLOCK))
615                 xfs_buf_unlock(bp);
616         xfs_buf_rele(bp);
617         return NULL;
618 }
619
620 /*
621  *      If we are not low on memory then do the readahead in a deadlock
622  *      safe manner.
623  */
624 void
625 xfs_buf_readahead(
626         xfs_buftarg_t           *target,
627         xfs_off_t               ioff,
628         size_t                  isize,
629         xfs_buf_flags_t         flags)
630 {
631         struct backing_dev_info *bdi;
632
633         bdi = target->bt_mapping->backing_dev_info;
634         if (bdi_read_congested(bdi))
635                 return;
636
637         flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
638         xfs_buf_read(target, ioff, isize, flags);
639 }
640
641 /*
642  * Read an uncached buffer from disk. Allocates and returns a locked
643  * buffer containing the disk contents, or NULL if the read fails.
644  */
645 struct xfs_buf *
646 xfs_buf_read_uncached(
647         struct xfs_mount        *mp,
648         struct xfs_buftarg      *target,
649         xfs_daddr_t             daddr,
650         size_t                  length,
651         int                     flags)
652 {
653         xfs_buf_t               *bp;
654         int                     error;
655
656         bp = xfs_buf_get_uncached(target, length, flags);
657         if (!bp)
658                 return NULL;
659
660         /* set up the buffer for a read IO */
661         xfs_buf_lock(bp);
662         XFS_BUF_SET_ADDR(bp, daddr);
663         XFS_BUF_READ(bp);
664         XFS_BUF_BUSY(bp);
665
666         xfsbdstrat(mp, bp);
667         error = xfs_iowait(bp);
668         if (error || bp->b_error) {
669                 xfs_buf_relse(bp);
670                 return NULL;
671         }
672         return bp;
673 }
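/*
 * Example usage (a sketch, not code from this file): a caller wanting to
 * check that a block near the end of the data device is readable could do
 * something like the following, where "last_daddr" stands for a
 * caller-computed disk address:
 *
 *	bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, last_daddr,
 *				   BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
 *	if (!bp)
 *		return EIO;
 *	xfs_buf_relse(bp);
 */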
674
675 xfs_buf_t *
676 xfs_buf_get_empty(
677         size_t                  len,
678         xfs_buftarg_t           *target)
679 {
680         xfs_buf_t               *bp;
681
682         bp = xfs_buf_allocate(0);
683         if (bp)
684                 _xfs_buf_initialize(bp, target, 0, len, 0);
685         return bp;
686 }
687
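/*
 * Return the page backing a kernel virtual address, handling both
 * directly mapped and vmalloc'd memory.
 */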
688 static inline struct page *
689 mem_to_page(
690         void                    *addr)
691 {
692         if ((!is_vmalloc_addr(addr))) {
693                 return virt_to_page(addr);
694         } else {
695                 return vmalloc_to_page(addr);
696         }
697 }
698
699 int
700 xfs_buf_associate_memory(
701         xfs_buf_t               *bp,
702         void                    *mem,
703         size_t                  len)
704 {
705         int                     rval;
706         int                     i = 0;
707         unsigned long           pageaddr;
708         unsigned long           offset;
709         size_t                  buflen;
710         int                     page_count;
711
712         pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
713         offset = (unsigned long)mem - pageaddr;
714         buflen = PAGE_CACHE_ALIGN(len + offset);
715         page_count = buflen >> PAGE_CACHE_SHIFT;
716
717         /* Free any previous set of page pointers */
718         if (bp->b_pages)
719                 _xfs_buf_free_pages(bp);
720
721         bp->b_pages = NULL;
722         bp->b_addr = mem;
723
724         rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
725         if (rval)
726                 return rval;
727
728         bp->b_offset = offset;
729
730         for (i = 0; i < bp->b_page_count; i++) {
731                 bp->b_pages[i] = mem_to_page((void *)pageaddr);
732                 pageaddr += PAGE_CACHE_SIZE;
733         }
734
735         bp->b_count_desired = len;
736         bp->b_buffer_length = buflen;
737         bp->b_flags |= XBF_MAPPED;
738         bp->b_flags &= ~_XBF_PAGE_LOCKED;
739
740         return 0;
741 }
742
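/*
 * Allocate an uncached buffer of the given length.  The buffer is backed
 * by freshly allocated pages and is never inserted into the buffer cache
 * hash, so it is private to the caller.
 */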
743 xfs_buf_t *
744 xfs_buf_get_uncached(
745         struct xfs_buftarg      *target,
746         size_t                  len,
747         int                     flags)
748 {
749         unsigned long           page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
750         int                     error, i;
751         xfs_buf_t               *bp;
752
753         bp = xfs_buf_allocate(0);
754         if (unlikely(bp == NULL))
755                 goto fail;
756         _xfs_buf_initialize(bp, target, 0, len, 0);
757
758         error = _xfs_buf_get_pages(bp, page_count, 0);
759         if (error)
760                 goto fail_free_buf;
761
762         for (i = 0; i < page_count; i++) {
763                 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
764                 if (!bp->b_pages[i])
765                         goto fail_free_mem;
766         }
767         bp->b_flags |= _XBF_PAGES;
768
769         error = _xfs_buf_map_pages(bp, XBF_MAPPED);
770         if (unlikely(error)) {
771                 printk(KERN_WARNING "%s: failed to map pages\n",
772                                 __func__);
773                 goto fail_free_mem;
774         }
775
776         xfs_buf_unlock(bp);
777
778         trace_xfs_buf_get_uncached(bp, _RET_IP_);
779         return bp;
780
781  fail_free_mem:
782         while (--i >= 0)
783                 __free_page(bp->b_pages[i]);
784         _xfs_buf_free_pages(bp);
785  fail_free_buf:
786         xfs_buf_deallocate(bp);
787  fail:
788         return NULL;
789 }
790
791 /*
792  *      Increment reference count on buffer, to hold the buffer concurrently
793  *      with another thread which may release (free) the buffer asynchronously.
794  *      Must hold the buffer already to call this function.
795  */
796 void
797 xfs_buf_hold(
798         xfs_buf_t               *bp)
799 {
800         trace_xfs_buf_hold(bp, _RET_IP_);
801         atomic_inc(&bp->b_hold);
802 }
803
804 /*
805  *      Releases a hold on the specified buffer.  If the
806  *      hold count is 1, calls xfs_buf_free.
807  */
808 void
809 xfs_buf_rele(
810         xfs_buf_t               *bp)
811 {
812         xfs_bufhash_t           *hash = bp->b_hash;
813
814         trace_xfs_buf_rele(bp, _RET_IP_);
815
816         if (unlikely(!hash)) {
817                 ASSERT(!bp->b_relse);
818                 if (atomic_dec_and_test(&bp->b_hold))
819                         xfs_buf_free(bp);
820                 return;
821         }
822
823         ASSERT(atomic_read(&bp->b_hold) > 0);
824         if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
825                 if (bp->b_relse) {
826                         atomic_inc(&bp->b_hold);
827                         spin_unlock(&hash->bh_lock);
828                         (*(bp->b_relse)) (bp);
829                 } else if (bp->b_flags & XBF_FS_MANAGED) {
830                         spin_unlock(&hash->bh_lock);
831                 } else {
832                         ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
833                         list_del_init(&bp->b_hash_list);
834                         spin_unlock(&hash->bh_lock);
835                         xfs_buf_free(bp);
836                 }
837         }
838 }
839
840
841 /*
842  *      Mutual exclusion on buffers.  Locking model:
843  *
844  *      Buffers associated with inodes for which buffer locking
845  *      is not enabled are not protected by semaphores, and are
846  *      assumed to be exclusively owned by the caller.  There is a
847  *      spinlock in the buffer, used by the caller when concurrent
848  *      access is possible.
849  */
850
851 /*
852  *      Locks a buffer object, if it is not already locked.
853  *      Note that this in no way locks the underlying pages, so it is only
854  *      useful for synchronizing concurrent use of buffer objects, not for
855  *      synchronizing independent access to the underlying pages.
856  */
857 int
858 xfs_buf_cond_lock(
859         xfs_buf_t               *bp)
860 {
861         int                     locked;
862
863         locked = down_trylock(&bp->b_sema) == 0;
864         if (locked)
865                 XB_SET_OWNER(bp);
866
867         trace_xfs_buf_cond_lock(bp, _RET_IP_);
868         return locked ? 0 : -EBUSY;
869 }
870
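/*
 * Return the current count of the buffer lock semaphore.
 */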
871 int
872 xfs_buf_lock_value(
873         xfs_buf_t               *bp)
874 {
875         return bp->b_sema.count;
876 }
877
878 /*
879  *      Locks a buffer object.
880  *      Note that this in no way locks the underlying pages, so it is only
881  *      useful for synchronizing concurrent use of buffer objects, not for
882  *      synchronizing independent access to the underlying pages.
883  *
884  *      If we come across a stale, pinned, locked buffer, we know that we
885  *      are being asked to lock a buffer that has been reallocated. Because
886  *      it is pinned, we know that the log has not been pushed to disk and
887  *      hence it will still be locked. Rather than sleeping until someone
888  *      else pushes the log, push it ourselves before trying to get the lock.
889  */
890 void
891 xfs_buf_lock(
892         xfs_buf_t               *bp)
893 {
894         trace_xfs_buf_lock(bp, _RET_IP_);
895
896         if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
897                 xfs_log_force(bp->b_mount, 0);
898         if (atomic_read(&bp->b_io_remaining))
899                 blk_run_address_space(bp->b_target->bt_mapping);
900         down(&bp->b_sema);
901         XB_SET_OWNER(bp);
902
903         trace_xfs_buf_lock_done(bp, _RET_IP_);
904 }
905
906 /*
907  *      Releases the lock on the buffer object.
908  *      If the buffer is marked delwri but is not queued, do so before we
909  *      unlock the buffer as we need to set flags correctly.  We also need to
910  *      take a reference for the delwri queue because the unlocker is going to
911  *      drop theirs and they don't know we just queued it.
912  */
913 void
914 xfs_buf_unlock(
915         xfs_buf_t               *bp)
916 {
917         if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
918                 atomic_inc(&bp->b_hold);
919                 bp->b_flags |= XBF_ASYNC;
920                 xfs_buf_delwri_queue(bp, 0);
921         }
922
923         XB_CLEAR_OWNER(bp);
924         up(&bp->b_sema);
925
926         trace_xfs_buf_unlock(bp, _RET_IP_);
927 }
928
929 STATIC void
930 xfs_buf_wait_unpin(
931         xfs_buf_t               *bp)
932 {
933         DECLARE_WAITQUEUE       (wait, current);
934
935         if (atomic_read(&bp->b_pin_count) == 0)
936                 return;
937
938         add_wait_queue(&bp->b_waiters, &wait);
939         for (;;) {
940                 set_current_state(TASK_UNINTERRUPTIBLE);
941                 if (atomic_read(&bp->b_pin_count) == 0)
942                         break;
943                 if (atomic_read(&bp->b_io_remaining))
944                         blk_run_address_space(bp->b_target->bt_mapping);
945                 schedule();
946         }
947         remove_wait_queue(&bp->b_waiters, &wait);
948         set_current_state(TASK_RUNNING);
949 }
950
951 /*
952  *      Buffer Utility Routines
953  */
954
955 STATIC void
956 xfs_buf_iodone_work(
957         struct work_struct      *work)
958 {
959         xfs_buf_t               *bp =
960                 container_of(work, xfs_buf_t, b_iodone_work);
961
962         /*
963          * We can get an EOPNOTSUPP for ordered writes.  Here we clear the
964          * ordered flag and reissue them.  Because we can't tell the higher
965          * layers directly that they should not issue ordered I/O anymore,
966          * they need to check if _XFS_BARRIER_FAILED was set at I/O completion.
967          */
968         if ((bp->b_error == EOPNOTSUPP) &&
969             (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
970                 trace_xfs_buf_ordered_retry(bp, _RET_IP_);
971                 bp->b_flags &= ~XBF_ORDERED;
972                 bp->b_flags |= _XFS_BARRIER_FAILED;
973                 xfs_buf_iorequest(bp);
974         } else if (bp->b_iodone)
975                 (*(bp->b_iodone))(bp);
976         else if (bp->b_flags & XBF_ASYNC)
977                 xfs_buf_relse(bp);
978 }
979
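/*
 * I/O completion handling for a buffer: clear the I/O state flags, mark
 * the buffer done if no error occurred, then either run the iodone
 * callback (optionally deferred to a workqueue) or wake a synchronous
 * waiter.
 */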
980 void
981 xfs_buf_ioend(
982         xfs_buf_t               *bp,
983         int                     schedule)
984 {
985         trace_xfs_buf_iodone(bp, _RET_IP_);
986
987         bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
988         if (bp->b_error == 0)
989                 bp->b_flags |= XBF_DONE;
990
991         if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
992                 if (schedule) {
993                         INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
994                         queue_work(xfslogd_workqueue, &bp->b_iodone_work);
995                 } else {
996                         xfs_buf_iodone_work(&bp->b_iodone_work);
997                 }
998         } else {
999                 complete(&bp->b_iowait);
1000         }
1001 }
1002
1003 void
1004 xfs_buf_ioerror(
1005         xfs_buf_t               *bp,
1006         int                     error)
1007 {
1008         ASSERT(error >= 0 && error <= 0xffff);
1009         bp->b_error = (unsigned short)error;
1010         trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1011 }
1012
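/*
 * Write a buffer synchronously and release it.  If the write fails the
 * filesystem is shut down, as the error cannot be recovered from here.
 */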
1013 int
1014 xfs_bwrite(
1015         struct xfs_mount        *mp,
1016         struct xfs_buf          *bp)
1017 {
1018         int                     error;
1019
1020         bp->b_mount = mp;
1021         bp->b_flags |= XBF_WRITE;
1022         bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
1023
1024         xfs_buf_delwri_dequeue(bp);
1025         xfs_bdstrat_cb(bp);
1026
1027         error = xfs_buf_iowait(bp);
1028         if (error)
1029                 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1030         xfs_buf_relse(bp);
1031         return error;
1032 }
1033
1034 void
1035 xfs_bdwrite(
1036         void                    *mp,
1037         struct xfs_buf          *bp)
1038 {
1039         trace_xfs_buf_bdwrite(bp, _RET_IP_);
1040
1041         bp->b_mount = mp;
1042
1043         bp->b_flags &= ~XBF_READ;
1044         bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
1045
1046         xfs_buf_delwri_queue(bp, 1);
1047 }
1048
1049 /*
1050  * Called when we want to stop a buffer from getting written or read.
1051  * We attach the EIO error, muck with its flags, and call biodone
1052  * so that the proper iodone callbacks get called.
1053  */
1054 STATIC int
1055 xfs_bioerror(
1056         xfs_buf_t *bp)
1057 {
1058 #ifdef XFSERRORDEBUG
1059         ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1060 #endif
1061
1062         /*
1063          * No need to wait until the buffer is unpinned, we aren't flushing it.
1064          */
1065         XFS_BUF_ERROR(bp, EIO);
1066
1067         /*
1068          * We're calling biodone, so delete XBF_DONE flag.
1069          */
1070         XFS_BUF_UNREAD(bp);
1071         XFS_BUF_UNDELAYWRITE(bp);
1072         XFS_BUF_UNDONE(bp);
1073         XFS_BUF_STALE(bp);
1074
1075         xfs_biodone(bp);
1076
1077         return EIO;
1078 }
1079
1080 /*
1081  * Same as xfs_bioerror, except that we are releasing the buffer
1082  * here ourselves, and avoiding the biodone call.
1083  * This is meant for userdata errors; metadata bufs come with
1084  * iodone functions attached, so that we can track down errors.
1085  */
1086 STATIC int
1087 xfs_bioerror_relse(
1088         struct xfs_buf  *bp)
1089 {
1090         int64_t         fl = XFS_BUF_BFLAGS(bp);
1091         /*
1092          * No need to wait until the buffer is unpinned.
1093          * We aren't flushing it.
1094          *
1095          * chunkhold expects B_DONE to be set, whether
1096          * we actually finish the I/O or not. We don't want to
1097          * change that interface.
1098          */
1099         XFS_BUF_UNREAD(bp);
1100         XFS_BUF_UNDELAYWRITE(bp);
1101         XFS_BUF_DONE(bp);
1102         XFS_BUF_STALE(bp);
1103         XFS_BUF_CLR_IODONE_FUNC(bp);
1104         if (!(fl & XBF_ASYNC)) {
1105                 /*
1106                  * Mark b_error and B_ERROR _both_.
1107                  * Lots of chunkcache code assumes that.
1108                  * There's no reason to mark error for
1109                  * ASYNC buffers.
1110                  */
1111                 XFS_BUF_ERROR(bp, EIO);
1112                 XFS_BUF_FINISH_IOWAIT(bp);
1113         } else {
1114                 xfs_buf_relse(bp);
1115         }
1116
1117         return EIO;
1118 }
1119
1120
1121 /*
1122  * All xfs metadata buffers except log state machine buffers
1123  * get this attached as their b_bdstrat callback function.
1124  * This is so that we can catch a buffer
1125  * after prematurely unpinning it to forcibly shutdown the filesystem.
1126  */
1127 int
1128 xfs_bdstrat_cb(
1129         struct xfs_buf  *bp)
1130 {
1131         if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
1132                 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1133                 /*
1134                  * Metadata write that didn't get logged but
1135                  * written delayed anyway. These aren't associated
1136                  * with a transaction, and can be ignored.
1137                  */
1138                 if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1139                         return xfs_bioerror_relse(bp);
1140                 else
1141                         return xfs_bioerror(bp);
1142         }
1143
1144         xfs_buf_iorequest(bp);
1145         return 0;
1146 }
1147
1148 /*
1149  * Wrapper around bdstrat so that we can stop data from going to disk in case
1150  * we are shutting down the filesystem.  Typically user data goes through this
1151  * path; one of the exceptions is the superblock.
1152  */
1153 void
1154 xfsbdstrat(
1155         struct xfs_mount        *mp,
1156         struct xfs_buf          *bp)
1157 {
1158         if (XFS_FORCED_SHUTDOWN(mp)) {
1159                 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1160                 xfs_bioerror_relse(bp);
1161                 return;
1162         }
1163
1164         xfs_buf_iorequest(bp);
1165 }
1166
1167 STATIC void
1168 _xfs_buf_ioend(
1169         xfs_buf_t               *bp,
1170         int                     schedule)
1171 {
1172         if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1173                 bp->b_flags &= ~_XBF_PAGE_LOCKED;
1174                 xfs_buf_ioend(bp, schedule);
1175         }
1176 }
1177
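/*
 * Per-bio completion handler: record any error on the buffer, update the
 * uptodate state of the pages covered by this bio, and drop the bio's
 * reference on the buffer's outstanding I/O count.
 */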
1178 STATIC void
1179 xfs_buf_bio_end_io(
1180         struct bio              *bio,
1181         int                     error)
1182 {
1183         xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
1184         unsigned int            blocksize = bp->b_target->bt_bsize;
1185         struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1186
1187         xfs_buf_ioerror(bp, -error);
1188
1189         if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1190                 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1191
1192         do {
1193                 struct page     *page = bvec->bv_page;
1194
1195                 ASSERT(!PagePrivate(page));
1196                 if (unlikely(bp->b_error)) {
1197                         if (bp->b_flags & XBF_READ)
1198                                 ClearPageUptodate(page);
1199                 } else if (blocksize >= PAGE_CACHE_SIZE) {
1200                         SetPageUptodate(page);
1201                 } else if (!PagePrivate(page) &&
1202                                 (bp->b_flags & _XBF_PAGE_CACHE)) {
1203                         set_page_region(page, bvec->bv_offset, bvec->bv_len);
1204                 }
1205
1206                 if (--bvec >= bio->bi_io_vec)
1207                         prefetchw(&bvec->bv_page->flags);
1208
1209                 if (bp->b_flags & _XBF_PAGE_LOCKED)
1210                         unlock_page(page);
1211         } while (bvec >= bio->bi_io_vec);
1212
1213         _xfs_buf_ioend(bp, 1);
1214         bio_put(bio);
1215 }
1216
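/*
 * Map the buffer into one or more bios and submit them, selecting the
 * request type (read/write, sync, barrier, readahead) from the buffer
 * flags.
 */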
1217 STATIC void
1218 _xfs_buf_ioapply(
1219         xfs_buf_t               *bp)
1220 {
1221         int                     rw, map_i, total_nr_pages, nr_pages;
1222         struct bio              *bio;
1223         int                     offset = bp->b_offset;
1224         int                     size = bp->b_count_desired;
1225         sector_t                sector = bp->b_bn;
1226         unsigned int            blocksize = bp->b_target->bt_bsize;
1227
1228         total_nr_pages = bp->b_page_count;
1229         map_i = 0;
1230
1231         if (bp->b_flags & XBF_ORDERED) {
1232                 ASSERT(!(bp->b_flags & XBF_READ));
1233                 rw = WRITE_BARRIER;
1234         } else if (bp->b_flags & XBF_LOG_BUFFER) {
1235                 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1236                 bp->b_flags &= ~_XBF_RUN_QUEUES;
1237                 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1238         } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1239                 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1240                 bp->b_flags &= ~_XBF_RUN_QUEUES;
1241                 rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
1242         } else {
1243                 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1244                      (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1245         }
1246
1247         /* Special code path for reading a sub-page-size buffer --
1248          * we populate the whole page, and hence the other metadata
1249          * in the same page.  This optimization is only valid when the
1250          * filesystem block size is not smaller than the page size.
1251          */
1252         if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1253             ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
1254               (XBF_READ|_XBF_PAGE_LOCKED)) &&
1255             (blocksize >= PAGE_CACHE_SIZE)) {
1256                 bio = bio_alloc(GFP_NOIO, 1);
1257
1258                 bio->bi_bdev = bp->b_target->bt_bdev;
1259                 bio->bi_sector = sector - (offset >> BBSHIFT);
1260                 bio->bi_end_io = xfs_buf_bio_end_io;
1261                 bio->bi_private = bp;
1262
1263                 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1264                 size = 0;
1265
1266                 atomic_inc(&bp->b_io_remaining);
1267
1268                 goto submit_io;
1269         }
1270
1271 next_chunk:
1272         atomic_inc(&bp->b_io_remaining);
1273         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1274         if (nr_pages > total_nr_pages)
1275                 nr_pages = total_nr_pages;
1276
1277         bio = bio_alloc(GFP_NOIO, nr_pages);
1278         bio->bi_bdev = bp->b_target->bt_bdev;
1279         bio->bi_sector = sector;
1280         bio->bi_end_io = xfs_buf_bio_end_io;
1281         bio->bi_private = bp;
1282
1283         for (; size && nr_pages; nr_pages--, map_i++) {
1284                 int     rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1285
1286                 if (nbytes > size)
1287                         nbytes = size;
1288
1289                 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1290                 if (rbytes < nbytes)
1291                         break;
1292
1293                 offset = 0;
1294                 sector += nbytes >> BBSHIFT;
1295                 size -= nbytes;
1296                 total_nr_pages--;
1297         }
1298
1299 submit_io:
1300         if (likely(bio->bi_size)) {
1301                 if (xfs_buf_is_vmapped(bp)) {
1302                         flush_kernel_vmap_range(bp->b_addr,
1303                                                 xfs_buf_vmap_len(bp));
1304                 }
1305                 submit_bio(rw, bio);
1306                 if (size)
1307                         goto next_chunk;
1308         } else {
1309                 /*
1310                  * if we get here, no pages were added to the bio. However,
1311                  * we can't just error out here - if the pages are locked then
1312                  * we have to unlock them otherwise we can hang on a later
1313                  * access to the page.
1314                  */
1315                 xfs_buf_ioerror(bp, EIO);
1316                 if (bp->b_flags & _XBF_PAGE_LOCKED) {
1317                         int i;
1318                         for (i = 0; i < bp->b_page_count; i++)
1319                                 unlock_page(bp->b_pages[i]);
1320                 }
1321                 bio_put(bio);
1322         }
1323 }
1324
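/*
 * Start I/O on a buffer.  Delayed write buffers are simply queued for
 * later writeout; everything else is submitted to the block layer via
 * _xfs_buf_ioapply().
 */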
1325 int
1326 xfs_buf_iorequest(
1327         xfs_buf_t               *bp)
1328 {
1329         trace_xfs_buf_iorequest(bp, _RET_IP_);
1330
1331         if (bp->b_flags & XBF_DELWRI) {
1332                 xfs_buf_delwri_queue(bp, 1);
1333                 return 0;
1334         }
1335
1336         if (bp->b_flags & XBF_WRITE) {
1337                 xfs_buf_wait_unpin(bp);
1338         }
1339
1340         xfs_buf_hold(bp);
1341
1342         /* Set the count to 1 initially, this will stop an I/O
1343          * completion callout which happens before we have started
1344          * all the I/O from calling xfs_buf_ioend too early.
1345          */
1346         atomic_set(&bp->b_io_remaining, 1);
1347         _xfs_buf_ioapply(bp);
1348         _xfs_buf_ioend(bp, 0);
1349
1350         xfs_buf_rele(bp);
1351         return 0;
1352 }
1353
1354 /*
1355  *      Waits for I/O to complete on the buffer supplied.
1356  *      It returns immediately if no I/O is pending.
1357  *      It returns the I/O error code, if any, or 0 if there was no error.
1358  */
1359 int
1360 xfs_buf_iowait(
1361         xfs_buf_t               *bp)
1362 {
1363         trace_xfs_buf_iowait(bp, _RET_IP_);
1364
1365         if (atomic_read(&bp->b_io_remaining))
1366                 blk_run_address_space(bp->b_target->bt_mapping);
1367         wait_for_completion(&bp->b_iowait);
1368
1369         trace_xfs_buf_iowait_done(bp, _RET_IP_);
1370         return bp->b_error;
1371 }
1372
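/*
 * Return a kernel virtual address for the given byte offset into the
 * buffer, whether or not the buffer is contiguously mapped.
 */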
1373 xfs_caddr_t
1374 xfs_buf_offset(
1375         xfs_buf_t               *bp,
1376         size_t                  offset)
1377 {
1378         struct page             *page;
1379
1380         if (bp->b_flags & XBF_MAPPED)
1381                 return XFS_BUF_PTR(bp) + offset;
1382
1383         offset += bp->b_offset;
1384         page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1385         return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1386 }
1387
1388 /*
1389  *      Move data into or out of a buffer.
1390  */
1391 void
1392 xfs_buf_iomove(
1393         xfs_buf_t               *bp,    /* buffer to process            */
1394         size_t                  boff,   /* starting buffer offset       */
1395         size_t                  bsize,  /* length to copy               */
1396         void                    *data,  /* data address                 */
1397         xfs_buf_rw_t            mode)   /* read/write/zero flag         */
1398 {
1399         size_t                  bend, cpoff, csize;
1400         struct page             *page;
1401
1402         bend = boff + bsize;
1403         while (boff < bend) {
1404                 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1405                 cpoff = xfs_buf_poff(boff + bp->b_offset);
1406                 csize = min_t(size_t,
1407                               PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1408
1409                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1410
1411                 switch (mode) {
1412                 case XBRW_ZERO:
1413                         memset(page_address(page) + cpoff, 0, csize);
1414                         break;
1415                 case XBRW_READ:
1416                         memcpy(data, page_address(page) + cpoff, csize);
1417                         break;
1418                 case XBRW_WRITE:
1419                         memcpy(page_address(page) + cpoff, data, csize);
1420                 }
1421
1422                 boff += csize;
1423                 data += csize;
1424         }
1425 }
1426
1427 /*
1428  *      Handling of buffer targets (buftargs).
1429  */
1430
1431 /*
1432  *      Wait for any bufs with callbacks that have been submitted but
1433  *      have not yet returned... walk the hash list for the target.
1434  */
1435 void
1436 xfs_wait_buftarg(
1437         xfs_buftarg_t   *btp)
1438 {
1439         xfs_buf_t       *bp, *n;
1440         xfs_bufhash_t   *hash;
1441         uint            i;
1442
1443         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1444                 hash = &btp->bt_hash[i];
1445 again:
1446                 spin_lock(&hash->bh_lock);
1447                 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1448                         ASSERT(btp == bp->b_target);
1449                         if (!(bp->b_flags & XBF_FS_MANAGED)) {
1450                                 spin_unlock(&hash->bh_lock);
1451                                 /*
1452                                  * Catch superblock reference count leaks
1453                                  * immediately
1454                                  */
1455                                 BUG_ON(bp->b_bn == 0);
1456                                 delay(100);
1457                                 goto again;
1458                         }
1459                 }
1460                 spin_unlock(&hash->bh_lock);
1461         }
1462 }
1463
1464 /*
1465  *      Allocate buffer hash table for a given target.
1466  *      For devices containing metadata (i.e. not the log/realtime devices)
1467  *      we need to allocate a much larger hash table.
1468  */
1469 STATIC void
1470 xfs_alloc_bufhash(
1471         xfs_buftarg_t           *btp,
1472         int                     external)
1473 {
1474         unsigned int            i;
1475
1476         btp->bt_hashshift = external ? 3 : 12;  /* 8 or 4096 buckets */
1477         btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
1478                                          sizeof(xfs_bufhash_t));
1479         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1480                 spin_lock_init(&btp->bt_hash[i].bh_lock);
1481                 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1482         }
1483 }
1484
1485 STATIC void
1486 xfs_free_bufhash(
1487         xfs_buftarg_t           *btp)
1488 {
1489         kmem_free_large(btp->bt_hash);
1490         btp->bt_hash = NULL;
1491 }
1492
1493 /*
1494  *      buftarg list for delwrite queue processing
1495  */
1496 static LIST_HEAD(xfs_buftarg_list);
1497 static DEFINE_SPINLOCK(xfs_buftarg_lock);
1498
1499 STATIC void
1500 xfs_register_buftarg(
1501         xfs_buftarg_t           *btp)
1502 {
1503         spin_lock(&xfs_buftarg_lock);
1504         list_add(&btp->bt_list, &xfs_buftarg_list);
1505         spin_unlock(&xfs_buftarg_lock);
1506 }
1507
1508 STATIC void
1509 xfs_unregister_buftarg(
1510         xfs_buftarg_t           *btp)
1511 {
1512         spin_lock(&xfs_buftarg_lock);
1513         list_del(&btp->bt_list);
1514         spin_unlock(&xfs_buftarg_lock);
1515 }
1516
1517 void
1518 xfs_free_buftarg(
1519         struct xfs_mount        *mp,
1520         struct xfs_buftarg      *btp)
1521 {
1522         xfs_flush_buftarg(btp, 1);
1523         if (mp->m_flags & XFS_MOUNT_BARRIER)
1524                 xfs_blkdev_issue_flush(btp);
1525         xfs_free_bufhash(btp);
1526         iput(btp->bt_mapping->host);
1527
1528         /* Unregister the buftarg first so that we don't get a
1529          * wakeup finding a non-existent task
1530          */
1531         xfs_unregister_buftarg(btp);
1532         kthread_stop(btp->bt_task);
1533
1534         kmem_free(btp);
1535 }
1536
1537 STATIC int
1538 xfs_setsize_buftarg_flags(
1539         xfs_buftarg_t           *btp,
1540         unsigned int            blocksize,
1541         unsigned int            sectorsize,
1542         int                     verbose)
1543 {
1544         btp->bt_bsize = blocksize;
1545         btp->bt_sshift = ffs(sectorsize) - 1;
1546         btp->bt_smask = sectorsize - 1;
1547
1548         if (set_blocksize(btp->bt_bdev, sectorsize)) {
1549                 printk(KERN_WARNING
1550                         "XFS: Cannot set_blocksize to %u on device %s\n",
1551                         sectorsize, XFS_BUFTARG_NAME(btp));
1552                 return EINVAL;
1553         }
1554
1555         if (verbose &&
1556             (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1557                 printk(KERN_WARNING
1558                         "XFS: %u byte sectors in use on device %s.  "
1559                         "This is suboptimal; %u or greater is ideal.\n",
1560                         sectorsize, XFS_BUFTARG_NAME(btp),
1561                         (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1562         }
1563
1564         return 0;
1565 }
1566
1567 /*
1568  *      When allocating the initial buffer target we have not yet
1569  *      read in the superblock, so don't know what sized sectors
1570  *      are being used at this early stage.  Play safe.
1571  */
1572 STATIC int
1573 xfs_setsize_buftarg_early(
1574         xfs_buftarg_t           *btp,
1575         struct block_device     *bdev)
1576 {
1577         return xfs_setsize_buftarg_flags(btp,
1578                         PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
1579 }
1580
1581 int
1582 xfs_setsize_buftarg(
1583         xfs_buftarg_t           *btp,
1584         unsigned int            blocksize,
1585         unsigned int            sectorsize)
1586 {
1587         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1588 }
1589
1590 STATIC int
1591 xfs_mapping_buftarg(
1592         xfs_buftarg_t           *btp,
1593         struct block_device     *bdev)
1594 {
1595         struct backing_dev_info *bdi;
1596         struct inode            *inode;
1597         struct address_space    *mapping;
1598         static const struct address_space_operations mapping_aops = {
1599                 .sync_page = block_sync_page,
1600                 .migratepage = fail_migrate_page,
1601         };
1602
1603         inode = new_inode(bdev->bd_inode->i_sb);
1604         if (!inode) {
1605                 printk(KERN_WARNING
1606                         "XFS: Cannot allocate mapping inode for device %s\n",
1607                         XFS_BUFTARG_NAME(btp));
1608                 return ENOMEM;
1609         }
1610         inode->i_mode = S_IFBLK;
1611         inode->i_bdev = bdev;
1612         inode->i_rdev = bdev->bd_dev;
1613         bdi = blk_get_backing_dev_info(bdev);
1614         if (!bdi)
1615                 bdi = &default_backing_dev_info;
1616         mapping = &inode->i_data;
1617         mapping->a_ops = &mapping_aops;
1618         mapping->backing_dev_info = bdi;
1619         mapping_set_gfp_mask(mapping, GFP_NOFS);
1620         btp->bt_mapping = mapping;
1621         return 0;
1622 }
1623
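/*
 * Initialise the delayed write queue for this buffer target and start its
 * dedicated xfsbufd thread.
 */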
1624 STATIC int
1625 xfs_alloc_delwrite_queue(
1626         xfs_buftarg_t           *btp,
1627         const char              *fsname)
1628 {
1629         int     error = 0;
1630
1631         INIT_LIST_HEAD(&btp->bt_list);
1632         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1633         spin_lock_init(&btp->bt_delwrite_lock);
1634         btp->bt_flags = 0;
1635         btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
1636         if (IS_ERR(btp->bt_task)) {
1637                 error = PTR_ERR(btp->bt_task);
1638                 goto out_error;
1639         }
1640         xfs_register_buftarg(btp);
1641 out_error:
1642         return error;
1643 }
1644
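/*
 * Allocate and initialise a buffer target for the given block device,
 * returning NULL on failure.
 */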
1645 xfs_buftarg_t *
1646 xfs_alloc_buftarg(
1647         struct block_device     *bdev,
1648         int                     external,
1649         const char              *fsname)
1650 {
1651         xfs_buftarg_t           *btp;
1652
1653         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1654
1655         btp->bt_dev =  bdev->bd_dev;
1656         btp->bt_bdev = bdev;
1657         if (xfs_setsize_buftarg_early(btp, bdev))
1658                 goto error;
1659         if (xfs_mapping_buftarg(btp, bdev))
1660                 goto error;
1661         if (xfs_alloc_delwrite_queue(btp, fsname))
1662                 goto error;
1663         xfs_alloc_bufhash(btp, external);
1664         return btp;
1665
1666 error:
1667         kmem_free(btp);
1668         return NULL;
1669 }
1670
1671
1672 /*
1673  *      Delayed write buffer handling
1674  */
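/*
 * Queue a buffer on the tail of its target's delayed write queue, waking
 * the xfsbufd if the queue was previously empty.  A buffer that is already
 * queued is simply moved to the tail and its age reset.
 */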
1675 STATIC void
1676 xfs_buf_delwri_queue(
1677         xfs_buf_t               *bp,
1678         int                     unlock)
1679 {
1680         struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
1681         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1682
1683         trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1684
1685         ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1686
1687         spin_lock(dwlk);
1688         /* If already in the queue, dequeue and place at tail */
1689         if (!list_empty(&bp->b_list)) {
1690                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1691                 if (unlock)
1692                         atomic_dec(&bp->b_hold);
1693                 list_del(&bp->b_list);
1694         }
1695
1696         if (list_empty(dwq)) {
1697                 /* start xfsbufd as it is about to have something to do */
1698                 wake_up_process(bp->b_target->bt_task);
1699         }
1700
1701         bp->b_flags |= _XBF_DELWRI_Q;
1702         list_add_tail(&bp->b_list, dwq);
1703         bp->b_queuetime = jiffies;
1704         spin_unlock(dwlk);
1705
1706         if (unlock)
1707                 xfs_buf_unlock(bp);
1708 }
1709
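/*
 * Remove a buffer from the delayed write queue, dropping the reference the
 * queue held on it if it was actually queued.
 */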
1710 void
1711 xfs_buf_delwri_dequeue(
1712         xfs_buf_t               *bp)
1713 {
1714         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1715         int                     dequeued = 0;
1716
1717         spin_lock(dwlk);
1718         if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1719                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1720                 list_del_init(&bp->b_list);
1721                 dequeued = 1;
1722         }
1723         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1724         spin_unlock(dwlk);
1725
1726         if (dequeued)
1727                 xfs_buf_rele(bp);
1728
1729         trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
1730 }
1731
1732 /*
1733  * If a delwri buffer needs to be pushed before it has aged out, then promote
1734  * it to the head of the delwri queue so that it will be flushed on the next
1735  * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
1736  * than the age currently needed to flush the buffer. Hence the next time the
1737  * xfsbufd sees it is guaranteed to be considered old enough to flush.
1738  */
1739 void
1740 xfs_buf_delwri_promote(
1741         struct xfs_buf  *bp)
1742 {
1743         struct xfs_buftarg *btp = bp->b_target;
1744         long            age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
1745
1746         ASSERT(bp->b_flags & XBF_DELWRI);
1747         ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1748
1749         /*
1750          * Check the buffer age before locking the delayed write queue as we
1751          * don't need to promote buffers that are already past the flush age.
1752          */
1753         if (bp->b_queuetime < jiffies - age)
1754                 return;
1755         bp->b_queuetime = jiffies - age;
1756         spin_lock(&btp->bt_delwrite_lock);
1757         list_move(&bp->b_list, &btp->bt_delwrite_queue);
1758         spin_unlock(&btp->bt_delwrite_lock);
1759 }
1760
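/*
 * Wait for all pending work on the given workqueue to complete.
 */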
1761 STATIC void
1762 xfs_buf_runall_queues(
1763         struct workqueue_struct *queue)
1764 {
1765         flush_workqueue(queue);
1766 }
1767
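/*
 * Memory shrinker callback: force each registered buffer target with a
 * non-empty delayed write queue to flush, unless its xfsbufd has been put
 * to sleep for a freeze.
 */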
1768 STATIC int
1769 xfsbufd_wakeup(
1770         struct shrinker         *shrink,
1771         int                     priority,
1772         gfp_t                   mask)
1773 {
1774         xfs_buftarg_t           *btp;
1775
1776         spin_lock(&xfs_buftarg_lock);
1777         list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1778                 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1779                         continue;
1780                 if (list_empty(&btp->bt_delwrite_queue))
1781                         continue;
1782                 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1783                 wake_up_process(btp->bt_task);
1784         }
1785         spin_unlock(&xfs_buftarg_lock);
1786         return 0;
1787 }
1788
1789 /*
1790  * Move as many delayed write buffers as we can to the supplied list,
1791  * indicating if we skipped any buffers to prevent deadlocks.
1792  */
1793 STATIC int
1794 xfs_buf_delwri_split(
1795         xfs_buftarg_t   *target,
1796         struct list_head *list,
1797         unsigned long   age)
1798 {
1799         xfs_buf_t       *bp, *n;
1800         struct list_head *dwq = &target->bt_delwrite_queue;
1801         spinlock_t      *dwlk = &target->bt_delwrite_lock;
1802         int             skipped = 0;
1803         int             force;
1804
1805         force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1806         INIT_LIST_HEAD(list);
1807         spin_lock(dwlk);
1808         list_for_each_entry_safe(bp, n, dwq, b_list) {
1809                 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1810                 ASSERT(bp->b_flags & XBF_DELWRI);
1811
1812                 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
1813                         if (!force &&
1814                             time_before(jiffies, bp->b_queuetime + age)) {
1815                                 xfs_buf_unlock(bp);
1816                                 break;
1817                         }
1818
1819                         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1820                                          _XBF_RUN_QUEUES);
1821                         bp->b_flags |= XBF_WRITE;
1822                         list_move_tail(&bp->b_list, list);
1823                 } else
1824                         skipped++;
1825         }
1826         spin_unlock(dwlk);
1827
1828         return skipped;
1829
1830 }
1831
1832 /*
1833  * The compare function is more complex than it needs to be because
1834  * the return value is only 32 bits while we are comparing 64 bit
1835  * block numbers.
1836  */
1837 static int
1838 xfs_buf_cmp(
1839         void            *priv,
1840         struct list_head *a,
1841         struct list_head *b)
1842 {
1843         struct xfs_buf  *ap = container_of(a, struct xfs_buf, b_list);
1844         struct xfs_buf  *bp = container_of(b, struct xfs_buf, b_list);
1845         xfs_daddr_t             diff;
1846
1847         diff = ap->b_bn - bp->b_bn;
1848         if (diff < 0)
1849                 return -1;
1850         if (diff > 0)
1851                 return 1;
1852         return 0;
1853 }
1854
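/*
 * Sort a list of delayed write buffers by disk block number so the I/O is
 * issued in ascending order.
 */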
1855 void
1856 xfs_buf_delwri_sort(
1857         xfs_buftarg_t   *target,
1858         struct list_head *list)
1859 {
1860         list_sort(NULL, list, xfs_buf_cmp);
1861 }
1862
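/*
 * The delayed write buffer daemon: periodically wakes up and pushes any
 * delayed write buffers that have aged out (or everything, if a flush has
 * been forced) to disk in block number order.
 */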
1863 STATIC int
1864 xfsbufd(
1865         void            *data)
1866 {
1867         xfs_buftarg_t   *target = (xfs_buftarg_t *)data;
1868
1869         current->flags |= PF_MEMALLOC;
1870
1871         set_freezable();
1872
1873         do {
1874                 long    age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1875                 long    tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
1876                 int     count = 0;
1877                 struct list_head tmp;
1878
1879                 if (unlikely(freezing(current))) {
1880                         set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1881                         refrigerator();
1882                 } else {
1883                         clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1884                 }
1885
1886                 /* sleep for a long time if there is nothing to do. */
1887                 if (list_empty(&target->bt_delwrite_queue))
1888                         tout = MAX_SCHEDULE_TIMEOUT;
1889                 schedule_timeout_interruptible(tout);
1890
1891                 xfs_buf_delwri_split(target, &tmp, age);
1892                 list_sort(NULL, &tmp, xfs_buf_cmp);
1893                 while (!list_empty(&tmp)) {
1894                         struct xfs_buf *bp;
1895                         bp = list_first_entry(&tmp, struct xfs_buf, b_list);
1896                         list_del_init(&bp->b_list);
1897                         xfs_bdstrat_cb(bp);
1898                         count++;
1899                 }
1900                 if (count)
1901                         blk_run_address_space(target->bt_mapping);
1902
1903         } while (!kthread_should_stop());
1904
1905         return 0;
1906 }
1907
1908 /*
1909  *      Flush out all queued delayed write buffers belonging to the given
1910  *      target, optionally waiting for I/O completion.  This is used in
1911  *      filesystem error handling to preserve the consistency of its metadata.
1912  */
1913 int
1914 xfs_flush_buftarg(
1915         xfs_buftarg_t   *target,
1916         int             wait)
1917 {
1918         xfs_buf_t       *bp;
1919         int             pincount = 0;
1920         LIST_HEAD(tmp_list);
1921         LIST_HEAD(wait_list);
1922
1923         xfs_buf_runall_queues(xfsconvertd_workqueue);
1924         xfs_buf_runall_queues(xfsdatad_workqueue);
1925         xfs_buf_runall_queues(xfslogd_workqueue);
1926
1927         set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1928         pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
1929
1930         /*
1931          * The delayed write list lock has been dropped, so walk the temporary
1932          * list.  All I/O is issued asynchronously; if we need to wait for
1933          * completion we do that after all of the I/O has been submitted.
1934          */
1935         list_sort(NULL, &tmp_list, xfs_buf_cmp);
1936         while (!list_empty(&tmp_list)) {
1937                 bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
1938                 ASSERT(target == bp->b_target);
1939                 list_del_init(&bp->b_list);
1940                 if (wait) {
1941                         bp->b_flags &= ~XBF_ASYNC;
1942                         list_add(&bp->b_list, &wait_list);
1943                 }
1944                 xfs_bdstrat_cb(bp);
1945         }
1946
1947         if (wait) {
1948                 /* Expedite and wait for IO to complete. */
1949                 blk_run_address_space(target->bt_mapping);
1950                 while (!list_empty(&wait_list)) {
1951                         bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1952
1953                         list_del_init(&bp->b_list);
1954                         xfs_iowait(bp);
1955                         xfs_buf_relse(bp);
1956                 }
1957         }
1958
1959         return pincount;
1960 }
1961
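/*
 * Set up the buffer cache infrastructure: the xfs_buf zone, the I/O
 * completion workqueues and the memory shrinker.
 */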
1962 int __init
1963 xfs_buf_init(void)
1964 {
1965         xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1966                                                 KM_ZONE_HWALIGN, NULL);
1967         if (!xfs_buf_zone)
1968                 goto out;
1969
1970         xfslogd_workqueue = alloc_workqueue("xfslogd",
1971                                         WQ_RESCUER | WQ_HIGHPRI, 1);
1972         if (!xfslogd_workqueue)
1973                 goto out_free_buf_zone;
1974
1975         xfsdatad_workqueue = create_workqueue("xfsdatad");
1976         if (!xfsdatad_workqueue)
1977                 goto out_destroy_xfslogd_workqueue;
1978
1979         xfsconvertd_workqueue = create_workqueue("xfsconvertd");
1980         if (!xfsconvertd_workqueue)
1981                 goto out_destroy_xfsdatad_workqueue;
1982
1983         register_shrinker(&xfs_buf_shake);
1984         return 0;
1985
1986  out_destroy_xfsdatad_workqueue:
1987         destroy_workqueue(xfsdatad_workqueue);
1988  out_destroy_xfslogd_workqueue:
1989         destroy_workqueue(xfslogd_workqueue);
1990  out_free_buf_zone:
1991         kmem_zone_destroy(xfs_buf_zone);
1992  out:
1993         return -ENOMEM;
1994 }
1995
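/*
 * Tear down everything that was set up by xfs_buf_init().
 */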
1996 void
1997 xfs_buf_terminate(void)
1998 {
1999         unregister_shrinker(&xfs_buf_shake);
2000         destroy_workqueue(xfsconvertd_workqueue);
2001         destroy_workqueue(xfsdatad_workqueue);
2002         destroy_workqueue(xfslogd_workqueue);
2003         kmem_zone_destroy(xfs_buf_zone);
2004 }
2005
2006 #ifdef CONFIG_KDB_MODULES
2007 struct list_head *
2008 xfs_get_buftarg_list(void)
2009 {
2010         return &xfs_buftarg_list;
2011 }
2012 #endif