[pandora-kernel.git] fs/xfs/linux-2.6/xfs_buf.c
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36
37 #include "xfs_sb.h"
38 #include "xfs_inum.h"
39 #include "xfs_log.h"
40 #include "xfs_ag.h"
41 #include "xfs_mount.h"
42 #include "xfs_trace.h"
43
44 static kmem_zone_t *xfs_buf_zone;
45 STATIC int xfsbufd(void *);
46 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
47
48 static struct workqueue_struct *xfslogd_workqueue;
49 struct workqueue_struct *xfsdatad_workqueue;
50 struct workqueue_struct *xfsconvertd_workqueue;
51
52 #ifdef XFS_BUF_LOCK_TRACKING
53 # define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
54 # define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
55 # define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
56 #else
57 # define XB_SET_OWNER(bp)       do { } while (0)
58 # define XB_CLEAR_OWNER(bp)     do { } while (0)
59 # define XB_GET_OWNER(bp)       do { } while (0)
60 #endif
61
62 #define xb_to_gfp(flags) \
63         ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
64           ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
65
66 #define xb_to_km(flags) \
67          (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
68
69 #define xfs_buf_allocate(flags) \
70         kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
71 #define xfs_buf_deallocate(bp) \
72         kmem_zone_free(xfs_buf_zone, (bp));
73
74 static inline int
75 xfs_buf_is_vmapped(
76         struct xfs_buf  *bp)
77 {
78         /*
79          * Return true if the buffer is vmapped.
80          *
81          * The XBF_MAPPED flag is set if the buffer should be mapped, but the
82          * code is clever enough to know it doesn't have to map a single page,
83          * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
84          */
85         return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
86 }
87
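/*
 * Return the number of bytes covered by the buffer's mapping: from the data
 * offset within the first page through to the end of the last mapped page.
 */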
88 static inline int
89 xfs_buf_vmap_len(
90         struct xfs_buf  *bp)
91 {
92         return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
93 }
94
95 /*
96  * xfs_buf_lru_add - add a buffer to the LRU.
97  *
98  * The LRU takes a new reference to the buffer so that it will only be freed
99  * once the shrinker takes the buffer off the LRU.
100  */
101 STATIC void
102 xfs_buf_lru_add(
103         struct xfs_buf  *bp)
104 {
105         struct xfs_buftarg *btp = bp->b_target;
106
107         spin_lock(&btp->bt_lru_lock);
108         if (list_empty(&bp->b_lru)) {
109                 atomic_inc(&bp->b_hold);
110                 list_add_tail(&bp->b_lru, &btp->bt_lru);
111                 btp->bt_lru_nr++;
112         }
113         spin_unlock(&btp->bt_lru_lock);
114 }
115
116 /*
117  * xfs_buf_lru_del - remove a buffer from the LRU
118  *
119  * The unlocked check is safe here because it only occurs when there are no
120  * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
121  * there to optimise the shrinker removing the buffer from the LRU and
122  * calling xfs_buf_free(), i.e. it removes an unnecessary round trip on the
123  * bt_lru_lock.
124  */
125 STATIC void
126 xfs_buf_lru_del(
127         struct xfs_buf  *bp)
128 {
129         struct xfs_buftarg *btp = bp->b_target;
130
131         if (list_empty(&bp->b_lru))
132                 return;
133
134         spin_lock(&btp->bt_lru_lock);
135         if (!list_empty(&bp->b_lru)) {
136                 list_del_init(&bp->b_lru);
137                 btp->bt_lru_nr--;
138         }
139         spin_unlock(&btp->bt_lru_lock);
140 }
141
142 /*
143  * When we mark a buffer stale, we remove the buffer from the LRU and clear the
144  * b_lru_ref count so that the buffer is freed immediately when the buffer
145  * reference count falls to zero. If the buffer is already on the LRU, we need
146  * to remove the reference that LRU holds on the buffer.
147  *
148  * This prevents build-up of stale buffers on the LRU.
149  */
150 void
151 xfs_buf_stale(
152         struct xfs_buf  *bp)
153 {
154         bp->b_flags |= XBF_STALE;
155         atomic_set(&(bp)->b_lru_ref, 0);
156         if (!list_empty(&bp->b_lru)) {
157                 struct xfs_buftarg *btp = bp->b_target;
158
159                 spin_lock(&btp->bt_lru_lock);
160                 if (!list_empty(&bp->b_lru)) {
161                         list_del_init(&bp->b_lru);
162                         btp->bt_lru_nr--;
163                         atomic_dec(&bp->b_hold);
164                 }
165                 spin_unlock(&btp->bt_lru_lock);
166         }
167         ASSERT(atomic_read(&bp->b_hold) >= 1);
168 }
169
170 STATIC void
171 _xfs_buf_initialize(
172         xfs_buf_t               *bp,
173         xfs_buftarg_t           *target,
174         xfs_off_t               range_base,
175         size_t                  range_length,
176         xfs_buf_flags_t         flags)
177 {
178         /*
179          * We don't want certain flags to appear in b_flags.
180          */
181         flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
182
183         memset(bp, 0, sizeof(xfs_buf_t));
184         atomic_set(&bp->b_hold, 1);
185         atomic_set(&bp->b_lru_ref, 1);
186         init_completion(&bp->b_iowait);
187         INIT_LIST_HEAD(&bp->b_lru);
188         INIT_LIST_HEAD(&bp->b_list);
189         RB_CLEAR_NODE(&bp->b_rbnode);
190         sema_init(&bp->b_sema, 0); /* held, no waiters */
191         XB_SET_OWNER(bp);
192         bp->b_target = target;
193         bp->b_file_offset = range_base;
194         /*
195          * Set buffer_length and count_desired to the same value initially.
196          * I/O routines should use count_desired, which will be the same in
197          * most cases but may be reset (e.g. XFS recovery).
198          */
199         bp->b_buffer_length = bp->b_count_desired = range_length;
200         bp->b_flags = flags;
201         bp->b_bn = XFS_BUF_DADDR_NULL;
202         atomic_set(&bp->b_pin_count, 0);
203         init_waitqueue_head(&bp->b_waiters);
204
205         XFS_STATS_INC(xb_create);
206
207         trace_xfs_buf_init(bp, _RET_IP_);
208 }
209
210 /*
211  *      Allocate a page array capable of holding a specified number
212  *      of pages, and point the page buf at it.
213  */
214 STATIC int
215 _xfs_buf_get_pages(
216         xfs_buf_t               *bp,
217         int                     page_count,
218         xfs_buf_flags_t         flags)
219 {
220         /* Make sure that we have a page list */
221         if (bp->b_pages == NULL) {
222                 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
223                 bp->b_page_count = page_count;
224                 if (page_count <= XB_PAGES) {
225                         bp->b_pages = bp->b_page_array;
226                 } else {
227                         bp->b_pages = kmem_alloc(sizeof(struct page *) *
228                                         page_count, xb_to_km(flags));
229                         if (bp->b_pages == NULL)
230                                 return -ENOMEM;
231                 }
232                 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
233         }
234         return 0;
235 }
236
237 /*
238  *      Frees b_pages if it was allocated.
239  */
240 STATIC void
241 _xfs_buf_free_pages(
242         xfs_buf_t       *bp)
243 {
244         if (bp->b_pages != bp->b_page_array) {
245                 kmem_free(bp->b_pages);
246                 bp->b_pages = NULL;
247         }
248 }
249
250 /*
251  *      Releases the specified buffer.
252  *
253  *      The modification state of any associated pages is left unchanged.
254  *      The buffer must not be on any hash - use xfs_buf_rele instead for
255  *      hashed and refcounted buffers
256  */
257 void
258 xfs_buf_free(
259         xfs_buf_t               *bp)
260 {
261         trace_xfs_buf_free(bp, _RET_IP_);
262
263         ASSERT(list_empty(&bp->b_lru));
264
265         if (bp->b_flags & _XBF_PAGES) {
266                 uint            i;
267
268                 if (xfs_buf_is_vmapped(bp))
269                         vm_unmap_ram(bp->b_addr - bp->b_offset,
270                                         bp->b_page_count);
271
272                 for (i = 0; i < bp->b_page_count; i++) {
273                         struct page     *page = bp->b_pages[i];
274
275                         __free_page(page);
276                 }
277         } else if (bp->b_flags & _XBF_KMEM)
278                 kmem_free(bp->b_addr);
279         _xfs_buf_free_pages(bp);
280         xfs_buf_deallocate(bp);
281 }
282
283 /*
284  * Allocates all the pages for the buffer in question and builds its page list.
285  */
286 STATIC int
287 xfs_buf_allocate_memory(
288         xfs_buf_t               *bp,
289         uint                    flags)
290 {
291         size_t                  size = bp->b_count_desired;
292         size_t                  nbytes, offset;
293         gfp_t                   gfp_mask = xb_to_gfp(flags);
294         unsigned short          page_count, i;
295         xfs_off_t               end;
296         int                     error;
297
298         /*
299          * for buffers that are contained within a single page, just allocate
300          * the memory from the heap - there's no need for the complexity of
301          * page arrays to keep allocation down to order 0.
302          */
303         if (bp->b_buffer_length < PAGE_SIZE) {
304                 bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
305                 if (!bp->b_addr) {
306                         /* low memory - use alloc_page loop instead */
307                         goto use_alloc_page;
308                 }
309
310                 if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
311                                                                 PAGE_MASK) !=
312                     ((unsigned long)bp->b_addr & PAGE_MASK)) {
313                         /* b_addr spans two pages - use alloc_page instead */
314                         kmem_free(bp->b_addr);
315                         bp->b_addr = NULL;
316                         goto use_alloc_page;
317                 }
318                 bp->b_offset = offset_in_page(bp->b_addr);
319                 bp->b_pages = bp->b_page_array;
320                 bp->b_pages[0] = virt_to_page(bp->b_addr);
321                 bp->b_page_count = 1;
322                 bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
323                 return 0;
324         }
325
326 use_alloc_page:
327         end = bp->b_file_offset + bp->b_buffer_length;
328         page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
329         error = _xfs_buf_get_pages(bp, page_count, flags);
330         if (unlikely(error))
331                 return error;
332
333         offset = bp->b_offset;
334         bp->b_flags |= _XBF_PAGES;
335
336         for (i = 0; i < bp->b_page_count; i++) {
337                 struct page     *page;
338                 uint            retries = 0;
339 retry:
340                 page = alloc_page(gfp_mask);
341                 if (unlikely(page == NULL)) {
342                         if (flags & XBF_READ_AHEAD) {
343                                 bp->b_page_count = i;
344                                 error = ENOMEM;
345                                 goto out_free_pages;
346                         }
347
348                         /*
349                          * This could deadlock.
350                          *
351                          * But until all the XFS lowlevel code is revamped to
352                          * handle buffer allocation failures we can't do much.
353                          */
354                         if (!(++retries % 100))
355                                 xfs_err(NULL,
356                 "possible memory allocation deadlock in %s (mode:0x%x)",
357                                         __func__, gfp_mask);
358
359                         XFS_STATS_INC(xb_page_retries);
360                         congestion_wait(BLK_RW_ASYNC, HZ/50);
361                         goto retry;
362                 }
363
364                 XFS_STATS_INC(xb_page_found);
365
366                 nbytes = min_t(size_t, size, PAGE_SIZE - offset);
367                 size -= nbytes;
368                 bp->b_pages[i] = page;
369                 offset = 0;
370         }
371         return 0;
372
373 out_free_pages:
374         for (i = 0; i < bp->b_page_count; i++)
375                 __free_page(bp->b_pages[i]);
376         return error;
377 }
378
379 /*
380  *      Map buffer into kernel address-space if necessary.
381  */
382 STATIC int
383 _xfs_buf_map_pages(
384         xfs_buf_t               *bp,
385         uint                    flags)
386 {
387         ASSERT(bp->b_flags & _XBF_PAGES);
388         if (bp->b_page_count == 1) {
389                 /* A single page buffer is always mappable */
390                 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
391                 bp->b_flags |= XBF_MAPPED;
392         } else if (flags & XBF_MAPPED) {
393                 int retried = 0;
394
395                 do {
396                         bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
397                                                 -1, PAGE_KERNEL);
398                         if (bp->b_addr)
399                                 break;
400                         vm_unmap_aliases();
401                 } while (retried++ <= 1);
402
403                 if (!bp->b_addr)
404                         return -ENOMEM;
405                 bp->b_addr += bp->b_offset;
406                 bp->b_flags |= XBF_MAPPED;
407         }
408
409         return 0;
410 }
411
412 /*
413  *      Finding and Reading Buffers
414  */
415
416 /*
417  *      Looks up, and creates if absent, a lockable buffer for
418  *      a given range of an inode.  The buffer is returned
419  *      locked.  If other overlapping buffers exist, they are
420  *      released before the new buffer is created and locked,
421  *      which may imply that this call will block until those buffers
422  *      are unlocked.  No I/O is implied by this call.
423  */
424 xfs_buf_t *
425 _xfs_buf_find(
426         xfs_buftarg_t           *btp,   /* block device target          */
427         xfs_off_t               ioff,   /* starting offset of range     */
428         size_t                  isize,  /* length of range              */
429         xfs_buf_flags_t         flags,
430         xfs_buf_t               *new_bp)
431 {
432         xfs_off_t               range_base;
433         size_t                  range_length;
434         struct xfs_perag        *pag;
435         struct rb_node          **rbp;
436         struct rb_node          *parent;
437         xfs_buf_t               *bp;
438
439         range_base = (ioff << BBSHIFT);
440         range_length = (isize << BBSHIFT);
441
442         /* Check for IOs smaller than the sector size / not sector aligned */
443         ASSERT(!(range_length < (1 << btp->bt_sshift)));
444         ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
445
446         /* get tree root */
447         pag = xfs_perag_get(btp->bt_mount,
448                                 xfs_daddr_to_agno(btp->bt_mount, ioff));
449
450         /* walk tree */
451         spin_lock(&pag->pag_buf_lock);
452         rbp = &pag->pag_buf_tree.rb_node;
453         parent = NULL;
454         bp = NULL;
455         while (*rbp) {
456                 parent = *rbp;
457                 bp = rb_entry(parent, struct xfs_buf, b_rbnode);
458
459                 if (range_base < bp->b_file_offset)
460                         rbp = &(*rbp)->rb_left;
461                 else if (range_base > bp->b_file_offset)
462                         rbp = &(*rbp)->rb_right;
463                 else {
464                         /*
465                          * found a block offset match. If the range doesn't
466                          * match, the only way this is allowed is if the buffer
467                          * in the cache is stale and the transaction that made
468                          * it stale has not yet committed. i.e. we are
469                          * reallocating a busy extent. Skip this buffer and
470                          * continue searching to the right for an exact match.
471                          */
472                         if (bp->b_buffer_length != range_length) {
473                                 ASSERT(bp->b_flags & XBF_STALE);
474                                 rbp = &(*rbp)->rb_right;
475                                 continue;
476                         }
477                         atomic_inc(&bp->b_hold);
478                         goto found;
479                 }
480         }
481
482         /* No match found */
483         if (new_bp) {
484                 _xfs_buf_initialize(new_bp, btp, range_base,
485                                 range_length, flags);
486                 rb_link_node(&new_bp->b_rbnode, parent, rbp);
487                 rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
488                 /* the buffer keeps the perag reference until it is freed */
489                 new_bp->b_pag = pag;
490                 spin_unlock(&pag->pag_buf_lock);
491         } else {
492                 XFS_STATS_INC(xb_miss_locked);
493                 spin_unlock(&pag->pag_buf_lock);
494                 xfs_perag_put(pag);
495         }
496         return new_bp;
497
498 found:
499         spin_unlock(&pag->pag_buf_lock);
500         xfs_perag_put(pag);
501
502         if (!xfs_buf_trylock(bp)) {
503                 if (flags & XBF_TRYLOCK) {
504                         xfs_buf_rele(bp);
505                         XFS_STATS_INC(xb_busy_locked);
506                         return NULL;
507                 }
508                 xfs_buf_lock(bp);
509                 XFS_STATS_INC(xb_get_locked_waited);
510         }
511
512         /*
513          * if the buffer is stale, clear all the external state associated with
514          * it. We need to keep flags such as how we allocated the buffer memory
515          * intact here.
516          */
517         if (bp->b_flags & XBF_STALE) {
518                 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
519                 bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
520         }
521
522         trace_xfs_buf_find(bp, flags, _RET_IP_);
523         XFS_STATS_INC(xb_get_locked);
524         return bp;
525 }
526
527 /*
528  *      Assembles a buffer covering the specified range.
529  *      Storage in memory for all portions of the buffer will be allocated,
530  *      although backing storage may not be.
531  */
532 xfs_buf_t *
533 xfs_buf_get(
534         xfs_buftarg_t           *target,/* target for buffer            */
535         xfs_off_t               ioff,   /* starting offset of range     */
536         size_t                  isize,  /* length of range              */
537         xfs_buf_flags_t         flags)
538 {
539         xfs_buf_t               *bp, *new_bp;
540         int                     error = 0;
541
542         new_bp = xfs_buf_allocate(flags);
543         if (unlikely(!new_bp))
544                 return NULL;
545
546         bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
547         if (bp == new_bp) {
548                 error = xfs_buf_allocate_memory(bp, flags);
549                 if (error)
550                         goto no_buffer;
551         } else {
552                 xfs_buf_deallocate(new_bp);
553                 if (unlikely(bp == NULL))
554                         return NULL;
555         }
556
557         if (!(bp->b_flags & XBF_MAPPED)) {
558                 error = _xfs_buf_map_pages(bp, flags);
559                 if (unlikely(error)) {
560                         xfs_warn(target->bt_mount,
561                                 "%s: failed to map pages\n", __func__);
562                         goto no_buffer;
563                 }
564         }
565
566         XFS_STATS_INC(xb_get);
567
568         /*
569          * Always fill in the block number now, the mapped cases can do
570          * their own overlay of this later.
571          */
572         bp->b_bn = ioff;
573         bp->b_count_desired = bp->b_buffer_length;
574
575         trace_xfs_buf_get(bp, flags, _RET_IP_);
576         return bp;
577
578  no_buffer:
579         if (flags & (XBF_LOCK | XBF_TRYLOCK))
580                 xfs_buf_unlock(bp);
581         xfs_buf_rele(bp);
582         return NULL;
583 }
584
585 STATIC int
586 _xfs_buf_read(
587         xfs_buf_t               *bp,
588         xfs_buf_flags_t         flags)
589 {
590         int                     status;
591
592         ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
593         ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
594
595         bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
596         bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
597
598         status = xfs_buf_iorequest(bp);
599         if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
600                 return status;
601         return xfs_buf_iowait(bp);
602 }
603
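/*
 * Read a buffer for the given disk range, issuing I/O only when the buffer
 * is not already up to date.  Illustrative usage only - flag choice and
 * error handling are up to the caller, and "blkno"/"numblks" stand in for
 * caller-supplied values in 512 byte basic blocks:
 *
 *	bp = xfs_buf_read(target, blkno, numblks, XBF_LOCK);
 *	if (bp) {
 *		... inspect XFS_BUF_PTR(bp) ...
 *		xfs_buf_relse(bp);
 *	}
 */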
604 xfs_buf_t *
605 xfs_buf_read(
606         xfs_buftarg_t           *target,
607         xfs_off_t               ioff,
608         size_t                  isize,
609         xfs_buf_flags_t         flags)
610 {
611         xfs_buf_t               *bp;
612
613         flags |= XBF_READ;
614
615         bp = xfs_buf_get(target, ioff, isize, flags);
616         if (bp) {
617                 trace_xfs_buf_read(bp, flags, _RET_IP_);
618
619                 if (!XFS_BUF_ISDONE(bp)) {
620                         XFS_STATS_INC(xb_get_read);
621                         _xfs_buf_read(bp, flags);
622                 } else if (flags & XBF_ASYNC) {
623                         /*
624                          * Read ahead call which is already satisfied,
625                          * drop the buffer
626                          */
627                         goto no_buffer;
628                 } else {
629                         /* We do not want read in the flags */
630                         bp->b_flags &= ~XBF_READ;
631                 }
632         }
633
634         return bp;
635
636  no_buffer:
637         if (flags & (XBF_LOCK | XBF_TRYLOCK))
638                 xfs_buf_unlock(bp);
639         xfs_buf_rele(bp);
640         return NULL;
641 }
642
643 /*
644  *      If we are not low on memory then do the readahead in a deadlock
645  *      safe manner.
646  */
647 void
648 xfs_buf_readahead(
649         xfs_buftarg_t           *target,
650         xfs_off_t               ioff,
651         size_t                  isize)
652 {
653         if (bdi_read_congested(target->bt_bdi))
654                 return;
655
656         xfs_buf_read(target, ioff, isize,
657                      XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
658 }
659
660 /*
661  * Read an uncached buffer from disk. Allocates and returns a locked
662  * buffer containing the disk contents or nothing.
663  */
664 struct xfs_buf *
665 xfs_buf_read_uncached(
666         struct xfs_mount        *mp,
667         struct xfs_buftarg      *target,
668         xfs_daddr_t             daddr,
669         size_t                  length,
670         int                     flags)
671 {
672         xfs_buf_t               *bp;
673         int                     error;
674
675         bp = xfs_buf_get_uncached(target, length, flags);
676         if (!bp)
677                 return NULL;
678
679         /* set up the buffer for a read IO */
680         XFS_BUF_SET_ADDR(bp, daddr);
681         XFS_BUF_READ(bp);
682         XFS_BUF_BUSY(bp);
683
684         xfsbdstrat(mp, bp);
685         error = xfs_buf_iowait(bp);
686         if (error || bp->b_error) {
687                 xfs_buf_relse(bp);
688                 return NULL;
689         }
690         return bp;
691 }
692
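/*
 * Allocate an "empty" buffer of the given length: the structure is
 * initialised but no backing pages are allocated.  Callers typically attach
 * their own memory afterwards via xfs_buf_associate_memory().
 */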
693 xfs_buf_t *
694 xfs_buf_get_empty(
695         size_t                  len,
696         xfs_buftarg_t           *target)
697 {
698         xfs_buf_t               *bp;
699
700         bp = xfs_buf_allocate(0);
701         if (bp)
702                 _xfs_buf_initialize(bp, target, 0, len, 0);
703         return bp;
704 }
705
706 /*
707  * Return a buffer allocated as an empty buffer and associated to external
708  * memory via xfs_buf_associate_memory() back to its empty state.
709  */
710 void
711 xfs_buf_set_empty(
712         struct xfs_buf          *bp,
713         size_t                  len)
714 {
715         if (bp->b_pages)
716                 _xfs_buf_free_pages(bp);
717
718         bp->b_pages = NULL;
719         bp->b_page_count = 0;
720         bp->b_addr = NULL;
721         bp->b_file_offset = 0;
722         bp->b_buffer_length = bp->b_count_desired = len;
723         bp->b_bn = XFS_BUF_DADDR_NULL;
724         bp->b_flags &= ~XBF_MAPPED;
725 }
726
727 static inline struct page *
728 mem_to_page(
729         void                    *addr)
730 {
731         if ((!is_vmalloc_addr(addr))) {
732                 return virt_to_page(addr);
733         } else {
734                 return vmalloc_to_page(addr);
735         }
736 }
737
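/*
 * Attach an externally allocated memory region to the buffer: build a page
 * list covering [mem, mem + len) and mark the buffer mapped.  No pages are
 * allocated and no data is copied.
 */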
738 int
739 xfs_buf_associate_memory(
740         xfs_buf_t               *bp,
741         void                    *mem,
742         size_t                  len)
743 {
744         int                     rval;
745         int                     i = 0;
746         unsigned long           pageaddr;
747         unsigned long           offset;
748         size_t                  buflen;
749         int                     page_count;
750
751         pageaddr = (unsigned long)mem & PAGE_MASK;
752         offset = (unsigned long)mem - pageaddr;
753         buflen = PAGE_ALIGN(len + offset);
754         page_count = buflen >> PAGE_SHIFT;
755
756         /* Free any previous set of page pointers */
757         if (bp->b_pages)
758                 _xfs_buf_free_pages(bp);
759
760         bp->b_pages = NULL;
761         bp->b_addr = mem;
762
763         rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
764         if (rval)
765                 return rval;
766
767         bp->b_offset = offset;
768
769         for (i = 0; i < bp->b_page_count; i++) {
770                 bp->b_pages[i] = mem_to_page((void *)pageaddr);
771                 pageaddr += PAGE_SIZE;
772         }
773
774         bp->b_count_desired = len;
775         bp->b_buffer_length = buflen;
776         bp->b_flags |= XBF_MAPPED;
777
778         return 0;
779 }
780
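/*
 * Allocate a buffer that is not inserted into the per-AG buffer cache.
 * Pages are allocated and mapped, but the buffer carries no disk address
 * until the caller assigns one (see xfs_buf_read_uncached()).
 */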
781 xfs_buf_t *
782 xfs_buf_get_uncached(
783         struct xfs_buftarg      *target,
784         size_t                  len,
785         int                     flags)
786 {
787         unsigned long           page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
788         int                     error, i;
789         xfs_buf_t               *bp;
790
791         bp = xfs_buf_allocate(0);
792         if (unlikely(bp == NULL))
793                 goto fail;
794         _xfs_buf_initialize(bp, target, 0, len, 0);
795
796         error = _xfs_buf_get_pages(bp, page_count, 0);
797         if (error)
798                 goto fail_free_buf;
799
800         for (i = 0; i < page_count; i++) {
801                 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
802                 if (!bp->b_pages[i])
803                         goto fail_free_mem;
804         }
805         bp->b_flags |= _XBF_PAGES;
806
807         error = _xfs_buf_map_pages(bp, XBF_MAPPED);
808         if (unlikely(error)) {
809                 xfs_warn(target->bt_mount,
810                         "%s: failed to map pages\n", __func__);
811                 goto fail_free_mem;
812         }
813
814         trace_xfs_buf_get_uncached(bp, _RET_IP_);
815         return bp;
816
817  fail_free_mem:
818         while (--i >= 0)
819                 __free_page(bp->b_pages[i]);
820         _xfs_buf_free_pages(bp);
821  fail_free_buf:
822         xfs_buf_deallocate(bp);
823  fail:
824         return NULL;
825 }
826
827 /*
828  *      Increment reference count on buffer, to hold the buffer concurrently
829  *      with another thread which may release (free) the buffer asynchronously.
830  *      Must hold the buffer already to call this function.
831  */
832 void
833 xfs_buf_hold(
834         xfs_buf_t               *bp)
835 {
836         trace_xfs_buf_hold(bp, _RET_IP_);
837         atomic_inc(&bp->b_hold);
838 }
839
840 /*
841  *      Releases a hold on the specified buffer.  If the
842  *      hold count is 1, calls xfs_buf_free.
843  */
844 void
845 xfs_buf_rele(
846         xfs_buf_t               *bp)
847 {
848         struct xfs_perag        *pag = bp->b_pag;
849
850         trace_xfs_buf_rele(bp, _RET_IP_);
851
852         if (!pag) {
853                 ASSERT(list_empty(&bp->b_lru));
854                 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
855                 if (atomic_dec_and_test(&bp->b_hold))
856                         xfs_buf_free(bp);
857                 return;
858         }
859
860         ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
861
862         ASSERT(atomic_read(&bp->b_hold) > 0);
863         if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
864                 if (!(bp->b_flags & XBF_STALE) &&
865                            atomic_read(&bp->b_lru_ref)) {
866                         xfs_buf_lru_add(bp);
867                         spin_unlock(&pag->pag_buf_lock);
868                 } else {
869                         xfs_buf_lru_del(bp);
870                         ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
871                         rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
872                         spin_unlock(&pag->pag_buf_lock);
873                         xfs_perag_put(pag);
874                         xfs_buf_free(bp);
875                 }
876         }
877 }
878
879
880 /*
881  *      Lock a buffer object, if it is not already locked.
882  *
883  *      If we come across a stale, pinned, locked buffer, we know that we are
884  *      being asked to lock a buffer that has been reallocated. Because it is
885  *      pinned, we know that the log has not been pushed to disk and hence it
886  *      will still be locked.  Rather than continuing to have trylock attempts
887  *      fail until someone else pushes the log, push it ourselves before
888  *      returning.  This means that the xfsaild will not get stuck trying
889  *      to push on stale inode buffers.
890  */
891 int
892 xfs_buf_trylock(
893         struct xfs_buf          *bp)
894 {
895         int                     locked;
896
897         locked = down_trylock(&bp->b_sema) == 0;
898         if (locked)
899                 XB_SET_OWNER(bp);
900         else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
901                 xfs_log_force(bp->b_target->bt_mount, 0);
902
903         trace_xfs_buf_trylock(bp, _RET_IP_);
904         return locked;
905 }
906
907 /*
908  *      Lock a buffer object.
909  *
910  *      If we come across a stale, pinned, locked buffer, we know that we
911  *      are being asked to lock a buffer that has been reallocated. Because
912  *      it is pinned, we know that the log has not been pushed to disk and
913  *      hence it will still be locked. Rather than sleeping until someone
914  *      else pushes the log, push it ourselves before trying to get the lock.
915  */
916 void
917 xfs_buf_lock(
918         struct xfs_buf          *bp)
919 {
920         trace_xfs_buf_lock(bp, _RET_IP_);
921
922         if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
923                 xfs_log_force(bp->b_target->bt_mount, 0);
924         down(&bp->b_sema);
925         XB_SET_OWNER(bp);
926
927         trace_xfs_buf_lock_done(bp, _RET_IP_);
928 }
929
930 /*
931  *      Releases the lock on the buffer object.
932  *      If the buffer is marked delwri but is not queued, do so before we
933  *      unlock the buffer as we need to set flags correctly.  We also need to
934  *      take a reference for the delwri queue because the unlocker is going to
935  *      drop theirs and they don't know we just queued it.
936  */
937 void
938 xfs_buf_unlock(
939         struct xfs_buf          *bp)
940 {
941         if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
942                 atomic_inc(&bp->b_hold);
943                 bp->b_flags |= XBF_ASYNC;
944                 xfs_buf_delwri_queue(bp, 0);
945         }
946
947         XB_CLEAR_OWNER(bp);
948         up(&bp->b_sema);
949
950         trace_xfs_buf_unlock(bp, _RET_IP_);
951 }
952
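/*
 * Wait, uninterruptibly, for the buffer's pin count to fall to zero before
 * a write is started.
 */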
953 STATIC void
954 xfs_buf_wait_unpin(
955         xfs_buf_t               *bp)
956 {
957         DECLARE_WAITQUEUE       (wait, current);
958
959         if (atomic_read(&bp->b_pin_count) == 0)
960                 return;
961
962         add_wait_queue(&bp->b_waiters, &wait);
963         for (;;) {
964                 set_current_state(TASK_UNINTERRUPTIBLE);
965                 if (atomic_read(&bp->b_pin_count) == 0)
966                         break;
967                 io_schedule();
968         }
969         remove_wait_queue(&bp->b_waiters, &wait);
970         set_current_state(TASK_RUNNING);
971 }
972
973 /*
974  *      Buffer Utility Routines
975  */
976
977 STATIC void
978 xfs_buf_iodone_work(
979         struct work_struct      *work)
980 {
981         xfs_buf_t               *bp =
982                 container_of(work, xfs_buf_t, b_iodone_work);
983
984         if (bp->b_iodone)
985                 (*(bp->b_iodone))(bp);
986         else if (bp->b_flags & XBF_ASYNC)
987                 xfs_buf_relse(bp);
988 }
989
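/*
 * I/O completion processing: clear the transient I/O flags, mark the buffer
 * done if no error was recorded, then either run the iodone callback
 * (deferred to the xfslogd workqueue when @schedule is set) or wake a
 * synchronous waiter blocked in xfs_buf_iowait().
 */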
990 void
991 xfs_buf_ioend(
992         xfs_buf_t               *bp,
993         int                     schedule)
994 {
995         trace_xfs_buf_iodone(bp, _RET_IP_);
996
997         bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
998         if (bp->b_error == 0)
999                 bp->b_flags |= XBF_DONE;
1000
1001         if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1002                 if (schedule) {
1003                         INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1004                         queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1005                 } else {
1006                         xfs_buf_iodone_work(&bp->b_iodone_work);
1007                 }
1008         } else {
1009                 complete(&bp->b_iowait);
1010         }
1011 }
1012
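/*
 * Record an I/O error against the buffer.  Errors are stored as positive
 * errno values in the 16-bit b_error field.
 */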
1013 void
1014 xfs_buf_ioerror(
1015         xfs_buf_t               *bp,
1016         int                     error)
1017 {
1018         ASSERT(error >= 0 && error <= 0xffff);
1019         bp->b_error = (unsigned short)error;
1020         trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1021 }
1022
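/*
 * Synchronously write a buffer and release it.  The buffer is pulled off
 * the delwri queue first, and an I/O error forces a filesystem shutdown
 * (SHUTDOWN_META_IO_ERROR).
 */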
1023 int
1024 xfs_bwrite(
1025         struct xfs_mount        *mp,
1026         struct xfs_buf          *bp)
1027 {
1028         int                     error;
1029
1030         bp->b_flags |= XBF_WRITE;
1031         bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
1032
1033         xfs_buf_delwri_dequeue(bp);
1034         xfs_bdstrat_cb(bp);
1035
1036         error = xfs_buf_iowait(bp);
1037         if (error)
1038                 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1039         xfs_buf_relse(bp);
1040         return error;
1041 }
1042
1043 void
1044 xfs_bdwrite(
1045         void                    *mp,
1046         struct xfs_buf          *bp)
1047 {
1048         trace_xfs_buf_bdwrite(bp, _RET_IP_);
1049
1050         bp->b_flags &= ~XBF_READ;
1051         bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
1052
1053         xfs_buf_delwri_queue(bp, 1);
1054 }
1055
1056 /*
1057  * Called when we want to stop a buffer from getting written or read.
1058  * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
1059  * so that the proper iodone callbacks get called.
1060  */
1061 STATIC int
1062 xfs_bioerror(
1063         xfs_buf_t *bp)
1064 {
1065 #ifdef XFSERRORDEBUG
1066         ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1067 #endif
1068
1069         /*
1070          * No need to wait until the buffer is unpinned, we aren't flushing it.
1071          */
1072         XFS_BUF_ERROR(bp, EIO);
1073
1074         /*
1075          * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
1076          */
1077         XFS_BUF_UNREAD(bp);
1078         XFS_BUF_UNDELAYWRITE(bp);
1079         XFS_BUF_UNDONE(bp);
1080         XFS_BUF_STALE(bp);
1081
1082         xfs_buf_ioend(bp, 0);
1083
1084         return EIO;
1085 }
1086
1087 /*
1088  * Same as xfs_bioerror, except that we are releasing the buffer
1089  * here ourselves, and avoiding the xfs_buf_ioend call.
1090  * This is meant for userdata errors; metadata bufs come with
1091  * iodone functions attached, so that we can track down errors.
1092  */
1093 STATIC int
1094 xfs_bioerror_relse(
1095         struct xfs_buf  *bp)
1096 {
1097         int64_t         fl = XFS_BUF_BFLAGS(bp);
1098         /*
1099          * No need to wait until the buffer is unpinned.
1100          * We aren't flushing it.
1101          *
1102          * chunkhold expects B_DONE to be set, whether
1103          * we actually finish the I/O or not. We don't want to
1104          * change that interface.
1105          */
1106         XFS_BUF_UNREAD(bp);
1107         XFS_BUF_UNDELAYWRITE(bp);
1108         XFS_BUF_DONE(bp);
1109         XFS_BUF_STALE(bp);
1110         bp->b_iodone = NULL;
1111         if (!(fl & XBF_ASYNC)) {
1112                 /*
1113                  * Mark b_error and B_ERROR _both_.
1114                  * Lots of chunkcache code assumes that.
1115                  * There's no reason to mark error for
1116                  * ASYNC buffers.
1117                  */
1118                 XFS_BUF_ERROR(bp, EIO);
1119                 XFS_BUF_FINISH_IOWAIT(bp);
1120         } else {
1121                 xfs_buf_relse(bp);
1122         }
1123
1124         return EIO;
1125 }
1126
1127
1128 /*
1129  * All xfs metadata buffers except log state machine buffers
1130  * get this attached as their b_bdstrat callback function.
1131  * This is so that we can catch a buffer
1132  * after prematurely unpinning it to forcibly shutdown the filesystem.
1133  */
1134 int
1135 xfs_bdstrat_cb(
1136         struct xfs_buf  *bp)
1137 {
1138         if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1139                 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1140                 /*
1141                  * Metadata write that didn't get logged but
1142                  * written delayed anyway. These aren't associated
1143                  * with a transaction, and can be ignored.
1144                  */
1145                 if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1146                         return xfs_bioerror_relse(bp);
1147                 else
1148                         return xfs_bioerror(bp);
1149         }
1150
1151         xfs_buf_iorequest(bp);
1152         return 0;
1153 }
1154
1155 /*
1156  * Wrapper around bdstrat so that we can stop data from going to disk in case
1157  * we are shutting down the filesystem.  Typically user data goes thru this
1158  * path; one of the exceptions is the superblock.
1159  */
1160 void
1161 xfsbdstrat(
1162         struct xfs_mount        *mp,
1163         struct xfs_buf          *bp)
1164 {
1165         if (XFS_FORCED_SHUTDOWN(mp)) {
1166                 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1167                 xfs_bioerror_relse(bp);
1168                 return;
1169         }
1170
1171         xfs_buf_iorequest(bp);
1172 }
1173
1174 STATIC void
1175 _xfs_buf_ioend(
1176         xfs_buf_t               *bp,
1177         int                     schedule)
1178 {
1179         if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1180                 xfs_buf_ioend(bp, schedule);
1181 }
1182
1183 STATIC void
1184 xfs_buf_bio_end_io(
1185         struct bio              *bio,
1186         int                     error)
1187 {
1188         xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
1189
1190         xfs_buf_ioerror(bp, -error);
1191
1192         if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1193                 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1194
1195         _xfs_buf_ioend(bp, 1);
1196         bio_put(bio);
1197 }
1198
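/*
 * Map the buffer onto block I/O: pick the request type from the buffer
 * flags, then build and submit as many bios as are needed to cover all of
 * the buffer's pages.
 */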
1199 STATIC void
1200 _xfs_buf_ioapply(
1201         xfs_buf_t               *bp)
1202 {
1203         int                     rw, map_i, total_nr_pages, nr_pages;
1204         struct bio              *bio;
1205         int                     offset = bp->b_offset;
1206         int                     size = bp->b_count_desired;
1207         sector_t                sector = bp->b_bn;
1208
1209         total_nr_pages = bp->b_page_count;
1210         map_i = 0;
1211
1212         if (bp->b_flags & XBF_WRITE) {
1213                 if (bp->b_flags & XBF_SYNCIO)
1214                         rw = WRITE_SYNC;
1215                 else
1216                         rw = WRITE;
1217                 if (bp->b_flags & XBF_FUA)
1218                         rw |= REQ_FUA;
1219                 if (bp->b_flags & XBF_FLUSH)
1220                         rw |= REQ_FLUSH;
1221         } else if (bp->b_flags & XBF_READ_AHEAD) {
1222                 rw = READA;
1223         } else {
1224                 rw = READ;
1225         }
1226
1227 next_chunk:
1228         atomic_inc(&bp->b_io_remaining);
1229         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1230         if (nr_pages > total_nr_pages)
1231                 nr_pages = total_nr_pages;
1232
1233         bio = bio_alloc(GFP_NOIO, nr_pages);
1234         bio->bi_bdev = bp->b_target->bt_bdev;
1235         bio->bi_sector = sector;
1236         bio->bi_end_io = xfs_buf_bio_end_io;
1237         bio->bi_private = bp;
1238
1239
1240         for (; size && nr_pages; nr_pages--, map_i++) {
1241                 int     rbytes, nbytes = PAGE_SIZE - offset;
1242
1243                 if (nbytes > size)
1244                         nbytes = size;
1245
1246                 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1247                 if (rbytes < nbytes)
1248                         break;
1249
1250                 offset = 0;
1251                 sector += nbytes >> BBSHIFT;
1252                 size -= nbytes;
1253                 total_nr_pages--;
1254         }
1255
1256         if (likely(bio->bi_size)) {
1257                 if (xfs_buf_is_vmapped(bp)) {
1258                         flush_kernel_vmap_range(bp->b_addr,
1259                                                 xfs_buf_vmap_len(bp));
1260                 }
1261                 submit_bio(rw, bio);
1262                 if (size)
1263                         goto next_chunk;
1264         } else {
1265                 xfs_buf_ioerror(bp, EIO);
1266                 bio_put(bio);
1267         }
1268 }
1269
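/*
 * Start I/O on a buffer based on its flags.  Delayed-write buffers are only
 * queued for later writeback; for everything else the buffer is held across
 * submission and the initial b_io_remaining reference is dropped once all
 * bios have been issued.
 */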
1270 int
1271 xfs_buf_iorequest(
1272         xfs_buf_t               *bp)
1273 {
1274         trace_xfs_buf_iorequest(bp, _RET_IP_);
1275
1276         if (bp->b_flags & XBF_DELWRI) {
1277                 xfs_buf_delwri_queue(bp, 1);
1278                 return 0;
1279         }
1280
1281         if (bp->b_flags & XBF_WRITE) {
1282                 xfs_buf_wait_unpin(bp);
1283         }
1284
1285         xfs_buf_hold(bp);
1286
1287         /* Set the count to 1 initially; this will stop an I/O
1288          * completion callout which happens before we have started
1289          * all the I/O from calling xfs_buf_ioend too early.
1290          */
1291         atomic_set(&bp->b_io_remaining, 1);
1292         _xfs_buf_ioapply(bp);
1293         _xfs_buf_ioend(bp, 0);
1294
1295         xfs_buf_rele(bp);
1296         return 0;
1297 }
1298
1299 /*
1300  *      Waits for I/O to complete on the buffer supplied.
1301  *      It returns immediately if no I/O is pending.
1302  *      It returns the I/O error code, if any, or 0 if there was no error.
1303  */
1304 int
1305 xfs_buf_iowait(
1306         xfs_buf_t               *bp)
1307 {
1308         trace_xfs_buf_iowait(bp, _RET_IP_);
1309
1310         wait_for_completion(&bp->b_iowait);
1311
1312         trace_xfs_buf_iowait_done(bp, _RET_IP_);
1313         return bp->b_error;
1314 }
1315
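/*
 * Return a kernel address for the data at the given byte offset into the
 * buffer, using the contiguous mapping when one exists and indexing the
 * page array otherwise.
 */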
1316 xfs_caddr_t
1317 xfs_buf_offset(
1318         xfs_buf_t               *bp,
1319         size_t                  offset)
1320 {
1321         struct page             *page;
1322
1323         if (bp->b_flags & XBF_MAPPED)
1324                 return XFS_BUF_PTR(bp) + offset;
1325
1326         offset += bp->b_offset;
1327         page = bp->b_pages[offset >> PAGE_SHIFT];
1328         return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1329 }
1330
1331 /*
1332  *      Move data into or out of a buffer.
1333  */
1334 void
1335 xfs_buf_iomove(
1336         xfs_buf_t               *bp,    /* buffer to process            */
1337         size_t                  boff,   /* starting buffer offset       */
1338         size_t                  bsize,  /* length to copy               */
1339         void                    *data,  /* data address                 */
1340         xfs_buf_rw_t            mode)   /* read/write/zero flag         */
1341 {
1342         size_t                  bend, cpoff, csize;
1343         struct page             *page;
1344
1345         bend = boff + bsize;
1346         while (boff < bend) {
1347                 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1348                 cpoff = xfs_buf_poff(boff + bp->b_offset);
1349                 csize = min_t(size_t,
1350                               PAGE_SIZE-cpoff, bp->b_count_desired-boff);
1351
1352                 ASSERT(((csize + cpoff) <= PAGE_SIZE));
1353
1354                 switch (mode) {
1355                 case XBRW_ZERO:
1356                         memset(page_address(page) + cpoff, 0, csize);
1357                         break;
1358                 case XBRW_READ:
1359                         memcpy(data, page_address(page) + cpoff, csize);
1360                         break;
1361                 case XBRW_WRITE:
1362                         memcpy(page_address(page) + cpoff, data, csize);
1363                 }
1364
1365                 boff += csize;
1366                 data += csize;
1367         }
1368 }
1369
1370 /*
1371  *      Handling of buffer targets (buftargs).
1372  */
1373
1374 /*
1375  * Wait for any bufs with callbacks that have been submitted but have not yet
1376  * returned. These buffers will have an elevated hold count, so wait on those
1377  * while freeing all the buffers only held by the LRU.
1378  */
1379 void
1380 xfs_wait_buftarg(
1381         struct xfs_buftarg      *btp)
1382 {
1383         struct xfs_buf          *bp;
1384
1385 restart:
1386         spin_lock(&btp->bt_lru_lock);
1387         while (!list_empty(&btp->bt_lru)) {
1388                 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1389                 if (atomic_read(&bp->b_hold) > 1) {
1390                         spin_unlock(&btp->bt_lru_lock);
1391                         delay(100);
1392                         goto restart;
1393                 }
1394                 /*
1395                  * clear the LRU reference count so the buffer doesn't get
1396                  * ignored in xfs_buf_rele().
1397                  */
1398                 atomic_set(&bp->b_lru_ref, 0);
1399                 spin_unlock(&btp->bt_lru_lock);
1400                 xfs_buf_rele(bp);
1401                 spin_lock(&btp->bt_lru_lock);
1402         }
1403         spin_unlock(&btp->bt_lru_lock);
1404 }
1405
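/*
 * Shrinker callback for the buftarg LRU.  Scan up to nr_to_scan buffers,
 * decrementing b_lru_ref on each; buffers whose reference has already
 * reached zero are moved to a dispose list and released.  Returns the
 * number of buffers remaining on the LRU.
 */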
1406 int
1407 xfs_buftarg_shrink(
1408         struct shrinker         *shrink,
1409         struct shrink_control   *sc)
1410 {
1411         struct xfs_buftarg      *btp = container_of(shrink,
1412                                         struct xfs_buftarg, bt_shrinker);
1413         struct xfs_buf          *bp;
1414         int nr_to_scan = sc->nr_to_scan;
1415         LIST_HEAD(dispose);
1416
1417         if (!nr_to_scan)
1418                 return btp->bt_lru_nr;
1419
1420         spin_lock(&btp->bt_lru_lock);
1421         while (!list_empty(&btp->bt_lru)) {
1422                 if (nr_to_scan-- <= 0)
1423                         break;
1424
1425                 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1426
1427                 /*
1428                  * Decrement the b_lru_ref count unless the value is already
1429                  * zero. If the value is already zero, we need to reclaim the
1430                  * buffer, otherwise it gets another trip through the LRU.
1431                  */
1432                 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1433                         list_move_tail(&bp->b_lru, &btp->bt_lru);
1434                         continue;
1435                 }
1436
1437                 /*
1438                  * remove the buffer from the LRU now to avoid needing another
1439                  * lock round trip inside xfs_buf_rele().
1440                  */
1441                 list_move(&bp->b_lru, &dispose);
1442                 btp->bt_lru_nr--;
1443         }
1444         spin_unlock(&btp->bt_lru_lock);
1445
1446         while (!list_empty(&dispose)) {
1447                 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1448                 list_del_init(&bp->b_lru);
1449                 xfs_buf_rele(bp);
1450         }
1451
1452         return btp->bt_lru_nr;
1453 }
1454
1455 void
1456 xfs_free_buftarg(
1457         struct xfs_mount        *mp,
1458         struct xfs_buftarg      *btp)
1459 {
1460         unregister_shrinker(&btp->bt_shrinker);
1461
1462         xfs_flush_buftarg(btp, 1);
1463         if (mp->m_flags & XFS_MOUNT_BARRIER)
1464                 xfs_blkdev_issue_flush(btp);
1465
1466         kthread_stop(btp->bt_task);
1467         kmem_free(btp);
1468 }
1469
1470 STATIC int
1471 xfs_setsize_buftarg_flags(
1472         xfs_buftarg_t           *btp,
1473         unsigned int            blocksize,
1474         unsigned int            sectorsize,
1475         int                     verbose)
1476 {
1477         btp->bt_bsize = blocksize;
1478         btp->bt_sshift = ffs(sectorsize) - 1;
1479         btp->bt_smask = sectorsize - 1;
1480
1481         if (set_blocksize(btp->bt_bdev, sectorsize)) {
1482                 xfs_warn(btp->bt_mount,
1483                         "Cannot set_blocksize to %u on device %s\n",
1484                         sectorsize, XFS_BUFTARG_NAME(btp));
1485                 return EINVAL;
1486         }
1487
1488         return 0;
1489 }
1490
1491 /*
1492  *      When allocating the initial buffer target we have not yet
1493  *      read in the superblock, so we don't know what size sectors
1494  *      are being used at this early stage.  Play safe.
1495  */
1496 STATIC int
1497 xfs_setsize_buftarg_early(
1498         xfs_buftarg_t           *btp,
1499         struct block_device     *bdev)
1500 {
1501         return xfs_setsize_buftarg_flags(btp,
1502                         PAGE_SIZE, bdev_logical_block_size(bdev), 0);
1503 }
1504
1505 int
1506 xfs_setsize_buftarg(
1507         xfs_buftarg_t           *btp,
1508         unsigned int            blocksize,
1509         unsigned int            sectorsize)
1510 {
1511         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1512 }
1513
1514 STATIC int
1515 xfs_alloc_delwrite_queue(
1516         xfs_buftarg_t           *btp,
1517         const char              *fsname)
1518 {
1519         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1520         spin_lock_init(&btp->bt_delwrite_lock);
1521         btp->bt_flags = 0;
1522         btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
1523         if (IS_ERR(btp->bt_task))
1524                 return PTR_ERR(btp->bt_task);
1525         return 0;
1526 }
1527
1528 xfs_buftarg_t *
1529 xfs_alloc_buftarg(
1530         struct xfs_mount        *mp,
1531         struct block_device     *bdev,
1532         int                     external,
1533         const char              *fsname)
1534 {
1535         xfs_buftarg_t           *btp;
1536
1537         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1538
1539         btp->bt_mount = mp;
1540         btp->bt_dev =  bdev->bd_dev;
1541         btp->bt_bdev = bdev;
1542         btp->bt_bdi = blk_get_backing_dev_info(bdev);
1543         if (!btp->bt_bdi)
1544                 goto error;
1545
1546         INIT_LIST_HEAD(&btp->bt_lru);
1547         spin_lock_init(&btp->bt_lru_lock);
1548         if (xfs_setsize_buftarg_early(btp, bdev))
1549                 goto error;
1550         if (xfs_alloc_delwrite_queue(btp, fsname))
1551                 goto error;
1552         btp->bt_shrinker.shrink = xfs_buftarg_shrink;
1553         btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1554         register_shrinker(&btp->bt_shrinker);
1555         return btp;
1556
1557 error:
1558         kmem_free(btp);
1559         return NULL;
1560 }
1561
1562
1563 /*
1564  *      Delayed write buffer handling
1565  */
1566 STATIC void
1567 xfs_buf_delwri_queue(
1568         xfs_buf_t               *bp,
1569         int                     unlock)
1570 {
1571         struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
1572         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1573
1574         trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1575
1576         ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1577
1578         spin_lock(dwlk);
1579         /* If already in the queue, dequeue and place at tail */
1580         if (!list_empty(&bp->b_list)) {
1581                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1582                 if (unlock)
1583                         atomic_dec(&bp->b_hold);
1584                 list_del(&bp->b_list);
1585         }
1586
1587         if (list_empty(dwq)) {
1588                 /* start xfsbufd as it is about to have something to do */
1589                 wake_up_process(bp->b_target->bt_task);
1590         }
1591
1592         bp->b_flags |= _XBF_DELWRI_Q;
1593         list_add_tail(&bp->b_list, dwq);
1594         bp->b_queuetime = jiffies;
1595         spin_unlock(dwlk);
1596
1597         if (unlock)
1598                 xfs_buf_unlock(bp);
1599 }
1600
1601 void
1602 xfs_buf_delwri_dequeue(
1603         xfs_buf_t               *bp)
1604 {
1605         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1606         int                     dequeued = 0;
1607
1608         spin_lock(dwlk);
1609         if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1610                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1611                 list_del_init(&bp->b_list);
1612                 dequeued = 1;
1613         }
1614         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1615         spin_unlock(dwlk);
1616
1617         if (dequeued)
1618                 xfs_buf_rele(bp);
1619
1620         trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
1621 }
1622
1623 /*
1624  * If a delwri buffer needs to be pushed before it has aged out, then promote
1625  * it to the head of the delwri queue so that it will be flushed on the next
1626  * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
1627  * than the age currently needed to flush the buffer. Hence the next time the
1628  * xfsbufd sees it is guaranteed to be considered old enough to flush.
1629  */
1630 void
1631 xfs_buf_delwri_promote(
1632         struct xfs_buf  *bp)
1633 {
1634         struct xfs_buftarg *btp = bp->b_target;
1635         long            age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
1636
1637         ASSERT(bp->b_flags & XBF_DELWRI);
1638         ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1639
1640         /*
1641          * Check the buffer age before locking the delayed write queue as we
1642          * don't need to promote buffers that are already past the flush age.
1643          */
1644         if (bp->b_queuetime < jiffies - age)
1645                 return;
1646         bp->b_queuetime = jiffies - age;
1647         spin_lock(&btp->bt_delwrite_lock);
1648         list_move(&bp->b_list, &btp->bt_delwrite_queue);
1649         spin_unlock(&btp->bt_delwrite_lock);
1650 }
1651
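/*
 * Wait for all outstanding work on the given workqueue to complete. Called
 * before flushing a buffer target so that pending I/O completion work has
 * run before we start pushing out delayed write buffers.
 */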
1652 STATIC void
1653 xfs_buf_runall_queues(
1654         struct workqueue_struct *queue)
1655 {
1656         flush_workqueue(queue);
1657 }
1658
1659 /*
1660  * Move as many buffers as specified to the supplied list,
1661  * indicating whether we skipped any buffers to prevent deadlocks.
1662  */
1663 STATIC int
1664 xfs_buf_delwri_split(
1665         xfs_buftarg_t   *target,
1666         struct list_head *list,
1667         unsigned long   age)
1668 {
1669         xfs_buf_t       *bp, *n;
1670         struct list_head *dwq = &target->bt_delwrite_queue;
1671         spinlock_t      *dwlk = &target->bt_delwrite_lock;
1672         int             skipped = 0;
1673         int             force;
1674
1675         force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1676         INIT_LIST_HEAD(list);
1677         spin_lock(dwlk);
1678         list_for_each_entry_safe(bp, n, dwq, b_list) {
1679                 ASSERT(bp->b_flags & XBF_DELWRI);
1680
1681                 if (!XFS_BUF_ISPINNED(bp) && xfs_buf_trylock(bp)) {
1682                         if (!force &&
1683                             time_before(jiffies, bp->b_queuetime + age)) {
1684                                 xfs_buf_unlock(bp);
1685                                 break;
1686                         }
1687
1688                         bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
1689                         bp->b_flags |= XBF_WRITE;
1690                         list_move_tail(&bp->b_list, list);
1691                         trace_xfs_buf_delwri_split(bp, _RET_IP_);
1692                 } else
1693                         skipped++;
1694         }
1695         spin_unlock(dwlk);
1696
1697         return skipped;
1698
1699 }
1700
1701 /*
1702  * The compare function looks more complex than it needs to be because
1703  * the return value is only 32 bits wide while we are comparing 64 bit
1704  * block numbers, so we cannot simply return the difference.
1705  */
1706 static int
1707 xfs_buf_cmp(
1708         void            *priv,
1709         struct list_head *a,
1710         struct list_head *b)
1711 {
1712         struct xfs_buf  *ap = container_of(a, struct xfs_buf, b_list);
1713         struct xfs_buf  *bp = container_of(b, struct xfs_buf, b_list);
1714         xfs_daddr_t             diff;
1715
1716         diff = ap->b_bn - bp->b_bn;
1717         if (diff < 0)
1718                 return -1;
1719         if (diff > 0)
1720                 return 1;
1721         return 0;
1722 }
1723
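/*
 * Delayed write buffer flushing daemon, one per buffer target. It wakes
 * periodically (or when explicitly woken), moves buffers that have aged
 * sufficiently off the delayed write queue, sorts them by disk address and
 * submits the I/O under a single plug so requests can be merged.
 */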
1724 STATIC int
1725 xfsbufd(
1726         void            *data)
1727 {
1728         xfs_buftarg_t   *target = (xfs_buftarg_t *)data;
1729
1730         current->flags |= PF_MEMALLOC;
1731
1732         set_freezable();
1733
1734         do {
1735                 long    age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1736                 long    tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
1737                 struct list_head tmp;
1738                 struct blk_plug plug;
1739
1740                 if (unlikely(freezing(current))) {
1741                         set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1742                         refrigerator();
1743                 } else {
1744                         clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1745                 }
1746
1747                 /* sleep for a long time if there is nothing to do. */
1748                 if (list_empty(&target->bt_delwrite_queue))
1749                         tout = MAX_SCHEDULE_TIMEOUT;
1750                 schedule_timeout_interruptible(tout);
1751
1752                 xfs_buf_delwri_split(target, &tmp, age);
1753                 list_sort(NULL, &tmp, xfs_buf_cmp);
1754
1755                 blk_start_plug(&plug);
1756                 while (!list_empty(&tmp)) {
1757                         struct xfs_buf *bp;
1758                         bp = list_first_entry(&tmp, struct xfs_buf, b_list);
1759                         list_del_init(&bp->b_list);
1760                         xfs_bdstrat_cb(bp);
1761                 }
1762                 blk_finish_plug(&plug);
1763         } while (!kthread_should_stop());
1764
1765         return 0;
1766 }
1767
1768 /*
1769  *      Write out all delayed write buffers queued on the given target,
1770  *      waiting for the I/O to complete if @wait is set. This is used in
1771  *      filesystem error handling to preserve the consistency of the metadata.
1772  */
1773 int
1774 xfs_flush_buftarg(
1775         xfs_buftarg_t   *target,
1776         int             wait)
1777 {
1778         xfs_buf_t       *bp;
1779         int             pincount = 0;
1780         LIST_HEAD(tmp_list);
1781         LIST_HEAD(wait_list);
1782         struct blk_plug plug;
1783
1784         xfs_buf_runall_queues(xfsconvertd_workqueue);
1785         xfs_buf_runall_queues(xfsdatad_workqueue);
1786         xfs_buf_runall_queues(xfslogd_workqueue);
1787
1788         set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1789         pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
1790
1791         /*
1792          * We have dropped the delayed write list lock; now walk the temporary
1793          * list. All I/O is issued asynchronously and, if we need to wait for
1794          * completion, we do that after all of the I/O has been submitted.
1795          */
1796         list_sort(NULL, &tmp_list, xfs_buf_cmp);
1797
1798         blk_start_plug(&plug);
1799         while (!list_empty(&tmp_list)) {
1800                 bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
1801                 ASSERT(target == bp->b_target);
1802                 list_del_init(&bp->b_list);
1803                 if (wait) {
1804                         bp->b_flags &= ~XBF_ASYNC;
1805                         list_add(&bp->b_list, &wait_list);
1806                 }
1807                 xfs_bdstrat_cb(bp);
1808         }
1809         blk_finish_plug(&plug);
1810
1811         if (wait) {
1812                 /* Wait for IO to complete. */
1813                 while (!list_empty(&wait_list)) {
1814                         bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1815
1816                         list_del_init(&bp->b_list);
1817                         xfs_buf_iowait(bp);
1818                         xfs_buf_relse(bp);
1819                 }
1820         }
1821
1822         return pincount;
1823 }
1824
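/*
 * Set up the buffer cache infrastructure: the xfs_buf slab zone and the
 * xfslogd, xfsdatad and xfsconvertd workqueues. Everything is torn down
 * again on failure and -ENOMEM is returned.
 */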
1825 int __init
1826 xfs_buf_init(void)
1827 {
1828         xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1829                                                 KM_ZONE_HWALIGN, NULL);
1830         if (!xfs_buf_zone)
1831                 goto out;
1832
1833         xfslogd_workqueue = alloc_workqueue("xfslogd",
1834                                         WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1835         if (!xfslogd_workqueue)
1836                 goto out_free_buf_zone;
1837
1838         xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1);
1839         if (!xfsdatad_workqueue)
1840                 goto out_destroy_xfslogd_workqueue;
1841
1842         xfsconvertd_workqueue = alloc_workqueue("xfsconvertd",
1843                                                 WQ_MEM_RECLAIM, 1);
1844         if (!xfsconvertd_workqueue)
1845                 goto out_destroy_xfsdatad_workqueue;
1846
1847         return 0;
1848
1849  out_destroy_xfsdatad_workqueue:
1850         destroy_workqueue(xfsdatad_workqueue);
1851  out_destroy_xfslogd_workqueue:
1852         destroy_workqueue(xfslogd_workqueue);
1853  out_free_buf_zone:
1854         kmem_zone_destroy(xfs_buf_zone);
1855  out:
1856         return -ENOMEM;
1857 }
1858
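/*
 * Tear down everything set up by xfs_buf_init().
 */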
1859 void
1860 xfs_buf_terminate(void)
1861 {
1862         destroy_workqueue(xfsconvertd_workqueue);
1863         destroy_workqueue(xfsdatad_workqueue);
1864         destroy_workqueue(xfslogd_workqueue);
1865         kmem_zone_destroy(xfs_buf_zone);
1866 }
1867
1868 #ifdef CONFIG_KDB_MODULES
1869 struct list_head *
1870 xfs_get_buftarg_list(void)
1871 {
1872         return &xfs_buftarg_list;
1873 }
1874 #endif