[XFS] Fixed a few bugs in xfs_buf_associate_memory()
author Lachlan McIlroy <lachlan@sgi.com>
Tue, 27 Nov 2007 06:01:24 +0000 (17:01 +1100)
committer Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>
Mon, 10 Dec 2007 02:46:20 +0000 (13:46 +1100)
- calculation of 'page_count' was incorrect as it did not
  account for the offset of 'mem' into the first page. The
  logic to bump 'page_count' also failed when 'len' was <=
  PAGE_CACHE_SIZE but the region still crossed a page
  boundary (e.g. offset = 3k, len = 2k); see the worked
  example below.
- setting b_buffer_length to 'len' is incorrect if 'offset'
  is > 0. Set it to the total, page-aligned length of the
  buffer instead.
- I suspect that passing a non-page-aligned address into
  mem_to_page() for the first page may have been causing
  issues - not confirmed, but that code is tidied up anyway.
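
A minimal, standalone sketch of the corrected arithmetic, assuming a
4k page size; PAGE_SZ, PAGE_SHIFT_, ALIGN_UP and the sample 'mem'/'len'
values are illustrative stand-ins for the kernel's PAGE_CACHE_* macros
and real caller arguments, not part of the actual change:

    #include <stdio.h>

    #define PAGE_SHIFT_ 12
    #define PAGE_SZ     (1UL << PAGE_SHIFT_)        /* stands in for PAGE_CACHE_SIZE */
    #define PAGE_MASK_  (~(PAGE_SZ - 1))            /* stands in for PAGE_CACHE_MASK */
    #define ALIGN_UP(x) (((x) + PAGE_SZ - 1) & PAGE_MASK_)

    int main(void)
    {
            unsigned long mem = 0x10000 + 3072;     /* 3k into a page */
            unsigned long len = 2048;               /* 2k of data */

            unsigned long pageaddr = mem & PAGE_MASK_;       /* start of first page */
            unsigned long offset   = mem - pageaddr;         /* offset into it */
            unsigned long buflen   = ALIGN_UP(len + offset); /* page-rounded total */
            int page_count = buflen >> PAGE_SHIFT_;

            /*
             * The old formula, PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT
             * bumped only when len > PAGE_CACHE_SIZE, yields 1 page here
             * even though the region spans 2 pages.
             */
            printf("offset=%lu buflen=%lu page_count=%d\n",
                   offset, buflen, page_count);
            return 0;
    }

This prints offset=3072 buflen=8192 page_count=2, matching what the new
code computes before mapping each page with mem_to_page().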

SGI-PV: 971596
SGI-Modid: xfs-linux-melb:xfs-kern:30143a

Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
fs/xfs/linux-2.6/xfs_buf.c

index b9c8589..48bf477 100644
@@ -725,15 +725,15 @@ xfs_buf_associate_memory(
 {
        int                     rval;
        int                     i = 0;
-       size_t                  ptr;
-       size_t                  end, end_cur;
-       off_t                   offset;
+       unsigned long           pageaddr;
+       unsigned long           offset;
+       size_t                  buflen;
        int                     page_count;
 
-       page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
-       offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
-       if (offset && (len > PAGE_CACHE_SIZE))
-               page_count++;
+       pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
+       offset = (unsigned long)mem - pageaddr;
+       buflen = PAGE_CACHE_ALIGN(len + offset);
+       page_count = buflen >> PAGE_CACHE_SHIFT;
 
        /* Free any previous set of page pointers */
        if (bp->b_pages)
@@ -747,22 +747,15 @@ xfs_buf_associate_memory(
                return rval;
 
        bp->b_offset = offset;
-       ptr = (size_t) mem & PAGE_CACHE_MASK;
-       end = PAGE_CACHE_ALIGN((size_t) mem + len);
-       end_cur = end;
-       /* set up first page */
-       bp->b_pages[0] = mem_to_page(mem);
-
-       ptr += PAGE_CACHE_SIZE;
-       bp->b_page_count = ++i;
-       while (ptr < end) {
-               bp->b_pages[i] = mem_to_page((void *)ptr);
-               bp->b_page_count = ++i;
-               ptr += PAGE_CACHE_SIZE;
+
+       for (i = 0; i < bp->b_page_count; i++) {
+               bp->b_pages[i] = mem_to_page((void *)pageaddr);
+               pageaddr += PAGE_CACHE_SIZE;
        }
        bp->b_locked = 0;
 
-       bp->b_count_desired = bp->b_buffer_length = len;
+       bp->b_count_desired = len;
+       bp->b_buffer_length = buflen;
        bp->b_flags |= XBF_MAPPED;
 
        return 0;