/*
* set some bits on a range in the tree. This may require allocations or
* sleeping, so the gfp mask is used to indicate what is allowed.
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
+ struct extent_state **cached_state, gfp_t mask)
{
- return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
- NULL, mask);
+ return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+ NULL, cached_state, mask);
}
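Note on the API change above: threading a cached_state pointer through set_extent_uptodate() lets the caller hand the extent_state it already found straight to the later unlock, so the tree's rbtree is searched once per range instead of twice. A minimal caller sketch (hypothetical caller, but the same pattern as the call sites converted below):

	struct extent_state *cached = NULL;

	/* fills *cached with the state covering [start, end] */
	set_extent_uptodate(tree, start, end, &cached, GFP_NOFS);

	/* consumes the cached state instead of searching the tree again */
	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);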
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
mask);
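For reference, unlock_extent_cached() is the cached-state counterpart used by the converted call sites below; judging from the clear_extent_bit() call just above, it presumably only forwards the cached pointer (a sketch, not part of this diff):

	int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
				 struct extent_state **cached, gfp_t mask)
	{
		/* same as unlock_extent(), but lets clear_extent_bit()
		 * start from the cached state and drop its reference */
		return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0,
					cached, mask);
	}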
+ spin_lock(&tree->lock);
+ state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
+ if (state && state->start == start) {
+ /*
+ * take a reference on the state, unlock will drop
+ * the ref
+ */
+ cache_state(state, &cached);
+ }
+ spin_unlock(&tree->lock);
+
if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
ret = tree->ops->readpage_end_io_hook(page, start, end,
- set_extent_uptodate(tree, start, end,
+ set_extent_uptodate(tree, start, end, &cached,
GFP_ATOMIC);
- unlock_extent(tree, start, end, GFP_ATOMIC);
+ unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
- set_extent_uptodate(tree, start, end, GFP_ATOMIC);
+ set_extent_uptodate(tree, start, end, &cached,
+ GFP_ATOMIC);
- unlock_extent(tree, start, end, GFP_ATOMIC);
+ unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
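The block added above only pins the state while tree->lock is held; what keeps it usable afterwards is the extra reference cache_state() takes, as the in-line comment says. Roughly (a simplified sketch of the assumed helper from this era's extent_io.c, not part of this diff):

	static void cache_state(struct extent_state *state,
				struct extent_state **cached_ptr)
	{
		if (cached_ptr && !(*cached_ptr)) {
			*cached_ptr = state;
			/* reference is dropped by unlock_extent_cached() */
			atomic_inc(&state->refs);
		}
	}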
char *userpage;
+ struct extent_state *cached = NULL;
+
iosize = PAGE_CACHE_SIZE - page_offset;
userpage = kmap_atomic(page, KM_USER0);
memset(userpage + page_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
set_extent_uptodate(tree, cur, cur + iosize - 1,
- GFP_NOFS);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ &cached, GFP_NOFS);
+ unlock_extent_cached(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
break;
}
em = get_extent(inode, page, page_offset, cur,
/* we've found a hole, just zero and go on */
if (block_start == EXTENT_MAP_HOLE) {
char *userpage;
+ struct extent_state *cached = NULL;
+
userpage = kmap_atomic(page, KM_USER0);
memset(userpage + page_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
set_extent_uptodate(tree, cur, cur + iosize - 1,
- GFP_NOFS);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ &cached, GFP_NOFS);
+ unlock_extent_cached(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
WARN_ON(!PageLocked(page));
pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
if (page->index > end_index ||
- set_extent_uptodate(tree, block_start, cur_end,
+ struct extent_state *cached = NULL;
+
+ set_extent_uptodate(tree, block_start, cur_end, &cached,
GFP_NOFS);
- unlock_extent(tree, block_start, cur_end, GFP_NOFS);
+ unlock_extent_cached(tree, block_start, cur_end,
+ &cached, GFP_NOFS);
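This hunk also shows the shape every converted call site takes: a block-local cached pointer initialized to NULL, filled by set_extent_uptodate() and consumed by unlock_extent_cached(). Condensed (illustrative only, not part of this diff):

	{
		struct extent_state *cached = NULL;	/* fresh per range */

		set_extent_uptodate(tree, block_start, cur_end, &cached, GFP_NOFS);
		unlock_extent_cached(tree, block_start, cur_end, &cached, GFP_NOFS);
		/* the unlock dropped cached's reference; don't reuse it */
	}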
num_pages = num_extent_pages(eb->start, eb->len);
set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- GFP_NOFS);
+ NULL, GFP_NOFS);
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
"wanted %lu %lu\n", (unsigned long long)eb->start,
eb->len, start, min_len);
WARN_ON(1);
"wanted %lu %lu\n", (unsigned long long)eb->start,
eb->len, start, min_len);
WARN_ON(1);
static void copy_pages(struct page *dst_page, struct page *src_page,
unsigned long dst_off, unsigned long src_off,
unsigned long len)
memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
kunmap_atomic(dst_kaddr, KM_USER0);
"len %lu len %lu\n", dst_offset, len, dst->len);
BUG_ON(1);
}
"len %lu len %lu\n", dst_offset, len, dst->len);
BUG_ON(1);
}
memcpy_extent_buffer(dst, dst_offset, src_offset, len);
return;
}