return 0;
}
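+/*
+ * call the tree's set_bit_hook, if one is registered, so the owner of
+ * the tree can account for 'bits' being set on this extent_state.
+ */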
+static void set_state_cb(struct extent_io_tree *tree,
+			 struct extent_state *state,
+			 unsigned long bits)
+{
+	if (tree->ops && tree->ops->set_bit_hook) {
+		tree->ops->set_bit_hook(tree->mapping->host, state->start,
+					state->end, bits);
+	}
+}
+
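+/*
+ * call the tree's clear_bit_hook, if one is registered, so the owner
+ * of the tree can account for 'bits' being cleared from this
+ * extent_state.
+ */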
+static void clear_state_cb(struct extent_io_tree *tree,
+			   struct extent_state *state,
+			   unsigned long bits)
+{
+	if (tree->ops && tree->ops->clear_bit_hook) {
+		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
+					  state->end, bits);
+	}
+}
+
/*
* insert an extent_state struct into the tree. 'bits' are set on the
* struct before it is inserted.
state->state |= bits;
state->start = start;
state->end = end;
+ set_state_cb(tree, state, bits);
node = tree_insert(&tree->state, end, &state->rb_node);
if (node) {
struct extent_state *found;
tree->dirty_bytes -= range;
}
state->state &= ~bits;
+ clear_state_cb(tree, state, bits);
if (wake)
wake_up(&state->wq);
if (delete || state->state == 0) {
tree->dirty_bytes += range;
}
state->state |= bits;
+ set_state_cb(tree, state, bits);
}
/*
goto search_again;
}
state->state |= EXTENT_LOCKED;
+ set_state_cb(tree, state, EXTENT_LOCKED);
if (!found)
*start = state->start;
found++;
state = NULL;
}
clear->state |= EXTENT_UPTODATE;
+ set_state_cb(tree, clear, EXTENT_UPTODATE);
clear_state_bit(tree, clear, EXTENT_LOCKED,
1, 0);
if (cur == start)
u64 thresh;
int ret = 0;
- return 0;
-
if (for_del)
thresh = total * 90;
else
static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 num_bytes;
int ret;
mutex_lock(&root->fs_info->fs_mutex);
if (btrfs_test_opt(root, NODATACOW) ||
else
ret = cow_file_range(inode, start, end);
- spin_lock(&root->fs_info->delalloc_lock);
- num_bytes = end + 1 - start;
- if (root->fs_info->delalloc_bytes < num_bytes) {
- printk("delalloc accounting error total %llu sub %llu\n",
- root->fs_info->delalloc_bytes, num_bytes);
- } else {
- root->fs_info->delalloc_bytes -= num_bytes;
- }
- spin_unlock(&root->fs_info->delalloc_lock);
-
mutex_unlock(&root->fs_info->fs_mutex);
return ret;
}
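+/*
+ * extent_io hooks: keep fs_info->delalloc_bytes in sync as the
+ * EXTENT_DELALLOC bit is set and cleared in the io tree, replacing the
+ * manual accounting that used to be done in run_delalloc_range() and
+ * the page dirtying paths.
+ */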
+int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
+		       unsigned long bits)
+{
+	if ((bits & EXTENT_DELALLOC)) {
+		struct btrfs_root *root = BTRFS_I(inode)->root;
+		spin_lock(&root->fs_info->delalloc_lock);
+		root->fs_info->delalloc_bytes += end - start + 1;
+		spin_unlock(&root->fs_info->delalloc_lock);
+	}
+	return 0;
+}
+
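+/*
+ * counterpart of btrfs_set_bit_hook: subtract the range from
+ * delalloc_bytes when EXTENT_DELALLOC is cleared.
+ */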
+int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
+			 unsigned long bits)
+{
+	if ((bits & EXTENT_DELALLOC)) {
+		struct btrfs_root *root = BTRFS_I(inode)->root;
+		spin_lock(&root->fs_info->delalloc_lock);
+		root->fs_info->delalloc_bytes -= end - start + 1;
+		spin_unlock(&root->fs_info->delalloc_lock);
+	}
+	return 0;
+}
+
int btrfs_writepage_io_hook(struct page *page, u64 start, u64 end)
{
struct inode *inode = page->mapping->host;
{
char *kaddr;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct btrfs_root *root = BTRFS_I(inode)->root;
u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
- u64 existing_delalloc;
- u64 delalloc_start;
int ret = 0;
WARN_ON(!PageLocked(page));
set_page_extent_mapped(page);
lock_extent(io_tree, page_start, page_end, GFP_NOFS);
- delalloc_start = page_start;
- existing_delalloc = count_range_bits(&BTRFS_I(inode)->io_tree,
- &delalloc_start, page_end,
- PAGE_CACHE_SIZE, EXTENT_DELALLOC);
set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
page_end, GFP_NOFS);
- spin_lock(&root->fs_info->delalloc_lock);
- root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE - existing_delalloc;
- spin_unlock(&root->fs_info->delalloc_lock);
-
if (zero_start != PAGE_CACHE_SIZE) {
kaddr = kmap(page);
memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
unsigned long ra_index = 0;
u64 page_start;
u64 page_end;
- u64 delalloc_start;
- u64 existing_delalloc;
unsigned long i;
int ret;
page_end = page_start + PAGE_CACHE_SIZE - 1;
lock_extent(io_tree, page_start, page_end, GFP_NOFS);
- delalloc_start = page_start;
- existing_delalloc =
- count_range_bits(&BTRFS_I(inode)->io_tree,
- &delalloc_start, page_end,
- PAGE_CACHE_SIZE, EXTENT_DELALLOC);
set_extent_delalloc(io_tree, page_start,
page_end, GFP_NOFS);
- spin_lock(&root->fs_info->delalloc_lock);
- root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE -
- existing_delalloc;
- spin_unlock(&root->fs_info->delalloc_lock);
-
unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
set_page_dirty(page);
unlock_page(page);