struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret,
- u64 search_start, u64 empty_size)
+ u64 search_start, u64 empty_size,
+ u64 prealloc_dest)
{
u64 root_gen;
struct extent_buffer *cow;
} else {
first_key.objectid = 0;
}
- cow = btrfs_alloc_free_block(trans, root, buf->len,
- root->root_key.objectid,
- root_gen, first_key.objectid, level,
- search_start, empty_size);
+ if (prealloc_dest) {
+ struct btrfs_key ins;
+
+ ins.objectid = prealloc_dest;
+ ins.offset = buf->len;
+ ins.type = BTRFS_EXTENT_ITEM_KEY;
+
+ ret = btrfs_alloc_reserved_extent(trans, root,
+ root->root_key.objectid,
+ root_gen, level,
+ first_key.objectid,
+ &ins);
+ BUG_ON(ret);
+ cow = btrfs_init_new_buffer(trans, root, prealloc_dest,
+ buf->len);
+ } else {
+ cow = btrfs_alloc_free_block(trans, root, buf->len,
+ root->root_key.objectid,
+ root_gen, first_key.objectid,
+ level, search_start, empty_size);
+ }
if (IS_ERR(cow))
return PTR_ERR(cow);
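
/*
 * A condensed sketch of the two allocation paths above, assuming this
 * tree's headers.  cow_alloc() is a hypothetical helper, not part of the
 * patch: a nonzero prealloc_dest means the caller reserved the bytes
 * earlier with no tree locks held, so only the extent reference and the
 * buffer setup remain; otherwise the allocator is searched under the
 * tree locks as before.
 */
static struct extent_buffer *cow_alloc(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       u64 root_gen, u64 first_objectid,
				       int level, u64 search_start,
				       u64 empty_size, u64 prealloc_dest)
{
	if (prealloc_dest) {
		struct btrfs_key ins;
		int ret;

		ins.objectid = prealloc_dest;
		ins.offset = buf->len;
		ins.type = BTRFS_EXTENT_ITEM_KEY;

		/* record the reference for the already reserved bytes */
		ret = btrfs_alloc_reserved_extent(trans, root,
						  root->root_key.objectid,
						  root_gen, level,
						  first_objectid, &ins);
		BUG_ON(ret);
		return btrfs_init_new_buffer(trans, root, prealloc_dest,
					     buf->len);
	}
	return btrfs_alloc_free_block(trans, root, buf->len,
				      root->root_key.objectid, root_gen,
				      first_objectid, level, search_start,
				      empty_size);
}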
int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
- struct extent_buffer **cow_ret)
+ struct extent_buffer **cow_ret, u64 prealloc_dest)
{
u64 search_start;
u64 header_trans;
!btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
*cow_ret = buf;
spin_unlock(&root->fs_info->hash_lock);
+ WARN_ON(prealloc_dest);
return 0;
}
spin_unlock(&root->fs_info->hash_lock);
search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
ret = __btrfs_cow_block(trans, root, buf, parent,
- parent_slot, cow_ret, search_start, 0);
+ parent_slot, cow_ret, search_start, 0,
+ prealloc_dest);
return ret;
}
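
/*
 * Every pre-existing call site passes 0 for the new argument, e.g.:
 *
 *	ret = btrfs_cow_block(trans, root, buf, parent, slot, &buf, 0);
 *
 * Only btrfs_search_slot() below threads a reserved bytenr through.  The
 * WARN_ON(prealloc_dest) in the early-return path above flags a caller
 * that reserved space for a block which turned out not to need cow; such
 * a reservation would otherwise be leaked.
 */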
err = __btrfs_cow_block(trans, root, cur, parent, i,
&cur, search_start,
min(16 * blocksize,
- (end_slot - i) * blocksize));
+ (end_slot - i) * blocksize), 0);
if (err) {
btrfs_tree_unlock(cur);
free_extent_buffer(cur);
		child = read_node_slot(root, mid, 0);
		BUG_ON(!child);
		btrfs_tree_lock(child);
- ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
+ ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0);
BUG_ON(ret);
spin_lock(&root->node_lock);
if (left) {
btrfs_tree_lock(left);
wret = btrfs_cow_block(trans, root, left,
- parent, pslot - 1, &left);
+ parent, pslot - 1, &left, 0);
if (wret) {
ret = wret;
goto enospc;
if (right) {
btrfs_tree_lock(right);
wret = btrfs_cow_block(trans, root, right,
- parent, pslot + 1, &right);
+ parent, pslot + 1, &right, 0);
if (wret) {
ret = wret;
goto enospc;
wret = 1;
} else {
ret = btrfs_cow_block(trans, root, left, parent,
- pslot - 1, &left);
+ pslot - 1, &left, 0);
if (ret)
wret = 1;
else {
} else {
ret = btrfs_cow_block(trans, root, right,
parent, pslot + 1,
- &right);
+ &right, 0);
if (ret)
wret = 1;
else {
u8 lowest_level = 0;
u64 blocknr;
u64 gen;
+ struct btrfs_key prealloc_block;
lowest_level = p->lowest_level;
WARN_ON(lowest_level && ins_len);
!mutex_is_locked(&root->fs_info->alloc_mutex));
if (ins_len < 0)
lowest_unlock = 2;
+
+ prealloc_block.objectid = 0;
+
again:
if (p->skip_locking)
b = btrfs_root_node(root);
while (b) {
level = btrfs_header_level(b);
+
+		/*
+		 * set up the path now so we can release all of its locks
+		 * if we hit lock contention in the cow code below
+		 */
+ p->nodes[level] = b;
+ if (!p->skip_locking)
+ p->locks[level] = 1;
+
if (cow) {
int wret;
+
+			/* check whether a cow of this block is required at all */
+ spin_lock(&root->fs_info->hash_lock);
+ if (btrfs_header_generation(b) == trans->transid &&
+ !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
+ spin_unlock(&root->fs_info->hash_lock);
+ goto cow_done;
+ }
+ spin_unlock(&root->fs_info->hash_lock);
+
+			/* we do have to cow; is our old preallocation
+			 * the right size?
+			 */
+ if (prealloc_block.objectid &&
+ prealloc_block.offset != b->len) {
+ btrfs_free_reserved_extent(root,
+ prealloc_block.objectid,
+ prealloc_block.offset);
+ prealloc_block.objectid = 0;
+ }
+
+			/*
+			 * for higher level blocks, avoid doing the allocation
+			 * while this block's lock and the parent's lock are
+			 * held, if anyone else is waiting on them
+			 */
+ if (level > 1 && !prealloc_block.objectid &&
+ btrfs_path_lock_waiting(p, level)) {
+ u32 size = b->len;
+ u64 hint = b->start;
+
+ btrfs_release_path(root, p);
+ ret = btrfs_reserve_extent(trans, root,
+ size, size, 0,
+ hint, (u64)-1,
+ &prealloc_block, 0);
+ BUG_ON(ret);
+ goto again;
+ }
+
wret = btrfs_cow_block(trans, root, b,
p->nodes[level + 1],
p->slots[level + 1],
- &b);
+ &b, prealloc_block.objectid);
+ prealloc_block.objectid = 0;
if (wret) {
free_extent_buffer(b);
- return wret;
+ ret = wret;
+ goto done;
}
}
+cow_done:
BUG_ON(!cow && ins_len);
		WARN_ON(level != btrfs_header_level(b));
level = btrfs_header_level(b);
+
p->nodes[level] = b;
if (!p->skip_locking)
p->locks[level] = 1;
+
ret = check_block(root, p, level);
- if (ret)
- return -1;
+ if (ret) {
+ ret = -1;
+ goto done;
+ }
ret = bin_search(b, key, level, &slot);
if (level != 0) {
BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
int sret = split_node(trans, root, p, level);
BUG_ON(sret > 0);
- if (sret)
- return sret;
+ if (sret) {
+ ret = sret;
+ goto done;
+ }
b = p->nodes[level];
slot = p->slots[level];
} else if (ins_len < 0) {
int sret = balance_level(trans, root, p,
level);
- if (sret)
- return sret;
+ if (sret) {
+ ret = sret;
+ goto done;
+ }
b = p->nodes[level];
if (!b) {
btrfs_release_path(NULL, p);
int sret = split_leaf(trans, root, key,
p, ins_len, ret == 0);
BUG_ON(sret > 0);
- if (sret)
- return sret;
+ if (sret) {
+ ret = sret;
+ goto done;
+ }
}
unlock_up(p, level, lowest_unlock);
- return ret;
+ goto done;
}
}
- return 1;
+ ret = 1;
+done:
+ if (prealloc_block.objectid) {
+ btrfs_free_reserved_extent(root,
+ prealloc_block.objectid,
+ prealloc_block.offset);
+ }
+
+ return ret;
}
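
/*
 * Distilled form of the reserve-outside-the-locks pattern used in
 * btrfs_search_slot() above, assuming this tree's headers.
 * prealloc_for_cow() is a hypothetical helper, not part of the patch;
 * "should_prealloc" stands in for the level > 1 &&
 * btrfs_path_lock_waiting(p, level) test.  A return of 1 tells the
 * caller to goto again and re-descend from the root.
 */
static int prealloc_for_cow(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *p,
			    struct extent_buffer *b, int should_prealloc,
			    struct btrfs_key *prealloc)
{
	u32 size = b->len;
	u64 hint = b->start;
	int ret;

	/* an old reservation of the wrong size is useless: give it back */
	if (prealloc->objectid && prealloc->offset != size) {
		btrfs_free_reserved_extent(root, prealloc->objectid,
					   prealloc->offset);
		prealloc->objectid = 0;
	}
	if (!should_prealloc || prealloc->objectid)
		return 0;

	/* drop every lock on the path, then reserve with none held */
	btrfs_release_path(root, p);
	ret = btrfs_reserve_extent(trans, root, size, size, 0,
				   hint, (u64)-1, prealloc, 0);
	BUG_ON(ret);
	return 1;
}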
/*
/* cow and double check */
ret = btrfs_cow_block(trans, root, right, upper,
- slot + 1, &right);
+ slot + 1, &right, 0);
if (ret)
goto out_unlock;
/* cow and double check */
ret = btrfs_cow_block(trans, root, left,
- path->nodes[1], slot - 1, &left);
+ path->nodes[1], slot - 1, &left, 0);
if (ret) {
/* we hit -ENOSPC, but it isn't fatal here */
ret = 1;
return 0;
}
+int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
+{
+ maybe_lock_mutex(root);
+ set_extent_dirty(&root->fs_info->free_space_cache,
+ start, start + len - 1, GFP_NOFS);
+ maybe_unlock_mutex(root);
+ return 0;
+}
+
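/*
 * A reservation that is never converted into a real extent by
 * btrfs_alloc_reserved_extent() exists only in the in-memory free space
 * cache, so cancelling it is the single set_extent_dirty() call above.
 * A hypothetical helper showing the pairing:
 */
static void cancel_reservation(struct btrfs_root *root, struct btrfs_key *ins)
{
	if (ins->objectid) {
		btrfs_free_reserved_extent(root, ins->objectid, ins->offset);
		ins->objectid = 0;
	}
}
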
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 num_bytes, u64 min_alloc_size,
maybe_unlock_mutex(root);
return ret;
}
+
+struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u32 blocksize)
+{
+ struct extent_buffer *buf;
+
+ buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+ btrfs_set_header_generation(buf, trans->transid);
+ btrfs_tree_lock(buf);
+ clean_tree_block(trans, root, buf);
+ btrfs_set_buffer_uptodate(buf);
+ set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
+ buf->start + buf->len - 1, GFP_NOFS);
+ trans->blocks_used++;
+ return buf;
+}
+
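/*
 * btrfs_init_new_buffer() factors out the buffer setup shared by the
 * fresh-allocation path and the preallocated path.  Typical use with a
 * reservation (sketch; ins comes from btrfs_reserve_extent()):
 *
 *	struct extent_buffer *cow;
 *
 *	cow = btrfs_init_new_buffer(trans, root, ins.objectid, ins.offset);
 *	if (IS_ERR(cow))
 *		return PTR_ERR(cow);
 *
 * The buffer comes back tree-locked, marked up to date, and recorded in
 * the transaction's dirty pages.
 */
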
/*
* helper function to allocate a block for a given tree
* returns the tree buffer or NULL.
BUG_ON(ret > 0);
return ERR_PTR(ret);
}
- buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
- if (!buf) {
- btrfs_free_extent(trans, root, ins.objectid, blocksize,
- root->root_key.objectid, ref_generation,
- 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
- btrfs_set_header_generation(buf, trans->transid);
- btrfs_tree_lock(buf);
- clean_tree_block(trans, root, buf);
- btrfs_set_buffer_uptodate(buf);
-
- if (PageDirty(buf->first_page)) {
- printk("page %lu dirty\n", buf->first_page->index);
- WARN_ON(1);
- }
- set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
- trans->blocks_used++;
+ buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
return buf;
}