/* the dirty list is only used by non-reference counted roots */
struct list_head dirty_list;
- spinlock_t orphan_lock;
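+ /* protects the dead and orphan lists below */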
+ spinlock_t list_lock;
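+ /* dead copies of this root, newest first, waiting to be dropped */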
+ struct list_head dead_list;
struct list_head orphan_list;
};
INIT_LIST_HEAD(&root->dirty_list);
INIT_LIST_HEAD(&root->orphan_list);
+ INIT_LIST_HEAD(&root->dead_list);
spin_lock_init(&root->node_lock);
- spin_lock_init(&root->orphan_lock);
+ spin_lock_init(&root->list_lock);
mutex_init(&root->objectid_mutex);
btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
printk("btrfs: at umount reference cache size %Lu\n",
fs_info->total_ref_cache_size);
}
-
+
if (fs_info->extent_root->node)
free_extent_buffer(fs_info->extent_root->node);
/*
* For (parent_gen > 0 && parent_gen > ref_gen):
*
- * we reach here through the oldest root, therefore
- * all other reference from same snapshot should have
+ * we reach here through the oldest root, therefore
+ * all other references from the same snapshot should have
* a larger generation.
*/
if ((root_objectid != btrfs_ref_root(leaf, ref_item)) ||
if (!eb)
continue;
extent_start = eb->start;
- } else
+ } else
extent_start = bytenr;
ret = get_reference_status(root, extent_start, ref_generation,
struct btrfs_leaf_ref *ref;
struct btrfs_extent_info *info;
- ref = btrfs_alloc_leaf_ref(nr_file_extents);
+ ref = btrfs_alloc_leaf_ref(root, nr_file_extents);
if (!ref) {
WARN_ON(1);
goto out;
ref->generation = btrfs_header_generation(buf);
ref->nritems = nr_file_extents;
info = ref->extents;
-
+
for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
u64 disk_bytenr;
btrfs_item_key_to_cpu(buf, &key, i);
BUG_ON(!root->ref_tree);
ret = btrfs_add_leaf_ref(root, ref);
WARN_ON(ret);
- btrfs_free_leaf_ref(ref);
+ btrfs_free_leaf_ref(root, ref);
}
out:
return 0;
}
static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_root *root,
struct extent_buffer *leaf)
{
u64 leaf_owner;
}
static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ struct btrfs_root *root,
struct btrfs_leaf_ref *ref)
{
int i;
ret = drop_leaf_ref(trans, root, ref);
BUG_ON(ret);
btrfs_remove_leaf_ref(root, ref);
- btrfs_free_leaf_ref(ref);
+ btrfs_free_leaf_ref(root, ref);
*level = 0;
break;
}
return ret;
}
EXPORT_SYMBOL(try_release_extent_buffer);
-
BUG_ON(ret);
return ret;
}
-
.compat_ioctl = btrfs_ioctl,
#endif
};
-
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
- spin_lock(&root->orphan_lock);
+ spin_lock(&root->list_lock);
/* already on the orphan list, we're good */
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
- spin_unlock(&root->orphan_lock);
+ spin_unlock(&root->list_lock);
return 0;
}
list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
- spin_unlock(&root->orphan_lock);
+ spin_unlock(&root->list_lock);
/*
* insert an orphan item to track this unlinked/truncated file
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
- spin_lock(&root->orphan_lock);
+ spin_lock(&root->list_lock);
if (list_empty(&BTRFS_I(inode)->i_orphan)) {
- spin_unlock(&root->orphan_lock);
+ spin_unlock(&root->list_lock);
return 0;
}
list_del_init(&BTRFS_I(inode)->i_orphan);
if (!trans) {
- spin_unlock(&root->orphan_lock);
+ spin_unlock(&root->list_lock);
return 0;
}
- spin_unlock(&root->orphan_lock);
+ spin_unlock(&root->list_lock);
ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
* add this inode to the orphan list so btrfs_orphan_del does
* the proper thing when we hit it
*/
- spin_lock(&root->orphan_lock);
+ spin_lock(&root->list_lock);
list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
- spin_unlock(&root->orphan_lock);
+ spin_unlock(&root->list_lock);
/*
* if this is a bad inode, means we actually succeeded in
BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
posix_acl_release(BTRFS_I(inode)->i_default_acl);
- spin_lock(&BTRFS_I(inode)->root->orphan_lock);
+ spin_lock(&BTRFS_I(inode)->root->list_lock);
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
" list\n", inode->i_ino);
dump_stack();
}
- spin_unlock(&BTRFS_I(inode)->root->orphan_lock);
+ spin_unlock(&BTRFS_I(inode)->root->list_lock);
while(1) {
ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
{
return mutex_is_locked(&eb->mutex);
}
-
free_extent_buffer(next);
}
}
-
#include "ref-cache.h"
#include "transaction.h"
-struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents)
+struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
+ int nr_extents)
{
struct btrfs_leaf_ref *ref;
+ size_t size = btrfs_leaf_ref_size(nr_extents);
- ref = kmalloc(btrfs_leaf_ref_size(nr_extents), GFP_NOFS);
+ ref = kmalloc(size, GFP_NOFS);
if (ref) {
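+ /* account the new ref against the global cache size printed at unmount */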
+ spin_lock(&root->fs_info->ref_cache_lock);
+ root->fs_info->total_ref_cache_size += size;
+ spin_unlock(&root->fs_info->ref_cache_lock);
+
memset(ref, 0, sizeof(*ref));
atomic_set(&ref->usage, 1);
INIT_LIST_HEAD(&ref->list);
return ref;
}
-void btrfs_free_leaf_ref(struct btrfs_leaf_ref *ref)
+void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
{
if (!ref)
return;
WARN_ON(atomic_read(&ref->usage) == 0);
if (atomic_dec_and_test(&ref->usage)) {
+ size_t size = btrfs_leaf_ref_size(ref->nritems);
+
BUG_ON(ref->in_tree);
kfree(ref);
+
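+ /* last reference is gone, subtract its bytes from the global total */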
+ spin_lock(&root->fs_info->ref_cache_lock);
+ root->fs_info->total_ref_cache_size -= size;
+ spin_unlock(&root->fs_info->ref_cache_lock);
}
}
else
return parent;
}
-
+
entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
entry->in_tree = 1;
rb_link_node(node, parent, p);
return NULL;
}
-int btrfs_remove_leaf_refs(struct btrfs_root *root)
+int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen)
{
- struct rb_node *rb;
struct btrfs_leaf_ref *ref = NULL;
struct btrfs_leaf_ref_tree *tree = root->ref_tree;
return 0;
spin_lock(&tree->lock);
- while(!btrfs_leaf_ref_tree_empty(tree)) {
- rb = rb_first(&tree->root);
- ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
+ while(!list_empty(&tree->list)) {
+ ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
+ BUG_ON(!ref->in_tree);
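+ /* the list is oldest first, stop at the first ref newer than max_root_gen */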
+ if (ref->root_gen > max_root_gen)
+ break;
+
rb_erase(&ref->rb_node, &tree->root);
ref->in_tree = 0;
list_del_init(&ref->list);
spin_unlock(&tree->lock);
-
- btrfs_free_leaf_ref(ref);
-
+ btrfs_free_leaf_ref(root, ref);
cond_resched();
spin_lock(&tree->lock);
}
{
int ret = 0;
struct rb_node *rb;
- size_t size = btrfs_leaf_ref_size(ref->nritems);
struct btrfs_leaf_ref_tree *tree = root->ref_tree;
spin_lock(&tree->lock);
if (rb) {
ret = -EEXIST;
} else {
- spin_lock(&root->fs_info->ref_cache_lock);
- root->fs_info->total_ref_cache_size += size;
- spin_unlock(&root->fs_info->ref_cache_lock);
atomic_inc(&ref->usage);
list_add_tail(&ref->list, &tree->list);
}
int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
{
- size_t size = btrfs_leaf_ref_size(ref->nritems);
struct btrfs_leaf_ref_tree *tree = root->ref_tree;
BUG_ON(!ref->in_tree);
spin_lock(&tree->lock);
-
- spin_lock(&root->fs_info->ref_cache_lock);
- root->fs_info->total_ref_cache_size -= size;
- spin_unlock(&root->fs_info->ref_cache_lock);
rb_erase(&ref->rb_node, &tree->root);
ref->in_tree = 0;
spin_unlock(&tree->lock);
- btrfs_free_leaf_ref(ref);
+ btrfs_free_leaf_ref(root, ref);
return 0;
}
-
int in_tree;
atomic_t usage;
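+ /* generation of the root this ref was created from */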
+ u64 root_gen;
u64 bytenr;
u64 owner;
u64 generation;
static inline size_t btrfs_leaf_ref_size(int nr_extents)
{
- return sizeof(struct btrfs_leaf_ref) +
+ return sizeof(struct btrfs_leaf_ref) +
sizeof(struct btrfs_extent_info) * nr_extents;
}
static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree)
{
tree->root.rb_node = NULL;
- tree->last = NULL;
INIT_LIST_HEAD(&tree->list);
spin_lock_init(&tree->lock);
}
}
void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree);
-struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents);
-void btrfs_free_leaf_ref(struct btrfs_leaf_ref *ref);
+struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
+ int nr_extents);
+void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
u64 bytenr);
int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
-int btrfs_remove_leaf_refs(struct btrfs_root *root);
+int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen);
int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
#endif
BUG_ON(!dirty);
dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
BUG_ON(!dirty->root);
-
dirty->latest_root = root;
INIT_LIST_HEAD(&dirty->list);
root->commit_root = btrfs_root_node(root);
- root->dirty_root = dirty;
memcpy(dirty->root, root, sizeof(*root));
- dirty->root->ref_tree = &root->ref_tree_struct;
-
spin_lock_init(&dirty->root->node_lock);
+ spin_lock_init(&dirty->root->list_lock);
mutex_init(&dirty->root->objectid_mutex);
+ INIT_LIST_HEAD(&dirty->root->dead_list);
dirty->root->node = root->commit_root;
dirty->root->commit_root = NULL;
+
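+ /* remember this dead root so useful leaf refs are not pruned too early */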
+ spin_lock(&root->list_lock);
+ list_add(&dirty->root->dead_list, &root->dead_list);
+ spin_unlock(&root->list_lock);
+
+ root->dirty_root = dirty;
} else {
WARN_ON(1);
}
list_del_init(next);
root = list_entry(next, struct btrfs_root, dirty_list);
update_cowonly_root(trans, root);
- if (root->fs_info->closing)
- btrfs_remove_leaf_refs(root);
}
return 0;
}
free_extent_buffer(root->commit_root);
root->commit_root = NULL;
-
+
+ spin_lock(&root->list_lock);
+ list_del_init(&dirty->root->dead_list);
+ spin_unlock(&root->list_lock);
+
kfree(dirty->root);
kfree(dirty);
unsigned long nr;
u64 num_bytes;
u64 bytes_used;
+ u64 max_useless;
int ret = 0;
int err;
}
mutex_unlock(&root->fs_info->drop_mutex);
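+ /* leaf refs older than the oldest remaining dead root are now useless */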
+ spin_lock(&root->list_lock);
+ list_del_init(&dirty->root->dead_list);
+ if (!list_empty(&root->dead_list)) {
+ struct btrfs_root *oldest;
+ oldest = list_entry(root->dead_list.prev,
+ struct btrfs_root, dead_list);
+ max_useless = oldest->root_key.offset - 1;
+ } else {
+ max_useless = root->root_key.offset - 1;
+ }
+ spin_unlock(&root->list_lock);
+
nr = trans->blocks_used;
ret = btrfs_end_transaction(trans, tree_root);
BUG_ON(ret);
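+ /* prune every cached leaf ref at or below the useless generation */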
+ ret = btrfs_remove_leaf_refs(root, max_useless);
+ BUG_ON(ret);
+
free_extent_buffer(dirty->root->node);
kfree(dirty->root);
kfree(dirty);
put_transaction(cur_trans);
put_transaction(cur_trans);
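+ /*
+ * move the dirty roots to the dead list, at unmount pull them back
+ * so they are dropped before the filesystem is gone
+ */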
+ list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
if (root->fs_info->closing)
list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
- else
- list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
mutex_unlock(&root->fs_info->trans_mutex);
kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
return 0;
}
-