DEFINE_WAIT(wait);
prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
TASK_UNINTERRUPTIBLE);
- batch = log->fs_info->tree_log_batch;
mutex_unlock(&log->fs_info->tree_log_mutex);
if (atomic_read(&log->fs_info->tree_log_writers))
schedule();
}
/*
 * free all the extents used by the tree log. This should be called
* at commit time of the full transaction
*/
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
int ret;
int bytes_del = 0;
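+ /*
+  * the directory was not logged in the current transaction, so
+  * there is nothing in the log tree to remove for it
+  */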
+ if (BTRFS_I(dir)->logged_trans < trans->transid)
+ return 0;
+
ret = join_running_log_trans(root);
if (ret)
return 0;
u64 index;
int ret;
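+ /*
+  * the inode was not logged in the current transaction, so there
+  * is nothing for it in the log tree
+  */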
+ if (BTRFS_I(inode)->logged_trans < trans->transid)
+ return 0;
+
ret = join_running_log_trans(root);
if (ret)
return 0;
struct btrfs_inode_item *inode_item;
u32 size;
int ret;
+ int nritems;
log = root->log_root;
path, 0, trans->transid);
if (ret != 0)
break;
-
+again:
if (min_key.objectid != inode->i_ino)
break;
if (min_key.type > max_key.type)
break;
-
src = path->nodes[0];
size = btrfs_item_size_nr(src, path->slots[0]);
ret = btrfs_insert_empty_item(trans, log, dst_path, &min_key,
}
btrfs_mark_buffer_dirty(dst_path->nodes[0]);
- btrfs_release_path(root, path);
btrfs_release_path(log, dst_path);
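+ /*
+  * copy the rest of the items in this leaf before searching the
+  * btree again, so the forward search is only done once per leaf
+  */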
+ nritems = btrfs_header_nritems(path->nodes[0]);
+ path->slots[0]++;
+ if (path->slots[0] < nritems) {
+ btrfs_item_key_to_cpu(path->nodes[0], &min_key,
+ path->slots[0]);
+ goto again;
+ }
+ btrfs_release_path(root, path);
+
if (min_key.offset < (u64)-1)
min_key.offset++;
else if (min_key.type < (u8)-1)
ret = log_directory_changes(trans, root, inode, path, dst_path);
BUG_ON(ret);
}
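+ /*
+  * record that this inode was logged in this transaction, so log
+  * deletions can be skipped for inodes that were never logged
+  */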
+ BTRFS_I(inode)->logged_trans = trans->transid;
mutex_unlock(&BTRFS_I(inode)->log_mutex);
btrfs_free_path(path);