diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ ... @@ static int cache_save_setup(
if (trans->aborted)
return 0;
again:
- inode = lookup_free_space_inode(fs_info, block_group, path);
+ inode = lookup_free_space_inode(block_group, path);
if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
ret = PTR_ERR(inode);
btrfs_release_path(path);
@@ ... @@ int btrfs_remove_block_group(
* get the inode first so any iput calls done for the io_list
* aren't the final iput (no unlinks allowed now)
*/
- inode = lookup_free_space_inode(fs_info, block_group, path);
+ inode = lookup_free_space_inode(block_group, path);
mutex_lock(&trans->transaction->cache_write_mutex);
/*
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ ... @@ static struct inode *__lookup_free_space_inode(
return inode;
}

-struct inode *lookup_free_space_inode(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache
- *block_group, struct btrfs_path *path)
+struct inode *lookup_free_space_inode(
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_path *path)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct inode *inode = NULL;
u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
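Note on the hunk above: every block group cache carries a back-pointer to its
fs_info, so the separate fs_info parameter was redundant; the new first line of
the function body derives it from the block group instead. Below is a minimal
compilable sketch of that pattern, with stub types and a hypothetical helper
name (show_fs_info) standing in for the kernel definitions:

#include <stdio.h>

/* Stub types; the real definitions live in the btrfs headers. */
struct btrfs_fs_info { int id; };

struct btrfs_block_group_cache {
        struct btrfs_fs_info *fs_info;  /* back-pointer set at creation */
};

/* After the change: take the block group only and derive fs_info
 * from it, as the added line in the hunk above does. */
static void show_fs_info(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_fs_info *fs_info = block_group->fs_info;

        printf("fs_info id: %d\n", fs_info->id);
}

int main(void)
{
        struct btrfs_fs_info fs = { .id = 1 };
        struct btrfs_block_group_cache bg = { .fs_info = &fs };

        show_fs_info(&bg);      /* no separate fs_info argument needed */
        return 0;
}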
@@ ... @@ int load_free_space_cache(
* once created get their ->cached field set to BTRFS_CACHE_FINISHED so
* we will never try to read their inode item while the fs is mounted.
*/
- inode = lookup_free_space_inode(fs_info, block_group, path);
+ inode = lookup_free_space_inode(block_group, path);
if (IS_ERR(inode)) {
btrfs_free_path(path);
return 0;
}
@@ ... @@ int btrfs_write_out_cache(
spin_unlock(&block_group->lock);

- inode = lookup_free_space_inode(fs_info, block_group, path);
+ inode = lookup_free_space_inode(block_group, path);
if (IS_ERR(inode))
return 0;
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ ... @@
struct btrfs_io_ctl;

-struct inode *lookup_free_space_inode(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache
- *block_group, struct btrfs_path *path);
+struct inode *lookup_free_space_inode(
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_path *path);
int create_free_space_inode(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
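A practical side effect of changing the prototype in free-space-cache.h is
that the compiler enforces the sweep: any caller still passing the old fs_info
argument no longer matches the declaration and fails to build, which is why
every call site is converted in the same patch. A small self-contained
illustration with stub types and a hypothetical caller name (updated_caller),
not the kernel headers:

/* Stub declarations so the snippet is self-contained. */
struct inode;
struct btrfs_path;
struct btrfs_block_group_cache;

/* New prototype: fs_info is gone. */
struct inode *lookup_free_space_inode(struct btrfs_block_group_cache *bg,
                                      struct btrfs_path *path);

struct inode *updated_caller(struct btrfs_block_group_cache *bg,
                             struct btrfs_path *path)
{
        /*
         * A stale call such as lookup_free_space_inode(fs_info, bg, path)
         * would now be rejected at compile time with a "too many
         * arguments" error, so no caller can be missed.
         */
        return lookup_free_space_inode(bg, path);
}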
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ ... @@ int btrfs_relocate_block_group(
goto out;
}
- inode = lookup_free_space_inode(fs_info, rc->block_group, path);
+ inode = lookup_free_space_inode(rc->block_group, path);
btrfs_free_path(path);
if (!IS_ERR(inode))
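All of the call sites touched above keep the kernel's ERR_PTR convention:
lookup_free_space_inode() returns either a valid inode pointer or a negative
errno encoded in the pointer value, and callers must test it with
IS_ERR()/PTR_ERR() before use. Here is a small userspace sketch of that
convention, using simplified stand-ins for the kernel's <linux/err.h> rather
than the real macros:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095

/* Simplified stand-ins for the kernel's ERR_PTR machinery. */
static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *inode = ERR_PTR(-ENOENT); /* "no cache inode exists yet" */

        /* Mirrors the cache_save_setup() hunk: -ENOENT is tolerated,
         * every other error is a hard failure. */
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT)
                printf("hard error: %ld\n", PTR_ERR(inode));
        else if (IS_ERR(inode))
                printf("no inode yet; create it and retry\n");

        return 0;
}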