*
* Must be called with fs_info->balance_lock held.
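*
* Returns the restripe target profile in extended format, or 0 if
* restripe is not in progress for the given @flags.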
*/
-u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
u64 target = 0;
* reduce to the target profile
*/
spin_lock(&fs_info->balance_lock);
- target = btrfs_get_restripe_target(fs_info, flags);
+ target = get_restripe_target(fs_info, flags);
if (target) {
/* Pick target profile only if it's already available */
if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
}
#ifdef CONFIG_BTRFS_DEBUG
-void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group)
+static void fragment_free_space(struct btrfs_block_group_cache *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
u64 start = block_group->key.objectid;
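/*
 * Debug fragmentation knocks out every other chunk of the group's free
 * space, so the callers below account half of the free bytes as used
 * before calling fragment_free_space().
 */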
block_group->space_info->bytes_used += bytes_used >> 1;
spin_unlock(&block_group->lock);
spin_unlock(&block_group->space_info->lock);
- btrfs_fragment_free_space(block_group);
+ fragment_free_space(block_group);
}
#endif
cache->space_info->bytes_used += bytes_used >> 1;
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- btrfs_fragment_free_space(cache);
+ fragment_free_space(cache);
}
#endif
mutex_unlock(&caching_ctl->mutex);
* data in this block group. That check should be done by the relocation
* routine, not this function.
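*
* Returns 0 if the block group was made read-only, or a negative errno
* if it could not be (for example when @force is unset and there is not
* enough free space left in the other block groups).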
*/
-int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
+static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
struct btrfs_space_info *sinfo = cache->space_info;
u64 num_bytes;
spin_unlock(&block_group->lock);
/* We don't want to force the issue, only flip if it's ok. */
- ret = __btrfs_inc_block_group_ro(block_group, 0);
+ ret = inc_block_group_ro(block_group, 0);
up_write(&space_info->groups_sem);
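/* Failing to flip the group read-only is not fatal; just skip it. */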
if (ret < 0) {
ret = 0;
set_avail_alloc_bits(info, cache->flags);
if (btrfs_chunk_readonly(info, cache->key.objectid)) {
- __btrfs_inc_block_group_ro(cache, 1);
+ inc_block_group_ro(cache, 1);
} else if (btrfs_block_group_used(&cache->item) == 0) {
ASSERT(list_empty(&cache->bg_list));
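/* Queue the never-used block group for removal by the cleaner thread. */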
btrfs_mark_bg_unused(cache);
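/*
 * Avoid allocating from un-mirrored block groups if mirrored block
 * groups exist: force the RAID0 and SINGLE lists read-only.
 */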
list_for_each_entry(cache,
&space_info->block_groups[BTRFS_RAID_RAID0],
list)
- __btrfs_inc_block_group_ro(cache, 1);
+ inc_block_group_ro(cache, 1);
list_for_each_entry(cache,
&space_info->block_groups[BTRFS_RAID_SINGLE],
list)
- __btrfs_inc_block_group_ro(cache, 1);
+ inc_block_group_ro(cache, 1);
}
btrfs_init_global_block_rsv(info);
u64 new_bytes_used = size - bytes_used;
bytes_used += new_bytes_used >> 1;
- btrfs_fragment_free_space(cache);
+ fragment_free_space(cache);
}
#endif
/*
* if restripe for this chunk_type is on, pick the target profile and
* return; otherwise do the usual balance
*/
- stripped = btrfs_get_restripe_target(fs_info, flags);
+ stripped = get_restripe_target(fs_info, flags);
if (stripped)
return extended_to_chunk(stripped);
goto out;
}
- ret = __btrfs_inc_block_group_ro(cache, 0);
+ ret = inc_block_group_ro(cache, 0);
if (!ret)
goto out;
alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
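/*
 * Force-allocate a fresh chunk first so that marking this group
 * read-only does not leave the space_info without usable space.
 */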
ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
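/* Retry with the freshly allocated chunk in place. */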
- ret = __btrfs_inc_block_group_ro(cache, 0);
+ ret = inc_block_group_ro(cache, 0);
out:
if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
alloc_flags = update_block_group_flags(fs_info, cache->flags);