+ delayed_refs = &trans->transaction->delayed_refs;
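+
+ /*
+ * Callers must hold both the head's mutex and its spinlock while
+ * the refs of this head are being run, so assert that here.
+ */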
+ lockdep_assert_held(&locked_ref->mutex);
+ lockdep_assert_held(&locked_ref->lock);
+
+ while ((ref = select_delayed_ref(locked_ref))) {
+ if (ref->seq &&
+ btrfs_check_delayed_seq(fs_info, ref->seq)) {
@@ ... @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_head *locked_ref = NULL;
- struct btrfs_delayed_extent_op *extent_op;
ktime_t start = ktime_get();
int ret;
unsigned long count = 0;
unsigned long actual_count = 0;
- int must_insert_reserved = 0;
delayed_refs = &trans->transaction->delayed_refs;
- while (1) {
+ do {
if (!locked_ref) {
- if (count >= nr)
- break;
-
locked_ref = btrfs_obtain_ref_head(trans);
- if (!locked_ref)
- break;
- else if (PTR_ERR(locked_ref) == -EAGAIN) {
- locked_ref = NULL;
- count++;
- continue;
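+ /*
+ * NULL means there are no more ref heads to process, while
+ * ERR_PTR(-EAGAIN) means the head we picked was freed while
+ * waiting for its mutex, so we must select another one.
+ */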
+ if (IS_ERR_OR_NULL(locked_ref)) {
+ if (PTR_ERR(locked_ref) == -EAGAIN) {
+ /* Reset so the next iteration picks a new head. */
+ locked_ref = NULL;
+ continue;
+ } else {
+ break;
+ }
}
+ count++;
}
-
/*
* We need to try and merge add/drops of the same ref since we
* can run into issues with relocate dropping the implicit ref
* and then it being added back again before the drop can
* finish. If we merged anything we need to re-loop so we can
* get a good ref.
*/
spin_lock(&locked_ref->lock);
btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
- ref = select_delayed_ref(locked_ref);
-
- if (ref && ref->seq &&
- btrfs_check_delayed_seq(fs_info, ref->seq)) {
- spin_unlock(&locked_ref->lock);
- unselect_delayed_ref_head(delayed_refs, locked_ref);
- locked_ref = NULL;
- cond_resched();
- count++;
- continue;
- }
-
- /*
- * We're done processing refs in this ref_head, clean everything
- * up and move on to the next ref_head.
- */
- if (!ref) {
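+ /*
+ * actual_count is bumped for every ref that is actually run; it
+ * feeds the average delayed ref runtime stats at the end of this
+ * function.
+ */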
+ ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
+ &actual_count);
+ if (ret < 0 && ret != -EAGAIN) {
+ /*
+ * Error, btrfs_run_delayed_refs_for_head already
+ * unlocked everything so just bail out
+ */
+ return ret;
+ } else if (!ret) {
+ /*
+ * Success, perform the usual cleanup of a processed
+ * head
+ */
ret = cleanup_ref_head(trans, locked_ref);
if (ret > 0 ) {
/* We dropped our lock, we need to loop. */
ret = 0;
continue;
} else if (ret) {
return ret;
}
- locked_ref = NULL;
- count++;
- continue;
}
- actual_count++;
- ref->in_tree = 0;
- rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
- RB_CLEAR_NODE(&ref->ref_node);
- if (!list_empty(&ref->add_list))
- list_del(&ref->add_list);
/*
- * When we play the delayed ref, also correct the ref_mod on
- * head
+ * Either success case or btrfs_run_delayed_refs_for_head
+ * returned -EAGAIN, meaning we need to select another head
*/
- switch (ref->action) {
- case BTRFS_ADD_DELAYED_REF:
- case BTRFS_ADD_DELAYED_EXTENT:
- locked_ref->ref_mod -= ref->ref_mod;
- break;
- case BTRFS_DROP_DELAYED_REF:
- locked_ref->ref_mod += ref->ref_mod;
- break;
- default:
- WARN_ON(1);
- }
- atomic_dec(&delayed_refs->num_entries);
-
- /*
- * Record the must-insert_reserved flag before we drop the spin
- * lock.
- */
- must_insert_reserved = locked_ref->must_insert_reserved;
- locked_ref->must_insert_reserved = 0;
- extent_op = locked_ref->extent_op;
- locked_ref->extent_op = NULL;
- spin_unlock(&locked_ref->lock);
-
- ret = run_one_delayed_ref(trans, ref, extent_op,
- must_insert_reserved);
-
- btrfs_free_delayed_extent_op(extent_op);
- if (ret) {
- unselect_delayed_ref_head(delayed_refs, locked_ref);
- btrfs_put_delayed_ref(ref);
- btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
- ret);
- return ret;
- }
-
- btrfs_put_delayed_ref(ref);
- count++;
+ locked_ref = NULL;
cond_resched();
- }
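+ /*
+ * Keep looping while a head is still locked: cleanup_ref_head()
+ * returning > 0 leaves locked_ref set so its head gets processed
+ * again, even once count has reached nr. nr == -1 means run until
+ * no delayed refs are left.
+ */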
+ } while ((nr != -1 && count < nr) || locked_ref);
/*
* We don't want to include ref heads since we can have empty ref heads