 	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
 	if (!em)
 		return NULL;
-	em->in_tree = 0;
+	RB_CLEAR_NODE(&em->rb_node);
 	em->flags = 0;
 	em->compress_type = BTRFS_COMPRESS_NONE;
 	em->generation = 0;
 		return;
 	WARN_ON(atomic_read(&em->refs) == 0);
 	if (atomic_dec_and_test(&em->refs)) {
-		WARN_ON(em->in_tree);
+		WARN_ON(extent_map_in_tree(em));
 		WARN_ON(!list_empty(&em->list));
 		kmem_cache_free(extent_map_cache, em);
 	}
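/*
 * Note on extent_map_in_tree(): the hunks here rely on a small helper,
 * presumably added to extent_map.h as part of this change, that tests
 * whether the map is still linked into the rb-tree. A minimal sketch of
 * the assumed definition:
 *
 *	static inline int extent_map_in_tree(const struct extent_map *em)
 *	{
 *		return !RB_EMPTY_NODE(&em->rb_node);
 *	}
 *
 * Together with the RB_CLEAR_NODE() call added in the allocation hunk
 * above, the rb_node itself takes over the job of the removed in_tree
 * flag.
 */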
 		parent = *p;
 		entry = rb_entry(parent, struct extent_map, rb_node);
-		WARN_ON(!entry->in_tree);
-
 		if (em->start < entry->start)
 			p = &(*p)->rb_left;
 		else if (em->start >= extent_map_end(entry))
 		if (end > entry->start && em->start < extent_map_end(entry))
 			return -EEXIST;
-	em->in_tree = 1;
 	rb_link_node(&em->rb_node, orig_parent, p);
 	rb_insert_color(&em->rb_node, root);
 	return 0;
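/*
 * No replacement is needed for the removed "em->in_tree = 1": once
 * rb_link_node() stores the parent pointer in em->rb_node, the node no
 * longer compares as empty. From include/linux/rbtree.h (paraphrased):
 *
 *	#define RB_EMPTY_NODE(node) \
 *		((node)->__rb_parent_color == (unsigned long)(node))
 *	#define RB_CLEAR_NODE(node) \
 *		((node)->__rb_parent_color = (unsigned long)(node))
 *
 * i.e. a map counts as "in the tree" exactly while its rb_node is not
 * self-parented, and linking the node clears that self-parented state.
 */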
 		prev = n;
 		prev_entry = entry;
-		WARN_ON(!entry->in_tree);
-
 		if (offset < entry->start)
 			n = n->rb_left;
 		else if (offset >= extent_map_end(entry))
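/*
 * The two WARN_ON(!entry->in_tree) checks above are dropped rather than
 * converted: presumably because any node reached by walking the rb-tree
 * is in the tree by construction, so there is nothing left to assert
 * once the flag is gone.
 */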
 			em->len += merge->len;
 			em->block_len += merge->block_len;
 			em->block_start = merge->block_start;
-			merge->in_tree = 0;
 			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
 			em->mod_start = merge->mod_start;
 			em->generation = max(em->generation, merge->generation);
 			rb_erase(&merge->rb_node, &tree->map);
+			RB_CLEAR_NODE(&merge->rb_node);
 			free_extent_map(merge);
 		}
 	}
 		em->len += merge->len;
 		em->block_len += merge->block_len;
 		rb_erase(&merge->rb_node, &tree->map);
-		merge->in_tree = 0;
+		RB_CLEAR_NODE(&merge->rb_node);
 		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
 		em->generation = max(em->generation, merge->generation);
 		free_extent_map(merge);
 void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
 {
 	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
-	if (em->in_tree)
+	if (extent_map_in_tree(em))
 		try_merge_map(tree, em);
 }
 	rb_erase(&em->rb_node, &tree->map);
 	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
 		list_del_init(&em->list);
-	em->in_tree = 0;
+	RB_CLEAR_NODE(&em->rb_node);
 	return ret;
 }
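/*
 * Same idiom as in the two merge hunks above: rb_erase() unlinks the node
 * but leaves its parent/colour word stale, so without an explicit clear
 * the map would still look in-tree to extent_map_in_tree() and trip the
 * WARN_ON in free_extent_map() when the last reference is dropped. The
 * pattern, as used on "merge" above, whenever a map leaves the tree:
 *
 *	rb_erase(&merge->rb_node, &tree->map);
 *	RB_CLEAR_NODE(&merge->rb_node);	// now reads as "not in tree"
 *	free_extent_map(merge);		// WARN_ON stays quiet
 */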