spin_lock_init(&tree->buffer_lock);
tree->mapping = mapping;
}
-EXPORT_SYMBOL(extent_io_tree_init);
static struct extent_state *alloc_extent_state(gfp_t mask)
{
init_waitqueue_head(&state->wq);
return state;
}
-EXPORT_SYMBOL(alloc_extent_state);
static void free_extent_state(struct extent_state *state)
{
kmem_cache_free(extent_state_cache, state);
}
}
-EXPORT_SYMBOL(free_extent_state);
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
struct rb_node *node)
cond_resched();
goto again;
}
-EXPORT_SYMBOL(clear_extent_bit);
static int wait_on_state(struct extent_io_tree *tree,
struct extent_state *state)
spin_unlock(&tree->lock);
return 0;
}
-EXPORT_SYMBOL(wait_extent_bit);
static void set_state_bits(struct extent_io_tree *tree,
struct extent_state *state,
cond_resched();
goto again;
}
-EXPORT_SYMBOL(set_extent_bit);
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
mask);
}
-EXPORT_SYMBOL(set_extent_dirty);
/*
 * Set EXTENT_ORDERED on every byte in the range [start, end] of @tree.
 * Thin convenience wrapper over set_extent_bit(); @mask is the gfp
 * allocation context passed through for any state structs it allocates.
 */
int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	int ret;

	ret = set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
	return ret;
}
-EXPORT_SYMBOL(set_extent_ordered);
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int bits, gfp_t mask)
return set_extent_bit(tree, start, end, bits, 0, NULL,
mask);
}
-EXPORT_SYMBOL(set_extent_bits);
/*
 * Clear the given @bits from the range [start, end] of @tree.
 * Convenience wrapper over clear_extent_bit(); the two zero arguments
 * select its default behavior (NOTE(review): presumably wake/delete
 * flags — confirm against clear_extent_bit's parameter list).
 */
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	int ret;

	ret = clear_extent_bit(tree, start, end, bits, 0, 0, mask);
	return ret;
}
-EXPORT_SYMBOL(clear_extent_bits);
int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
EXTENT_DELALLOC | EXTENT_DIRTY,
0, NULL, mask);
}
-EXPORT_SYMBOL(set_extent_delalloc);
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
return clear_extent_bit(tree, start, end,
EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
-EXPORT_SYMBOL(clear_extent_dirty);
/*
 * Clear EXTENT_ORDERED from the range [start, end] of @tree.
 * Wrapper over clear_extent_bit(); unlike clear_extent_bits() it passes
 * 1 for the first flag argument (NOTE(review): presumably to wake
 * waiters — confirm against clear_extent_bit's parameter list).
 */
int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	int ret;

	ret = clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
	return ret;
}
-EXPORT_SYMBOL(clear_extent_ordered);
int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
mask);
}
-EXPORT_SYMBOL(set_extent_new);
static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
mask);
}
-EXPORT_SYMBOL(set_extent_uptodate);
static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
u64 end, gfp_t mask)
{
return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
-EXPORT_SYMBOL(wait_on_extent_writeback);
/*
* either insert or lock state struct between start and end use mask to tell
}
return err;
}
-EXPORT_SYMBOL(lock_extent);
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
}
return 1;
}
-EXPORT_SYMBOL(try_lock_extent);
/*
 * Drop the EXTENT_LOCKED bit on the range [start, end] of @tree,
 * releasing a lock taken by lock_extent()/try_lock_extent().
 * Wrapper over clear_extent_bit(); passes 1 for the first flag argument
 * (NOTE(review): presumably to wake blocked lockers — confirm against
 * clear_extent_bit's parameter list).
 */
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	int ret;

	ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
	return ret;
}
-EXPORT_SYMBOL(unlock_extent);
/*
* helper function to set pages and extents in the tree dirty
set_extent_dirty(tree, start, end, GFP_NOFS);
return 0;
}
-EXPORT_SYMBOL(set_range_dirty);
/*
* helper function to set both pages and extents in the tree writeback
spin_unlock(&tree->lock);
return ret;
}
-EXPORT_SYMBOL(find_first_extent_bit);
/* find the first state struct with 'bits' set after 'start', and
* return it. tree->lock must be held. NULL will returned if
out:
return NULL;
}
-EXPORT_SYMBOL(find_first_extent_bit_state);
/*
* find a contiguous range of bytes in the file marked as delalloc, not
}
return 0;
}
-EXPORT_SYMBOL(extent_clear_unlock_delalloc);
/*
* count the number of bytes in the tree that have a given bit(s)
spin_unlock(&tree->lock);
return bitset;
}
-EXPORT_SYMBOL(test_range_bit);
/*
* helper function to set a given page up to date if all the
set_page_private(page, EXTENT_PAGE_PRIVATE);
}
}
-EXPORT_SYMBOL(set_page_extent_mapped);
static void set_page_extent_head(struct page *page, unsigned long len)
{
submit_one_bio(READ, bio, 0, bio_flags);
return ret;
}
-EXPORT_SYMBOL(extent_read_full_page);
/*
* the writepage semantics are similar to regular writepage. extent
submit_one_bio(WRITE, epd.bio, 0, 0);
return ret;
}
-EXPORT_SYMBOL(extent_write_full_page);
int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
u64 start, u64 end, get_extent_t *get_extent,
submit_one_bio(WRITE, epd.bio, 0, 0);
return ret;
}
-EXPORT_SYMBOL(extent_write_locked_range);
-
int extent_writepages(struct extent_io_tree *tree,
struct address_space *mapping,
submit_one_bio(WRITE, epd.bio, 0, 0);
return ret;
}
-EXPORT_SYMBOL(extent_writepages);
int extent_readpages(struct extent_io_tree *tree,
struct address_space *mapping,
submit_one_bio(READ, bio, 0, bio_flags);
return 0;
}
-EXPORT_SYMBOL(extent_readpages);
/*
* basic invalidatepage code, this waits on any locked or writeback
1, 1, GFP_NOFS);
return 0;
}
-EXPORT_SYMBOL(extent_invalidatepage);
/*
* simple commit_write call, set_range_dirty is used to mark both
}
return 0;
}
-EXPORT_SYMBOL(extent_commit_write);
int extent_prepare_write(struct extent_io_tree *tree,
struct inode *inode, struct page *page,
/* FIXME, zero out newly allocated blocks on error */
return err;
}
-EXPORT_SYMBOL(extent_prepare_write);
/*
* a helper for releasepage, this tests for areas of the page that
}
return ret;
}
-EXPORT_SYMBOL(try_release_extent_state);
/*
* a helper for releasepage. As long as there are no locked extents
}
return try_release_extent_state(map, tree, page, mask);
}
-EXPORT_SYMBOL(try_release_extent_mapping);
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
get_extent_t *get_extent)
__free_extent_buffer(eb);
return exists;
}
-EXPORT_SYMBOL(alloc_extent_buffer);
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
return eb;
}
-EXPORT_SYMBOL(find_extent_buffer);
void free_extent_buffer(struct extent_buffer *eb)
{
WARN_ON(1);
}
-EXPORT_SYMBOL(free_extent_buffer);
int clear_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb)
}
return 0;
}
-EXPORT_SYMBOL(clear_extent_buffer_dirty);
int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
struct extent_buffer *eb)
return wait_on_extent_writeback(tree, eb->start,
eb->start + eb->len - 1);
}
-EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
int set_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb)
}
return 0;
}
-EXPORT_SYMBOL(set_extent_buffer_dirty);
int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
struct extent_buffer *eb)
}
return 0;
}
-EXPORT_SYMBOL(set_extent_buffer_uptodate);
int extent_range_uptodate(struct extent_io_tree *tree,
u64 start, u64 end)
}
return pg_uptodate;
}
-EXPORT_SYMBOL(extent_buffer_uptodate);
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb,
}
return ret;
}
-EXPORT_SYMBOL(read_extent_buffer_pages);
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
unsigned long start,
i++;
}
}
-EXPORT_SYMBOL(read_extent_buffer);
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
unsigned long min_len, char **token, char **map,
*map_len = PAGE_CACHE_SIZE - offset;
return 0;
}
-EXPORT_SYMBOL(map_private_extent_buffer);
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
unsigned long min_len,
}
return err;
}
-EXPORT_SYMBOL(map_extent_buffer);
/*
 * Tear down an atomic kernel mapping created when part of @eb was
 * mapped (see map_extent_buffer()/map_private_extent_buffer()).
 * @token: cookie from the matching kmap_atomic call
 * @km:    kmap slot the mapping was made in
 *
 * @eb itself is not referenced here; it is kept in the signature for
 * symmetry with the map side.
 */
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
-EXPORT_SYMBOL(unmap_extent_buffer);
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
unsigned long start,
}
return ret;
}
-EXPORT_SYMBOL(memcmp_extent_buffer);
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
unsigned long start, unsigned long len)
i++;
}
}
-EXPORT_SYMBOL(write_extent_buffer);
void memset_extent_buffer(struct extent_buffer *eb, char c,
unsigned long start, unsigned long len)
i++;
}
}
-EXPORT_SYMBOL(memset_extent_buffer);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
unsigned long dst_offset, unsigned long src_offset,
i++;
}
}
-EXPORT_SYMBOL(copy_extent_buffer);
static void move_pages(struct page *dst_page, struct page *src_page,
unsigned long dst_off, unsigned long src_off,
len -= cur;
}
}
-EXPORT_SYMBOL(memcpy_extent_buffer);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len)
len -= cur;
}
}
-EXPORT_SYMBOL(memmove_extent_buffer);
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
spin_unlock(&tree->buffer_lock);
return ret;
}
-EXPORT_SYMBOL(try_release_extent_buffer);