* this uses that block instead of allocating a new one. btrfs_alloc_reserved_extent
* is used to finish the allocation.
*/
-int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans,
+static int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
* extents on the btree inode are pretty simple, there's one extent
* that covers the entire device
*/
-struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
- size_t page_offset, u64 start, u64 len,
- int create)
+static struct extent_map *btree_get_extent(struct inode *inode,
+ struct page *page, size_t page_offset, u64 start, u64 len,
+ int create)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
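
Because one extent covers the whole device, the lookup normally reduces to a cache hit in the extent_map tree. A minimal sketch of that fast path, assuming the extent_map_tree spinlock convention of this era:

	/* sketch, not part of the patch: the common case is a cache hit */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	spin_unlock(&em_tree->lock);
	if (em)
		return em;	/* the one cached extent covered [start, len) */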
* checksum a dirty tree block before IO. This has extra checks to make
* sure we only fill in the checksum field in the first page of a multi-page block
*/
-int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
+static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
struct extent_io_tree *tree;
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
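
The first-page rule leans on the page-private encoding stored by set_page_extent_head() (made static later in this patch): only the head page of a multi-page block carries EXTENT_PAGE_PRIVATE_FIRST_PAGE. A hedged sketch of the guard:

	/* sketch: the csum lives in the header, which is on the head page */
	if (!(page->private & EXTENT_PAGE_PRIVATE_FIRST_PAGE))
		return 0;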
return ret;
}
-int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
+static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
struct extent_io_tree *tree;
return extent_writepages(tree, mapping, btree_get_extent, wbc);
}
-int btree_readpage(struct file *file, struct page *page)
+static int btree_readpage(struct file *file, struct page *page)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(page->mapping->host)->io_tree;
}
}
-void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
struct inode *inode;
struct extent_map_tree *em_tree;
put_bh(bh);
}
-int write_all_supers(struct btrfs_root *root)
+static int write_all_supers(struct btrfs_root *root)
{
struct list_head *cur;
struct list_head *head = &root->fs_info->fs_devices->devices;
* this adds the block group to the fs_info rb tree for the block group
* cache
*/
-int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
+static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
struct btrfs_block_group_cache *block_group)
{
struct rb_node **p;
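
Insertion is the stock kernel rb-tree pattern keyed by the block group's start offset. A sketch, assuming the tree and node fields are named block_group_cache_tree and cache_node (locking omitted):

	struct rb_node **p = &info->block_group_cache_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid)
			p = &(*p)->rb_left;
		else if (block_group->key.objectid > cache->key.objectid)
			p = &(*p)->rb_right;
		else
			return -EEXIST;	/* already cached */
	}
	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	return 0;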
/*
* return the block group that starts at or after bytenr
*/
-struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
+static struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
btrfs_fs_info *info,
u64 bytenr)
{
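
The matching lookup is a successor search: remember the best candidate whose start is >= bytenr and keep walking left for a tighter fit. Sketch, under the same field-name assumptions as above:

	struct rb_node *n = info->block_group_cache_tree.rb_node;
	struct btrfs_block_group_cache *cache, *found = NULL;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		if (cache->key.objectid >= bytenr) {
			found = cache;		/* candidate; try smaller */
			n = n->rb_left;
		} else {
			n = n->rb_right;
		}
	}
	return found;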
return 0;
}
-int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
+static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
u32 *refs)
{
int ret;
return flags;
}
-int __alloc_chunk_for_shrink(struct btrfs_root *root,
+static int __alloc_chunk_for_shrink(struct btrfs_root *root,
struct btrfs_block_group_cache *shrink_block_group,
int force)
{
return ret;
}
-int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *key)
+static int find_first_block_group(struct btrfs_root *root,
+ struct btrfs_path *path, struct btrfs_key *key)
{
int ret = 0;
struct btrfs_key found_key;
}
EXPORT_SYMBOL(extent_io_tree_init);
-struct extent_state *alloc_extent_state(gfp_t mask)
+static struct extent_state *alloc_extent_state(gfp_t mask)
{
struct extent_state *state;
#ifdef LEAK_DEBUG
}
-EXPORT_SYMBOL(alloc_extent_state);
-void free_extent_state(struct extent_state *state)
+static void free_extent_state(struct extent_state *state)
{
if (!state)
return;
* [start, end] is inclusive
* This takes the tree lock.
*/
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
+static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
int exclusive, u64 *failed_start, gfp_t mask)
{
struct extent_state *state;
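
With set_extent_bit() now static, outside users go through the bit-specific wrappers in this file, whose shape the clear_extent_new() and set_extent_writeback() hunks below show. A representative one:

int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, mask);
}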
}
EXPORT_SYMBOL(set_extent_new);
-int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
+static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
-EXPORT_SYMBOL(clear_extent_new);
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
}
EXPORT_SYMBOL(set_extent_uptodate);
-int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
-EXPORT_SYMBOL(clear_extent_uptodate);
-int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
+static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
0, NULL, mask);
}
-EXPORT_SYMBOL(set_extent_writeback);
-int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
+static int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
-EXPORT_SYMBOL(clear_extent_writeback);
int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
/*
* helper function to set both pages and extents in the tree writeback
*/
-int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
+static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
unsigned long index = start >> PAGE_CACHE_SHIFT;
unsigned long end_index = end >> PAGE_CACHE_SHIFT;
set_extent_writeback(tree, start, end, GFP_NOFS);
return 0;
}
-EXPORT_SYMBOL(set_range_writeback);
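
The elided middle of set_range_writeback() is a plain walk over the page range with the page-cache API of this era, assuming the tree carries its address_space in ->mapping; roughly:

	/* sketch of the elided loop body */
	while (index <= end_index) {
		struct page *page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}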
/*
* find the first offset in the io tree with 'bits' set. zero is
spin_unlock_irq(&tree->lock);
return total_bytes;
}
+
+#if 0
/*
* helper function to lock both pages and extents in the tree.
* pages must be locked first.
*/
-int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
+static int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
unsigned long index = start >> PAGE_CACHE_SHIFT;
unsigned long end_index = end >> PAGE_CACHE_SHIFT;
}
return err;
}
-EXPORT_SYMBOL(lock_range);
/*
* helper function to unlock both pages and extents in the tree.
*/
-int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
+static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
unsigned long index = start >> PAGE_CACHE_SHIFT;
unsigned long end_index = end >> PAGE_CACHE_SHIFT;
unlock_extent(tree, start, end, GFP_NOFS);
return 0;
}
-EXPORT_SYMBOL(unlock_range);
+#endif
/*
* set the private field for a given byte offset in the tree. If there isn't
}
EXPORT_SYMBOL(set_page_extent_mapped);
-void set_page_extent_head(struct page *page, unsigned long len)
+static void set_page_extent_head(struct page *page, unsigned long len)
{
set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}
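
The private word packs a head-page flag together with the buffer length shifted up two bits; the inverse, for illustration only (helper name hypothetical):

static unsigned long extent_head_len(struct page *page)
{
	return (page->private & ~EXTENT_PAGE_PRIVATE_FIRST_PAGE) >> 2;
}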
* WB_SYNC_ALL then we were called for data integrity and we must wait for
* existing IO to complete.
*/
-int extent_write_cache_pages(struct extent_io_tree *tree,
+static int extent_write_cache_pages(struct extent_io_tree *tree,
struct address_space *mapping,
struct writeback_control *wbc,
writepage_t writepage, void *data,
}
return ret;
}
-EXPORT_SYMBOL(extent_write_cache_pages);
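
The WB_SYNC_ALL contract the comment describes matches the generic write_cache_pages() rule; inside the page loop it comes down to a sketch like:

	/* sketch: data-integrity callers must not skip pages under IO */
	if (wbc->sync_mode != WB_SYNC_NONE)
		wait_on_page_writeback(page);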
static noinline void flush_write_bio(void *data)
{
mutex_unlock(&block_group->alloc_mutex);
}
-struct btrfs_free_space *btrfs_find_free_space_offset(struct
+#if 0
+static struct btrfs_free_space *btrfs_find_free_space_offset(struct
btrfs_block_group_cache
*block_group, u64 offset,
u64 bytes)
return ret;
}
-struct btrfs_free_space *btrfs_find_free_space_bytes(struct
+static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
btrfs_block_group_cache
*block_group, u64 offset,
u64 bytes)
return ret;
}
+#endif
struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
*block_group, u64 offset,
#include "disk-io.h"
#include "transaction.h"
-int find_name_in_backref(struct btrfs_path *path, const char * name,
+static int find_name_in_backref(struct btrfs_path *path, const char *name,
int name_len, struct btrfs_inode_ref **ref_ret)
{
struct extent_buffer *leaf;
* bytes in this file, and to maintain the list of inodes that
* have pending delalloc work to be done.
*/
-int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
+static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
unsigned long old, unsigned long bits)
{
unsigned long flags;
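
The bookkeeping this hook performs is per-inode and per-fs delalloc byte counts under the fs_info delalloc lock; a sketch, with field names assumed from this era's struct btrfs_fs_info:

	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	}
	return 0;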
/*
* extent_io.c clear_bit_hook, see set_bit_hook for why
*/
-int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
+static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
unsigned long old, unsigned long bits)
{
if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
* At IO completion time the csums attached on the ordered extent record
* are inserted into the btree
*/
-int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio,
+static int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
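
On the write side the whole hook reduces to attaching csums before submission; assuming this era's btrfs_csum_one_bio() helper (signature assumed), the body is roughly:

	int ret;

	/* sketch: checksum the data pages; IO completion inserts them */
	ret = btrfs_csum_one_bio(root, inode, bio);
	BUG_ON(ret);
	return 0;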
* At IO completion time the csums attached on the ordered extent record
* are inserted into the btree
*/
-int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
* extent_io.c submission hook. This does the right thing for csum calculation on write,
* or reading the csums from the tree before a read
*/
-int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
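
The split the comment promises is on bio direction; with this era's bio flag conventions, and helper names/signatures assumed, the skeleton is:

	/* sketch only: helper signatures assumed */
	if (rw & (1 << BIO_RW)) {
		/* write: csums are generated by the submit_bio_start hook */
	} else {
		/* read: fetch expected csums before the pages complete */
		btrfs_lookup_bio_sums(root, inode, bio);
	}
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);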
struct btrfs_work work;
};
-void btrfs_writepage_fixup_worker(struct btrfs_work *work)
+static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
struct btrfs_writepage_fixup *fixup;
struct btrfs_ordered_extent *ordered;
* to fix it up. The async helper will wait for ordered extents, set
* the delalloc bit and make it safe to write the page.
*/
-int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
+static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
struct inode *inode = page->mapping->host;
struct btrfs_writepage_fixup *fixup;
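
Queueing the async helper is a small allocate-and-defer dance; a sketch, assuming the fixup_workers queue and btrfs_queue_worker() from async-thread.c:

	struct btrfs_root *root = BTRFS_I(inode)->root;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;
	page_cache_get(page);		/* the worker drops this ref */
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;			/* tell the caller to redirty */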
return 0;
}
-int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
+static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate)
{
return btrfs_finish_ordered_io(page->mapping->host, start, end);
int last_mirror;
};
-int btrfs_io_failed_hook(struct bio *failed_bio,
+static int btrfs_io_failed_hook(struct bio *failed_bio,
struct page *page, u64 start, u64 end,
struct extent_state *state)
{
* each time an IO finishes, we do a fast check in the IO failure tree
* to see if we need to process or clean up an io_failure_record
*/
-int btrfs_clean_io_failures(struct inode *inode, u64 start)
+static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
u64 private;
u64 private_failure;
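
The fast check asks the io_failure_tree whether anything is recorded at all before doing real work; a sketch using the count_range_bits()/get_state_private() helpers from extent_io.c:

	private = 0;
	if (!count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			      (u64)-1, 1, EXTENT_DIRTY))
		return 0;	/* nothing recorded, the common case */
	get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
			  &private_failure);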
* if there's a match, we allow the bio to finish. If not, we go through
* the io_failure_record routines to find good copies
*/
-int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
+static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
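
The match is a stored-vs-computed csum compare over the page payload; a sketch with the csum helpers of this era (inode/root as in the surrounding function, zeroit label assumed):

	char *kaddr;
	u64 private;
	u32 csum = ~(u32)0;

	if (get_state_private(&BTRFS_I(inode)->io_tree, start, &private))
		goto zeroit;	/* no stored csum: treat as bad */
	kaddr = kmap_atomic(page, KM_IRQ0);
	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	kunmap_atomic(kaddr, KM_IRQ0);
	if (csum != private)
		goto zeroit;	/* mismatch: fall into the failure paths */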
* Invalidate a single dcache entry at the root of the filesystem.
* Needed after creation of snapshot or subvolume.
*/
-void btrfs_invalidate_dcache_root(struct inode *dir, char *name,
- int namelen)
+static void btrfs_invalidate_dcache_root(struct inode *dir,
+ char *name, int namelen)
{
struct dentry *alias, *entry;
struct qstr qstr;
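
The invalidation itself is plain VFS: find the root alias, look the name up under it, and drop the entry. A sketch with standard dcache calls:

	alias = d_find_alias(dir);
	if (!alias)
		return;
	qstr.name = name;
	qstr.len = namelen;
	qstr.hash = full_name_hash(qstr.name, qstr.len);
	entry = d_lookup(alias, &qstr);
	if (entry) {
		d_invalidate(entry);
		dput(entry);
	}
	dput(alias);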
}
-int btrfs_defrag_file(struct file *file)
+static int btrfs_defrag_file(struct file *file)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
return 0;
}
-long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
{
struct btrfs_ioctl_vol_args *vol_args;
int ret;
return ret;
}
-long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
{
struct btrfs_ioctl_vol_args *vol_args;
int ret;
return ret;
}
-long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, u64 off,
- u64 olen, u64 destoff)
+static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ u64 off, u64 olen, u64 destoff)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
return ret;
}
-long btrfs_ioctl_clone_range(struct file *file, unsigned long argptr)
+static long btrfs_ioctl_clone_range(struct file *file, unsigned long argptr)
{
struct btrfs_ioctl_clone_range_args args;
* basically own the machine, and have a very in depth understanding
* of all the possible deadlocks and enospc problems.
*/
-long btrfs_ioctl_trans_start(struct file *file)
+static long btrfs_ioctl_trans_start(struct file *file)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
return ret;
}
+#if 0 /* this will get used when snapshot deletion is implemented */
int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
u64 root_id, u8 type, u64 ref_id)
btrfs_free_path(path);
return ret;
}
+#endif
int btrfs_find_root_ref(struct btrfs_root *tree_root,
struct btrfs_path *path,
return misc_register(&btrfs_misc);
}
-void btrfs_interface_exit(void)
+static void btrfs_interface_exit(void)
{
if (misc_deregister(&btrfs_misc) < 0)
printk("misc_deregister failed for control device");
#include "locking.h"
#include "print-tree.h"
#include "compat.h"
+#include "tree-log.h"
/* magic values for the inode_only field in btrfs_log_inode:
*
* tree of log tree roots. This must be called with a tree log transaction
* running (see start_log_trans).
*/
-int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
+static int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_key key;
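
What follows (elided here) boils down to inserting a root item into the log root tree keyed by BTRFS_TREE_LOG_OBJECTID; very loosely, with the insert target and item source assumed:

	/* sketch: the key layout is real, the rest is assumed */
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = root->root_key.objectid;
	return btrfs_insert_root(trans, root->fs_info->log_root_tree,
				 &key, &root->root_item);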
return ret;
}
-int wait_log_commit(struct btrfs_root *log)
+static int wait_log_commit(struct btrfs_root *log)
{
DEFINE_WAIT(wait);
u64 transid = log->fs_info->tree_log_transid;
return 0;
}
-void pending_bios_fn(struct btrfs_work *work)
+static void pending_bios_fn(struct btrfs_work *work)
{
struct btrfs_device *device;
return ret;
}
-int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
+static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 start)
{
goto out;
}
-int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
+static int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device)
{
int ret;
return 0;
}
-int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
+static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
chunk_offset)
{
struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
return ret;
}
-int btrfs_relocate_chunk(struct btrfs_root *root,
+static int btrfs_relocate_chunk(struct btrfs_root *root,
u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset)
{
return ret;
}
-int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
+static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_key *key,
struct btrfs_chunk *chunk, int item_size)
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
+#include "compression.h"
/* Plan: call deflate() with avail_in == *sourcelen,
avail_out = *dstlen - 12 and flush == Z_FINISH.
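
For reference, the plan spelled out with the kernel zlib API. The workspace sizing, the compression level, and the src/dst/sourcelen/dstlen parameters of the surrounding function are assumptions; reserving 12 bytes of the output budget guarantees a "successful" deflate actually saved space:

	z_stream strm;
	int ret;

	strm.workspace = vmalloc(zlib_deflate_workspacesize());
	if (!strm.workspace)
		return -ENOMEM;
	if (zlib_deflateInit(&strm, 3) != Z_OK) {	/* level assumed */
		vfree(strm.workspace);
		return -EIO;
	}
	strm.next_in = src;
	strm.avail_in = *sourcelen;
	strm.next_out = dst;
	strm.avail_out = *dstlen - 12;
	ret = zlib_deflate(&strm, Z_FINISH);
	if (ret == Z_STREAM_END)
		*dstlen = strm.total_out;	/* everything fit */
	zlib_deflateEnd(&strm);
	vfree(strm.workspace);
	return ret == Z_STREAM_END ? 0 : -E2BIG;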