if (!ret &&
!verify_parent_transid(io_tree, eb, parent_transid))
return ret;
-
+	printk(KERN_INFO "read extent buffer pages failed with "
+	       "ret %d mirror no %d\n", ret, mirror_num);
num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
eb->start, eb->len);
if (num_copies == 1)
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
+		printk(KERN_ERR "bad tree block start %llu %llu\n",
+		       (unsigned long long)found_start,
+		       (unsigned long long)eb->start);
ret = -EIO;
goto err;
}
if (ret == 0) {
buf->flags |= EXTENT_UPTODATE;
+ } else {
+ WARN_ON(1);
}
return buf;
}
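
The fragments above are the retry path for metadata reads: when the csum or
parent transid check fails, the caller bumps mirror_num and rereads from the
next copy until btrfs_num_copies() says there is nothing left to try. A
minimal sketch of that loop, reconstructed from the context lines above (the
read helper's exact arguments are an assumption):

	for (;;) {
		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
					       btree_get_extent, mirror_num);
		if (!ret &&
		    !verify_parent_transid(io_tree, eb, parent_transid))
			return ret;	/* this copy checked out */

		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
					      eb->start, eb->len);
		if (num_copies == 1)
			return ret;	/* no other mirror to read */

		mirror_num++;
		if (mirror_num > num_copies)
			return ret;	/* every copy failed */
	}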
/* the get_extent function already copied into the page */
if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
+ check_page_uptodate(tree, page);
unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
cur = cur + iosize;
page_offset += iosize;
* properly set. releasepage may drop page->private
* on us if the page isn't already dirty.
*/
+ lock_page(page);
if (i == 0) {
- lock_page(page);
set_page_extent_head(page, eb->len);
} else if (PagePrivate(page) &&
page->private != EXTENT_PAGE_PRIVATE) {
- lock_page(page);
set_page_extent_mapped(page);
- unlock_page(page);
}
__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
- if (i == 0)
- unlock_page(page);
+		set_extent_dirty(tree, page_offset(page),
+				 page_offset(page) + PAGE_CACHE_SIZE - 1,
+				 GFP_NOFS);
+ unlock_page(page);
}
- return set_extent_dirty(tree, eb->start,
- eb->start + eb->len - 1, GFP_NOFS);
+ return 0;
}
EXPORT_SYMBOL(set_extent_buffer_dirty);
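
Taken together, the hunks above rework set_extent_buffer_dirty so the extent
state is dirtied one page at a time while that page is locked, instead of
with a single range-wide set_extent_dirty call at the end; that closes the
window in which releasepage could strip page->private from a not-yet-dirty
page. Condensed, the loop now reads (names from the hunk, error paths
omitted):

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		if (i == 0)
			set_page_extent_head(page, eb->len);
		else if (PagePrivate(page) &&
			 page->private != EXTENT_PAGE_PRIVATE)
			set_page_extent_mapped(page);
		__set_page_dirty_nobuffers(page);
		set_extent_dirty(tree, page_offset(page),
				 page_offset(page) + PAGE_CACHE_SIZE - 1,
				 GFP_NOFS);
		unlock_page(page);
	}
	return 0;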
if (all_uptodate) {
if (start_i == 0)
eb->flags |= EXTENT_UPTODATE;
+ if (ret) {
+ printk("all up to date but ret is %d\n", ret);
+ }
goto unlock_exit;
}
mirror_num);
if (err) {
ret = err;
+ printk("err %d from __extent_read_full_page\n", ret);
}
} else {
unlock_page(page);
submit_one_bio(READ, bio, mirror_num);
if (ret || !wait) {
+ if (ret)
+ printk("ret %d wait %d returning\n", ret, wait);
return ret;
}
for (i = start_i; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
wait_on_page_locked(page);
if (!PageUptodate(page)) {
+ printk("page not uptodate after wait_on_page_locked\n");
ret = -EIO;
}
}
#include "compat.h"
-static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
- struct page **prepared_pages,
- const char __user * buf)
+static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
+					 int write_bytes,
+					 struct page **prepared_pages,
+					 const char __user *buf)
{
long page_fault = 0;
int i;
return page_fault ? -EFAULT : 0;
}
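
This is the first of several conversions below that add noinline; the likely
point (not stated in the patch itself) is that keeping each helper as a
distinct stack frame makes per-function stack usage measurable and keeps the
symbols visible in stack traces. The preferred kernel spelling puts noinline
between the storage class and the return type, e.g. (the helper name here is
made up for illustration):

	static noinline int example_helper(struct inode *inode, u64 start)
	{
		return 0;
	}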
-static void btrfs_drop_pages(struct page **pages, size_t num_pages)
+static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
size_t i;
for (i = 0; i < num_pages; i++) {
return err;
}
-int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
+noinline int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
{
struct extent_map *em;
struct extent_map *split = NULL;
* it is either truncated or split. Anything entirely inside the range
* is deleted from the tree.
*/
-int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
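
The comment above is the whole contract of btrfs_drop_extents: for a drop
range [start, end), an extent overlapping an edge of the range is trimmed
back to that edge, one spanning both edges is split in two, and one entirely
inside is removed. With made-up offsets:

	/*
	 * drop range:            [start ........ end)
	 * extent A:    [====|xxxx]                        trimmed at start
	 * extent B:               [xxxxxxxx]              deleted
	 * extent C:                     [xxxx|====]       trimmed at end
	 * extent D:    [====|xxxxxxxxxxxxxxxxxx|====]     split in two
	 */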
/*
* this gets pages into the page cache and locks them down
*/
-static int prepare_pages(struct btrfs_root *root, struct file *file,
+static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
struct page **pages, size_t num_pages,
loff_t pos, unsigned long first_index,
unsigned long last_index, size_t write_bytes)
return 0;
}
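
prepare_pages, per its comment, just pins and locks each page backing the
write before the copy_from_user step. The core is a loop along these lines
(simplified; the real function also sets up extent mappings and has a fuller
error path):

	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, first_index + i);
		if (!pages[i])
			return -ENOMEM;	/* simplified error handling */
	}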
-static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
- u8 *uuid)
+static noinline struct btrfs_device *__find_device(struct list_head *head,
+ u64 devid, u8 *uuid)
{
struct btrfs_device *dev;
struct list_head *cur;
return NULL;
}
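
__find_device is a plain linear scan, which is fine at the scale of a device
list. From memory of the surrounding code (so treat field names as a best
recollection rather than a quotation), the body is roughly:

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)))
			return dev;
	}
	return NULL;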
-static struct btrfs_fs_devices *find_fsid(u8 *fsid)
+static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
struct list_head *cur;
struct btrfs_fs_devices *fs_devices;
* the list if the block device is congested. This way, multiple devices
* can make progress from a single worker thread.
*/
-int run_scheduled_bios(struct btrfs_device *device)
+static noinline int run_scheduled_bios(struct btrfs_device *device)
{
struct bio *pending;
struct backing_dev_info *bdi;
run_scheduled_bios(device);
}
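
The truncated comment above describes what run_scheduled_bios does with that
list: drain the device's pending bios, and if the backing device reports
write congestion, put the remainder back and reschedule the worker so other
devices get serviced. In outline (requeue_remainder is a made-up stand-in
for bookkeeping this hunk does not show):

	while (pending) {
		struct bio *cur = pending;

		pending = pending->bi_next;
		cur->bi_next = NULL;
		submit_bio(cur->bi_rw, cur);

		if (pending && bdi_write_congested(bdi)) {
			requeue_remainder(device, pending);
			break;
		}
	}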
-static int device_list_add(const char *path,
+static noinline int device_list_add(const char *path,
struct btrfs_super_block *disk_super,
u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
* called very infrequently and that a given device has a small number
* of extents
*/
-static int find_free_dev_extent(struct btrfs_trans_handle *trans,
- struct btrfs_device *device,
- struct btrfs_path *path,
- u64 num_bytes, u64 *start)
+static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_device *device,
+ struct btrfs_path *path,
+ u64 num_bytes, u64 *start)
{
struct btrfs_key key;
struct btrfs_root *root = device->dev_root;
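
Given the comment's assumptions (rare calls, few extents per device), a
first-fit linear walk is enough: step through the device's extents in key
order, tracking where the previous one ends, and take the first gap of at
least num_bytes. A self-contained illustration of just that search, over a
plain sorted array instead of a btree (the demo_* names are invented):

	struct demo_extent { u64 start; u64 len; };

	static int demo_find_free(const struct demo_extent *ext, int nr,
				  u64 search_start, u64 num_bytes, u64 *start)
	{
		u64 last_end = search_start;
		int i;

		for (i = 0; i < nr; i++) {
			if (ext[i].start >= last_end + num_bytes) {
				*start = last_end;	/* gap is big enough */
				return 0;
			}
			if (ext[i].start + ext[i].len > last_end)
				last_end = ext[i].start + ext[i].len;
		}
		*start = last_end;	/* append past the last extent */
		return 0;
	}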
return ret;
}
-int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
+noinline int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset,
return ret;
}
-static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
+static noinline int find_next_chunk(struct btrfs_root *root,
+ u64 objectid, u64 *offset)
{
struct btrfs_path *path;
int ret;
return ret;
}
-static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
- u64 *objectid)
+static noinline int find_next_devid(struct btrfs_root *root,
+ struct btrfs_path *path, u64 *objectid)
{
int ret;
struct btrfs_key key;
goto out;
}
-int btrfs_update_device(struct btrfs_trans_handle *trans,
- struct btrfs_device *device)
+noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+ struct btrfs_device *device)
{
int ret;
struct btrfs_path *path;
return 0;
}
-static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
- int sub_stripes)
+static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
+ int num_stripes, int sub_stripes)
{
if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
return calc_size;
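
The RAID1/DUP arm above is the only one visible; reconstructed from the code
of this period (so read it as a paraphrase, not a quotation), the whole
mapping is:

	static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
						int num_stripes, int sub_stripes)
	{
		if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
			return calc_size;	/* each stripe is a full copy */
		else if (type & BTRFS_BLOCK_GROUP_RAID10)
			return calc_size * num_stripes / sub_stripes;
		else
			return calc_size * num_stripes;	/* raid0/single add up */
	}

So a RAID10 chunk with num_stripes = 4 and sub_stripes = 2 holds
2 * calc_size of usable space: four stripes, each byte stored twice.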
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
-int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
- int rw, struct bio *bio)
+static noinline int schedule_bio(struct btrfs_root *root,
+				 struct btrfs_device *device,
+				 int rw, struct bio *bio)
{
int should_queue = 1;
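
Past the opening shown here, schedule_bio submits reads synchronously and,
for writes, chains the bio onto the device's pending list under the device
lock, queueing the worker only when one is not already draining the list. A
condensed sketch, using field names as I recall them from this period of the
code:

	if (!(rw & (1 << BIO_RW))) {	/* reads skip the async path */
		submit_bio(rw, bio);
		return 0;
	}

	spin_lock(&device->io_lock);
	if (device->pending_bio_tail)
		device->pending_bio_tail->bi_next = bio;
	device->pending_bio_tail = bio;
	if (!device->pending_bios)
		device->pending_bios = bio;
	if (device->running_pending)
		should_queue = 0;	/* a worker is already on it */
	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;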