next = bh->b_private;
bh->b_end_io(bh, !ioend->io_error);
}
- if (unlikely(ioend->io_error))
- vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
- vn_iowake(ioend->io_vnode);
+ if (unlikely(ioend->io_error)) {
+ vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error,
+ __FILE__,__LINE__);
+ }
+ vn_iowake(XFS_I(ioend->io_inode));
mempool_free(ioend, xfs_ioend_pool);
}
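[Editor's note, not part of the patch: the teardown above only runs once the ioend's hold count (io_remaining, below) drains to zero. The release path that drives it is untouched by this diff; a sketch of its likely shape, where the xfs_finish_ioend body and the xfsdatad_workqueue name are assumptions drawn from the surrounding code base:

void
xfs_finish_ioend(
        xfs_ioend_t *ioend)
{
        /* Drop one hold; the last dropper queues the completion work. */
        if (atomic_dec_and_test(&ioend->io_remaining))
                queue_work(xfsdatad_workqueue, &ioend->io_work);
}
]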
xfs_setfilesize(
xfs_ioend_t *ioend)
{
- xfs_inode_t *ip;
+ xfs_inode_t *ip = XFS_I(ioend->io_inode);
xfs_fsize_t isize;
xfs_fsize_t bsize;
- ip = xfs_vtoi(ioend->io_vnode);
- if (!ip)
- return;
-
ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
ASSERT(ioend->io_type != IOMAP_READ);
{
xfs_ioend_t *ioend =
container_of(work, xfs_ioend_t, io_work);
- bhv_vnode_t *vp = ioend->io_vnode;
xfs_off_t offset = ioend->io_offset;
size_t size = ioend->io_size;
if (likely(!ioend->io_error)) {
- xfs_bmap(xfs_vtoi(vp), offset, size,
+ xfs_bmap(XFS_I(ioend->io_inode), offset, size,
BMAPI_UNWRITTEN, NULL, NULL);
xfs_setfilesize(ioend);
}
ioend->io_error = 0;
ioend->io_list = NULL;
ioend->io_type = type;
- ioend->io_vnode = vn_from_inode(inode);
+ ioend->io_inode = inode;
ioend->io_buffer_head = NULL;
ioend->io_buffer_tail = NULL;
- atomic_inc(&ioend->io_vnode->v_iocount);
+ atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
ioend->io_offset = 0;
ioend->io_size = 0;
unlock_buffer(bh);
} while ((bh = next_bh) != NULL);
- vn_iowake(ioend->io_vnode);
+ vn_iowake(XFS_I(ioend->io_inode));
mempool_free(ioend, xfs_ioend_pool);
} while ((ioend = next) != NULL);
}
unsigned int io_type; /* delalloc / unwritten */
int io_error; /* I/O error code */
atomic_t io_remaining; /* hold count */
- struct bhv_vnode *io_vnode; /* file being written to */
+ struct inode *io_inode; /* file being written to */
struct buffer_head *io_buffer_head;/* buffer linked list head */
struct buffer_head *io_buffer_tail;/* buffer linked list tail */
size_t io_size; /* size of the extent */
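[Editor's note, not part of the patch: pulled together, the fields touched here give the post-patch shape of the I/O completion descriptor. The following reconstruction is inferred from this diff alone; io_list, io_offset, and io_work are taken from their uses in the hunks above, and the field order is an assumption:

typedef struct xfs_ioend {
        struct xfs_ioend   *io_list;        /* next ioend in chain */
        unsigned int       io_type;         /* delalloc / unwritten */
        int                io_error;        /* I/O error code */
        atomic_t           io_remaining;    /* hold count */
        struct inode       *io_inode;       /* file being written to */
        struct buffer_head *io_buffer_head; /* buffer linked list head */
        struct buffer_head *io_buffer_tail; /* buffer linked list tail */
        size_t             io_size;         /* size of the extent */
        xfs_off_t          io_offset;       /* offset in the file */
        struct work_struct io_work;         /* completion work item */
} xfs_ioend_t;
]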
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
+/*
+ * And this gunk is needed for xfs_mount.h
+ */
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dmapi.h"
+#include "xfs_inum.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+
uint64_t vn_generation; /* vnode generation number */
DEFINE_SPINLOCK(vnumber_lock);
void
vn_iowait(
- bhv_vnode_t *vp)
+ xfs_inode_t *ip)
{
- wait_queue_head_t *wq = vptosync(vp);
- wait_event(*wq, (atomic_read(&vp->v_iocount) == 0));
+ wait_queue_head_t *wq = vptosync(ip);
+ wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}
void
vn_iowake(
- bhv_vnode_t *vp)
+ xfs_inode_t *ip)
{
- if (atomic_dec_and_test(&vp->v_iocount))
- wake_up(vptosync(vp));
+ if (atomic_dec_and_test(&ip->i_iocount))
+ wake_up(vptosync(ip));
}
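[Editor's note, not part of the patch: vn_iowait()/vn_iowake() implement a plain drain-to-zero pattern; every in-flight I/O holds a reference on i_iocount, and a waiter sleeps until the count reaches zero. The kernel version hashes the inode onto a shared wait-queue array via vptosync(); as a standalone illustration only, a userspace analogue with a single condition variable (all names hypothetical, not kernel code) could read:

#include <pthread.h>
#include <stdatomic.h>

struct io_count {
        atomic_int      count;      /* outstanding I/O, cf. i_iocount */
        pthread_mutex_t lock;
        pthread_cond_t  drained;
};

static void io_count_init(struct io_count *ic)
{
        atomic_init(&ic->count, 0);
        pthread_mutex_init(&ic->lock, NULL);
        pthread_cond_init(&ic->drained, NULL);
}

/* Take a hold before issuing async I/O, cf. atomic_inc(&ip->i_iocount). */
static void io_hold(struct io_count *ic)
{
        atomic_fetch_add(&ic->count, 1);
}

/* Drop a hold on completion; the last one out wakes waiters, cf. vn_iowake(). */
static void io_release(struct io_count *ic)
{
        if (atomic_fetch_sub(&ic->count, 1) == 1) {
                pthread_mutex_lock(&ic->lock);
                pthread_cond_broadcast(&ic->drained);
                pthread_mutex_unlock(&ic->lock);
        }
}

/* Block until every outstanding I/O has completed, cf. vn_iowait(). */
static void io_wait(struct io_count *ic)
{
        pthread_mutex_lock(&ic->lock);
        while (atomic_load(&ic->count) != 0)
                pthread_cond_wait(&ic->drained, &ic->lock);
        pthread_mutex_unlock(&ic->lock);
}
]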
/*
*/
void
vn_ioerror(
- bhv_vnode_t *vp,
+ xfs_inode_t *ip,
int error,
char *f,
int l)
{
- bhv_vfs_t *vfsp = vfs_from_sb(vp->v_inode.i_sb);
+ bhv_vfs_t *vfsp = XFS_MTOVFS(ip->i_mount);
if (unlikely(error == -ENODEV))
bhv_vfs_force_shutdown(vfsp, SHUTDOWN_DEVICE_REQ, f, l);
ASSERT(VN_CACHED(vp) == 0);
- atomic_set(&vp->v_iocount, 0);
-
#ifdef XFS_VNODE_TRACE
vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
#endif /* XFS_VNODE_TRACE */
typedef struct bhv_vnode {
bhv_vnumber_t v_number; /* in-core vnode number */
- atomic_t v_iocount; /* outstanding I/O count */
#ifdef XFS_VNODE_TRACE
struct ktrace *v_trace; /* trace header structure */
#endif
extern int __vn_revalidate(struct bhv_vnode *, bhv_vattr_t *);
extern void vn_revalidate_core(struct bhv_vnode *, bhv_vattr_t *);
-extern void vn_iowait(struct bhv_vnode *vp);
-extern void vn_iowake(struct bhv_vnode *vp);
-
-extern void vn_ioerror(struct bhv_vnode *vp, int error, char *f, int l);
+/*
+ * Yeah, these don't take a vnode anymore at all; all of this should
+ * be cleaned up at some point.
+ */
+extern void vn_iowait(struct xfs_inode *ip);
+extern void vn_iowake(struct xfs_inode *ip);
+extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l);
static inline int vn_count(struct bhv_vnode *vp)
{
ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
ip->i_ino = ino;
ip->i_mount = mp;
+ atomic_set(&ip->i_iocount, 0);
spin_lock_init(&ip->i_flags_lock);
/*
mp = ip->i_mount;
vp = XFS_ITOV(ip);
- vn_iowait(vp); /* wait for the completion of any pending DIOs */
+ vn_iowait(ip); /* wait for the completion of any pending DIOs */
/*
* Call toss_pages or flushinval_pages to get rid of pages
struct hlist_node i_cnode; /* cluster link node */
xfs_fsize_t i_size; /* in-memory size */
+ atomic_t i_iocount; /* outstanding I/O count */
/* Trace buffers per inode. */
#ifdef XFS_BMAP_TRACE
struct ktrace *i_xtrace; /* inode extent list trace */
* place after this point
*/
if (flags & SYNC_IOWAIT)
- vn_iowait(vp);
+ vn_iowait(ip);
xfs_ilock(ip, XFS_ILOCK_SHARED);
}
}
/* wait for all I/O to complete */
- vn_iowait(vp);
+ vn_iowait(ip);
if (!code)
code = xfs_itruncate_data(ip, vap->va_size);
return 0;
}
- vn_iowait(vp);
+ vn_iowait(ip);
ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
need_iolock = 0;
if (need_iolock) {
xfs_ilock(ip, XFS_IOLOCK_EXCL);
- vn_iowait(vp); /* wait for the completion of any pending DIOs */
+ vn_iowait(ip); /* wait for the completion of any pending DIOs */
}
rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, NBPP);