req->pages = pages;
req->page_descs = page_descs;
req->max_pages = npages;
+ __set_bit(FR_PENDING, &req->flags);
}
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
req->end = NULL;
list_del_init(&req->list);
list_del_init(&req->intr_entry);
+ WARN_ON(test_bit(FR_PENDING, &req->flags));
+ WARN_ON(test_bit(FR_SENT, &req->flags));
smp_wmb();
- req->state = FUSE_REQ_FINISHED;
+ set_bit(FR_FINISHED, &req->flags);
if (test_bit(FR_BACKGROUND, &req->flags)) {
clear_bit(FR_BACKGROUND, &req->flags);
if (fc->num_background == fc->max_background)
if (!fc->no_interrupt) {
/* Any signal may interrupt this */
err = wait_event_interruptible(req->waitq,
- req->state == FUSE_REQ_FINISHED);
+ test_bit(FR_FINISHED, &req->flags));
if (!err)
return;
spin_lock(&fc->lock);
set_bit(FR_INTERRUPTED, &req->flags);
- if (req->state == FUSE_REQ_SENT)
+ if (test_bit(FR_SENT, &req->flags))
queue_interrupt(fc, req);
spin_unlock(&fc->lock);
}
/* Only fatal signals may interrupt this */
block_sigs(&oldset);
err = wait_event_interruptible(req->waitq,
- req->state == FUSE_REQ_FINISHED);
+ test_bit(FR_FINISHED, &req->flags));
restore_sigs(&oldset);
if (!err)
spin_lock(&fc->lock);
/* Request is not yet in userspace, bail out */
- if (req->state == FUSE_REQ_PENDING) {
+ if (test_bit(FR_PENDING, &req->flags)) {
list_del(&req->list);
spin_unlock(&fc->lock);
__fuse_put_request(req);
* Either request is already in userspace, or it was forced.
* Wait it out.
*/
- wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+ wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
}
req = list_entry(fc->pending.next, struct fuse_req, list);
- req->state = FUSE_REQ_IO;
+ clear_bit(FR_PENDING, &req->flags);
list_move(&req->list, &fc->io);
in = &req->in;
if (!test_bit(FR_ISREPLY, &req->flags)) {
request_end(fc, req);
} else {
- req->state = FUSE_REQ_SENT;
+ set_bit(FR_SENT, &req->flags);
list_move_tail(&req->list, &fc->processing);
if (test_bit(FR_INTERRUPTED, &req->flags))
queue_interrupt(fc, req);
return nbytes;
}
- req->state = FUSE_REQ_IO;
+ clear_bit(FR_SENT, &req->flags);
list_move(&req->list, &fc->io);
req->out.h = oh;
set_bit(FR_LOCKED, &req->flags);
struct fuse_req *req;
req = list_entry(head->next, struct fuse_req, list);
req->out.h.error = -ECONNABORTED;
+ clear_bit(FR_PENDING, &req->flags);
+ clear_bit(FR_SENT, &req->flags);
request_end(fc, req);
spin_lock(&fc->lock);
}
#define FUSE_ARGS(args) struct fuse_args args = {}
-/** The request state */
-enum fuse_req_state {
- FUSE_REQ_PENDING = 0,
- FUSE_REQ_IO,
- FUSE_REQ_SENT,
- FUSE_REQ_FINISHED
-};
-
/** The request IO state (for asynchronous processing) */
struct fuse_io_priv {
int async;
* FR_ABORTED: the request was aborted
* FR_INTERRUPTED: the request has been interrupted
* FR_LOCKED: data is being copied to/from the request
+ * FR_PENDING: request is not yet in userspace
+ * FR_SENT: request is in userspace, waiting for an answer
+ * FR_FINISHED: request is finished
*/
enum fuse_req_flag {
FR_ISREPLY,
FR_ABORTED,
FR_INTERRUPTED,
FR_LOCKED,
+	FR_PENDING,	/* set at request init; cleared when moved to fc->io */
+	FR_SENT,	/* set once queued on fc->processing for userspace */
+	FR_FINISHED,	/* set by request_end(); waiters test this bit */
};
/**
/* Request flags, updated with test/set/clear_bit() */
unsigned long flags;
- /** State of the request */
- enum fuse_req_state state;
-
/** The request input */
struct fuse_in in;