wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
restore_sigs(&oldset);
spin_lock(&fuse_lock);
- if (req->state == FUSE_REQ_FINISHED)
+ if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
return;
- req->out.h.error = -EINTR;
- req->interrupted = 1;
+ if (!req->interrupted) {
+ req->out.h.error = -EINTR;
+ req->interrupted = 1;
+ }
if (req->locked) {
/* This is uninterruptible sleep, because data is
being copied to/from the buffers of req. During
goto err_finish;
spin_lock(&fuse_lock);
+ err = -ENOENT;
+ if (!fc->connected)
+ goto err_unlock;
+
req = request_find(fc, oh.unique);
err = -EINVAL;
if (!req)
return mask;
}
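
With the fc->connected check added above, a daemon that writes a reply after the connection has been aborted gets -ENOENT back, the same error it already sees when replying to an interrupted request. A minimal sketch of daemon-side handling, assuming an already-open /dev/fuse descriptor; send_reply() and its arguments are illustrative, not from the patch:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Illustrative daemon-side reply path: 'fd' is an open /dev/fuse
       descriptor, 'buf'/'len' hold a fuse_out_header plus payload. */
    int send_reply(int fd, const void *buf, size_t len)
    {
            ssize_t res = write(fd, buf, len);
            if (res == -1) {
                    if (errno == ENOENT) {
                            /* Request interrupted, or connection
                               aborted before the reply arrived */
                            return 0;
                    }
                    perror("write");
                    return -1;
            }
            return 0;
    }
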
-/* Abort all requests on the given list (pending or processing) */
+/*
+ * Abort all requests on the given list (pending or processing)
+ *
+ * This function releases and reacquires fuse_lock
+ */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
while (!list_empty(head)) {
}
}
+/*
+ * Abort requests under I/O
+ *
+ * The requests are set to interrupted and finished, and the request
+ * waiter is woken up. This will make request_wait_answer() wait
+ * until the request is unlocked and then return.
+ */
+static void end_io_requests(struct fuse_conn *fc)
+{
+ while (!list_empty(&fc->io)) {
+ struct fuse_req *req;
+ req = list_entry(fc->io.next, struct fuse_req, list);
+ req->interrupted = 1;
+ req->out.h.error = -ECONNABORTED;
+ req->state = FUSE_REQ_FINISHED;
+ list_del_init(&req->list);
+ wake_up(&req->waitq);
+ }
+}
+
+/*
+ * Abort all requests.
+ *
+ * Emergency exit in case of a malicious or accidental deadlock, or
+ * just a hung filesystem.
+ *
+ * The same effect is usually achievable through killing the
+ * filesystem daemon and all users of the filesystem. The exception
+ * is the combination of an asynchronous request and the tricky
+ * deadlock (see Documentation/filesystems/fuse.txt).
+ *
+ * During the aborting, progression of requests from the pending and
+ * processing lists onto the io list, and progression of new requests
+ * onto the pending list are prevented by fc->connected being false.
+ *
+ * Progression of requests under I/O to the processing list is
+ * prevented by the req->interrupted flag being true for these
+ * requests. For this reason requests on the io list must be aborted
+ * first.
+ */
+void fuse_abort_conn(struct fuse_conn *fc)
+{
+ spin_lock(&fuse_lock);
+ if (fc->connected) {
+ fc->connected = 0;
+ end_io_requests(fc);
+ end_requests(fc, &fc->pending);
+ end_requests(fc, &fc->processing);
+ wake_up_all(&fc->waitq);
+ }
+ spin_unlock(&fuse_lock);
+}
+
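
The locked/interrupted handoff that end_io_requests() relies on can be pictured with a small userspace analogue. A minimal sketch, assuming pthreads; toy_req and friends are illustrative names, not from the patch. The aborter marks the request finished and wakes the waiter, but the waiter still blocks until the copier drops 'locked', mirroring request_wait_answer()'s uninterruptible sleep while data is being copied:

    #include <pthread.h>
    #include <stdbool.h>

    /* Illustrative stand-in for struct fuse_req: 'locked' means a
       copier is using the request's buffers, 'finished' is the
       abort signal. */
    struct toy_req {
            pthread_mutex_t lock;
            pthread_cond_t waitq;
            bool locked;
            bool finished;
    };

    /* Aborter: like end_io_requests(), mark the request finished
       and wake the waiter, but leave 'locked' alone. */
    void toy_abort(struct toy_req *req)
    {
            pthread_mutex_lock(&req->lock);
            req->finished = true;
            pthread_cond_broadcast(&req->waitq);
            pthread_mutex_unlock(&req->lock);
    }

    /* Copier: dropping 'locked' must also wake the waiter. */
    void toy_unlock(struct toy_req *req)
    {
            pthread_mutex_lock(&req->lock);
            req->locked = false;
            pthread_cond_broadcast(&req->waitq);
            pthread_mutex_unlock(&req->lock);
    }

    /* Waiter: like request_wait_answer(), do not return while a
       copier still holds the request, even after it is finished. */
    void toy_wait_answer(struct toy_req *req)
    {
            pthread_mutex_lock(&req->lock);
            while (!req->finished || req->locked)
                    pthread_cond_wait(&req->waitq, &req->lock);
            pthread_mutex_unlock(&req->lock);
    }
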
static int fuse_dev_release(struct inode *inode, struct file *file)
{
struct fuse_conn *fc;
/** Mount is active */
unsigned mounted : 1;
- /** Connection established, cleared on umount and device
- release */
+ /** Connection established, cleared on umount, connection
+ abort and device release */
unsigned connected : 1;
/** Connection failed (version mismatch) */
*/
void fuse_release_background(struct fuse_req *req);
+/**
+ * Abort all requests.
+ */
+void fuse_abort_conn(struct fuse_conn *fc);
+
/**
* Get the attributes of a file
*/
return inode;
}
+static void fuse_umount_begin(struct super_block *sb)
+{
+ fuse_abort_conn(get_fuse_conn_super(sb));
+}
+
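
Wiring up ->umount_begin is what gives forced unmount its teeth here: `umount -f` on a hung fuse mount now aborts the connection instead of blocking on stuck requests. From userspace the hook is reached via MNT_FORCE; a minimal sketch, with "/mnt/fuse" as an illustrative mountpoint:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* MNT_FORCE makes the kernel call sb->s_op->umount_begin(),
               which for fuse is now fuse_umount_begin() ->
               fuse_abort_conn(). */
            if (umount2("/mnt/fuse", MNT_FORCE) == -1) {
                    perror("umount2");
                    return 1;
            }
            return 0;
    }
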
static void fuse_put_super(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
.read_inode = fuse_read_inode,
.clear_inode = fuse_clear_inode,
.put_super = fuse_put_super,
+ .umount_begin = fuse_umount_begin,
.statfs = fuse_statfs,
.show_options = fuse_show_options,
};
return sprintf(page, "%i\n", atomic_read(&fc->num_waiting));
}
+static ssize_t fuse_conn_abort_store(struct fuse_conn *fc, const char *page,
+ size_t count)
+{
+ fuse_abort_conn(fc);
+ return count;
+}
+
static struct fuse_conn_attr fuse_conn_waiting =
__ATTR(waiting, 0400, fuse_conn_waiting_show, NULL);
+static struct fuse_conn_attr fuse_conn_abort =
+ __ATTR(abort, 0600, NULL, fuse_conn_abort_store);
static struct attribute *fuse_conn_attrs[] = {
&fuse_conn_waiting.attr,
+ &fuse_conn_abort.attr,
NULL,
};
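
With the 0600 abort attribute registered, any write to it tears down the connection. A minimal sketch of triggering it from userspace; the path is illustrative, since the real location depends on where the fuse_conn kobject is registered, which is outside this excerpt:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Illustrative sysfs path for one connection's attributes */
            const char *path = "/sys/fs/fuse/connections/0/abort";
            int fd = open(path, O_WRONLY);
            if (fd == -1) {
                    perror("open");
                    return 1;
            }
            /* fuse_conn_abort_store() ignores the data: any write
               aborts the connection */
            if (write(fd, "1\n", 2) != 2) {
                    perror("write");
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }
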