From: Anatol P. <ana...@gm...> - 2013-03-25 14:41:58
Hi, ok, here is the promised patch. Most of it is moving the "abort io" and "abort sent request" logic into separate functions, so they can be used in both the "abort connection" and "fatal signal" use-cases.

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 11dfa0c..47c734e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -25,6 +25,8 @@ MODULE_ALIAS("devname:fuse");
 
 static struct kmem_cache *fuse_req_cachep;
 
+static void request_abort(struct fuse_conn *fc, struct fuse_req *req);
+
 static struct fuse_conn *fuse_get_conn(struct file *file)
 {
 	/*
@@ -98,31 +100,11 @@ void fuse_request_free(struct fuse_req *req)
 	kmem_cache_free(fuse_req_cachep, req);
 }
 
-static void block_sigs(sigset_t *oldset)
-{
-	sigset_t mask;
-
-	siginitsetinv(&mask, sigmask(SIGKILL));
-	sigprocmask(SIG_BLOCK, &mask, oldset);
-}
-
-static void restore_sigs(sigset_t *oldset)
-{
-	sigprocmask(SIG_SETMASK, oldset, NULL);
-}
-
 static void __fuse_get_request(struct fuse_req *req)
 {
 	atomic_inc(&req->count);
 }
 
-/* Must be called with > 1 refcount */
-static void __fuse_put_request(struct fuse_req *req)
-{
-	BUG_ON(atomic_read(&req->count) < 2);
-	atomic_dec(&req->count);
-}
-
 static void fuse_req_init_context(struct fuse_req *req)
 {
 	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
@@ -133,14 +115,11 @@ static void fuse_req_init_context(struct fuse_req *req)
 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
 {
 	struct fuse_req *req;
-	sigset_t oldset;
 	int intr;
 	int err;
 
 	atomic_inc(&fc->num_waiting);
-	block_sigs(&oldset);
-	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
-	restore_sigs(&oldset);
+	intr = wait_event_killable(fc->blocked_waitq, !fc->blocked);
 	err = -EINTR;
 	if (intr)
 		goto out;
@@ -368,6 +347,23 @@ __acquires(fc->lock)
 	spin_lock(&fc->lock);
 }
 
+static int wait_answer_killable(struct fuse_conn *fc,
+				struct fuse_req *req)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+	int ret;
+
+	if (fatal_signal_pending(current))
+		return -ERESTARTSYS;
+
+	spin_unlock(&fc->lock);
+	ret = wait_event_killable(req->waitq, req->state == FUSE_REQ_FINISHED);
+	spin_lock(&fc->lock);
+
+	return ret;
+}
+
 static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
 {
 	list_add_tail(&req->intr_entry, &fc->interrupts);
@@ -379,6 +375,7 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 __releases(fc->lock)
 __acquires(fc->lock)
 {
+	int res;
 	if (!fc->no_interrupt) {
 		/* Any signal may interrupt this */
 		wait_answer_interruptible(fc, req);
@@ -393,41 +390,15 @@ __acquires(fc->lock)
 			queue_interrupt(fc, req);
 	}
 
-	if (!req->force) {
-		sigset_t oldset;
-
-		/* Only fatal signals may interrupt this */
-		block_sigs(&oldset);
-		wait_answer_interruptible(fc, req);
-		restore_sigs(&oldset);
-
-		if (req->aborted)
-			goto aborted;
-		if (req->state == FUSE_REQ_FINISHED)
-			return;
-
-		/* Request is not yet in userspace, bail out */
-		if (req->state == FUSE_REQ_PENDING) {
-			list_del(&req->list);
-			__fuse_put_request(req);
-			req->out.h.error = -EINTR;
-			return;
-		}
-	}
-
-	/*
-	 * Either request is already in userspace, or it was forced.
-	 * Wait it out.
-	 */
-	spin_unlock(&fc->lock);
-	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-	spin_lock(&fc->lock);
-
-	if (!req->aborted)
+	/* Only fatal signals may interrupt this */
+	res = wait_answer_killable(fc, req);
+	if (!res)
 		return;
 
-	aborted:
-	BUG_ON(req->state != FUSE_REQ_FINISHED);
+	request_abort(fc, req);
+
+aborted:
+	BUG_ON(!req->aborted);
 	if (req->locked) {
 		/* This is uninterruptible sleep, because data is
 		   being copied to/from the buffers of req.  During
@@ -1982,6 +1953,50 @@ __acquires(fc->lock)
 	}
 }
 
+static void end_io_request(struct fuse_conn *fc, struct fuse_req *req)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+
+	req->aborted = 1;
+	req->state = FUSE_REQ_FINISHED;
+	list_del_init(&req->list);
+	wake_up(&req->waitq);
+	if (end) {
+		req->end = NULL;
+		__fuse_get_request(req);
+		spin_unlock(&fc->lock);
+		wait_event(req->waitq, !req->locked);
+		end(fc, req);
+		fuse_put_request(fc, req);
+		spin_lock(&fc->lock);
+	}
+}
+
+static void request_abort(struct fuse_conn *fc, struct fuse_req *req)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+	switch (req->state) {
+	case FUSE_REQ_READING:
+	case FUSE_REQ_WRITING:
+		end_io_request(fc, req);
+		goto end;
+	case FUSE_REQ_PENDING:
+	case FUSE_REQ_SENT:
+		request_end(fc, req);
+		spin_lock(&fc->lock);
+		goto end;
+	default:
+		BUG();
+	}
+
+end:
+	req->aborted = 1;
+	req->out.h.error = -EINTR;
+}
+
 /*
  * Abort requests under I/O
  *
@@ -2000,22 +2015,9 @@ __acquires(fc->lock)
 	while (!list_empty(&fc->io)) {
 		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
-		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
 
-		req->aborted = 1;
 		req->out.h.error = -ECONNABORTED;
-		req->state = FUSE_REQ_FINISHED;
-		list_del_init(&req->list);
-		wake_up(&req->waitq);
-		if (end) {
-			req->end = NULL;
-			__fuse_get_request(req);
-			spin_unlock(&fc->lock);
-			wait_event(req->waitq, !req->locked);
-			end(fc, req);
-			fuse_put_request(fc, req);
-			spin_lock(&fc->lock);
-		}
+		end_io_request(fc, req);
 	}
 }
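Background on the signal-handling change at the core of this patch: the old code simulated a killable sleep by blocking every signal except SIGKILL around wait_event_interruptible(), while the new code gets the equivalent behavior directly from wait_event_killable(), which sleeps in TASK_KILLABLE and is woken only by fatal signals. Below is a minimal sketch of the two styles side by side (illustration only, not part of the patch; wait_old_style()/wait_new_style() are made-up names, and the sketch reuses the in-kernel sigprocmask()/siginitsetinv() helpers exactly as the removed block_sigs()/restore_sigs() did):

#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/wait.h>

/* Old style: mask out every signal except SIGKILL, do an
 * interruptible wait, then restore the caller's signal mask.
 * Only a kill can now produce -ERESTARTSYS.
 */
static int wait_old_style(wait_queue_head_t *wq, int *done)
{
	sigset_t mask, oldset;
	int intr;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, &oldset);
	intr = wait_event_interruptible(*wq, *done);
	sigprocmask(SIG_SETMASK, &oldset, NULL);
	return intr;
}

/* New style: wait_event_killable() sleeps in TASK_KILLABLE, so only
 * a fatal signal wakes it -- no signal-mask juggling required.
 */
static int wait_new_style(wait_queue_head_t *wq, int *done)
{
	return wait_event_killable(*wq, *done);
}

Both variants end the sleep only when the process is being killed; the killable one just avoids mutating the task's signal mask, which is why the patch can delete block_sigs()/restore_sigs() entirely.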