/** @} lib */
+
+/* l_wait_event_abortable() is a bit like wait_event_killable()
+ * except that only a fixed set of signals can abort the wait:
+ * LUSTRE_FATAL_SIGS
+ */
+#define l_wait_event_abortable(wq, condition) \
+({ \
+ sigset_t __blocked; \
+ int __ret = 0; \
+ __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
+ __ret = wait_event_interruptible(wq, condition); \
+ cfs_restore_sigs(__blocked); \
+ __ret; \
+})
+
+#define l_wait_event_abortable_exclusive(wq, condition) \
+({ \
+ sigset_t __blocked; \
+ int __ret = 0; \
+ __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
+ __ret = wait_event_interruptible_exclusive(wq, condition); \
+ cfs_restore_sigs(__blocked); \
+ __ret; \
+})
#endif /* _LUSTRE_LIB_H */
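
For illustration only (not part of the patch): a minimal sketch of how a caller might use the new wrapper, assuming a hypothetical wait queue and flag. Because the wrapper is built on wait_event_interruptible() with every signal outside LUSTRE_FATAL_SIGS blocked, a wait aborted by a fatal signal returns -ERESTARTSYS.

/* Illustrative sketch only; 'demo_waitq' and 'demo_ready' are hypothetical. */
#include <linux/wait.h>
#include <lustre_lib.h>		/* assumed include path for l_wait_event_abortable() */

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static bool demo_ready;

static int demo_wait_for_ready(void)
{
	int rc;

	/* Only LUSTRE_FATAL_SIGS (SIGKILL, SIGINT, SIGTERM, ...) can
	 * interrupt this sleep; all other signals are blocked around
	 * the underlying wait_event_interruptible() call.
	 */
	rc = l_wait_event_abortable(demo_waitq, demo_ready);
	if (rc)
		return rc;	/* -ERESTARTSYS: aborted by a fatal signal */

	return 0;		/* demo_ready became true */
}
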
ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
if (atomic_read(&ns->ns_bref) > 0) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
int rc;
CDEBUG(D_DLMTRACE,
ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
force_wait:
if (force)
- lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
- MSEC_PER_SEC) / 4, NULL, NULL);
-
- rc = l_wait_event(ns->ns_waitq,
- atomic_read(&ns->ns_bref) == 0, &lwi);
+ rc = wait_event_idle_timeout(ns->ns_waitq,
+ atomic_read(&ns->ns_bref) == 0,
+ obd_timeout * HZ / 4) ? 0 : -ETIMEDOUT;
+ else
+ rc = l_wait_event_abortable(ns->ns_waitq,
+ atomic_read(&ns->ns_bref) == 0);
/* Forced cleanups should be able to reclaim all references,
* so it's safe to wait forever... we can't leak locks...
}
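
As an aside (illustration only): wait_event_idle_timeout() returns 0 when the timeout elapses and the remaining jiffies (at least 1) when the condition becomes true, so the "? 0 : -ETIMEDOUT" expression above turns a timeout into an error code the caller can test. A sketch of the same pattern with hypothetical names:

/* Illustrative sketch only; 'demo_waitq' and 'demo_refs' are hypothetical. */
#include <linux/wait.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static atomic_t demo_refs = ATOMIC_INIT(1);

static int demo_wait_for_drain(unsigned long timeout_jiffies)
{
	/* Returns remaining jiffies (>= 1) if demo_refs drained in time,
	 * 0 if the timeout expired first.
	 */
	if (wait_event_idle_timeout(demo_waitq,
				    atomic_read(&demo_refs) == 0,
				    timeout_jiffies))
		return 0;

	return -ETIMEDOUT;
}
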
/* Wait for unstable pages to be committed to stable storage */
- if (!force) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
- rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
- !atomic_long_read(&sbi->ll_cache->ccc_unstable_nr),
- &lwi);
- }
+ if (!force)
+ rc = l_wait_event_abortable(sbi->ll_cache->ccc_unstable_waitq,
+ !atomic_long_read(&sbi->ll_cache->ccc_unstable_nr));
ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
- if (!force && rc != -EINTR)
+ if (!force && rc != -ERESTARTSYS)
LASSERTF(!ccc_count, "count: %li\n", ccc_count);
/* We need to set force before the lov_disconnect in
int obd_get_request_slot(struct client_obd *cli)
{
struct obd_request_slot_waiter orsw;
- struct l_wait_info lwi;
int rc;
spin_lock(&cli->cl_loi_list_lock);
orsw.orsw_signaled = false;
spin_unlock(&cli->cl_loi_list_lock);
- lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(orsw.orsw_waitq,
- obd_request_slot_avail(cli, &orsw) ||
- orsw.orsw_signaled,
- &lwi);
+ rc = l_wait_event_abortable(orsw.orsw_waitq,
+ obd_request_slot_avail(cli, &orsw) ||
+ orsw.orsw_signaled);
/*
* Here, we must take the lock to avoid the on-stack 'orsw' to be
int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
{
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
struct obd_llog_group *olg;
int rc, idx;
CERROR("Error %d while cleaning up ctxt %p\n",
rc, ctxt);
- l_wait_event(olg->olg_waitq,
- llog_group_ctxt_null(olg, idx), &lwi);
+ l_wait_event_abortable(olg->olg_waitq,
+ llog_group_ctxt_null(olg, idx));
return rc;
}
static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
struct osc_page *opg)
{
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
struct osc_io *oio = osc_env_io(env);
int rc = 0;
cond_resched();
- rc = l_wait_event(osc_lru_waitq,
- atomic_long_read(cli->cl_lru_left) > 0,
- &lwi);
+ rc = l_wait_event_abortable(osc_lru_waitq,
+ atomic_long_read(cli->cl_lru_left) > 0);
if (rc < 0)
break;
req->rq_interpret_reply = osc_destroy_interpret;
if (!osc_can_send_destroy(cli)) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
/*
* Wait until the number of on-going destroy RPCs drops
* under max_rpc_in_flight
*/
- l_wait_event_exclusive(cli->cl_destroy_waitq,
- osc_can_send_destroy(cli), &lwi);
+ l_wait_event_abortable_exclusive(cli->cl_destroy_waitq,
+ osc_can_send_destroy(cli));
}
/* Do not wait for response */
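
Closing illustration (not part of the patch): l_wait_event_abortable_exclusive() adds the caller to the wait queue as an exclusive waiter, so a plain wake_up() releases at most one sleeping sender at a time, which matches the throttling described in the comment above. A sketch with hypothetical names:

/* Illustrative sketch only; all names here are hypothetical. */
#include <linux/wait.h>
#include <linux/atomic.h>
#include <lustre_lib.h>		/* assumed home of l_wait_event_abortable_exclusive() */

static DECLARE_WAIT_QUEUE_HEAD(demo_destroy_waitq);
static atomic_t demo_in_flight = ATOMIC_INIT(0);
#define DEMO_MAX_IN_FLIGHT	8

static void demo_throttled_send(void)
{
	/* Each sender sleeps as an exclusive waiter, so a completion path
	 * doing wake_up(&demo_destroy_waitq) wakes one sender rather than
	 * all of them (return value ignored here, as in the caller above).
	 */
	l_wait_event_abortable_exclusive(demo_destroy_waitq,
					 atomic_read(&demo_in_flight) <
					 DEMO_MAX_IN_FLIGHT);
	atomic_inc(&demo_in_flight);
	/* ... send the RPC; its completion handler would do:
	 * atomic_dec(&demo_in_flight); wake_up(&demo_destroy_waitq);
	 */
}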