* - spin_unlock(x)
* - spin_unlock_bh(x)
* - spin_trylock(x)
- * - spin_is_locked(x)
+ * - assert_spin_locked(x)
*
* - spin_lock_irq(x)
* - spin_lock_irqsave(x, f)
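/*
 * Editor's illustration (not part of the patch): how the variants listed
 * above pair up in practice. "example_lock" and "example_usage" are
 * hypothetical names used only for this sketch.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_usage(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);   /* save + disable local IRQs */
	assert_spin_locked(&example_lock);         /* fails if the lock is not held */
	spin_unlock_irqrestore(&example_lock, flags);

	if (spin_trylock(&example_lock))           /* non-blocking attempt */
		spin_unlock(&example_lock);
}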
/** Check that the resource is locked; trigger an assertion failure if lr_lock is not held. */
static inline void check_res_locked(struct ldlm_resource *res)
{
- LASSERT(spin_is_locked(&res->lr_lock));
+ assert_spin_locked(&res->lr_lock);
}
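/*
 * Editor's sketch of why the conversion matters. On CONFIG_SMP=n builds
 * without CONFIG_DEBUG_SPINLOCK, spin_is_locked() always returns 0 (see
 * the comment in include/linux/spinlock_up.h), so the old assertion
 * tripped on every call even while lr_lock was held. The hypothetical
 * helper below shows the two forms side by side:
 */
static inline void demo_lock_assertions(struct ldlm_resource *res)
{
	/* old form: always fails on UP non-debug kernels */
	/* LASSERT(spin_is_locked(&res->lr_lock)); */

	/* new form: real check on SMP/debug builds, no-op on UP non-debug */
	assert_spin_locked(&res->lr_lock);
}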
struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
* \a nrq
* \param[in,out] nrq The request
*
- * \pre spin_is_locked(&svcpt->scp_req_lock)
+ * \pre assert_spin_locked(&svcpt->scp_req_lock)
*
* \see ptlrpc_nrs_req_stop_nolock()
*/
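/*
 * Illustrative caller (editor's sketch; the signature of
 * ptlrpc_nrs_req_stop_nolock() is assumed from the \see reference above):
 * the _nolock suffix means the caller must already hold scp_req_lock,
 * which is exactly what the \pre expresses.
 */
spin_lock(&svcpt->scp_req_lock);
ptlrpc_nrs_req_stop_nolock(req);	/* \pre satisfied */
spin_unlock(&svcpt->scp_req_lock);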
int i;
int rc = 0;
- LASSERT(spin_is_locked(&lsm->lsm_lock));
+ assert_spin_locked(&lsm->lsm_lock);
LASSERT(lsm->lsm_lock_owner == current_pid());
CDEBUG(D_INODE, "MDT ID "DOSTID" initial value: s="LPU64" m="LPU64
int stripe = 0;
__u64 kms;
- LASSERT(spin_is_locked(&lsm->lsm_lock));
+ assert_spin_locked(&lsm->lsm_lock);
LASSERT(lsm->lsm_lock_owner == current_pid());
if (shrink) {
struct cl_object_header *head;
head = cl_object_header(obj);
- LINVRNT(spin_is_locked(&head->coh_lock_guard));
+ assert_spin_locked(&head->coh_lock_guard);
CS_LOCK_INC(obj, lookup);
list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
int matched;
struct lu_object_header *top;
int result;
- LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+ assert_spin_locked(cl_object_attr_guard(obj));
top = obj->co_lu.lo_header;
result = 0;
struct lu_object_header *top;
int result;
- LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+ assert_spin_locked(cl_object_attr_guard(obj));
top = obj->co_lu.lo_header;
result = 0;
{
struct cl_page *page;
- LASSERT(spin_is_locked(&hdr->coh_page_guard));
+ assert_spin_locked(&hdr->coh_page_guard);
page = radix_tree_lookup(&hdr->coh_tree, index);
if (page != NULL)
static void osc_consume_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
+ assert_spin_locked(&cli->cl_loi_list_lock.lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_inc(&obd_dirty_pages);
cli->cl_dirty += PAGE_CACHE_SIZE;
static void osc_release_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
+ assert_spin_locked(&cli->cl_loi_list_lock.lock);
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
return;
}
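/*
 * Editor's sketch of the calling convention: both grant helpers assert
 * that cl_loi_list_lock is held, so call sites bracket them with the
 * client-obd lock wrappers (wrapper names as used elsewhere in this
 * driver; shown here for illustration only):
 */
client_obd_list_lock(&cli->cl_loi_list_lock);
osc_consume_write_grant(cli, pga);
/* ... queue pga for write-back while the grant is accounted ... */
client_obd_list_unlock(&cli->cl_loi_list_lock);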
static inline int osc_object_is_locked(struct osc_object *obj)
{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
return spin_is_locked(&obj->oo_lock);
+#else
+ /*
+ * spin_is_locked() always returns 0 when CONFIG_SMP and
+ * CONFIG_DEBUG_SPINLOCK are both disabled, so it cannot be used
+ * here. Returning 1 unconditionally is not ideal, but since this
+ * function is only used for assertions and sanity checks, it is
+ * acceptable.
+ */
+ return 1;
+#endif
}
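/*
 * Usage note (editor's sketch): unlike the straight LASSERT conversions
 * elsewhere in this patch, this helper returns a value, so it cannot be
 * replaced by assert_spin_locked() directly; hence the #ifdef above.
 * Callers wrap it in their own assertions, e.g. (hypothetical call site):
 */
static void osc_example_caller(struct osc_object *obj)
{
	LASSERT(osc_object_is_locked(obj));
	/* ... touch fields guarded by oo_lock ... */
}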
/*
*/
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
- LASSERT(spin_is_locked(&request->rq_import->imp_lock));
+ assert_spin_locked(&request->rq_import->imp_lock);
(void)__ptlrpc_req_finished(request, 1);
}
EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
bool skip_committed_list = true;
LASSERT(imp != NULL);
-
- LASSERT(spin_is_locked(&imp->imp_lock));
-
+ assert_spin_locked(&imp->imp_lock);
if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
imp->imp_generation == imp->imp_last_generation_checked) {
{
struct list_head *tmp;
- LASSERT(spin_is_locked(&imp->imp_lock));
+ assert_spin_locked(&imp->imp_lock);
if (req->rq_transno == 0) {
DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
static
void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
{
- LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
+ assert_spin_locked(&ctx->cc_sec->ps_lock);
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
LASSERT(!hlist_unhashed(&ctx->cc_cache));
__u32 idx = gmsg->gum_mechidx;
LASSERT(idx < MECH_MAX);
- LASSERT(spin_is_locked(&upcall_locks[idx]));
+ assert_spin_locked(&upcall_locks[idx]);
if (list_empty(&gmsg->gum_list))
return;
/* Must be called with imp_lock held! */
static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
{
- LASSERT(spin_is_locked(&imp->imp_lock));
+ assert_spin_locked(&imp->imp_lock);
CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
imp->imp_invalid = 1;
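/*
 * Caller sketch (editor's illustration): per the comment above and the
 * _and_unlock_ naming convention, the import lock is taken by the caller
 * and released inside the function:
 */
spin_lock(&imp->imp_lock);
ptlrpc_deactivate_and_unlock_import(imp);
/* imp_lock is no longer held here */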
{
LASSERT(policy != NULL);
LASSERT(info != NULL);
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
void ptlrpc_pinger_commit_expected(struct obd_import *imp)
{
ptlrpc_update_next_ping(imp, 1);
- LASSERT(spin_is_locked(&imp->imp_lock));
+ assert_spin_locked(&imp->imp_lock);
/*
* Avoid reading stale imp_connect_data. When not sure if pings are
* expected or not on next connection, we assume they are not and force
static inline void enc_pools_wakeup(void)
{
- LASSERT(spin_is_locked(&page_pools.epp_lock));
+ assert_spin_locked(&page_pools.epp_lock);
LASSERT(page_pools.epp_waitqlen >= 0);
if (unlikely(page_pools.epp_waitqlen)) {
void
ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
{
- LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
- LASSERT(spin_is_locked(&rs->rs_lock));
+ assert_spin_locked(&rs->rs_svcpt->scp_rep_lock);
+ assert_spin_locked(&rs->rs_lock);
LASSERT(rs->rs_difficult);
rs->rs_scheduled_ever = 1; /* flag any notification attempt */
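/*
 * Lock-ordering sketch (editor's illustration, modeled on call sites
 * such as reply_out_callback()): scp_rep_lock is taken before the
 * per-reply rs_lock, matching the order of the two assertions above:
 */
spin_lock(&rs->rs_svcpt->scp_rep_lock);
spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
spin_unlock(&rs->rs_lock);
spin_unlock(&rs->rs_svcpt->scp_rep_lock);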