*/
list_move(&elem->list_node, &tmp);
}
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
list_for_each_entry_safe(elem, next, &tmp, list_node) {
kmem_zone_free(xfs_mru_elem_zone, elem);
}
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
}
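Note: mutex_spinlock()/mutex_spinunlock() are thin XFS wrappers around the generic spinlock primitives (the second argument to mutex_spinunlock() is ignored), so each hunk in this patch should be a 1:1 swap with no change in locking behaviour; worth confirming against the xfs_linux.h definitions in this tree.

The hunk above also shows the reap pattern this file relies on: victims are spliced onto a private list while the lock is held, freed with the lock dropped so the free path never runs under the spinlock, and the lock is re-taken before returning, leaving the caller's lock state unchanged. A minimal sketch of that pattern, with illustrative names (my_cache, my_elem, my_clear_reap_list are not from the patch):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_cache {
	spinlock_t		lock;
	struct list_head	reap_list;
};

struct my_elem {
	struct list_head	list_node;
};

/* Called and returns with cache->lock held. */
static void my_clear_reap_list(struct my_cache *cache)
{
	struct my_elem		*elem, *next;
	struct list_head	tmp;

	INIT_LIST_HEAD(&tmp);

	/* Splice the victims to a private list under the lock. */
	list_for_each_entry_safe(elem, next, &cache->reap_list, list_node)
		list_move(&elem->list_node, &tmp);

	/* Drop the lock so the free path never runs under it. */
	spin_unlock(&cache->lock);
	list_for_each_entry_safe(elem, next, &tmp, list_node) {
		list_del(&elem->list_node);
		kfree(elem);
	}
	spin_lock(&cache->lock);
}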
/*
if (!mru || !mru->lists)
return;
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
next = _xfs_mru_cache_migrate(mru, jiffies);
_xfs_mru_cache_clear_reap_list(mru);
queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
}
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
}
int
if (!mru || !mru->lists)
return;
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
if (mru->queued) {
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
}
_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
_xfs_mru_cache_clear_reap_list(mru);
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
}
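In the flush path above, mru->lock is dropped around cancel_rearming_delayed_workqueue() because the reap work in the previous hunk takes the same lock; cancelling a self-rearming work item while holding a lock its work function needs could deadlock. The conversion changes only the lock primitives, not that ordering.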
void
elem->key = key;
elem->value = value;
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
radix_tree_insert(&mru->store, key, elem);
radix_tree_preload_end();
_xfs_mru_cache_list_insert(mru, elem);
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
return 0;
}
if (!mru || !mru->lists)
return NULL;
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
elem = radix_tree_delete(&mru->store, key);
if (elem) {
value = elem->value;
list_del(&elem->list_node);
}
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
if (elem)
kmem_zone_free(xfs_mru_elem_zone, elem);
if (!mru || !mru->lists)
return NULL;
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
elem = radix_tree_lookup(&mru->store, key);
if (elem) {
list_del(&elem->list_node);
_xfs_mru_cache_list_insert(mru, elem);
}
else
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
return elem ? elem->value : NULL;
}
if (!mru || !mru->lists)
return NULL;
- mutex_spinlock(&mru->lock);
+ spin_lock(&mru->lock);
elem = radix_tree_lookup(&mru->store, key);
if (!elem)
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
return elem ? elem->value : NULL;
}
xfs_mru_cache_done(
xfs_mru_cache_t *mru)
{
- mutex_spinunlock(&mru->lock, 0);
+ spin_unlock(&mru->lock);
}
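The lookup and peek hunks deliberately return with mru->lock still held when an element is found (the unlock sits on the miss path only); xfs_mru_cache_done() above is the matching release. A hedged usage sketch, assuming the key is an unsigned long as in the radix-tree calls above (my_use_value() is an illustrative placeholder, not from the patch):

	void	*value;

	value = xfs_mru_cache_lookup(mru, key);
	if (value) {
		my_use_value(value);		/* still under mru->lock; keep this short */
		xfs_mru_cache_done(mru);	/* releases the lock taken by lookup */
	}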