{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

-       BUG_ON(atomic_read(&gl->gl_revokes));
+       BUG_ON(test_bit(GLF_REVOKES, &gl->gl_flags));
        rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
        smp_mb();
        wake_up_glock(gl);
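
The assertion above is the flag-based equivalent of the old counter check: by the time a glock is freed it must have no revokes queued, and hence no revoke-held reference. The neighbouring rhashtable_remove_fast()/smp_mb()/wake_up_glock() sequence is untouched by this patch; as I read it, the full barrier publishes the hash-table removal before the wakeup, so a waiter that re-checks the table after waking cannot miss the removal. A minimal userspace sketch of that publish-then-wake pattern, in C11 atomics rather than the kernel primitives (all names below are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

atomic_bool in_table = true;   /* stands in for hash-table membership */
atomic_bool woken = false;     /* stands in for the waitqueue wakeup */

void freer(void)
{
        atomic_store_explicit(&in_table, false, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() analogue */
        atomic_store_explicit(&woken, true, memory_order_relaxed);
}

bool waiter_sees_removal(void)
{
        while (!atomic_load_explicit(&woken, memory_order_relaxed))
                ;       /* the kernel sleeps on a waitqueue instead of spinning */
        atomic_thread_fence(memory_order_seq_cst);
        /* the fences pair through 'woken', so this load observes the removal */
        return !atomic_load_explicit(&in_table, memory_order_relaxed);
}
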
                  state2str(gl->gl_target),
                  state2str(gl->gl_demote_state), dtime,
                  atomic_read(&gl->gl_ail_count),
-                 atomic_read(&gl->gl_revokes),
+                 test_bit(GLF_REVOKES, &gl->gl_flags) ? 1 : 0,
                  (int)gl->gl_lockref.count, gl->gl_hold_time);

        list_for_each_entry(gh, &gl->gl_holders, gh_list)
        GLF_OBJECT = 14, /* Used only for tracing */
        GLF_BLOCKING = 15,
        GLF_INODE_CREATING = 16, /* Inode creation occurring */
+       GLF_REVOKES = 17, /* Glock has revokes in queue */
};
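
Everything below manipulates the new flag with the kernel's atomic bitops, so their semantics are worth recalling: set_bit() and clear_bit() are atomic read-modify-write operations on a bitmap of unsigned longs (gl_flags is an unsigned long), and they imply no memory ordering by themselves; test_bit() is a plain read. A purely illustrative userspace model in C11:

#include <stdatomic.h>
#include <stdbool.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

void model_set_bit(int nr, _Atomic unsigned long *addr)
{
        atomic_fetch_or_explicit(&addr[BIT_WORD(nr)], BIT_MASK(nr),
                                 memory_order_relaxed);
}

void model_clear_bit(int nr, _Atomic unsigned long *addr)
{
        atomic_fetch_and_explicit(&addr[BIT_WORD(nr)], ~BIT_MASK(nr),
                                  memory_order_relaxed);
}

bool model_test_bit(int nr, _Atomic unsigned long *addr)
{
        return atomic_load_explicit(&addr[BIT_WORD(nr)],
                                    memory_order_relaxed) & BIT_MASK(nr);
}
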
struct gfs2_glock {
        struct list_head gl_lru;
        struct list_head gl_ail_list;
        atomic_t gl_ail_count;
-       atomic_t gl_revokes;
        struct delayed_work gl_work;
        union {
                /* For inode and iopen glocks only */
        gfs2_remove_from_ail(bd); /* drops ref on bh */
        bd->bd_bh = NULL;
        sdp->sd_log_num_revoke++;
-       if (atomic_inc_return(&gl->gl_revokes) == 1)
+       if (!test_bit(GLF_REVOKES, &gl->gl_flags)) {
+               set_bit(GLF_REVOKES, &gl->gl_flags);
                gfs2_glock_hold(gl);
+       }
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}
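
This hunk is the heart of the patch. The old code took a glock reference when gl_revokes went from 0 to 1 and dropped it when the counter returned to 0, so all the counter ever conveyed to its users was zero versus non-zero; a single flag bit plus one held reference encodes the same state. The test_bit()-then-set_bit() pair is not atomic, which is presumably fine because revoke additions are serialized against each other (they happen under the log lock) and the flag is only cleared once the revokes have been written. A self-contained sketch of the counter-to-flag equivalence, with illustrative names and an atomic test-and-set so it stands alone:

#include <stdatomic.h>

/* Illustrative stand-in for a glock; not the kernel structure. */
struct gl_sketch {
        atomic_ulong flags;     /* bit 0: revokes queued */
        atomic_int ref;         /* lockref analogue */
};
#define SK_REVOKES 0x1UL

/* First queued revoke sets the bit and takes the one reference. */
void sk_add_revoke(struct gl_sketch *gl)
{
        if (!(atomic_fetch_or(&gl->flags, SK_REVOKES) & SK_REVOKES))
                atomic_fetch_add(&gl->ref, 1);
}

/* All queued revokes written: clear the bit, drop the reference. */
void sk_revokes_done(struct gl_sketch *gl)
{
        if (atomic_fetch_and(&gl->flags, ~SK_REVOKES) & SK_REVOKES)
                atomic_fetch_sub(&gl->ref, 1);
}
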
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &sdp->sd_log_le_revoke;
-       struct gfs2_bufdata *bd;
-       struct gfs2_glock *gl;
+       struct gfs2_bufdata *bd, *tmp;

-       while (!list_empty(head)) {
-               bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
-               list_del_init(&bd->bd_list);
-               gl = bd->bd_gl;
-               if (atomic_dec_return(&gl->gl_revokes) == 0) {
-                       clear_bit(GLF_LFLUSH, &gl->gl_flags);
-                       gfs2_glock_queue_put(gl);
+       /*
+        * Glocks can be referenced repeatedly on the revoke list, but the
+        * list only holds one reference per glock.  All glocks on the list
+        * will have the GLF_REVOKES flag set initially.
+        */
+
+       list_for_each_entry_safe(bd, tmp, head, bd_list) {
+               struct gfs2_glock *gl = bd->bd_gl;
+
+               if (test_bit(GLF_REVOKES, &gl->gl_flags)) {
+                       /* Keep each glock on the list exactly once. */
+                       clear_bit(GLF_REVOKES, &gl->gl_flags);
+                       continue;
                }
+               list_del(&bd->bd_list);
+               kmem_cache_free(gfs2_bufdata_cachep, bd);
+       }
+       list_for_each_entry_safe(bd, tmp, head, bd_list) {
+               struct gfs2_glock *gl = bd->bd_gl;
+
+               list_del(&bd->bd_list);
                kmem_cache_free(gfs2_bufdata_cachep, bd);
+               clear_bit(GLF_LFLUSH, &gl->gl_flags);
+               gfs2_glock_queue_put(gl);
        }
+       /* the list is empty now */
}
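
To unpack the rewritten function: several bufdata entries on the revoke list may point at the same glock, yet the list holds only one reference per glock. Pass one consumes each glock's GLF_REVOKES flag so that exactly one entry per glock survives and the duplicates are freed; pass two then frees each survivor, clears GLF_LFLUSH, and drops that glock's single reference. A self-contained userspace demonstration of the two-pass pattern (plain pointers and illustrative names, not the kernel list API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct owner { bool flagged; int refs; };               /* glock analogue */
struct node { struct node *next; struct owner *owner; };/* bufdata analogue */

void two_pass_commit(struct node **head)
{
        struct node **pp, *n;

        /* Pass 1: keep the first node of each flagged owner, free the rest. */
        for (pp = head; (n = *pp) != NULL; ) {
                if (n->owner->flagged) {
                        n->owner->flagged = false;      /* kept exactly once */
                        pp = &n->next;
                } else {
                        *pp = n->next;                  /* duplicate entry */
                        free(n);
                }
        }
        /* Pass 2: one node per owner remains; do the cleanup exactly once. */
        while ((n = *head) != NULL) {
                *head = n->next;
                n->owner->refs--;       /* gfs2_glock_queue_put() analogue */
                free(n);
        }
}

int main(void)
{
        struct owner a = { true, 1 }, b = { true, 1 };
        struct owner *order[] = { &a, &b, &a };         /* a queued twice */
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->owner = order[i];
                n->next = head;
                head = n;
        }
        two_pass_commit(&head);
        printf("refs: a=%d b=%d\n", a.refs, b.refs);
        return 0;
}

This prints refs: a=0 b=0: owner a queued two entries, but its reference is dropped exactly once.
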
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
        INIT_LIST_HEAD(&gl->gl_lru);
        INIT_LIST_HEAD(&gl->gl_ail_list);
        atomic_set(&gl->gl_ail_count, 0);
-       atomic_set(&gl->gl_revokes, 0);
}
static void gfs2_init_gl_aspace_once(void *foo)
        truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
        truncate_inode_pages(&inode->i_data, 0);
-       if (atomic_read(&gl->gl_revokes) == 0) {
+       if (!test_bit(GLF_REVOKES, &gl->gl_flags)) {
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
                clear_bit(GLF_DIRTY, &gl->gl_flags);
        }
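
A closing observation rather than a problem: with this patch the "any revokes queued?" test is open-coded in three places (the BUG_ON in the glock free path, the glock dump, and the eviction path above). If a respin happens, a small helper might read better; the following is hypothetical, not part of the patch:

/* Hypothetical helper, not in this patch: */
static inline bool gfs2_glock_has_revokes(const struct gfs2_glock *gl)
{
        return test_bit(GLF_REVOKES, &gl->gl_flags);
}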