struct obd_import *imp, int force);
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
-void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client);
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns);
int ldlm_proc_setup(void);
#define MAX_STRING_SIZE 128
-extern atomic_t ldlm_srv_namespace_nr;
-extern atomic_t ldlm_cli_namespace_nr;
+extern int ldlm_srv_namespace_nr;
+extern int ldlm_cli_namespace_nr;
extern struct mutex ldlm_srv_namespace_lock;
extern struct list_head ldlm_srv_namespace_list;
extern struct mutex ldlm_cli_namespace_lock;
-extern struct list_head ldlm_cli_namespace_list;
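+/* Client namespaces are kept on two lists: active ones (those that
+ * currently hold resources) and inactive (empty) ones. */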
+extern struct list_head ldlm_cli_active_namespace_list;
+extern struct list_head ldlm_cli_inactive_namespace_list;
-static inline atomic_t *ldlm_namespace_nr(ldlm_side_t client)
+static inline int ldlm_namespace_nr_read(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_nr : &ldlm_cli_namespace_nr;
+ ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
+}
+
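+/* The namespace counters are only updated with ldlm_namespace_lock()
+ * held, so plain ints are sufficient here and no atomics are needed. */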
+static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
+{
+ if (client == LDLM_NAMESPACE_SERVER)
+ ldlm_srv_namespace_nr++;
+ else
+ ldlm_cli_namespace_nr++;
+}
+
+static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
+{
+ if (client == LDLM_NAMESPACE_SERVER)
+ ldlm_srv_namespace_nr--;
+ else
+ ldlm_cli_namespace_nr--;
}
static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_list : &ldlm_cli_namespace_list;
+ &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
+}
+
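+/* The server side has no inactive list, so return the regular list. */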
+static inline struct list_head *ldlm_namespace_inactive_list(ldlm_side_t client)
+{
+ return client == LDLM_NAMESPACE_SERVER ?
+ &ldlm_srv_namespace_list : &ldlm_cli_inactive_namespace_list;
}
static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
}
+/* ns_bref is the number of resources in this namespace, with the notable
+ * exception of quota namespaces, which keep a refcount of 1 even when
+ * empty. */
+static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
+{
+ return atomic_read(&ns->ns_bref) == 0;
+}
+
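+/* The helpers below must be called with ldlm_namespace_lock(client) held. */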
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *, ldlm_side_t);
+struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t);
+
/* ldlm_request.c */
/* Cancel LRU flag; it indicates that we cancel aged locks. */
enum {
{
int total = 0, cached = 0, nr_ns;
struct ldlm_namespace *ns;
+ struct ldlm_namespace *ns_old = NULL; /* loop detection */
void *cookie;
if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
/*
* Find out how many resources we may release.
*/
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+ for (nr_ns = ldlm_namespace_nr_read(client);
nr_ns > 0; nr_ns--)
{
mutex_lock(ldlm_namespace_lock(client));
return 0;
}
ns = ldlm_namespace_first_locked(client);
+
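+ /* Loop detection: stop once we see the first namespace again. */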
+ if (ns == ns_old) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+
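+ /* Park empty namespaces on the inactive list so that ldlm_poold
+ * does not keep revisiting them. */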
+ if (ldlm_ns_empty(ns)) {
+ ldlm_namespace_move_to_inactive_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+ continue;
+ }
+
+ if (ns_old == NULL)
+ ns_old = ns;
+
ldlm_namespace_get(ns);
- ldlm_namespace_move_locked(ns, client);
+ ldlm_namespace_move_to_active_locked(ns, client);
mutex_unlock(ldlm_namespace_lock(client));
total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
ldlm_namespace_put(ns);
/*
- * Shrink at least ldlm_namespace_nr(client) namespaces.
+ * Shrink at least ldlm_namespace_nr_read(client) namespaces.
*/
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+ for (nr_ns = ldlm_namespace_nr_read(client) - nr_ns;
nr_ns > 0; nr_ns--)
{
int cancel, nr_locks;
}
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
- ldlm_namespace_move_locked(ns, client);
+ ldlm_namespace_move_to_active_locked(ns, client);
mutex_unlock(ldlm_namespace_lock(client));
nr_locks = ldlm_pool_granted(&ns->ns_pool);
{
__u32 nr_l = 0, nr_p = 0, l;
struct ldlm_namespace *ns;
+ struct ldlm_namespace *ns_old = NULL;
int nr, equal = 0;
/*
* for _all_ pools.
*/
l = LDLM_POOL_HOST_L /
- atomic_read(
- ldlm_namespace_nr(client));
+ ldlm_namespace_nr_read(client);
} else {
/*
* All the rest of greedy pools will have
* all locks in equal parts.
*/
l = (LDLM_POOL_HOST_L - nr_l) /
- (atomic_read(
- ldlm_namespace_nr(client)) -
+ (ldlm_namespace_nr_read(client) -
nr_p);
}
ldlm_pool_setup(&ns->ns_pool, l);
/*
- * Recalc at least ldlm_namespace_nr(client) namespaces.
+ * Recalc at least ldlm_namespace_nr_read(client) namespaces.
*/
- for (nr = atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
+ for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
int skip;
/*
* Lock the list, get first @ns in the list, getref, move it
}
ns = ldlm_namespace_first_locked(client);
+ if (ns_old == ns) { /* Full pass complete */
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+
+ /* We got an empty namespace, so move it back to the inactive
+ * list.
+ * The race with parallel resource creation is fine:
+ * - If they do namespace_get() before our check, the emptiness
+ * check fails and they move this item to the end of the list
+ * anyway.
+ * - If we do the check and then they do namespace_get(), we move
+ * the namespace to inactive and they move it back to active
+ * (serialised by the lock, so no clash there).
+ */
+ if (ldlm_ns_empty(ns)) {
+ ldlm_namespace_move_to_inactive_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+ continue;
+ }
+
+ if (ns_old == NULL)
+ ns_old = ns;
+
spin_lock(&ns->ns_lock);
/*
* skip ns which is being freed, and we don't want to increase
}
spin_unlock(&ns->ns_lock);
- ldlm_namespace_move_locked(ns, client);
+ ldlm_namespace_move_to_active_locked(ns, client);
mutex_unlock(ldlm_namespace_lock(client));
/*
struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
-atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0);
-atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0);
+int ldlm_srv_namespace_nr = 0;
+int ldlm_cli_namespace_nr = 0;
struct mutex ldlm_srv_namespace_lock;
LIST_HEAD(ldlm_srv_namespace_list);
struct mutex ldlm_cli_namespace_lock;
-LIST_HEAD(ldlm_cli_namespace_list);
+/* Client namespaces that have active resources in them.
+ * Once all resources go away, ldlm_poold moves such namespaces to the
+ * inactive list. */
+LIST_HEAD(ldlm_cli_active_namespace_list);
+/* Client namespaces that don't have any locks in them */
+LIST_HEAD(ldlm_cli_inactive_namespace_list);
proc_dir_entry_t *ldlm_type_proc_dir = NULL;
proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
GOTO(out_hash, rc);
}
- idx = atomic_read(ldlm_namespace_nr(client));
+ idx = ldlm_namespace_nr_read(client);
rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
if (rc) {
CERROR("Can't initialize lock pool, rc %d\n", rc);
}
EXPORT_SYMBOL(ldlm_namespace_get);
+/* This is only for callers that care about the resulting refcount. */
+int ldlm_namespace_get_return(struct ldlm_namespace *ns)
+{
+ return atomic_inc_return(&ns->ns_bref);
+}
+
void ldlm_namespace_put(struct ldlm_namespace *ns)
{
if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
{
mutex_lock(ldlm_namespace_lock(client));
LASSERT(list_empty(&ns->ns_list_chain));
- list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
- atomic_inc(ldlm_namespace_nr(client));
+ list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
+ ldlm_namespace_nr_inc(client);
mutex_unlock(ldlm_namespace_lock(client));
}
* using list_empty(&ns->ns_list_chain). This is why it is
* important to use list_del_init() here. */
list_del_init(&ns->ns_list_chain);
- atomic_dec(ldlm_namespace_nr(client));
+ ldlm_namespace_nr_dec(client);
mutex_unlock(ldlm_namespace_lock(client));
}
/** Should be called with ldlm_namespace_lock(client) taken. */
-void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client)
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
+ ldlm_side_t client)
{
LASSERT(!list_empty(&ns->ns_list_chain));
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
}
+/** Should be called with ldlm_namespace_lock(client) taken. */
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
+ ldlm_side_t client)
+{
+ LASSERT(!list_empty(&ns->ns_list_chain));
+ LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
+ list_move_tail(&ns->ns_list_chain,
+ ldlm_namespace_inactive_list(client));
+}
+
/** Should be called with ldlm_namespace_lock(client) taken. */
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
{
struct ldlm_resource *res;
cfs_hash_bd_t bd;
__u64 version;
+ int ns_refcount = 0;
LASSERT(ns != NULL);
LASSERT(parent == NULL);
/* We won! Let's add the resource. */
cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
if (cfs_hash_bd_count_get(&bd) == 1)
- ldlm_namespace_get(ns);
+ ns_refcount = ldlm_namespace_get_return(ns);
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
/* We create resource with locked lr_lvb_mutex. */
mutex_unlock(&res->lr_lvb_mutex);
+ /* Let's see if we happened to be the very first resource in this
+ * namespace. If so, and this is a client namespace, we need to move
+ * the namespace into the active namespaces list to be patrolled by
+ * the ldlm_poold.
+ * A notable exception is quota namespaces: qsd_lib.c already took a
+ * namespace reference for them, so they do not participate in any of
+ * this, which is fine since we have no business cancelling quota
+ * locks anyway. */
+ if (ns_is_client(ns) && ns_refcount == 1) {
+ mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+ ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
+ mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+ }
+
return res;
}
EXPORT_SYMBOL(ldlm_resource_get);