return 0;
}
+/**
+ * __bio_associate_blkg_from_css - internal blkg association function
+ * @bio: target bio
+ * @css: target css
+ *
+ * This is the core association function that all association paths rely on.
+ * A blkg reference is taken which is released upon freeing of the bio.
+ */
static int __bio_associate_blkg_from_css(struct bio *bio,
struct cgroup_subsys_state *css)
{
+ struct request_queue *q = bio->bi_disk->queue;
struct blkcg_gq *blkg;
+ int ret;
rcu_read_lock();
- blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue);
+ if (!css || !css->parent)
+ blkg = q->root_blkg;
+ else
+ blkg = blkg_lookup_create(css_to_blkcg(css), q);
- rcu_read_unlock();
+ ret = bio_associate_blkg(bio, blkg);
- return bio_associate_blkg(bio, blkg);
+ rcu_read_unlock();
+ return ret;
}
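For reference, the blkg reference taken here is paired with the put on the bio free path; a rough lifetime sketch (the free-path call chain is an assumption, only the blkg_put() itself appears in this patch):

    /*
     * Association (this patch):
     *   __bio_associate_blkg_from_css(bio, css)
     *     -> bio_associate_blkg(bio, blkg)           takes a blkg reference
     *
     * Release (assumed free path):
     *   bio_put(bio) -> bio_free() -> bio_disassociate_task(bio)
     *     -> blkg_put(bio->bi_blkg)                  drops that reference
     */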
/**
* @css: target css
*
* Associate @bio with the blkg found by combining the css's blkg and the
- * request_queue of the @bio. This takes a reference on the css that will
- * be put upon freeing of @bio.
+ * request_queue of the @bio. This falls back to the queue's root_blkg if
+ * the association fails with the css.
*/
int bio_associate_blkg_from_css(struct bio *bio,
struct cgroup_subsys_state *css)
{
- css_get(css);
+ if (unlikely(bio->bi_blkg))
+ return -EBUSY;
return __bio_associate_blkg_from_css(bio, css);
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
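A hypothetical caller sketch (not part of the patch; the function name is invented for illustration, the bio helpers are standard, and @css is assumed to have been obtained and pinned by the caller, e.g. via cgroup_get_e_css()):

    /* Hypothetical example: charge a page write to a caller-chosen cgroup. */
    static void example_write_for_css(struct block_device *bdev, sector_t sector,
                                      struct page *page,
                                      struct cgroup_subsys_state *css)
    {
            struct bio *bio = bio_alloc(GFP_NOIO, 1);

            bio_set_dev(bio, bdev);             /* device must be set first */
            bio->bi_iter.bi_sector = sector;
            bio->bi_opf = REQ_OP_WRITE;
            bio_add_page(bio, page, PAGE_SIZE, 0);

            /* Returns -EBUSY if a blkg is already associated with the bio. */
            bio_associate_blkg_from_css(bio, css);

            submit_bio(bio);    /* real code would also set bi_end_io, etc. */
    }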
* @page: the page to lookup the blkcg from
*
* Associate @bio with the blkg from @page's owning memcg and the respective
- * request_queue. This works like every other associate function wrt
- * references.
+ * request_queue. If cgroup_e_css returns NULL, fall back to the queue's
+ * root_blkg.
*
* Note: this must be called after bio has an associated device.
*/
int bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
struct cgroup_subsys_state *css;
+ int ret;
if (unlikely(bio->bi_blkg))
return -EBUSY;
if (!page->mem_cgroup)
return 0;
- css = cgroup_get_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
- return __bio_associate_blkg_from_css(bio, css);
+ rcu_read_lock();
+
+ css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
+
+ ret = __bio_associate_blkg_from_css(bio, css);
+
+ rcu_read_unlock();
+ return ret;
}
#endif /* CONFIG_MEMCG */
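The ordering note above matters because the association path dereferences bio->bi_disk->queue; a compressed sketch (same shape as the previous example, with the _from_page call swapped in):

    bio_set_dev(bio, bdev);                   /* device first: the queue is needed */
    bio_associate_blkg_from_page(bio, page);  /* blkg from page->mem_cgroup, else root_blkg */
    submit_bio(bio);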
* @bio: target bio
*
* Associate @bio with the blkg found from the bio's css and the request_queue.
- * If one is not found, bio_lookup_blkg creates the blkg.
+ * If one is not found, blkg_lookup_create() creates the blkg. This falls back
+ * to the queue's root_blkg if association fails.
*/
int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
{
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
+ struct cgroup_subsys_state *css;
int ret = 0;
/* someone has already associated this bio with a blkg */
rcu_read_lock();
- blkcg = css_to_blkcg(blkcg_get_css());
+ css = blkcg_css();
- if (!blkcg->css.parent) {
- ret = bio_associate_blkg(bio, q->root_blkg);
- } else {
- blkg = blkg_lookup_create(blkcg, q);
-
- ret = bio_associate_blkg(bio, blkg);
- }
+ ret = __bio_associate_blkg_from_css(bio, css);
rcu_read_unlock();
return ret;
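A sketch of where this is expected to run (assumption: the submission-side check path, in the spirit of blkcg_bio_issue_check(); the helper does its own RCU locking):

    struct blkcg_gq *blkg;

    bio_associate_create_blkg(q, bio);
    blkg = bio->bi_blkg;        /* the css's blkg, or q->root_blkg on fallback */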
bio->bi_ioc = NULL;
}
if (bio->bi_blkg) {
- /* a ref is always taken on css */
- css_put(&bio_blkcg(bio)->css);
blkg_put(bio->bi_blkg);
bio->bi_blkg = NULL;
}
*/
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
- if (src->bi_blkg) {
- css_get(&bio_blkcg(src)->css);
+ if (src->bi_blkg)
bio_associate_blkg(dst, src->bi_blkg);
- }
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
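For a driver that builds a clone by hand rather than through the bio clone helpers, the association carries over with one call (sketch; per bio_associate_blkg() the blkg reference for dst is taken there, so no separate css refcounting is needed any more):

    bio_clone_blkg_association(dst, src);     /* dst is charged like src */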
#endif /* CONFIG_BLK_CGROUP */
return task_css(current, io_cgrp_id);
}
-/**
- * blkcg_get_css - find and get a reference to the css
- *
- * Find the css associated with either the kthread or the current task.
- * This takes a reference on the blkcg which will need to be managed by the
- * caller.
- */
-static inline struct cgroup_subsys_state *blkcg_get_css(void)
-{
- struct cgroup_subsys_state *css;
-
- rcu_read_lock();
-
- css = kthread_blkcg();
- if (css) {
- css_get(css);
- } else {
- /*
- * This is a bit complicated. It is possible task_css is seeing
- * an old css pointer here. This is caused by the current
- * thread migrating away from this cgroup and this cgroup dying.
- * css_tryget() will fail when trying to take a ref on a cgroup
- * that's ref count has hit 0.
- *
- * Therefore, if it does fail, this means current must have
- * been swapped away already and this is waiting for it to
- * propagate on the polling cpu. Hence the use of cpu_relax().
- */
- while (true) {
- css = task_css(current, io_cgrp_id);
- if (likely(css_tryget(css)))
- break;
- cpu_relax();
- }
- }
-
- rcu_read_unlock();
-
- return css;
-}
-
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct blkcg, css) : NULL;
rcu_read_lock();
blkcg = bio_blkcg(bio);
- if (blkcg)
- css_get(&blkcg->css);
- else
- blkcg = css_to_blkcg(blkcg_get_css());
+ if (!blkcg)
+ blkcg = css_to_blkcg(blkcg_css());
/* bypass blkg lookup and use @q->root_rl directly for root */
if (blkcg == &blkcg_root)
if (unlikely(!blkg))
goto root_rl;
- blkg_get(blkg);
+ if (!blkg_try_get(blkg))
+ goto root_rl;
rcu_read_unlock();
return &blkg->rl;
root_rl:
*/
static inline void blk_put_rl(struct request_list *rl)
{
- /* an additional ref is always taken for rl */
- css_put(&rl->blkg->blkcg->css);
if (rl->blkg->blkcg != &blkcg_root)
blkg_put(rl->blkg);
}
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
}
/**
- * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
+ * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
*
* enabled. If @ss is associated with the hierarchy @cgrp is on, this
* function is guaranteed to return non-NULL css.
*/
-static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
- struct cgroup_subsys *ss)
+static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
{
lockdep_assert_held(&cgroup_mutex);
return cgroup_css(cgrp, ss);
}
+/**
+ * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest
+ *
+ * Find and get the effective css of @cgrp for @ss. The effective css is
+ * defined as the matching css of the nearest ancestor including self which
+ * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
+ * the root css is returned, so this function always returns a valid css.
+ *
+ * The returned css is not guaranteed to be online, and therefore it is the
+ * caller's responsibility to tryget a reference for it.
+ */
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
+{
+ struct cgroup_subsys_state *css;
+
+ do {
+ css = cgroup_css(cgrp, ss);
+
+ if (css)
+ return css;
+ cgrp = cgroup_parent(cgrp);
+ } while (cgrp);
+
+ return init_css_set.subsys[ss->id];
+}
+
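A sketch of the caller pattern the comment above implies (an assumption about the locking context, not part of the patch): look up under RCU and pin with css_tryget() before using the css outside the read section.

    struct cgroup_subsys_state *css;

    rcu_read_lock();
    css = cgroup_e_css(cgrp, &io_cgrp_subsys);  /* always non-NULL */
    if (!css_tryget(css))
            css = NULL;                         /* went offline; caller falls back */
    rcu_read_unlock();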
/**
* cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
*
* Should be called under cgroup_[tree_]mutex.
*/
-#define for_each_e_css(css, ssid, cgrp) \
- for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
- if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
- ; \
+#define for_each_e_css(css, ssid, cgrp) \
+ for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
+ if (!((css) = cgroup_e_css_by_mask(cgrp, \
+ cgroup_subsys[(ssid)]))) \
+ ; \
else
/**
* @ss is in this hierarchy, so we want the
* effective css from @cgrp.
*/
- template[i] = cgroup_e_css(cgrp, ss);
+ template[i] = cgroup_e_css_by_mask(cgrp, ss);
} else {
/*
* @ss is not in this hierarchy, so we don't want
return ret;
/*
- * At this point, cgroup_e_css() results reflect the new csses
+ * At this point, cgroup_e_css_by_mask() results reflect the new csses
* making the following cgroup_update_dfl_csses() properly update
* css associations of all tasks in the subtree.
*/