--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
/* we don't lose anything even if ioc allocation fails */
ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
if (ioc) {
- ioc->cgroup_changed = 1;
+ ioc_cgroup_changed(ioc);
put_io_context(ioc);
}
}
}
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
EXPORT_SYMBOL(get_task_io_context);
+/* set @which changed bit on all cics of @ioc; both callers hold ioc->lock */
+static void ioc_set_changed(struct io_context *ioc, int which)
+{
+ struct cfq_io_context *cic;
+ struct hlist_node *n;
+
+ hlist_for_each_entry(cic, n, &ioc->cic_list, cic_list)
+ set_bit(which, &cic->changed);
+}
+
+/**
+ * ioc_ioprio_changed - notify ioprio change
+ * @ioc: io_context of interest
+ * @ioprio: new ioprio
+ *
+ * @ioc's ioprio has changed to @ioprio. Set %CIC_IOPRIO_CHANGED for all
+ * cic's. iosched is responsible for checking the bit and applying it on
+ * request issue path.
+ */
+void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ ioc->ioprio = ioprio;
+ ioc_set_changed(ioc, CIC_IOPRIO_CHANGED);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+}
+
+/**
+ * ioc_cgroup_changed - notify cgroup change
+ * @ioc: io_context of interest
+ *
+ * @ioc's cgroup has changed. Set %CIC_CGROUP_CHANGED for all cic's.
+ * iosched is responsible for checking the bit and applying it on request
+ * issue path.
+ */
+void ioc_cgroup_changed(struct io_context *ioc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ ioc_set_changed(ioc, CIC_CGROUP_CHANGED);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+}
+
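For clarity, the consumer side these helpers expect looks like the cfq hunk further below: the producer publishes under ioc->lock with set_bit(), and each iosched drains its own cic's bits locklessly with test_and_clear_bit() on the request issue path. A minimal sketch, not part of the patch; the my_* names are hypothetical:

/*
 * Illustrative consumer: how an iosched would honor the CIC_*_CHANGED
 * bits on its request issue path.  my_apply_ioprio()/my_apply_cgroup()
 * are hypothetical helpers, not part of this patch.
 */
static void my_apply_ioprio(struct cfq_io_context *cic) { }
static void my_apply_cgroup(struct cfq_io_context *cic) { }

static void my_iosched_set_request(struct cfq_io_context *cic)
{
	if (unlikely(cic->changed)) {
		/* test_and_clear_bit() is atomic; no ioc->lock on this side */
		if (test_and_clear_bit(CIC_IOPRIO_CHANGED, &cic->changed))
			my_apply_ioprio(cic);
		if (test_and_clear_bit(CIC_CGROUP_CHANGED, &cic->changed))
			my_apply_cgroup(cic);
	}
}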
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
cfq_clear_cfqq_prio_changed(cfqq);
}
-static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
+static void changed_ioprio(struct cfq_io_context *cic)
{
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *cfqq;
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
-static void cfq_ioc_set_ioprio(struct io_context *ioc)
-{
- call_for_each_cic(ioc, changed_ioprio);
- ioc->ioprio_changed = 0;
-}
-
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
pid_t pid, bool is_sync)
{
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
+static void changed_cgroup(struct cfq_io_context *cic)
{
struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
struct cfq_data *cfqd = cic_to_cfqd(cic);
spin_unlock_irqrestore(q->queue_lock, flags);
}
-
-static void cfq_ioc_set_cgroup(struct io_context *ioc)
-{
- call_for_each_cic(ioc, changed_cgroup);
- ioc->cgroup_changed = 0;
-}
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
static struct cfq_queue *
out:
get_io_context(ioc);
- if (unlikely(ioc->ioprio_changed))
- cfq_ioc_set_ioprio(ioc);
-
+ if (unlikely(cic->changed)) {
+ if (test_and_clear_bit(CIC_IOPRIO_CHANGED, &cic->changed))
+ changed_ioprio(cic);
#ifdef CONFIG_CFQ_GROUP_IOSCHED
- if (unlikely(ioc->cgroup_changed))
- cfq_ioc_set_cgroup(ioc);
+ if (test_and_clear_bit(CIC_CGROUP_CHANGED, &cic->changed))
+ changed_cgroup(cic);
#endif
+ }
+
return cic;
err:
if (cic)
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
if (ioc) {
- ioc->ioprio = ioprio;
- ioc->ioprio_changed = 1;
+ ioc_ioprio_changed(ioc, ioprio);
put_io_context(ioc);
}
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
unsigned long ttime_mean;
};
+enum {
+ CIC_IOPRIO_CHANGED,
+ CIC_CGROUP_CHANGED,
+};
+
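The enum constants above are bit numbers into the per-cic 'changed' word added below. The field has to be an unsigned long because the kernel's atomic bitops (set_bit(), test_and_clear_bit()) operate on unsigned long. A toy sketch of the resulting handshake, assuming nothing beyond <linux/bitops.h>:

#include <linux/bitops.h>	/* set_bit(), test_and_clear_bit() */

static unsigned long changed;	/* stand-in for cic->changed */

static void notify(void)
{
	set_bit(CIC_IOPRIO_CHANGED, &changed);	/* atomic; repeats coalesce */
}

static bool drain(void)
{
	/* true once per notify()..drain() cycle; the bit ends up clear */
	return test_and_clear_bit(CIC_IOPRIO_CHANGED, &changed);
}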
struct cfq_io_context {
void *key;
struct request_queue *q;
struct list_head queue_list;
struct hlist_node cic_list;
+ unsigned long changed;
+
void (*dtor)(struct io_context *); /* destructor */
void (*exit)(struct io_context *); /* called on task exit */
spinlock_t lock;
unsigned short ioprio;
- unsigned short ioprio_changed;
-
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
- unsigned short cgroup_changed;
-#endif
/*
* For request batching
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
gfp_t gfp_flags, int node);
+void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
+void ioc_cgroup_changed(struct io_context *ioc);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
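One consistency note on the header: the new declarations live only under CONFIG_BLOCK, which is sufficient because every caller (block/blk-cgroup.c, block/cfq-iosched.c, fs/ioprio.c) is itself built only when CONFIG_BLOCK is set. If a non-block caller ever appeared, no-op stubs in the #else branch would follow the existing put_io_context() pattern; hypothetically:

/* hypothetical !CONFIG_BLOCK stubs; the patch itself needs none */
static inline void ioc_ioprio_changed(struct io_context *ioc, int ioprio) { }
static inline void ioc_cgroup_changed(struct io_context *ioc) { }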