contiguous I/Os to try to spot when the io is in one of these sequential
modes.
-Internally the mq policy maintains a promotion threshold variable. If
-the hit count of a block not in the cache goes above this threshold it
-gets promoted to the cache. The read, write and discard promote adjustment
+Internally the mq policy determines a promotion threshold. If the hit
+count of a block not in the cache goes above this threshold it gets
+promoted to the cache. The read, write and discard promote adjustment
tunables allow you to tweak the promotion threshold by adding a small
value based on the io type. They default to 4, 8 and 1 respectively.
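Assuming these adjustments are exposed through the cache target's
message interface in the same way as the policy's other tunables, they
could, for example, be lowered at runtime with something like:

  dmsetup message <mapped device> 0 read_promote_adjustment 1
  dmsetup message <mapped device> 0 write_promote_adjustment 2

where <mapped device> stands in for the name of your cache device.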
If you're trying to quickly warm a new cache device you may wish to
 * Gives us the oldest entry of the lowest populated level. If the first
* level is emptied then we shift down one level.
*/
-static struct list_head *queue_pop(struct queue *q)
+static struct list_head *queue_peek(struct queue *q)
{
unsigned level;
- struct list_head *r;
for (level = 0; level < NR_QUEUE_LEVELS; level++)
- if (!list_empty(q->qs + level)) {
- r = q->qs[level].next;
- list_del(r);
+ if (!list_empty(q->qs + level))
+ return q->qs[level].next;
- /* have we just emptied the bottom level? */
- if (level == 0 && list_empty(q->qs))
- queue_shift_down(q);
+ return NULL;
+}
- return r;
- }
+static struct list_head *queue_pop(struct queue *q)
+{
+ struct list_head *r = queue_peek(q);
- return NULL;
+ if (r) {
+ list_del(r);
+
+ /* have we just emptied the bottom level? */
+ if (list_empty(q->qs))
+ queue_shift_down(q);
+ }
+
+ return r;
}
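To see the peek/pop split in isolation, here is a minimal user-space
sketch of the same idea under simplified assumptions: the names
(toy_queue, toy_peek, toy_pop) and the fixed four levels are invented
for illustration, the kernel list helpers are replaced by hand-rolled
circular lists, and the hit-count/shift-down bookkeeping of the real
mq policy is left out.

#include <stdio.h>

#define NR_LEVELS 4

struct node {
	struct node *prev, *next;	/* circular, doubly linked */
	int id;
};

struct toy_queue {
	/* one sentinel per level; a level is empty when self-linked */
	struct node levels[NR_LEVELS];
};

static void toy_init(struct toy_queue *q)
{
	for (int l = 0; l < NR_LEVELS; l++)
		q->levels[l].prev = q->levels[l].next = &q->levels[l];
}

static void toy_push(struct toy_queue *q, int level, struct node *n)
{
	struct node *s = &q->levels[level];

	/* add at the tail, i.e. just before the sentinel */
	n->prev = s->prev;
	n->next = s;
	s->prev->next = n;
	s->prev = n;
}

/* Oldest node of the lowest populated level, left in place. */
static struct node *toy_peek(struct toy_queue *q)
{
	for (int l = 0; l < NR_LEVELS; l++)
		if (q->levels[l].next != &q->levels[l])
			return q->levels[l].next;

	return NULL;
}

/* As toy_peek(), but also unlinks the node it returns. */
static struct node *toy_pop(struct toy_queue *q)
{
	struct node *n = toy_peek(q);

	if (n) {
		n->prev->next = n->next;
		n->next->prev = n->prev;
	}

	return n;
}

int main(void)
{
	struct toy_queue q;
	struct node a = { .id = 1 }, b = { .id = 2 };

	toy_init(&q);
	toy_push(&q, 2, &a);
	toy_push(&q, 1, &b);

	printf("peek %d\n", toy_peek(&q)->id);	/* 2: lowest level wins */
	printf("pop  %d\n", toy_pop(&q)->id);	/* 2 */
	printf("pop  %d\n", toy_pop(&q)->id);	/* 1 */
	return 0;
}

Built with any C99 compiler this prints 2, 2, 1: whichever level is
lowest and populated is served first, and peeking never disturbs the
queue, which is exactly the property promote_threshold() below relies
on.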
static struct list_head *list_pop(struct list_head *lh)
unsigned generation;
unsigned generation_period; /* in lookups (will probably change) */
- /*
- * Entries in the pre_cache whose hit count passes the promotion
- * threshold move to the cache proper. Working out the correct
- * value for the promotion_threshold is crucial to this policy.
- */
- unsigned promote_threshold;
-
unsigned discard_promote_adjustment;
unsigned read_promote_adjustment;
unsigned write_promote_adjustment;
#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
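+/*
+ * Demoting a dirty cache block means writing it back to the origin
+ * first, so demand a much larger hit count before letting a pre_cache
+ * block displace a dirty entry.
+ */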
+#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128
/*----------------------------------------------------------------*/
return e;
}
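+/*
+ * Returns the entry at the head of the queue (the oldest entry on the
+ * lowest populated level) without removing it, or NULL if the queue is
+ * empty.
+ */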
+static struct entry *peek(struct queue *q)
+{
+ struct list_head *h = queue_peek(q);
+
+ return h ? container_of(h, struct entry, list) : NULL;
+}
+
/*
* Has this entry already been updated?
*/
break;
}
}
-
- mq->promote_threshold = nr ? total / nr : 1;
- if (mq->promote_threshold * nr < total)
- mq->promote_threshold++;
}
}
return 0;
}
+/*
+ * Entries in the pre_cache whose hit count passes the promotion
+ * threshold move to the cache proper. Working out the correct
+ * value for the promotion_threshold is crucial to this policy.
+ */
+static unsigned promote_threshold(struct mq_policy *mq)
+{
+ struct entry *e;
+
+ if (any_free_cblocks(mq))
+ return 0;
+
+ e = peek(&mq->cache_clean);
+ if (e)
+ return e->hit_count;
+
+ e = peek(&mq->cache_dirty);
+ if (e)
+ return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;
+
+ /* This should never happen */
+ return 0;
+}
+
/*
* We modify the basic promotion_threshold depending on the specific io.
*
bool discarded_oblock, int data_dir)
{
if (data_dir == READ)
- return mq->promote_threshold + mq->read_promote_adjustment;
+ return promote_threshold(mq) + mq->read_promote_adjustment;
if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
/*
return mq->discard_promote_adjustment;
}
- return mq->promote_threshold + mq->write_promote_adjustment;
+ return promote_threshold(mq) + mq->write_promote_adjustment;
}
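Taken together, the promotion threshold is now read straight off the
eviction candidate instead of being maintained and periodically
recomputed as a separate variable. A rough worked example, assuming
the default tunables and that should_promote() (whose body is not
shown here) promotes a pre_cache entry once its hit count reaches the
adjusted threshold: with the cache full and its oldest clean entry
sitting at a hit count of 6, promote_threshold() returns 6, so a
repeatedly read block is promoted once it has been hit 6 + 4 = 10
times; if only dirty entries remain, the same block would need
6 + 128 + 4 = 138 hits before it could displace one.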
static bool should_promote(struct mq_policy *mq, struct entry *e,
mq->tick = 0;
mq->hit_count = 0;
mq->generation = 0;
- mq->promote_threshold = 0;
mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;