#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
+#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with queue_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
bool sync)
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the queue_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
struct blkio_policy_type *pol,
struct blkio_group *curr_blkg)
blkio_mark_blkg_waiting(&pd->stats);
}
/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
unsigned long long now;
blkio_clear_blkg_waiting(stats);
}
/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
unsigned long long now;
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
struct blkio_policy_type *pol)
{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
- unsigned long flags;
+ struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+ lockdep_assert_held(blkg->q->queue_lock);
+ BUG_ON(blkio_blkg_idling(stats));
- spin_lock_irqsave(&blkg->stats_lock, flags);
- BUG_ON(blkio_blkg_idling(&pd->stats));
- pd->stats.start_idle_time = sched_clock();
- blkio_mark_blkg_idling(&pd->stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ stats->start_idle_time = sched_clock();
+ blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
struct blkio_policy_type *pol)
{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
- unsigned long flags;
- unsigned long long now;
- struct blkio_group_stats *stats;
+ struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+ lockdep_assert_held(blkg->q->queue_lock);
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &pd->stats;
if (blkio_blkg_idling(stats)) {
- now = sched_clock();
- if (time_after64(now, stats->start_idle_time))
+ unsigned long long now = sched_clock();
+
+ if (time_after64(now, stats->start_idle_time)) {
+ u64_stats_update_begin(&stats->syncp);
stats->idle_time += now - stats->start_idle_time;
+ u64_stats_update_end(&stats->syncp);
+ }
blkio_clear_blkg_idling(stats);
}
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
struct blkio_policy_type *pol)
{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
- unsigned long flags;
- struct blkio_group_stats *stats;
+ struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+ lockdep_assert_held(blkg->q->queue_lock);
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &pd->stats;
+ u64_stats_update_begin(&stats->syncp);
stats->avg_queue_size_sum +=
stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
stats->avg_queue_size_samples++;
blkio_update_group_wait_time(stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
struct blkio_policy_type *pol)
{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
- unsigned long flags;
- struct blkio_group_stats *stats;
+ struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &pd->stats;
+ lockdep_assert_held(blkg->q->queue_lock);
if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE])
return;
- }
/*
* group is already marked empty. This can happen if cfqq got new
* request in parent group and moved to this group while being added
* to service tree. Just ignore the event and move on.
*/
- if(blkio_blkg_empty(stats)) {
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ if (blkio_blkg_empty(stats))
return;
- }
stats->start_empty_time = sched_clock();
blkio_mark_blkg_empty(stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
{
struct blkg_policy_data *pd = blkg->pd[pol->plid];
+ lockdep_assert_held(blkg->q->queue_lock);
+
pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
struct blkio_group *curr_blkg, bool direction,
bool sync)
{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
- unsigned long flags;
+ struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+ lockdep_assert_held(blkg->q->queue_lock);
+
+ u64_stats_update_begin(&stats->syncp);
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, direction, sync);
+ blkio_end_empty_time(stats);
+ u64_stats_update_end(&stats->syncp);
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
- sync);
- blkio_end_empty_time(&pd->stats);
blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
struct blkio_policy_type *pol,
bool direction, bool sync)
{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
- unsigned long flags;
+ struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
- direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ lockdep_assert_held(blkg->q->queue_lock);
+
+ u64_stats_update_begin(&stats->syncp);
+ blkio_check_and_dec_stat(stats->stat_arr[BLKIO_STAT_QUEUED], direction,
+ sync);
+ u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
unsigned long time,
unsigned long unaccounted_time)
{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
- unsigned long flags;
+ struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- pd->stats.time += time;
+ lockdep_assert_held(blkg->q->queue_lock);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
- pd->stats.unaccounted_time += unaccounted_time;
+ stats->unaccounted_time += unaccounted_time;
#endif
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
uint64_t io_start_time, bool direction,
bool sync)
{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
- struct blkio_group_stats *stats;
- unsigned long flags;
+ struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
unsigned long long now = sched_clock();
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &pd->stats;
+ lockdep_assert_held(blkg->q->queue_lock);
+
+ u64_stats_update_begin(&stats->syncp);
if (time_after64(now, io_start_time))
blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
now - io_start_time, direction, sync);
if (time_after64(io_start_time, start_time))
blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
io_start_time - start_time, direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
struct blkio_policy_type *pol,
bool direction, bool sync)
{
- struct blkg_policy_data *pd = blkg->pd[pol->plid];
- struct blkio_group_stats *stats;
- unsigned long flags;
+ struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &pd->stats;
+ lockdep_assert_held(blkg->q->queue_lock);
+
+ u64_stats_update_begin(&stats->syncp);
blkio_add_stat(stats->stat_arr[BLKIO_STAT_MERGED], 1, direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
if (!blkg)
return NULL;
- spin_lock_init(&blkg->stats_lock);
blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
INIT_LIST_HEAD(&blkg->alloc_node);
/*
 * Zero the per-cpu stats of @blkg for policy @plid on every possible
 * CPU.  No-op if the per-cpu area hasn't been allocated.
 *
 * NOTE(review): the clearing is not synchronized against concurrent
 * per-cpu updates, so a racing updater may leave a partially reset
 * counter — consistent with the "stat reset is racy" policy used by
 * the cgroup-wide reset path.
 */
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		sc->sectors = 0;
		memset(sc->stat_arr_cpu, 0, sizeof(sc->stat_arr_cpu));
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
- struct blkio_cgroup *blkcg;
+ struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
struct blkio_group *blkg;
- struct blkio_group_stats *stats;
struct hlist_node *n;
- uint64_t queued[BLKIO_STAT_TOTAL];
int i;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- bool idling, waiting, empty;
- unsigned long long now = sched_clock();
-#endif
- blkcg = cgroup_to_blkio_cgroup(cgroup);
spin_lock(&blkio_list_lock);
spin_lock_irq(&blkcg->lock);
+
+ /*
+ * Note that stat reset is racy - it doesn't synchronize against
+ * stat updates. This is a debug feature which shouldn't exist
+ * anyway. If you get hit by a race, retry.
+ */
hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
struct blkio_policy_type *pol;
list_for_each_entry(pol, &blkio_list, list) {
struct blkg_policy_data *pd = blkg->pd[pol->plid];
-
- spin_lock(&blkg->stats_lock);
- stats = &pd->stats;
+ struct blkio_group_stats *stats = &pd->stats;
+
+ /* queued stats shouldn't be cleared */
+ for (i = 0; i < ARRAY_SIZE(stats->stat_arr); i++)
+ if (i != BLKIO_STAT_QUEUED)
+ memset(stats->stat_arr[i], 0,
+ sizeof(stats->stat_arr[i]));
+ stats->time = 0;
#ifdef CONFIG_DEBUG_BLK_CGROUP
- idling = blkio_blkg_idling(stats);
- waiting = blkio_blkg_waiting(stats);
- empty = blkio_blkg_empty(stats);
+ memset((void *)stats + BLKG_STATS_DEBUG_CLEAR_START, 0,
+ BLKG_STATS_DEBUG_CLEAR_SIZE);
#endif
- for (i = 0; i < BLKIO_STAT_TOTAL; i++)
- queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
- memset(stats, 0, sizeof(struct blkio_group_stats));
- for (i = 0; i < BLKIO_STAT_TOTAL; i++)
- stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (idling) {
- blkio_mark_blkg_idling(stats);
- stats->start_idle_time = now;
- }
- if (waiting) {
- blkio_mark_blkg_waiting(stats);
- stats->start_group_wait_time = now;
- }
- if (empty) {
- blkio_mark_blkg_empty(stats);
- stats->start_empty_time = now;
- }
-#endif
- spin_unlock(&blkg->stats_lock);
-
- /* Reset Per cpu stats which don't take blkg->stats_lock */
blkio_reset_stats_cpu(blkg, pol->plid);
}
}
}
}
-static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
- struct cgroup_map_cb *cb, const char *dname)
-{
- blkio_get_key_name(0, dname, str, chars_left, true);
- cb->fill(cb, str, val);
- return val;
-}
-
-
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
enum stat_type_cpu type, enum stat_sub_type sub_type)
{
if (type == BLKIO_STAT_CPU_SECTORS) {
val = blkio_read_stat_cpu(blkg, plid, type, 0);
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
- dname);
+ blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
+ cb->fill(cb, key_str, val);
+ return val;
}
for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
return disk_total;
}
-/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
struct cgroup_map_cb *cb, const char *dname,
enum stat_type type)
{
- struct blkg_policy_data *pd = blkg->pd[plid];
- uint64_t disk_total;
+ struct blkio_group_stats *stats = &blkg->pd[plid]->stats;
+ uint64_t v = 0, disk_total = 0;
char key_str[MAX_KEY_LEN];
- enum stat_sub_type sub_type;
+ unsigned int sync_start;
+ int st;
- if (type == BLKIO_STAT_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- pd->stats.time, cb, dname);
+ if (type >= BLKIO_STAT_ARR_NR) {
+ do {
+ sync_start = u64_stats_fetch_begin(&stats->syncp);
+ switch (type) {
+ case BLKIO_STAT_TIME:
+ v = stats->time;
+ break;
#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (type == BLKIO_STAT_UNACCOUNTED_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- pd->stats.unaccounted_time, cb, dname);
- if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
- uint64_t sum = pd->stats.avg_queue_size_sum;
- uint64_t samples = pd->stats.avg_queue_size_samples;
- if (samples)
- do_div(sum, samples);
- else
- sum = 0;
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- sum, cb, dname);
- }
- if (type == BLKIO_STAT_GROUP_WAIT_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- pd->stats.group_wait_time, cb, dname);
- if (type == BLKIO_STAT_IDLE_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- pd->stats.idle_time, cb, dname);
- if (type == BLKIO_STAT_EMPTY_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- pd->stats.empty_time, cb, dname);
- if (type == BLKIO_STAT_DEQUEUE)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- pd->stats.dequeue, cb, dname);
+ case BLKIO_STAT_UNACCOUNTED_TIME:
+ v = stats->unaccounted_time;
+ break;
+ case BLKIO_STAT_AVG_QUEUE_SIZE: {
+ uint64_t samples = stats->avg_queue_size_samples;
+
+ if (samples) {
+ v = stats->avg_queue_size_sum;
+ do_div(v, samples);
+ }
+ break;
+ }
+ case BLKIO_STAT_IDLE_TIME:
+ v = stats->idle_time;
+ break;
+ case BLKIO_STAT_EMPTY_TIME:
+ v = stats->empty_time;
+ break;
+ case BLKIO_STAT_DEQUEUE:
+ v = stats->dequeue;
+ break;
+ case BLKIO_STAT_GROUP_WAIT_TIME:
+ v = stats->group_wait_time;
+ break;
#endif
+ default:
+ WARN_ON_ONCE(1);
+ }
+ } while (u64_stats_fetch_retry(&stats->syncp, sync_start));
- for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
- sub_type++) {
- blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
- false);
- cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
+ blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
+ cb->fill(cb, key_str, v);
+ return v;
}
- disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
- pd->stats.stat_arr[type][BLKIO_STAT_WRITE];
+
+ for (st = BLKIO_STAT_READ; st < BLKIO_STAT_TOTAL; st++) {
+ do {
+ sync_start = u64_stats_fetch_begin(&stats->syncp);
+ v = stats->stat_arr[type][st];
+ } while (u64_stats_fetch_retry(&stats->syncp, sync_start));
+
+ blkio_get_key_name(st, dname, key_str, MAX_KEY_LEN, false);
+ cb->fill(cb, key_str, v);
+ if (st == BLKIO_STAT_READ || st == BLKIO_STAT_WRITE)
+ disk_total += v;
+ }
+
blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
false);
cb->fill(cb, key_str, disk_total);
if (!dname)
continue;
- if (pcpu) {
+ if (pcpu)
cgroup_total += blkio_get_stat_cpu(blkg, plid,
cb, dname, type);
- } else {
- spin_lock(&blkg->stats_lock);
+ else
cgroup_total += blkio_get_stat(blkg, plid,
cb, dname, type);
- spin_unlock(&blkg->stats_lock);
- }
}
if (show_total)
cb->fill(cb, "Total", cgroup_total);
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
+ static atomic64_t id_seq = ATOMIC64_INIT(0);
struct blkio_cgroup *blkcg;
struct cgroup *parent = cgroup->parent;
return ERR_PTR(-ENOMEM);
blkcg->weight = BLKIO_WEIGHT_DEFAULT;
+ blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
spin_lock_init(&blkcg->lock);
INIT_HLIST_HEAD(&blkcg->blkg_list);