| author | Divyesh Shah <dpshah@google.com> | 2010-04-14 11:22:38 +0200 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2010-04-14 11:22:38 +0200 |
| commit | 28baf44299e0480d66ebb3093de5d51deff04e9f (patch) | |
| tree | 2cac2546027638c269441f4035a67abd3d267ca4 | |
| parent | 4facdaec1ce186e731e6baa04f074804849e9a49 (diff) | |
blkio: Fix compile errors
Fixes compile errors in the blk-cgroup code for the empty_time stat, and a merge error in
CFQ. The blk-cgroup error occurred when CONFIG_DEBUG_CFQ_IOSCHED is not set.
Signed-off-by: Divyesh Shah <dpshah@google.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
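
The blk-cgroup hunks below move blkiocg_set_start_empty_time() so that it sits next to the other debug-only stat helpers, which appears consistent with the function calling blkio_blkg_empty() and blkio_mark_blkg_empty(), helpers that are typically only available when the debug config option is enabled. As a rough illustration of why such a move fixes the build, here is a minimal, hypothetical sketch with stand-in names (not the real blk-cgroup symbols): the caller is kept inside the same config guard as its callees, so it is only compiled when they exist.

```c
/*
 * Hypothetical sketch of the build issue: helpers that exist only under a
 * debug config option must not be called from code that is compiled
 * unconditionally.  All names below are stand-ins, not kernel symbols.
 * Build with or without -DCONFIG_DEBUG_BLK_CGROUP.
 */
#include <stdbool.h>
#include <stdio.h>

struct group_stats {
	bool empty;
	unsigned long start_empty_time;
};

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* Debug-only helpers: these do not exist when the option is off. */
static bool group_is_empty(struct group_stats *s)   { return s->empty; }
static void group_mark_empty(struct group_stats *s) { s->empty = true; }

/*
 * Keeping the caller inside the same #ifdef as its callees (as the patch
 * does by moving blkiocg_set_start_empty_time next to the other debug-only
 * stat helpers) guarantees it is only compiled when they are.
 */
static void set_start_empty_time(struct group_stats *s, bool ignore)
{
	if (!ignore && group_is_empty(s))
		return;
	s->start_empty_time = 12345;	/* stand-in for sched_clock() */
	group_mark_empty(s);
}
#endif

int main(void)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	struct group_stats s = { .empty = false };

	set_start_empty_time(&s, false);
	printf("empty=%d start=%lu\n", s.empty, s.start_empty_time);
#else
	puts("debug stats disabled; nothing to compile or do");
#endif
	return 0;
}
```
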
-rw-r--r-- | block/blk-cgroup.c | 54 |
-rw-r--r-- | block/cfq-iosched.c | 2 |
2 files changed, 28 insertions, 28 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index aa97cd4..80c1261 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -219,6 +219,33 @@ void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
 
+void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
+{
+	unsigned long flags;
+	struct blkio_group_stats *stats;
+
+	spin_lock_irqsave(&blkg->stats_lock, flags);
+	stats = &blkg->stats;
+
+	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
+			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
+		spin_unlock_irqrestore(&blkg->stats_lock, flags);
+		return;
+	}
+
+	/*
+	 * If ignore is set, we do not panic on the empty flag being set
+	 * already. This is to avoid cases where there are superfluous timeslice
+	 * complete events (for eg., forced_dispatch in CFQ) when no IOs are
+	 * served which could result in triggering the empty check incorrectly.
+	 */
+	BUG_ON(!ignore && blkio_blkg_empty(stats));
+	stats->start_empty_time = sched_clock();
+	blkio_mark_blkg_empty(stats);
+	spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
+
 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 			unsigned long dequeue)
 {
@@ -268,33 +295,6 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
 
-void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
-{
-	unsigned long flags;
-	struct blkio_group_stats *stats;
-
-	spin_lock_irqsave(&blkg->stats_lock, flags);
-	stats = &blkg->stats;
-
-	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
-			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
-		spin_unlock_irqrestore(&blkg->stats_lock, flags);
-		return;
-	}
-
-	/*
-	 * If ignore is set, we do not panic on the empty flag being set
-	 * already. This is to avoid cases where there are superfluous timeslice
-	 * complete events (for eg., forced_dispatch in CFQ) when no IOs are
-	 * served which could result in triggering the empty check incorrectly.
-	 */
-	BUG_ON(!ignore && blkio_blkg_empty(stats));
-	stats->start_empty_time = sched_clock();
-	blkio_mark_blkg_empty(stats);
-	spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
-
 void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
 				bool direction, bool sync)
 {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9e0df2b..0177109 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2231,7 +2231,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	int dispatched = 0;
 
 	/* Expire the timeslice of the current active queue first */
-	cfq_slice_expired(cfqd, 0);
+	cfq_slice_expired(cfqd, 0, true);
 	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
 		__cfq_set_active_queue(cfqd, cfqq);
 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
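
The CFQ hunk passes true as the new third argument of cfq_slice_expired(), which, per the comment in the moved function, relaxes the BUG_ON() for superfluous timeslice-complete events generated by forced dispatch. The sketch below is a hypothetical, self-contained illustration of that "ignore" pattern only; the names are stand-ins, not the real CFQ or blk-cgroup symbols.

```c
/*
 * Hypothetical sketch of the "ignore" flag pattern: an assertion on
 * double-marking a group as empty is suppressed when the caller (for
 * example, a forced-dispatch path) may legitimately deliver a
 * superfluous timeslice-complete event.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct group {
	bool empty;
};

static void set_start_empty_time(struct group *g, bool ignore)
{
	/* Without ignore, marking an already-empty group is a logic error. */
	assert(ignore || !g->empty);
	g->empty = true;
}

/* Normal expiry: the group must not already be marked empty. */
static void slice_expired(struct group *g)
{
	set_start_empty_time(g, false);
}

/* Forced dispatch: the event may be superfluous, so the check is relaxed. */
static void forced_dispatch(struct group *g)
{
	set_start_empty_time(g, true);
}

int main(void)
{
	struct group g = { .empty = false };

	slice_expired(&g);	/* first expiry: assertion holds */
	forced_dispatch(&g);	/* repeated mark tolerated via ignore=true */
	printf("empty=%d\n", g.empty);
	return 0;
}
```
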