author | Tejun Heo <tj@kernel.org> | 2011-12-14 00:33:42 +0100
committer | Jens Axboe <axboe@kernel.dk> | 2011-12-14 00:33:42 +0100
commit | 7e5a8794492e43e9eebb68a98a23be055888ccd0 (patch)
tree | cc049a23b2c994f910d3101860bc1c2ecb7aa35f /block/blk-ioc.c
parent | 3d3c2379feb177a5fd55bb0ed76776dc9d4f3243 (diff)
block, cfq: move io_cq exit/release to blk-ioc.c
With the kmem_cache now managed by blk-ioc, io_cq exit/release can be moved to
blk-ioc too. The odd ->io_cq->exit/release() callbacks are replaced with
elevator_ops->elevator_exit_icq_fn(); unlinking from both the ioc and the q,
and the freeing itself, are handled automatically by blk-ioc. The elevator
operation only needs to perform the exit work specific to that elevator
- in cfq's case, exiting the cfqq's.
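(For illustration only - not part of this diff, which touches block/blk-ioc.c
alone - here is a minimal sketch of what such an elevator_exit_icq_fn callback
looks like on the cfq side. The cfq_io_cq type and the icq_to_cic(),
cic_to_cfqd() and cfq_exit_cfqq() helpers follow cfq-iosched naming but are
reproduced from memory, so treat them as assumptions.)

/* Sketch: the elevator-specific exit work cfq hooks into elevator_exit_icq_fn. */
static void cfq_exit_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);	/* container_of() around icq */
	struct cfq_data *cfqd = cic_to_cfqd(cic);

	/* tear down the async and sync cfqq's owned by this io context */
	if (cic->cfqq[BLK_RW_ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}
	if (cic->cfqq[BLK_RW_SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}

Note that no unlinking or freeing of the icq appears here; blk-ioc's
ioc_exit_icq() (added below) takes care of both.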
Also, the clearing of io_cq's on queue detach is moved into block core and is
now performed automatically on elevator switch and on q release.
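(A rough sketch of the caller side just described; elevator_switch() and
blk_release_queue() are the expected call sites, but they live in other files
and are not part of this diff, so the snippet below is an assumption based on
the description.)

/* Sketch: block core detaching all icqs from a queue, with the queue locked. */
spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);	/* exits and frees every icq still on q->icq_list */
spin_unlock_irq(q->queue_lock);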
Because the q an io_cq points to might already be freed by the time the RCU
callback for the io_cq runs, blk-ioc must record, at release time, which
kmem_cache the io_cq has to be freed back to. A new field,
io_cq->__rcu_icq_cache, is added for this purpose. Since both the new field
and the rcu_head are used only after the io_cq has been released, while the
q_node/ioc_node fields are no longer used at that point, the two pairs are
placed in unions.
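(Sketched below is the resulting io_cq layout as described above. The field
names match those used in the new blk-ioc.c code; the union pairing is
reproduced from memory of include/linux/iocontext.h, and any additional
members are omitted.)

struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node/ioc_node link the icq on q->icq_list and ioc->icq_list
	 * while it is live; they are unused once ioc_exit_icq() has
	 * unlinked the icq, so the RCU-free bookkeeping reuses their
	 * storage.
	 */
	union {
		struct list_head	q_node;
		struct kmem_cache	*__rcu_icq_cache;
	};
	union {
		struct hlist_node	ioc_node;
		struct rcu_head		__rcu_head;
	};
};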
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r-- | block/blk-ioc.c | 76
1 file changed, 68 insertions, 8 deletions
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 87ecc98..0910a55 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -44,6 +44,51 @@ EXPORT_SYMBOL(get_io_context);
 #define ioc_release_depth_dec(q)	do { } while (0)
 #endif
 
+static void icq_free_icq_rcu(struct rcu_head *head)
+{
+	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
+
+	kmem_cache_free(icq->__rcu_icq_cache, icq);
+}
+
+/*
+ * Exit and free an icq.  Called with both ioc and q locked.
+ */
+static void ioc_exit_icq(struct io_cq *icq)
+{
+	struct io_context *ioc = icq->ioc;
+	struct request_queue *q = icq->q;
+	struct elevator_type *et = q->elevator->type;
+
+	lockdep_assert_held(&ioc->lock);
+	lockdep_assert_held(q->queue_lock);
+
+	radix_tree_delete(&ioc->icq_tree, icq->q->id);
+	hlist_del_init(&icq->ioc_node);
+	list_del_init(&icq->q_node);
+
+	/*
+	 * Both setting lookup hint to and clearing it from @icq are done
+	 * under queue_lock.  If it's not pointing to @icq now, it never
+	 * will.  Hint assignment itself can race safely.
+	 */
+	if (rcu_dereference_raw(ioc->icq_hint) == icq)
+		rcu_assign_pointer(ioc->icq_hint, NULL);
+
+	if (et->ops.elevator_exit_icq_fn) {
+		ioc_release_depth_inc(q);
+		et->ops.elevator_exit_icq_fn(icq);
+		ioc_release_depth_dec(q);
+	}
+
+	/*
+	 * @icq->q might have gone away by the time RCU callback runs
+	 * making it impossible to determine icq_cache.  Record it in @icq.
+	 */
+	icq->__rcu_icq_cache = et->icq_cache;
+	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
+}
+
 /*
  * Slow path for ioc release in put_io_context().  Performs double-lock
  * dancing to unlink all icq's and then frees ioc.
@@ -87,10 +132,7 @@ static void ioc_release_fn(struct work_struct *work)
 			spin_lock(&ioc->lock);
 			continue;
 		}
-		ioc_release_depth_inc(this_q);
-		icq->exit(icq);
-		icq->release(icq);
-		ioc_release_depth_dec(this_q);
+		ioc_exit_icq(icq);
 	}
 
 	if (last_q) {
@@ -167,10 +209,7 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 			last_q = this_q;
 			continue;
 		}
-		ioc_release_depth_inc(this_q);
-		icq->exit(icq);
-		icq->release(icq);
-		ioc_release_depth_dec(this_q);
+		ioc_exit_icq(icq);
 	}
 
 	if (last_q && last_q != locked_q)
@@ -203,6 +242,27 @@ void exit_io_context(struct task_struct *task)
 	put_io_context(ioc, NULL);
 }
 
+/**
+ * ioc_clear_queue - break any ioc association with the specified queue
+ * @q: request_queue being cleared
+ *
+ * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
+ */
+void ioc_clear_queue(struct request_queue *q)
+{
+	lockdep_assert_held(q->queue_lock);
+
+	while (!list_empty(&q->icq_list)) {
+		struct io_cq *icq = list_entry(q->icq_list.next,
+					       struct io_cq, q_node);
+		struct io_context *ioc = icq->ioc;
+
+		spin_lock(&ioc->lock);
+		ioc_exit_icq(icq);
+		spin_unlock(&ioc->lock);
+	}
+}
+
 void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 				int node)
 {