about · summary · refs · log · tree · commit · diff · stats
path: root/block
diff options
context:
space:
mode:
authorfaux123 <reioux@gmail.com>2012-02-07 01:39:30 -0800
committerZiyan <jaraidaniel@gmail.com>2016-01-08 10:37:04 +0100
commit02ab8685496603c74868e8fc91847cbe12d22a48 (patch)
treed269d4be15dfcce00c3984ff3082f11e61beb220 /block
parent2c9abfbfb37eb7da79d0a01be78b33440e085b7c (diff)
downloadkernel_samsung_tuna-02ab8685496603c74868e8fc91847cbe12d22a48.zip
kernel_samsung_tuna-02ab8685496603c74868e8fc91847cbe12d22a48.tar.gz
kernel_samsung_tuna-02ab8685496603c74868e8fc91847cbe12d22a48.tar.bz2
block: fix merge error from "block, cfq: move icq creation and rq->elv.icq
association to block core"
Diffstat (limited to 'block')
-rw-r--r--  block/cfq-iosched.c  170
1 files changed, 0 insertions, 170 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a103099..4009bc7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2941,176 +2941,6 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
}
/**
- * ioc_create_icq - create and link io_cq
- * @q: request_queue of interest
- * @gfp_mask: allocation mask
- *
- * Make sure io_cq linking %current->io_context and @q exists. If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
- *
- * The caller is responsible for ensuring @ioc won't go away and @q is
- * alive and will stay alive until this function returns.
- */
-static struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
-{
- struct elevator_type *et = q->elevator->type;
- struct io_context *ioc;
- struct io_cq *icq;
-
- /* allocate stuff */
- ioc = create_io_context(current, gfp_mask, q->node);
- if (!ioc)
- return NULL;
-
- icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
- q->node);
- if (!icq)
- return NULL;
-
- if (radix_tree_preload(gfp_mask) < 0) {
- kmem_cache_free(et->icq_cache, icq);
- return NULL;
- }
-
- icq->ioc = ioc;
- icq->q = q;
- INIT_LIST_HEAD(&icq->q_node);
- INIT_HLIST_NODE(&icq->ioc_node);
-
- /* lock both q and ioc and try to link @icq */
- spin_lock_irq(q->queue_lock);
- spin_lock(&ioc->lock);
-
- if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
- hlist_add_head(&icq->ioc_node, &ioc->icq_list);
- list_add(&icq->q_node, &q->icq_list);
- if (et->ops.elevator_init_icq_fn)
- et->ops.elevator_init_icq_fn(icq);
- } else {
- kmem_cache_free(et->icq_cache, icq);
- icq = ioc_lookup_icq(ioc, q);
- if (!icq)
- printk(KERN_ERR "cfq: icq link failed!\n");
- }
-
- spin_unlock(&ioc->lock);
- spin_unlock_irq(q->queue_lock);
- radix_tree_preload_end();
- return icq;
-}
-
-/**
- * cfq_get_cic - acquire cfq_io_cq and bump refcnt on io_context
- * @cfqd: cfqd to setup cic for
- * @gfp_mask: allocation mask
- *
- * Return cfq_io_cq associating @cfqd and %current->io_context and
- * bump refcnt on io_context. If ioc or cic doesn't exist, they're created
- * using @gfp_mask.
- *
- * Must be called under queue_lock which may be released and re-acquired.
- * This function also may sleep depending on @gfp_mask.
- */
-static struct cfq_io_cq *cfq_get_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
-{
- struct request_queue *q = cfqd->queue;
- struct cfq_io_cq *cic = NULL;
- struct io_context *ioc;
-
- lockdep_assert_held(q->queue_lock);
-
- while (true) {
- /* fast path */
- ioc = current->io_context;
- if (likely(ioc)) {
- cic = cfq_cic_lookup(cfqd, ioc);
- if (likely(cic))
- break;
- }
-
- /* slow path - unlock, create missing ones and retry */
- spin_unlock_irq(q->queue_lock);
- cic = icq_to_cic(ioc_create_icq(q, gfp_mask));
- spin_lock_irq(q->queue_lock);
- if (!cic)
- return NULL;
- }
-
- /* bump @ioc's refcnt and handle changed notifications */
- get_io_context(ioc);
-
- if (unlikely(cic->icq.changed)) {
- if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
- changed_ioprio(cic);
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
- changed_cgroup(cic);
-#endif
- }
-
- return cic;
-}
-
-/**
- * ioc_create_icq - create and link io_cq
- * @q: request_queue of interest
- * @gfp_mask: allocation mask
- *
- * Make sure io_cq linking %current->io_context and @q exists. If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
- *
- * The caller is responsible for ensuring @ioc won't go away and @q is
- * alive and will stay alive until this function returns.
- */
-static struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
-{
- struct elevator_type *et = q->elevator->type;
- struct io_context *ioc;
- struct io_cq *icq;
-
- /* allocate stuff */
- ioc = create_io_context(current, gfp_mask, q->node);
- if (!ioc)
- return NULL;
-
- icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
- q->node);
- if (!icq)
- return NULL;
-
- if (radix_tree_preload(gfp_mask) < 0) {
- kmem_cache_free(et->icq_cache, icq);
- return NULL;
- }
-
- icq->ioc = ioc;
- icq->q = q;
- INIT_LIST_HEAD(&icq->q_node);
- INIT_HLIST_NODE(&icq->ioc_node);
-
- /* lock both q and ioc and try to link @icq */
- spin_lock_irq(q->queue_lock);
- spin_lock(&ioc->lock);
-
- if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
- hlist_add_head(&icq->ioc_node, &ioc->icq_list);
- list_add(&icq->q_node, &q->icq_list);
- if (et->ops.elevator_init_icq_fn)
- et->ops.elevator_init_icq_fn(icq);
- } else {
- kmem_cache_free(et->icq_cache, icq);
- icq = ioc_lookup_icq(ioc, q);
- if (!icq)
- printk(KERN_ERR "cfq: icq link failed!\n");
- }
-
- spin_unlock(&ioc->lock);
- spin_unlock_irq(q->queue_lock);
- radix_tree_preload_end();
- return icq;
-}
-
-/**
* cfq_get_cic - acquire cfq_io_cq and bump refcnt on io_context
* @cfqd: cfqd to setup cic for
* @gfp_mask: allocation mask