author | Jens Axboe <axboe@suse.de> | 2006-07-19 23:39:40 +0200 |
---|---|---|
committer | Jens Axboe <axboe@nelson.home.kernel.dk> | 2006-09-30 20:29:39 +0200 |
commit | b5deef901282628d88c784f4c9d2f0583ec3b355 (patch) | |
tree | 1d3be92f18c9afd9426b06739c8f76931acbf03f /block | |
parent | a3b05e8f58c95dfccbf2c824d0c68e5990571f24 (diff) | |
[PATCH] Make sure all block/io scheduler setups are node aware
Some were kmalloc_node(), some were still kmalloc(). Change them all to
kmalloc_node().
Signed-off-by: Jens Axboe <axboe@suse.de>
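For illustration only (not part of the patch): a minimal sketch of the conversion pattern this commit applies, using a made-up scheduler name (`example_*` identifiers are hypothetical). Rather than kmalloc(), which allocates on the node of whichever CPU runs the init path, the per-queue data is placed on the request queue's own node via kmalloc_node() and q->node.

```c
/*
 * Hypothetical elevator init path sketching the kmalloc() -> kmalloc_node()
 * conversion performed by this patch; example_* names are invented for
 * this sketch and mirror the noop-iosched change below.
 */
#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_data {
	struct list_head queue;
};

static void *example_init_queue(request_queue_t *q, elevator_t *e)
{
	struct example_data *ed;

	/* Before: ed = kmalloc(sizeof(*ed), GFP_KERNEL); */
	ed = kmalloc_node(sizeof(*ed), GFP_KERNEL, q->node);
	if (!ed)
		return NULL;

	INIT_LIST_HEAD(&ed->queue);
	return ed;
}
```

The same substitution carries over to kmem_cache_alloc() vs kmem_cache_alloc_node(), and to get_io_context()/current_io_context(), which now take the node as an extra argument.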
Diffstat (limited to 'block')
-rw-r--r-- | block/as-iosched.c   |  8 |
-rw-r--r-- | block/cfq-iosched.c  | 13 |
-rw-r--r-- | block/elevator.c     | 11 |
-rw-r--r-- | block/ll_rw_blk.c    | 13 |
-rw-r--r-- | block/noop-iosched.c |  2 |
5 files changed, 25 insertions, 22 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 8e1fef1..f6dc954 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -210,9 +210,9 @@ static struct as_io_context *alloc_as_io_context(void)
  * If the current task has no AS IO context then create one and initialise it.
  * Then take a ref on the task's io context and return it.
  */
-static struct io_context *as_get_io_context(void)
+static struct io_context *as_get_io_context(int node)
 {
-	struct io_context *ioc = get_io_context(GFP_ATOMIC);
+	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
 	if (ioc && !ioc->aic) {
 		ioc->aic = alloc_as_io_context();
 		if (!ioc->aic) {
@@ -1148,7 +1148,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	data_dir = rq_is_sync(rq);
-	rq->elevator_private = as_get_io_context();
+	rq->elevator_private = as_get_io_context(q->node);
 	if (RQ_IOC(rq)) {
 		as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
@@ -1292,7 +1292,7 @@ static int as_may_queue(request_queue_t *q, int rw)
 	struct io_context *ioc;
 	if (ad->antic_status == ANTIC_WAIT_REQ || ad->antic_status == ANTIC_WAIT_NEXT) {
-		ioc = as_get_io_context();
+		ioc = as_get_io_context(q->node);
 		if (ad->io_context == ioc)
 			ret = ELV_MQUEUE_MUST;
 		put_io_context(ioc);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 85f1d87..0452108 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1148,8 +1148,9 @@ static void cfq_exit_io_context(struct io_context *ioc)
 static struct cfq_io_context *
 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
-	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
+	struct cfq_io_context *cic;
 
+	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
 	if (cic) {
 		memset(cic, 0, sizeof(*cic));
 		cic->last_end_request = jiffies;
@@ -1277,11 +1278,11 @@ retry:
 			 * free memory.
 			 */
 			spin_unlock_irq(cfqd->queue->queue_lock);
-			new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask|__GFP_NOFAIL);
+			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
 			spin_lock_irq(cfqd->queue->queue_lock);
 			goto retry;
 		} else {
-			cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
 			if (!cfqq)
 				goto out;
 		}
@@ -1407,7 +1408,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	might_sleep_if(gfp_mask & __GFP_WAIT);
-	ioc = get_io_context(gfp_mask);
+	ioc = get_io_context(gfp_mask, cfqd->queue->node);
 	if (!ioc)
 		return NULL;
@@ -1955,7 +1956,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	struct cfq_data *cfqd;
 	int i;
-	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
+	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
 	if (!cfqd)
 		return NULL;
@@ -1970,7 +1971,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&cfqd->empty_list);
 	INIT_LIST_HEAD(&cfqd->cic_list);
-	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
 	if (!cfqd->cfq_hash)
 		goto out_free;
diff --git a/block/elevator.c b/block/elevator.c
index 788d2d8..e643291 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -161,12 +161,12 @@ __setup("elevator=", elevator_setup);
 static struct kobj_type elv_ktype;
-static elevator_t *elevator_alloc(struct elevator_type *e)
+static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
 {
 	elevator_t *eq;
 	int i;
-	eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
 	if (unlikely(!eq))
 		goto err;
@@ -178,7 +178,8 @@ static elevator_t *elevator_alloc(struct elevator_type *e)
 	eq->kobj.ktype = &elv_ktype;
 	mutex_init(&eq->sysfs_lock);
-	eq->hash = kmalloc(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL);
+	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
+				GFP_KERNEL, q->node);
 	if (!eq->hash)
 		goto err;
@@ -224,7 +225,7 @@ int elevator_init(request_queue_t *q, char *name)
 		e = elevator_get("noop");
 	}
-	eq = elevator_alloc(e);
+	eq = elevator_alloc(q, e);
 	if (!eq)
 		return -ENOMEM;
@@ -987,7 +988,7 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	/*
 	 * Allocate new elevator
 	 */
-	e = elevator_alloc(new_e);
+	e = elevator_alloc(q, new_e);
 	if (!e)
 		return 0;
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4b7b446..c6dfa88 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -39,6 +39,7 @@ static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(request_queue_t *q, struct bio *bio);
+static struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
 /*
  * For the allocated request tables
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
 		if (rl->count[rw]+1 >= q->nr_requests) {
-			ioc = current_io_context(GFP_ATOMIC);
+			ioc = current_io_context(GFP_ATOMIC, q->node);
 			/*
 			 * The queue will fill after this allocation, so set
 			 * it as full, and mark this process as "batching".
@@ -2234,7 +2235,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 	 * up to a big batch of them for a small period time.
 	 * See ioc_batching, ioc_set_batching
 	 */
-	ioc = current_io_context(GFP_NOIO);
+	ioc = current_io_context(GFP_NOIO, q->node);
 	ioc_set_batching(q, ioc);
 	spin_lock_irq(q->queue_lock);
@@ -3641,7 +3642,7 @@ void exit_io_context(void)
  * but since the current task itself holds a reference, the context can be
  * used in general code, so long as it stays within `current` context.
  */
-struct io_context *current_io_context(gfp_t gfp_flags)
+static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
 	struct task_struct *tsk = current;
 	struct io_context *ret;
@@ -3650,7 +3651,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
 	if (likely(ret))
 		return ret;
-	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
 		atomic_set(&ret->refcount, 1);
 		ret->task = current;
@@ -3674,10 +3675,10 @@ EXPORT_SYMBOL(current_io_context);
  *
  * This is always called in the context of the task which submitted the I/O.
  */
-struct io_context *get_io_context(gfp_t gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
 	struct io_context *ret;
-	ret = current_io_context(gfp_flags);
+	ret = current_io_context(gfp_flags, node);
 	if (likely(ret))
 		atomic_inc(&ret->refcount);
 	return ret;
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 56a7c62..79af431 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -69,7 +69,7 @@ static void *noop_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct noop_data *nd;
-	nd = kmalloc(sizeof(*nd), GFP_KERNEL);
+	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
 	if (!nd)
 		return NULL;
 	INIT_LIST_HEAD(&nd->queue);