author    | Andrew Morton <akpm@osdl.org> | 2005-06-27 20:14:05 -0700
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-27 20:31:02 -0700
commit    | 99f95e5286df2f69edab8a04c7080d986ee4233b (patch)
tree      | 91963ac1a2e0f55ec5a619cf0390bff1453a5de4
parent    | 6e5a32754c67f0d156c2f196d604b2e9129a1fd5 (diff)
download  | kernel_samsung_crespo-99f95e5286df2f69edab8a04c7080d986ee4233b.zip
          | kernel_samsung_crespo-99f95e5286df2f69edab8a04c7080d986ee4233b.tar.gz
          | kernel_samsung_crespo-99f95e5286df2f69edab8a04c7080d986ee4233b.tar.bz2
[PATCH] cfq build fix
drivers/block/cfq-iosched.c: In function 'cfq_put_queue':
drivers/block/cfq-iosched.c:303: sorry, unimplemented: inlining failed in call to 'cfq_pending_requests': function body not available
drivers/block/cfq-iosched.c:1080: sorry, unimplemented: called from here
drivers/block/cfq-iosched.c: In function '__cfq_may_queue':
drivers/block/cfq-iosched.c:1955: warning: the address of 'cfq_cfqq_must_alloc_slice', will always evaluate as 'true'
make[1]: *** [drivers/block/cfq-iosched.o] Error 1
make: *** [drivers/block/cfq-iosched.o] Error 2
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
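
Background on the errors above: the "inlining failed ... function body not available" messages come from cfq_put_queue() (and the call site at line 1080) using cfq_pending_requests(), which at that point exists only as a forward declaration; the kernel's "inline" is typically forced with gcc's always_inline attribute, and the gcc versions of that era refuse to inline a call whose body they have not seen yet. The patch therefore drops the forward declaration and moves cfq_pending_requests(), cfq_schedule_dispatch() and cfq_queue_empty() above their first callers. Below is a minimal standalone sketch of that failure mode, with illustrative names rather than kernel code; newer compilers may accept it, while older gcc fails as in the log above.

/*
 * Standalone sketch (illustrative names, not kernel code) of calling a
 * forced-inline helper before its body has been seen.
 */
#include <stdio.h>

/* forward declaration only; the body is further down in the file */
static inline __attribute__((always_inline)) int pending_requests(int busy);

static int queue_empty(int busy)
{
	/*
	 * gcc must inline pending_requests() here (always_inline), but its
	 * body is not yet available, so older gcc stops with
	 * "sorry, unimplemented: inlining failed ... function body not
	 * available".  Moving the definition above this caller, as the
	 * patch does, avoids the problem.
	 */
	return !pending_requests(busy);
}

/* the body only appears after the caller */
static inline __attribute__((always_inline)) int pending_requests(int busy)
{
	return busy > 0;
}

int main(void)
{
	printf("empty=%d\n", queue_empty(0));
	return 0;
}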
-rw-r--r-- | drivers/block/cfq-iosched.c | 49
1 files changed, 24 insertions, 25 deletions
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index ff1cc96..de5746e 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -300,7 +300,6 @@ CFQ_CRQ_FNS(requeued);
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
 static void cfq_put_cfqd(struct cfq_data *cfqd);
-static inline int cfq_pending_requests(struct cfq_data *cfqd);
 
 #define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
 
@@ -348,6 +347,28 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
 	return NULL;
 }
 
+static inline int cfq_pending_requests(struct cfq_data *cfqd)
+{
+	return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
+}
+
+/*
+ * scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing
+ */
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+{
+	if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
+		kblockd_schedule_work(&cfqd->unplug_work);
+}
+
+static int cfq_queue_empty(request_queue_t *q)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+
+	return !cfq_pending_requests(cfqd);
+}
+
 /*
  * Lifted from AS - choose which of crq1 and crq2 that is best served now.
  * We choose the request that is closest to the head right now. Distance
@@ -1072,16 +1093,6 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 }
 
 /*
- * scheduler run of queue, if there are requests pending and no one in the
- * driver that will restart queueing
- */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
-{
-	if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
-		kblockd_schedule_work(&cfqd->unplug_work);
-}
-
-/*
  * get next queue for service
  */
 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force)
@@ -1846,18 +1857,6 @@ cfq_insert_request(request_queue_t *q, struct request *rq, int where)
 	}
 }
 
-static inline int cfq_pending_requests(struct cfq_data *cfqd)
-{
-	return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
-}
-
-static int cfq_queue_empty(request_queue_t *q)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-
-	return !cfq_pending_requests(cfqd);
-}
-
 static void cfq_completed_request(request_queue_t *q, struct request *rq)
 {
 	struct cfq_rq *crq = RQ_DATA(rq);
@@ -1952,7 +1951,7 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 #if 1
 	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
-	    !cfq_cfqq_must_alloc_slice) {
+	    !cfq_cfqq_must_alloc_slice(cfqq)) {
 		cfq_mark_cfqq_must_alloc_slice(cfqq);
 		return ELV_MQUEUE_MUST;
 	}
@@ -1969,7 +1968,7 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
 	 * can quickly flood the queue with writes from a single task
 	 */
-	if (rw == READ || !cfq_cfqq_must_alloc_slice) {
+	if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
 		cfq_mark_cfqq_must_alloc_slice(cfqq);
 		return ELV_MQUEUE_MUST;
 	}
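
The warning at line 1955 points at the second bug the last two hunks fix: !cfq_cfqq_must_alloc_slice without the (cfqq) call parentheses tests the address of the function, which is never NULL, so the negated test is constant false; the first of the two ELV_MQUEUE_MUST checks could never fire and the second collapsed to rw == READ. A small standalone illustration of the same mistake follows (hypothetical names, not kernel code):

/*
 * Standalone illustration of gcc's "the address of ... will always
 * evaluate as 'true'" warning: without call parentheses, the function
 * designator decays to a pointer, which is never NULL, so the branch is
 * decided at compile time.
 */
#include <stdio.h>

static int must_alloc_slice(int q)
{
	return q & 1;
}

int main(void)
{
	int q = 2;

	/* Bug: tests the function's address, not its return value. */
	if (!must_alloc_slice)
		printf("unreachable: a function's address is never NULL\n");

	/* Fix (what the patch does): actually call the function. */
	if (!must_alloc_slice(q))
		printf("slice not yet marked for q=%d\n", q);

	return 0;
}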