diff options
author | Martin K. Petersen <martin.petersen@oracle.com> | 2009-06-03 09:33:18 +0200 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-06-03 09:33:18 +0200 |
commit | a05c0205ba031c01bba33a21bf0a35920eb64833 (patch) | |
tree | 517c7682fc415cdb122ee7fcfc75eff674cc7b78 | |
parent | dbdc9dd342f0a7e32f40f0d4ade662bdfe057484 (diff) | |
download | kernel_samsung_tuna-a05c0205ba031c01bba33a21bf0a35920eb64833.zip kernel_samsung_tuna-a05c0205ba031c01bba33a21bf0a35920eb64833.tar.gz kernel_samsung_tuna-a05c0205ba031c01bba33a21bf0a35920eb64833.tar.bz2 |
block: Fix bounce limit setting in DM
blk_queue_bounce_limit() is more than a wrapper around the request queue
limits.bounce_pfn variable. Introduce blk_queue_bounce_pfn() which can
be called by stacking drivers that wish to set the bounce limit
explicitly.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r-- | block/blk-settings.c | 17 | ||||
-rw-r--r-- | drivers/md/dm-table.c | 2 | ||||
-rw-r--r-- | include/linux/blkdev.h | 1 |
3 files changed, 19 insertions, 1 deletion
diff --git a/block/blk-settings.c b/block/blk-settings.c index 8d33934..9acd0b7 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -194,6 +194,23 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask) EXPORT_SYMBOL(blk_queue_bounce_limit); /** + * blk_queue_bounce_pfn - set the bounce buffer limit for queue + * @q: the request queue for the device + * @pfn: max address + * + * Description: + * This function is similar to blk_queue_bounce_limit except it + * neither changes allocation flags, nor does it set up the ISA DMA + * pool. This function should only be used by stacking drivers. + * Hardware drivers should use blk_queue_bounce_limit instead. + */ +void blk_queue_bounce_pfn(struct request_queue *q, u64 pfn) +{ + q->limits.bounce_pfn = pfn; +} +EXPORT_SYMBOL(blk_queue_bounce_pfn); + +/** * blk_queue_max_sectors - set max sectors for a request for this queue * @q: the request queue for the device * @max_sectors: max sectors in the usual 512b unit diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e9a73bb..3ca1604 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -920,7 +920,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) blk_queue_max_segment_size(q, t->limits.max_segment_size); blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors); blk_queue_segment_boundary(q, t->limits.seg_boundary_mask); - blk_queue_bounce_limit(q, t->limits.bounce_pfn); + blk_queue_bounce_pfn(q, t->limits.bounce_pfn); if (t->limits.no_cluster) queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5e740a1..989aa17 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -910,6 +910,7 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); extern void blk_cleanup_queue(struct request_queue *); extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void 
blk_queue_bounce_limit(struct request_queue *, u64); +extern void blk_queue_bounce_pfn(struct request_queue *, u64); extern void blk_queue_max_sectors(struct request_queue *, unsigned int); extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); |