author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 10:09:16 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 10:09:16 -0700
commit    92d15c2ccbb3e31a3fc71ad28fdb55e1319383c0 (patch)
tree      8d83c0dc3c6b935d8367e331872f242b742f0a8a /include
parent    f20bf6125605acbbc7eb8c9420d7221c91aa83eb (diff)
parent    644bd2f048972d75eb1979b1fdca257d528ce687 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block: (63 commits)
  Fix memory leak in dm-crypt
  SPARC64: sg chaining support
  SPARC: sg chaining support
  PPC: sg chaining support
  PS3: sg chaining support
  IA64: sg chaining support
  x86-64: enable sg chaining
  x86-64: update pci-gart iommu to sg helpers
  x86-64: update nommu to sg helpers
  x86-64: update calgary iommu to sg helpers
  swiotlb: sg chaining support
  i386: enable sg chaining
  i386 dma_map_sg: convert to using sg helpers
  mmc: need to zero sglist on init
  Panic in blk_rq_map_sg() from CCISS driver
  remove sglist_len
  remove blk_queue_max_phys_segments in libata
  revert sg segment size ifdefs
  Fixup u14-34f ENABLE_SG_CHAINING
  qla1280: enable use_sg_chaining option
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-ia64/dma-mapping.h      2
-rw-r--r--  include/asm-ia64/scatterlist.h      2
-rw-r--r--  include/asm-powerpc/dma-mapping.h   158
-rw-r--r--  include/asm-powerpc/scatterlist.h   2
-rw-r--r--  include/asm-sparc/scatterlist.h     2
-rw-r--r--  include/asm-sparc64/scatterlist.h   2
-rw-r--r--  include/asm-x86/dma-mapping_32.h    13
-rw-r--r--  include/asm-x86/dma-mapping_64.h    3
-rw-r--r--  include/asm-x86/scatterlist_32.h    2
-rw-r--r--  include/asm-x86/scatterlist_64.h    2
-rw-r--r--  include/linux/bio.h                 19
-rw-r--r--  include/linux/blkdev.h              8
-rw-r--r--  include/linux/i2o.h                 3
-rw-r--r--  include/linux/ide.h                 7
-rw-r--r--  include/linux/libata.h              16
-rw-r--r--  include/linux/scatterlist.h         84
-rw-r--r--  include/scsi/scsi.h                 7
-rw-r--r--  include/scsi/scsi_cmnd.h            7
-rw-r--r--  include/scsi/scsi_host.h            13
19 files changed, 165 insertions, 187 deletions
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 3ca6d5c..f1735a2 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -6,7 +6,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/machvec.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#define dma_alloc_coherent platform_dma_alloc_coherent
/* coherent mem. is cheap */
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
index a452ea2..7d5234d 100644
--- a/include/asm-ia64/scatterlist.h
+++ b/include/asm-ia64/scatterlist.h
@@ -30,4 +30,6 @@ struct scatterlist {
#define sg_dma_len(sg) ((sg)->dma_length)
#define sg_dma_address(sg) ((sg)->dma_address)
+#define ARCH_HAS_SG_CHAIN
+
#endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index d058916..2af321f 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -6,149 +6,6 @@
*/
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/io.h>
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-/*
- * DMA-consistent mapping functions for PowerPCs that don't support
- * cache snooping. These allocate/free a region of uncached mapped
- * memory space for use with DMA devices. Alternatively, you could
- * allocate the space "normally" and use the cache management functions
- * to ensure it is consistent.
- */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
-extern void __dma_free_coherent(size_t size, void *vaddr);
-extern void __dma_sync(void *vaddr, size_t size, int direction);
-extern void __dma_sync_page(struct page *page, unsigned long offset,
- size_t size, int direction);
-
-#else /* ! CONFIG_NOT_COHERENT_CACHE */
-/*
- * Cache coherent cores.
- */
-
-#define __dma_alloc_coherent(gfp, size, handle) NULL
-#define __dma_free_coherent(size, addr) ((void)0)
-#define __dma_sync(addr, size, rw) ((void)0)
-#define __dma_sync_page(pg, off, sz, rw) ((void)0)
-
-#endif /* ! CONFIG_NOT_COHERENT_CACHE */
-
-#ifdef CONFIG_PPC64
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
- void * (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
- void (*free_coherent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *dev, void *ptr,
- size_t size, enum dma_data_direction direction);
- void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction);
- int (*map_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
- int (*dma_supported)(struct device *dev, u64 mask);
- int (*set_dma_mask)(struct device *dev, u64 dma_mask);
-};
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
- /* We don't handle the NULL dev case for ISA for now. We could
- * do it via an out of line call but it is not needed for now. The
- * only ISA DMA device we support is the floppy and we have a hack
- * in the floppy driver directly to get a device for us.
- */
- if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
- return NULL;
- return dev->archdata.dma_ops;
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- if (unlikely(dma_ops == NULL))
- return 0;
- if (dma_ops->dma_supported == NULL)
- return 1;
- return dma_ops->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- if (unlikely(dma_ops == NULL))
- return -EIO;
- if (dma_ops->set_dma_mask != NULL)
- return dma_ops->set_dma_mask(dev, dma_mask);
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
- *dev->dma_mask = dma_mask;
- return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
- return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
- return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
- dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
- return dma_ops->map_single(dev, page_address(page) + offset, size,
- direction);
-}
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size,
@@ -276,14 +133,15 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
}
static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
+ struct scatterlist *sg;
int i;
BUG_ON(direction == DMA_NONE);
- for (i = 0; i < nents; i++, sg++) {
+ for_each_sg(sgl, sg, nents, i) {
BUG_ON(!sg->page);
__dma_sync_page(sg->page, sg->offset, sg->length, direction);
sg->dma_address = page_to_bus(sg->page) + sg->offset;
@@ -318,26 +176,28 @@ static inline void dma_sync_single_for_device(struct device *dev,
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nents,
+ struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
+ struct scatterlist *sg;
int i;
BUG_ON(direction == DMA_NONE);
- for (i = 0; i < nents; i++, sg++)
+ for_each_sg(sgl, sg, nents, i)
__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nents,
+ struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
+ struct scatterlist *sg;
int i;
BUG_ON(direction == DMA_NONE);
- for (i = 0; i < nents; i++, sg++)
+ for_each_sg(sgl, sg, nents, i)
__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
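
[editor's note] The hunks above show the pattern this merge repeats across architectures: an open-coded sg++ walk is only correct while the scatterlist is one flat array, whereas for_each_sg() also follows chain entries. A minimal sketch of the same conversion in a hypothetical driver; fill_descriptors() and setup_descriptor() are illustrative names, not part of this merge:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* hypothetical hardware hook, for illustration only */
static void setup_descriptor(struct device *dev, dma_addr_t addr, unsigned int len);

static void fill_descriptors(struct device *dev, struct scatterlist *sglist, int nents)
{
	struct scatterlist *sg;
	int i;

	/*
	 * Before: for (i = 0; i < nents; i++, sg++) -- correct only for a
	 * flat array.  for_each_sg() follows chain entries as well, so the
	 * same loop works for chained scatterlists too.
	 */
	for_each_sg(sglist, sg, nents, i)
		setup_descriptor(dev, sg_dma_address(sg), sg_dma_len(sg));
}
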
diff --git a/include/asm-powerpc/scatterlist.h b/include/asm-powerpc/scatterlist.h
index 8c992d1..b075f61 100644
--- a/include/asm-powerpc/scatterlist.h
+++ b/include/asm-powerpc/scatterlist.h
@@ -41,5 +41,7 @@ struct scatterlist {
#define ISA_DMA_THRESHOLD (~0UL)
#endif
+#define ARCH_HAS_SG_CHAIN
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/include/asm-sparc/scatterlist.h b/include/asm-sparc/scatterlist.h
index a4fcf9a..4055af9 100644
--- a/include/asm-sparc/scatterlist.h
+++ b/include/asm-sparc/scatterlist.h
@@ -19,4 +19,6 @@ struct scatterlist {
#define ISA_DMA_THRESHOLD (~0UL)
+#define ARCH_HAS_SG_CHAIN
+
#endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/include/asm-sparc64/scatterlist.h b/include/asm-sparc64/scatterlist.h
index 048fdb4..703c5bb 100644
--- a/include/asm-sparc64/scatterlist.h
+++ b/include/asm-sparc64/scatterlist.h
@@ -20,4 +20,6 @@ struct scatterlist {
#define ISA_DMA_THRESHOLD (~0UL)
+#define ARCH_HAS_SG_CHAIN
+
#endif /* !(_SPARC64_SCATTERLIST_H) */
diff --git a/include/asm-x86/dma-mapping_32.h b/include/asm-x86/dma-mapping_32.h
index f1d72d1..6a2d26c 100644
--- a/include/asm-x86/dma-mapping_32.h
+++ b/include/asm-x86/dma-mapping_32.h
@@ -2,10 +2,10 @@
#define _ASM_I386_DMA_MAPPING_H
#include <linux/mm.h>
+#include <linux/scatterlist.h>
#include <asm/cache.h>
#include <asm/io.h>
-#include <asm/scatterlist.h>
#include <asm/bug.h>
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -35,18 +35,19 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
}
static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
enum dma_data_direction direction)
{
+ struct scatterlist *sg;
int i;
BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nents == 0 || sg[0].length == 0);
+ WARN_ON(nents == 0 || sglist[0].length == 0);
- for (i = 0; i < nents; i++ ) {
- BUG_ON(!sg[i].page);
+ for_each_sg(sglist, sg, nents, i) {
+ BUG_ON(!sg->page);
- sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+ sg->dma_address = page_to_phys(sg->page) + sg->offset;
}
flush_write_buffers();
diff --git a/include/asm-x86/dma-mapping_64.h b/include/asm-x86/dma-mapping_64.h
index 6897e2a..ecd0f61 100644
--- a/include/asm-x86/dma-mapping_64.h
+++ b/include/asm-x86/dma-mapping_64.h
@@ -6,8 +6,7 @@
* documentation.
*/
-
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
diff --git a/include/asm-x86/scatterlist_32.h b/include/asm-x86/scatterlist_32.h
index d7e45a8..bd5164a 100644
--- a/include/asm-x86/scatterlist_32.h
+++ b/include/asm-x86/scatterlist_32.h
@@ -10,6 +10,8 @@ struct scatterlist {
unsigned int length;
};
+#define ARCH_HAS_SG_CHAIN
+
/* These macros should be used after a pci_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries pci_map_sg
diff --git a/include/asm-x86/scatterlist_64.h b/include/asm-x86/scatterlist_64.h
index eaf7ada..ef3986b 100644
--- a/include/asm-x86/scatterlist_64.h
+++ b/include/asm-x86/scatterlist_64.h
@@ -11,6 +11,8 @@ struct scatterlist {
unsigned int dma_length;
};
+#define ARCH_HAS_SG_CHAIN
+
#define ISA_DMA_THRESHOLD (0x00ffffff)
/* These macros should be used after a pci_map_sg call has been done
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 089a8bc..4da4413 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -176,13 +176,28 @@ struct bio {
#define bio_offset(bio) bio_iovec((bio))->bv_offset
#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio) ((bio)->bi_size >> 9)
-#define bio_cur_sectors(bio) (bio_iovec(bio)->bv_len >> 9)
-#define bio_data(bio) (page_address(bio_page((bio))) + bio_offset((bio)))
#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
+#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size)
+
+static inline unsigned int bio_cur_sectors(struct bio *bio)
+{
+ if (bio->bi_vcnt)
+ return bio_iovec(bio)->bv_len >> 9;
+
+ return 0;
+}
+
+static inline void *bio_data(struct bio *bio)
+{
+ if (bio->bi_vcnt)
+ return page_address(bio_page(bio)) + bio_offset(bio);
+
+ return NULL;
+}
/*
* will die
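
[editor's note] With empty barrier bios now possible, block drivers can no longer assume every bio carries data: bio_data() returns NULL and bio_cur_sectors() returns 0 for a payload-less bio. A hedged sketch of the check a simple driver might add; handle_bio(), flush_device_cache() and process_data() are hypothetical helpers, and bio_data() is only valid when the payload sits in contiguous lowmem:

#include <linux/bio.h>

/* hypothetical helpers, for illustration only */
static void flush_device_cache(void);
static void process_data(void *buf, unsigned int nbytes);

static void handle_bio(struct bio *bio)
{
	if (bio_empty_barrier(bio)) {
		/* no payload: the bio is purely an ordering/flush point */
		flush_device_cache();
		return;
	}

	/* safe: the bio has at least one vec here */
	process_data(bio_data(bio), bio_cur_sectors(bio) << 9);
}
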
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5ed888b..bbf906a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -330,7 +330,6 @@ typedef void (unplug_fn) (struct request_queue *);
struct bio_vec;
typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
@@ -368,7 +367,6 @@ struct request_queue
prep_rq_fn *prep_rq_fn;
unplug_fn *unplug_fn;
merge_bvec_fn *merge_bvec_fn;
- issue_flush_fn *issue_flush_fn;
prepare_flush_fn *prepare_flush_fn;
softirq_done_fn *softirq_done_fn;
@@ -540,6 +538,7 @@ enum {
#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
+#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
@@ -729,7 +728,9 @@ static inline void blk_run_address_space(struct address_space *mapping)
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *, int);
-extern void end_request(struct request *req, int uptodate);
+extern void end_request(struct request *, int);
+extern void end_queued_request(struct request *, int);
+extern void end_dequeued_request(struct request *, int);
extern void blk_complete_request(struct request *);
/*
@@ -767,7 +768,6 @@ extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 9752307..7da5b98 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -32,6 +32,7 @@
#include <linux/workqueue.h> /* work_struct */
#include <linux/mempool.h>
#include <linux/mutex.h>
+#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/semaphore.h> /* Needed for MUTEX init macros */
@@ -837,7 +838,7 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
#endif
- sg++;
+ sg = sg_next(sg);
}
*sg_ptr = mptr;
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 02a27e8..30a1931 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -772,7 +772,7 @@ typedef struct hwif_s {
unsigned int nsect;
unsigned int nleft;
- unsigned int cursg;
+ struct scatterlist *cursg;
unsigned int cursg_ofs;
int rqsize; /* max sectors per request */
@@ -1093,11 +1093,6 @@ extern ide_startstop_t ide_do_reset (ide_drive_t *);
extern void ide_init_drive_cmd (struct request *rq);
/*
- * this function returns error location sector offset in case of a write error
- */
-extern u64 ide_get_error_location(ide_drive_t *, char *);
-
-/*
* "action" parameter type for ide_do_drive_cmd() below.
*/
typedef enum {
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 229a9ff..377e6d4 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -29,7 +29,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/ata.h>
#include <linux/workqueue.h>
@@ -416,6 +416,7 @@ struct ata_queued_cmd {
unsigned long flags; /* ATA_QCFLAG_xxx */
unsigned int tag;
unsigned int n_elem;
+ unsigned int n_iter;
unsigned int orig_n_elem;
int dma_dir;
@@ -426,7 +427,7 @@ struct ata_queued_cmd {
unsigned int nbytes;
unsigned int curbytes;
- unsigned int cursg;
+ struct scatterlist *cursg;
unsigned int cursg_ofs;
struct scatterlist sgent;
@@ -1043,7 +1044,7 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
return 1;
if (qc->pad_len)
return 0;
- if (((sg - qc->__sg) + 1) == qc->n_elem)
+ if (qc->n_iter == qc->n_elem)
return 1;
return 0;
}
@@ -1051,6 +1052,7 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
static inline struct scatterlist *
ata_qc_first_sg(struct ata_queued_cmd *qc)
{
+ qc->n_iter = 0;
if (qc->n_elem)
return qc->__sg;
if (qc->pad_len)
@@ -1063,8 +1065,8 @@ ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
if (sg == &qc->pad_sgent)
return NULL;
- if (++sg - qc->__sg < qc->n_elem)
- return sg;
+ if (++qc->n_iter < qc->n_elem)
+ return sg_next(sg);
if (qc->pad_len)
return &qc->pad_sgent;
return NULL;
@@ -1309,9 +1311,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
qc->dma_dir = DMA_NONE;
qc->__sg = NULL;
qc->flags = 0;
- qc->cursg = qc->cursg_ofs = 0;
+ qc->cursg = NULL;
+ qc->cursg_ofs = 0;
qc->nbytes = qc->curbytes = 0;
qc->n_elem = 0;
+ qc->n_iter = 0;
qc->err_mask = 0;
qc->pad_len = 0;
qc->sect_size = ATA_SECT_SIZE;
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 4efbd9c..2dc7464 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -20,4 +20,88 @@ static inline void sg_init_one(struct scatterlist *sg, const void *buf,
sg_set_buf(sg, buf, buflen);
}
+/*
+ * We overload the LSB of the page pointer to indicate whether it's
+ * a valid sg entry, or whether it points to the start of a new scatterlist.
+ * Those low bits are there for everyone! (thanks mason :-)
+ */
+#define sg_is_chain(sg) ((unsigned long) (sg)->page & 0x01)
+#define sg_chain_ptr(sg) \
+ ((struct scatterlist *) ((unsigned long) (sg)->page & ~0x01))
+
+/**
+ * sg_next - return the next scatterlist entry in a list
+ * @sg: The current sg entry
+ *
+ * Usually the next entry will be @sg@ + 1, but if this sg element is part
+ * of a chained scatterlist, it could jump to the start of a new
+ * scatterlist array.
+ *
+ * Note that the caller must ensure that there are further entries after
+ * the current entry, this function will NOT return NULL for an end-of-list.
+ *
+ */
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+ sg++;
+
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+
+ return sg;
+}
+
+/*
+ * Loop over each sg element, following the pointer to a new list if necessary
+ */
+#define for_each_sg(sglist, sg, nr, __i) \
+ for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+
+/**
+ * sg_last - return the last scatterlist entry in a list
+ * @sgl: First entry in the scatterlist
+ * @nents: Number of entries in the scatterlist
+ *
+ * Should only be used casually, it (currently) scan the entire list
+ * to get the last entry.
+ *
+ * Note that the @sgl@ pointer passed in need not be the first one,
+ * the important bit is that @nents@ denotes the number of entries that
+ * exist from @sgl@.
+ *
+ */
+static inline struct scatterlist *sg_last(struct scatterlist *sgl,
+ unsigned int nents)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+ struct scatterlist *ret = &sgl[nents - 1];
+#else
+ struct scatterlist *sg, *ret = NULL;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ ret = sg;
+
+#endif
+ return ret;
+}
+
+/**
+ * sg_chain - Chain two sglists together
+ * @prv: First scatterlist
+ * @prv_nents: Number of entries in prv
+ * @sgl: Second scatterlist
+ *
+ * Links @prv@ and @sgl@ together, to form a longer scatterlist.
+ *
+ */
+static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
+ struct scatterlist *sgl)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+ BUG();
+#endif
+ prv[prv_nents - 1].page = (struct page *) ((unsigned long) sgl | 0x01);
+}
+
#endif /* _LINUX_SCATTERLIST_H */
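
[editor's note] Put together, sg_chain() and for_each_sg() let code stitch two fixed-size scatterlist arrays into one logical list and walk it without knowing where the boundary falls. A minimal sketch under stated assumptions (the buffers are kmalloc'd lowmem as sg_set_buf() requires, the arrays are zeroed first, and the architecture defines ARCH_HAS_SG_CHAIN); nothing below is taken from the merge itself:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void sg_chain_example(void *buf_a, void *buf_b, unsigned int len)
{
	struct scatterlist first[2], second[1];
	struct scatterlist *sg;
	int i;

	memset(first, 0, sizeof(first));
	memset(second, 0, sizeof(second));

	sg_set_buf(&first[0], buf_a, len);	/* one data entry here ... */
	sg_set_buf(&second[0], buf_b, len);	/* ... and one in the second array */

	/* the last slot of 'first' is consumed as the link to 'second' */
	sg_chain(first, 2, second);

	/*
	 * sg_next()/for_each_sg() skip the chain entry transparently; the
	 * count is the number of data entries (2), not array slots (3).
	 */
	for_each_sg(first, sg, 2, i)
		printk(KERN_DEBUG "sg %d: offset %u length %u\n",
		       i, sg->offset, sg->length);
}
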
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 9f8f80a..702fcfe 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -11,13 +11,6 @@
#include <linux/types.h>
/*
- * The maximum sg list length SCSI can cope with
- * (currently must be a power of 2 between 32 and 256)
- */
-#define SCSI_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
-
-
-/*
* SCSI command lengths
*/
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 65ab514..3f47e52 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/types.h>
#include <linux/timer.h>
+#include <linux/scatterlist.h>
struct request;
struct scatterlist;
@@ -68,7 +69,7 @@ struct scsi_cmnd {
/* These elements define the operation we ultimately want to perform */
unsigned short use_sg; /* Number of pieces of scatter-gather */
- unsigned short sglist_len; /* size of malloc'd scatter-gather list */
+ unsigned short __use_sg;
unsigned underflow; /* Return error if less than
this amount is transferred */
@@ -128,7 +129,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
extern void scsi_kunmap_atomic_sg(void *virt);
extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
-extern void scsi_free_sgtable(struct scatterlist *, int);
+extern void scsi_free_sgtable(struct scsi_cmnd *);
extern int scsi_dma_map(struct scsi_cmnd *cmd);
extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
@@ -148,6 +149,6 @@ static inline int scsi_get_resid(struct scsi_cmnd *cmd)
}
#define scsi_for_each_sg(cmd, sg, nseg, __i) \
- for (__i = 0, sg = scsi_sglist(cmd); __i < (nseg); __i++, (sg)++)
+ for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
#endif /* _SCSI_SCSI_CMND_H */
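
[editor's note] Because scsi_for_each_sg() now expands to for_each_sg(), low-level drivers that build their hardware tables through the accessor keep working once the midlayer hands them a chained table. A hedged sketch of the usual mapping loop; build_sg_table() and write_hw_segment() are hypothetical names, not from this merge:

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* hypothetical hardware descriptor writer, for illustration only */
static void write_hw_segment(dma_addr_t addr, unsigned int len);

static void build_sg_table(struct scsi_cmnd *cmd, int nseg)
{
	struct scatterlist *sg;
	int i;

	/* walks the (possibly chained) table via for_each_sg() */
	scsi_for_each_sg(cmd, sg, nseg, i)
		write_hw_segment(sg_dma_address(sg), sg_dma_len(sg));
}
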
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7d210cd..0fd4746 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -39,6 +39,9 @@ struct blk_queue_tags;
#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1
+#define DISABLE_SG_CHAINING 0
+#define ENABLE_SG_CHAINING 1
+
enum scsi_eh_timer_return {
EH_NOT_HANDLED,
EH_HANDLED,
@@ -443,6 +446,15 @@ struct scsi_host_template {
unsigned ordered_tag:1;
/*
+ * true if the low-level driver can support sg chaining. this
+ * will be removed eventually when all the drivers are
+ * converted to support sg chaining.
+ *
+ * Status: OBSOLETE
+ */
+ unsigned use_sg_chaining:1;
+
+ /*
* Countdown for host blocking with no commands outstanding
*/
unsigned int max_host_blocked;
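
[editor's note] Until every driver is converted, a low-level driver advertises that it copes with chained tables by setting the new template bit. A sketch of what that might look like in a hypothetical host template (other mandatory hooks omitted; illustrative only):

#include <scsi/scsi_host.h>

/* hypothetical template fragment, for illustration only */
static struct scsi_host_template example_sht = {
	.name			= "example",
	.this_id		= -1,
	.sg_tablesize		= 128,
	.use_clustering		= ENABLE_CLUSTERING,
	.use_sg_chaining	= ENABLE_SG_CHAINING,	/* driver handles chained sg */
};
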
@@ -586,6 +598,7 @@ struct Scsi_Host {
unsigned unchecked_isa_dma:1;
unsigned use_clustering:1;
unsigned use_blk_tcq:1;
+ unsigned use_sg_chaining:1;
/*
* Host has requested that no further requests come through for the