author     Linus Torvalds <torvalds@linux-foundation.org>   2008-04-02 07:46:41 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-04-02 07:46:41 -0700
commit     4c61f72c72e0e1181f91831f106c5f7306238706 (patch)
tree       448350a5bf4a8b53417cfa97ad234be05c2a4590 /arch
parent     2f819ae8816990aaad46dd6d1748a096d136df68 (diff)
parent     c819914e0d5cc85cccf5fe7542fcb9586286bc28 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
  sparc64: remove unused calc_npages() in iommu_common.h
  sparc64: add the segment boundary checking to IOMMUs while merging SG entries
  [SPARC64]: Don't open-code {get,put}_cpu_var() in flush_tlb_pending().
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc64/kernel/iommu.c         12
-rw-r--r--  arch/sparc64/kernel/iommu_common.h  18
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c     12
-rw-r--r--  arch/sparc64/mm/tlb.c                7
4 files changed, 31 insertions(+), 18 deletions(-)
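
The IOMMU half of this merge converts the device's DMA segment boundary (a byte mask, commonly 0xffffffff for a 4 GiB boundary) into a count of IOMMU pages, so the map_sg paths can test boundary crossings in page-table-slot units. Below is a minimal userspace sketch of that conversion, assuming sparc64's 8 KiB IO_PAGE_SIZE and a locally reimplemented ALIGN(); both are this sketch's assumptions, not part of the patch context.

#include <stdio.h>

#define IO_PAGE_SHIFT 13UL                    /* sparc64 IOMMU page: 8 KiB (assumed here) */
#define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
#define ALIGN(x, a)   (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	/* dma_get_seg_boundary() commonly returns 0xffffffff (a 4 GiB boundary). */
	unsigned long seg_boundary_mask = 0xffffffffUL;

	/* Same expression the patch adds in dma_4u_map_sg()/dma_4v_map_sg(). */
	unsigned long seg_boundary_size =
		ALIGN(seg_boundary_mask + 1, IO_PAGE_SIZE) >> IO_PAGE_SHIFT;

	printf("boundary = %lu IOMMU pages\n", seg_boundary_size); /* 524288 */
	return 0;
}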
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index b781d3d..756fa24 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -516,9 +516,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long flags, handle, prot, ctx;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
+ unsigned long seg_boundary_size;
int outcount, incount, i;
struct strbuf *strbuf;
struct iommu *iommu;
+ unsigned long base_shift;
BUG_ON(direction == DMA_NONE);
@@ -549,8 +551,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
outs->dma_length = 0;
max_seg_size = dma_get_max_seg_size(dev);
+ seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+ base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
- unsigned long paddr, npages, entry, slen;
+ unsigned long paddr, npages, entry, out_entry = 0, slen;
iopte_t *base;
slen = s->length;
@@ -593,7 +598,9 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
- (outs->dma_length + s->length > max_seg_size)) {
+ (outs->dma_length + s->length > max_seg_size) ||
+ (is_span_boundary(out_entry, base_shift,
+ seg_boundary_size, outs, s))) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
@@ -607,6 +614,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
+ out_entry = entry;
}
/* Calculate next page pointer for contiguous check */
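
With the new condition in place, dma_4u_map_sg() refuses to merge a scatterlist entry into the current segment for any of three reasons: the allocated DMA address is not contiguous with the previous one, the merged length would exceed dma_get_max_seg_size(), or the merged segment would span the device's segment boundary. The hypothetical predicate below sketches that decision; the function name is invented for illustration, and the boundary test is reduced to plain address arithmetic rather than the entry/shift form the kernel helper uses. It assumes a 64-bit unsigned long, as on sparc64.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative only: decide whether the next scatterlist entry can be
 * merged into the segment that currently ends at dma_next.  Mirrors the
 * three-part test installed in dma_4u_map_sg()/dma_4v_map_sg().
 */
static bool can_merge(unsigned long dma_addr, unsigned long dma_next,
		      unsigned long seg_len, unsigned long add_len,
		      unsigned long max_seg_size, unsigned long boundary_mask)
{
	if (dma_addr != dma_next)               /* not contiguous in DMA space */
		return false;
	if (seg_len + add_len > max_seg_size)   /* exceeds dma_get_max_seg_size() */
		return false;

	/* Would the merged segment cross a (boundary_mask + 1)-sized boundary? */
	unsigned long start = dma_next - seg_len;
	unsigned long end = start + seg_len + add_len - 1;
	if ((start & ~boundary_mask) != (end & ~boundary_mask))
		return false;

	return true;
}

int main(void)
{
	/*
	 * A 60 KiB segment [0xffff0000, 0xfffff000) ending at dma_next; adding
	 * a contiguous 32 KiB entry would cross the 4 GiB boundary -> prints 0.
	 */
	printf("%d\n", can_merge(0xfffff000UL, 0xfffff000UL, 0xf000UL, 0x8000UL,
				 0x40000UL, 0xffffffffUL));
	return 0;
}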
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
index 0713bd5..f3575a6 100644
--- a/arch/sparc64/kernel/iommu_common.h
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
+#include <linux/iommu-helper.h>
#include <asm/iommu.h>
#include <asm/scatterlist.h>
@@ -45,17 +46,16 @@ static inline unsigned long iommu_num_pages(unsigned long vaddr,
return npages;
}
-static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
+static inline int is_span_boundary(unsigned long entry,
+ unsigned long shift,
+ unsigned long boundary_size,
+ struct scatterlist *outs,
+ struct scatterlist *sg)
{
- unsigned long i, npages = 0;
- struct scatterlist *sg;
+ unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs);
+ int nr = iommu_num_pages(paddr, outs->dma_length + sg->length);
- for_each_sg(sglist, sg, nelems, i) {
- unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
- npages += iommu_num_pages(paddr, sg->length);
- }
-
- return npages;
+ return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
extern unsigned long iommu_range_alloc(struct device *dev,
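
The new is_span_boundary() wrapper sizes the would-be merged segment in IOMMU pages and delegates the actual test to iommu_is_span_boundary() from the newly included linux/iommu-helper.h. The userspace sketch below is a rough reconstruction of what that helper checks, not a verbatim copy: does a run of nr pages starting at page-table slot entry, offset by the table's base shift, cross a boundary_size-page boundary (boundary_size a power of two)?

#include <stdio.h>

/* Sketch of the check the wrapper delegates to (entry, shift, nr in IOMMU pages). */
static int span_crosses_boundary(unsigned long entry, unsigned long nr,
				 unsigned long shift, unsigned long boundary_size)
{
	unsigned long offset = (shift + entry) & (boundary_size - 1);
	return offset + nr > boundary_size;
}

int main(void)
{
	/* 4 GiB boundary in 8 KiB pages = 0x80000; page table based at page 0. */
	printf("%d\n", span_crosses_boundary(0x7fffe, 4, 0, 0x80000)); /* 1: crosses */
	printf("%d\n", span_crosses_boundary(0x7fffe, 2, 0, 0x80000)); /* 0: fits    */
	return 0;
}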
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index ddca6c6..0183970 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -335,8 +335,10 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long flags, handle, prot;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
+ unsigned long seg_boundary_size;
int outcount, incount, i;
struct iommu *iommu;
+ unsigned long base_shift;
long err;
BUG_ON(direction == DMA_NONE);
@@ -362,8 +364,11 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
iommu_batch_start(dev, prot, ~0UL);
max_seg_size = dma_get_max_seg_size(dev);
+ seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+ base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
- unsigned long paddr, npages, entry, slen;
+ unsigned long paddr, npages, entry, out_entry = 0, slen;
slen = s->length;
/* Sanity check */
@@ -406,7 +411,9 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
- (outs->dma_length + s->length > max_seg_size)) {
+ (outs->dma_length + s->length > max_seg_size) ||
+ (is_span_boundary(out_entry, base_shift,
+ seg_boundary_size, outs, s))) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
@@ -420,6 +427,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
+ out_entry = entry;
}
/* Calculate next page pointer for contiguous check */
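
dma_4v_map_sg() gains the identical check. Note that the page count handed to the helper covers the whole candidate segment: it starts from the segment head's physical address (SG_ENT_PHYS_ADDRESS(outs)) and spans outs->dma_length plus the new entry's length, using the rounding of the existing iommu_num_pages() helper. A small sketch of that sizing, again assuming 8 KiB IOMMU pages and reproducing the rounding locally:

#include <stdio.h>

#define IO_PAGE_SHIFT 13UL
#define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))
#define IO_PAGE_ALIGN(addr) (((addr) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

/* Pages touched by [vaddr, vaddr + slen): start rounded down, end rounded up. */
static unsigned long num_io_pages(unsigned long vaddr, unsigned long slen)
{
	return (IO_PAGE_ALIGN(vaddr + slen) - (vaddr & IO_PAGE_MASK)) >> IO_PAGE_SHIFT;
}

int main(void)
{
	unsigned long seg_phys = 0x10001000UL;  /* segment head, not page aligned  */
	unsigned long merged_len = 0x6000UL;    /* outs->dma_length + sg->length   */

	/* 0x10001000..0x10007000 touches pages 0x10000000..0x10008000 -> 4 pages */
	printf("%lu\n", num_io_pages(seg_phys, merged_len));
	return 0;
}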
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
index a0f000b..ae24919 100644
--- a/arch/sparc64/mm/tlb.c
+++ b/arch/sparc64/mm/tlb.c
@@ -23,11 +23,8 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
void flush_tlb_pending(void)
{
- struct mmu_gather *mp;
+ struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
- preempt_disable();
-
- mp = &__get_cpu_var(mmu_gathers);
if (mp->tlb_nr) {
flush_tsb_user(mp);
@@ -43,7 +40,7 @@ void flush_tlb_pending(void)
mp->tlb_nr = 0;
}
- preempt_enable();
+ put_cpu_var(mmu_gathers);
}
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
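
The tlb.c hunk is a pure cleanup: get_cpu_var()/put_cpu_var() already bracket the per-CPU access with preempt_disable()/preempt_enable(), so the open-coded sequence was redundant and the behavior is unchanged. Below is a userspace analogue of the pattern, with the preemption calls stubbed out and the per-CPU variable reduced to a plain global; everything here is illustrative, not the kernel's percpu implementation, and it relies on GCC statement expressions as the kernel macros do.

#include <stdio.h>

/* Stubs that only model the disable/enable pairing, not real preemption. */
static int preempt_depth;
static void preempt_disable(void) { preempt_depth++; }
static void preempt_enable(void)  { preempt_depth--; }

static struct mmu_gather_stub { unsigned long tlb_nr; } mmu_gathers;

/* Analogue of the pattern: pin, yield the CPU-local variable, unpin. */
#define get_cpu_var(var) (*({ preempt_disable(); &(var); }))
#define put_cpu_var(var) preempt_enable()

int main(void)
{
	struct mmu_gather_stub *mp = &get_cpu_var(mmu_gathers);

	mp->tlb_nr = 0;                 /* body runs with "preemption" disabled */
	put_cpu_var(mmu_gathers);

	printf("balanced: %d\n", preempt_depth == 0);
	return 0;
}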