Diffstat (limited to 'drivers/parisc')
-rw-r--r--   drivers/parisc/ccio-dma.c       |  6
-rw-r--r--   drivers/parisc/hppb.c           |  2
-rw-r--r--   drivers/parisc/iommu-helpers.h  |  7
-rw-r--r--   drivers/parisc/sba_iommu.c      |  2
4 files changed, 11 insertions, 6 deletions
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 7c60cbd85dc8..d08b284de196 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -363,7 +363,7 @@ ccio_alloc_range(struct ioc *ioc, size_t size)
 	if (pages_needed <= 8) {
 		/*
 		 * LAN traffic will not thrash the TLB IFF the same NIC
-		 * uses 8 adjacent pages to map seperate payload data.
+		 * uses 8 adjacent pages to map separate payload data.
 		 * ie the same byte in the resource bit map.
 		 */
 #if 0
@@ -941,7 +941,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	** w/o this association, we wouldn't have coherent DMA!
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
-	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range);
+	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);
 
 	/*
 	** Program the I/O Pdir
@@ -1589,7 +1589,7 @@ static int __init ccio_probe(struct parisc_device *dev)
 }
 
 /**
- * ccio_init - ccio initalization procedure.
+ * ccio_init - ccio initialization procedure.
  *
  * Register this driver.
  */
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index a728a7cd2fc8..65eee67aa2ae 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -95,7 +95,7 @@ static struct parisc_driver hppb_driver = {
 };
 
 /**
- * hppb_init - HP-PB bus initalization procedure.
+ * hppb_init - HP-PB bus initialization procedure.
  *
  * Register this driver.
  */
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 0a1f99a2e93e..97ba8286c596 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -95,12 +95,14 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
 */
 
 static inline unsigned int
-iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
+iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+		      struct scatterlist *startsg, int nents,
 		      int (*iommu_alloc_range)(struct ioc *, size_t))
 {
 	struct scatterlist *contig_sg;	   /* contig chunk head */
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	unsigned int n_mappings = 0;
+	unsigned int max_seg_size = dma_get_max_seg_size(dev);
 
 	while (nents > 0) {
 
@@ -142,6 +144,9 @@ iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
 			    IOVP_SIZE) > DMA_CHUNK_SIZE))
 			break;
 
+		if (startsg->length + dma_len > max_seg_size)
+			break;
+
 		/*
 		** Next see if we can append the next chunk (i.e.
 		** it must end on one page and begin on another
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e527a0e1d6c0..d06627c3f353 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -946,7 +946,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	** w/o this association, we wouldn't have coherent DMA!
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
-	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);
+	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
 
 	/*
 	** Program the I/O Pdir
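
The substantive change above is the new bound in iommu_coalesce_chunks(): ccio_map_sg() and sba_map_sg() now pass the device down so that a coalesced DMA segment stops growing once it would exceed dma_get_max_seg_size(dev), rather than being limited only by DMA_CHUNK_SIZE and page alignment. The sketch below is illustrative and not part of the patch: it shows how a driver sitting above these IOMMUs could advertise the limit that the coalescing code now honors. The probe and mapping helpers and the 64 KB figure are hypothetical, and dma_set_max_seg_size() only succeeds when the bus code has allocated dev->dma_parms (as the PCI core does for PCI devices).

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Hypothetical driver probe: tell the DMA layer this device cannot take
 * segments larger than 64 KB. iommu_coalesce_chunks() reads this value
 * back via dma_get_max_seg_size() and stops coalescing at that point. */
static int example_probe(struct device *dev)
{
	return dma_set_max_seg_size(dev, 65536);
}

/* Map a scatterlist; with this patch, no segment returned by
 * dma_map_sg() on the ccio/sba path exceeds the limit set above. */
static int example_map(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	return mapped ? mapped : -ENOMEM;
}

Before this change, a device with a hard segment-size limit could be handed a coalesced segment larger than it could handle; with dev threaded through, the generic dma_set_max_seg_size()/dma_get_max_seg_size() pair is enough to express that constraint.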