author		Linus Torvalds <torvalds@linux-foundation.org>	2016-01-07 12:56:23 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-07 12:56:23 -0800
commit		1d8b0e79083d229829a408cd016dc4a038f637dd (patch)
tree		120e466200b31570e59357c0da9ebc1eb938fc5f
parent		2626820d838f9e98f323bf47b4fb7722d1c52e53 (diff)
parent		164afb1d85b872907cfac048b46c094db596d529 (diff)
Merge tag 'iommu-fixes-v4.4-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU fixes from Joerg Roedel:

 - Two build issues, one in the ipmmu-vmsa driver and one for the new
   generic dma-api implementation used on arm64

 - A performance fix for said dma-api implementation

 - An issue caused by a wrong offset in map_sg in the same code as above

* tag 'iommu-fixes-v4.4-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/dma: Use correct offset in map_sg
  iommu/ipmmu-vmsa: Don't truncate ttbr if LPAE is not enabled
  iommu/dma: Avoid unlikely high-order allocations
  iommu/dma: Add some missing #includes
-rw-r--r--	drivers/iommu/dma-iommu.c	11
-rw-r--r--	drivers/iommu/ipmmu-vmsa.c	2
2 files changed, 9 insertions, 4 deletions
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 3a20db4f8604..72d6182666cb 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,10 +21,13 @@
 #include <linux/device.h>
 #include <linux/dma-iommu.h>
+#include <linux/gfp.h>
 #include <linux/huge_mm.h>
 #include <linux/iommu.h>
 #include <linux/iova.h>
 #include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
 
 int iommu_dma_init(void)
 {
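For reference, a quick annotation of why those three headers are wanted here. The symbol lists are my reading of how dma-iommu.c uses them (two of them are visible in the hunks below); they are not part of the patch, and the vmalloc.h guess in particular is an assumption:

#include <linux/gfp.h>         /* gfp_t, alloc_pages(), __GFP_NORETRY - used in __iommu_dma_alloc_pages() below */
#include <linux/scatterlist.h> /* struct scatterlist, sg_dma_address(), sg_dma_len() - used in iommu_dma_map_sg() below */
#include <linux/vmalloc.h>     /* presumably vmap()/vunmap(); the exact user is not visible in this hunk (assumption) */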
@@ -191,6 +194,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
 {
 	struct page **pages;
 	unsigned int i = 0, array_size = count * sizeof(*pages);
+	unsigned int order = MAX_ORDER;
 
 	if (array_size <= PAGE_SIZE)
 		pages = kzalloc(array_size, GFP_KERNEL);
@@ -204,14 +208,15 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
 
 	while (count) {
 		struct page *page = NULL;
-		int j, order = __fls(count);
+		int j;
 
 		/*
 		 * Higher-order allocations are a convenience rather
 		 * than a necessity, hence using __GFP_NORETRY until
 		 * falling back to single-page allocations.
 		 */
-		for (order = min(order, MAX_ORDER); order > 0; order--) {
+		for (order = min_t(unsigned int, order, __fls(count));
+		     order > 0; order--) {
 			page = alloc_pages(gfp | __GFP_NORETRY, order);
 			if (!page)
 				continue;
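The effect of this hunk is that order now lives outside the while (count) loop: it starts at MAX_ORDER, is capped by __fls(count) on each pass, and is never reset, so once a given order has failed the allocator never retries anything that large for the rest of the buffer. Below is a minimal userspace model of that shape; MAX_OK_ORDER, fls_index() and the starting count are invented for illustration, and alloc_pages() is reduced to a pretend failure threshold, so this is a sketch of the loop, not the kernel function.

#include <stdio.h>

#define MAX_ORDER    11   /* assumed value, matches common kernel configs */
#define MAX_OK_ORDER  2   /* pretend any allocation above order 2 fails */

/* Index of the most significant set bit, standing in for the kernel's __fls(). */
static unsigned int fls_index(unsigned int x)
{
	unsigned int bit = 0;

	while (x >>= 1)
		bit++;
	return bit;
}

int main(void)
{
	unsigned int count = 21;          /* pages still needed */
	unsigned int order = MAX_ORDER;   /* persists across iterations: the fix */

	while (count) {
		/* Cap the order by what is still needed; never raise it again. */
		if (order > fls_index(count))
			order = fls_index(count);

		/* Models the for (...; order > 0; order--) fallback: keep
		 * dropping the order until the pretend allocator succeeds. */
		while (order > MAX_OK_ORDER)
			order--;

		printf("allocated order %u (%u pages), %u pages left\n",
		       order, 1u << order, count - (1u << order));
		count -= 1u << order;
	}
	return 0;
}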
@@ -453,7 +458,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		size_t s_offset = iova_offset(iovad, s->offset);
 		size_t s_length = s->length;
 
-		sg_dma_address(s) = s->offset;
+		sg_dma_address(s) = s_offset;
 		sg_dma_len(s) = s_length;
 		s->offset -= s_offset;
 		s_length = iova_align(iovad, s_length + s_offset);
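The one-line change above matters when the IOMMU granule is smaller than the CPU page size: sg_dma_address() temporarily stashes the offset that is later added to the segment's mapped IOVA, and that has to be the offset within an IOVA granule, not the whole CPU-page offset. The toy userspace calculation below shows the difference; the 64 KiB / 4 KiB sizes, the sg offset and the IOVA value are all made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define IOVA_GRANULE 0x1000u   /* pretend 4 KiB IOMMU page size under 64 KiB CPU pages */

/* Stand-in for iova_offset(iovad, x): offset within one IOMMU granule. */
static uint32_t iova_offset(uint32_t x)
{
	return x & (IOVA_GRANULE - 1);
}

int main(void)
{
	uint32_t sg_offset = 0x2800;        /* buffer starts 10 KiB into its 64 KiB CPU page */
	uint32_t seg_iova  = 0x80000000u;   /* pretend granule-aligned IOVA picked for the segment */

	/* The new code stores the intra-granule offset; the old code stored s->offset. */
	uint32_t dma_fixed  = seg_iova + iova_offset(sg_offset);
	uint32_t dma_broken = seg_iova + sg_offset;

	printf("intra-granule offset:   0x%x\n", iova_offset(sg_offset));
	printf("dma address (fixed):    0x%x\n", dma_fixed);
	printf("dma address (old code): 0x%x  (off by 0x%x)\n",
	       dma_broken, dma_broken - dma_fixed);
	return 0;
}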
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 8cf605fa9946..dfb868e2d129 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -295,7 +295,7 @@ static struct iommu_gather_ops ipmmu_gather_ops = {
 
 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 {
-	phys_addr_t ttbr;
+	u64 ttbr;
 
 	/*
 	 * Allocate the page table operations.
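Context for the type change: without LPAE, phys_addr_t on 32-bit ARM is only 32 bits wide, while the page-table base handed back by the io-pgtable code is a 64-bit value, so the old declaration truncated it and made the driver's ttbr >> 32 a shift as wide as the type (the build issue mentioned in the pull message). A standalone illustration with a made-up table address follows; it is not driver code and performs no real register writes.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cfg_ttbr = 0x0000000123456000ull;  /* hypothetical 64-bit page-table base */

	/* Old code: a 32-bit phys_addr_t silently drops the upper bits on
	 * assignment, and shifting it right by 32 is as wide as the type. */
	uint32_t old_ttbr = (uint32_t)cfg_ttbr;

	/* New code: a u64 keeps the whole value, so both register halves
	 * (IMTTLBR0 / IMTTUBR0 in the driver) come out right. */
	uint64_t new_ttbr = cfg_ttbr;

	printf("low  32 bits: old=0x%08x new=0x%08x\n",
	       old_ttbr, (uint32_t)new_ttbr);
	printf("high 32 bits: old=0x%08x new=0x%08x\n",
	       0u /* lost by the truncation */, (uint32_t)(new_ttbr >> 32));
	return 0;
}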