Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/alignment.c       |  11
-rw-r--r--  arch/arm/mm/context.c         |  29
-rw-r--r--  arch/arm/mm/dma-mapping.c     | 108
-rw-r--r--  arch/arm/mm/idmap.c           |   1
-rw-r--r--  arch/arm/mm/proc-v7-3level.S  |   2
5 files changed, 112 insertions(+), 39 deletions(-)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b820edaf3184..db26e2e543f4 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -749,7 +749,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
unsigned long instr = 0, instrptr;
int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
unsigned int type;
- mm_segment_t fs;
unsigned int fault;
u16 tinstr = 0;
int isize = 4;
@@ -760,16 +759,15 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
instrptr = instruction_pointer(regs);
- fs = get_fs();
- set_fs(KERNEL_DS);
if (thumb_mode(regs)) {
- fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
+ u16 *ptr = (u16 *)(instrptr & ~1);
+ fault = probe_kernel_address(ptr, tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2 = 0;
- fault = __get_user(tinst2, (u16 *)(instrptr+2));
+ fault = probe_kernel_address(ptr + 1, tinst2);
instr = (tinstr << 16) | tinst2;
thumb2_32b = 1;
} else {
@@ -778,8 +776,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
}
} else
- fault = __get_user(instr, (u32 *)instrptr);
- set_fs(fs);
+ fault = probe_kernel_address(instrptr, instr);
if (fault) {
type = TYPE_FAULT;
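The hunk above replaces the get_fs()/set_fs(KERNEL_DS)/__get_user() sequence with probe_kernel_address(), which reads the faulting instruction fault-safely without widening the address limit around the access. A minimal sketch of the resulting fetch path, assuming probe_kernel_address() as provided by <linux/uaccess.h> in kernels of this era (the helper name is illustrative):

```c
#include <linux/types.h>
#include <linux/uaccess.h>	/* probe_kernel_address() */

/*
 * Illustrative helper (not part of the patch): fault-safe fetch of a
 * 16-bit Thumb instruction at the halfword-aligned address derived
 * from the fault PC, mirroring the do_alignment() change above.
 * Returns 0 on success, non-zero if the read itself faults.
 */
static int fetch_thumb_halfword(unsigned long instrptr, u16 *out)
{
	u16 *ptr = (u16 *)(instrptr & ~1UL);
	u16 tinstr = 0;
	int fault;

	fault = probe_kernel_address(ptr, tinstr);
	if (fault)
		return fault;

	*out = tinstr;
	return 0;
}
```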
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a0511191f6b..a5a4b2bc42ba 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
return 0;
}
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
- u64 asid = mm->context.id;
+ u64 asid = atomic64_read(&mm->context.id);
u64 generation = atomic64_read(&asid_generation);
if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
cpumask_clear(mm_cpumask(mm));
}
- mm->context.id = asid;
+ return asid;
}
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
+ u64 asid;
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
__check_vmalloc_seq(mm);
@@ -198,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
*/
cpu_set_reserved_ttbr0();
- if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
- && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+ asid = atomic64_read(&mm->context.id);
+ if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+ && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
goto switch_mm_fastpath;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
- if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
- new_context(mm, cpu);
-
- atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
- cpumask_set_cpu(cpu, mm_cpumask(mm));
+ asid = atomic64_read(&mm->context.id);
+ if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+ asid = new_context(mm, cpu);
+ atomic64_set(&mm->context.id, asid);
+ }
- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+ local_flush_bp_all();
local_flush_tlb_all();
+ }
+
+ atomic64_set(&per_cpu(active_asids, cpu), asid);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
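The rollover test used twice above compares the generation bits of the mm's ASID with the global asid_generation: if anything above the low ASID_BITS differs, the ASID was issued under an older generation and new_context() must allocate a fresh one. A hedged sketch of just that test (ASID_BITS of 8 is assumed here, matching the classic ARMv7 hardware ASID width):

```c
#include <linux/atomic.h>
#include <linux/types.h>

#define ASID_BITS	8	/* assumed: 8-bit hardware ASIDs as on ARMv7 */

/*
 * Illustrative: true when 'context_id' was handed out under an older
 * generation than 'asid_generation' and therefore needs replacing.
 */
static bool asid_is_stale(atomic64_t *context_id, atomic64_t *asid_generation)
{
	u64 asid = atomic64_read(context_id);
	u64 generation = atomic64_read(asid_generation);

	return ((asid ^ generation) >> ASID_BITS) != 0;
}
```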
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index dda3904dc64c..c7e3759f16d3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -186,13 +186,24 @@ static u64 get_coherent_dma_mask(struct device *dev)
static void __dma_clear_buffer(struct page *page, size_t size)
{
- void *ptr;
/*
* Ensure that the allocated pages are zeroed, and that any data
* lurking in the kernel direct-mapped region is invalidated.
*/
- ptr = page_address(page);
- if (ptr) {
+ if (PageHighMem(page)) {
+ phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
+ phys_addr_t end = base + size;
+ while (size > 0) {
+ void *ptr = kmap_atomic(page);
+ memset(ptr, 0, PAGE_SIZE);
+ dmac_flush_range(ptr, ptr + PAGE_SIZE);
+ kunmap_atomic(ptr);
+ page++;
+ size -= PAGE_SIZE;
+ }
+ outer_flush_range(base, end);
+ } else {
+ void *ptr = page_address(page);
memset(ptr, 0, size);
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
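The highmem branch above cannot use page_address(), because highmem pages have no permanent kernel mapping; each page is therefore mapped with kmap_atomic(), zeroed, cleaned from the data cache, and unmapped, and the outer (L2) cache is flushed once over the physical range. A standalone sketch of that per-page pattern, with the same cache maintenance calls as the hunk (the function name is illustrative):

```c
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>

/*
 * Illustrative: zero and clean a physically contiguous buffer that may
 * live in highmem, one page at a time, then push the outer cache by
 * physical address.  Mirrors the loop added to __dma_clear_buffer().
 */
static void clear_possibly_highmem_buffer(struct page *page, size_t size)
{
	phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
	phys_addr_t end = base + size;

	while (size > 0) {
		void *ptr = kmap_atomic(page);	/* short-lived mapping */

		memset(ptr, 0, PAGE_SIZE);
		dmac_flush_range(ptr, ptr + PAGE_SIZE);
		kunmap_atomic(ptr);

		page++;
		size -= PAGE_SIZE;
	}

	outer_flush_range(base, end);
}
```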
@@ -243,7 +254,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
#endif
static void *__alloc_from_contiguous(struct device *dev, size_t size,
- pgprot_t prot, struct page **ret_page);
+ pgprot_t prot, struct page **ret_page,
+ const void *caller);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
@@ -346,10 +358,11 @@ static int __init atomic_pool_init(void)
goto no_pages;
if (IS_ENABLED(CONFIG_CMA))
- ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+ ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+ atomic_pool_init);
else
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
- &page, NULL);
+ &page, atomic_pool_init);
if (ptr) {
int i;
@@ -542,27 +555,41 @@ static int __free_from_pool(void *start, size_t size)
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
- pgprot_t prot, struct page **ret_page)
+ pgprot_t prot, struct page **ret_page,
+ const void *caller)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
struct page *page;
+ void *ptr;
page = dma_alloc_from_contiguous(dev, count, order);
if (!page)
return NULL;
__dma_clear_buffer(page, size);
- __dma_remap(page, size, prot);
+ if (PageHighMem(page)) {
+ ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+ if (!ptr) {
+ dma_release_from_contiguous(dev, page, count);
+ return NULL;
+ }
+ } else {
+ __dma_remap(page, size, prot);
+ ptr = page_address(page);
+ }
*ret_page = page;
- return page_address(page);
+ return ptr;
}
static void __free_from_contiguous(struct device *dev, struct page *page,
- size_t size)
+ void *cpu_addr, size_t size)
{
- __dma_remap(page, size, pgprot_kernel);
+ if (PageHighMem(page))
+ __dma_free_remap(cpu_addr, size);
+ else
+ __dma_remap(page, size, pgprot_kernel);
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
@@ -583,9 +610,9 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
#define __free_from_pool(cpu_addr, size) 0
-#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
@@ -645,7 +672,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
else
- addr = __alloc_from_contiguous(dev, size, prot, &page);
+ addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -739,7 +766,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
* Non-atomic allocations cannot be freed with IRQs disabled
*/
WARN_ON(irqs_disabled());
- __free_from_contiguous(dev, page, size);
+ __free_from_contiguous(dev, page, cpu_addr, size);
}
}
@@ -1002,6 +1029,9 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
unsigned int count, start;
unsigned long flags;
+ if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
+ order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
+
count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
(1 << mapping->order) - 1) >> mapping->order;
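The clamp added above caps the alignment order requested for an IOVA allocation at CONFIG_ARM_DMA_IOMMU_ALIGNMENT, so very large buffers do not demand equally large alignment in the IOVA bitmap and fragment it. A small sketch of the capped order computation (the helper name is illustrative; the Kconfig symbol is the one used in the hunk):

```c
#include <linux/mm.h>	/* get_order() */

/*
 * Illustrative: alignment order used when carving an IOVA range for a
 * buffer of 'size' bytes, capped as in __alloc_iova() above so a huge
 * allocation cannot force an excessively aligned slot.
 */
static unsigned int iova_align_order(size_t size)
{
	unsigned int order = get_order(size);

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	return order;
}
```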
@@ -1068,12 +1098,17 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
return pages;
}
+ /*
+ * IOMMU can map any pages, so highmem can also be used here

+ */
+ gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
while (count) {
int j, order = __fls(count);
- pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+ pages[i] = alloc_pages(gfp, order);
while (!pages[i] && order)
- pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+ pages[i] = alloc_pages(gfp, --order);
if (!pages[i])
goto error;
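Because every page will be mapped through the IOMMU, the buffer allocator above can now take pages from highmem as well, hence the added __GFP_HIGHMEM (with __GFP_NOWARN keeping the fallback path quiet). The allocation strategy itself is unchanged: try the largest order covering the remaining count, then back off to smaller orders. A hedged standalone version of that loop (helper name illustrative, count assumed non-zero):

```c
#include <linux/bitops.h>	/* __fls() */
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Illustrative: allocate one block of up to 'count' pages, retrying at
 * progressively smaller orders.  Highmem is acceptable because the
 * pages are only ever reached through the IOMMU mapping.
 */
static struct page *alloc_iommu_block(unsigned int count, int *out_order)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN | __GFP_HIGHMEM;
	int order = __fls(count);
	struct page *page;

	page = alloc_pages(gfp, order);
	while (!page && order)
		page = alloc_pages(gfp, --order);

	if (page)
		*out_order = order;
	return page;
}
```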
@@ -1257,11 +1292,11 @@ err_mapping:
return NULL;
}
-static void __iommu_free_atomic(struct device *dev, struct page **pages,
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
dma_addr_t handle, size_t size)
{
__iommu_remove_mapping(dev, handle, size);
- __free_from_pool(page_address(pages[0]), size);
+ __free_from_pool(cpu_addr, size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
@@ -1344,7 +1379,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
if (__in_atomic_pool(cpu_addr, size)) {
- __iommu_free_atomic(dev, pages, handle, size);
+ __iommu_free_atomic(dev, cpu_addr, handle, size);
return;
}
@@ -1732,6 +1767,8 @@ struct dma_map_ops iommu_ops = {
.unmap_sg = arm_iommu_unmap_sg,
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
+
+ .set_dma_mask = arm_dma_set_mask,
};
struct dma_map_ops iommu_coherent_ops = {
@@ -1745,6 +1782,8 @@ struct dma_map_ops iommu_coherent_ops = {
.map_sg = arm_coherent_iommu_map_sg,
.unmap_sg = arm_coherent_iommu_unmap_sg,
+
+ .set_dma_mask = arm_dma_set_mask,
};
/**
@@ -1799,6 +1838,7 @@ err2:
err:
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
static void release_iommu_mapping(struct kref *kref)
{
@@ -1815,6 +1855,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
if (mapping)
kref_put(&mapping->kref, release_iommu_mapping);
}
+EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
/**
* arm_iommu_attach_device
@@ -1843,5 +1884,32 @@ int arm_iommu_attach_device(struct device *dev,
pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
return 0;
}
+EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+ struct dma_iommu_mapping *mapping;
+
+ mapping = to_dma_iommu_mapping(dev);
+ if (!mapping) {
+ dev_warn(dev, "Not attached\n");
+ return;
+ }
+
+ iommu_detach_device(mapping->domain, dev);
+ kref_put(&mapping->kref, release_iommu_mapping);
+ mapping = NULL;
+ set_dma_ops(dev, NULL);
+
+ pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
#endif
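With arm_iommu_create_mapping(), arm_iommu_attach_device(), arm_iommu_release_mapping() and the new arm_iommu_detach_device() exported, a modular driver can now set up and tear down its own per-device IOVA space. A hedged usage sketch follows; the IOVA window base, size and order are arbitrary example values, and the argument list assumes the prototypes in <asm/dma-iommu.h> at the time of this change:

```c
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

/* Example values only: a 128 MiB IOVA window starting at 0x80000000. */
static struct dma_iommu_mapping *example_mapping;

static int example_iommu_setup(struct device *dev)
{
	example_mapping = arm_iommu_create_mapping(&platform_bus_type,
						   0x80000000, SZ_128M, 0);
	if (IS_ERR(example_mapping))
		return PTR_ERR(example_mapping);

	if (arm_iommu_attach_device(dev, example_mapping)) {
		arm_iommu_release_mapping(example_mapping);
		return -ENODEV;
	}
	return 0;
}

static void example_iommu_teardown(struct device *dev)
{
	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(example_mapping);
}
```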
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2dffc010cc41..5ee505c937d1 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
{
/* Switch to the identity mapping. */
cpu_switch_mm(idmap_pgd, &init_mm);
+ local_flush_bp_all();
#ifdef CONFIG_CPU_HAS_ASID
/*
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 50bf1dafc9ea..6ffd78c0f9ab 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -48,7 +48,7 @@
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
- and r3, r1, #0xff
+ asid r3, r1
mov r3, r3, lsl #(48 - 32) @ ASID
mcrr p15, 0, r0, r3, c2 @ set TTB 0
isb
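For reference on the last hunk: with LPAE the hardware ASID lives in bits [55:48] of the 64-bit TTBR0, so cpu_v7_switch_mm masks the ASID out of mm->context.id (now via the asid macro rather than a hard-coded #0xff) and shifts it into the upper word before mcrr writes both halves. A hedged C rendering of the value being built (constants assumed from the ARMv7 LPAE register layout):

```c
#include <linux/types.h>

#define TTBR_ASID_SHIFT	48	/* assumed: ASID field is TTBR0[55:48] on LPAE */
#define HW_ASID_MASK	0xffULL	/* assumed: 8-bit hardware ASIDs */

/*
 * Illustrative: the 64-bit TTBR0 value composed by cpu_v7_switch_mm,
 * pairing the page-table base with the context's hardware ASID.
 */
static inline u64 make_ttbr0(u64 pgd_phys, u64 context_id)
{
	return pgd_phys | ((context_id & HW_ASID_MASK) << TTBR_ASID_SHIFT);
}
```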