author     Gary King <GKing@nvidia.com>    2010-02-04 16:55:43 -0800
committer  Gary King <gking@nvidia.com>    2010-03-04 13:22:21 -0800
commit     8e5965dc0524ef93c2ed4718a6356060bc07b38b (patch)
tree       c064e417dd28d5ee95e79ba582a62430b682879f
parent     7f1eb91b3d4eca6845a290f71199424de1f51ee6 (diff)
nvmap: implement nvmap as a full memory manager driver
Previously, the task of managing RM-managed memory handles was split between nvos (OS page allocation), the RM (heap management for the carveout & IRAM heaps, and handle life-time management), nvreftrack (abnormal process termination) and nvmap (user-space read/write/map of memory handles). This resulted in an opaque system that was wasteful of kernel virtual address space, didn't support CPU cache attributes for kernel mappings, and couldn't fully unwind leaked handles (e.g., if an application leaked a pinned handle, the memory might never be reclaimed).

nvmap is now a full re-implementation of the RM memory manager, unifying all of the functionality from nvreftrack, nvos, nvmap and nvrm into one driver used by both user- and kernel-space clients.

Add configs to control paranoid operation. When paranoid is enabled, every handle reference passed into the kernel is verified to actually have been created by nvmap; furthermore, handles which are not global (the GET_ID ioctl has not been called for them) will fail validation if they are referenced by any process other than the one which created them, or by a super-user process (opened via /dev/knvmap).

Each file descriptor maintains its own table of nvmap_handle_ref references, so the handle value returned to each process is unique; furthermore, nvmap_handle_ref objects track how many times they have been pinned, to ensure that processes which abnormally terminate with pinned handles can be unwound correctly.

As a compile-time option, fully-unpinned handles which require IOVMM mappings may be stored in a segmented (by size) MRU (most-recently-unpinned) eviction cache; if IOVMM space is over-committed across multiple processes, a pin operation may reclaim any or all of the IOVMM areas in the MRU cache. MRU is used as the eviction policy because graphics operations frequently operate cyclically, and the least-recently-used entry may be needed almost immediately if the higher-level client starts (e.g.) rendering the next frame.

Introduce a concept of "secure" handles. Secure handles may only be mapped into IOVMM space, and when unpinned their mapping in IOVMM space is zapped immediately, to prevent malicious processes from being able to access the handle.

Expose carveout heap attributes for each carveout heap in sysfs, under the nvmap device with sub-device name heap-<heap name>:
* total size
* free size
* total block count
* free block count
* largest block
* largest free block
* base address
* name
* heap usage bitmask

Carveout heaps may be split at run-time, if sufficient memory is available in the heap. The split heap can be (should be) assigned a different name and usage bitmask than the original heap. This allows a large initial carveout to be split into smaller carveouts, to reserve sections of carveout memory for specific usages (e.g., camera and/or video clients). Add a split entry in the sysfs tree for each carveout heap, to support run-time splitting of carveout heaps into reserved regions. The format is: <size>,<usage>,<name>
* size should be parsable with memparse (suffixes k/K and m/M are legal)
* usage is the new heap's usage bitmask
* name is the name of the new heap (must be unique)

Carveout heaps are managed using a first-fit allocator with an explicit free list; all blocks are kept in a dynamically-sized array (which doubles in size every time all blocks are exhausted). To reduce fragmentation caused by allocations with different alignment requirements, the allocator compares left-justifying and right-justifying the allocation within the first-fit block, and chooses the justification that results in the largest remaining free block (this is particularly important for 1M-aligned split heaps).

Other code which duplicated functionality subsumed by this changelist (RM memory manager, NvOs carveout command-line parser, etc.) is deleted; implementations of the RM memory manager on top of nvmap are provided to support backwards compatibility.

bug 634812

Change-Id: Ic89d83fed31b4cadc68653d0e825c368b9c92f81
Reviewed-on: http://git-master/r/590
Reviewed-by: Gary King <gking@nvidia.com>
Tested-by: Gary King <gking@nvidia.com>
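For illustration, the paranoid validation rule described in the message reduces to a per-reference ownership check. The following is a minimal C sketch of that rule only; the struct and function names (sketch_client, sketch_handle, sketch_handle_ok) are invented for the example and are not the names used in drivers/char/nvmap.c.

    #include <stdbool.h>
    #include <sys/types.h>

    struct sketch_handle {
            pid_t owner;      /* process that created the handle */
            bool  global;     /* true once GET_ID has been called on it */
    };

    struct sketch_client {
            pid_t pid;
            bool  superuser;  /* true if the client opened /dev/knvmap */
    };

    /* A reference passes paranoid validation only if the handle was really
     * created by nvmap (non-NULL here), is global, or is being used by its
     * creator or by a super-user client. */
    static bool sketch_handle_ok(const struct sketch_client *c,
                                 const struct sketch_handle *h)
    {
            if (!h)
                    return false;
            if (h->global || c->superuser)
                    return true;
            return h->owner == c->pid;
    }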
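The optional MRU cache can be pictured as size-segmented lists of parked IOVMM areas. This is a rough sketch under that assumption: only <linux/list.h> and its helpers are real; the segment count, structures and function names are illustrative, not the driver's implementation.

    #include <linux/list.h>

    struct sketch_area {
            struct list_head mru;   /* link in one size-segmented MRU list */
            /* IOVMM mapping, length, owning handle, ... */
    };

    /* e.g. one list per size class; initialized with INIT_LIST_HEAD at probe */
    static struct list_head mru_lists[3];

    /* A fully-unpinned handle parks its IOVMM area at the head of a list. */
    static void sketch_mru_insert(struct sketch_area *a, int seg)
    {
            list_add(&a->mru, &mru_lists[seg]);
    }

    /* When a pin cannot get IOVMM space, evict the most recently unpinned
     * area first: cyclic graphics workloads will want the older entries
     * again almost immediately. */
    static struct sketch_area *sketch_mru_evict(int seg)
    {
            struct sketch_area *a;

            if (list_empty(&mru_lists[seg]))
                    return NULL;
            a = list_first_entry(&mru_lists[seg], struct sketch_area, mru);
            list_del(&a->mru);
            return a;
    }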
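The "<size>,<usage>,<name>" string written to the split attribute can be picked apart with standard kernel helpers. memparse(), strsep() and simple_strtoul() are real kernel functions; the parser below and its signature are hypothetical, shown only to make the format concrete.

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    /* Parse "<size>,<usage>,<name>" from a writable buffer; returns 0 on
     * success. Hypothetical helper, not taken from the patch. */
    static int sketch_parse_split(char *buf, unsigned long *size,
                                  unsigned int *usage, char **name)
    {
            char *tok, *end, *p = buf;

            tok = strsep(&p, ",");            /* <size> */
            if (!tok || !p)
                    return -EINVAL;
            *size = memparse(tok, &end);      /* accepts k/K and m/M suffixes */

            tok = strsep(&p, ",");            /* <usage> */
            if (!tok || !p || !*p)
                    return -EINVAL;
            *usage = simple_strtoul(tok, NULL, 0);

            *name = p;                        /* remainder is the heap name */
            return 0;
    }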
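The left-versus-right justification comparison comes down to a little arithmetic on the two candidate placements inside the first free block that fits. A sketch of that comparison, assuming power-of-two alignment; the names and return convention are illustrative and small alignment fragments are ignored.

    /* Choose where to place an allocation of 'size' bytes, aligned to
     * 'align' (a power of two), inside the free block [start, start + len).
     * Returns the chosen base, or 0 if the allocation does not fit. */
    static unsigned long sketch_place(unsigned long start, unsigned long len,
                                      unsigned long size, unsigned long align)
    {
            unsigned long end   = start + len;
            unsigned long left  = (start + align - 1) & ~(align - 1);
            unsigned long right = (end - size) & ~(align - 1);
            unsigned long left_rem, right_rem;

            if (size > len || left + size > end || right < start)
                    return 0;

            /* Dominant leftover free block for each candidate: after the
             * allocation when left-justified, before it when right-justified. */
            left_rem  = end - (left + size);
            right_rem = right - start;

            return (left_rem >= right_rem) ? left : right;
    }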
-rw-r--r--  arch/arm/mach-tegra/include/linux/nvmem_ioctl.h | 124
-rw-r--r--  arch/arm/mach-tegra/include/nvrm_heap.h | 219
-rw-r--r--  arch/arm/mach-tegra/include/nvrm_memmgr_private.h | 88
-rwxr-xr-x  arch/arm/mach-tegra/init_common.c | 7
-rw-r--r--  arch/arm/mach-tegra/nvos/nvos.c | 47
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/ap15/Makefile | 1
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_gart.c | 257
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init.c | 43
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init_common.c | 1
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_private.h | 42
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/ap20/Makefile | 1
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_gart.c | 257
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/common/Makefile | 4
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_carveout.c | 188
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_iram.c | 128
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.c | 555
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.h | 85
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr.c | 1226
-rw-r--r--  arch/arm/mach-tegra/nvrm/core/common/nvrm_power.c | 2
-rwxr-xr-x  arch/arm/mach-tegra/tegra_sysmap.c | 2
-rw-r--r--  drivers/char/Kconfig | 19
-rw-r--r--  drivers/char/mem.c | 17
-rw-r--r--  drivers/char/nvmap.c | 3068
-rwxr-xr-x  include/linux/tegra_devices.h | 4
24 files changed, 2968 insertions, 3417 deletions
diff --git a/arch/arm/mach-tegra/include/linux/nvmem_ioctl.h b/arch/arm/mach-tegra/include/linux/nvmem_ioctl.h
index 7b27f3ea84dd..555e19a90e9e 100644
--- a/arch/arm/mach-tegra/include/linux/nvmem_ioctl.h
+++ b/arch/arm/mach-tegra/include/linux/nvmem_ioctl.h
@@ -30,56 +30,85 @@
#define _MACH_TEGRA_NVMEM_IOCTL_H_
struct nvmem_create_handle {
- union {
- unsigned long key; // stores the Key for ClaimPreservedHandle
- unsigned long id; // stores the ID for FromId
- unsigned long size; // stores the size for CreateHandle
- };
- uintptr_t handle;
+ union {
+ __u32 key; /* ClaimPreservedHandle */
+ __u32 id; /* FromId */
+ __u32 size; /* CreateHandle */
+ };
+ __u32 handle;
};
-#define NVMEM_ALLOC_HANDLE_FLAG(_COH, _ALIGN) (((_COH)<<16) | ((_ALIGN)&0xffff))
+#define NVMEM_HEAP_SYSMEM (1ul<<31)
+#define NVMEM_HEAP_IOVMM (1ul<<30)
+
+/* common carveout heaps */
+#define NVMEM_HEAP_CARVEOUT_IRAM (1ul<<29)
+#define NVMEM_HEAP_CARVEOUT_GENERIC (1ul<<0)
+
+#define NVMEM_HEAP_CARVEOUT_MASK (NVMEM_HEAP_IOVMM - 1)
+
+#define NVMEM_HANDLE_UNCACHEABLE (0x0ul << 0)
+#define NVMEM_HANDLE_WRITE_COMBINE (0x1ul << 0)
+#define NVMEM_HANDLE_INNER_CACHEABLE (0x2ul << 0)
+#define NVMEM_HANDLE_CACHEABLE (0x3ul << 0)
+
+#define NVMEM_HANDLE_SECURE (0x1ul << 2)
+
struct nvmem_alloc_handle {
- uintptr_t handle; // hmem
- unsigned int heap_mask; // bitmask of legal heaps, or 0 for default heaps
- unsigned int flags; // munging of coherency and alignment into 1 word
+ __u32 handle;
+ __u32 heap_mask;
+ __u32 flags;
+ __u32 align;
};
struct nvmem_map_caller {
- uintptr_t handle; // hmem
- void __user *addr; // user pointer
- size_t offset; // offset into hmem; should be page-aligned
- size_t length; // number of bytes to map; should be page-aligned
- unsigned int flags;
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem; should be page-aligned */
+ __u32 length; /* number of bytes to map */
+ __u32 flags;
+ unsigned long addr; /* user pointer */
};
struct nvmem_rw_handle {
- uintptr_t handle; // hmem
- void __user *addr; // user pointer
- size_t offset; // offset into hmem
- size_t elem_size; // individual atom size
- size_t hmem_stride; // delta in bytes between atoms in hmem
- size_t user_stride; // delta in bytes between atoms in user
- size_t count; // number of atoms to copy
+ unsigned long addr; /* user pointer */
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem */
+ __u32 elem_size; /* individual atom size */
+ __u32 hmem_stride; /* delta in bytes between atoms in hmem */
+ __u32 user_stride; /* delta in bytes between atoms in user */
+ __u32 count; /* number of atoms to copy */
};
struct nvmem_pin_handle {
- unsigned long __user *handles; // array of handles to pin/unpin
- unsigned long __user *addr; // array of addresses to return
- unsigned long count; // number of entries in handles
+ unsigned long handles; /* array of handles to pin/unpin */
+ unsigned long addr; /* array of addresses to return */
+ __u32 count; /* number of entries in handles */
+};
+
+struct nvmem_handle_param {
+ __u32 handle;
+ __u32 param;
+ unsigned long result;
};
enum {
- NVMEM_CACHE_OP_WB = 0,
- NVMEM_CACHE_OP_INV,
- NVMEM_CACHE_OP_WB_INV,
+ NVMEM_HANDLE_PARAM_SIZE = 1,
+ NVMEM_HANDLE_PARAM_ALIGNMENT,
+ NVMEM_HANDLE_PARAM_BASE,
+ NVMEM_HANDLE_PARAM_HEAP,
+};
+
+enum {
+ NVMEM_CACHE_OP_WB = 0,
+ NVMEM_CACHE_OP_INV,
+ NVMEM_CACHE_OP_WB_INV,
};
struct nvmem_cache_op {
- uintptr_t handle;
- void __user *addr;
- size_t len;
- int op;
+ unsigned long addr;
+ __u32 handle;
+ __u32 len;
+ __s32 op;
};
#define NVMEM_IOC_MAGIC 'N'
@@ -87,17 +116,16 @@ struct nvmem_cache_op {
/* Creates a new memory handle. On input, the argument is the size of the new
* handle; on return, the argument is the name of the new handle
*/
-#define NVMEM_IOC_CREATE _IOWR(NVMEM_IOC_MAGIC, 0, struct nvmem_create_handle)
-#define NVMEM_IOC_CLAIM _IOWR(NVMEM_IOC_MAGIC, 1, struct nvmem_create_handle)
-#define NVMEM_IOC_FROM_ID _IOWR(NVMEM_IOC_MAGIC, 2, struct nvmem_create_handle)
-
+#define NVMEM_IOC_CREATE _IOWR(NVMEM_IOC_MAGIC, 0, struct nvmem_create_handle)
+#define NVMEM_IOC_CLAIM _IOWR(NVMEM_IOC_MAGIC, 1, struct nvmem_create_handle)
+#define NVMEM_IOC_FROM_ID _IOWR(NVMEM_IOC_MAGIC, 2, struct nvmem_create_handle)
/* Actually allocates memory for the specified handle */
#define NVMEM_IOC_ALLOC _IOW (NVMEM_IOC_MAGIC, 3, struct nvmem_alloc_handle)
/* Frees a memory handle, unpinning any pinned pages and unmapping any mappings
*/
-#define NVMEM_IOC_FREE _IOW (NVMEM_IOC_MAGIC, 4, uintptr_t)
+#define NVMEM_IOC_FREE _IO (NVMEM_IOC_MAGIC, 4)
/* Maps the region of the specified handle into a user-provided virtual address
* that was previously created via an mmap syscall on this fd */
@@ -108,23 +136,19 @@ struct nvmem_cache_op {
#define NVMEM_IOC_WRITE _IOW (NVMEM_IOC_MAGIC, 6, struct nvmem_rw_handle)
#define NVMEM_IOC_READ _IOW (NVMEM_IOC_MAGIC, 7, struct nvmem_rw_handle)
-/* Pins a single memory handle and ensures a contiguous mapping exists in
- * either phsyical memory or the GART. If the memory handle is backed by a
- * swap device, all pages will be resident in memory before this ioctl returns.
- * Pin operations may be performed recursively on memory handles.
- */
-#define NVMEM_IOC_PIN _IOWR(NVMEM_IOC_MAGIC, 8, uintptr_t)
+#define NVMEM_IOC_PARAM _IOWR(NVMEM_IOC_MAGIC, 8, struct nvmem_handle_param)
-/* Unpins a single memory handle. If the memory handle is backed by a swap
- * device, unpinning a memory handle may result in the handle being decommitted
- */
-#define NVMEM_IOC_UNPIN _IOW (NVMEM_IOC_MAGIC, 9, uintptr_t)
-
-/* Like IOC_PIN and IOC_UNPIN, but operates on a list of memory handles */
+/* Pins a list of memory handles into IO-addressable memory (either IOVMM
+ * space or physical memory, depending on the allocation), and returns the
+ * address. Handles may be pinned recursively. */
#define NVMEM_IOC_PIN_MULT _IOWR(NVMEM_IOC_MAGIC, 10, struct nvmem_pin_handle)
#define NVMEM_IOC_UNPIN_MULT _IOW (NVMEM_IOC_MAGIC, 11, struct nvmem_pin_handle)
#define NVMEM_IOC_CACHE _IOW (NVMEM_IOC_MAGIC, 12, struct nvmem_cache_op)
-#define NVMEM_IOC_MAXNR (_IOC_NR(NVMEM_IOC_CACHE))
+/* Returns a global ID usable to allow a remote process to create a handle
+ * reference to the same handle */
+#define NVMEM_IOC_GET_ID _IOWR(NVMEM_IOC_MAGIC, 13, struct nvmem_create_handle)
+
+#define NVMEM_IOC_MAXNR (_IOC_NR(NVMEM_IOC_GET_ID))
#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_heap.h b/arch/arm/mach-tegra/include/nvrm_heap.h
deleted file mode 100644
index 468dc494caed..000000000000
--- a/arch/arm/mach-tegra/include/nvrm_heap.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (c) 2007-2009 NVIDIA Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the NVIDIA Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef NVRM_HEAP_H
-#define NVRM_HEAP_H
-
-#include "nvrm_memmgr.h"
-#include "nvassert.h"
-#include "nvos.h"
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-typedef NvRmPhysAddr (*NvRmHeapAlloc)(NvU32 size);
-typedef void (*NvRmHeapFree)(NvRmPhysAddr);
-
-typedef struct NvRmPrivHeapRec
-{
- NvRmHeap heap;
- NvRmPhysAddr PhysicalAddress;
- NvU32 length;
-
-} NvRmPrivHeap;
-
-
-void
-NvRmPrivPreservedMemHandleInit(NvRmDeviceHandle hRm);
-
-NvRmPrivHeap *
-NvRmPrivHeapCarveoutInit(NvU32 length,
- NvRmPhysAddr base);
-
-void
-NvRmPrivHeapCarveoutDeinit(void);
-
-NvError
-NvRmPrivHeapCarveoutPreAlloc(NvRmPhysAddr Address, NvU32 Length);
-
-NvError
-NvRmPrivHeapCarveoutAlloc(NvU32 size, NvU32 align, NvRmPhysAddr *PAddr);
-
-void
-NvRmPrivHeapCarveoutFree(NvRmPhysAddr addr);
-
-void *
-NvRmPrivHeapCarveoutMemMap(NvRmPhysAddr base, NvU32 length, NvOsMemAttribute attribute);
-
-void
-NvRmPrivHeapCarveoutGetInfo(NvU32 *CarveoutPhysBase,
- void **pCarveout,
- NvU32 *CarveoutSize);
-
-NvS32
-NvRmPrivHeapCarveoutMemoryUsed(void);
-
-NvS32
-NvRmPrivHeapCarveoutLargestFreeBlock(void);
-
-/**
- * \Note Not necessarily same as CarveoutSize returned by
- * NvRmPrivHeapCarveoutGetInfo. No dependency on
- * carveout being mapped in.
- */
-NvS32
-NvRmPrivHeapCarveoutTotalSize(void);
-
-NvRmPrivHeap *
-NvRmPrivHeapIramInit(NvU32 length,
- NvRmPhysAddr base);
-
-void
-NvRmPrivHeapIramDeinit(void);
-
-NvError
-NvRmPrivHeapIramAlloc(NvU32 size, NvU32 align, NvRmPhysAddr *PAddr);
-
-NvError
-NvRmPrivHeapIramPreAlloc(NvRmPhysAddr Address, NvU32 Length);
-
-void
-NvRmPrivHeapIramFree(NvRmPhysAddr addr);
-
-void *
-NvRmPrivHeapIramMemMap(NvRmPhysAddr base, NvU32 length, NvOsMemAttribute attribute);
-
-
-// -- GART --
-
-#define GART_PAGE_SIZE (4096)
-#define GART_MAX_PAGES (4096)
-
-/**
- * Initialize the GART heap. This identifies the GART heap's base address
- * and total size to the internal heap manager, so that it may allocate
- * pages appropriately.
- *
- * @param hDevice An RM device handle.
- * Size of the GART heap (bytes) and Base address of the GART heap space
- * are in GartMemoryInfo substructure of hDevice
- *
- * @retval Pointer to the heap data structure, with updated values.
- */
-NvRmPrivHeap *
-NvRmPrivHeapGartInit(NvRmDeviceHandle hDevice);
-
-void
-NvRmPrivHeapGartDeinit(void);
-
-/**
- * Allocate GART storage space of the specified size (in units of GART_PAGE_SIZE).
- * Alignment is handled internally by this API, since it must align with the
- * GART_PAGE_SIZE. This API also updates the GART registers and returns the base
- * address pointer of the space allocated within the GART heap.
- *
- * @see NvRmPrivHeapGartFree()
- *
- * @param hDevice An RM device handle.
- * @param pPhysAddrArray Contains an array of page addresses. This array should
- * be created using an NVOS call that acquires the underlying memory address
- * for each page to be mapped by the GART.
- * @param NumberOfPages The size (in pages, not bytes) of mapping requested. Must
- * be greater than 0.
- * @param PAddr Points to variable that will be updated with the base address of
- * the next available GART page.
- *
- * @retval The address of the first available GART page of the requested size.
- */
-NvError
-NvRmPrivAp15HeapGartAlloc(
- NvRmDeviceHandle hDevice,
- NvOsPageAllocHandle hPageHandle,
- NvU32 NumberOfPages,
- NvRmPhysAddr *PAddr);
-
-NvError
-NvRmPrivAp20HeapGartAlloc(
- NvRmDeviceHandle hDevice,
- NvOsPageAllocHandle hPageHandle,
- NvU32 NumberOfPages,
- NvRmPhysAddr *PAddr);
-
-/**
- * Free the specified GART memory pages.
- *
- * @see NvRmPrivHeapGartAlloc()
- *
- * @param hDevice An RM device handle.
- * @param addr Base address (GART space) of the memory page(s) to free.
- * NULL address pointers are ignored.
- * @param NumberOfPages The size (in pages, not bytes) of mapping to free.
- * This needs to match the size indicated when allocated.
- */
-void
-NvRmPrivAp15HeapGartFree(
- NvRmDeviceHandle hDevice,
- NvRmPhysAddr addr,
- NvU32 NumberOfPages);
-
-void
-NvRmPrivAp20HeapGartFree(
- NvRmDeviceHandle hDevice,
- NvRmPhysAddr addr,
- NvU32 NumberOfPages);
-
-/**
- * Suspend GART.
- */
-void
-NvRmPrivAp15GartSuspend(NvRmDeviceHandle hDevice);
-
-void
-NvRmPrivAp20GartSuspend(NvRmDeviceHandle hDevice);
-
-/**
- * Resume GART.
- */
-void
-NvRmPrivAp15GartResume(NvRmDeviceHandle hDevice);
-
-void
-NvRmPrivAp20GartResume(NvRmDeviceHandle hDevice);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_memmgr_private.h b/arch/arm/mach-tegra/include/nvrm_memmgr_private.h
deleted file mode 100644
index 422b26c059d5..000000000000
--- a/arch/arm/mach-tegra/include/nvrm_memmgr_private.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2007-2009 NVIDIA Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the NVIDIA Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-
-#ifndef INCLUDED_NVDDK_MEMMGR_PRIVATE_H
-#define INCLUDED_NVDDK_MEMMGR_PRIVATE_H
-
-#include "nvrm_heap.h"
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif /* __cplusplus */
-
-#define NVRM_HMEM_CHECK_MAGIC NV_DEBUG
-
-#define NV_RM_HMEM_IS_ALLOCATED(hMem) \
- (((hMem)->PhysicalAddress != NV_RM_INVALID_PHYS_ADDRESS) || \
- ((hMem)->VirtualAddress != NULL) || \
- ((hMem)->hPageHandle != NULL) )
-
-typedef struct NvRmMemRec
-{
- void *VirtualAddress;
- NvRmDeviceHandle hRmDevice;
- NvOsPageAllocHandle hPageHandle;
- NvRmPhysAddr PhysicalAddress;
- NvU32 size;
- NvU32 alignment;
-
- /* Used for GART heap to keep track of the number of GART pages
- * in use by this handle.
- */
- NvU32 Pages;
-
- NvS32 refcount;
- NvS32 pin_count;
-
- NvOsMemAttribute coherency;
- NvRmHeap heap;
-
- NvBool mapped;
- NvU8 priority;
-
-#if NVRM_HMEM_CHECK_MAGIC
- NvU32 magic; // set to NVRM_MEM_MAGIC if valid
-#endif
-} NvRmMem;
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-
-#endif
-
-
-
-
diff --git a/arch/arm/mach-tegra/init_common.c b/arch/arm/mach-tegra/init_common.c
index 50c4dcfdf764..4a2217d2af68 100755
--- a/arch/arm/mach-tegra/init_common.c
+++ b/arch/arm/mach-tegra/init_common.c
@@ -43,6 +43,7 @@
#include "nvrm_arm_cp.h"
#include "nvrm_interrupt.h"
#include "ap20/arusb.h"
+#include "linux/nvmem_ioctl.h"
const char *tegra_partition_list = NULL;
char *tegra_boot_device = NULL;
@@ -959,6 +960,12 @@ extern int __init tegra_dma_init(void);
void __init tegra_common_init(void)
{
+ if (tegra_get_module_inst_size("iram", 0)) {
+ nvmap_add_carveout_heap(tegra_get_module_inst_base("iram", 0),
+ tegra_get_module_inst_size("iram", 0) *
+ NvRmModuleGetNumInstances(s_hRmGlobal, NvRmPrivModuleID_Iram),
+ "iram", NVMEM_HEAP_CARVEOUT_IRAM);
+ }
NV_ASSERT_SUCCESS(NvRmOpen(&s_hRmGlobal,0));
NV_ASSERT_SUCCESS(NvRmGpioOpen(s_hRmGlobal, &s_hGpioGlobal));
diff --git a/arch/arm/mach-tegra/nvos/nvos.c b/arch/arm/mach-tegra/nvos/nvos.c
index 08f30ebb0d2f..d0b19f792c86 100644
--- a/arch/arm/mach-tegra/nvos/nvos.c
+++ b/arch/arm/mach-tegra/nvos/nvos.c
@@ -1427,7 +1427,6 @@ NvError NvOsBootArgSet(NvU32 key, void *arg, NvU32 size)
return NvError_NotImplemented;
}
-static NvError NvOsGetCarveoutParam(NvBootArgsCarveout* arg, NvU32 size);
NvError NvOsBootArgGet(NvU32 key, void *arg, NvU32 size)
{
const void *src;
@@ -1465,8 +1464,6 @@ NvError NvOsBootArgGet(NvU32 key, void *arg, NvU32 size)
src = &s_BootArgs.ChipShmooPhysArgs;
size_src = sizeof(NvBootArgsChipShmooPhys);
break;
- case NvBootArgKey_Carveout:
- return NvOsGetCarveoutParam((NvBootArgsCarveout*)arg, size);
case NvBootArgKey_WarmBoot:
src = &s_BootArgs.WarmbootArgs;
size_src = sizeof(NvBootArgsWarmboot);
@@ -1533,50 +1530,6 @@ void NvOsSetResourceAllocFileLine(void* userptr, const char* file, int line)
}
#endif
-#define MAX_CARVEOUTS 2
-
-static int gs_NumCarveouts = 0;
-
-typedef struct CarveoutRegionRec {
- unsigned long base;
- unsigned long size;
-} CarveoutRegion;
-
-static CarveoutRegion gs_Carveouts[MAX_CARVEOUTS];
-
-static NvError NvOsGetCarveoutParam(NvBootArgsCarveout* arg, NvU32 size)
-{
- if (size != sizeof(*arg))
- return NvError_BadParameter;
-
- if (!gs_NumCarveouts)
- return NvError_NotSupported;
-
- arg->base = gs_Carveouts[0].base;
- arg->size = gs_Carveouts[0].size;
- return NvSuccess;
-}
-
-static int __init carveout_memory_setup(char *options)
-{
- unsigned long start, size;
- char *p = options;
-
- start = -1;
- size = memparse(p, &p);
- if (*p == '@')
- start = memparse(p + 1, &p);
-
- if (gs_NumCarveouts < NV_ARRAY_SIZE(gs_Carveouts))
- {
- gs_Carveouts[gs_NumCarveouts].base = start;
- gs_Carveouts[gs_NumCarveouts++].size = size;
- }
-
- return 0;
-}
-__setup("nvmem=", carveout_memory_setup);
-
static int __init parse_tegra_tag(const struct tag *tag)
{
const struct tag_nvidia_tegra *nvtag = &tag->u.tegra;
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/Makefile b/arch/arm/mach-tegra/nvrm/core/ap15/Makefile
index 85675a55c0fa..2c87c36b3245 100644
--- a/arch/arm/mach-tegra/nvrm/core/ap15/Makefile
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/Makefile
@@ -12,7 +12,6 @@ ccflags-y += -Iarch/arm/mach-tegra/nvrm/core
obj-y += ap15rm_interrupt_generic.o
obj-y += ap15rm_hwmap.o
-obj-y += ap15rm_gart.o
obj-y += ap15rm_clocks.o
obj-y += ap15rm_clock_config.o
obj-y += ap15rm_clocks_info.o
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_gart.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_gart.c
deleted file mode 100644
index 352016929d4b..000000000000
--- a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_gart.c
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (c) 2007-2009 NVIDIA Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the NVIDIA Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "ap15/armc.h"
-#include "nvrm_heap.h"
-#include "nvrm_heap_simple.h"
-#include "nvrm_hwintf.h"
-#include "ap15rm_private.h"
-#include "nvassert.h"
-#include "nvcommon.h"
-#include "nvrm_drf.h"
-
-
-/**
- * Initialize the GART entries, and enable the GART
- */
-
-#define GART_PAGE_SHIFT (12)
-#define GART_PAGE_SIZE (4096)
-
-extern NvBool gs_GartInited;
-extern NvRmHeapSimple gs_GartAllocator;
-extern NvU32 *gs_GartSave;
-
-/**
- * Initializes all of the TLB entries in the GART and enables GART translations
- * All entries are initially marked invalid.
- *
- * @param hDevice The RM device handle.
- */
-static NvError
-NvRmPrivAp15InitGART(NvRmDeviceHandle hDevice);
-static NvError
-NvRmPrivAp15InitGART(NvRmDeviceHandle hDevice)
-{
- NvU32 GartSize;
- NvU32 GartEntries;
- NvU32 GartEntry;
- NvU32 reg;
- NvU32 data;
-
- NV_ASSERT(hDevice != NULL);
-
- NvRmModuleGetBaseAddress(
- hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
-
- GartEntries = GartSize / GART_PAGE_SIZE;
-
- gs_GartSave = NvOsAlloc( sizeof(NvU32) * GartEntries );
- if ( NULL == gs_GartSave )
- return NvError_InsufficientMemory;
-
- data = NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 0);
- for (GartEntry = 0; GartEntry < GartEntries; ++GartEntry)
- {
- // set the address
- reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, GartEntry);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
-
- // mark the entry invalid
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, data);
- }
-
- // now enable the GART
- reg = NV_DRF_DEF(MC, GART_CONFIG, GART_ENABLE, ENABLE);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_CONFIG_0, reg);
- return NvSuccess;
-}
-
-NvError
-NvRmPrivAp15HeapGartAlloc(
- NvRmDeviceHandle hDevice,
- NvOsPageAllocHandle hPageHandle,
- NvU32 NumberOfPages,
- NvRmPhysAddr *PAddr)
-{
- NvError result = NvSuccess;
- NvU32 reg;
- NvU32 i, data;
- NvU32 FirstGartPage;
-
- NV_ASSERT(hDevice);
- NV_ASSERT(hPageHandle);
-
- result = NvRmPrivHeapSimpleAlloc(
- &gs_GartAllocator,
- NumberOfPages*GART_PAGE_SIZE,
- GART_PAGE_SIZE,
- PAddr);
-
- if (result != NvSuccess)
- return result;
-
- FirstGartPage = *PAddr;
-
- /* Check that the GART address exists and is page aligned */
- NV_ASSERT(FirstGartPage);
- NV_ASSERT((FirstGartPage & (GART_PAGE_SIZE - 1)) == 0);
-
- NvOsMutexLock(hDevice->mutex);
-
- // FIXME: Normally we would do this at init time, but it takes and
- // egregious amount of csim time, so I'm defering it or the 3d guys
- // will complain to me, and then to my boss, and then their boss, and then their bosses boss...
- if (gs_GartInited == NV_FALSE)
- {
- result = NvRmPrivAp15InitGART(hDevice);
- if ( NvSuccess != result )
- goto fail;
- gs_GartInited = NV_TRUE;
- }
-
- for (i = 0; i < NumberOfPages; i++)
- {
- data = (NvU32)NvOsPageAddress(hPageHandle, i * GART_PAGE_SIZE);
-
- /* Check that each physical address is page aligned */
- NV_ASSERT((data & (GART_PAGE_SIZE - 1)) == 0);
-
- reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, ((FirstGartPage + i*GART_PAGE_SIZE) >> GART_PAGE_SHIFT));
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
-
- reg =
- NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 1) |
- NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR, (data >> GART_PAGE_SHIFT));
-
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, reg);
- // lame, on csim we have to read this back to make sure the GART entry is valid before we hit the mc
- // with data to this address.
- (void)NV_REGR(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0);
- }
-fail:
- NvOsMutexUnlock(hDevice->mutex);
-
- return result;
-}
-
-void
-NvRmPrivAp15HeapGartFree(
- NvRmDeviceHandle hDevice,
- NvRmPhysAddr addr,
- NvU32 NumberOfPages)
-{
- NvU32 i;
- NvU32 reg;
- NvU32 data;
-
- NV_ASSERT(hDevice);
-
- if (addr && NumberOfPages)
- {
- // Invalidate GART page table entries
- data = NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 0);
- for (i = 0; i < NumberOfPages; i++)
- {
- // set the address
- reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, ((addr + i*GART_PAGE_SIZE) >> GART_PAGE_SHIFT));
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
-
- // mark the entry invalid
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, data);
- }
- NvRmPrivHeapSimpleFree(&gs_GartAllocator, addr);
- }
-}
-
-
-void
-NvRmPrivAp15GartSuspend(NvRmDeviceHandle hDevice)
-{
- NvU32 reg;
- NvU32 GartSize;
- NvU32 GartEntries;
- NvU32 GartEntry;
-
- NvOsMutexLock(hDevice->mutex);
- if (gs_GartInited == NV_TRUE)
- {
- NvRmModuleGetBaseAddress(
- hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
- GartEntries = GartSize / GART_PAGE_SIZE;
-
- for (GartEntry = 0; GartEntry < GartEntries; GartEntry++)
- {
- reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR,
- GartEntry);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
- MC_GART_ENTRY_ADDR_0, reg);
- gs_GartSave[GartEntry] = NV_REGR(hDevice,
- NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0);
- }
- }
- NvOsMutexUnlock(hDevice->mutex);
-}
-
-void
-NvRmPrivAp15GartResume(NvRmDeviceHandle hDevice)
-{
- NvU32 reg;
- NvU32 GartSize;
- NvU32 GartEntries;
- NvU32 GartEntry;
-
- NvOsMutexLock(hDevice->mutex);
- if (gs_GartInited == NV_TRUE)
- {
- NvRmModuleGetBaseAddress(
- hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
- GartEntries = GartSize / GART_PAGE_SIZE;
-
- for (GartEntry = 0; GartEntry < GartEntries; GartEntry++)
- {
- reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR,
- GartEntry);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
- MC_GART_ENTRY_ADDR_0, reg);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
- MC_GART_ENTRY_DATA_0, gs_GartSave[GartEntry] );
- }
-
- reg = NV_DRF_DEF(MC, GART_CONFIG, GART_ENABLE, ENABLE);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
- MC_GART_CONFIG_0, reg);
-
- }
- NvOsMutexUnlock(hDevice->mutex);
-}
-
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init.c
index 9392cdab7679..ff532b9c70d2 100644
--- a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init.c
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init.c
@@ -39,7 +39,6 @@
#include "nvrm_rmctrace.h"
#include "nvrm_configuration.h"
#include "nvrm_chiplib.h"
-#include "nvrm_heap.h"
#include "nvrm_pmu_private.h"
#include "nvrm_processor.h"
#include "nvrm_xpc.h"
@@ -143,8 +142,6 @@ NvRmOpenNew(NvRmDeviceHandle *pHandle)
NvRmDevice *rm = 0;
NvU32 *table = 0;
- NvU32 CarveoutBaseAddr;
- NvU32 CarveoutSize = 0;
NvU32 BctCustomerOption = 0;
NvU64 Uid = 0;
@@ -279,31 +276,8 @@ NvRmOpenNew(NvRmDeviceHandle *pHandle)
rm->bPreInit = NV_TRUE;
- {
- NvBootArgsCarveout Carveout;
- if (NvOsBootArgGet(NvBootArgKey_Carveout, &Carveout,
- sizeof(Carveout)) == NvSuccess)
- {
- CarveoutSize = Carveout.size;
- CarveoutBaseAddr = (NvU32) Carveout.base;
- }
- else
- {
- CarveoutSize = NvOdmQueryCarveoutSize();
- CarveoutBaseAddr = rm->ExtMemoryInfo.base +
- NvOdmQueryMemSize(NvOdmMemoryType_Sdram) - CarveoutSize;
- }
- }
-
- NvRmPrivHeapCarveoutInit(CarveoutSize, CarveoutBaseAddr);
- NvRmPrivHeapIramInit(rm->IramMemoryInfo.size, rm->IramMemoryInfo.base);
- NvRmPrivPreservedMemHandleInit(rm);
-
if (!NVOS_IS_WINDOWS_X86)
{
- // Initialize the GART heap (size & base address)
- NvRmPrivHeapGartInit( rm );
-
NvRmPrivCheckBondOut( rm );
/* bring modules out of reset */
@@ -523,15 +497,6 @@ NvRmClose(NvRmDeviceHandle handle)
}
- NvRmPrivHeapCarveoutDeinit();
- NvRmPrivHeapIramDeinit();
-
- if (!NVOS_IS_WINDOWS_X86)
- {
- // De-Initialize the GART heap
- NvRmPrivHeapGartDeinit();
- }
-
NvRmRmcClose( &handle->rmc );
/* deallocate the instance table */
@@ -594,14 +559,6 @@ NvRmPrivMemoryInfo( NvRmDeviceHandle hDevice )
inst++;
}
- if (!(NVCPU_IS_X86 && NVOS_IS_WINDOWS))
- {
- /* Get GART memory module info */
- inst = tbl->ModInst +
- (tbl->Modules)[NvRmPrivModuleID_Gart].Index;
- hDevice->GartMemoryInfo.base = inst->PhysAddr;
- hDevice->GartMemoryInfo.size = inst->Length;
- }
}
NvError
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init_common.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init_common.c
index 292cd21e67bc..fe3496ed65cd 100644
--- a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init_common.c
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init_common.c
@@ -39,7 +39,6 @@
#include "nvrm_rmctrace.h"
#include "nvrm_configuration.h"
#include "nvrm_chiplib.h"
-#include "nvrm_heap.h"
#include "nvrm_pmu_private.h"
#include "nvrm_processor.h"
#include "nvrm_structure.h"
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_private.h b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_private.h
index e6a1bd130fbc..6d8a99037952 100644
--- a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_private.h
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_private.h
@@ -175,42 +175,6 @@ void
NvRmPrivInitPinMux(NvRmDeviceHandle hDevice);
/**
- * Create a mapping from a list of physical sdram pages to the GART. No error
- * checking is done here, so you can clobber your friend's mappings if you
- * want. Every map for itself! This function programs registers and cannot
- * fail. Invalid parameters will result in asserts for debug purposes.
- *
- * @see NvRmPrivGartAlloc()
- *
- * @param hDevice The RM device handle.
- * @param pPhysAddrArray Points to an array of physical page addresses. Each
- * entry represents the base address for a 4KB page of memory. These entries
- * do not need to be contiguous memory blocks; after all, that's why you're
- * using this API.
- * @param NumberOfPages Specifies the number of physical address entries. A
- * value of 0 has no effect.
- * @param FirstGartPage Specifies the base address of the first available GART
- * page. This value should be obtained via a call to NvRmPrivGartAlloc().
- */
-void
-NvRmPrivCreateGARTMap(
- NvRmDeviceHandle hDevice,
- NvU32 *pPhysAddrArray,
- NvU32 NumberOfPages,
- NvU32 FirstGartPage);
-
-/**
- * Suspend GART.
- */
-void NvRmPrivGartSuspend( NvRmDeviceHandle hDevice );
-
-/**
- * Resume GART.
- */
-
-void NvRmPrivGartResume(NvRmDeviceHandle hDevice);
-
-/**
* Initializes the clock manager.
*
* @param hRmDevice The RM device handle
@@ -229,12 +193,6 @@ NvRmPrivClocksInit(NvRmDeviceHandle hRmDevice);
void
NvRmPrivClocksDeinit(NvRmDeviceHandle hRmDevice);
-/**
- * Increments a memory handle reference count.
- */
-void
-NvRmPrivMemIncrRef( NvRmMemHandle hMem );
-
/*** Private Interrupt API's ***/
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/Makefile b/arch/arm/mach-tegra/nvrm/core/ap20/Makefile
index 565115df45cb..ba4cd877b0fb 100644
--- a/arch/arm/mach-tegra/nvrm/core/ap20/Makefile
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/Makefile
@@ -18,4 +18,3 @@ obj-y += ap20rm_power_dfs.o
obj-y += ap20rm_pinmux_tables.o
obj-y += ap20rm_fuse.o
obj-y += ap20rm_clocks_info.o
-obj-y += ap20rm_gart.o
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_gart.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_gart.c
deleted file mode 100644
index 0cca272fdd89..000000000000
--- a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_gart.c
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (c) 2009 NVIDIA Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the NVIDIA Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "ap20/armc.h"
-#include "nvrm_heap.h"
-#include "nvrm_heap_simple.h"
-#include "nvrm_hwintf.h"
-#include "ap15/ap15rm_private.h"
-#include "nvassert.h"
-#include "nvcommon.h"
-#include "nvrm_drf.h"
-
-
-/**
- * Initialize the GART entries, and enable the GART
- */
-
-#define GART_PAGE_SHIFT (12)
-#define GART_PAGE_SIZE (4096)
-
-extern NvBool gs_GartInited;
-extern NvRmHeapSimple gs_GartAllocator;
-extern NvU32 *gs_GartSave;
-
-/**
- * Initializes all of the TLB entries in the GART and enables GART translations
- * All entries are initially marked invalid.
- *
- * @param hDevice The RM device handle.
- */
-static NvError
-NvRmPrivAp20InitGART(NvRmDeviceHandle hDevice);
-static NvError
-NvRmPrivAp20InitGART(NvRmDeviceHandle hDevice)
-{
- NvU32 GartSize;
- NvU32 GartEntries;
- NvU32 GartEntry;
- NvU32 reg;
- NvU32 data;
-
- NV_ASSERT(hDevice != NULL);
-
- NvRmModuleGetBaseAddress(
- hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
-
- GartEntries = GartSize / GART_PAGE_SIZE;
-
- gs_GartSave = NvOsAlloc( sizeof(NvU32) * GartEntries );
- if ( NULL == gs_GartSave )
- return NvError_InsufficientMemory;
-
- data = NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 0);
- for (GartEntry = 0; GartEntry < GartEntries; ++GartEntry)
- {
- // set the address
- reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, GartEntry);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
-
- // mark the entry invalid
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, data);
- }
-
- // now enable the GART
- reg = NV_DRF_DEF(MC, GART_CONFIG, GART_ENABLE, ENABLE);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_CONFIG_0, reg);
- return NvSuccess;
-}
-
-NvError
-NvRmPrivAp20HeapGartAlloc(
- NvRmDeviceHandle hDevice,
- NvOsPageAllocHandle hPageHandle,
- NvU32 NumberOfPages,
- NvRmPhysAddr *PAddr)
-{
- NvError result = NvSuccess;
- NvU32 reg;
- NvU32 i, data;
- NvU32 FirstGartPage;
-
- NV_ASSERT(hDevice);
- NV_ASSERT(hPageHandle);
-
- result = NvRmPrivHeapSimpleAlloc(
- &gs_GartAllocator,
- NumberOfPages*GART_PAGE_SIZE,
- GART_PAGE_SIZE,
- PAddr);
-
- if (result != NvSuccess)
- return result;
-
- FirstGartPage = *PAddr;
-
- /* Check that the GART address exists and is page aligned */
- NV_ASSERT(FirstGartPage);
- NV_ASSERT((FirstGartPage & (GART_PAGE_SIZE - 1)) == 0);
-
- NvOsMutexLock(hDevice->mutex);
-
- // FIXME: Normally we would do this at init time, but it takes and
- // egregious amount of csim time, so I'm defering it or the 3d guys
- // will complain to me, and then to my boss, and then their boss, and then their bosses boss...
- if (gs_GartInited == NV_FALSE)
- {
- result = NvRmPrivAp20InitGART(hDevice);
- if ( NvSuccess != result )
- goto fail;
- gs_GartInited = NV_TRUE;
- }
-
- for (i = 0; i < NumberOfPages; i++)
- {
- data = (NvU32)NvOsPageAddress(hPageHandle, i * GART_PAGE_SIZE);
-
- /* Check that each physical address is page aligned */
- NV_ASSERT((data & (GART_PAGE_SIZE - 1)) == 0);
-
- reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, ((FirstGartPage + i*GART_PAGE_SIZE) >> GART_PAGE_SHIFT));
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
-
- reg =
- NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 1) |
- NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR, (data >> GART_PAGE_SHIFT));
-
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, reg);
- // lame, on csim we have to read this back to make sure the GART entry is valid before we hit the mc
- // with data to this address.
- (void)NV_REGR(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0);
- }
-fail:
- NvOsMutexUnlock(hDevice->mutex);
-
- return result;
-}
-
-void
-NvRmPrivAp20HeapGartFree(
- NvRmDeviceHandle hDevice,
- NvRmPhysAddr addr,
- NvU32 NumberOfPages)
-{
- NvU32 i;
- NvU32 reg;
- NvU32 data;
-
- NV_ASSERT(hDevice);
-
- if (addr && NumberOfPages)
- {
- // Invalidate GART page table entries
- data = NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 0);
- for (i = 0; i < NumberOfPages; i++)
- {
- // set the address
- reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, ((addr + i*GART_PAGE_SIZE) >> GART_PAGE_SHIFT));
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
-
- // mark the entry invalid
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, data);
- }
- NvRmPrivHeapSimpleFree(&gs_GartAllocator, addr);
- }
-}
-
-
-void
-NvRmPrivAp20GartSuspend(NvRmDeviceHandle hDevice)
-{
- NvU32 reg;
- NvU32 GartSize;
- NvU32 GartEntries;
- NvU32 GartEntry;
-
- NvOsMutexLock(hDevice->mutex);
- if (gs_GartInited == NV_TRUE)
- {
- NvRmModuleGetBaseAddress(
- hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
- GartEntries = GartSize / GART_PAGE_SIZE;
-
- for (GartEntry = 0; GartEntry < GartEntries; GartEntry++)
- {
- reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR,
- GartEntry);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
- MC_GART_ENTRY_ADDR_0, reg);
- gs_GartSave[GartEntry] = NV_REGR(hDevice,
- NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0);
- }
- }
- NvOsMutexUnlock(hDevice->mutex);
-}
-
-void
-NvRmPrivAp20GartResume(NvRmDeviceHandle hDevice)
-{
- NvU32 reg;
- NvU32 GartSize;
- NvU32 GartEntries;
- NvU32 GartEntry;
-
- NvOsMutexLock(hDevice->mutex);
- if (gs_GartInited == NV_TRUE)
- {
- NvRmModuleGetBaseAddress(
- hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
- GartEntries = GartSize / GART_PAGE_SIZE;
-
- for (GartEntry = 0; GartEntry < GartEntries; GartEntry++)
- {
- reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR,
- GartEntry);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
- MC_GART_ENTRY_ADDR_0, reg);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
- MC_GART_ENTRY_DATA_0, gs_GartSave[GartEntry] );
- }
-
- reg = NV_DRF_DEF(MC, GART_CONFIG, GART_ENABLE, ENABLE);
- NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
- MC_GART_CONFIG_0, reg);
-
- }
- NvOsMutexUnlock(hDevice->mutex);
-}
-
diff --git a/arch/arm/mach-tegra/nvrm/core/common/Makefile b/arch/arm/mach-tegra/nvrm/core/common/Makefile
index 55c7f3042dbd..9adfebb4cf81 100644
--- a/arch/arm/mach-tegra/nvrm/core/common/Makefile
+++ b/arch/arm/mach-tegra/nvrm/core/common/Makefile
@@ -12,10 +12,6 @@ ccflags-y += -Iarch/arm/mach-tegra/nvrm/core/common
ccflags-y += -Iarch/arm/mach-tegra/nvrm/core
obj-y += nvrm_pinmux.o
-obj-y += nvrm_heap_simple.o
-obj-y += nvrm_memmgr.o
-obj-y += nvrm_heap_carveout.o
-obj-y += nvrm_heap_iram.o
obj-y += nvrm_keylist.o
obj-y += nvrm_configuration.o
obj-y += nvrm_pmu.o
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_carveout.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_carveout.c
deleted file mode 100644
index e5751a8168c8..000000000000
--- a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_carveout.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2007-2009 NVIDIA Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the NVIDIA Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "nvcommon.h"
-#include "nvos.h"
-#include "nvrm_memmgr.h"
-#include "nvrm_heap.h"
-#include "nvrm_heap_simple.h"
-#include "nvrm_hardware_access.h"
-
-static NvRmPrivHeap gs_CarveoutHeap;
-static NvRmPhysAddr gs_CarveoutBaseAddr;
-static void *gs_CarveoutVaddr;
-static NvBool gs_CarveoutGloballyMapped;
-
-static NvRmHeapSimple gs_CarveoutAllocator;
-
-
-NvError NvRmPrivHeapCarveoutAlloc(
- NvU32 size,
- NvU32 align,
- NvRmPhysAddr *PAddr)
-{
- return NvRmPrivHeapSimpleAlloc(&gs_CarveoutAllocator, size, align, PAddr);
-}
-
-NvError NvRmPrivHeapCarveoutPreAlloc(NvRmPhysAddr Address, NvU32 Length)
-{
- return NvRmPrivHeapSimplePreAlloc(&gs_CarveoutAllocator, Address, Length);
-}
-
-void NvRmPrivHeapCarveoutFree(NvRmPhysAddr addr)
-{
- NvRmPrivHeapSimpleFree(&gs_CarveoutAllocator, addr);
-}
-
-NvS32 NvRmPrivHeapCarveoutMemoryUsed(void)
-{
- return NvRmPrivHeapSimpleMemoryUsed(&gs_CarveoutAllocator);
-}
-
-NvS32 NvRmPrivHeapCarveoutLargestFreeBlock(void)
-{
- return NvRmPrivHeapSimpleLargestFreeBlock(&gs_CarveoutAllocator);
-}
-
-NvS32 NvRmPrivHeapCarveoutTotalSize(void)
-{
- return gs_CarveoutHeap.length;
-}
-
-
-void *NvRmPrivHeapCarveoutMemMap(
- NvRmPhysAddr base,
- NvU32 length,
- NvOsMemAttribute attribute)
-{
- NvU32 StartOffset = base - gs_CarveoutBaseAddr;
- NvU32 EndOffset = StartOffset + length - 1;
-
- if (!gs_CarveoutVaddr)
- return NULL;
-
- NV_ASSERT(length != 0);
-
- // sanity checking
- if (StartOffset < gs_CarveoutHeap.length &&
- EndOffset < gs_CarveoutHeap.length)
- {
- NvUPtr uptr = (NvUPtr)gs_CarveoutVaddr;
- return (void *)(uptr + StartOffset);
- }
-
- NV_ASSERT(!"Attempt to map something that is not part of the carveout");
- return NULL;
-}
-
-
-NvRmPrivHeap *NvRmPrivHeapCarveoutInit(NvU32 length, NvRmPhysAddr base)
-{
- NvError err;
- NvBool bGloballyMapped = NV_FALSE;
- void *vAddr = NULL;
-
-#if !(NVOS_IS_LINUX && !NVCPU_IS_X86)
- /* try to map the memory, if we can't map it then bail out */
- err = NvRmPhysicalMemMap(base, length,
- NVOS_MEM_READ_WRITE | NVOS_MEM_GLOBAL_ADDR,
- NvOsMemAttribute_Uncached, &vAddr);
-
- if (err == NvSuccess)
- {
- bGloballyMapped = NV_TRUE;
- }
- else
- {
- // try again to map carveout, but with global flag gone
- err = NvRmPhysicalMemMap(base, length, NVOS_MEM_READ_WRITE,
- NvOsMemAttribute_Uncached, &vAddr);
-
- if (err != NvSuccess)
- return NULL;
- }
-#endif
-
- err = NvRmPrivHeapSimple_HeapAlloc(base, length, &gs_CarveoutAllocator);
-
- if (err != NvSuccess)
- {
- if (vAddr)
- NvRmPhysicalMemUnmap(vAddr, length);
- return NULL;
- }
-
- gs_CarveoutHeap.heap = NvRmHeap_ExternalCarveOut;
- gs_CarveoutHeap.length = length;
- gs_CarveoutHeap.PhysicalAddress = base;
- gs_CarveoutBaseAddr = base;
- gs_CarveoutVaddr = vAddr;
- gs_CarveoutGloballyMapped = bGloballyMapped;
-
- return &gs_CarveoutHeap;
-}
-
-
-void NvRmPrivHeapCarveoutDeinit(void)
-{
- // deinit the carveout allocator
- if (gs_CarveoutVaddr)
- {
- NvRmPhysicalMemUnmap(gs_CarveoutVaddr, gs_CarveoutHeap.length);
- gs_CarveoutVaddr = NULL;
- }
-
- NvRmPrivHeapSimple_HeapFree(&gs_CarveoutAllocator);
- NvOsMemset(&gs_CarveoutHeap, 0, sizeof(gs_CarveoutHeap));
- NvOsMemset(&gs_CarveoutAllocator, 0, sizeof(gs_CarveoutAllocator));
-}
-
-
-void NvRmPrivHeapCarveoutGetInfo(
- NvU32 *CarveoutPhysBase,
- void **pCarveout,
- NvU32 *CarveoutSize)
-{
- if (gs_CarveoutGloballyMapped)
- {
- *CarveoutPhysBase = gs_CarveoutHeap.PhysicalAddress;
- *pCarveout = gs_CarveoutVaddr;
- *CarveoutSize = gs_CarveoutHeap.length;
- }
- else
- {
- *CarveoutPhysBase = 0;
- *pCarveout = NULL;
- *CarveoutSize = 0;
- }
-}
-
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_iram.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_iram.c
deleted file mode 100644
index 6f1b0cce7514..000000000000
--- a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_iram.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2007-2009 NVIDIA Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the NVIDIA Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "nvrm_heap.h"
-#include "nvrm_heap_simple.h"
-#include "nvrm_hardware_access.h"
-
-
-static NvRmPrivHeap gs_IramHeap;
-static NvUPtr gs_IramBaseAddr;
-static void *gs_IramVaddr;
-static NvRmHeapSimple gs_IramAllocator;
-
-NvError NvRmPrivHeapIramAlloc(NvU32 size, NvU32 align, NvRmPhysAddr *PAddr)
-{
- NvError err;
- err = NvRmPrivHeapSimpleAlloc(&gs_IramAllocator, size, align, PAddr);
- return err;
-}
-
-NvError NvRmPrivHeapIramPreAlloc(NvRmPhysAddr Address, NvU32 Length)
-{
- return NvRmPrivHeapSimplePreAlloc(&gs_IramAllocator, Address, Length);
-}
-
-void NvRmPrivHeapIramFree(NvRmPhysAddr addr)
-{
- NvRmPrivHeapSimpleFree(&gs_IramAllocator, addr);
-}
-
-NvRmPrivHeap *NvRmPrivHeapIramInit(NvU32 length, NvRmPhysAddr base)
-{
- void *vAddr = NULL;
- NvError err;
-
-#if !(NVOS_IS_LINUX && !NVCPU_IS_X86)
- /* try to map the memory, if we can't map it then bail out */
- err = NvRmPhysicalMemMap(base, length, NVOS_MEM_READ_WRITE,
- NvOsMemAttribute_Uncached, &vAddr);
- if (err != NvSuccess)
- return NULL;
-#endif
-
- err = NvRmPrivHeapSimple_HeapAlloc(base, length, &gs_IramAllocator);
-
- if (err != NvSuccess)
- {
- if (vAddr)
- NvRmPhysicalMemUnmap(vAddr, length);
- return NULL;
- }
-
- gs_IramHeap.heap = NvRmHeap_IRam;
- gs_IramHeap.length = length;
- gs_IramHeap.PhysicalAddress = base;
- gs_IramBaseAddr = (NvUPtr)base;
- gs_IramVaddr = vAddr;
-
- return &gs_IramHeap;
-}
-
-void NvRmPrivHeapIramDeinit(void)
-{
- // deinit the carveout allocator
- if (gs_IramVaddr)
- {
- NvRmPhysicalMemUnmap(gs_IramVaddr, gs_IramHeap.length);
- gs_IramVaddr = NULL;
- }
-
- NvRmPrivHeapSimple_HeapFree(&gs_IramAllocator);
- NvOsMemset(&gs_IramHeap, 0, sizeof(gs_IramHeap));
- NvOsMemset(&gs_IramAllocator, 0, sizeof(gs_IramAllocator));
-}
-
-void *NvRmPrivHeapIramMemMap(
- NvRmPhysAddr base,
- NvU32 length,
- NvOsMemAttribute attribute)
-{
- NvU32 StartOffset = base - gs_IramBaseAddr;
- NvU32 EndOffset = StartOffset + length - 1;
-
- NV_ASSERT(length != 0);
-
- if (!gs_IramVaddr)
- return NULL;
-
- // sanity checking
- if (StartOffset < gs_IramHeap.length &&
- EndOffset < gs_IramHeap.length)
- {
- NvUPtr uptr = (NvUPtr)gs_IramVaddr;
- return (void *)(uptr + StartOffset);
- }
-
- NV_ASSERT(!"Attempt to map something that is not part of the iram");
- return NULL;
-}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.c
deleted file mode 100644
index 7029cb13e560..000000000000
--- a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.c
+++ /dev/null
@@ -1,555 +0,0 @@
-/*
- * Copyright (c) 2007-2009 NVIDIA Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the NVIDIA Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/* This is an extremely simplistic memory allocator where the bookkeeping data
- * is kept out of band from the allocated memory. It's intended to be used
- * for framebuffer/carveout type allocations.
- *
- * Implementation is a simple first fit. Book-keeping is kept as a singly
- * linked list, which means allocation time is O(n) w.r.t. the total number of
- * allocations.
- */
-
-
-#include "nvrm_heap_simple.h"
-#include "nvassert.h"
-#include "nvos.h"
-
-#define INITIAL_GROW_SIZE 8
-#define MAX_GROW_SIZE 512
-#define INVALID_INDEX (NvU32)-1
-
-#define DEBUG_HEAP 0
-
-#if DEBUG_HEAP
-
-static void
-SanityCheckHeap(NvRmHeapSimple *pHeap)
-{
- NvU32 index;
-
- // first clear all the touched bits
- for (index = 0; index < pHeap->ArraySize; ++index)
- {
- pHeap->RawBlockArray[index].touched = 0;
- }
-
-
- // Walk the BlockArray
- index = pHeap->BlockIndex;
- for (;;)
- {
- if (index == INVALID_INDEX)
- break;
-
- // make sure we're not off in the weeds.
- NV_ASSERT( index < pHeap->ArraySize );
- NV_ASSERT( pHeap->RawBlockArray[index].touched == 0);
-
- pHeap->RawBlockArray[index].touched = 1;
- index = pHeap->RawBlockArray[index].NextIndex;
- }
-
-
- // Walk the SpareArray
- index = pHeap->SpareBlockIndex;
- for (;;)
- {
- if (index == INVALID_INDEX)
- break;
-
- // make sure we're not off in the weeds.
- NV_ASSERT( index < pHeap->ArraySize );
- NV_ASSERT( pHeap->RawBlockArray[index].touched == 0);
-
- pHeap->RawBlockArray[index].touched = 2;
- index = pHeap->RawBlockArray[index].NextIndex;
- }
-
-
- // check that all blocks get touched.
- for (index = 0; index < pHeap->ArraySize; ++index)
- {
- NV_ASSERT(pHeap->RawBlockArray[index].touched != 0);
- }
-}
-
-#else
-# define SanityCheckHeap(a)
-#endif
-
-
-
-NvError NvRmPrivHeapSimple_HeapAlloc(NvRmPhysAddr base, NvU32 size, NvRmHeapSimple *pNewHeap)
-{
- int i;
- NvError err = NvError_InsufficientMemory;
-
- NV_ASSERT(pNewHeap != NULL);
- NV_ASSERT(size > 0);
-
- NvOsMemset(pNewHeap, 0, sizeof(*pNewHeap));
-
- pNewHeap->base = base;
- pNewHeap->size = size;
- pNewHeap->ArraySize = INITIAL_GROW_SIZE;
-
- pNewHeap->RawBlockArray = NvOsAlloc(sizeof(NvRmHeapSimpleBlock) * INITIAL_GROW_SIZE);
- if (!pNewHeap->RawBlockArray)
- {
- err = NvError_InsufficientMemory;
- goto fail;
- }
- NvOsMemset(pNewHeap->RawBlockArray, 0, sizeof(NvRmHeapSimpleBlock) * INITIAL_GROW_SIZE);
-
-
- // setup all of the pointers (indices, whatever)
- for (i = 0; i < INITIAL_GROW_SIZE; ++i)
- {
- pNewHeap->RawBlockArray[i].NextIndex = i + 1;
- }
- pNewHeap->RawBlockArray[i-1].NextIndex = INVALID_INDEX;
-
- pNewHeap->BlockIndex = 0;
- pNewHeap->SpareBlockIndex = 1;
-
- pNewHeap->RawBlockArray[pNewHeap->BlockIndex].IsFree = NV_TRUE;
- pNewHeap->RawBlockArray[pNewHeap->BlockIndex].PhysAddr = base;
- pNewHeap->RawBlockArray[pNewHeap->BlockIndex].size = size;
- pNewHeap->RawBlockArray[pNewHeap->BlockIndex].NextIndex = INVALID_INDEX;
-
- err = NvOsMutexCreate(&pNewHeap->mutex);
- if (err)
- goto fail;
-
- SanityCheckHeap(pNewHeap);
- return NvSuccess;
-
-fail:
- NvOsFree(pNewHeap->RawBlockArray);
- return err;
-}
-
-
-/**
- * Frees up a heap structure, and all items that were associated with this heap
- *
- * @param pHeap Pointer to the heap structure returned from NvRmPrivHeapSimpleHeapAlloc
- */
-
-void NvRmPrivHeapSimple_HeapFree(NvRmHeapSimple *pHeap)
-{
- if (pHeap)
- {
- SanityCheckHeap(pHeap);
- NvOsMutexDestroy(pHeap->mutex);
- NvOsFree(pHeap->RawBlockArray);
- }
-}
-
-static NvError NvRmPrivHeapSimpleGrowBlockArray(
- NvRmHeapSimple *pHeap)
-{
- NvU32 SpareIndex = pHeap->SpareBlockIndex;
- NvU32 NumFree = 0;
- const NvU32 MinFree = 2;
-
- while (SpareIndex!=INVALID_INDEX && NumFree<MinFree)
- {
- NumFree++;
- SpareIndex = pHeap->RawBlockArray[SpareIndex].NextIndex;
- }
-
- if (NumFree < MinFree)
- {
- NvU32 i;
- NvU32 NewArraySize;
- NvU32 GrowSize;
- NvRmHeapSimpleBlock *NewBlockArray;
-
- GrowSize = pHeap->ArraySize + (pHeap->ArraySize >> 1);
- GrowSize = NV_MIN(GrowSize, MAX_GROW_SIZE);
-
- // Grow the array by 1.5x its current size (capped at MAX_GROW_SIZE new
- // entries) so at least two spare blocks are free and reallocs stay rare.
- NewArraySize = pHeap->ArraySize + GrowSize;
- NewBlockArray = NvOsAlloc( sizeof(NvRmHeapSimpleBlock)*NewArraySize);
- if (!NewBlockArray)
- {
- SanityCheckHeap(pHeap);
- return NvError_InsufficientMemory;
- }
- NvOsMemset(NewBlockArray, 0, sizeof(NvRmHeapSimpleBlock)*NewArraySize);
-
- NvOsMemcpy(NewBlockArray, pHeap->RawBlockArray, sizeof(NvRmHeapSimpleBlock)*pHeap->ArraySize);
-
- // setup the NextIndex in the new part of the array
- for (i = pHeap->ArraySize; i < NewArraySize; i++)
- {
- NewBlockArray[i].NextIndex = i + 1;
- }
-
- // Point the last element of the new array to the old SpareBlockList
- NewBlockArray[NewArraySize - 1].NextIndex = pHeap->SpareBlockIndex;
- NvOsFree(pHeap->RawBlockArray);
-
- // Update all our information
- pHeap->RawBlockArray = NewBlockArray;
- pHeap->SpareBlockIndex = pHeap->ArraySize;
- pHeap->ArraySize = NewArraySize;
- }
-
- return NvSuccess;
-}
-
-NvError NvRmPrivHeapSimplePreAlloc(
- NvRmHeapSimple *pHeap,
- NvRmPhysAddr Address,
- NvU32 Length)
-{
- NvRmHeapSimpleBlock *pBlock;
- NvU32 BlockIndex;
-
- NV_ASSERT(pHeap!=NULL);
-
- // All preallocated blocks must start at a minimum of a 32B alignment
- if ((Address & 31) || (Length & 31))
- return NvError_NotSupported;
-
- NvOsMutexLock(pHeap->mutex);
-
- if (NvRmPrivHeapSimpleGrowBlockArray(pHeap)!=NvSuccess)
- {
- NvOsMutexUnlock(pHeap->mutex);
- return NvError_InsufficientMemory;
- }
-
- // Iteratively search through all the blocks for the block whose
- // physical address region contains the requested pre-allocated
- // region, and which isn't already allocated.
- for (BlockIndex = pHeap->BlockIndex; BlockIndex!=INVALID_INDEX;
- BlockIndex = pHeap->RawBlockArray[BlockIndex].NextIndex)
- {
- pBlock = &pHeap->RawBlockArray[BlockIndex];
-
- if (pBlock->PhysAddr<=Address &&
- (pBlock->PhysAddr+pBlock->size) >= (Address+Length) &&
- pBlock->IsFree)
- {
- // If the free region starts before the preallocated region,
- // split the free region into two blocks.
- if (pBlock->PhysAddr < Address)
- {
- NvRmHeapSimpleBlock *NewBlock;
- NvU32 NewBlockIndex;
-
- // Grab a block off the spare list and link it into place
- NewBlockIndex = pHeap->SpareBlockIndex;
- NewBlock = &pHeap->RawBlockArray[NewBlockIndex];
- pHeap->SpareBlockIndex = NewBlock->NextIndex;
-
- NewBlock->NextIndex = pBlock->NextIndex;
- pBlock->NextIndex = NewBlockIndex;
-
- // Set up the new block
- NewBlock->IsFree = NV_TRUE;
- NewBlock->PhysAddr = Address;
- NewBlock->size = pBlock->size;
-
- // Shrink the current block so it ends where the preallocated region begins
- pBlock->size = (Address - pBlock->PhysAddr);
- NewBlock->size -= pBlock->size;
-
- // Advance to the block we are actually going to allocate out of
- pBlock = NewBlock;
- }
-
- if ((pBlock->PhysAddr + pBlock->size) > (Address + Length))
- {
- NvRmHeapSimpleBlock *NewBlock;
- NvU32 NewBlockIndex;
-
- NewBlockIndex = pHeap->SpareBlockIndex;
- NewBlock = &pHeap->RawBlockArray[NewBlockIndex];
- pHeap->SpareBlockIndex = NewBlock->NextIndex;
-
- NewBlock->NextIndex = pBlock->NextIndex;
- pBlock->NextIndex = NewBlockIndex;
-
- NewBlock->IsFree = NV_TRUE;
- NewBlock->PhysAddr = (pBlock->PhysAddr + Length);
- NewBlock->size = (pBlock->size - Length);
-
- pBlock->size = Length;
- }
-
- NV_ASSERT(pBlock->PhysAddr == Address &&
- pBlock->size == Length);
-
- pBlock->IsFree = NV_FALSE;
- SanityCheckHeap(pHeap);
-
- NvOsMutexUnlock(pHeap->mutex);
- return NvSuccess;
- }
- }
-
- SanityCheckHeap(pHeap);
- NvOsMutexUnlock(pHeap->mutex);
- return NvError_InsufficientMemory;
-}
-
-
-NvError NvRmPrivHeapSimpleAlloc(
- NvRmHeapSimple *pHeap,
- NvU32 size,
- NvU32 align,
- NvRmPhysAddr *pPAddr)
-{
- NvRmHeapSimpleBlock *pBlock;
- NvU32 BlockIndex;
-
- // Must align to a power of two
- // Alignment offset should be less than the total alignment
- NV_ASSERT(!(align & (align-1)));
-
- NV_ASSERT(pHeap != NULL);
-
- NvOsMutexLock(pHeap->mutex);
-
- if (NvRmPrivHeapSimpleGrowBlockArray(pHeap)!=NvSuccess)
- {
- NvOsMutexUnlock(pHeap->mutex);
- return NvError_InsufficientMemory;
- }
-
- // Scan through the list of blocks
- for (BlockIndex = pHeap->BlockIndex; BlockIndex != INVALID_INDEX; BlockIndex = pHeap->RawBlockArray[BlockIndex].NextIndex)
- {
- NvRmPhysAddr NewOffset;
- NvU32 ExtraAlignSpace;
-
- pBlock = &pHeap->RawBlockArray[BlockIndex];
-
- // Skip blocks that are not free
- if (!pBlock->IsFree)
- {
- continue;
- }
-
- // Compute location where this allocation would start in this block, based
- // on the alignment and range requested
- NewOffset = pBlock->PhysAddr;
-
- NewOffset = (NewOffset + align-1) & ~(align-1);
- NV_ASSERT(NewOffset >= pBlock->PhysAddr);
- ExtraAlignSpace = NewOffset - pBlock->PhysAddr;
-
- // Is the block too small to fit this allocation, including the extra space
- // required for alignment?
- if (pBlock->size < (size + ExtraAlignSpace) )
- continue;
-
- // Do we need to split this block in two to start the allocation at the proper
- // alignment?
- if (ExtraAlignSpace > 0)
- {
- NvRmHeapSimpleBlock *NewBlock;
- NvU32 NewBlockIndex;
-
- // Grab a block off the spare list and link it into place
- NewBlockIndex = pHeap->SpareBlockIndex;
- NewBlock = &pHeap->RawBlockArray[NewBlockIndex];
- pHeap->SpareBlockIndex = NewBlock->NextIndex;
-
- NewBlock->NextIndex = pBlock->NextIndex;
- pBlock->NextIndex = NewBlockIndex;
-
- // Set up the new block
- NewBlock->IsFree = NV_TRUE;
- NewBlock->PhysAddr = pBlock->PhysAddr + ExtraAlignSpace;
- NewBlock->size = pBlock->size - ExtraAlignSpace;
-
- // Shrink the current block to match this allocation
- pBlock->size = ExtraAlignSpace;
-
- // Advance to the block we are actually going to allocate out of
- pBlock = NewBlock;
- }
-
- // Do we need to split this block into two?
- if (pBlock->size > size)
- {
- NvRmHeapSimpleBlock *NewBlock;
- NvU32 NewBlockIndex;
-
- // Grab a block off the spare list and link it into place
- NewBlockIndex = pHeap->SpareBlockIndex;
- NewBlock = &pHeap->RawBlockArray[NewBlockIndex];
- pHeap->SpareBlockIndex = NewBlock->NextIndex;
- NewBlock->NextIndex = pBlock->NextIndex;
- pBlock->NextIndex = NewBlockIndex;
-
- // Set up the new block
- NewBlock->IsFree = NV_TRUE;
- NewBlock->PhysAddr = pBlock->PhysAddr + size;
- NewBlock->size = pBlock->size - size;
-
- // Shrink the current block to match this allocation
- pBlock->size = size;
- }
-
- NV_ASSERT(pBlock->size == size);
- pBlock->IsFree = NV_FALSE;
-
- *pPAddr = pBlock->PhysAddr;
- SanityCheckHeap(pHeap);
-
- NvOsMutexUnlock(pHeap->mutex);
- return NvSuccess;
- }
-
- SanityCheckHeap(pHeap);
- NvOsMutexUnlock(pHeap->mutex);
- return NvError_InsufficientMemory;
-}
-
-
-
-
-void NvRmPrivHeapSimpleFree(NvRmHeapSimple *pHeap, NvRmPhysAddr PhysAddr)
-{
- NvRmHeapSimpleBlock *pBlock = NULL;
- NvRmHeapSimpleBlock *pNext = NULL;
- NvRmHeapSimpleBlock *pPrev = NULL;
-
- NvU32 BlockIndex;
- NvU32 PrevIndex = INVALID_INDEX;
- NvU32 NextIndex = INVALID_INDEX;
-
-
- NV_ASSERT(pHeap != NULL);
-
- NvOsMutexLock(pHeap->mutex);
-
- // Find the block we're being asked to free
- BlockIndex = pHeap->BlockIndex;
- pBlock = &pHeap->RawBlockArray[BlockIndex];
- while (BlockIndex != INVALID_INDEX && (pBlock->PhysAddr != PhysAddr))
- {
- PrevIndex = BlockIndex;
- BlockIndex = pBlock->NextIndex;
- pBlock = &pHeap->RawBlockArray[BlockIndex];
- }
-
- // The block we're being asked to free didn't exist or was already free
- if (BlockIndex == INVALID_INDEX || pBlock->IsFree)
- {
- SanityCheckHeap(pHeap);
- NvOsMutexUnlock(pHeap->mutex);
- return;
- }
-
- // This block is now a free block
- pBlock->IsFree = NV_TRUE;
-
- // If next block is free, merge the two into one block
- NextIndex = pBlock->NextIndex;
- pNext = &pHeap->RawBlockArray[NextIndex];
- if (NextIndex != INVALID_INDEX && pNext->IsFree)
- {
- pBlock->size += pNext->size;
- pBlock->NextIndex = pNext->NextIndex;
-
- pNext->NextIndex = pHeap->SpareBlockIndex;
- pHeap->SpareBlockIndex = NextIndex;
- }
-
- // If previous block is free, merge the two into one block
- pPrev = &pHeap->RawBlockArray[PrevIndex];
- if (PrevIndex != INVALID_INDEX && pPrev->IsFree)
- {
- pPrev->size += pBlock->size;
- pPrev->NextIndex = pBlock->NextIndex;
-
- pBlock->NextIndex = pHeap->SpareBlockIndex;
- pHeap->SpareBlockIndex = BlockIndex;
- }
- SanityCheckHeap(pHeap);
- NvOsMutexUnlock(pHeap->mutex);
-}
-
-NvS32 NvRmPrivHeapSimpleMemoryUsed(NvRmHeapSimple* pHeap)
-{
- NvS32 Index;
- NvS32 MemUsed = 0;
-
- NV_ASSERT(pHeap != NULL);
-
- NvOsMutexLock(pHeap->mutex);
-
- for (Index = pHeap->BlockIndex; Index != INVALID_INDEX; )
- {
- NvRmHeapSimpleBlock* Block = &pHeap->RawBlockArray[Index];
-
- if (!Block->IsFree)
- MemUsed += Block->size;
- Index = Block->NextIndex;
- }
-
- NvOsMutexUnlock(pHeap->mutex);
-
- return MemUsed;
-}
-
-NvS32 NvRmPrivHeapSimpleLargestFreeBlock(NvRmHeapSimple* pHeap)
-{
- NvS32 Index;
- NvS32 MaxFree = 0;
-
- NV_ASSERT(pHeap != NULL);
-
- NvOsMutexLock(pHeap->mutex);
-
- for (Index = pHeap->BlockIndex; Index != INVALID_INDEX; )
- {
- NvRmHeapSimpleBlock* Block = &pHeap->RawBlockArray[Index];
- int size = (int)Block->size;
-
- if (Block->IsFree && size > MaxFree)
- MaxFree = size;
- Index = Block->NextIndex;
- }
-
- NvOsMutexUnlock(pHeap->mutex);
- return MaxFree;
-}
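
The allocator deleted above is first-fit: NvRmPrivHeapSimpleAlloc walks the index-linked block list, rounds each free block's base up to the requested power-of-two alignment, and splits off alignment padding and any size remainder as new free blocks. The core arithmetic, as a standalone helper (hypothetical names; assumes align is a power of two, as the NV_ASSERT in the function does):

    #include <assert.h>
    #include <stdint.h>

    /* Round addr up to the next multiple of align (align is a power of two). */
    static uintptr_t align_up(uintptr_t addr, uintptr_t align)
    {
        assert(align && (align & (align - 1)) == 0);
        return (addr + align - 1) & ~(align - 1);
    }

    /* First-fit test for one free block: can [block_base, block_base+block_size)
     * hold 'size' bytes starting at an 'align'-aligned address?  On success the
     * aligned start is returned through *out. */
    static int block_fits(uintptr_t block_base, uintptr_t block_size,
                          uintptr_t size, uintptr_t align, uintptr_t *out)
    {
        uintptr_t start = align_up(block_base, align);
        uintptr_t pad   = start - block_base;   /* bytes lost to alignment */

        if (block_size < size + pad)
            return 0;                           /* too small, keep walking */
        *out = start;                           /* caller splits off 'pad' and
                                                 * any tail beyond 'size'   */
        return 1;
    }
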
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.h
deleted file mode 100644
index 588b87ad515b..000000000000
--- a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2007-2009 NVIDIA Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the NVIDIA Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef NVRM_HEAP_SIMPLE_H
-#define NVRM_HEAP_SIMPLE_H
-
-#include "nvcommon.h"
-#include "nvrm_init.h"
-#include "nvos.h"
-
-
-typedef struct NvRmHeapSimpleBlockRec NvRmHeapSimpleBlock;
-struct NvRmHeapSimpleBlockRec
-{
- NvBool IsFree;
- NvRmPhysAddr PhysAddr;
- NvU32 size;
-
- NvU32 NextIndex;
-
- // debug info
- NvU32 touched;
-};
-
-
-typedef struct NvRmHeapSimpleRec
-{
- NvRmPhysAddr base;
- NvU32 size;
- NvU32 ArraySize;
-
- NvRmHeapSimpleBlock *RawBlockArray;
-
- NvU32 BlockIndex;
- NvU32 SpareBlockIndex;
-
- NvOsMutexHandle mutex;
-} NvRmHeapSimple;
-
-NvError NvRmPrivHeapSimple_HeapAlloc(NvRmPhysAddr Base, NvU32 Size, NvRmHeapSimple *pNewHeap);
-void NvRmPrivHeapSimple_HeapFree(NvRmHeapSimple *);
-
-NvError NvRmPrivHeapSimpleAlloc(NvRmHeapSimple *, NvU32 size, NvU32 align, NvRmPhysAddr *paddr);
-
-NvError NvRmPrivHeapSimplePreAlloc(
- NvRmHeapSimple *,
- NvRmPhysAddr Address,
- NvU32 Length);
-
-void NvRmPrivHeapSimpleFree(NvRmHeapSimple *, NvRmPhysAddr paddr);
-
-NvS32 NvRmPrivHeapSimpleMemoryUsed(NvRmHeapSimple* pHeap);
-
-NvS32 NvRmPrivHeapSimpleLargestFreeBlock(NvRmHeapSimple* pHeap);
-
-#endif // NVRM_HEAP_SIMPLE_H
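
For reference, a hedged sketch of how a client drove this API end to end, using only the declarations above (the base address and sizes are invented for illustration):

    /* Illustrative only: exercises the NvRmPrivHeapSimple API exactly as
     * declared above, with an invented 1 MB region at 0x10000000. */
    static NvError example_carveout_user(void)
    {
        NvRmHeapSimple heap;
        NvRmPhysAddr addr;
        NvError err;

        err = NvRmPrivHeapSimple_HeapAlloc(0x10000000, 1 << 20, &heap);
        if (err != NvSuccess)
            return err;

        /* 64 KB block, 4 KB aligned, first-fit from the region above. */
        err = NvRmPrivHeapSimpleAlloc(&heap, 64 << 10, 4 << 10, &addr);
        if (err == NvSuccess)
            NvRmPrivHeapSimpleFree(&heap, addr);

        NvRmPrivHeapSimple_HeapFree(&heap);
        return err;
    }
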
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr.c
deleted file mode 100644
index 08997f589a60..000000000000
--- a/arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr.c
+++ /dev/null
@@ -1,1226 +0,0 @@
-/*
- * Copyright (c) 2007-2009 NVIDIA Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the NVIDIA Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "nvrm_memmgr.h"
-#include "nvrm_memmgr_private.h"
-#include "nvrm_heap_simple.h"
-#include "ap15/ap15rm_private.h"
-#include "nvos.h"
-#include "nvbootargs.h"
-#include "nvrm_chiplib.h"
-
-/* FIXME: temporary hack to force all Linux allocations to be page-aligned */
-#if NVOS_IS_LINUX
-#define NVRM_ALLOC_MIN_ALIGN 4096
-#else
-#define NVRM_ALLOC_MIN_ALIGN 4
-#endif
-
-#define NVRM_CHECK_PIN 0
-
-#define NVRM_MEM_MAGIC 0xdead9812
-#define NVRM_HMEM_CHECK(hMem) \
- do { \
- if (NVRM_HMEM_CHECK_MAGIC) { \
- NV_ASSERT(((NvU32)(hMem)&1)==0); \
- if (((NvU32)(hMem)&1)) { \
- (hMem) = idtomem(hMem); \
- } \
- NV_ASSERT((hMem)->magic == NVRM_MEM_MAGIC); \
- } \
- } while(0)
-
-static NvRmMemHandle idtomem(NvRmMemHandle hMem)
-{
- NvOsDebugPrintf("RMMEM id->mem %08x\n",(int)hMem);
- return (NvRmMemHandle)((NvU32)hMem&~1UL);
-}
-
-#if NVRM_MEM_TRACE
-#undef NvRmMemHandleCreate
-#undef NvRmMemHandleFree
-#undef NvRmMemGetId
-#undef NvRmMemHandleFromId
-#endif
-
-
-/* GART related */
-NvBool gs_GartInited = NV_FALSE;
-NvRmHeapSimple gs_GartAllocator;
-NvU32 *gs_GartSave = NULL;
-static NvRmPrivHeap gs_GartHeap;
-static NvUPtr gs_GartBaseAddr;
-
-static NvError (*s_HeapGartAlloc)( NvRmDeviceHandle hDevice,
- NvOsPageAllocHandle hPageHandle,
- NvU32 NumberOfPages, NvRmPhysAddr *PAddr);
-static void (*s_HeapGartFree)( NvRmDeviceHandle hDevice,
- NvRmPhysAddr addr, NvU32 NumberOfPages);
-static void (*s_GartSuspend)( NvRmDeviceHandle hDevice ) = NULL;
-static void (*s_GartResume)( NvRmDeviceHandle hDevice ) = NULL;
-
-
-static NvU32 gs_NextPreservedMemHandleKey;
-static NvRmMemHandle gs_PreservedHandles[NV_BOOTARGS_MAX_PRESERVED_MEMHANDLES];
-
-/*
- * Notes:
- *
- * 1) The allocation of the handles should fall back to a block allocator
- * that allocates say 1024 at a time to reduce heap fragmentation.
- *
- */
-
-NvError NvRmMemHandleCreate(
- NvRmDeviceHandle hRmDevice,
- NvRmMemHandle *phMem,
- NvU32 size)
-{
- NvRmMemHandle pNewHandle = NULL;
- NvError err = NvSuccess;
-
-#if NVCPU_IS_X86
- pNewHandle = NvOsAlloc(sizeof(*pNewHandle)+4);
- pNewHandle = (NvRmMemHandle)(((NvU32)pNewHandle+3)&~3UL);
-#else
- pNewHandle = NvOsAlloc(sizeof(*pNewHandle));
-#endif
- if (!pNewHandle)
- {
- err = NvError_InsufficientMemory;
- goto exit_gracefully;
- }
-
- NV_ASSERT(((NvU32)pNewHandle & 1) == 0);
-
- NvOsMemset(pNewHandle, 0, sizeof(*pNewHandle));
- pNewHandle->size = size;
- pNewHandle->hRmDevice = hRmDevice;
- pNewHandle->PhysicalAddress = NV_RM_INVALID_PHYS_ADDRESS;
- pNewHandle->VirtualAddress = NULL;
- pNewHandle->refcount = 1;
- pNewHandle->pin_count = 0;
- pNewHandle->coherency = NvOsMemAttribute_Uncached;
-#if NVRM_HMEM_CHECK_MAGIC
- pNewHandle->magic = NVRM_MEM_MAGIC;
-#endif
-
- *phMem = pNewHandle;
-
-exit_gracefully:
- if (err != NvSuccess)
- NvOsFree(pNewHandle);
-
- return err;
-}
-
-void NvRmPrivMemIncrRef(NvRmMemHandle hMem)
-{
- NV_ASSERT(hMem);
- NvOsAtomicExchangeAdd32(&hMem->refcount, 1);
-}
-
-/* Attempt to use the pre-mapped carveout or iram aperture on Windows CE */
-#if !(NVOS_IS_LINUX && !NVCPU_IS_X86)
-static void *NvRmMemMapGlobalHeap(
- NvRmPhysAddr base,
- NvU32 len,
- NvRmHeap heap,
- NvOsMemAttribute coherency)
-{
- if (coherency == NvOsMemAttribute_WriteBack)
- return NULL;
-
- if (heap == NvRmHeap_ExternalCarveOut)
- return NvRmPrivHeapCarveoutMemMap(base, len, coherency);
- else if (heap == NvRmHeap_IRam)
- return NvRmPrivHeapIramMemMap(base, len, coherency);
-
- return NULL;
-}
-#else
-#define NvRmMemMapGlobalHeap(base,len,heap,coherency) NULL
-#endif
-
-static void NvRmPrivMemFree(NvRmMemHandle hMem)
-{
- if (!hMem)
- return;
-
- NVRM_HMEM_CHECK(hMem);
-
- if (!NV_RM_HMEM_IS_ALLOCATED(hMem))
- return;
-
- if(NVCPU_IS_X86 && !NvRmIsSimulation())
- {
- NvOsFree(hMem->VirtualAddress);
- hMem->VirtualAddress = NULL;
- }
-
- if (!NvRmMemMapGlobalHeap(hMem->PhysicalAddress, 4, hMem->heap,
- hMem->coherency) && hMem->VirtualAddress) {
- NvRmMemUnmap(hMem, hMem->VirtualAddress, hMem->size);
- hMem->VirtualAddress = NULL;
- }
-
- switch (hMem->heap)
- {
- case NvRmHeap_ExternalCarveOut:
- NvRmPrivHeapCarveoutFree(hMem->PhysicalAddress);
- break;
- case NvRmHeap_IRam:
- NvRmPrivHeapIramFree(hMem->PhysicalAddress);
- break;
- case NvRmHeap_GART:
- (*s_HeapGartFree)(hMem->hRmDevice, hMem->PhysicalAddress,
- hMem->Pages);
- NvOsPageFree(hMem->hPageHandle);
- break;
- case NvRmHeap_External:
- NvOsPageFree(hMem->hPageHandle);
- break;
- default:
- break;
- }
-
- hMem->PhysicalAddress = NV_RM_INVALID_PHYS_ADDRESS;
- hMem->VirtualAddress = NULL;
- hMem->heap = 0;
-#if NVRM_HMEM_CHECK_MAGIC
- hMem->magic = 0;
-#endif
-}
-
-void NvRmMemHandleFree(NvRmMemHandle hMem)
-{
- NvS32 old;
- NvOsMutexHandle mutex;
-
- if( !hMem )
- {
- return;
- }
-
- NVRM_HMEM_CHECK(hMem);
- old = NvOsAtomicExchangeAdd32(&hMem->refcount, -1);
- if(old > 1)
- {
- return;
- }
-
- NV_ASSERT(old != 0);
-
- mutex = hMem->hRmDevice->MemMgrMutex;
- NvOsMutexLock(mutex);
-
- NvRmPrivMemFree(hMem);
- NV_ASSERT(hMem->mapped == NV_FALSE);
- if (hMem->mapped == NV_TRUE)
- {
- NvRmMemUnmap(hMem, hMem->VirtualAddress, hMem->size);
- }
-
-#if NVRM_HMEM_CHECK_MAGIC
- hMem->magic = 0;
-#endif
-
- NvOsFree(hMem);
-
- NvOsMutexUnlock( mutex );
-}
-
-#define ERRATA_398959(ChipId) \
- ((ChipId).Id == 0x15 && (ChipId).Major == 1 && (ChipId).Minor == 1)
-
-
-NvError NvRmMemAlloc(
- NvRmMemHandle hMem,
- const NvRmHeap *Heaps,
- NvU32 NumHeaps,
- NvU32 Alignment,
- NvOsMemAttribute Coherency)
-{
- // Default heap list does not include GART due to AP15 hardware bug. GART
- // will be re-added to default heap list on AP20 and beyond.
- NvRmHeap DefaultHeaps[3];
- NvU32 i;
- NvError err;
-
-
- NV_ASSERT(hMem && (!NumHeaps || Heaps));
- NVRM_HMEM_CHECK(hMem);
-
- /* FIXME: Windows should support full caching for memory handles.
- * But not yet.
- */
- if (Coherency == NvOsMemAttribute_Uncached)
- Coherency = NvOsMemAttribute_WriteCombined;
-
- if (NV_RM_HMEM_IS_ALLOCATED(hMem))
- return NvError_AlreadyAllocated;
-
- if(NVCPU_IS_X86 && !NvRmIsSimulation())
- {
- hMem->VirtualAddress = NvOsAlloc(hMem->size);
- if(hMem->VirtualAddress)
- {
- if (Heaps)
- {
- hMem->heap = Heaps[0];
- }
-
- return NvSuccess;
- }
- return NvError_InsufficientMemory;
- }
-
- NvOsMutexLock(hMem->hRmDevice->MemMgrMutex);
-
- if (hMem->size <= NVCPU_MIN_PAGE_SIZE &&
- (!NumHeaps || Heaps[0] != NvRmHeap_IRam))
- {
- DefaultHeaps[0] = NvRmHeap_External;
- DefaultHeaps[1] = NvRmHeap_ExternalCarveOut;
- Heaps = DefaultHeaps;
- NumHeaps = 2;
- }
- else if (!NumHeaps)
- {
- DefaultHeaps[0] = NvRmHeap_ExternalCarveOut;
- DefaultHeaps[1] = NvRmHeap_External;
- NumHeaps = 2;
- if (!ERRATA_398959(hMem->hRmDevice->ChipId))
- DefaultHeaps[NumHeaps++] = NvRmHeap_GART;
- Heaps = DefaultHeaps;
- }
-
- // Enforce the platform's minimum allocation alignment (page-sized on Linux, 4 bytes elsewhere).
- if (Alignment < NVRM_ALLOC_MIN_ALIGN)
- Alignment = NVRM_ALLOC_MIN_ALIGN;
-
- for (i=0, err=NvError_InsufficientMemory;
- i<NumHeaps && err!=NvSuccess; i++)
- {
- if (Alignment > NVCPU_MIN_PAGE_SIZE &&
- (Heaps[i]==NvRmHeap_External || Heaps[i]==NvRmHeap_GART))
- {
- NV_ASSERT(!"Invalid alignment request to GART / External heap");
- continue;
- }
-
- switch (Heaps[i])
- {
- case NvRmHeap_ExternalCarveOut:
- err = NvRmPrivHeapCarveoutAlloc(hMem->size,
- Alignment, &hMem->PhysicalAddress);
- break;
- case NvRmHeap_IRam:
- err = NvRmPrivHeapIramAlloc(hMem->size,
- Alignment, &hMem->PhysicalAddress);
- break;
- case NvRmHeap_External:
- err = NvOsPageAlloc(hMem->size, Coherency,
- NvOsPageFlags_Contiguous, NVOS_MEM_READ_WRITE,
- &hMem->hPageHandle);
- break;
- case NvRmHeap_GART:
- err = NvOsPageAlloc(hMem->size, Coherency,
- NvOsPageFlags_NonContiguous, NVOS_MEM_READ_WRITE,
- &hMem->hPageHandle);
-
- if (err != NvSuccess)
- break;
-
- hMem->Pages = (hMem->size+(GART_PAGE_SIZE-1))/GART_PAGE_SIZE;
-
- err = (*s_HeapGartAlloc)(hMem->hRmDevice,
- hMem->hPageHandle, hMem->Pages, &hMem->PhysicalAddress);
-
- if (err == NvSuccess)
- break;
-
- hMem->Pages = 0;
- NvOsPageFree(hMem->hPageHandle);
- hMem->hPageHandle = NULL;
- break;
-
- default:
- NV_ASSERT(!"Invalid heap in heaps array");
- }
-
- if (err==NvSuccess)
- break;
- }
-
- NvOsMutexUnlock(hMem->hRmDevice->MemMgrMutex);
-
- if (err == NvSuccess)
- {
- hMem->alignment = Alignment;
- hMem->heap = Heaps[i];
- hMem->coherency = Coherency;
-
- /* Don't cache virtual mappings for cacheable handles in the RM,
- * since there isn't a good way to ensure proper coherency */
- if (Coherency != NvOsMemAttribute_WriteBack)
- {
- NvRmMemMap(hMem, 0, hMem->size, NVOS_MEM_READ_WRITE,
- &hMem->VirtualAddress);
- }
- }
-
- return err;
-}
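
NvRmMemAlloc above builds a priority-ordered heap list (carveout, external, GART, depending on allocation size and chip errata) and tries each heap in turn until one allocation succeeds. The shape of that fallback loop, reduced to a standalone sketch (the heap_alloc_fn callback type is a hypothetical stand-in for the per-heap allocators):

    /* Illustrative fallback loop: try each candidate heap in priority order
     * and stop at the first NvSuccess, matching the loop in NvRmMemAlloc. */
    typedef NvError (*heap_alloc_fn)(NvU32 size, NvU32 align, NvRmPhysAddr *out);

    static NvError alloc_with_fallback(const heap_alloc_fn *heaps, NvU32 count,
                                       NvU32 size, NvU32 align, NvRmPhysAddr *out)
    {
        NvError err = NvError_InsufficientMemory;
        NvU32 i;

        for (i = 0; i < count && err != NvSuccess; i++)
            err = heaps[i](size, align, out);

        return err;
    }
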
-
-NvU32 NvRmMemPin(NvRmMemHandle hMem)
-{
- NvS32 old;
-
- NV_ASSERT(hMem);
- NVRM_HMEM_CHECK(hMem);
-
- old = NvOsAtomicExchangeAdd32(&hMem->pin_count, 1);
-
- NV_ASSERT(old != -1);
-
- // FIXME: finish implementation
-
- if (NVCPU_IS_X86 && !NvRmIsSimulation())
- return 0xFFFFFFFF;
-
- switch (hMem->heap)
- {
- case NvRmHeap_External:
- return (NvU32)NvOsPageAddress(hMem->hPageHandle, 0);
- case NvRmHeap_ExternalCarveOut:
- case NvRmHeap_GART:
- case NvRmHeap_IRam:
- return hMem->PhysicalAddress;
- default:
- NV_ASSERT(!"Unknown heap");
- return 0xFFFFFFFF;
- }
-}
-
-void NvRmMemPinMult(NvRmMemHandle *hMems, NvU32 *Addrs, NvU32 Count)
-{
- NvU32 i;
- for( i = 0; i < Count; i++ )
- {
- Addrs[i] = NvRmMemPin( hMems[i] );
- }
-}
-
-void NvRmMemUnpin(NvRmMemHandle hMem)
-{
- NvS32 old;
-
- if( !hMem )
- {
- return;
- }
-
- NVRM_HMEM_CHECK(hMem);
-
- old = NvOsAtomicExchangeAdd32(&hMem->pin_count, -1);
- NV_ASSERT(old != 0);
-}
-
-void NvRmMemUnpinMult(NvRmMemHandle *hMems, NvU32 Count)
-{
- NvU32 i;
- for(i = 0; i < Count; i++)
- {
- NvRmMemUnpin(hMems[i]);
- }
-}
-
-NvU32 NvRmMemGetAddress(NvRmMemHandle hMem, NvU32 Offset)
-{
- NV_ASSERT(hMem != NULL);
- NV_ASSERT(Offset < hMem->size);
- NVRM_HMEM_CHECK(hMem);
-
-#if NVRM_CHECK_PIN
- NV_ASSERT( hMem->pin_count );
-#endif
-
- if(NVCPU_IS_X86 && !NvRmIsSimulation())
- {
- return (NvU32)-1;
- }
-
- switch (hMem->heap)
- {
- case NvRmHeap_External:
- return (NvU32)NvOsPageAddress(hMem->hPageHandle, Offset);
-
- case NvRmHeap_ExternalCarveOut:
- case NvRmHeap_GART:
- case NvRmHeap_IRam:
- return (hMem->PhysicalAddress + Offset);
-
- default:
- NV_ASSERT(!"Unknown heap");
- break;
- }
-
- return (NvU32)-1;
-}
-
-
-
-
-NvError NvRmMemMap(
- NvRmMemHandle hMem,
- NvU32 Offset,
- NvU32 Size,
- NvU32 Flags,
- void **pVirtAddr)
-{
- NV_ASSERT(Offset + Size <= hMem->size);
- NVRM_HMEM_CHECK(hMem);
-
- if (!hMem->VirtualAddress)
- hMem->VirtualAddress = NvRmMemMapGlobalHeap(
- hMem->PhysicalAddress+Offset, Size, hMem->heap, hMem->coherency);
-
- if (NvRmIsSimulation())
- return NvError_InsufficientMemory;
-
- if (hMem->VirtualAddress)
- {
- *pVirtAddr = (NvU8 *)hMem->VirtualAddress + Offset;
- return NvSuccess;
- }
-
- switch (hMem->heap)
- {
- case NvRmHeap_ExternalCarveOut:
- case NvRmHeap_IRam:
-#if !(NVOS_IS_LINUX && !NVCPU_IS_X86)
- case NvRmHeap_GART:
-#endif
- return NvOsPhysicalMemMap(hMem->PhysicalAddress + Offset,
- Size, hMem->coherency, Flags, pVirtAddr);
- case NvRmHeap_External:
- return NvOsPageMap(hMem->hPageHandle, Offset, Size, pVirtAddr);
- default:
- *pVirtAddr = NULL;
- return NvError_NotSupported;
- }
-}
-
-
-void NvRmMemUnmap(NvRmMemHandle hMem, void *pVirtAddr, NvU32 length)
-{
- if (!hMem || !pVirtAddr || !length) {
- return;
- }
-
- NVRM_HMEM_CHECK(hMem);
-
- // No mappings ever get created in these cases
- if (NvRmIsSimulation() || NVCPU_IS_X86)
- return;
-
- /* Don't unmap from the global heap on CE */
- if (hMem->VirtualAddress &&
- NvRmMemMapGlobalHeap(hMem->PhysicalAddress, 4, hMem->heap,
- hMem->coherency))
- {
- return;
- }
-
- /* Only unmap entire allocations; leaked mappings will be cleaned up
- * when the handle is freed
- */
- if (pVirtAddr != hMem->VirtualAddress || length != hMem->size)
- return;
-
- hMem->VirtualAddress = NULL;
-
- switch (hMem->heap)
- {
- case NvRmHeap_External:
- NvOsPageUnmap(hMem->hPageHandle, pVirtAddr, length);
- break;
- case NvRmHeap_ExternalCarveOut:
- case NvRmHeap_IRam:
-#if NVOS_IS_WINDOWS
- case NvRmHeap_GART:
-#endif
- NvOsPhysicalMemUnmap(pVirtAddr, length);
- break;
- default:
- break;
- }
-}
-
-NvU8 NvRmMemRd08(NvRmMemHandle hMem, NvU32 Offset)
-{
- void *vaddr;
-
- NV_ASSERT(hMem->VirtualAddress != NULL);
- if (!hMem->VirtualAddress)
- return 0;
-
- vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
- NV_ASSERT(Offset + 1 <= hMem->size);
- NVRM_HMEM_CHECK(hMem);
- return NV_READ8(vaddr);
-}
-
-NvU16 NvRmMemRd16(NvRmMemHandle hMem, NvU32 Offset)
-{
- void *vaddr;
-
- NV_ASSERT(hMem->VirtualAddress != NULL);
- if (!hMem->VirtualAddress)
- return 0;
-
- vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
- NV_ASSERT(Offset + 2 <= hMem->size);
- NVRM_HMEM_CHECK(hMem);
- return NV_READ16(vaddr);
-}
-
-NvU32 NvRmMemRd32(NvRmMemHandle hMem, NvU32 Offset)
-{
- void *vaddr;
-
- NV_ASSERT(hMem->VirtualAddress != NULL);
- if (!hMem->VirtualAddress)
- return 0;
-
- vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
- NV_ASSERT(Offset + 4 <= hMem->size);
- NVRM_HMEM_CHECK(hMem);
- return NV_READ32(vaddr);
-}
-
-void NvRmMemWr08(NvRmMemHandle hMem, NvU32 Offset, NvU8 Data)
-{
- void *vaddr;
-
- NV_ASSERT(hMem->VirtualAddress != NULL);
- if (!hMem->VirtualAddress)
- return;
-
- vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
- NV_ASSERT(Offset + 1 <= hMem->size);
- NVRM_HMEM_CHECK(hMem);
- NVRM_RMC_TRACE((&hMem->hRmDevice->rmc, "MemoryWrite8 0x%x 0x%x\n",
- hMem->PhysicalAddress + Offset, Data));
- NV_WRITE08(vaddr, Data);
-}
-
-void NvRmMemWr16(NvRmMemHandle hMem, NvU32 Offset, NvU16 Data)
-{
- void *vaddr;
-
- NV_ASSERT(hMem->VirtualAddress != NULL);
- if (!hMem->VirtualAddress)
- return;
-
- vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
- NV_ASSERT(Offset + 2 <= hMem->size);
- NVRM_HMEM_CHECK(hMem);
- NVRM_RMC_TRACE((&hMem->hRmDevice->rmc, "MemoryWrite16 0x%x 0x%x\n",
- hMem->PhysicalAddress + Offset, Data));
- NV_WRITE16(vaddr, Data);
-}
-
-void NvRmMemWr32(NvRmMemHandle hMem, NvU32 Offset, NvU32 Data)
-{
- void *vaddr;
-
- NV_ASSERT(hMem->VirtualAddress != NULL);
- if (!hMem->VirtualAddress)
- return;
-
- vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
- NV_ASSERT(Offset + 4 <= hMem->size);
- NVRM_HMEM_CHECK(hMem);
- NVRM_RMC_TRACE((&hMem->hRmDevice->rmc, "MemoryWrite32 0x%x 0x%x\n",
- hMem->PhysicalAddress + Offset, Data));
- NV_WRITE32(vaddr, Data);
-}
-
-void NvRmMemRead(NvRmMemHandle hMem, NvU32 Offset, void *pDst, NvU32 Size)
-{
- void *vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
- NV_ASSERT(Offset + Size <= hMem->size);
- NVRM_HMEM_CHECK(hMem);
- NV_READ(pDst, vaddr, Size);
-}
-
-void NvRmMemWrite(
- NvRmMemHandle hMem,
- NvU32 Offset,
- const void *pSrc,
- NvU32 Size)
-{
- void *vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
-#if NV_DEF_RMC_TRACE
- NvU32 i;
-#endif
-
- NV_ASSERT(Offset + Size <= hMem->size);
- NVRM_HMEM_CHECK(hMem);
-
-#if NV_DEF_RMC_TRACE
- for (i = 0; i < Size; i++)
- {
- NvU8 Data = ((const NvU8 *)pSrc)[i];
- NVRM_RMC_TRACE((&hMem->hRmDevice->rmc, "MemoryWrite8 0x%x 0x%x\n",
- hMem->PhysicalAddress + i, Data));
- }
-#endif
-
- NV_WRITE(vaddr, pSrc, Size);
-}
-
-void NvRmMemReadStrided(
- NvRmMemHandle hMem,
- NvU32 Offset,
- NvU32 SrcStride,
- void *pDst,
- NvU32 DstStride,
- NvU32 ElementSize,
- NvU32 Count)
-{
- if ((ElementSize == SrcStride) && (ElementSize == DstStride))
- {
- NvRmMemRead(hMem, Offset, pDst, ElementSize * Count);
- }
- else
- {
- while (Count--)
- {
- NvRmMemRead(hMem, Offset, pDst, ElementSize);
- Offset += SrcStride;
- pDst = (NvU8 *)pDst + DstStride;
- }
- }
-}
-
-void NvRmMemWriteStrided(
- NvRmMemHandle hMem,
- NvU32 Offset,
- NvU32 DstStride,
- const void *pSrc,
- NvU32 SrcStride,
- NvU32 ElementSize,
- NvU32 Count)
-{
- if ((ElementSize == SrcStride) && (ElementSize == DstStride))
- {
- NvRmMemWrite(hMem, Offset, pSrc, ElementSize * Count);
- }
- else
- {
- while (Count--)
- {
- NvRmMemWrite(hMem, Offset, pSrc, ElementSize);
- Offset += DstStride;
- pSrc = (const NvU8 *)pSrc + SrcStride;
- }
- }
-}
-
-void NvRmMemMove(
- NvRmMemHandle dstHMem,
- NvU32 dstOffset,
- NvRmMemHandle srcHMem,
- NvU32 srcOffset,
- NvU32 Size)
-{
- NvU32 i;
-
- NV_ASSERT(dstOffset + Size <= dstHMem->size);
- NV_ASSERT(srcOffset + Size <= srcHMem->size);
- NVRM_HMEM_CHECK(dstHMem);
- NVRM_HMEM_CHECK(srcHMem);
-
- if (((dstHMem->PhysicalAddress |
- srcHMem->PhysicalAddress |
- dstOffset |
- srcOffset |
- Size) & 3) == 0)
- {
- // everything is nicely word aligned
- if (dstHMem == srcHMem && srcOffset < dstOffset)
- {
- for (i=Size; i; )
- {
- NvU32 data;
- i -= 4;
- data = NvRmMemRd32(srcHMem, srcOffset+i);
- NvRmMemWr32(dstHMem, dstOffset+i, data);
- }
- }
- else
- {
- for (i=0; i < Size; i+=4)
- {
- NvU32 data = NvRmMemRd32(srcHMem, srcOffset+i);
- NvRmMemWr32(dstHMem, dstOffset+i, data);
- }
- }
- }
- else
- {
- // fall back to writing one byte at a time
- if (dstHMem == srcHMem && srcOffset < dstOffset)
- {
- for (i=Size; i--;)
- {
- NvU8 data = NvRmMemRd08(srcHMem, srcOffset+i);
- NvRmMemWr08(dstHMem, dstOffset+i, data);
- }
- }
- else
- {
- for (i=0; i < Size; ++i)
- {
- NvU8 data = NvRmMemRd08(srcHMem, srcOffset+i);
- NvRmMemWr08(dstHMem, dstOffset+i, data);
- }
- }
- }
-}
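
NvRmMemMove above picks a copy direction based on overlap: when source and destination are the same handle and the source range starts below the destination, it copies from the tail down so bytes are not clobbered before they are read. The same rule in a standalone byte copier (illustrative only; the real function also has a word-wide fast path):

    #include <stddef.h>
    #include <stdint.h>

    /* Overlap-safe copy: go backwards when the destination starts inside the
     * source range, otherwise copy forwards. */
    static void move_bytes(uint8_t *dst, const uint8_t *src, size_t n)
    {
        size_t i;

        if (dst > src && dst < src + n) {
            for (i = n; i--; )          /* backward: tail first */
                dst[i] = src[i];
        } else {
            for (i = 0; i < n; ++i)     /* forward */
                dst[i] = src[i];
        }
    }
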
-
-void NvRmMemCacheMaint(
- NvRmMemHandle hMem,
- void *pMapping,
- NvU32 Size,
- NvBool Writeback,
- NvBool Invalidate)
-{
- if (!hMem || !pMapping || !Size || !(Writeback || Invalidate))
- return;
-
- NVRM_HMEM_CHECK(hMem);
- NV_ASSERT((NvU8*)pMapping+Size <= (NvU8*)hMem->VirtualAddress+Size);
- if (Writeback && Invalidate)
- NvOsDataCacheWritebackInvalidateRange(pMapping, Size);
- else if (Writeback)
- NvOsDataCacheWritebackRange(pMapping, Size);
- else {
- NV_ASSERT(!"Invalidate-only cache maintenance not supported in NvOs");
- }
-}
-
-NvU32 NvRmMemGetSize(NvRmMemHandle hMem)
-{
- NV_ASSERT(hMem);
- NVRM_HMEM_CHECK(hMem);
- return hMem->size;
-}
-
-NvU32 NvRmMemGetAlignment(NvRmMemHandle hMem)
-{
- NV_ASSERT(hMem);
- NVRM_HMEM_CHECK(hMem);
- return hMem->alignment;
-}
-
-NvU32 NvRmMemGetCacheLineSize(void)
-{
- // !!! FIXME: Currently for all our chips (ap15)
- // both the L1 and L2 cache line sizes
- // are 32 bytes. If this ever changes
- // we need a way to figure it out on
- // a chip by chip basis.
- return 32;
-}
-
-NvRmHeap NvRmMemGetHeapType(NvRmMemHandle hMem, NvU32 *BaseAddr)
-{
- NV_ASSERT(hMem);
- NVRM_HMEM_CHECK(hMem);
-
- if (hMem->heap == NvRmHeap_External)
- *BaseAddr = (NvU32)NvOsPageAddress(hMem->hPageHandle, 0);
- else
- *BaseAddr = hMem->PhysicalAddress;
-
- return hMem->heap;
-}
-
-
-void *NvRmHostAlloc(size_t size)
-{
- return NvOsAlloc(size);
-}
-
-void NvRmHostFree(void *ptr)
-{
- NvOsFree(ptr);
-}
-
-
-NvError NvRmMemMapIntoCallerPtr(
- NvRmMemHandle hMem,
- void *pCallerPtr,
- NvU32 Offset,
- NvU32 Size)
-{
- NvError err;
- NVRM_HMEM_CHECK(hMem);
-
- // The caller should be asking for a whole number of pages; not strictly
- // required, but the caller has already had to do the work to calculate the
- // required number of pages, so they might as well pass in a nice round
- // number, which makes it easier to find bugs.
- NV_ASSERT( (Size & (NVCPU_MIN_PAGE_SIZE-1)) == 0);
-
- // Make sure the supplied virtual address is page aligned.
- NV_ASSERT( (((NvUPtr)pCallerPtr) & (NVCPU_MIN_PAGE_SIZE-1)) == 0);
-
- if (hMem->heap == NvRmHeap_External ||
- hMem->heap == NvRmHeap_GART)
- {
- err = NvOsPageMapIntoPtr(hMem->hPageHandle,
- pCallerPtr,
- Offset,
- Size);
- }
- else if (hMem->heap == NvRmHeap_ExternalCarveOut ||
- hMem->heap == NvRmHeap_IRam)
- {
- // The caller is responsible for sending a size that
- // is the correct number of pages, including this pageoffset
- // at the beginning of the first page.
- NvU32 PhysicalAddr = hMem->PhysicalAddress + Offset;
- PhysicalAddr = PhysicalAddr & ~(NVCPU_MIN_PAGE_SIZE-1);
-
- err = NvOsPhysicalMemMapIntoCaller(
- pCallerPtr,
- PhysicalAddr,
- Size,
- NvOsMemAttribute_Uncached,
- NVOS_MEM_WRITE | NVOS_MEM_READ);
- }
- else
- {
- return NvError_NotImplemented;
- }
-
- return err;
-}
-
-
-NvU32 NvRmMemGetId(NvRmMemHandle hMem)
-{
- NvU32 id = (NvU32)hMem;
-
- // !!! FIXME: Need to really create a unique id to handle the case where
- // hMem is freed, and then the next allocated hMem returns the same pointer
- // value.
-
- NVRM_HMEM_CHECK(hMem);
- NV_ASSERT(((NvU32)hMem & 1) == 0);
- if (!hMem || ((NvU32)hMem & 1))
- return 0;
-
-#if NVRM_MEM_CHECK_ID
- id |= 1;
-#endif
-
- return id;
-}
-
-NvError NvRmMemHandleFromId(NvU32 id, NvRmMemHandle *phMem)
-{
- NvRmMemHandle hMem;
- // !!! FIXME: (see comment in GetId). Specifically handle the case where
- // the memory handle has already been freed.
-
-#if NVRM_MEM_CHECK_ID
- *phMem = NULL;
- NV_ASSERT(id & 1);
- if (!(id & 1))
- return NvError_BadParameter;
-#endif
-
- hMem = (NvRmMemHandle)(id & ~1UL);
-
- NVRM_HMEM_CHECK(hMem);
-
- NvRmPrivMemIncrRef(hMem);
-
- *phMem = hMem;
- return NvSuccess;
-}
-
-NvError NvRmMemHandlePreserveHandle(
- NvRmMemHandle hMem,
- NvU32 *pKey)
-{
- NvError e;
- NvBootArgsPreservedMemHandle ArgMh;
-
- NV_ASSERT(hMem && pKey);
- NvOsMutexLock(hMem->hRmDevice->MemMgrMutex);
- if (gs_NextPreservedMemHandleKey >=
- (NvU32)NvBootArgKey_PreservedMemHandle_Num)
- {
- e = NvError_InsufficientMemory;
- goto clean;
- }
-
- ArgMh.Address = (NvUPtr)hMem->PhysicalAddress;
- ArgMh.Size = hMem->size;
-
- e = NvOsBootArgSet(gs_NextPreservedMemHandleKey, &ArgMh, sizeof(ArgMh));
-
- if (e==NvSuccess)
- {
- *pKey = gs_NextPreservedMemHandleKey;
- gs_NextPreservedMemHandleKey++;
- }
- else
- {
- *pKey = 0;
- e = NvError_InsufficientMemory;
- }
-
- clean:
- NvOsMutexUnlock(hMem->hRmDevice->MemMgrMutex);
- return e;
-}
-
-
-NvError NvRmMemHandleClaimPreservedHandle(
- NvRmDeviceHandle hRm,
- NvU32 Key,
- NvRmMemHandle *pMem)
-{
- NvU32 i;
- NV_ASSERT(hRm && pMem && Key);
- if (!pMem || !hRm ||
- Key<NvBootArgKey_PreservedMemHandle_0 ||
- Key>=NvBootArgKey_PreservedMemHandle_Num)
- return NvError_BadParameter;
-
- *pMem = NULL;
-
- NvOsMutexLock(hRm->MemMgrMutex);
- i = Key - NvBootArgKey_PreservedMemHandle_0;
- *pMem = gs_PreservedHandles[i];
- gs_PreservedHandles[i] = NULL;
- NvOsMutexUnlock(hRm->MemMgrMutex);
-
- if (*pMem)
- return NvSuccess;
-
- return NvError_InsufficientMemory;
-}
-
-
-NvRmPrivHeap *NvRmPrivHeapGartInit(NvRmDeviceHandle hRmDevice)
-{
- NvError err;
- NvU32 length = hRmDevice->GartMemoryInfo.size;
- NvRmPhysAddr base = hRmDevice->GartMemoryInfo.base;
- NvRmModuleCapability caps[2];
- NvRmModuleCapability *pCap = NULL;
-
- caps[0].MajorVersion = 1; // AP15, AP16
- caps[0].MinorVersion = 0;
- caps[0].EcoLevel = 0;
- caps[0].Capability = &caps[0];
-
- caps[1].MajorVersion = 1; // AP20/T20
- caps[1].MinorVersion = 1;
- caps[1].EcoLevel = 0;
- caps[1].Capability = &caps[1];
-
- NV_ASSERT_SUCCESS(NvRmModuleGetCapabilities(
- hRmDevice,
- NvRmPrivModuleID_MemoryController,
- caps,
- NV_ARRAY_SIZE(caps),
- (void**)&pCap));
-
- err = NvRmPrivHeapSimple_HeapAlloc(
- base,
- length,
- &gs_GartAllocator);
-
- if (err != NvSuccess)
- return NULL;
-
- gs_GartHeap.heap = NvRmHeap_GART;
- gs_GartHeap.length = length;
- gs_GartHeap.PhysicalAddress = base;
-
- gs_GartBaseAddr = (NvUPtr)base;
- (void)gs_GartBaseAddr;
-
- if ((pCap->MajorVersion == 1) && (pCap->MinorVersion == 0))
- {
- s_HeapGartAlloc = NvRmPrivAp15HeapGartAlloc;
- s_HeapGartFree = NvRmPrivAp15HeapGartFree;
- s_GartSuspend = NvRmPrivAp15GartSuspend;
- s_GartResume = NvRmPrivAp15GartResume;
- }
- else
- {
- s_HeapGartAlloc = NvRmPrivAp20HeapGartAlloc;
- s_HeapGartFree = NvRmPrivAp20HeapGartFree;
- s_GartSuspend = NvRmPrivAp20GartSuspend;
- s_GartResume = NvRmPrivAp20GartResume;
- }
-
- return &gs_GartHeap;
-}
-
-void NvRmPrivHeapGartDeinit(void)
-{
- // deinit the gart allocator
-
- NvRmPrivHeapSimple_HeapFree(&gs_GartAllocator);
- NvOsMemset(&gs_GartHeap, 0, sizeof(gs_GartHeap));
- NvOsMemset(&gs_GartAllocator, 0, sizeof(gs_GartAllocator));
- NvOsFree( gs_GartSave );
- gs_GartInited = NV_FALSE;
-}
-
-void NvRmPrivGartSuspend(NvRmDeviceHandle hDevice)
-{
- NV_ASSERT(s_GartSuspend);
- (*s_GartSuspend)( hDevice );
-}
-
-void NvRmPrivGartResume(NvRmDeviceHandle hDevice)
-{
- NV_ASSERT(s_GartResume);
- (*s_GartResume)( hDevice );
-}
-
-void NvRmPrivPreservedMemHandleInit(NvRmDeviceHandle hRm)
-{
- unsigned int i;
- NvBootArgsPreservedMemHandle mem;
-
- NvOsMemset(gs_PreservedHandles, 0, sizeof(gs_PreservedHandles));
- gs_NextPreservedMemHandleKey = (NvU32)NvBootArgKey_PreservedMemHandle_0;
-
- for (i=NvBootArgKey_PreservedMemHandle_0;
- i<NvBootArgKey_PreservedMemHandle_Num; i++)
- {
- NvRmMemHandle hMem;
- NvU32 j;
-
- if (NvOsBootArgGet(i, &mem, sizeof(mem))!=NvSuccess)
- break;
-
- if (!mem.Address || !mem.Size)
- break;
-
- if (NvRmMemHandleCreate(hRm, &hMem, mem.Size)!=NvSuccess)
- continue;
-
- hMem->PhysicalAddress = mem.Address;
- j = mem.Address;
- hMem->alignment = 1;
- while ((j & 1) == 0)
- {
- hMem->alignment <<= 1;
- j >>= 1;
- }
-
- if (NvRmPrivHeapCarveoutPreAlloc(mem.Address, mem.Size)==NvSuccess)
- {
- hMem->heap = NvRmHeap_ExternalCarveOut;
- hMem->VirtualAddress = NvRmPrivHeapCarveoutMemMap(mem.Address,
- mem.Size, NvOsMemAttribute_Uncached);
- }
- else if (NvRmPrivHeapIramPreAlloc(mem.Address, mem.Size)==NvSuccess)
- {
- hMem->heap = NvRmHeap_IRam;
- hMem->VirtualAddress = NvRmPrivHeapIramMemMap(mem.Address,
- mem.Size, NvOsMemAttribute_Uncached);
- }
-
- if (hMem->heap)
- gs_PreservedHandles[i-NvBootArgKey_PreservedMemHandle_0] = hMem;
- else
- NvRmMemHandleFree(hMem);
- }
-}
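
The loop near the end of NvRmPrivPreservedMemHandleInit derives a handle's alignment by counting the trailing zero bits of its physical address, i.e. the largest power of two that divides the address. The same value can be computed in one step (an illustrative aside, not code from the driver):

    #include <stdint.h>

    /* Largest power-of-two divisor of addr -- the natural alignment the loop
     * above derives bit by bit.  addr must be non-zero. */
    static uintptr_t natural_alignment(uintptr_t addr)
    {
        return addr & (~addr + 1);   /* equivalent to addr & -addr */
    }
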
-
-NvError NvRmMemGetStat(NvRmMemStat Stat, NvS32* Result)
-{
- /* Main point of this function is to be compatible backwards and forwards,
- * i.e., breaking analysis apps is the thing to avoid.
- * Minimum hassle - maximum impact.
- * Performance is not that big of a deal.
- * Could be extended to use NvS64 as return value. However, NvS64 is
- * slightly more challenging in terms of printing etc. at the client side.
- * This function should return counts in as raw a form as possible; conversions
- * to percentages or anything like that should be left to the client.
- */
- if (Stat == NvRmMemStat_TotalCarveout)
- {
- *Result = NvRmPrivHeapCarveoutTotalSize();
- }
- else if (Stat == NvRmMemStat_UsedCarveout)
- {
- *Result = NvRmPrivHeapCarveoutMemoryUsed();
- }
- else if (Stat == NvRmMemStat_LargestFreeCarveoutBlock)
- {
- *Result = NvRmPrivHeapCarveoutLargestFreeBlock();
- }
- else if (Stat == NvRmMemStat_TotalGart)
- {
- *Result = gs_GartHeap.length;
- }
- else if (Stat == NvRmMemStat_UsedGart)
- {
- *Result = NvRmPrivHeapSimpleMemoryUsed(&gs_GartAllocator);
- }
- else if (Stat == NvRmMemStat_LargestFreeGartBlock)
- {
- *Result = NvRmPrivHeapSimpleLargestFreeBlock(&gs_GartAllocator);
- }
- else
- {
- return NvError_BadParameter;
- }
- return NvSuccess;
-}
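
The handle code above leans on two counters updated with NvOsAtomicExchangeAdd32: refcount, which frees the handle when the last reference is dropped, and pin_count, which records that the address handed out by NvRmMemPin must remain valid until the matching unpin. The same pattern expressed with C11 atomics, as a standalone model (hypothetical struct, not the driver's types):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct mem_handle {
        atomic_int refcount;   /* starts at 1 on create          */
        atomic_int pin_count;  /* 0 == address may be reclaimed  */
    };

    static void handle_get(struct mem_handle *h)
    {
        atomic_fetch_add(&h->refcount, 1);
    }

    /* Returns true when the caller held the last reference and must free h. */
    static bool handle_put(struct mem_handle *h)
    {
        int old = atomic_fetch_sub(&h->refcount, 1);
        assert(old != 0);              /* catch double-free, as the asserts above do */
        return old == 1;
    }

    static void handle_pin(struct mem_handle *h)
    {
        atomic_fetch_add(&h->pin_count, 1);
    }

    static void handle_unpin(struct mem_handle *h)
    {
        int old = atomic_fetch_sub(&h->pin_count, 1);
        assert(old != 0);              /* unbalanced unpin */
    }
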
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_power.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power.c
index 480fe9482185..9bb8ae412807 100644
--- a/arch/arm/mach-tegra/nvrm/core/common/nvrm_power.c
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power.c
@@ -1474,7 +1474,6 @@ NvError
NvRmKernelPowerSuspend( NvRmDeviceHandle hRmDeviceHandle )
{
- NvRmPrivGartSuspend(hRmDeviceHandle);
NvRmPrivPmuInterruptMask(hRmDeviceHandle, NV_TRUE);
NvRmPrivDfsSuspend(NvOdmQueryLowestSocPowerState()->LowestPowerState);
@@ -1523,7 +1522,6 @@ NvError
NvRmKernelPowerResume( NvRmDeviceHandle hRmDeviceHandle )
{
NvRmPrivPmuInterruptMask(hRmDeviceHandle, NV_FALSE);
- NvRmPrivGartResume(hRmDeviceHandle);
return NvSuccess;
}
diff --git a/arch/arm/mach-tegra/tegra_sysmap.c b/arch/arm/mach-tegra/tegra_sysmap.c
index 9e7d26e98a00..05da3c3d8b0a 100755
--- a/arch/arm/mach-tegra/tegra_sysmap.c
+++ b/arch/arm/mach-tegra/tegra_sysmap.c
@@ -42,6 +42,8 @@ static NvRmModuleID tegra_map_name_to_mod(const char *name, int inst)
return NVRM_MODULE_ID(NvRmPrivModuleID_MemoryController, inst);
else if (!strcmp(name, "gart"))
return NVRM_MODULE_ID(NvRmPrivModuleID_Gart, inst);
+ else if (!strcmp(name, "iram"))
+ return NVRM_MODULE_ID(NvRmPrivModuleID_Iram, inst);
return (NvRmModuleID) 0;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d9405d51b154..b23ecd1dd34f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -129,6 +129,25 @@ config DEVNVMAP
is used by NVIDIA Tegra graphics and multimedia drivers for managing
graphics memory.
+config DEVNVMAP_PARANOID
+ bool "Validate all user-provided /dev/nvmap object references"
+ depends on DEVNVMAP
+ default n
+ help
+ Say Y here to enable additional process-level validations and
+ permissions for /dev/nvmap object references provided via ioctls.
+ May result in a decrease in performance.
+
+config DEVNVMAP_RECLAIM_UNPINNED_VM
+ bool "Allow /dev/nvmap to reclaim unpinned I/O virtual memory"
+ depends on DEVNVMAP && TEGRA_IOVMM
+ default n
+ help
+ Say Y here to enable /dev/nvmap to reclaim I/O virtual memory after
+ it has been unpinned, and re-use it for other objects. This can
+ allow a larger I/O virtual memory space than would normally be
+ supported by the hardware, at a slight cost in performance.
+
config SERIAL_NONSTANDARD
bool "Non-standard serial port support"
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 9785f1091721..f91999c7b735 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -903,11 +903,6 @@ static const struct file_operations kmsg_fops = {
.write = kmsg_write,
};
-#ifdef CONFIG_DEVNVMAP
-extern const struct file_operations nvmap_fops;
-extern const struct file_operations knvmap_fops;
-#endif
-
static int memory_open(struct inode * inode, struct file * filp)
{
int ret = 0;
@@ -957,14 +952,6 @@ static int memory_open(struct inode * inode, struct file * filp)
filp->f_op = &oldmem_fops;
break;
#endif
-#ifdef CONFIG_DEVNVMAP
- case 13:
- filp->f_op = &nvmap_fops;
- break;
- case 14:
- filp->f_op = &knvmap_fops;
- break;
-#endif
default:
unlock_kernel();
return -ENXIO;
@@ -1003,10 +990,6 @@ static const struct {
#ifdef CONFIG_CRASH_DUMP
{12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
-#ifdef CONFIG_DEVNVMAP
- {13, "nvmap", S_IRUGO | S_IWUGO, &nvmap_fops},
- {14, "knvmap", S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP, &knvmap_fops },
-#endif
};
static struct class *mem_class;
diff --git a/drivers/char/nvmap.c b/drivers/char/nvmap.c
index f1a3f1ff11b9..8546061016b6 100644
--- a/drivers/char/nvmap.c
+++ b/drivers/char/nvmap.c
@@ -1,9 +1,9 @@
/*
* drivers/char/nvmap.c
*
- * Memory mapping driver for Tegra anonymous memory handles
+ * Memory manager for Tegra GPU memory handles
*
- * Copyright (c) 2009, NVIDIA Corporation.
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,6 +20,7 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/wait.h>
@@ -36,11 +37,17 @@
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/tegra_devices.h>
+#include <linux/rbtree.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
#include <asm/tlbflush.h>
+#include <mach/iovmm.h>
#include "linux/nvmem_ioctl.h"
#include "nvcommon.h"
#include "nvrm_memmgr.h"
-#include "nvrm_memmgr_private.h"
+#include "nvbootargs.h"
+
+/*#define IOVMM_FIRST*/ /* enable to force most allocations from iovmm */
static void nvmap_vma_open(struct vm_area_struct *vma);
@@ -57,11 +64,25 @@ static int nvmap_mmap(struct file *filp, struct vm_area_struct *vma);
static long nvmap_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
-static int nvmap_cache_maint(struct file *filp, void __user *arg);
+static int nvmap_ioctl_getid(struct file *filp, void __user *arg);
+
+static int nvmap_ioctl_get_param(struct file *filp, void __user* arg);
+
+static int nvmap_ioctl_alloc(struct file *filp, void __user *arg);
+
+static int nvmap_ioctl_free(struct file *filp, unsigned long arg);
+
+static int nvmap_ioctl_create(struct file *filp,
+ unsigned int cmd, void __user *arg);
+
+static int nvmap_ioctl_pinop(struct file *filp,
+ bool is_pin, void __user *arg);
+
+static int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg);
static int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg);
-static int nvmap_rw_handle(struct file *filp, int is_read,
+static int nvmap_ioctl_rw_handle(struct file *filp, int is_read,
void __user* arg);
extern void NvRmPrivMemIncrRef(NvRmMemHandle hmem);
@@ -81,9 +102,451 @@ static struct backing_dev_info nvmap_bdi = {
static pte_t *nvmap_pte[NUM_NVMAP_PTES];
static unsigned long nvmap_ptebits[NVMAP_PAGES/BITS_PER_LONG];
+
static DEFINE_SPINLOCK(nvmap_ptelock);
static DECLARE_WAIT_QUEUE_HEAD(nvmap_ptefull);
+/* used to lock the master tree of memory handles */
+static DEFINE_SPINLOCK(nvmap_handle_lock);
+
+/* only one task may be performing pin / unpin operations at once, to
+ * prevent deadlocks caused by interleaved IOVMM re-allocations */
+static DEFINE_MUTEX(nvmap_pin_lock);
+
+/* queue of tasks which are blocking on pin, for IOVMM room */
+static DECLARE_WAIT_QUEUE_HEAD(nvmap_pin_wait);
+static struct rb_root nvmap_handles = RB_ROOT;
+
+static struct tegra_iovmm_client *nvmap_vm_client = NULL;
+
+/* first-fit linear allocator carveout heap manager */
+struct nvmap_mem_block {
+ unsigned long base;
+ size_t size;
+ short next; /* next absolute (address-order) block */
+ short prev; /* previous absolute (address-order) block */
+ short next_free;
+ short prev_free;
+};
+
+struct nvmap_carveout {
+ unsigned short num_blocks;
+ short spare_index;
+ short free_index;
+ short block_index;
+ spinlock_t lock;
+ const char *name;
+ struct nvmap_mem_block *blocks;
+};
+
+enum {
+ CARVEOUT_STAT_TOTAL_SIZE,
+ CARVEOUT_STAT_FREE_SIZE,
+ CARVEOUT_STAT_NUM_BLOCKS,
+ CARVEOUT_STAT_FREE_BLOCKS,
+ CARVEOUT_STAT_LARGEST_BLOCK,
+ CARVEOUT_STAT_LARGEST_FREE,
+ CARVEOUT_STAT_BASE,
+};
+
+static inline pgprot_t _nvmap_flag_to_pgprot(unsigned long flag, pgprot_t base)
+{
+ switch (flag) {
+ case NVMEM_HANDLE_UNCACHEABLE:
+ base = pgprot_noncached(base);
+ break;
+ case NVMEM_HANDLE_WRITE_COMBINE:
+ base = pgprot_writecombine(base);
+ break;
+ case NVMEM_HANDLE_INNER_CACHEABLE:
+ base = pgprot_inner_writeback(base);
+ break;
+ }
+ return base;
+}
+
+static unsigned long _nvmap_carveout_blockstat(struct nvmap_carveout *co,
+ int stat)
+{
+ unsigned long val = 0;
+ short idx;
+ spin_lock(&co->lock);
+
+ if (stat==CARVEOUT_STAT_BASE) {
+ if (co->block_index==-1)
+ val = ~0;
+ else
+ val = co->blocks[co->block_index].base;
+ spin_unlock(&co->lock);
+ return val;
+ }
+
+ if (stat==CARVEOUT_STAT_TOTAL_SIZE ||
+ stat==CARVEOUT_STAT_NUM_BLOCKS ||
+ stat==CARVEOUT_STAT_LARGEST_BLOCK)
+ idx = co->block_index;
+ else
+ idx = co->free_index;
+
+ while (idx!=-1) {
+ switch (stat) {
+ case CARVEOUT_STAT_TOTAL_SIZE:
+ val += co->blocks[idx].size;
+ idx = co->blocks[idx].next;
+ break;
+ case CARVEOUT_STAT_NUM_BLOCKS:
+ val++;
+ idx = co->blocks[idx].next;
+ break;
+ case CARVEOUT_STAT_LARGEST_BLOCK:
+ val = max_t(unsigned long, val, co->blocks[idx].size);
+ idx = co->blocks[idx].next;
+ break;
+ case CARVEOUT_STAT_FREE_SIZE:
+ val += co->blocks[idx].size;
+ idx = co->blocks[idx].next_free;
+ break;
+ case CARVEOUT_STAT_FREE_BLOCKS:
+ val ++;
+ idx = co->blocks[idx].next_free;
+ break;
+ case CARVEOUT_STAT_LARGEST_FREE:
+ val = max_t(unsigned long, val, co->blocks[idx].size);
+ idx = co->blocks[idx].next_free;
+ break;
+ }
+ }
+
+ spin_unlock(&co->lock);
+ return val;
+}
+
+#define co_is_free(_co, _idx) \
+ ((_co)->free_index==(_idx) || ((_co)->blocks[(_idx)].prev_free!=-1))
+
+static int _nvmap_init_carveout(struct nvmap_carveout *co,
+ const char *name, unsigned long base_address, size_t len)
+{
+ const unsigned int min_blocks = 16;
+ struct nvmap_mem_block *blocks = NULL;
+ int i;
+
+ blocks = kzalloc(sizeof(*blocks)*min_blocks, GFP_KERNEL);
+
+ if (!blocks) goto fail;
+ co->name = kstrdup(name, GFP_KERNEL);
+ if (!co->name) goto fail;
+
+ for (i=1; i<min_blocks; i++) {
+ blocks[i].next = i+1;
+ blocks[i].prev = i-1;
+ blocks[i].next_free = -1;
+ blocks[i].prev_free = -1;
+ }
+ blocks[i-1].next = -1;
+ blocks[1].prev = -1;
+
+ blocks[0].next = blocks[0].prev = -1;
+ blocks[0].next_free = blocks[0].prev_free = -1;
+ blocks[0].base = base_address;
+ blocks[0].size = len;
+ co->blocks = blocks;
+ co->num_blocks = min_blocks;
+ spin_lock_init(&co->lock);
+ co->block_index = 0;
+ co->spare_index = 1;
+ co->free_index = 0;
+ return 0;
+
+fail:
+ if (blocks) kfree(blocks);
+ return -ENOMEM;
+}
+
+static int nvmap_grow_blocks(struct nvmap_carveout *co)
+{
+ struct nvmap_mem_block *blocks;
+ unsigned int i;
+
+ if (co->num_blocks >= 1<<(8*sizeof(co->free_index)-1)) return -ENOMEM;
+ blocks = kzalloc(sizeof(*blocks)*(co->num_blocks*2), GFP_KERNEL);
+ if (!blocks) return -ENOMEM;
+
+ memcpy(blocks, co->blocks, sizeof(*blocks)*(co->num_blocks));
+ kfree(co->blocks);
+ co->blocks = blocks;
+ for (i=co->num_blocks; i<co->num_blocks*2; i++) {
+ blocks[i].next = i+1;
+ blocks[i].prev = i-1;
+ blocks[i].next_free = -1;
+ blocks[i].prev_free = -1;
+ }
+ blocks[co->num_blocks].prev = -1;
+ blocks[i-1].next = -1;
+ co->spare_index = co->num_blocks;
+ co->num_blocks *= 2;
+ return 0;
+}
+
+static int nvmap_get_spare(struct nvmap_carveout *co) {
+ int idx;
+
+ if (co->spare_index == -1)
+ if (nvmap_grow_blocks(co))
+ return -1;
+
+ BUG_ON(co->spare_index == -1);
+ idx = co->spare_index;
+ co->spare_index = co->blocks[idx].next;
+ co->blocks[idx].next = -1;
+ co->blocks[idx].prev = -1;
+ co->blocks[idx].next_free = -1;
+ co->blocks[idx].prev_free = -1;
+ return idx;
+}
+
+#define BLOCK(_co, _idx) ((_idx)==-1 ? NULL : &(_co)->blocks[(_idx)])
+
+static void nvmap_zap_free(struct nvmap_carveout *co, int idx)
+{
+ struct nvmap_mem_block *block;
+
+ block = BLOCK(co, idx);
+ if (block->prev_free != -1)
+ BLOCK(co, block->prev_free)->next_free = block->next_free;
+ else
+ co->free_index = block->next_free;
+
+ if (block->next_free != -1)
+ BLOCK(co, block->next_free)->prev_free = block->prev_free;
+
+ block->prev_free = -1;
+ block->next_free = -1;
+}
+
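+/* carve the region [start, start+size) out of free block idx: any head
+ * fragment below start and any tail fragment above start+size are moved to
+ * spare blocks and returned to the free list, then idx itself is removed
+ * from the free list. if no spare block can be obtained, the head fragment
+ * is merged into the previous block instead */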
+static void nvmap_split_block(struct nvmap_carveout *co,
+ int idx, size_t start, size_t size)
+{
+ if (BLOCK(co, idx)->base < start) {
+ int spare_idx = nvmap_get_spare(co);
+ struct nvmap_mem_block *spare = BLOCK(co, spare_idx);
+ struct nvmap_mem_block *block = BLOCK(co, idx);
+ if (spare) {
+ spare->size = start - block->base;
+ spare->base = block->base;
+ block->size -= (start - block->base);
+ block->base = start;
+ spare->next = idx;
+ spare->prev = block->prev;
+ block->prev = spare_idx;
+ if (spare->prev != -1)
+ co->blocks[spare->prev].next = spare_idx;
+ else
+ co->block_index = spare_idx;
+ spare->prev_free = -1;
+ spare->next_free = co->free_index;
+ if (co->free_index != -1)
+ co->blocks[co->free_index].prev_free = spare_idx;
+ co->free_index = spare_idx;
+ } else {
+ if (block->prev != -1) {
+ spare = BLOCK(co, block->prev);
+ spare->size += start - block->base;
+ block->base = start;
+ }
+ }
+ }
+
+ if (BLOCK(co, idx)->size > size) {
+ int spare_idx = nvmap_get_spare(co);
+ struct nvmap_mem_block *spare = BLOCK(co, spare_idx);
+ struct nvmap_mem_block *block = BLOCK(co, idx);
+ if (spare) {
+ spare->base = block->base + size;
+ spare->size = block->size - size;
+ block->size = size;
+ spare->prev = idx;
+ spare->next = block->next;
+ block->next = spare_idx;
+ if (spare->next != -1)
+ co->blocks[spare->next].prev = spare_idx;
+ spare->prev_free = -1;
+ spare->next_free = co->free_index;
+ if (co->free_index != -1)
+ co->blocks[co->free_index].prev_free = spare_idx;
+ co->free_index = spare_idx;
+ }
+ }
+
+ nvmap_zap_free(co, idx);
+}
+
+#define next_spare next
+#define prev_spare prev
+
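+/* head-insert block _idx onto either the free list or the spare list;
+ * next_spare/prev_spare alias next/prev so the same macro can maintain the
+ * spare chain through the ordinary block links */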
+#define nvmap_insert_block(_list, _co, _idx) \
+ do { \
+ struct nvmap_mem_block *b = BLOCK((_co), (_idx)); \
+ struct nvmap_mem_block *s = BLOCK((_co), (_co)->_list##_index);\
+ if (s) s->prev_##_list = (_idx); \
+ b->prev_##_list = -1; \
+ b->next_##_list = (_co)->_list##_index; \
+ (_co)->_list##_index = (_idx); \
+ } while (0)
+
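+/* return block idx to the free list, coalescing it with a free successor
+ * and/or predecessor; merged neighbours are recycled onto the spare list */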
+static void nvmap_carveout_free(struct nvmap_carveout *co, int idx)
+{
+ struct nvmap_mem_block *b;
+
+ spin_lock(&co->lock);
+
+ b = BLOCK(co, idx);
+
+ if (b->next!=-1 && co_is_free(co, b->next)) {
+ int zap = b->next;
+ struct nvmap_mem_block *n = BLOCK(co, zap);
+ b->size += n->size;
+
+ b->next = n->next;
+ if (n->next != -1) co->blocks[n->next].prev = idx;
+
+ nvmap_zap_free(co, zap);
+ nvmap_insert_block(spare, co, zap);
+ }
+
+ if (b->prev!=-1 && co_is_free(co, b->prev)) {
+ int zap = b->prev;
+ struct nvmap_mem_block *p = BLOCK(co, zap);
+
+ b->base = p->base;
+ b->size += p->size;
+
+ b->prev = p->prev;
+
+ if (p->prev != -1) co->blocks[p->prev].next = idx;
+ else co->block_index = idx;
+
+ nvmap_zap_free(co, zap);
+ nvmap_insert_block(spare, co, zap);
+ }
+
+ nvmap_insert_block(free, co, idx);
+ spin_unlock(&co->lock);
+}
+
+static int nvmap_carveout_alloc(struct nvmap_carveout *co,
+ size_t align, size_t size)
+{
+ short idx;
+
+ spin_lock(&co->lock);
+
+ idx = co->free_index;
+
+ while (idx != -1) {
+ struct nvmap_mem_block *b = BLOCK(co, idx);
+ /* try to be a bit more clever about generating block-
+ * droppings by comparing the results of a left-justified vs
+ * right-justified block split, and choosing the
+ * justification style which yields the largest remaining
+ * block */
+ size_t end = b->base + b->size;
+ size_t ljust = (b->base + align - 1) & ~(align-1);
+ size_t rjust = (end - size) & ~(align-1);
+ size_t l_max, r_max;
+
+ if (rjust < b->base) rjust = ljust;
+ l_max = max_t(size_t, ljust - b->base, end - (ljust + size));
+ r_max = max_t(size_t, rjust - b->base, end - (rjust + size));
+
+ if (b->base + b->size >= ljust + size) {
+ if (l_max >= r_max)
+ nvmap_split_block(co, idx, ljust, size);
+ else
+ nvmap_split_block(co, idx, rjust, size);
+ break;
+ }
+ idx = b->next_free;
+ }
+
+ spin_unlock(&co->lock);
+ return idx;
+}
+
+#undef next_spare
+#undef prev_spare
+
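+/* a handle is the kernel-side representation of a single allocation: it is
+ * backed either by a block in a carveout heap (carveout) or by an array of
+ * system pages, optionally mapped through IOVMM (pgalloc). handles live in
+ * a global rb-tree keyed by their kernel virtual address */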
+struct nvmap_handle {
+ struct rb_node node;
+ atomic_t ref;
+ atomic_t pin;
+ unsigned long flags;
+ size_t size;
+ size_t orig_size;
+ struct task_struct *owner;
+ unsigned int poison;
+ union {
+ struct {
+ struct page **pages;
+ struct tegra_iovmm_area *area;
+ struct list_head mru_list;
+ bool contig;
+ bool dirty; /* IOVMM area allocated since last pin */
+ } pgalloc;
+ struct {
+ struct nvmap_carveout *co_heap;
+ int block_idx;
+ unsigned long base;
+ unsigned int key; /* preserved by bootloader */
+ } carveout;
+ };
+ bool global;
+ bool secure; /* only allocated in IOVM space, zapped on unpin */
+ bool heap_pgalloc;
+ bool alloc;
+ void *kern_map; /* used for RM memmgr backwards compat */
+};
+
+/* handle_ref objects are file-descriptor-local references to nvmap_handle
+ * objects. they track the number of references and pins performed by
+ * the specific caller (since nvmap_handle objects may be global), so that
+ * a client which terminates without properly unwinding all handles (or
+ * all nested pins) can be unwound by nvmap. */
+struct nvmap_handle_ref {
+ struct nvmap_handle *h;
+ struct rb_node node;
+ atomic_t refs;
+ atomic_t pin;
+};
+
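+/* per-file-descriptor (per-client) state: an rb-tree of handle_refs keyed
+ * by handle pointer, plus accounting of how much IOVMM space the client has
+ * committed (iovm_commit is checked against iovm_limit). su is true for
+ * clients opened through the privileged knvmap node (knvmap_fops) */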
+struct nvmap_file_priv {
+ struct rb_root handle_refs;
+ atomic_t iovm_commit;
+ size_t iovm_limit;
+ spinlock_t ref_lock;
+ bool su;
+};
+
+struct nvmap_carveout_node {
+ struct device dev;
+ struct list_head heap_list;
+ unsigned int heap_bit;
+ struct nvmap_carveout carveout;
+};
+
+/* the master structure for all nvmap-managed carveouts and all handle_ref
+ * objects allocated inside the kernel. heaps are sorted by their heap_bit
+ * (highest heap_bit first) so that carveout allocation will be first
+ * attempted by the heap with the highest heap_bit set in the allocation's
+ * heap mask */
+static struct {
+ struct nvmap_file_priv init_data;
+ struct rw_semaphore list_sem;
+ struct list_head heaps;
+} nvmap_context;
+
static struct vm_operations_struct nvmap_vma_ops = {
.open = nvmap_vma_open,
.close = nvmap_vma_close,
@@ -107,20 +570,643 @@ const struct file_operations knvmap_fops = {
};
struct nvmap_vma_priv {
- NvRmMemHandle hmem;
- size_t offs;
- atomic_t ref;
+ struct nvmap_handle *h;
+ size_t offs;
+ atomic_t ref;
};
+static struct proc_dir_entry *nvmap_procfs_root;
+static struct proc_dir_entry *nvmap_procfs_proc;
+
+static void _nvmap_handle_free(struct nvmap_handle *h);
+
+#define NVMAP_CARVEOUT_ATTR_RO(_name) \
+ struct device_attribute nvmap_heap_attr_##_name = \
+ __ATTR(_name, S_IRUGO, _nvmap_sysfs_show_heap_##_name, NULL)
+
+#define NVMAP_CARVEOUT_ATTR_WO(_name, _mode) \
+ struct device_attribute nvmap_heap_attr_##_name = \
+ __ATTR(_name, _mode, NULL, _nvmap_sysfs_set_heap_##_name)
+
+static ssize_t _nvmap_sysfs_show_heap_usage(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *c = container_of(d,
+ struct nvmap_carveout_node, dev);
+ return sprintf(buf, "%08x\n", c->heap_bit);
+}
+
+static ssize_t _nvmap_sysfs_show_heap_name(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *c = container_of(d,
+ struct nvmap_carveout_node, dev);
+ return sprintf(buf, "%s\n", c->carveout.name);
+}
+
+static ssize_t _nvmap_sysfs_show_heap_base(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *c = container_of(d,
+ struct nvmap_carveout_node, dev);
+ return sprintf(buf, "%08lx\n",
+ _nvmap_carveout_blockstat(&c->carveout, CARVEOUT_STAT_BASE));
+}
+
+static ssize_t _nvmap_sysfs_show_heap_free_size(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *c = container_of(d,
+ struct nvmap_carveout_node, dev);
+ return sprintf(buf, "%lu\n",
+ _nvmap_carveout_blockstat(&c->carveout,
+ CARVEOUT_STAT_FREE_SIZE));
+}
+
+static ssize_t _nvmap_sysfs_show_heap_free_count(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *c = container_of(d,
+ struct nvmap_carveout_node, dev);
+ return sprintf(buf, "%lu\n",
+ _nvmap_carveout_blockstat(&c->carveout,
+ CARVEOUT_STAT_FREE_BLOCKS));
+}
+
+static ssize_t _nvmap_sysfs_show_heap_free_max(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *c = container_of(d,
+ struct nvmap_carveout_node, dev);
+ return sprintf(buf, "%lu\n",
+ _nvmap_carveout_blockstat(&c->carveout,
+ CARVEOUT_STAT_LARGEST_FREE));
+}
+
+static ssize_t _nvmap_sysfs_show_heap_total_count(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *c = container_of(d,
+ struct nvmap_carveout_node, dev);
+ return sprintf(buf, "%lu\n",
+ _nvmap_carveout_blockstat(&c->carveout,
+ CARVEOUT_STAT_NUM_BLOCKS));
+}
+
+static ssize_t _nvmap_sysfs_show_heap_total_max(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *c = container_of(d,
+ struct nvmap_carveout_node, dev);
+ return sprintf(buf, "%lu\n",
+ _nvmap_carveout_blockstat(&c->carveout,
+ CARVEOUT_STAT_LARGEST_BLOCK));
+}
+
+static ssize_t _nvmap_sysfs_show_heap_total_size(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *c = container_of(d,
+ struct nvmap_carveout_node, dev);
+ return sprintf(buf, "%lu\n",
+ _nvmap_carveout_blockstat(&c->carveout,
+ CARVEOUT_STAT_TOTAL_SIZE));
+}
+
+static int nvmap_split_carveout_heap(struct nvmap_carveout *co, size_t size,
+ const char *name, unsigned int new_bitmask);
+
+static ssize_t _nvmap_sysfs_set_heap_split(struct device *d,
+ struct device_attribute *attr, const char * buf, size_t count)
+{
+ struct nvmap_carveout_node *c = container_of(d,
+ struct nvmap_carveout_node, dev);
+ char *tmp, *local = kzalloc(count+1, GFP_KERNEL);
+ char *sizestr = NULL, *bitmaskstr = NULL, *name = NULL;
+ char **format[] = { &sizestr, &bitmaskstr, &name };
+ char ***f_iter = format;
+ unsigned int i;
+ unsigned long size, bitmask;
+ int err;
+
+ if (!local) {
+ pr_err("%s: unable to read string\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(local, buf, count);
+ tmp = local;
+ for (i=0, **f_iter = local; i<count &&
+ (f_iter - format)<ARRAY_SIZE(format)-1; i++) {
+ if (local[i]==',') {
+ local[i] = '\0';
+ f_iter++;
+ **f_iter = &local[i+1];
+ }
+ }
+
+ if (!sizestr || !bitmaskstr || !name) {
+ pr_err("%s: format error\n", __func__);
+ kfree(tmp);
+ return -EINVAL;
+ }
+
+ for (local=name; *local && !isspace(*local); local++);
+
+ if (local==name) {
+ pr_err("%s: invalid name %s\n", __func__, name);
+ kfree(tmp);
+ return -EINVAL;
+ }
+
+ *local=0;
+
+ size = memparse(sizestr, &sizestr);
+ if (!size) {
+ kfree(tmp);
+ return -EINVAL;
+ }
+
+ if (strict_strtoul(bitmaskstr, 0, &bitmask)==-EINVAL) {
+ kfree(tmp);
+ return -EINVAL;
+ }
+
+ err = nvmap_split_carveout_heap(&c->carveout, size, name, bitmask);
+
+ if (err) pr_err("%s: failed to create split heap %s\n", __func__, name);
+ kfree(tmp);
+ return err ? err : count;
+}
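+/* the split attribute takes "<size>,<usage bitmask>,<name>", where size is
+ * parsed with memparse (k/K and m/M suffixes allowed). for example (heap
+ * name, bitmask value and sysfs path below are purely illustrative):
+ *
+ *   echo "16M,0x4,camera" > /sys/.../nvmap/heap-generic-0/split
+ */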
+
+static NVMAP_CARVEOUT_ATTR_RO(usage);
+static NVMAP_CARVEOUT_ATTR_RO(name);
+static NVMAP_CARVEOUT_ATTR_RO(base);
+static NVMAP_CARVEOUT_ATTR_RO(free_size);
+static NVMAP_CARVEOUT_ATTR_RO(free_count);
+static NVMAP_CARVEOUT_ATTR_RO(free_max);
+static NVMAP_CARVEOUT_ATTR_RO(total_size);
+static NVMAP_CARVEOUT_ATTR_RO(total_count);
+static NVMAP_CARVEOUT_ATTR_RO(total_max);
+static NVMAP_CARVEOUT_ATTR_WO(split, (S_IWUSR | S_IWGRP));
+
+static struct attribute *nvmap_heap_default_attrs[] = {
+ &nvmap_heap_attr_usage.attr,
+ &nvmap_heap_attr_name.attr,
+ &nvmap_heap_attr_split.attr,
+ &nvmap_heap_attr_base.attr,
+ &nvmap_heap_attr_total_size.attr,
+ &nvmap_heap_attr_free_size.attr,
+ &nvmap_heap_attr_total_count.attr,
+ &nvmap_heap_attr_free_count.attr,
+ &nvmap_heap_attr_total_max.attr,
+ &nvmap_heap_attr_free_max.attr,
+ NULL
+};
+
+static struct attribute_group nvmap_heap_defattr_group = {
+ .attrs = nvmap_heap_default_attrs
+};
+
+static struct device *__nvmap_heap_parent_dev(void);
+#define _nvmap_heap_parent_dev __nvmap_heap_parent_dev()
+
+/* unpinned I/O VMM areas may be reclaimed by nvmap to make room for
+ * new surfaces. unpinned surfaces are stored in segregated linked-lists
+ * sorted in most-recently-unpinned order (i.e., head insertion, head
+ * removal) */
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+static DEFINE_SPINLOCK(nvmap_mru_vma_lock);
+static const size_t nvmap_mru_cutoff[] = {
+ 262144, 393216, 786432, 1048576, 1572864
+};
+
+static struct list_head nvmap_mru_vma_lists[ARRAY_SIZE(nvmap_mru_cutoff)];
+
+static inline struct list_head *_nvmap_list(size_t size)
+{
+ unsigned int i;
+
+ for (i=0; i<ARRAY_SIZE(nvmap_mru_cutoff); i++)
+ if (size <= nvmap_mru_cutoff[i]) return &nvmap_mru_vma_lists[i];
+
+ return &nvmap_mru_vma_lists[ARRAY_SIZE(nvmap_mru_cutoff)-1];
+}
+#endif
+
+static inline struct nvmap_handle *_nvmap_handle_get(struct nvmap_handle *h)
+{
+ if (unlikely(h->poison)) {
+ pr_err("%s: %s getting poisoned handle\n", __func__,
+ current->group_leader->comm);
+ return NULL;
+ } else if (unlikely(atomic_inc_return(&h->ref)<=1)) {
+ pr_err("%s: %s getting a freed handle\n",
+ __func__, current->group_leader->comm);
+ return NULL;
+ }
+ return h;
+}
+
+static inline void _nvmap_handle_put(struct nvmap_handle *h)
+{
+ int cnt = atomic_dec_return(&h->ref);
+
+ if (unlikely(cnt<0)) {
+ pr_err("%s: %s put to negative references\n",
+ __func__, current->comm);
+ dump_stack();
+ } else if (!cnt) _nvmap_handle_free(h);
+}
+
+static struct nvmap_handle *_nvmap_claim_preserved(
+ struct task_struct *new_owner, unsigned long key)
+{
+ struct rb_node *n;
+ struct nvmap_handle *b = NULL;
+
+ if (!key) return NULL;
+
+ spin_lock(&nvmap_handle_lock);
+ n = rb_first(&nvmap_handles);
+
+ while (n) {
+ b = rb_entry(n, struct nvmap_handle, node);
+ if (b->alloc && !b->heap_pgalloc && b->carveout.key == key) {
+ b->carveout.key = 0;
+ b->owner = new_owner;
+ break;
+ }
+ b = NULL;
+ n = rb_next(n);
+ }
+
+ spin_unlock(&nvmap_handle_lock);
+ return b;
+}
+
+static struct nvmap_handle *_nvmap_validate_get(unsigned long handle, bool su)
+{
+ struct nvmap_handle *b = NULL;
+
+#ifdef CONFIG_DEVNVMAP_PARANOID
+ struct rb_node *n;
+
+ spin_lock(&nvmap_handle_lock);
+
+ n = nvmap_handles.rb_node;
+
+ while (n) {
+ b = rb_entry(n, struct nvmap_handle, node);
+ if ((unsigned long)b == handle) {
+ if (su || b->global || b->owner==current->group_leader)
+ b = _nvmap_handle_get(b);
+ else
+ b = NULL;
+ spin_unlock(&nvmap_handle_lock);
+ return b;
+ }
+ if (handle > (unsigned long)b) n = n->rb_right;
+ else n = n->rb_left;
+ }
+ spin_unlock(&nvmap_handle_lock);
+ return NULL;
+#else
+ if (!handle) return NULL;
+ b = _nvmap_handle_get((struct nvmap_handle *)handle);
+ return b;
+#endif
+}
+
+static inline void _nvmap_insert_mru_vma(struct nvmap_handle *h)
+{
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+ spin_lock(&nvmap_mru_vma_lock);
+ list_add(&h->pgalloc.mru_list, _nvmap_list(h->pgalloc.area->iovm_length));
+ spin_unlock(&nvmap_mru_vma_lock);
+#endif
+}
+
+static void _nvmap_remove_mru_vma(struct nvmap_handle *h)
+{
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+ spin_lock(&nvmap_mru_vma_lock);
+ if (!list_empty(&h->pgalloc.mru_list))
+ list_del(&h->pgalloc.mru_list);
+ spin_unlock(&nvmap_mru_vma_lock);
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+#endif
+}
+
+static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
+{
+#ifndef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+ BUG_ON(!h->pgalloc.area);
+ BUG_ON(h->size > h->pgalloc.area->iovm_length);
+ BUG_ON((h->size | h->pgalloc.area->iovm_length) & ~PAGE_MASK);
+ return h->pgalloc.area;
+#else
+ struct list_head *mru;
+ struct nvmap_handle *evict = NULL;
+ struct tegra_iovmm_area *vm = NULL;
+ unsigned int i, idx;
+
+ spin_lock(&nvmap_mru_vma_lock);
+
+ if (h->pgalloc.area) {
+ BUG_ON(list_empty(&h->pgalloc.mru_list));
+ list_del(&h->pgalloc.mru_list);
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ spin_unlock(&nvmap_mru_vma_lock);
+ return h->pgalloc.area;
+ }
+
+ vm = tegra_iovmm_create_vm(nvmap_vm_client, NULL, h->size,
+ _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
+
+ if (vm) {
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ spin_unlock(&nvmap_mru_vma_lock);
+ return vm;
+ }
+ /* attempt to re-use the most recently unpinned IOVMM area in the
+ * same size bin as the current handle. If that fails, iteratively
+ * evict handles (starting from the current bin) until an allocation
+ * succeeds or no more areas can be evicted */
+
+ mru = _nvmap_list(h->size);
+ if (!list_empty(mru))
+ evict = list_first_entry(mru, struct nvmap_handle,
+ pgalloc.mru_list);
+ if (evict && evict->pgalloc.area->iovm_length >= h->size) {
+ list_del(&evict->pgalloc.mru_list);
+ vm = evict->pgalloc.area;
+ evict->pgalloc.area = NULL;
+ INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+ spin_unlock(&nvmap_mru_vma_lock);
+ return vm;
+ }
+
+ idx = mru - nvmap_mru_vma_lists;
+
+ for (i=0; i<ARRAY_SIZE(nvmap_mru_vma_lists) && !vm; i++, idx++) {
+ if (idx >= ARRAY_SIZE(nvmap_mru_vma_lists))
+ idx -= ARRAY_SIZE(nvmap_mru_vma_lists);
+ mru = &nvmap_mru_vma_lists[idx];
+ while (!list_empty(mru) && !vm) {
+ evict = list_first_entry(mru, struct nvmap_handle,
+ pgalloc.mru_list);
+
+ BUG_ON(atomic_read(&evict->pin)!=0);
+ BUG_ON(!evict->pgalloc.area);
+ list_del(&evict->pgalloc.mru_list);
+ INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+ tegra_iovmm_free_vm(evict->pgalloc.area);
+ evict->pgalloc.area = NULL;
+ vm = tegra_iovmm_create_vm(nvmap_vm_client,
+ NULL, h->size,
+ _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
+ }
+ }
+
+ spin_unlock(&nvmap_mru_vma_lock);
+ return vm;
+#endif
+}
+
+static int _nvmap_do_cache_maint(struct nvmap_handle *h,
+ unsigned long start, unsigned long end, unsigned long op, bool get);
+
+void _nvmap_handle_free(struct nvmap_handle *h)
+{
+ int e;
+ spin_lock(&nvmap_handle_lock);
+
+ /* if 2 contexts call _get and _put simultaneously, the reference
+ * count may drop to 0 and then increase to 1 before the handle
+ * can be freed. */
+ if (atomic_read(&h->ref)>0) {
+ spin_unlock(&nvmap_handle_lock);
+ return;
+ }
+ BUG_ON(atomic_read(&h->ref)<0);
+ BUG_ON(atomic_read(&h->pin)!=0);
+
+ rb_erase(&h->node, &nvmap_handles);
+
+ spin_unlock(&nvmap_handle_lock);
+
+ if (h->owner) put_task_struct(h->owner);
+
+ /* remove when NvRmMemMgr compatibility is eliminated */
+ if (h->kern_map) {
+ BUG_ON(!h->alloc);
+ if (h->heap_pgalloc)
+ vm_unmap_ram(h->kern_map, h->size>>PAGE_SHIFT);
+ else {
+ unsigned long addr = (unsigned long)h->kern_map;
+ addr &= PAGE_MASK;
+ iounmap((void *)addr);
+ }
+ }
+
+ /* ensure that no stale data remains in the cache for this handle */
+ e = _nvmap_do_cache_maint(h, 0, h->size, NVMEM_CACHE_OP_WB_INV, false);
+
+ if (h->alloc && !h->heap_pgalloc)
+ nvmap_carveout_free(h->carveout.co_heap, h->carveout.block_idx);
+ else if (h->alloc) {
+ unsigned int i;
+ BUG_ON(h->size & ~PAGE_MASK);
+ BUG_ON(!h->pgalloc.pages);
+ _nvmap_remove_mru_vma(h);
+ if (h->pgalloc.area) tegra_iovmm_free_vm(h->pgalloc.area);
+ for (i=0; i<h->size>>PAGE_SHIFT; i++) {
+ ClearPageReserved(h->pgalloc.pages[i]);
+ __free_page(h->pgalloc.pages[i]);
+ }
+ if ((h->size>>PAGE_SHIFT)*sizeof(struct page*)>=PAGE_SIZE)
+ vfree(h->pgalloc.pages);
+ else
+ kfree(h->pgalloc.pages);
+ }
+ h->poison = 0xa5a5a5a5;
+ kfree(h);
+}
+
+#define nvmap_gfp (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+
+static int _nvmap_alloc_do_coalloc(struct nvmap_handle *h,
+ struct nvmap_carveout *co, size_t align)
+{
+ int idx;
+
+ idx = nvmap_carveout_alloc(co, align, h->size);
+ if (idx != -1) {
+ h->alloc = true;
+ h->heap_pgalloc = false;
+ h->carveout.co_heap = co;
+ h->carveout.block_idx = idx;
+ spin_lock(&co->lock);
+ h->carveout.base = co->blocks[idx].base;
+ spin_unlock(&co->lock);
+ }
+
+ return (idx==-1) ? -ENOMEM : 0;
+}
+
+/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
+static void _nvmap_handle_iovmm_map(struct nvmap_handle *h)
+{
+ tegra_iovmm_addr_t va;
+ unsigned long i;
+
+ BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
+ BUG_ON(h->size & ~PAGE_MASK);
+ WARN_ON(!h->pgalloc.dirty);
+
+ for (va = h->pgalloc.area->iovm_start, i=0;
+ va < (h->pgalloc.area->iovm_start + h->size);
+ i++, va+=PAGE_SIZE) {
+ BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
+ tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va,
+ page_to_pfn(h->pgalloc.pages[i]));
+ }
+ h->pgalloc.dirty = false;
+}
+
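+/* allocate page-based backing for a handle: either one physically
+ * contiguous run (alloc_pages + split_page, so each page is individually
+ * refcounted) or independent single pages for IOVMM-backed handles;
+ * single-page, non-secure handles are promoted to contiguous. every page
+ * is flushed out of the CPU caches before first use */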
+static int _nvmap_alloc_do_pgalloc(struct nvmap_handle *h,
+ bool contiguous, bool secure)
+{
+ unsigned int i = 0, cnt = (h->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ struct page **pages;
+
+ if (cnt*sizeof(*pages)>=PAGE_SIZE)
+ pages = vmalloc(cnt*sizeof(*pages));
+ else
+ pages = kzalloc(sizeof(*pages)*cnt, GFP_KERNEL);
+
+ if (!pages) return -ENOMEM;
+
+ if (cnt==1 && !secure) contiguous = true;
+
+ /* secure surfaces should only be allocated in discontiguous (IOVM-
+ * managed) space, so that the mapping can be zapped after it is
+ * unpinned */
+ WARN_ON(secure && contiguous);
+
+ if (contiguous) {
+ size_t order = get_order(h->size);
+ struct page *compound_page;
+ compound_page = alloc_pages(nvmap_gfp, order);
+ if (!compound_page) goto fail;
+ split_page(compound_page, order);
+ for (i=0; i<cnt; i++)
+ pages[i] = nth_page(compound_page, i);
+ for (; i<(1<<order); i++)
+ __free_page(nth_page(compound_page, i));
+ } else {
+ for (i=0; i<cnt; i++) {
+ pages[i] = alloc_page(nvmap_gfp);
+ if (!pages[i]) {
+ pr_err("failed to allocate %u pages after %u entries\n",
+ cnt, i);
+ goto fail;
+ }
+ }
+ }
+
+ h->pgalloc.area = NULL;
+#ifndef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+ if (!contiguous) {
+ h->pgalloc.area = tegra_iovmm_create_vm(nvmap_vm_client,
+ NULL, cnt << PAGE_SHIFT,
+ _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
+ if (!h->pgalloc.area) goto fail;
+ h->pgalloc.dirty = true;
+ }
+#endif
+
+ for (i=0; i<cnt; i++) {
+ void *km;
+ SetPageReserved(pages[i]);
+ km = kmap(pages[i]);
+ if (km) __cpuc_flush_dcache_page(km);
+ outer_flush_range(page_to_phys(pages[i]),
+ page_to_phys(pages[i])+PAGE_SIZE);
+ kunmap(pages[i]);
+ }
+
+ h->size = cnt<<PAGE_SHIFT;
+ h->pgalloc.pages = pages;
+ h->heap_pgalloc = true;
+ h->pgalloc.contig = contiguous;
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ h->alloc = true;
+ return 0;
+
+fail:
+ while (i--) __free_page(pages[i]);
+ if (pages && (cnt*sizeof(*pages)>=PAGE_SIZE)) vfree(pages);
+ else if (pages) kfree(pages);
+ return -ENOMEM;
+}
+
+static struct nvmap_handle *_nvmap_handle_create(
+ struct task_struct *owner, size_t size)
+{
+ struct nvmap_handle *h = kzalloc(sizeof(*h), GFP_KERNEL);
+ struct nvmap_handle *b;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+
+ if (!h) return NULL;
+ atomic_set(&h->ref, 1);
+ atomic_set(&h->pin, 0);
+ h->owner = owner;
+ h->size = h->orig_size = size;
+ h->flags = NVMEM_HANDLE_WRITE_COMBINE;
+
+ spin_lock(&nvmap_handle_lock);
+ p = &nvmap_handles.rb_node;
+ while (*p) {
+ parent = *p;
+ b = rb_entry(parent, struct nvmap_handle, node);
+ if (h > b) p = &parent->rb_right;
+ else p = &parent->rb_left;
+ }
+ rb_link_node(&h->node, parent, p);
+ rb_insert_color(&h->node, &nvmap_handles);
+ spin_unlock(&nvmap_handle_lock);
+ if (owner) get_task_struct(owner);
+ return h;
+}
+
+/* nvmap pte manager */
+
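+/* rewrite a single kernel PTE in nvmap's reserved VM window so that it
+ * points at pfn with the requested attributes, then flush the TLB entry
+ * for that address */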
+static void _nvmap_set_pte_at(unsigned long addr, unsigned long pfn,
+ pgprot_t prot)
+{
+ u32 off;
+ int idx;
+ pte_t *pte;
+
+ BUG_ON(!addr);
+ idx = NVMAP_PTE_INDEX(addr);
+ off = NVMAP_PTE_OFFSET(addr) & (PTRS_PER_PTE-1);
+
+ pte = nvmap_pte[idx] + off;
+ set_pte_ext(pte, pfn_pte(pfn, prot), 0);
+ flush_tlb_kernel_page(addr);
+}
+
static int _nvmap_map_pte(unsigned long pfn, pgprot_t prot, void **vaddr)
{
static unsigned int last_bit = 0;
unsigned long bit;
- pte_t *pte;
unsigned long addr;
unsigned long flags;
- u32 off;
- int idx;
spin_lock_irqsave(&nvmap_ptelock, flags);
@@ -141,12 +1227,7 @@ static int _nvmap_map_pte(unsigned long pfn, pgprot_t prot, void **vaddr)
addr = NVMAP_BASE + bit*PAGE_SIZE;
- idx = NVMAP_PTE_INDEX(addr);
- off = NVMAP_PTE_OFFSET(addr) & (PTRS_PER_PTE-1);
-
- pte = nvmap_pte[idx] + off;
- set_pte_ext(pte, pfn_pte(pfn, prot), 0);
- flush_tlb_kernel_page(addr);
+ _nvmap_set_pte_at(addr, pfn, prot);
*vaddr = (void *)addr;
return 0;
}
@@ -195,33 +1276,20 @@ static void nvmap_vma_close(struct vm_area_struct *vma) {
struct nvmap_vma_priv *priv = vma->vm_private_data;
if (priv && !atomic_dec_return(&priv->ref)) {
- NvRmMemHandle hmem = priv->hmem;
- if (hmem) {
- if (hmem->coherency==NvOsMemAttribute_WriteBack)
- dmac_clean_all();
- NvRmMemHandleFree(priv->hmem);
- }
+ if (priv->h) _nvmap_handle_put(priv->h);
kfree(priv);
}
vma->vm_private_data = NULL;
}
-extern struct page *NvOsPageGetPage(NvOsPageAllocHandle, size_t);
-#define nvmap_range(x,y) (x), (x)+(y)
-
-#define is_same_page(a, b) \
- ((unsigned long)(a)>>PAGE_SHIFT == (unsigned long)(b)>>PAGE_SHIFT)
-
static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct nvmap_vma_priv *priv;
- struct page *page;
- unsigned long pfn;
unsigned long offs;
offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
priv = vma->vm_private_data;
- if (!priv || !priv->hmem)
+ if (!priv || !priv->h || !priv->h->alloc)
return VM_FAULT_SIGBUS;
offs += priv->offs;
@@ -229,27 +1297,22 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* offset from the original VMA */
offs += (vma->vm_pgoff << PAGE_SHIFT);
- if (offs >= priv->hmem->size)
+ if (offs >= priv->h->size)
return VM_FAULT_SIGBUS;
- switch (priv->hmem->heap) {
- case NvRmHeap_ExternalCarveOut:
- case NvRmHeap_IRam:
- pfn = ((priv->hmem->PhysicalAddress+offs) >> PAGE_SHIFT);
+ if (!priv->h->heap_pgalloc) {
+ unsigned long pfn;
+ BUG_ON(priv->h->carveout.base & ~PAGE_MASK);
+ pfn = ((priv->h->carveout.base + offs) >> PAGE_SHIFT);
vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
return VM_FAULT_NOPAGE;
-
- case NvRmHeap_GART:
- case NvRmHeap_External:
- if (!priv->hmem->hPageHandle)
- return VM_FAULT_SIGBUS;
- page = NvOsPageGetPage(priv->hmem->hPageHandle, offs);
+ } else {
+ struct page *page;
+ offs >>= PAGE_SHIFT;
+ page = priv->h->pgalloc.pages[offs];
if (page) get_page(page);
vmf->page = page;
return (page) ? 0 : VM_FAULT_SIGBUS;
-
- default:
- return VM_FAULT_SIGBUS;
}
}
@@ -274,17 +1337,44 @@ static long nvmap_ioctl(struct file *filp,
return -EFAULT;
switch (cmd) {
+ case NVMEM_IOC_CREATE:
+ case NVMEM_IOC_CLAIM:
+ case NVMEM_IOC_FROM_ID:
+ err = nvmap_ioctl_create(filp, cmd, uarg);
+ break;
+
+ case NVMEM_IOC_GET_ID:
+ err = nvmap_ioctl_getid(filp, uarg);
+ break;
+
+ case NVMEM_IOC_PARAM:
+ err = nvmap_ioctl_get_param(filp, uarg);
+ break;
+
+ case NVMEM_IOC_UNPIN_MULT:
+ case NVMEM_IOC_PIN_MULT:
+ err = nvmap_ioctl_pinop(filp, cmd==NVMEM_IOC_PIN_MULT, uarg);
+ break;
+
+ case NVMEM_IOC_ALLOC:
+ err = nvmap_ioctl_alloc(filp, uarg);
+ break;
+
+ case NVMEM_IOC_FREE:
+ err = nvmap_ioctl_free(filp, arg);
+ break;
+
case NVMEM_IOC_MMAP:
err = nvmap_map_into_caller_ptr(filp, uarg);
break;
case NVMEM_IOC_WRITE:
case NVMEM_IOC_READ:
- err = nvmap_rw_handle(filp, cmd==NVMEM_IOC_READ, uarg);
+ err = nvmap_ioctl_rw_handle(filp, cmd==NVMEM_IOC_READ, uarg);
break;
case NVMEM_IOC_CACHE:
- err = nvmap_cache_maint(filp, uarg);
+ err = nvmap_ioctl_cache_maint(filp, uarg);
break;
default:
@@ -293,46 +1383,746 @@ static long nvmap_ioctl(struct file *filp,
return err;
}
-static int nvmap_release(struct inode *inode, struct file *file)
+/* must be called with the ref_lock held - given a user-space handle ID
+ * ref, validate that the handle_ref object may be used by the caller */
+struct nvmap_handle_ref *_nvmap_ref_lookup_locked(
+ struct nvmap_file_priv *priv, unsigned long ref)
{
+ struct rb_node *n = priv->handle_refs.rb_node;
+
+ while (n) {
+ struct nvmap_handle_ref *r;
+ r = rb_entry(n, struct nvmap_handle_ref, node);
+ if ((unsigned long)r->h == ref) return r;
+ else if (ref > (unsigned long)r->h) n = n->rb_right;
+ else n = n->rb_left;
+ }
+
+ return NULL;
+}
+
+/* must be called inside nvmap_pin_lock, to ensure that an entire stream
+ * of pins will complete without competition from a second stream. returns
+ * 0 if the pin was successful, -ENOMEM on failure */
+static int _nvmap_handle_pin_locked(struct nvmap_handle *h)
+{
+ struct tegra_iovmm_area *area;
+ BUG_ON(!h->alloc);
+
+ h = _nvmap_handle_get(h);
+ if (!h) return -ENOMEM;
+
+ if (atomic_inc_return(&h->pin)==1) {
+ if (h->heap_pgalloc && !h->pgalloc.contig) {
+ area = _nvmap_get_vm(h);
+ if (!area) {
+ /* no race here, inside the pin mutex */
+ atomic_dec(&h->pin);
+ _nvmap_handle_put(h);
+ return -ENOMEM;
+ }
+ if (area != h->pgalloc.area)
+ h->pgalloc.dirty = true;
+ h->pgalloc.area = area;
+ }
+ }
+ return 0;
+}
+
+/* doesn't need to be called inside nvmap_pin_lock, since this will only
+ * expand the available VM area */
+static int _nvmap_handle_unpin(struct nvmap_handle *h)
+{
+ int ret = 0;
+
+ if (atomic_read(&h->pin)==0) {
+ pr_err("%s: %s attempting to unpin an unpinned handle\n",
+ __func__, current->comm);
+ dump_stack();
+ return 0;
+ }
+
+ BUG_ON(!h->alloc || atomic_read(&h->pin)==0);
+ if (!atomic_dec_return(&h->pin)) {
+ if (h->heap_pgalloc && h->pgalloc.area) {
+ /* if a secure handle is clean (i.e., mapped into
+ * IOVMM, it needs to be zapped on unpin. */
+ if (h->secure && !h->pgalloc.dirty) {
+ tegra_iovmm_zap_vm(h->pgalloc.area);
+ h->pgalloc.dirty = true;
+ }
+ _nvmap_insert_mru_vma(h);
+ ret=1;
+ }
+ }
+ _nvmap_handle_put(h);
+ return ret;
+}
+
+/* pin a list of handles, mapping IOVMM areas if needed. may sleep, if
+ * a handle's IOVMM area has been reclaimed and insufficient IOVMM space
+ * is available to complete the list pin. no intervening pin operations
+ * will interrupt this, and no validation is performed on the handles
+ * that are provided. */
+static int _nvmap_handle_pin_fast(unsigned int nr, struct nvmap_handle **h)
+{
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&nvmap_pin_lock);
+ for (i=0; i<nr && !ret; i++) {
+ ret = wait_event_interruptible(nvmap_pin_wait,
+ !_nvmap_handle_pin_locked(h[i]));
+ }
+ mutex_unlock(&nvmap_pin_lock);
+
+ if (ret) {
+ int do_wake = 0;
+ while (i--) do_wake |= _nvmap_handle_unpin(h[i]);
+ if (do_wake) wake_up(&nvmap_pin_wait);
+ return -EINTR;
+ } else {
+ for (i=0; i<nr; i++)
+ if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
+ _nvmap_handle_iovmm_map(h[i]);
+ }
+
+ return 0;
+}
+
+static int _nvmap_do_global_unpin(unsigned long ref)
+{
+ struct nvmap_handle *h;
+ int w;
+
+ h = _nvmap_validate_get(ref, true);
+ if (unlikely(!h)) {
+ pr_err("%s: %s attempting to unpin non-existent handle\n",
+ __func__, current->group_leader->comm);
+ return 0;
+ }
+
+ pr_err("%s: %s unpinning %s's %uB %s handle without local context\n",
+ __func__, current->group_leader->comm,
+ (h->owner) ? h->owner->comm : "kernel", h->orig_size,
+ (h->heap_pgalloc && !h->pgalloc.contig) ? "iovmm" :
+ (h->heap_pgalloc) ? "sysmem" : "carveout");
+
+ w = _nvmap_handle_unpin(h);
+ _nvmap_handle_put(h);
+ return w;
+}
+
+static void _nvmap_do_unpin(struct nvmap_file_priv *priv,
+ unsigned int nr, unsigned long *refs)
+{
+ struct nvmap_handle_ref *r;
+ unsigned int i;
+ int do_wake = 0;
+
+ spin_lock(&priv->ref_lock);
+ for (i=0; i<nr; i++) {
+ if (!refs[i]) continue;
+ r = _nvmap_ref_lookup_locked(priv, refs[i]);
+ if (unlikely(!r)) {
+ if (priv->su)
+ do_wake |= _nvmap_do_global_unpin(refs[i]);
+ else
+ pr_err("%s: %s unpinning invalid handle\n",
+ __func__, current->comm);
+ } else if (unlikely(!atomic_add_unless(&r->pin, -1, 0)))
+ pr_err("%s: %s unpinning unpinned handle\n",
+ __func__, current->comm);
+ else
+ do_wake |= _nvmap_handle_unpin(r->h);
+ }
+ spin_unlock(&priv->ref_lock);
+ if (do_wake) wake_up(&nvmap_pin_wait);
+}
+
+/* pins a list of handle_ref objects; same conditions apply as to
+ * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
+static int _nvmap_do_pin(struct nvmap_file_priv *priv,
+ unsigned int nr, unsigned long *refs)
+{
+ int ret = 0;
+ unsigned int i;
+ struct nvmap_handle **h = (struct nvmap_handle **)refs;
+ struct nvmap_handle_ref *r;
+
+ /* to optimize for the common case (client provided valid handle
+ * references and the pin succeeds), increment the handle_ref pin
+ * count during validation. in error cases, the tree will need to
+ * be re-walked, since the handle_ref is discarded so that an
+ * allocation isn't required. if a handle_ref is not found,
+ * locally validate that the caller has permission to pin the handle;
+ * handle_refs are not created in this case, so it is possible that
+ * if the caller crashes after pinning a global handle, the handle
+ * will be permanently leaked. */
+ spin_lock(&priv->ref_lock);
+ for (i=0; i<nr && !ret; i++) {
+ r = _nvmap_ref_lookup_locked(priv, refs[i]);
+ if (!r && (!(priv->su || h[i]->global ||
+ current->group_leader == h[i]->owner)))
+ ret = -EPERM;
+ else if (r) atomic_inc(&r->pin);
+ else {
+ pr_err("%s: %s pinning %s's %uB handle without "
+ "local context\n", __func__,
+ current->group_leader->comm,
+ h[i]->owner->comm, h[i]->orig_size);
+ }
+ }
+
+ while (ret && i--) {
+ r = _nvmap_ref_lookup_locked(priv, refs[i]);
+ if (r) atomic_dec(&r->pin);
+ }
+ spin_unlock(&priv->ref_lock);
+
+ if (ret) return ret;
+
+ mutex_lock(&nvmap_pin_lock);
+ for (i=0; i<nr && !ret; i++) {
+ ret = wait_event_interruptible(nvmap_pin_wait,
+ !_nvmap_handle_pin_locked(h[i]));
+ }
+ mutex_unlock(&nvmap_pin_lock);
+
+ if (ret) {
+ int do_wake = 0;
+ spin_lock(&priv->ref_lock);
+ while (i--) {
+ do_wake |= _nvmap_handle_unpin(h[i]);
+ r = _nvmap_ref_lookup_locked(priv, refs[i]);
+ if (r) atomic_dec(&r->pin);
+ }
+ spin_unlock(&priv->ref_lock);
+ if (do_wake) wake_up(&nvmap_pin_wait);
+ return -EINTR;
+ } else {
+ for (i=0; i<nr; i++) {
+ if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
+ _nvmap_handle_iovmm_map(h[i]);
+ }
+ }
+
+ return 0;
+}
+
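+/* pin/unpin ioctl entry point. when op.count is 1, op.handles holds the
+ * handle value itself and the pinned address is written back through the
+ * addr field of the caller's nvmem_pin_handle; for multiple handles,
+ * op.handles and op.addr point to user-space arrays */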
+static int nvmap_ioctl_pinop(struct file *filp,
+ bool is_pin, void __user *arg)
+{
+ struct nvmem_pin_handle op;
+ struct nvmap_handle *h;
+ unsigned long on_stack[16];
+ unsigned long *refs;
+ unsigned long __user *output;
+ unsigned int i;
+ int err;
+
+ err = copy_from_user(&op, arg, sizeof(op));
+ if (err) return err;
+
+ if (!op.count) return -EINVAL;
+
+ if (op.count > 1) {
+ size_t bytes = op.count * sizeof(unsigned long);
+ if (!access_ok(VERIFY_READ, (void *)op.handles, bytes))
+ return -EPERM;
+ if (is_pin && !access_ok(VERIFY_WRITE, (void *)op.addr, bytes))
+ return -EPERM;
+
+ if (op.count <= ARRAY_SIZE(on_stack)) refs = on_stack;
+ else refs = kzalloc(bytes, GFP_KERNEL);
+
+ if (!refs) return -ENOMEM;
+ err = copy_from_user(refs, (void*)op.handles, bytes);
+ if (err) goto out;
+ } else {
+ refs = on_stack;
+ on_stack[0] = (unsigned long)op.handles;
+ }
+
+ if (is_pin)
+ err = _nvmap_do_pin(filp->private_data, op.count, refs);
+ else
+ _nvmap_do_unpin(filp->private_data, op.count, refs);
+
+ /* skip the output stage on unpin */
+ if (err || !is_pin) goto out;
+
+ /* it is guaranteed that if _nvmap_do_pin returns 0 that
+ * all of the handle_ref objects are valid, so dereferencing directly
+ * here is safe */
+ if (op.count > 1)
+ output = (unsigned long __user *)op.addr;
+ else {
+ struct nvmem_pin_handle __user *tmp = arg;
+ output = (unsigned long __user *)&(tmp->addr);
+ }
+
+ for (i=0; i<op.count; i++) {
+ unsigned long addr;
+ h = (struct nvmap_handle *)refs[i];
+ if (h->heap_pgalloc && h->pgalloc.contig)
+ addr = page_to_phys(h->pgalloc.pages[0]);
+ else if (h->heap_pgalloc)
+ addr = h->pgalloc.area->iovm_start;
+ else
+ addr = h->carveout.base;
+
+ __put_user(addr, &output[i]);
+ }
+
+out:
+ if (refs != on_stack) kfree(refs);
+ return err;
+}
+
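+/* release method for both device nodes: tears down the per-client state,
+ * dropping any pins and references the client failed to unwind so that the
+ * underlying handles (and their IOVMM commitments) can be reclaimed */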
+static int nvmap_release(struct inode *inode, struct file *filp)
+{
+ struct nvmap_file_priv *priv = filp->private_data;
+ struct rb_node *n;
+ struct nvmap_handle_ref *r;
+ int refs;
+ int do_wake = 0;
+ int pins;
+
+ if (!priv) return 0;
+
+ while ((n = rb_first(&priv->handle_refs))) {
+ r = rb_entry(n, struct nvmap_handle_ref, node);
+ rb_erase(&r->node, &priv->handle_refs);
+ pins = atomic_read(&r->pin);
+ atomic_set(&r->pin, 0);
+ while (pins--) do_wake |= _nvmap_handle_unpin(r->h);
+ refs = atomic_read(&r->refs);
+ if (r->h->alloc && r->h->heap_pgalloc && !r->h->pgalloc.contig)
+ atomic_sub(r->h->size, &priv->iovm_commit);
+ while (refs--) _nvmap_handle_put(r->h);
+ kfree(r);
+ }
+ if (do_wake) wake_up(&nvmap_pin_wait);
+ kfree(priv);
return 0;
}
static int nvmap_open(struct inode *inode, struct file *filp)
{
/* eliminate read, write and llseek support on this node */
+ struct nvmap_file_priv *priv;
int ret;
+ /* nvmap doesn't track total number of pinned references, so its
+ * IOVMM client is always locked. */
+ if (!nvmap_vm_client) {
+ mutex_lock(&nvmap_pin_lock);
+ if (!nvmap_vm_client) {
+ nvmap_vm_client = tegra_iovmm_alloc_client("gpu", NULL);
+ if (nvmap_vm_client)
+ tegra_iovmm_client_lock(nvmap_vm_client);
+ }
+ mutex_unlock(&nvmap_pin_lock);
+ }
+
ret = nonseekable_open(inode, filp);
if (unlikely(ret))
return ret;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) return -ENOMEM;
+ priv->handle_refs = RB_ROOT;
+ priv->su = (filp->f_op == &knvmap_fops);
+
+ atomic_set(&priv->iovm_commit, 0);
+
+ if (nvmap_vm_client)
+ priv->iovm_limit = tegra_iovmm_get_vm_size(nvmap_vm_client);
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+ /* to prevent fragmentation-caused deadlocks, derate the size of
+ * the IOVM space to 75% */
+ priv->iovm_limit >>= 2;
+ priv->iovm_limit *= 3;
+#endif
+
+ spin_lock_init(&priv->ref_lock);
+
filp->f_mapping->backing_dev_info = &nvmap_bdi;
- filp->private_data = NULL;
+ filp->private_data = priv;
+ return 0;
+}
+
+static int nvmap_ioctl_getid(struct file *filp, void __user *arg)
+{
+ struct nvmem_create_handle op;
+ struct nvmap_handle *h = NULL;
+ int err;
+
+ err = copy_from_user(&op, arg, sizeof(op));
+ if (err) return err;
+
+ if (!op.handle) return -EINVAL;
+
+ h = _nvmap_validate_get((unsigned long)op.handle,
+ filp->f_op==&knvmap_fops);
+
+ if (h) {
+ op.id = (__u32)h;
+ /* when the owner of a handle gets its ID, this is treated
+ * as a granting of the handle for use by other processes.
+ * however, the super-user is not capable of promoting a
+ * handle to global status if it was created in another
+ * process. */
+ if (current->group_leader == h->owner) h->global = true;
+
+ /* getid is not supposed to result in a ref count increase */
+ _nvmap_handle_put(h);
+
+ return copy_to_user(arg, &op, sizeof(op));
+ }
+ return -EPERM;
+}
+
+/* attempts to allocate from either contiguous system memory or IOVMM space */
+static int _nvmap_do_page_alloc(struct nvmap_file_priv *priv,
+ struct nvmap_handle *h, unsigned int heap_mask,
+ size_t align, bool secure)
+{
+ int ret = -ENOMEM;
+ size_t page_size = (h->size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
+#ifdef IOVMM_FIRST
+ unsigned int fallback[] = { NVMEM_HEAP_IOVMM, NVMEM_HEAP_SYSMEM, 0 };
+#else
+ unsigned int fallback[] = { NVMEM_HEAP_SYSMEM, NVMEM_HEAP_IOVMM, 0 };
+#endif
+ unsigned int *m = fallback;
+
+ /* secure allocations must not be performed from sysmem */
+ if (secure) heap_mask &= ~NVMEM_HEAP_SYSMEM;
+
+ if (align > PAGE_SIZE) return -EINVAL;
+
+ while (*m && ret) {
+ if (heap_mask & NVMEM_HEAP_SYSMEM & *m)
+ ret = _nvmap_alloc_do_pgalloc(h, true, secure);
+
+ else if (heap_mask & NVMEM_HEAP_IOVMM & *m) {
+ /* increment the committed IOVM space prior to
+ * allocation, to avoid race conditions with other
+ * threads simultaneously allocating. this is
+ * conservative, but guaranteed to work */
+
+ int oc;
+ oc = atomic_add_return(page_size, &priv->iovm_commit);
+
+ if (oc <= priv->iovm_limit)
+ ret = _nvmap_alloc_do_pgalloc(h, false, secure);
+ else
+ ret = -ENOMEM;
+ /* on failure, or when do_pgalloc promotes a non-
+ * contiguous request into a contiguous request,
+ * release the committed iovm space */
+ if (ret || h->pgalloc.contig)
+ atomic_sub(page_size, &priv->iovm_commit);
+ }
+ m++;
+ }
+ return ret;
+}
+
+/* attempts to allocate from the carveout heaps */
+static int _nvmap_do_carveout_alloc(struct nvmap_handle *h,
+ unsigned int heap_mask, size_t align)
+{
+ int ret = -ENOMEM;
+ struct nvmap_carveout_node *n;
+
+ down_read(&nvmap_context.list_sem);
+ list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
+ if (heap_mask & n->heap_bit)
+ ret = _nvmap_alloc_do_coalloc(h, &n->carveout, align);
+ if (!ret) break;
+ }
+ up_read(&nvmap_context.list_sem);
+ return ret;
+}
+
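+/* allocate backing storage for a previously-created handle. the heap mask
+ * and carveout_first flag decide whether the carveout heaps or page/IOVMM
+ * allocation is attempted first; secure handles are never placed in
+ * carveout heaps or contiguous system memory */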
+static int _nvmap_do_alloc(struct nvmap_file_priv *priv,
+ unsigned long href, unsigned int heap_mask, size_t align,
+ unsigned int flags, bool secure, bool carveout_first)
+{
+ int ret = -ENOMEM;
+ struct nvmap_handle_ref *r;
+ struct nvmap_handle *h;
+
+ if (!href) return -EINVAL;
+
+ spin_lock(&priv->ref_lock);
+ r = _nvmap_ref_lookup_locked(priv, href);
+ spin_unlock(&priv->ref_lock);
+
+ if (!r) return -EPERM;
+
+ h = r->h;
+ if (h->alloc) return 0;
+ h->flags = flags;
+
+ align = max_t(size_t, align, L1_CACHE_BYTES);
+
+ if (secure) heap_mask &= ~NVMEM_HEAP_CARVEOUT_MASK;
+
+ if (carveout_first || (heap_mask & NVMEM_HEAP_CARVEOUT_IRAM)) {
+ ret = _nvmap_do_carveout_alloc(h, heap_mask, align);
+ if (ret) ret = _nvmap_do_page_alloc(priv, h,
+ heap_mask, align, secure);
+ } else {
+ ret = _nvmap_do_page_alloc(priv, h, heap_mask, align, secure);
+ if (ret) ret = _nvmap_do_carveout_alloc(h, heap_mask, align);
+ }
+
+ BUG_ON((!ret && !h->alloc) || (ret && h->alloc));
+ return ret;
+}
+
+static int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
+{
+ struct nvmem_alloc_handle op;
+ struct nvmap_file_priv *priv = filp->private_data;
+ bool secure = false;
+#ifdef IOVMM_FIRST
+ bool carveout_first = false;
+#else
+ bool carveout_first = true;
+#endif
+ int err;
+
+ err = copy_from_user(&op, arg, sizeof(op));
+ if (err) return err;
+
+ if (op.align & (op.align-1)) return -EINVAL;
+
+ /* user-space handles are aligned to page boundaries, to prevent
+ * data leakage. */
+ op.align = max_t(size_t, op.align, PAGE_SIZE);
+
+ if (op.flags & NVMEM_HANDLE_SECURE) secure = true;
+
+ /* TODO: implement a way to specify carveout-first vs
+ * carveout-second */
+ return _nvmap_do_alloc(priv, op.handle, op.heap_mask,
+ op.align, (op.flags & 0x3), secure, carveout_first);
+}
+
+static int _nvmap_do_free(struct nvmap_file_priv *priv, unsigned long href)
+{
+ struct nvmap_handle_ref *r;
+ struct nvmap_handle *h;
+ int do_wake = 0;
+ if (!href) return 0;
+
+ spin_lock(&priv->ref_lock);
+ r = _nvmap_ref_lookup_locked(priv, href);
+
+ if (!r) {
+ spin_unlock(&priv->ref_lock);
+ pr_err("%s attempting to free unrealized handle\n",
+ current->group_leader->comm);
+ return -EPERM;
+ }
+
+ h = r->h;
+
+ if (!atomic_dec_return(&r->refs)) {
+ int pins = atomic_read(&r->pin);
+ rb_erase(&r->node, &priv->handle_refs);
+ spin_unlock(&priv->ref_lock);
+ if (pins) pr_err("%s: %s freeing %s's pinned %s %s %uB handle\n",
+ __func__, current->comm,
+ (r->h->owner) ? r->h->owner->comm : "kernel",
+ (r->h->global) ? "global" : "private",
+ (r->h->alloc && r->h->heap_pgalloc)?"page-alloc" :
+ (r->h->alloc) ? "carveout" : "unallocated",
+ r->h->orig_size);
+ while (pins--) do_wake |= _nvmap_handle_unpin(r->h);
+ kfree(r);
+ if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
+ atomic_sub(h->size, &priv->iovm_commit);
+ if (do_wake) wake_up(&nvmap_pin_wait);
+ } else
+ spin_unlock(&priv->ref_lock);
+
+ BUG_ON(!atomic_read(&h->ref));
+ _nvmap_handle_put(h);
return 0;
}
+static int nvmap_ioctl_free(struct file *filp, unsigned long arg)
+{
+ return _nvmap_do_free(filp->private_data, arg);
+}
+
+/* given a size, pre-existing handle ID, or a preserved handle key, create
+ * a handle and a reference to the handle in the per-context data */
+static int _nvmap_do_create(struct nvmap_file_priv *priv,
+ unsigned int cmd, unsigned long key, bool su,
+ struct nvmap_handle_ref **ref)
+{
+ struct nvmap_handle_ref *r = NULL;
+ struct nvmap_handle *h = NULL;
+ struct rb_node **p, *parent = NULL;
+
+ if (cmd == NVMEM_IOC_FROM_ID) {
+ /* only ugly corner case to handle with from ID:
+ *
+ * normally, if the handle that is being duplicated is IOVMM-
+ * backed, the handle should fail to duplicate if duping it
+ * would over-commit IOVMM space. however, if the handle is
+ * already duplicated in the client process (or the client
+ * is duplicating a handle it created originally), IOVMM space
+ * should not be doubly-reserved.
+ */
+ h = _nvmap_validate_get(key, priv->su);
+
+ if (!h) {
+ pr_err("%s: %s duplicate handle failed\n", __func__,
+ current->group_leader->comm);
+ return -EPERM;
+ }
+
+ if (!h->alloc) {
+ pr_err("%s: attempting to clone unallocated "
+ "handle\n", __func__);
+ _nvmap_handle_put(h);
+ h = NULL;
+ return -EINVAL;
+ }
+
+ spin_lock(&priv->ref_lock);
+ r = _nvmap_ref_lookup_locked(priv, (unsigned long)h);
+ spin_unlock(&priv->ref_lock);
+ if (r) {
+ atomic_inc(&r->refs);
+ *ref = r;
+ return 0;
+ }
+
+ /* verify that adding this handle to the process' access list
+ * won't exceed the IOVM limit */
+ if (h->heap_pgalloc && !h->pgalloc.contig) {
+ int oc = atomic_add_return(h->size, &priv->iovm_commit);
+ if (oc > priv->iovm_limit) {
+ atomic_sub(h->size, &priv->iovm_commit);
+ _nvmap_handle_put(h);
+ h = NULL;
+ pr_err("%s: %s duplicating handle would "
+ "over-commit iovmm space (%d / %dB\n",
+ __func__, current->group_leader->comm,
+ oc, priv->iovm_limit);
+ return -ENOMEM;
+ }
+ }
+ } else if (cmd == NVMEM_IOC_CREATE) {
+ h = _nvmap_handle_create(current->group_leader, key);
+ if (!h) return -ENOMEM;
+ } else {
+ h = _nvmap_claim_preserved(current->group_leader, key);
+ if (!h) return -EINVAL;
+ }
+
+ BUG_ON(!h);
+
+ /* if the client does something strange, like calling CreateFromId
+ * when it was the original creator, avoid creating two handle refs
+ * for the same handle */
+ spin_lock(&priv->ref_lock);
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ spin_unlock(&priv->ref_lock);
+ if (h) _nvmap_handle_put(h);
+ return -ENOMEM;
+ }
+
+ atomic_set(&r->refs, 1);
+ r->h = h;
+ atomic_set(&r->pin, 0);
+
+ p = &priv->handle_refs.rb_node;
+ while (*p) {
+ struct nvmap_handle_ref *l;
+ parent = *p;
+ l = rb_entry(parent, struct nvmap_handle_ref, node);
+ if (r->h > l->h) p = &parent->rb_right;
+ else p = &parent->rb_left;
+ }
+ rb_link_node(&r->node, parent, p);
+ rb_insert_color(&r->node, &priv->handle_refs);
+
+ spin_unlock(&priv->ref_lock);
+ *ref = r;
+ return 0;
+}
+
+static int nvmap_ioctl_create(struct file *filp,
+ unsigned int cmd, void __user *arg)
+{
+ struct nvmem_create_handle op;
+ struct nvmap_handle_ref *r = NULL;
+ struct nvmap_file_priv *priv = filp->private_data;
+ unsigned long key;
+ int err = 0;
+
+ err = copy_from_user(&op, arg, sizeof(op));
+ if (err) return err;
+
+ if (!priv) return -ENODEV;
+
+ /* user-space-created handles are expanded to be page-aligned,
+ * so that mmap() will not accidentally leak a different allocation */
+ if (cmd==NVMEM_IOC_CREATE)
+ key = (op.size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
+ else if (cmd==NVMEM_IOC_CLAIM)
+ key = op.key;
+ else if (cmd==NVMEM_IOC_FROM_ID)
+ key = op.id;
+ else
+ return -EINVAL;
+
+ err = _nvmap_do_create(priv, cmd, key, (filp->f_op==&knvmap_fops), &r);
+
+ if (!err) {
+ op.handle = (uintptr_t)r->h;
+ /* since the size is spoofed to a page-multiple above,
+ * clobber the orig_size field back to the requested value for
+ * debugging. */
+ if (cmd == NVMEM_IOC_CREATE) r->h->orig_size = op.size;
+ err = copy_to_user(arg, &op, sizeof(op));
+ if (err) _nvmap_do_free(priv, op.handle);
+ }
+
+ return err;
+}
+
static int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
{
struct nvmem_map_caller op;
- struct nvmap_vma_priv *priv;
+ struct nvmap_vma_priv *vpriv;
struct vm_area_struct *vma;
- NvRmMemHandle hmem;
+ struct nvmap_handle *h;
int err = 0;
err = copy_from_user(&op, arg, sizeof(op));
- if (err)
- return err;
+ if (err) return err;
- hmem = (NvRmMemHandle)op.handle;
- if (!hmem)
- return -EINVAL;
+ if (!op.handle) return -EINVAL;
+
+ h = _nvmap_validate_get(op.handle, (filp->f_op==&knvmap_fops));
+ if (!h) return -EINVAL;
down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, (unsigned long)op.addr);
+ vma = find_vma(current->mm, op.addr);
if (!vma || !vma->vm_private_data) {
err = -ENOMEM;
goto out;
@@ -343,54 +2133,49 @@ static int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
goto out;
}
- if ((op.offset + op.length) >
- ((hmem->size+PAGE_SIZE-1)&PAGE_MASK)) {
+ if ((op.offset + op.length) > h->size) {
err = -EADDRNOTAVAIL;
goto out;
}
- priv = vma->vm_private_data;
- BUG_ON(!priv);
+ vpriv = vma->vm_private_data;
+ BUG_ON(!vpriv);
/* the VMA must exactly match the requested mapping operation, and the
* VMA that is targetted must have been created originally by /dev/nvmap
*/
- if (((void*)vma->vm_start != op.addr) ||
- (vma->vm_ops != &nvmap_vma_ops) ||
- (vma->vm_end-vma->vm_start != op.length)) {
+ if ((vma->vm_start != op.addr) || (vma->vm_ops != &nvmap_vma_ops) ||
+ (vma->vm_end-vma->vm_start != op.length)) {
err = -EPERM;
goto out;
}
/* verify that each mmap() system call creates a unique VMA */
- if (priv->hmem && hmem==priv->hmem)
+ if (vpriv->h && h==vpriv->h)
goto out;
- else if (priv->hmem) {
+ else if (vpriv->h) {
err = -EADDRNOTAVAIL;
goto out;
}
- if (hmem->alignment & ~PAGE_MASK) {
+ if (!h->heap_pgalloc && (h->carveout.base & ~PAGE_MASK)) {
err = -EFAULT;
goto out;
}
- priv->hmem = hmem;
- priv->offs = op.offset;
+ vpriv->h = h;
+ vpriv->offs = op.offset;
/* if the hmem is not writeback-cacheable, drop back to a page mapping
* which will guarantee DMA coherency
*/
- if (hmem->coherency == NvOsMemAttribute_WriteBack)
- vma->vm_page_prot = pgprot_inner_writeback(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- NvRmPrivMemIncrRef(hmem);
+ vma->vm_page_prot = _nvmap_flag_to_pgprot(h->flags,
+ vma->vm_page_prot);
- out:
+out:
up_read(&current->mm->mmap_sem);
+ if (err) _nvmap_handle_put(h);
return err;
}
/* Initially, the nvmap mmap system call is used to allocate an inaccessible
@@ -412,7 +2197,7 @@ static int nvmap_mmap(struct file *filp, struct vm_area_struct *vma)
return -ENOMEM;
priv->offs = 0;
- priv->hmem = NULL;
+ priv->h = NULL;
atomic_set(&priv->ref, 1);
vma->vm_flags |= VM_SHARED;
@@ -423,205 +2208,442 @@ static int nvmap_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-static int nvmap_cache_maint(struct file *filp, void __user *arg)
+/* perform cache maintenance on a handle; caller's handle must be pre-
+ * validated. */
+static int _nvmap_do_cache_maint(struct nvmap_handle *h,
+ unsigned long start, unsigned long end, unsigned long op, bool get)
+{
+ pgprot_t prot;
+ void *addr = NULL;
+ void (*inner_maint)(const void*, const void*);
+ void (*outer_maint)(unsigned long, unsigned long);
+ int err = 0;
+
+ if (get) h = _nvmap_handle_get(h);
+
+ if (!h) return -EINVAL;
+
+ /* don't waste time on cache maintenance if the handle isn't cached */
+ if (h->flags == NVMEM_HANDLE_UNCACHEABLE ||
+ h->flags == NVMEM_HANDLE_WRITE_COMBINE)
+ goto out;
+
+ if (op == NVMEM_CACHE_OP_WB) {
+ inner_maint = smp_dma_clean_range;
+ if (h->flags == NVMEM_HANDLE_CACHEABLE)
+ outer_maint = outer_clean_range;
+ else
+ outer_maint = NULL;
+ } else if (op == NVMEM_CACHE_OP_WB_INV) {
+ inner_maint = smp_dma_flush_range;
+ if (h->flags == NVMEM_HANDLE_CACHEABLE)
+ outer_maint = outer_flush_range;
+ else
+ outer_maint = NULL;
+ } else {
+ inner_maint = smp_dma_inv_range;
+ if (h->flags == NVMEM_HANDLE_CACHEABLE)
+ outer_maint = outer_inv_range;
+ else
+ outer_maint = NULL;
+ }
+
+ prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
+
+ /* for any write-back operation, it is safe to writeback the entire
+ * cache rather than just the requested region. for large regions, it
+ * is faster to do this than to iterate over every line.
+ * only implemented for L1-only cacheable handles currently */
+ if (h->flags == NVMEM_HANDLE_INNER_CACHEABLE &&
+ end-start >= PAGE_SIZE*3 && op != NVMEM_CACHE_OP_INV) {
+ if (op==NVMEM_CACHE_OP_WB) dmac_clean_all();
+ else dmac_flush_all();
+ goto out;
+ }
+
+ while (start < end) {
+ struct page *page = NULL;
+ unsigned long phys;
+ void *src;
+ size_t count;
+
+ if (h->heap_pgalloc) {
+ page = h->pgalloc.pages[start>>PAGE_SHIFT];
+ BUG_ON(!page);
+ get_page(page);
+ phys = page_to_phys(page) + (start & ~PAGE_MASK);
+ } else {
+ phys = h->carveout.base + start;
+ }
+
+ if (!addr) {
+ err = nvmap_map_pte(__phys_to_pfn(phys), prot, &addr);
+ if (err) {
+ if (page) put_page(page);
+ break;
+ }
+ } else {
+ _nvmap_set_pte_at((unsigned long)addr,
+ __phys_to_pfn(phys), prot);
+ }
+
+ src = addr + (phys & ~PAGE_MASK);
+ count = min_t(size_t, end-start, PAGE_SIZE-(phys&~PAGE_MASK));
+
+ inner_maint(src, src+count);
+ if (outer_maint) outer_maint(phys, phys+count);
+ start += count;
+ if (page) put_page(page);
+ }
+
+out:
+ if (h->flags == NVMEM_HANDLE_INNER_CACHEABLE) outer_sync();
+ if (addr) nvmap_unmap_pte(addr);
+ if (get) _nvmap_handle_put(h);
+ return err;
+}
+
+static int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
{
struct nvmem_cache_op op;
int err = 0;
struct vm_area_struct *vma;
- struct nvmap_vma_priv *priv;
- size_t offs;
- size_t end;
- unsigned long count;
- pgprot_t prot = pgprot_inner_writeback(pgprot_kernel);
+ struct nvmap_vma_priv *vpriv;
+ unsigned long start;
+ unsigned long end;
err = copy_from_user(&op, arg, sizeof(op));
if (err) return err;
if (!op.handle || !op.addr || op.op<NVMEM_CACHE_OP_WB ||
- op.op>NVMEM_CACHE_OP_WB_INV)
+ op.op>NVMEM_CACHE_OP_WB_INV)
return -EINVAL;
vma = find_vma(current->active_mm, (unsigned long)op.addr);
if (!vma || vma->vm_ops!=&nvmap_vma_ops ||
- (unsigned long)op.addr + op.len > vma->vm_end) {
- err = -EADDRNOTAVAIL;
- goto out;
- }
+ (unsigned long)op.addr + op.len > vma->vm_end)
+ return -EADDRNOTAVAIL;
- priv = (struct nvmap_vma_priv *)vma->vm_private_data;
+ vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;
- if (priv->hmem != (NvRmMemHandle)op.handle) {
- err = -EFAULT;
- goto out;
- }
+ if ((unsigned long)vpriv->h != op.handle)
+ return -EFAULT;
- /* don't waste time on cache maintenance if the handle isn't cached */
- if (priv->hmem->coherency != NvOsMemAttribute_WriteBack)
- goto out;
+ start = (unsigned long)op.addr - vma->vm_start;
+ end = start + op.len;
- offs = (unsigned long)op.addr - vma->vm_start;
- end = offs + op.len;
+ return _nvmap_do_cache_maint(vpriv->h, start, end, op.op, true);
+}
- /* for any write-back operation, it is safe to writeback the entire
- * cache rather than one line at a time. for large regions, it
- * is faster to do this than to iterate over every line. */
- if (end-offs >= PAGE_SIZE*3 && op.op != NVMEM_CACHE_OP_INV) {
- if (op.op==NVMEM_CACHE_OP_WB) dmac_clean_all();
- else dmac_flush_all();
- goto out;
+/* copies a single element from the pre-get()'ed handle h, returns
+ * the number of bytes copied, and the address in the nvmap mapping range
+ * which was used (to eliminate re-allocation when copying multiple
+ * elements */
+static ssize_t _nvmap_do_one_rw_handle(struct nvmap_handle *h, int is_read,
+ int is_user, unsigned long start, unsigned long rw_addr,
+ unsigned long bytes, void **nvmap_addr)
+{
+ pgprot_t prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
+ unsigned long end = start + bytes;
+ unsigned long orig_start = start;
+
+ if (is_user) {
+ if (is_read && !access_ok(VERIFY_WRITE, (void*)rw_addr, bytes))
+ return -EPERM;
+ if (!is_read && !access_ok(VERIFY_READ, (void*)rw_addr, bytes))
+ return -EPERM;
}
- while (offs < end) {
+ while (start < end) {
struct page *page = NULL;
- void *addr = NULL, *src;
+ unsigned long phys;
+ size_t count;
+ void *src;
- if (priv->hmem->hPageHandle) {
- struct page *page;
- page = NvOsPageGetPage(priv->hmem->hPageHandle, offs);
+ if (h->heap_pgalloc) {
+ page = h->pgalloc.pages[start >> PAGE_SHIFT];
+ BUG_ON(!page);
get_page(page);
- err = nvmap_map_pte(page_to_pfn(page), prot, &addr);
+ phys = page_to_phys(page) + (start & ~PAGE_MASK);
} else {
- unsigned long phys = priv->hmem->PhysicalAddress + offs;
- err = nvmap_map_pte(phys>>PAGE_SHIFT, prot, &addr);
+ phys = h->carveout.base + start;
}
- if (err) {
- if (page) put_page(page);
- break;
+ if (!*nvmap_addr) {
+ int err = nvmap_map_pte(__phys_to_pfn(phys),
+ prot, nvmap_addr);
+ if (err) {
+ if (page) put_page(page);
+ count = start - orig_start;
+ return (count) ? count : err;
+ }
+ } else {
+ _nvmap_set_pte_at((unsigned long)*nvmap_addr,
+ __phys_to_pfn(phys), prot);
+
}
- src = addr + (offs & ~PAGE_MASK);
- count = min_t(size_t, end-offs, PAGE_SIZE-(offs & ~PAGE_MASK));
+ src = *nvmap_addr + (phys & ~PAGE_MASK);
+ count = min_t(size_t, end-start, PAGE_SIZE-(phys&~PAGE_MASK));
- switch (op.op) {
- case NVMEM_CACHE_OP_WB:
- smp_dma_clean_range(src, src+count);
- break;
- case NVMEM_CACHE_OP_INV:
- smp_dma_inv_range(src, src+count);
- break;
- case NVMEM_CACHE_OP_WB_INV:
- smp_dma_flush_range(src, src+count);
- break;
- }
- offs += count;
- nvmap_unmap_pte(addr);
+ if (is_user && is_read)
+ copy_to_user((void*)rw_addr, src, count);
+ else if (is_user)
+ copy_from_user(src, (void*)rw_addr, count);
+ else if (is_read)
+ memcpy((void*)rw_addr, src, count);
+ else
+ memcpy(src, (void*)rw_addr, count);
+
+ rw_addr += count;
+ start += count;
if (page) put_page(page);
}
- out:
- return err;
+ return (ssize_t)start - orig_start;
}
-static int nvmap_do_rw_handle(NvRmMemHandle hmem, int is_read,
- unsigned long l_hmem_offs, unsigned long l_user_addr,
- unsigned long bytes, pgprot_t prot)
+static ssize_t _nvmap_do_rw_handle(struct nvmap_handle *h, int is_read,
+ int is_user, unsigned long h_offs, unsigned long sys_addr,
+ unsigned long h_stride, unsigned long sys_stride,
+ unsigned long elem_size, unsigned long count)
{
- void *addr = NULL, *dest;
- struct page *page = NULL;
- int ret = 0;
-
- if (hmem->hPageHandle) {
- page = NvOsPageGetPage(hmem->hPageHandle, l_hmem_offs);
- get_page(page);
- ret = nvmap_map_pte(page_to_pfn(page), prot, &addr);
- } else {
- unsigned long phys = hmem->PhysicalAddress + l_hmem_offs;
- ret = nvmap_map_pte(phys>>PAGE_SHIFT, prot, &addr);
+ ssize_t bytes_copied = 0;
+ void *addr = NULL;
+
+ h = _nvmap_handle_get(h);
+ if (!h) return -EINVAL;
+
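+ /* when the elements are contiguous in both the handle and the system
+ * buffer, fold the whole transfer into a single large element so the
+ * per-element loop below runs only once */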
+ if (elem_size == h_stride &&
+ elem_size == sys_stride) {
+ elem_size *= count;
+ h_stride = elem_size;
+ sys_stride = elem_size;
+ count = 1;
}
- dest = addr + (l_hmem_offs & ~PAGE_MASK);
-
- if (is_read && !access_ok(VERIFY_WRITE, (void *)l_user_addr, bytes))
- ret = -EPERM;
-
- if (!is_read && !access_ok(VERIFY_READ, (void *)l_user_addr, bytes))
- ret = -EPERM;
-
- if (!ret) {
- if (is_read) copy_to_user((void *)l_user_addr, dest, bytes);
- else copy_from_user(dest, (void*)l_user_addr, bytes);
+ while (count--) {
+ ssize_t ret = _nvmap_do_one_rw_handle(h, is_read,
+ is_user, h_offs, sys_addr, elem_size, &addr);
+ if (ret < 0) {
+ if (!bytes_copied) bytes_copied = ret;
+ break;
+ }
+ bytes_copied += ret;
+ if (ret < elem_size) break;
+ sys_addr += elem_size;
+ h_offs += elem_size;
}
if (addr) nvmap_unmap_pte(addr);
- if (page) put_page(page);
- return ret;
+ _nvmap_handle_put(h);
+ return bytes_copied;
}
-static int nvmap_rw_handle(struct file *filp, int is_read,
- void __user* arg)
+static int nvmap_ioctl_rw_handle(struct file *filp,
+ int is_read, void __user* arg)
{
+ struct nvmem_rw_handle __user *uarg = arg;
struct nvmem_rw_handle op;
- NvRmMemHandle hmem;
- uintptr_t user_addr, hmem_offs;
+ struct nvmap_handle *h;
+ ssize_t copied;
int err = 0;
- pgprot_t prot;
err = copy_from_user(&op, arg, sizeof(op));
- if (err)
- return err;
+ if (err) return err;
if (!op.handle || !op.addr || !op.count || !op.elem_size)
return -EINVAL;
- hmem = (NvRmMemHandle)op.handle;
+ h = _nvmap_validate_get(op.handle, (filp->f_op == &knvmap_fops));
+ if (!h) return -EINVAL; /* -EPERM? */
- if (op.elem_size == op.hmem_stride &&
- op.elem_size == op.user_stride) {
- op.elem_size *= op.count;
- op.hmem_stride = op.elem_size;
- op.user_stride = op.elem_size;
- op.count = 1;
- }
+ copied = _nvmap_do_rw_handle(h, is_read, 1, op.offset,
+ (unsigned long)op.addr, op.hmem_stride,
+ op.user_stride, op.elem_size, op.count);
- user_addr = (uintptr_t)op.addr;
- hmem_offs = (uintptr_t)op.offset;
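+ /* partial transfers are reported, not hidden: the number of bytes
+ * actually copied is written back to uarg->count, and -EINTR is
+ * returned if the copy stopped short of count*elem_size */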
+ if (copied < 0) { err = copied; copied = 0; }
+ else if (copied < (op.count*op.elem_size)) err = -EINTR;
- if (hmem->coherency==NvOsMemAttribute_WriteBack)
- prot = pgprot_inner_writeback(pgprot_kernel);
- else if (hmem->coherency==NvOsMemAttribute_WriteCombined)
- prot = pgprot_writecombine(pgprot_kernel);
- else
- prot = pgprot_noncached(pgprot_kernel);
+ __put_user(copied, &uarg->count);
- while (op.count--) {
- unsigned long remain;
- unsigned long l_hmem_offs = hmem_offs;
- unsigned long l_user_addr = user_addr;
+ _nvmap_handle_put(h);
- remain = op.elem_size;
+ return err;
+}
- while (remain && !err) {
- unsigned long bytes;
+static unsigned int _nvmap_do_get_param(struct nvmap_handle *h,
+ unsigned int param)
+{
+ if (param==NVMEM_HANDLE_PARAM_SIZE)
+ return h->orig_size;
- bytes = min(remain, PAGE_SIZE-(l_hmem_offs&~PAGE_MASK));
- bytes = min(bytes, PAGE_SIZE-(l_user_addr&~PAGE_MASK));
+ else if (param==NVMEM_HANDLE_PARAM_ALIGNMENT) {
+ unsigned int i = 1;
- err = nvmap_do_rw_handle(hmem, is_read, l_hmem_offs,
- l_user_addr, bytes, prot);
+ if (!h->alloc) return 0;
- if (!err) {
- remain -= bytes;
- l_hmem_offs += bytes;
- l_user_addr += bytes;
- }
+ if (h->heap_pgalloc) return PAGE_SIZE;
+ else {
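+ /* a carveout block's alignment is the lowest set bit of its base
+ * address */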
+ while (!(i & h->carveout.base)) i<<=1;
}
+ return i;
+ } else if (param==NVMEM_HANDLE_PARAM_BASE) {
+
+ WARN_ON(!h->alloc || !atomic_read(&h->pin));
+
+ if (!h->alloc || !atomic_read(&h->pin)) return ~0ul;
+
+ if (!h->heap_pgalloc)
+ return h->carveout.base;
+
+ if (h->pgalloc.contig)
+ return page_to_phys(h->pgalloc.pages[0]);
- user_addr += op.user_stride;
- hmem_offs += op.hmem_stride;
+ if (h->pgalloc.area)
+ return h->pgalloc.area->iovm_start;
+
+ return ~0ul;
+ } else if (param==NVMEM_HANDLE_PARAM_HEAP) {
+
+ if (!h->alloc) return 0;
+
+ if (!h->heap_pgalloc) {
+ /* FIXME: hard-coded physical address */
+ if ((h->carveout.base & 0xf0000000ul)==0x40000000ul)
+ return NVMEM_HEAP_CARVEOUT_IRAM;
+ else
+ return NVMEM_HEAP_CARVEOUT_GENERIC;
+ }
+
+ if (!h->pgalloc.contig)
+ return NVMEM_HEAP_IOVMM;
+
+ return NVMEM_HEAP_SYSMEM;
}
- dmb();
+ return 0;
+}
+
+static int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
+{
+ struct nvmem_handle_param op;
+ struct nvmap_handle *h;
+ int err;
+
+ err = copy_from_user(&op, arg, sizeof(op));
+ if (err) return err;
+
+ if (op.param < NVMEM_HANDLE_PARAM_SIZE ||
+ op.param > NVMEM_HANDLE_PARAM_HEAP)
+ return -EINVAL;
+
+ h = _nvmap_validate_get(op.handle, (filp->f_op==&knvmap_fops));
+ if (!h) return -EINVAL;
+
+ op.result = _nvmap_do_get_param(h, op.param);
+ err = copy_to_user(arg, &op, sizeof(op));
+
+ _nvmap_handle_put(h);
return err;
}
-static int __init nvmap_pte_init(void)
+static struct miscdevice misc_nvmap_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "nvmap",
+ .fops = &nvmap_fops
+};
+
+static struct miscdevice misc_knvmap_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "knvmap",
+ .fops = &knvmap_fops
+};
+
+static struct device *__nvmap_heap_parent_dev(void)
+{
+ return misc_nvmap_dev.this_device;
+}
+
+/* creates the sysfs attribute files for a carveout heap; if called
+ * before fs initialization, silently returns.
+ */
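+/* the heap appears in sysfs as a child device of the nvmap misc device,
+ * named heap-<carveout name>, with the attributes from
+ * nvmap_heap_defattr_group attached */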
+static void _nvmap_create_heap_attrs(struct nvmap_carveout_node *n)
+{
+ if (!_nvmap_heap_parent_dev) return;
+ dev_set_name(&n->dev, "heap-%s", n->carveout.name);
+ n->dev.parent = _nvmap_heap_parent_dev;
+ n->dev.driver = NULL;
+ n->dev.release = NULL;
+ if (device_register(&n->dev)) {
+ pr_err("%s: failed to create heap-%s device\n",
+ __func__, n->carveout.name);
+ return;
+ }
+ if (sysfs_create_group(&n->dev.kobj, &nvmap_heap_defattr_group))
+ pr_err("%s: failed to create attribute group for heap-%s "
+ "device\n", __func__, n->carveout.name);
+}
+
+static int __init nvmap_dev_init(void)
+{
+ struct nvmap_carveout_node *n;
+
+ if (misc_register(&misc_nvmap_dev))
+ pr_err("%s error registering %s\n", __func__,
+ misc_nvmap_dev.name);
+
+ if (misc_register(&misc_knvmap_dev))
+ pr_err("%s error registering %s\n", __func__,
+ misc_knvmap_dev.name);
+
+ /* create sysfs attribute entries for all the heaps which were
+ * created prior to nvmap_dev_init */
+ down_read(&nvmap_context.list_sem);
+ list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
+ _nvmap_create_heap_attrs(n);
+ }
+ up_read(&nvmap_context.list_sem);
+
+ nvmap_procfs_root = proc_mkdir("nvmap", NULL);
+ if (nvmap_procfs_root) {
+ nvmap_procfs_proc = proc_mkdir("proc", nvmap_procfs_root);
+ }
+ return 0;
+}
+fs_initcall(nvmap_dev_init);
+
+/* initialization of core data structures split out to earlier in the
+ * init sequence, to allow kernel drivers access to nvmap before devfs
+ * is initialized */
+#define NR_CARVEOUTS 2
+static unsigned int nvmap_carveout_cmds = 0;
+static unsigned long nvmap_carveout_cmd_base[NR_CARVEOUTS];
+static unsigned long nvmap_carveout_cmd_size[NR_CARVEOUTS];
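+/* carveouts requested on the kernel command line (e.g. nvmem=32M@0x1c000000,
+ * where the base and size are only illustrative) are buffered in the arrays
+ * above and registered by nvmap_core_init() as heaps named generic-0,
+ * generic-1, ... with usage bitmask 0x1 */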
+
+static int __init nvmap_core_init(void)
{
u32 base = NVMAP_BASE;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int i = 0;
+ unsigned int i;
+
+ init_rwsem(&nvmap_context.list_sem);
+ nvmap_context.init_data.handle_refs = RB_ROOT;
+ atomic_set(&nvmap_context.init_data.iovm_commit, 0);
+ spin_lock_init(&nvmap_context.init_data.ref_lock);
+ nvmap_context.init_data.su = true;
+ /* no IOVMM allocations for kernel-created handles */
+ nvmap_context.init_data.iovm_limit = 0;
+ INIT_LIST_HEAD(&nvmap_context.heaps);
+
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+ for (i=0; i<ARRAY_SIZE(nvmap_mru_cutoff); i++)
+ INIT_LIST_HEAD(&nvmap_mru_vma_lists[i]);
+#endif
+
+ i = 0;
do {
pgd = pgd_offset(&init_mm, base);
pmd = pmd_alloc(&init_mm, pgd, base);
@@ -638,7 +2660,641 @@ static int __init nvmap_pte_init(void)
base += (1<<PGDIR_SHIFT);
} while (base < NVMAP_END);
+ for (i=0; i<nvmap_carveout_cmds; i++) {
+ char tmp[16];
+ snprintf(tmp, sizeof(tmp), "generic-%u", i);
+ nvmap_add_carveout_heap(nvmap_carveout_cmd_base[i],
+ nvmap_carveout_cmd_size[i], tmp, 0x1);
+ }
+
return 0;
}
-core_initcall(nvmap_pte_init);
+core_initcall(nvmap_core_init);
+
+static int __init nvmap_heap_arg(char *options)
+{
+ unsigned long start, size;
+ char *p = options;
+
+ start = -1;
+ size = memparse(p, &p);
+ if (*p == '@')
+ start = memparse(p + 1, &p);
+
+ if (nvmap_carveout_cmds < ARRAY_SIZE(nvmap_carveout_cmd_size)) {
+ nvmap_carveout_cmd_base[nvmap_carveout_cmds] = start;
+ nvmap_carveout_cmd_size[nvmap_carveout_cmds] = size;
+ nvmap_carveout_cmds++;
+ }
+ return 0;
+}
+__setup("nvmem=", nvmap_heap_arg);
+
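+/* carves the exact range [base, base+size) out of carveout co for a
+ * preserved handle (see _nvmap_create_nvos_preserved() and
+ * nvmap_create_preserved_handle() below); fails with -ENXIO unless a
+ * single free block completely covers the requested range */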
+static int _nvmap_try_create_preserved(struct nvmap_carveout *co,
+ struct nvmap_handle *h, unsigned long base,
+ size_t size, unsigned int key)
+{
+ unsigned long end = base + size;
+ short idx;
+
+ h->carveout.base = ~0;
+ h->carveout.key = key;
+ h->carveout.co_heap = NULL;
+
+ spin_lock(&co->lock);
+ idx = co->free_index;
+ while (idx != -1) {
+ struct nvmap_mem_block *b = BLOCK(co, idx);
+ unsigned long blk_end = b->base + b->size;
+ if (b->base <= base && blk_end >= end) {
+ nvmap_split_block(co, idx, base, size);
+ h->carveout.block_idx = idx;
+ h->carveout.base = co->blocks[idx].base;
+ h->carveout.co_heap = co;
+ h->alloc = true;
+ break;
+ }
+ idx = b->next_free;
+ }
+ spin_unlock(&co->lock);
+ return (h->carveout.co_heap == NULL) ? -ENXIO : 0;
+}
+
+static void _nvmap_create_nvos_preserved(struct nvmap_carveout *co)
+{
+ unsigned int i, key;
+ NvBootArgsPreservedMemHandle mem;
+ static int was_created[NvBootArgKey_PreservedMemHandle_Num -
+ NvBootArgKey_PreservedMemHandle_0] = { 0 };
+
+ for (i=0, key=NvBootArgKey_PreservedMemHandle_0;
+ i<ARRAY_SIZE(was_created); i++, key++) {
+ struct nvmap_handle *h;
+
+ if (was_created[i]) continue;
+
+ if (NvOsBootArgGet(key, &mem, sizeof(mem))!=NvSuccess) continue;
+ if (!mem.Address || !mem.Size) continue;
+
+ h = _nvmap_handle_create(NULL, mem.Size);
+ if (!h) continue;
+
+ if (!_nvmap_try_create_preserved(co, h, mem.Address,
+ mem.Size, key))
+ was_created[i] = 1;
+ else
+ _nvmap_handle_put(h);
+ }
+}
+
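+/* registers a new carveout heap; used both for command-line (nvmem=)
+ * carveouts and by other kernel code via the prototype in
+ * include/linux/tegra_devices.h. The heap list is kept sorted by
+ * descending heap_bit. */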
+int nvmap_add_carveout_heap(unsigned long base, size_t size,
+ const char *name, unsigned int bitmask)
+{
+ struct nvmap_carveout_node *n;
+ struct nvmap_carveout_node *l;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) return -ENOMEM;
+
+ BUG_ON(bitmask & ~NVMEM_HEAP_CARVEOUT_MASK);
+ n->heap_bit = bitmask;
+
+ if (_nvmap_init_carveout(&n->carveout, name, base, size)) {
+ kfree(n);
+ return -ENOMEM;
+ }
+
+ down_write(&nvmap_context.list_sem);
+
+ /* called inside the list_sem lock to ensure that the was_created
+ * array is protected against simultaneous access */
+ _nvmap_create_nvos_preserved(&n->carveout);
+ _nvmap_create_heap_attrs(n);
+
+ list_for_each_entry(l, &nvmap_context.heaps, heap_list) {
+ if (n->heap_bit > l->heap_bit) {
+ list_add_tail(&n->heap_list, &l->heap_list);
+ up_write(&nvmap_context.list_sem);
+ return 0;
+ }
+ }
+ list_add_tail(&n->heap_list, &nvmap_context.heaps);
+ up_write(&nvmap_context.list_sem);
+ return 0;
+}
+
+int nvmap_create_preserved_handle(unsigned long base, size_t size,
+ unsigned int key)
+{
+ struct nvmap_carveout_node *i;
+ struct nvmap_handle *h;
+
+ h = _nvmap_handle_create(NULL, size);
+ if (!h) return -ENOMEM;
+
+ down_read(&nvmap_context.list_sem);
+ list_for_each_entry(i, &nvmap_context.heaps, heap_list) {
+ struct nvmap_carveout *co = &i->carveout;
+ if (!_nvmap_try_create_preserved(co, h, base, size, key))
+ break;
+ }
+ up_read(&nvmap_context.list_sem);
+
+ /* the base may not be correct if block splitting fails */
+ if (!h->carveout.co_heap || h->carveout.base != base) {
+ _nvmap_handle_put(h);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* attempts to create a new carveout heap with a new usage bitmask by
+ * taking an allocation from a previous carveout with a different bitmask */
+static int nvmap_split_carveout_heap(struct nvmap_carveout *co, size_t size,
+ const char *name, unsigned int new_bitmask)
+{
+ struct nvmap_carveout_node *i, *n;
+ int idx = -1;
+ unsigned int blkbase, blksize;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) return -ENOMEM;
+ n->heap_bit = new_bitmask;
+
+ /* align split carveouts to 1M */
+ idx = nvmap_carveout_alloc(co, SZ_1M, size);
+ if (idx != -1) {
+ /* take the spin lock to avoid race conditions with
+ * intervening allocations triggering grow_block operations */
+ spin_lock(&co->lock);
+ blkbase = co->blocks[idx].base;
+ blksize = co->blocks[idx].size;
+ spin_unlock(&co->lock);
+
+ if (_nvmap_init_carveout(&n->carveout,name, blkbase, blksize)) {
+ nvmap_carveout_free(co, idx);
+ idx = -1;
+ } else {
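+ /* the split range now belongs to the new carveout: unlink the
+ * allocated block from the donor's block list and park it on the
+ * spare list so the donor never hands this range out again */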
+ spin_lock(&co->lock);
+ if (co->blocks[idx].prev) {
+ co->blocks[co->blocks[idx].prev].next =
+ co->blocks[idx].next;
+ }
+ if (co->blocks[idx].next) {
+ co->blocks[co->blocks[idx].next].prev =
+ co->blocks[idx].prev;
+ }
+ if (co->block_index==idx)
+ co->block_index = co->blocks[idx].next;
+ co->blocks[idx].next_free = -1;
+ co->blocks[idx].prev_free = -1;
+ co->blocks[idx].next = co->spare_index;
+ if (co->spare_index!=-1)
+ co->blocks[co->spare_index].prev = idx;
+ co->spare_index = idx;
+ spin_unlock(&co->lock);
+ }
+ }
+
+ if (idx==-1) {
+ kfree(n);
+ return -ENOMEM;
+ }
+
+ down_write(&nvmap_context.list_sem);
+ _nvmap_create_heap_attrs(n);
+ list_for_each_entry(i, &nvmap_context.heaps, heap_list) {
+ if (n->heap_bit > i->heap_bit) {
+ list_add_tail(&n->heap_list, &i->heap_list);
+ up_write(&nvmap_context.list_sem);
+ return 0;
+ }
+ }
+ list_add_tail(&n->heap_list, &nvmap_context.heaps);
+ up_write(&nvmap_context.list_sem);
+ return 0;
+}
+
+/* NvRmMemMgr APIs implemented on top of nvmap */
+
+#include <linux/freezer.h>
+
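+/* in this compatibility layer an NvRmMemHandle is simply a struct
+ * nvmap_handle pointer, and every handle is created against the kernel's
+ * init_data client rather than a per-process file descriptor */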
+NvU32 NvRmMemGetAddress(NvRmMemHandle hMem, NvU32 Offset)
+{
+ struct nvmap_handle *h = (struct nvmap_handle *)hMem;
+ unsigned long addr;
+
+ if (unlikely(!atomic_read(&h->pin) || !h->alloc)) return ~0ul;
+ if (unlikely(Offset >= h->orig_size)) return ~0UL;
+
+ if (h->heap_pgalloc && h->pgalloc.contig)
+ addr = page_to_phys(h->pgalloc.pages[0]);
+ else if (h->heap_pgalloc) {
+ BUG_ON(!h->pgalloc.area);
+ BUG_ON(!atomic_read(&h->pin));
+ addr = h->pgalloc.area->iovm_start;
+ } else
+ addr = h->carveout.base;
+
+ return (NvU32)addr+Offset;
+}
+
+void NvRmMemPinMult(NvRmMemHandle *hMems, NvU32 *addrs, NvU32 Count)
+{
+ struct nvmap_handle **h = (struct nvmap_handle **)hMems;
+ unsigned int i;
+ int ret;
+
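+ /* this API has no way to report failure, so retry until the pin
+ * succeeds; failed attempts are logged with a stack dump, and
+ * try_to_freeze() lets the task cooperate with the freezer while it
+ * loops */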
+ do {
+ ret = _nvmap_handle_pin_fast(Count, h);
+ if (ret && !try_to_freeze()) {
+ pr_err("%s: failed to pin handles\n", __func__);
+ dump_stack();
+ }
+ } while (ret);
+
+ for (i=0; i<Count; i++) {
+ addrs[i] = NvRmMemGetAddress(hMems[i], 0);
+ BUG_ON(addrs[i]==~0ul);
+ }
+}
+
+void NvRmMemUnpinMult(NvRmMemHandle *hMems, NvU32 Count)
+{
+ int do_wake = 0;
+ unsigned int i;
+
+ for (i=0; i<Count; i++) {
+ struct nvmap_handle *h = (struct nvmap_handle *)hMems[i];
+ BUG_ON(atomic_read(&h->pin)==0);
+ do_wake |= _nvmap_handle_unpin(h);
+ }
+
+ if (do_wake) wake_up(&nvmap_pin_wait);
+}
+
+NvU32 NvRmMemPin(NvRmMemHandle hMem)
+{
+ NvU32 addr;
+ NvRmMemPinMult(&hMem, &addr, 1);
+ return addr;
+}
+
+void NvRmMemUnpin(NvRmMemHandle hMem)
+{
+ NvRmMemUnpinMult(&hMem, 1);
+}
+
+void NvRmMemHandleFree(NvRmMemHandle hMem)
+{
+ _nvmap_do_free(&nvmap_context.init_data, (unsigned long)hMem);
+}
+
+NvError NvRmMemMap(NvRmMemHandle hMem, NvU32 Offset, NvU32 Size,
+ NvU32 Flags, void **pVirtAddr)
+{
+ struct nvmap_handle *h = (struct nvmap_handle *)hMem;
+ pgprot_t prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
+
+ BUG_ON(!h->alloc);
+
+ if (Offset+Size > h->size)
+ return NvError_BadParameter;
+
+ if (!h->kern_map && h->heap_pgalloc) {
+ BUG_ON(h->size & ~PAGE_MASK);
+ h->kern_map = vm_map_ram(h->pgalloc.pages,
+ h->size>>PAGE_SHIFT, -1, prot);
+ } else if (!h->kern_map) {
+ unsigned int size;
+ unsigned long addr;
+
+ addr = h->carveout.base;
+ size = h->size + (addr & ~PAGE_MASK);
+ addr &= PAGE_MASK;
+ size = (size + PAGE_SIZE - 1) & PAGE_MASK;
+
+ h->kern_map = ioremap_wc(addr, size);
+ if (h->kern_map) {
+ addr = h->carveout.base - addr;
+ h->kern_map += addr;
+ }
+ }
+
+ if (h->kern_map) {
+ *pVirtAddr = (h->kern_map + Offset);
+ return NvSuccess;
+ }
+
+ return NvError_InsufficientMemory;
+}
+
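+/* the kernel mapping created by NvRmMemMap() is cached in h->kern_map and
+ * re-used for later calls, so there is nothing to do on unmap here */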
+void NvRmMemUnmap(NvRmMemHandle hMem, void *pVirtAddr, NvU32 Size)
+{
+ return;
+}
+
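+/* a handle's ID is just its kernel pointer value; handing out the ID of an
+ * unowned (kernel-created) handle marks it global so that other clients
+ * can look it up by ID */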
+NvU32 NvRmMemGetId(NvRmMemHandle hMem)
+{
+ struct nvmap_handle *h = (struct nvmap_handle *)hMem;
+ if (!h->owner) h->global = true;
+ return (NvU32)h;
+}
+
+NvError NvRmMemHandleFromId(NvU32 id, NvRmMemHandle *hMem)
+{
+ struct nvmap_handle_ref *r;
+
+ int err = _nvmap_do_create(&nvmap_context.init_data,
+ NVMEM_IOC_FROM_ID, id, true, &r);
+
+ if (err || !r) return NvError_NotInitialized;
+
+ *hMem = (NvRmMemHandle)r->h;
+ return NvSuccess;
+}
+
+NvError NvRmMemHandleClaimPreservedHandle(NvRmDeviceHandle hRm,
+ NvU32 Key, NvRmMemHandle *hMem)
+{
+ struct nvmap_handle_ref *r;
+
+ int err = _nvmap_do_create(&nvmap_context.init_data,
+ NVMEM_IOC_CLAIM, (unsigned long)Key, true, &r);
+
+ if (err || !r) return NvError_NotInitialized;
+
+ *hMem = (NvRmMemHandle)r->h;
+ return NvSuccess;
+}
+
+NvError NvRmMemHandleCreate(NvRmDeviceHandle hRm,
+ NvRmMemHandle *hMem, NvU32 Size)
+{
+ struct nvmap_handle_ref *r;
+ int err = _nvmap_do_create(&nvmap_context.init_data,
+ NVMEM_IOC_CREATE, (unsigned long)Size, true, &r);
+
+ if (err || !r) return NvError_InsufficientMemory;
+ *hMem = (NvRmMemHandle)r->h;
+ return NvSuccess;
+}
+
+NvError NvRmMemAlloc(NvRmMemHandle hMem, const NvRmHeap *Heaps,
+ NvU32 NumHeaps, NvU32 Alignment, NvOsMemAttribute Coherency)
+{
+ unsigned int heap_mask = 0;
+ unsigned int flags;
+ int err;
+
+ BUG_ON(Alignment & (Alignment-1));
+
+ if (Coherency == NvOsMemAttribute_WriteBack)
+ flags = NVMEM_HANDLE_INNER_CACHEABLE;
+ else
+ flags = NVMEM_HANDLE_WRITE_COMBINE;
+
+ if (!NumHeaps || !Heaps)
+ heap_mask = (NVMEM_HEAP_SYSMEM | NVMEM_HEAP_CARVEOUT_GENERIC);
+ else {
+ unsigned int i;
+ for (i=0; i<NumHeaps; i++) {
+ switch (Heaps[i]) {
+ case NvRmHeap_GART:
+ heap_mask |= NVMEM_HEAP_IOVMM;
+ break;
+ case NvRmHeap_External:
+ heap_mask |= NVMEM_HEAP_SYSMEM;
+ break;
+ case NvRmHeap_ExternalCarveOut:
+ heap_mask |= NVMEM_HEAP_CARVEOUT_GENERIC;
+ break;
+ case NvRmHeap_IRam:
+ heap_mask |= NVMEM_HEAP_CARVEOUT_IRAM;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ if (!heap_mask) return NvError_InsufficientMemory;
+
+ err = _nvmap_do_alloc(&nvmap_context.init_data, (unsigned long)hMem,
+ heap_mask, (size_t)Alignment, flags, false, true);
+
+ if (err) return NvError_InsufficientMemory;
+ return NvSuccess;
+}
+
+void NvRmMemReadStrided(NvRmMemHandle hMem, NvU32 Offset, NvU32 SrcStride,
+ void *pDst, NvU32 DstStride, NvU32 ElementSize, NvU32 Count)
+{
+ ssize_t bytes = 0;
+
+ bytes = _nvmap_do_rw_handle((struct nvmap_handle *)hMem, true,
+ false, Offset, (unsigned long)pDst, SrcStride,
+ DstStride, ElementSize, Count);
+
+ BUG_ON(bytes != (ssize_t)(Count*ElementSize));
+}
+
+void NvRmMemWriteStrided(NvRmMemHandle hMem, NvU32 Offset, NvU32 DstStride,
+ const void *pSrc, NvU32 SrcStride, NvU32 ElementSize, NvU32 Count)
+{
+ ssize_t bytes = 0;
+
+ bytes = _nvmap_do_rw_handle((struct nvmap_handle *)hMem, false,
+ false, Offset, (unsigned long)pSrc, DstStride,
+ SrcStride, ElementSize, Count);
+
+ BUG_ON(bytes != (ssize_t)(Count*ElementSize));
+}
+
+NvU32 NvRmMemGetSize(NvRmMemHandle hMem)
+{
+ struct nvmap_handle *h = (struct nvmap_handle *)hMem;
+ return h->orig_size;
+}
+
+NvRmHeap NvRmMemGetHeapType(NvRmMemHandle hMem, NvU32 *BaseAddr)
+{
+ struct nvmap_handle *h = (struct nvmap_handle *)hMem;
+ NvRmHeap heap;
+
+ if (!h->alloc) {
+ *BaseAddr = ~0ul;
+ return (NvRmHeap)0;
+ }
+
+ if (h->heap_pgalloc && !h->pgalloc.contig)
+ heap = NvRmHeap_GART;
+ else if (h->heap_pgalloc)
+ heap = NvRmHeap_External;
+ else if ((h->carveout.base & 0xf0000000ul) == 0x40000000ul)
+ heap = NvRmHeap_IRam;
+ else
+ heap = NvRmHeap_ExternalCarveOut;
+
+ if (h->heap_pgalloc && h->pgalloc.contig)
+ *BaseAddr = (NvU32)page_to_phys(h->pgalloc.pages[0]);
+ else if (h->heap_pgalloc && atomic_read(&h->pin))
+ *BaseAddr = h->pgalloc.area->iovm_start;
+ else if (h->heap_pgalloc)
+ *BaseAddr = ~0ul;
+ else
+ *BaseAddr = (NvU32)h->carveout.base;
+
+ return heap;
+}
+
+void NvRmMemCacheMaint(NvRmMemHandle hMem, void *pMapping,
+ NvU32 Size, NvBool Writeback, NvBool Inv)
+{
+ struct nvmap_handle *h = (struct nvmap_handle *)hMem;
+ unsigned long start;
+ unsigned int op;
+
+ if (!h->kern_map || h->flags==NVMEM_HANDLE_UNCACHEABLE ||
+ h->flags==NVMEM_HANDLE_WRITE_COMBINE) return;
+
+ if (!Writeback && !Inv) return;
+
+ if (Writeback && Inv) op = NVMEM_CACHE_OP_WB_INV;
+ else if (Writeback) op = NVMEM_CACHE_OP_WB;
+ else op = NVMEM_CACHE_OP_INV;
+
+ start = (unsigned long)pMapping - (unsigned long)h->kern_map;
+
+ _nvmap_do_cache_maint(h, start, start+Size, op, true);
+ return;
+}
+
+NvU32 NvRmMemGetAlignment(NvRmMemHandle hMem)
+{
+ struct nvmap_handle *h = (struct nvmap_handle *)hMem;
+ return _nvmap_do_get_param(h, NVMEM_HANDLE_PARAM_ALIGNMENT);
+}
+
+NvError NvRmMemGetStat(NvRmMemStat Stat, NvS32 *Result)
+{
+ unsigned long total_co = 0;
+ unsigned long free_co = 0;
+ unsigned long max_free = 0;
+ struct nvmap_carveout_node *n;
+
+ down_read(&nvmap_context.list_sem);
+ list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
+
+ if (!(n->heap_bit & NVMEM_HEAP_CARVEOUT_GENERIC)) continue;
+ total_co += _nvmap_carveout_blockstat(&n->carveout,
+ CARVEOUT_STAT_TOTAL_SIZE);
+ free_co += _nvmap_carveout_blockstat(&n->carveout,
+ CARVEOUT_STAT_FREE_SIZE);
+ max_free = max(max_free,
+ _nvmap_carveout_blockstat(&n->carveout,
+ CARVEOUT_STAT_LARGEST_FREE));
+ }
+ up_read(&nvmap_context.list_sem);
+
+ if (Stat==NvRmMemStat_TotalCarveout) {
+ *Result = (NvU32)total_co;
+ return NvSuccess;
+ } else if (Stat==NvRmMemStat_UsedCarveout) {
+ *Result = (NvU32)total_co - (NvU32)free_co;
+ return NvSuccess;
+ } else if (Stat==NvRmMemStat_LargestFreeCarveoutBlock) {
+ *Result = (NvU32)max_free;
+ return NvSuccess;
+ }
+
+ return NvError_BadParameter;
+}
+
+NvU8 NvRmMemRd08(NvRmMemHandle hMem, NvU32 Offset)
+{
+ NvU8 val;
+ NvRmMemRead(hMem, Offset, &val, sizeof(val));
+ return val;
+}
+
+NvU16 NvRmMemRd16(NvRmMemHandle hMem, NvU32 Offset)
+{
+ NvU16 val;
+ NvRmMemRead(hMem, Offset, &val, sizeof(val));
+ return val;
+}
+
+NvU32 NvRmMemRd32(NvRmMemHandle hMem, NvU32 Offset)
+{
+ NvU32 val;
+ NvRmMemRead(hMem, Offset, &val, sizeof(val));
+ return val;
+}
+
+void NvRmMemWr08(NvRmMemHandle hMem, NvU32 Offset, NvU8 Data)
+{
+ NvRmMemWrite(hMem, Offset, &Data, sizeof(Data));
+}
+
+void NvRmMemWr16(NvRmMemHandle hMem, NvU32 Offset, NvU16 Data)
+{
+ NvRmMemWrite(hMem, Offset, &Data, sizeof(Data));
+}
+
+void NvRmMemWr32(NvRmMemHandle hMem, NvU32 Offset, NvU32 Data)
+{
+ NvRmMemWrite(hMem, Offset, &Data, sizeof(Data));
+}
+
+void NvRmMemRead(NvRmMemHandle hMem, NvU32 Offset, void *pDst, NvU32 Size)
+{
+ NvRmMemReadStrided(hMem, Offset, Size, pDst, Size, Size, 1);
+}
+
+void NvRmMemWrite(NvRmMemHandle hMem, NvU32 Offset,
+ const void *pSrc, NvU32 Size)
+{
+ NvRmMemWriteStrided(hMem, Offset, Size, pSrc, Size, Size, 1);
+}
+
+void NvRmMemMove(NvRmMemHandle dstHMem, NvU32 dstOffset,
+ NvRmMemHandle srcHMem, NvU32 srcOffset, NvU32 Size)
+{
+ while (Size--) {
+ NvU8 tmp = NvRmMemRd08(srcHMem, srcOffset);
+ NvRmMemWr08(dstHMem, dstOffset, tmp);
+ dstOffset++;
+ srcOffset++;
+ }
+}
+
+NvU32 NvRmMemGetCacheLineSize(void)
+{
+ return 32;
+}
+
+void *NvRmHostAlloc(size_t size)
+{
+ return NvOsAlloc(size);
+}
+
+void NvRmHostFree(void *ptr)
+{
+ NvOsFree(ptr);
+}
+
+NvError NvRmMemMapIntoCallerPtr(NvRmMemHandle hMem, void *pCallerPtr,
+ NvU32 Offset, NvU32 Size)
+{
+ return NvError_NotSupported;
+}
+
+NvError NvRmMemHandlePreserveHandle(NvRmMemHandle hMem, NvU32 *pKey)
+{
+ return NvError_NotSupported;
+}
diff --git a/include/linux/tegra_devices.h b/include/linux/tegra_devices.h
index adbfffe92f53..4f1d4c8fb267 100755
--- a/include/linux/tegra_devices.h
+++ b/include/linux/tegra_devices.h
@@ -84,5 +84,9 @@ struct tegra_sdio_platform_data {
NvU32 StartOffset; /* start sector offset to MBR for the card */
};
+#ifdef CONFIG_DEVNVMAP
+int nvmap_add_carveout_heap(unsigned long base, size_t size,
+ const char *name, unsigned int bitmask);
+#endif
#endif