Diffstat (limited to 'drivers')
-rw-r--r--  drivers/crypto/Kconfig | 9
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/tegra-aes.c | 1148
-rw-r--r--  drivers/crypto/tegra-aes.h | 114
-rw-r--r--  drivers/i2c/busses/Kconfig | 7
-rw-r--r--  drivers/i2c/busses/Makefile | 1
-rwxr-xr-x  drivers/i2c/busses/i2c-tegra.c | 758
-rw-r--r--  drivers/input/touchscreen/Kconfig | 13
-rw-r--r--  drivers/input/touchscreen/Makefile | 1
-rw-r--r--  drivers/input/touchscreen/panjit_i2c.c | 323
-rw-r--r--  drivers/media/video/Kconfig | 1
-rw-r--r--  drivers/media/video/Makefile | 1
-rw-r--r--  drivers/media/video/tegra/Kconfig | 10
-rw-r--r--  drivers/media/video/tegra/Makefile | 2
-rw-r--r--  drivers/media/video/tegra/avp/Kconfig | 25
-rw-r--r--  drivers/media/video/tegra/avp/Makefile | 6
-rw-r--r--  drivers/media/video/tegra/avp/avp.c | 1736
-rw-r--r--  drivers/media/video/tegra/avp/avp.h | 32
-rw-r--r--  drivers/media/video/tegra/avp/avp_msg.h | 342
-rw-r--r--  drivers/media/video/tegra/avp/avp_svc.c | 732
-rw-r--r--  drivers/media/video/tegra/avp/headavp.S | 66
-rw-r--r--  drivers/media/video/tegra/avp/headavp.h | 41
-rw-r--r--  drivers/media/video/tegra/avp/tegra_rpc.c | 796
-rw-r--r--  drivers/media/video/tegra/avp/trpc.h | 80
-rw-r--r--  drivers/media/video/tegra/avp/trpc_local.c | 333
-rw-r--r--  drivers/media/video/tegra/avp/trpc_sema.c | 220
-rw-r--r--  drivers/media/video/tegra/avp/trpc_sema.h | 28
-rw-r--r--  drivers/media/video/tegra/tegra_camera.c | 368
-rw-r--r--  drivers/mfd/Kconfig | 4
-rw-r--r--  drivers/mfd/tps6586x.c | 208
-rw-r--r--  drivers/mmc/core/mmc.c | 7
-rw-r--r--  drivers/mmc/core/sdio.c | 10
-rw-r--r--  drivers/mmc/host/Kconfig | 6
-rw-r--r--  drivers/mmc/host/Makefile | 1
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 226
-rw-r--r--  drivers/mmc/host/sdhci.c | 120
-rw-r--r--  drivers/mmc/host/sdhci.h | 21
-rw-r--r--  drivers/mtd/devices/Kconfig | 6
-rw-r--r--  drivers/mtd/devices/Makefile | 1
-rw-r--r--  drivers/mtd/devices/tegra_nand.c | 1605
-rw-r--r--  drivers/mtd/devices/tegra_nand.h | 147
-rw-r--r--  drivers/regulator/tps6586x-regulator.c | 33
-rw-r--r--  drivers/rtc/Kconfig | 7
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-tps6586x.c | 325
-rw-r--r--  drivers/serial/Kconfig | 8
-rw-r--r--  drivers/serial/Makefile | 1
-rw-r--r--  drivers/serial/tegra_hsuart.c | 1319
-rw-r--r--  drivers/spi/Kconfig | 7
-rw-r--r--  drivers/spi/Makefile | 1
-rw-r--r--  drivers/spi/spi_tegra.c | 676
-rw-r--r--  drivers/usb/Kconfig | 1
-rw-r--r--  drivers/usb/gadget/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/Makefile | 3
-rw-r--r--  drivers/usb/gadget/fsl_tegra_udc.c | 97
-rw-r--r--  drivers/usb/gadget/fsl_udc_core.c | 457
-rw-r--r--  drivers/usb/gadget/fsl_usb2_udc.h | 29
-rw-r--r--  drivers/usb/host/Kconfig | 8
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 8
-rw-r--r--  drivers/usb/host/ehci-hub.c | 1
-rw-r--r--  drivers/usb/host/ehci-q.c | 2
-rw-r--r--  drivers/usb/host/ehci-sched.c | 14
-rw-r--r--  drivers/usb/host/ehci-tegra.c | 685
-rw-r--r--  drivers/usb/host/ehci.h | 3
-rw-r--r--  drivers/usb/otg/Kconfig | 8
-rw-r--r--  drivers/usb/otg/Makefile | 1
-rw-r--r--  drivers/usb/otg/tegra-otg.c | 393
-rw-r--r--  drivers/video/Kconfig | 1
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/fbmon.c | 86
-rw-r--r--  drivers/video/modedb.c | 455
-rw-r--r--  drivers/video/tegra/Kconfig | 65
-rw-r--r--  drivers/video/tegra/Makefile | 4
-rw-r--r--  drivers/video/tegra/dc/Makefile | 4
-rw-r--r--  drivers/video/tegra/dc/dc.c | 1309
-rw-r--r--  drivers/video/tegra/dc/dc_priv.h | 140
-rw-r--r--  drivers/video/tegra/dc/dc_reg.h | 415
-rw-r--r--  drivers/video/tegra/dc/edid.c | 276
-rw-r--r--  drivers/video/tegra/dc/edid.h | 31
-rw-r--r--  drivers/video/tegra/dc/hdmi.c | 1102
-rw-r--r--  drivers/video/tegra/dc/hdmi.h | 183
-rw-r--r--  drivers/video/tegra/dc/hdmi_reg.h | 430
-rw-r--r--  drivers/video/tegra/dc/rgb.c | 92
-rw-r--r--  drivers/video/tegra/fb.c | 807
-rw-r--r--  drivers/video/tegra/host/Makefile | 13
-rw-r--r--  drivers/video/tegra/host/bus.c | 571
-rw-r--r--  drivers/video/tegra/host/debug.c | 270
-rw-r--r--  drivers/video/tegra/host/dev.c | 790
-rw-r--r--  drivers/video/tegra/host/dev.h | 52
-rw-r--r--  drivers/video/tegra/host/nvhost_3dctx.c | 543
-rw-r--r--  drivers/video/tegra/host/nvhost_acm.c | 188
-rw-r--r--  drivers/video/tegra/host/nvhost_acm.h | 76
-rw-r--r--  drivers/video/tegra/host/nvhost_cdma.c | 650
-rw-r--r--  drivers/video/tegra/host/nvhost_cdma.h | 103
-rw-r--r--  drivers/video/tegra/host/nvhost_channel.c | 249
-rw-r--r--  drivers/video/tegra/host/nvhost_channel.h | 89
-rw-r--r--  drivers/video/tegra/host/nvhost_cpuaccess.c | 117
-rw-r--r--  drivers/video/tegra/host/nvhost_cpuaccess.h | 71
-rw-r--r--  drivers/video/tegra/host/nvhost_hardware.h | 233
-rw-r--r--  drivers/video/tegra/host/nvhost_hwctx.h | 88
-rw-r--r--  drivers/video/tegra/host/nvhost_intr.c | 477
-rw-r--r--  drivers/video/tegra/host/nvhost_intr.h | 102
-rw-r--r--  drivers/video/tegra/host/nvhost_mpectx.c | 23
-rw-r--r--  drivers/video/tegra/host/nvhost_syncpt.c | 256
-rw-r--r--  drivers/video/tegra/host/nvhost_syncpt.h | 150
-rw-r--r--  drivers/video/tegra/nvmap/Makefile | 6
-rw-r--r--  drivers/video/tegra/nvmap/nvmap.c | 725
-rw-r--r--  drivers/video/tegra/nvmap/nvmap.h | 238
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_dev.c | 1106
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_handle.c | 518
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_heap.c | 812
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_heap.h | 64
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_ioctl.c | 630
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_ioctl.h | 159
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_mru.c | 194
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_mru.h | 84
-rw-r--r--  drivers/w1/masters/Kconfig | 7
-rw-r--r--  drivers/w1/masters/Makefile | 1
-rw-r--r--  drivers/w1/masters/tegra_w1.c | 491
-rw-r--r--  drivers/watchdog/Kconfig | 11
-rw-r--r--  drivers/watchdog/Makefile | 1
-rw-r--r--  drivers/watchdog/tegra_wdt.c | 381
122 files changed, 29455 insertions(+), 131 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ea0b3863ad0f..f8ee741b16f0 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -243,4 +243,13 @@ config CRYPTO_DEV_OMAP_SHAM
OMAP processors have SHA1/MD5 hw accelerator. Select this if you
want to use the OMAP module for SHA1/MD5 algorithms.
+config CRYPTO_DEV_TEGRA_AES
+ tristate "Support for TEGRA AES hw engine"
+ depends on ARCH_TEGRA_2x_SOC
+ select CRYPTO_AES
+ select TEGRA_ARB_SEMAPHORE
+ help
+ Tegra processors have an AES hardware accelerator. Select this if
+ you want to use the Tegra module for AES algorithms.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6dbbe00c4524..b00ec7817532 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
new file mode 100644
index 000000000000..d94046a4bb95
--- /dev/null
+++ b/drivers/crypto/tegra-aes.c
@@ -0,0 +1,1148 @@
+/*
+ * drivers/crypto/tegra-aes.c
+ *
+ * aes driver for NVIDIA tegra aes hardware
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+
+#include <mach/arb_sema.h>
+#include <mach/clk.h>
+
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+#include <crypto/internal/rng.h>
+
+#include "tegra-aes.h"
+
+#define FLAGS_MODE_MASK 0x000f
+#define FLAGS_ENCRYPT BIT(0)
+#define FLAGS_CBC BIT(1)
+#define FLAGS_GIV BIT(2)
+#define FLAGS_RNG BIT(3)
+#define FLAGS_NEW_KEY BIT(4)
+#define FLAGS_NEW_IV BIT(5)
+#define FLAGS_INIT BIT(6)
+#define FLAGS_FAST BIT(7)
+#define FLAGS_BUSY 8
+
+/*
+ * Defines the maximum number of bytes the AES engine processes in one
+ * go, sized so that one pass takes about 1 msec. The engine spends
+ * roughly 176 cycles per 16-byte block (~11 cycles/byte). The CPU may
+ * hold the BSE for 1 msec, giving about 216K AVP/BSE cycles, which is
+ * enough for 216K/11 ~= 19KB. Based on this, AES_HW_DMA_BUFFER_SIZE_BYTES
+ * is configured to 16KB.
+ */
+#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000
+
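
As a sanity check on the sizing comment above, the arithmetic can be written out directly. This is an illustration, not driver code; the cycle numbers are taken from the comment itself.

    /* ~176 cycles per 16-byte block => ~11 cycles/byte. A 1 msec hold on
     * the BSE gives ~216K AVP/BSE cycles (per the comment above), so:
     *   216000 cycles / 11 cycles-per-byte ~= 19636 bytes,
     * and the buffer is rounded down to a power of two:
     *   0x4000 = 16384 bytes = 16KB. */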
+/*
+ * The key table length is 64 bytes
+ * (this includes the first up to 32 bytes of key, 16 bytes of original
+ * initial vector and 16 bytes of updated initial vector).
+ */
+#define AES_HW_KEY_TABLE_LENGTH_BYTES 64
+
+#define AES_HW_IV_SIZE 16
+#define AES_HW_KEYSCHEDULE_LEN 256
+#define ARB_SEMA_TIMEOUT 500
+
+/*
+ * The memory being used is divided as follows:
+ * 1. Key - 32 bytes
+ * 2. Original IV - 16 bytes
+ * 3. Updated IV - 16 bytes
+ * 4. Key schedule - 256 bytes
+ *
+ * 1+2+3 constitute the hw key table.
+ */
+#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)
+
+#define DEFAULT_RNG_BLK_SZ 16
+
+/* As of now, only 5 commands are used for AES encryption/decryption */
+#define AES_HW_MAX_ICQ_LENGTH 5
+
+#define ICQBITSHIFT_BLKCNT 0
+
+/* memdma_vd command */
+#define MEMDMA_DIR_DTOVRAM 0
+#define MEMDMA_DIR_VTODRAM 1
+#define MEMDMABITSHIFT_DIR 25
+#define MEMDMABITSHIFT_NUM_WORDS 12
+
+/* Define AES Interactive command Queue commands Bit positions */
+enum {
+ ICQBITSHIFT_KEYTABLEADDR = 0,
+ ICQBITSHIFT_KEYTABLEID = 17,
+ ICQBITSHIFT_VRAMSEL = 23,
+ ICQBITSHIFT_TABLESEL = 24,
+ ICQBITSHIFT_OPCODE = 26,
+};
+
+/* Define Ucq opcodes required for AES operation */
+enum {
+ UCQOPCODE_BLKSTARTENGINE = 0x0E,
+ UCQOPCODE_DMASETUP = 0x10,
+ UCQOPCODE_DMACOMPLETE = 0x11,
+ UCQOPCODE_SETTABLE = 0x15,
+ UCQOPCODE_MEMDMAVD = 0x22,
+};
+
+/* Define Aes command values */
+enum {
+ UCQCMD_VRAM_SEL = 0x1,
+ UCQCMD_CRYPTO_TABLESEL = 0x3,
+ UCQCMD_KEYSCHEDTABLESEL = 0x4,
+ UCQCMD_KEYTABLESEL = 0x8,
+};
+
+#define UCQCMD_KEYTABLEADDRMASK 0x1FFFF
+
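
To see how these opcode and field definitions combine, here is a sketch of composing a single ICQ command word for a hypothetical key slot 2; it mirrors the SETTABLE construction in aes_set_key() further down:

    u32 cmd = (UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE) |
              (UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL) |
              (UCQCMD_VRAM_SEL << ICQBITSHIFT_VRAMSEL) |
              ((UCQCMD_KEYTABLESEL | 2) << ICQBITSHIFT_KEYTABLEID);
    /* the word is then pushed to the engine through ICMDQUE_WR */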
+#define AES_NR_KEYSLOTS 8
+#define SSK_SLOT_NUM 4
+
+struct tegra_aes_slot {
+ struct list_head node;
+ int slot_num;
+ bool available;
+};
+
+static struct tegra_aes_slot ssk = {
+ .slot_num = SSK_SLOT_NUM,
+ .available = true,
+};
+
+struct tegra_aes_reqctx {
+ unsigned long mode;
+};
+
+#define TEGRA_AES_QUEUE_LENGTH 50
+
+struct tegra_aes_dev {
+ struct device *dev;
+ unsigned long phys_base;
+ void __iomem *io_base;
+ dma_addr_t ivkey_phys_base;
+ void __iomem *ivkey_base;
+ struct clk *iclk;
+ struct clk *pclk;
+ struct tegra_aes_ctx *ctx;
+ unsigned long flags;
+ struct completion op_complete;
+ u32 *buf_in;
+ dma_addr_t dma_buf_in;
+ u32 *buf_out;
+ dma_addr_t dma_buf_out;
+ u8 *iv;
+ u8 dt[DEFAULT_RNG_BLK_SZ];
+ int ivlen;
+ u64 ctr;
+ int res_id;
+ spinlock_t lock;
+ struct crypto_queue queue;
+ struct tegra_aes_slot *slots;
+ struct ablkcipher_request *req;
+ size_t total;
+ struct scatterlist *in_sg;
+ size_t in_offset;
+ struct scatterlist *out_sg;
+ size_t out_offset;
+};
+
+static struct tegra_aes_dev *aes_dev;
+
+struct tegra_aes_ctx {
+ struct tegra_aes_dev *dd;
+ unsigned long flags;
+ struct tegra_aes_slot *slot;
+ int keylen;
+};
+
+static struct tegra_aes_ctx rng_ctx = {
+ .flags = FLAGS_NEW_KEY,
+ .keylen = AES_KEYSIZE_128,
+};
+
+/* keep registered devices data here */
+static LIST_HEAD(dev_list);
+static DEFINE_SPINLOCK(list_lock);
+static DEFINE_MUTEX(aes_lock);
+
+static void aes_workqueue_handler(struct work_struct *work);
+static DECLARE_WORK(aes_wq, aes_workqueue_handler);
+
+extern unsigned long long tegra_chip_uid(void);
+
+static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
+{
+ return readl(dd->io_base + offset);
+}
+
+static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
+{
+ writel(val, dd->io_base + offset);
+}
+
+static int aes_hw_init(struct tegra_aes_dev *dd)
+{
+ int ret = 0;
+
+ ret = clk_enable(dd->pclk);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: pclock enable fail(%d)\n", __func__, ret);
+ return ret;
+ }
+
+ tegra_periph_reset_assert(dd->iclk);
+ udelay(50);
+ tegra_periph_reset_deassert(dd->iclk);
+ udelay(50);
+
+ ret = clk_enable(dd->iclk);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: iclock enable fail(%d)\n", __func__, ret);
+ clk_disable(dd->pclk);
+ return ret;
+ }
+
+ aes_writel(dd, 0x33, INT_ENB);
+ return ret;
+}
+
+static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
+ int nblocks, int mode, bool upd_iv)
+{
+ u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
+ int qlen = 0, i, eng_busy, icq_empty, dma_busy, ret = 0;
+ u32 value;
+
+ ret = aes_hw_init(dd);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret);
+ return ret;
+ }
+
+ cmdq[qlen++] = UCQOPCODE_DMASETUP << ICQBITSHIFT_OPCODE;
+ cmdq[qlen++] = in_addr;
+ cmdq[qlen++] = UCQOPCODE_BLKSTARTENGINE << ICQBITSHIFT_OPCODE |
+ (nblocks-1) << ICQBITSHIFT_BLKCNT;
+ cmdq[qlen++] = UCQOPCODE_DMACOMPLETE << ICQBITSHIFT_OPCODE;
+
+ value = aes_readl(dd, CMDQUE_CONTROL);
+ /* access SDRAM through AHB */
+ value &= ~CMDQ_CTRL_SRC_STM_SEL_FIELD;
+ value &= ~CMDQ_CTRL_DST_STM_SEL_FIELD;
+ value |= (CMDQ_CTRL_SRC_STM_SEL_FIELD | CMDQ_CTRL_DST_STM_SEL_FIELD |
+ CMDQ_CTRL_ICMDQEN_FIELD);
+ aes_writel(dd, value, CMDQUE_CONTROL);
+ dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);
+
+ value = 0;
+ value |= CONFIG_ENDIAN_ENB_FIELD;
+ aes_writel(dd, value, CONFIG);
+ dev_dbg(dd->dev, "config=0x%x", value);
+
+ value = aes_readl(dd, SECURE_CONFIG_EXT);
+ value &= ~SECURE_OFFSET_CNT_FIELD;
+ aes_writel(dd, value, SECURE_CONFIG_EXT);
+ dev_dbg(dd->dev, "secure_cfg_xt=0x%x", value);
+
+ if (mode & FLAGS_CBC) {
+ value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
+ ((dd->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
+ (((mode & FLAGS_ENCRYPT) ? 2 : 3)
+ << SECURE_XOR_POS_SHIFT) |
+ (0 << SECURE_INPUT_SEL_SHIFT) |
+ (((mode & FLAGS_ENCRYPT) ? 2 : 3)
+ << SECURE_VCTRAM_SEL_SHIFT) |
+ ((mode & FLAGS_ENCRYPT) ? 1 : 0)
+ << SECURE_CORE_SEL_SHIFT |
+ (0 << SECURE_RNG_ENB_SHIFT) |
+ (0 << SECURE_HASH_ENB_SHIFT));
+ } else if (mode & FLAGS_RNG) {
+ value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
+ ((dd->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
+ (0 << SECURE_XOR_POS_SHIFT) |
+ (0 << SECURE_INPUT_SEL_SHIFT) |
+ ((mode & FLAGS_ENCRYPT) ? 1 : 0)
+ << SECURE_CORE_SEL_SHIFT |
+ (1 << SECURE_RNG_ENB_SHIFT) |
+ (0 << SECURE_HASH_ENB_SHIFT));
+ } else {
+ value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
+ ((dd->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
+ (0 << SECURE_XOR_POS_SHIFT) |
+ (0 << SECURE_INPUT_SEL_SHIFT) |
+ (((mode & FLAGS_ENCRYPT) ? 1 : 0)
+ << SECURE_CORE_SEL_SHIFT) |
+ (0 << SECURE_RNG_ENB_SHIFT) |
+ (0 << SECURE_HASH_ENB_SHIFT));
+ }
+ dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
+ aes_writel(dd, value, SECURE_INPUT_SELECT);
+
+ aes_writel(dd, out_addr, SECURE_DEST_ADDR);
+
+ for (i = 0; i < qlen - 1; i++) {
+ do {
+ value = aes_readl(dd, INTR_STATUS);
+ eng_busy = value & (0x1);
+ icq_empty = value & (0x1<<3);
+ dma_busy = value & (0x1<<23);
+ } while (eng_busy && !icq_empty && dma_busy);
+ aes_writel(dd, cmdq[i], ICMDQUE_WR);
+ }
+
+ INIT_COMPLETION(dd->op_complete);
+ ret = wait_for_completion_timeout(&dd->op_complete, msecs_to_jiffies(150));
+ if (ret == 0) {
+ dev_err(dd->dev, "timed out (0x%x)\n",
+ aes_readl(dd, INTR_STATUS));
+ clk_disable(dd->iclk);
+ clk_disable(dd->pclk);
+ return -ETIMEDOUT;
+ }
+
+ aes_writel(dd, cmdq[qlen - 1], ICMDQUE_WR);
+ do {
+ value = aes_readl(dd, INTR_STATUS);
+ eng_busy = value & (0x1);
+ icq_empty = value & (0x1<<3);
+ dma_busy = value & (0x1<<23);
+ } while (eng_busy && !icq_empty && dma_busy);
+
+ clk_disable(dd->iclk);
+ clk_disable(dd->pclk);
+ return 0;
+}
+
+static void aes_release_key_slot(struct tegra_aes_dev *dd)
+{
+ spin_lock(&list_lock);
+ dd->ctx->slot->available = true;
+ dd->ctx->slot = NULL;
+ spin_unlock(&list_lock);
+}
+
+static struct tegra_aes_slot *aes_find_key_slot(struct tegra_aes_dev *dd)
+{
+ struct tegra_aes_slot *slot = NULL;
+ bool found = false;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(slot, &dev_list, node) {
+ dev_dbg(dd->dev, "empty:%d, num:%d\n", slot->available,
+ slot->slot_num);
+ if (slot->available) {
+ slot->available = false;
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&list_lock);
+ return found ? slot : NULL;
+}
+
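
The two helpers above implement a simple checkout scheme over the hardware key slots. A typical lifecycle, as driven by tegra_aes_setkey() below, looks like this (sketch only; when a context owns no slot at all, aes_set_key() falls back to the preloaded SSK slot instead):

    struct tegra_aes_slot *slot = aes_find_key_slot(dd); /* marks it unavailable */
    if (!slot)
            return -ENOMEM;           /* tegra_aes_setkey() does the same */
    ctx->slot = slot;
    /* ... program the key, run operations ... */
    aes_release_key_slot(dd);         /* marks ctx->slot available again */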
+static int aes_set_key(struct tegra_aes_dev *dd)
+{
+ u32 value, cmdq[2];
+ struct tegra_aes_ctx *ctx = dd->ctx;
+ int i, eng_busy, icq_empty, dma_busy, ret = 0;
+ bool use_ssk = false;
+
+ if (!ctx) {
+ dev_err(dd->dev, "%s: context invalid\n", __func__);
+ return -EINVAL;
+ }
+
+ /* use ssk? */
+ if (!dd->ctx->slot) {
+ dev_dbg(dd->dev, "using ssk");
+ dd->ctx->slot = &ssk;
+ use_ssk = true;
+ }
+
+ ret = aes_hw_init(dd);
+ if (ret < 0) {
+ dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret);
+ return ret;
+ }
+
+ /* disable key read from hw */
+ value = aes_readl(dd, SECURE_SEC_SEL0+(ctx->slot->slot_num*4));
+ value &= ~SECURE_SEL0_KEYREAD_ENB0_FIELD;
+ aes_writel(dd, value, SECURE_SEC_SEL0+(ctx->slot->slot_num*4));
+
+ /* enable key schedule generation in hardware */
+ value = aes_readl(dd, SECURE_CONFIG_EXT);
+ value &= ~SECURE_KEY_SCH_DIS_FIELD;
+ aes_writel(dd, value, SECURE_CONFIG_EXT);
+
+ /* select the key slot */
+ value = aes_readl(dd, SECURE_CONFIG);
+ value &= ~SECURE_KEY_INDEX_FIELD;
+ value |= (ctx->slot->slot_num << SECURE_KEY_INDEX_SHIFT);
+ aes_writel(dd, value, SECURE_CONFIG);
+
+ if (use_ssk)
+ goto out;
+
+ /* copy the key table from sdram to vram */
+ cmdq[0] = 0;
+ cmdq[0] = UCQOPCODE_MEMDMAVD << ICQBITSHIFT_OPCODE |
+ (MEMDMA_DIR_DTOVRAM << MEMDMABITSHIFT_DIR) |
+ (AES_HW_KEY_TABLE_LENGTH_BYTES/sizeof(u32))
+ << MEMDMABITSHIFT_NUM_WORDS;
+ cmdq[1] = (u32)dd->ivkey_phys_base;
+ for (i = 0; i < ARRAY_SIZE(cmdq); i++) {
+ aes_writel(dd, cmdq[i], ICMDQUE_WR);
+ do {
+ value = aes_readl(dd, INTR_STATUS);
+ eng_busy = value & (0x1);
+ icq_empty = value & (0x1<<3);
+ dma_busy = value & (0x1<<23);
+ } while (eng_busy && !icq_empty && dma_busy);
+ }
+
+ /* settable command to get key into internal registers */
+ value = 0;
+ value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
+ UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
+ UCQCMD_VRAM_SEL << ICQBITSHIFT_VRAMSEL |
+ (UCQCMD_KEYTABLESEL | ctx->slot->slot_num)
+ << ICQBITSHIFT_KEYTABLEID;
+ aes_writel(dd, value, ICMDQUE_WR);
+ do {
+ value = aes_readl(dd, INTR_STATUS);
+ eng_busy = value & (0x1);
+ icq_empty = value & (0x1<<3);
+ } while (eng_busy && !icq_empty);
+
+out:
+ clk_disable(dd->iclk);
+ clk_disable(dd->pclk);
+ return 0;
+}
+
+static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct tegra_aes_ctx *ctx;
+ struct tegra_aes_reqctx *rctx;
+ struct ablkcipher_request *req;
+ unsigned long flags;
+ int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
+ int ret = 0, nblocks, total;
+ int count = 0;
+ dma_addr_t addr_in, addr_out;
+ struct scatterlist *in_sg, *out_sg;
+
+ if (!dd)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &dd->flags);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return -ENODATA;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+
+ dev_dbg(dd->dev, "%s: get new req\n", __func__);
+
+ /* take mutex to access the aes hw */
+ mutex_lock(&aes_lock);
+
+ /* assign new request to device */
+ dd->req = req;
+ dd->total = req->nbytes;
+ dd->in_offset = 0;
+ dd->in_sg = req->src;
+ dd->out_offset = 0;
+ dd->out_sg = req->dst;
+
+ in_sg = dd->in_sg;
+ out_sg = dd->out_sg;
+
+ if (!in_sg || !out_sg) {
+ mutex_unlock(&aes_lock);
+ return -EINVAL;
+ }
+
+ total = dd->total;
+ rctx = ablkcipher_request_ctx(req);
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx->mode &= FLAGS_MODE_MASK;
+ dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+ dd->iv = (u8 *)req->info;
+ dd->ivlen = AES_BLOCK_SIZE;
+
+ if ((dd->flags & FLAGS_CBC) && dd->iv)
+ dd->flags |= FLAGS_NEW_IV;
+ else
+ dd->flags &= ~FLAGS_NEW_IV;
+
+ ctx->dd = dd;
+ if (dd->ctx != ctx) {
+ /* assign new context to device */
+ dd->ctx = ctx;
+ ctx->flags |= FLAGS_NEW_KEY;
+ }
+
+ /* take the hardware semaphore */
+ if (tegra_arb_mutex_lock_timeout(dd->res_id, ARB_SEMA_TIMEOUT) < 0) {
+ dev_err(dd->dev, "aes hardware not available\n");
+ mutex_unlock(&aes_lock);
+ return -EBUSY;
+ }
+
+ aes_set_key(dd);
+
+ /* set iv to the aes hw slot */
+ memset(dd->buf_in, 0, AES_BLOCK_SIZE);
+ if (dd->iv)
+ memcpy(dd->buf_in, dd->iv, dd->ivlen);
+
+ ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
+ (u32)dd->dma_buf_out, 1, FLAGS_CBC, false);
+ if (ret < 0) {
+ dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+ goto out;
+ }
+ memset(dd->buf_in, 0, AES_BLOCK_SIZE);
+
+ while (total) {
+ dev_dbg(dd->dev, "remain: 0x%x\n", total);
+
+ ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
+ if (!ret) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ goto out;
+ }
+
+ ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
+ if (!ret) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ dma_unmap_sg(dd->dev, dd->in_sg,
+ 1, DMA_TO_DEVICE);
+ goto out;
+ }
+
+ addr_in = sg_dma_address(in_sg);
+ addr_out = sg_dma_address(out_sg);
+ dd->flags |= FLAGS_FAST;
+ count = min((int)sg_dma_len(in_sg), (int)dma_max);
+ WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
+ nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);
+
+ ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
+ dd->flags, true);
+
+ dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
+
+ if (ret < 0) {
+ dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+ goto out;
+ }
+ dd->flags &= ~FLAGS_FAST;
+
+ dev_dbg(dd->dev, "out: copied 0x%x\n", count);
+ total -= count;
+ in_sg = sg_next(in_sg);
+ out_sg = sg_next(out_sg);
+ WARN_ON(((total != 0) && (!in_sg || !out_sg)));
+ }
+
+out:
+ /* release the hardware semaphore */
+ tegra_arb_mutex_unlock(dd->res_id);
+
+ dd->total = total;
+
+ /* release the mutex */
+ mutex_unlock(&aes_lock);
+
+ if (dd->req->base.complete)
+ dd->req->base.complete(&dd->req->base, ret);
+
+ dev_dbg(dd->dev, "%s: exit\n", __func__);
+ return ret;
+}
+
+static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct tegra_aes_dev *dd = aes_dev;
+ struct tegra_aes_slot *key_slot;
+
+ if (!ctx || !dd) {
+ pr_err("ctx=0x%x, dd=0x%x\n",
+ (unsigned int)ctx, (unsigned int)dd);
+ return -EINVAL;
+ }
+
+ if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
+ (keylen != AES_KEYSIZE_256)) {
+ dev_err(dd->dev, "unsupported key size\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dd->dev, "keylen: %d\n", keylen);
+
+ ctx->dd = dd;
+ dd->ctx = ctx;
+
+ if (ctx->slot)
+ aes_release_key_slot(dd);
+
+ key_slot = aes_find_key_slot(dd);
+ if (!key_slot) {
+ dev_err(dd->dev, "no empty slot\n");
+ return -ENOMEM;
+ }
+
+ ctx->slot = key_slot;
+ ctx->keylen = keylen;
+ ctx->flags |= FLAGS_NEW_KEY;
+
+ /* copy the key */
+ memset(dd->ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+ memcpy(dd->ivkey_base, key, keylen);
+
+ dev_dbg(dd->dev, "done\n");
+ return 0;
+}
+
+static void aes_workqueue_handler(struct work_struct *work)
+{
+ struct tegra_aes_dev *dd = aes_dev;
+ int ret;
+
+ set_bit(FLAGS_BUSY, &dd->flags);
+
+ do {
+ ret = tegra_aes_handle_req(dd);
+ } while (!ret);
+}
+
+static irqreturn_t aes_irq(int irq, void *dev_id)
+{
+ struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
+ u32 value = aes_readl(dd, INTR_STATUS);
+
+ dev_dbg(dd->dev, "irq_stat: 0x%x", value);
+ if (!((value & ENGINE_BUSY_FIELD) && !(value & ICQ_EMPTY_FIELD)))
+ complete(&dd->op_complete);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct tegra_aes_dev *dd = aes_dev;
+ unsigned long flags;
+ int err = 0;
+ int busy;
+
+ dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+ !!(mode & FLAGS_ENCRYPT),
+ !!(mode & FLAGS_CBC));
+
+ rctx->mode = mode;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ err = ablkcipher_enqueue_request(&dd->queue, req);
+ busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!busy)
+ schedule_work(&aes_wq);
+
+ return err;
+}
+
+static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return tegra_aes_crypt(req, FLAGS_ENCRYPT);
+}
+
+static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return tegra_aes_crypt(req, 0);
+}
+
+static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+}
+
+static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return tegra_aes_crypt(req, FLAGS_CBC);
+}
+
+static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen)
+{
+ struct tegra_aes_dev *dd = aes_dev;
+ struct tegra_aes_ctx *ctx = &rng_ctx;
+ int ret, i;
+ u8 *dest = rdata, *dt = dd->dt;
+
+ /* take mutex to access the aes hw */
+ mutex_lock(&aes_lock);
+
+ /* take the hardware semaphore */
+ if (tegra_arb_mutex_lock_timeout(dd->res_id, ARB_SEMA_TIMEOUT) < 0) {
+ dev_err(dd->dev, "aes hardware not available\n");
+ mutex_unlock(&aes_lock);
+ return -EBUSY;
+ }
+
+ ctx->dd = dd;
+ dd->ctx = ctx;
+ dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
+
+ memset(dd->buf_in, 0, AES_BLOCK_SIZE);
+ memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);
+
+ ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
+ (u32)dd->dma_buf_out, 1, dd->flags, true);
+ if (ret < 0) {
+ dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+ dlen = ret;
+ goto out;
+ }
+ memcpy(dest, dd->buf_out, dlen);
+
+ /* update the DT */
+ for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
+ dt[i] += 1;
+ if (dt[i] != 0)
+ break;
+ }
+
+out:
+ /* release the hardware semaphore */
+ tegra_arb_mutex_unlock(dd->res_id);
+ mutex_unlock(&aes_lock);
+ dev_dbg(dd->dev, "%s: done\n", __func__);
+ return dlen;
+}
+
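
The loop at the end of tegra_aes_get_random() advances DT as a 16-byte big-endian counter, in the style of the ANSI X9.31 DT vector (the RNG is registered as "ansi_cprng" below). For example:

    /* {..., 0x01, 0xFF} -> {..., 0x02, 0x00}: the low byte wraps to zero
     * and the carry stops at the first byte that does not overflow. */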
+static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
+ unsigned int slen)
+{
+ struct tegra_aes_dev *dd = aes_dev;
+ struct tegra_aes_ctx *ctx = &rng_ctx;
+ struct tegra_aes_slot *key_slot;
+ struct timespec ts;
+ int ret = 0;
+ u64 nsec, tmp[2];
+ u8 *dt;
+
+ if (!ctx || !dd) {
+ pr_err("ctx=0x%x, dd=0x%x\n",
+ (unsigned int)ctx, (unsigned int)dd);
+ return -EINVAL;
+ }
+
+ if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
+ dev_err(dd->dev, "seed size invalid");
+ return -ENOMEM;
+ }
+
+ /* take mutex to access the aes hw */
+ mutex_lock(&aes_lock);
+
+ if (!ctx->slot) {
+ key_slot = aes_find_key_slot(dd);
+ if (!key_slot) {
+ dev_err(dd->dev, "no empty slot\n");
+ mutex_unlock(&aes_lock);
+ return -ENOMEM;
+ }
+ ctx->slot = key_slot;
+ }
+
+ ctx->dd = dd;
+ dd->ctx = ctx;
+ dd->ctr = 0;
+
+ ctx->keylen = AES_KEYSIZE_128;
+ ctx->flags |= FLAGS_NEW_KEY;
+
+ /* copy the key to the key slot */
+ memset(dd->ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+ memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
+
+ dd->iv = seed;
+ dd->ivlen = slen;
+
+ dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
+
+ /* take the hardware semaphore */
+ if (tegra_arb_mutex_lock_timeout(dd->res_id, ARB_SEMA_TIMEOUT) < 0) {
+ dev_err(dd->dev, "aes hardware not available\n");
+ mutex_unlock(&aes_lock);
+ return -EBUSY;
+ }
+
+ aes_set_key(dd);
+
+ /* set seed to the aes hw slot */
+ memset(dd->buf_in, 0, AES_BLOCK_SIZE);
+ memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
+ ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
+ (u32)dd->dma_buf_out, 1, FLAGS_CBC, false);
+ if (ret < 0) {
+ dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
+ goto out;
+ }
+
+ if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
+ dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
+ } else {
+ getnstimeofday(&ts);
+ nsec = timespec_to_ns(&ts);
+ do_div(nsec, 1000);
+ nsec ^= dd->ctr << 56;
+ dd->ctr++;
+ tmp[0] = nsec;
+ tmp[1] = tegra_chip_uid();
+ dt = (u8 *)tmp;
+ }
+ memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
+
+out:
+ /* release the hardware semaphore */
+ tegra_arb_mutex_unlock(dd->res_id);
+ mutex_unlock(&aes_lock);
+
+ dev_dbg(dd->dev, "%s: done\n", __func__);
+ return ret;
+}
+
+static int tegra_aes_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);
+
+ return 0;
+}
+
+static struct crypto_alg algs[] = {
+ {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-tegra",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_ecb_encrypt,
+ .decrypt = tegra_aes_ecb_decrypt,
+ },
+ }, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-tegra",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_cbc_encrypt,
+ .decrypt = tegra_aes_cbc_decrypt,
+ }
+ }, {
+ .cra_name = "ansi_cprng",
+ .cra_driver_name = "rng-aes-tegra",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_type = &crypto_rng_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_aes_cra_init,
+ .cra_u.rng = {
+ .rng_make_random = tegra_aes_get_random,
+ .rng_reset = tegra_aes_rng_reset,
+ .seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
+ }
+ }
+};
+
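
Once registered, these algorithms are reached through the normal kernel crypto API rather than called directly. A minimal consumer sketch (standard ablkcipher calls of this era; "key" is assumed to hold 16 key bytes, and error handling is trimmed):

    struct crypto_ablkcipher *tfm;

    tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);
    crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
    /* allocate an ablkcipher_request, attach src/dst scatterlists and
     * the IV, then crypto_ablkcipher_encrypt(req) dispatches into
     * tegra_aes_cbc_encrypt() above */
    crypto_free_ablkcipher(tfm);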
+static int tegra_aes_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_aes_dev *dd;
+ struct resource *res;
+ struct tegra_aes_slot *slot, *n;
+ int err = -ENOMEM, i = 0, j;
+
+ if (aes_dev)
+ return -EEXIST;
+
+ dd = kzalloc(sizeof(struct tegra_aes_dev), GFP_KERNEL);
+ if (dd == NULL) {
+ dev_err(dev, "unable to alloc data struct.\n");
+ return -ENOMEM;
+ }
+ dd->dev = dev;
+ platform_set_drvdata(pdev, dd);
+
+ dd->slots = kzalloc(sizeof(struct tegra_aes_slot) * AES_NR_KEYSLOTS,
+ GFP_KERNEL);
+ if (dd->slots == NULL) {
+ dev_err(dev, "unable to alloc slot struct.\n");
+ goto out;
+ }
+
+ spin_lock_init(&dd->lock);
+ crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);
+
+ /* Get the module base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "invalid resource type: base\n");
+ err = -ENODEV;
+ goto out;
+ }
+ dd->phys_base = res->start;
+
+ dd->io_base = ioremap(dd->phys_base, resource_size(res));
+ if (!dd->io_base) {
+ dev_err(dev, "can't ioremap phys_base\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dd->res_id = TEGRA_ARB_AES;
+
+ /* Initialise the master bsev clock */
+ dd->pclk = clk_get(dev, "bsev");
+ if (!dd->pclk) {
+ dev_err(dev, "pclock intialization failed.\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* Initialize the vde clock */
+ dd->iclk = clk_get(dev, "vde");
+ if (!dd->iclk) {
+ dev_err(dev, "iclock intialization failed.\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * the following contiguous memory is allocated:
+ * - hardware key table
+ * - key schedule
+ */
+ dd->ivkey_base = dma_alloc_coherent(dev, SZ_512, &dd->ivkey_phys_base,
+ GFP_KERNEL);
+ if (!dd->ivkey_base) {
+ dev_err(dev, "can not allocate iv/key buffer\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ &dd->dma_buf_in, GFP_KERNEL);
+ if (!dd->buf_in) {
+ dev_err(dev, "can not allocate dma-in buffer\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ &dd->dma_buf_out, GFP_KERNEL);
+ if (!dd->buf_out) {
+ dev_err(dev, "can not allocate dma-out buffer\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ init_completion(&dd->op_complete);
+
+ /* get the irq */
+ err = request_irq(INT_VDE_BSE_V, aes_irq, IRQF_TRIGGER_HIGH,
+ "tegra-aes", dd);
+ if (err) {
+ dev_err(dev, "request_irq failed\n");
+ goto out;
+ }
+
+ spin_lock_init(&list_lock);
+ spin_lock(&list_lock);
+ for (i = 0; i < AES_NR_KEYSLOTS; i++) {
+ dd->slots[i].available = true;
+ dd->slots[i].slot_num = i;
+ INIT_LIST_HEAD(&dd->slots[i].node);
+ list_add_tail(&dd->slots[i].node, &dev_list);
+ }
+ spin_unlock(&list_lock);
+
+ aes_dev = dd;
+ for (i = 0; i < ARRAY_SIZE(algs); i++) {
+ INIT_LIST_HEAD(&algs[i].cra_list);
+ err = crypto_register_alg(&algs[i]);
+ if (err)
+ goto out;
+ }
+
+ dev_info(dev, "registered");
+ return 0;
+
+out:
+ for (j = 0; j < i; j++)
+ crypto_unregister_alg(&algs[j]);
+ if (dd->ivkey_base)
+ dma_free_coherent(dev, SZ_512, dd->ivkey_base,
+ dd->ivkey_phys_base);
+ if (dd->buf_in)
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ dd->buf_in, dd->dma_buf_in);
+ if (dd->buf_out)
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ dd->buf_out, dd->dma_buf_out);
+ if (dd->io_base)
+ iounmap(dd->io_base);
+ if (dd->iclk)
+ clk_put(dd->iclk);
+ if (dd->pclk)
+ clk_put(dd->pclk);
+
+ free_irq(INT_VDE_BSE_V, dd);
+ spin_lock(&list_lock);
+ list_for_each_entry_safe(slot, n, &dev_list, node)
+ list_del(&slot->node);
+ spin_unlock(&list_lock);
+
+ kfree(dd->slots);
+ kfree(dd);
+ aes_dev = NULL;
+ dev_err(dev, "%s: initialization failed.\n", __func__);
+ return err;
+}
+
+static int __devexit tegra_aes_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
+ struct tegra_aes_slot *slot, *n;
+ int i;
+
+ if (!dd)
+ return -ENODEV;
+
+ cancel_work_sync(&aes_wq);
+ free_irq(INT_VDE_BSE_V, dd);
+ spin_lock(&list_lock);
+ list_for_each_entry_safe(slot, n, &dev_list, node)
+ list_del(&slot->node);
+ spin_unlock(&list_lock);
+
+ for (i = 0; i < ARRAY_SIZE(algs); i++)
+ crypto_unregister_alg(&algs[i]);
+
+ dma_free_coherent(dev, SZ_512, dd->ivkey_base,
+ dd->ivkey_phys_base);
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ dd->buf_in, dd->dma_buf_in);
+ dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
+ dd->buf_out, dd->dma_buf_out);
+ iounmap(dd->io_base);
+ clk_put(dd->iclk);
+ clk_put(dd->pclk);
+ kfree(dd->slots);
+ kfree(dd);
+ aes_dev = NULL;
+
+ return 0;
+}
+
+static struct platform_driver tegra_aes_driver = {
+ .probe = tegra_aes_probe,
+ .remove = __devexit_p(tegra_aes_remove),
+ .driver = {
+ .name = "tegra-aes",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_aes_mod_init(void)
+{
+ mutex_init(&aes_lock);
+ INIT_LIST_HEAD(&dev_list);
+ return platform_driver_register(&tegra_aes_driver);
+}
+
+static void __exit tegra_aes_mod_exit(void)
+{
+ platform_driver_unregister(&tegra_aes_driver);
+}
+
+module_init(tegra_aes_mod_init);
+module_exit(tegra_aes_mod_exit);
+
+MODULE_DESCRIPTION("Tegra AES hw acceleration support.");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/crypto/tegra-aes.h b/drivers/crypto/tegra-aes.h
new file mode 100644
index 000000000000..83dd6bbc90e0
--- /dev/null
+++ b/drivers/crypto/tegra-aes.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __CRYPTODEV_TEGRA_AES_H
+#define __CRYPTODEV_TEGRA_AES_H
+
+#define ICMDQUE_WR 0x1000
+#define CMDQUE_CONTROL 0x1008
+#define INTR_STATUS 0x1018
+#define INT_ENB 0x1040
+#define CONFIG 0x1044
+#define IRAM_ACCESS_CFG 0x10A0
+#define SECURE_DEST_ADDR 0x1100
+#define SECURE_INPUT_SELECT 0x1104
+#define SECURE_CONFIG 0x1108
+#define SECURE_CONFIG_EXT 0x110C
+#define SECURE_SECURITY 0x1110
+#define SECURE_HASH_RESULT0 0x1120
+#define SECURE_HASH_RESULT1 0x1124
+#define SECURE_HASH_RESULT2 0x1128
+#define SECURE_HASH_RESULT3 0x112C
+#define SECURE_SEC_SEL0 0x1140
+#define SECURE_SEC_SEL1 0x1144
+#define SECURE_SEC_SEL2 0x1148
+#define SECURE_SEC_SEL3 0x114C
+#define SECURE_SEC_SEL4 0x1150
+#define SECURE_SEC_SEL5 0x1154
+#define SECURE_SEC_SEL6 0x1158
+#define SECURE_SEC_SEL7 0x115C
+
+/* interrupt status reg masks and shifts */
+#define DMA_BUSY_SHIFT 9
+#define DMA_BUSY_FIELD (0x1 << DMA_BUSY_SHIFT)
+#define ICQ_EMPTY_SHIFT 3
+#define ICQ_EMPTY_FIELD (0x1 << ICQ_EMPTY_SHIFT)
+#define ENGINE_BUSY_SHIFT 0
+#define ENGINE_BUSY_FIELD (0x1 << ENGINE_BUSY_SHIFT)
+
+/* secure select reg masks and shifts */
+#define SECURE_SEL0_KEYREAD_ENB0_SHIFT 0
+#define SECURE_SEL0_KEYREAD_ENB0_FIELD (0x1 << SECURE_SEL0_KEYREAD_ENB0_SHIFT)
+
+/* secure config ext masks and shifts */
+#define SECURE_KEY_SCH_DIS_SHIFT 15
+#define SECURE_KEY_SCH_DIS_FIELD (0x1 << SECURE_KEY_SCH_DIS_SHIFT)
+
+/* secure config masks and shifts */
+#define SECURE_KEY_INDEX_SHIFT 20
+#define SECURE_KEY_INDEX_FIELD (0x1F << SECURE_KEY_INDEX_SHIFT)
+#define SECURE_BLOCK_CNT_SHIFT 0
+#define SECURE_BLOCK_CNT_FIELD (0xFFFFF << SECURE_BLOCK_CNT_SHIFT)
+
+/* stream interface select masks and shifts */
+#define CMDQ_CTRL_SRC_STM_SEL_SHIFT 4
+#define CMDQ_CTRL_SRC_STM_SEL_FIELD (1 << CMDQ_CTRL_SRC_STM_SEL_SHIFT)
+#define CMDQ_CTRL_DST_STM_SEL_SHIFT 5
+#define CMDQ_CTRL_DST_STM_SEL_FIELD (1 << CMDQ_CTRL_DST_STM_SEL_SHIFT)
+#define CMDQ_CTRL_ICMDQEN_SHIFT 1
+#define CMDQ_CTRL_ICMDQEN_FIELD (1 << CMDQ_CTRL_ICMDQEN_SHIFT)
+#define CMDQ_CTRL_UCMDQEN_SHIFT 0
+#define CMDQ_CTRL_UCMDQEN_FIELD (1 << CMDQ_CTRL_UCMDQEN_SHIFT)
+
+/* config register masks and shifts */
+#define CONFIG_ENDIAN_ENB_SHIFT 10
+#define CONFIG_ENDIAN_ENB_FIELD (0x1 << CONFIG_ENDIAN_ENB_SHIFT)
+#define CONFIG_MODE_SEL_SHIFT 0
+#define CONFIG_MODE_SEL_FIELD (0x1F << CONFIG_MODE_SEL_SHIFT)
+
+/* extended config */
+#define SECURE_OFFSET_CNT_SHIFT 24
+#define SECURE_OFFSET_CNT_FIELD (0xFF << SECURE_OFFSET_CNT_SHIFT)
+#define SECURE_KEYSCHED_GEN_SHIFT 15
+#define SECURE_KEYSCHED_GEN_FIELD (1 << SECURE_KEYSCHED_GEN_SHIFT)
+
+/* init vector select */
+#define SECURE_IV_SELECT_SHIFT 10
+#define SECURE_IV_SELECT_FIELD (1 << SECURE_IV_SELECT_SHIFT)
+
+/* secure engine input */
+#define SECURE_INPUT_ALG_SEL_SHIFT 28
+#define SECURE_INPUT_ALG_SEL_FIELD (0xF << SECURE_INPUT_ALG_SEL_SHIFT)
+#define SECURE_INPUT_KEY_LEN_SHIFT 16
+#define SECURE_INPUT_KEY_LEN_FIELD (0xFFF << SECURE_INPUT_KEY_LEN_SHIFT)
+#define SECURE_RNG_ENB_SHIFT 11
+#define SECURE_RNG_ENB_FIELD (0x1 << SECURE_RNG_ENB_SHIFT)
+#define SECURE_CORE_SEL_SHIFT 9
+#define SECURE_CORE_SEL_FIELD (0x1 << SECURE_CORE_SEL_SHIFT)
+#define SECURE_VCTRAM_SEL_SHIFT 7
+#define SECURE_VCTRAM_SEL_FIELD (0x3 << SECURE_VCTRAM_SEL_SHIFT)
+#define SECURE_INPUT_SEL_SHIFT 5
+#define SECURE_INPUT_SEL_FIELD (0x3 << SECURE_INPUT_SEL_SHIFT)
+#define SECURE_XOR_POS_SHIFT 3
+#define SECURE_XOR_POS_FIELD (0x3 << SECURE_XOR_POS_SHIFT)
+#define SECURE_HASH_ENB_SHIFT 2
+#define SECURE_HASH_ENB_FIELD (0x1 << SECURE_HASH_ENB_SHIFT)
+#define SECURE_ON_THE_FLY_SHIFT 0
+#define SECURE_ON_THE_FLY_FIELD (1 << SECURE_ON_THE_FLY_SHIFT)
+
+#endif
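
The SHIFT/FIELD pairs above support the usual read-modify-write idiom. For example, selecting a key slot in SECURE_CONFIG (this mirrors aes_set_key() in tegra-aes.c; "slot_num" is a stand-in for the slot index):

    u32 val = aes_readl(dd, SECURE_CONFIG);
    val &= ~SECURE_KEY_INDEX_FIELD;               /* clear the field */
    val |= slot_num << SECURE_KEY_INDEX_SHIFT;    /* set the new index */
    aes_writel(dd, val, SECURE_CONFIG);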
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6539ac2907e9..7466333c4ee5 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -596,6 +596,13 @@ config I2C_STU300
This driver can also be built as a module. If so, the module
will be called i2c-stu300.
+config I2C_TEGRA
+ tristate "NVIDIA Tegra internal I2C controller"
+ depends on ARCH_TEGRA
+ help
+ If you say yes to this option, support will be included for the
+ I2C controller embedded in NVIDIA Tegra SoCs.
+
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index c3ef49230cba..94348a59801b 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
+obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
new file mode 100755
index 000000000000..91aa11ce0de2
--- /dev/null
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -0,0 +1,758 @@
+/*
+ * drivers/i2c/busses/i2c-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c-tegra.h>
+
+#include <asm/unaligned.h>
+
+#include <mach/clk.h>
+#include <mach/pinmux.h>
+
+#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
+#define BYTES_PER_FIFO_WORD 4
+
+#define I2C_CNFG 0x000
+#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
+#define I2C_CNFG_PACKET_MODE_EN (1<<10)
+#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
+#define I2C_STATUS 0x01C
+#define I2C_SL_CNFG 0x020
+#define I2C_SL_CNFG_NEWSL (1<<2)
+#define I2C_SL_ADDR1 0x02c
+#define I2C_TX_FIFO 0x050
+#define I2C_RX_FIFO 0x054
+#define I2C_PACKET_TRANSFER_STATUS 0x058
+#define I2C_FIFO_CONTROL 0x05c
+#define I2C_FIFO_CONTROL_TX_FLUSH (1<<1)
+#define I2C_FIFO_CONTROL_RX_FLUSH (1<<0)
+#define I2C_FIFO_CONTROL_TX_TRIG_SHIFT 5
+#define I2C_FIFO_CONTROL_RX_TRIG_SHIFT 2
+#define I2C_FIFO_STATUS 0x060
+#define I2C_FIFO_STATUS_TX_MASK 0xF0
+#define I2C_FIFO_STATUS_TX_SHIFT 4
+#define I2C_FIFO_STATUS_RX_MASK 0x0F
+#define I2C_FIFO_STATUS_RX_SHIFT 0
+#define I2C_INT_MASK 0x064
+#define I2C_INT_STATUS 0x068
+#define I2C_INT_PACKET_XFER_COMPLETE (1<<7)
+#define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6)
+#define I2C_INT_TX_FIFO_OVERFLOW (1<<5)
+#define I2C_INT_RX_FIFO_UNDERFLOW (1<<4)
+#define I2C_INT_NO_ACK (1<<3)
+#define I2C_INT_ARBITRATION_LOST (1<<2)
+#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
+#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
+#define I2C_CLK_DIVISOR 0x06c
+
+#define DVC_CTRL_REG1 0x000
+#define DVC_CTRL_REG1_INTR_EN (1<<10)
+#define DVC_CTRL_REG2 0x004
+#define DVC_CTRL_REG3 0x008
+#define DVC_CTRL_REG3_SW_PROG (1<<26)
+#define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30)
+#define DVC_STATUS 0x00c
+#define DVC_STATUS_I2C_DONE_INTR (1<<30)
+
+#define I2C_ERR_NONE 0x00
+#define I2C_ERR_NO_ACK 0x01
+#define I2C_ERR_ARBITRATION_LOST 0x02
+#define I2C_ERR_UNKNOWN_INTERRUPT 0x04
+
+#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
+#define PACKET_HEADER0_PACKET_ID_SHIFT 16
+#define PACKET_HEADER0_CONT_ID_SHIFT 12
+#define PACKET_HEADER0_PROTOCOL_I2C (1<<4)
+
+#define I2C_HEADER_HIGHSPEED_MODE (1<<22)
+#define I2C_HEADER_CONT_ON_NAK (1<<21)
+#define I2C_HEADER_SEND_START_BYTE (1<<20)
+#define I2C_HEADER_READ (1<<19)
+#define I2C_HEADER_10BIT_ADDR (1<<18)
+#define I2C_HEADER_IE_ENABLE (1<<17)
+#define I2C_HEADER_REPEAT_START (1<<16)
+#define I2C_HEADER_MASTER_ADDR_SHIFT 12
+#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
+
+struct tegra_i2c_dev;
+
+struct tegra_i2c_bus {
+ struct tegra_i2c_dev *dev;
+ const struct tegra_pingroup_config *mux;
+ int mux_len;
+ unsigned long bus_clk_rate;
+ struct i2c_adapter adapter;
+};
+
+struct tegra_i2c_dev {
+ struct device *dev;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ struct resource *iomem;
+ struct rt_mutex dev_lock;
+ void __iomem *base;
+ int cont_id;
+ int irq;
+ bool irq_disabled;
+ int is_dvc;
+ struct completion msg_complete;
+ int msg_err;
+ u8 *msg_buf;
+ size_t msg_buf_remaining;
+ int msg_read;
+ int msg_transfer_complete;
+ bool is_suspended;
+ int bus_count;
+ const struct tegra_pingroup_config *last_mux;
+ int last_mux_len;
+ unsigned long last_bus_clk;
+ struct tegra_i2c_bus busses[1];
+};
+
+static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
+{
+ writel(val, i2c_dev->base + reg);
+}
+
+static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ return readl(i2c_dev->base + reg);
+}
+
+/* i2c_writel and i2c_readl will offset the register if necessary to talk
+ * to the I2C block inside the DVC block
+ */
+static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
+{
+ if (i2c_dev->is_dvc)
+ reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
+ writel(val, i2c_dev->base + reg);
+}
+
+static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ if (i2c_dev->is_dvc)
+ reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
+ return readl(i2c_dev->base + reg);
+}
+
+static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask &= ~mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask |= mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static void tegra_i2c_set_clk(struct tegra_i2c_dev *i2c_dev, unsigned int freq)
+{
+ clk_set_rate(i2c_dev->clk, freq * 8);
+}
+
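
A note on the factor of eight in tegra_i2c_set_clk(), inferred from this driver rather than stated in it:

    /* The controller derives SCL by dividing its source clock; with
     * I2C_CLK_DIVISOR left at its reset value the effective divide is 8,
     * so the source clock must run at 8x the desired bus rate. The
     * divisor register is defined above but never programmed here. */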
+static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
+{
+ unsigned long timeout = jiffies + HZ;
+ u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL);
+ val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ while (i2c_readl(i2c_dev, I2C_FIFO_CONTROL) &
+ (I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH)) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n");
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int rx_fifo_avail;
+ int word;
+ u8 *buf = i2c_dev->msg_buf;
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ int words_to_transfer;
+
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >>
+ I2C_FIFO_STATUS_RX_SHIFT;
+
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > rx_fifo_avail)
+ words_to_transfer = rx_fifo_avail;
+
+ for (word = 0; word < words_to_transfer; word++) {
+ val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+ put_unaligned_le32(val, buf);
+ buf += BYTES_PER_FIFO_WORD;
+ buf_remaining -= BYTES_PER_FIFO_WORD;
+ rx_fifo_avail--;
+ }
+
+ if (rx_fifo_avail > 0 && buf_remaining > 0) {
+ int bytes_to_transfer = buf_remaining;
+ int byte;
+ BUG_ON(bytes_to_transfer > 3);
+ val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+ for (byte = 0; byte < bytes_to_transfer; byte++) {
+ *buf++ = val & 0xFF;
+ val >>= 8;
+ }
+ buf_remaining -= bytes_to_transfer;
+ rx_fifo_avail--;
+ }
+ BUG_ON(rx_fifo_avail > 0 && buf_remaining > 0);
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+ return 0;
+}
+
+static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int tx_fifo_avail;
+ int word;
+ u8 *buf = i2c_dev->msg_buf;
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ int words_to_transfer;
+
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >>
+ I2C_FIFO_STATUS_TX_SHIFT;
+
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > tx_fifo_avail)
+ words_to_transfer = tx_fifo_avail;
+
+ for (word = 0; word < words_to_transfer; word++) {
+ val = get_unaligned_le32(buf);
+ i2c_writel(i2c_dev, val, I2C_TX_FIFO);
+ buf += BYTES_PER_FIFO_WORD;
+ buf_remaining -= BYTES_PER_FIFO_WORD;
+ tx_fifo_avail--;
+ }
+
+ if (tx_fifo_avail > 0 && buf_remaining > 0) {
+ int bytes_to_transfer = buf_remaining;
+ int byte;
+ BUG_ON(bytes_to_transfer > 3);
+ val = 0;
+ for (byte = 0; byte < bytes_to_transfer; byte++)
+ val |= (*buf++) << (byte * 8);
+ i2c_writel(i2c_dev, val, I2C_TX_FIFO);
+ buf_remaining -= bytes_to_transfer;
+ tx_fifo_avail--;
+ }
+ BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+ return 0;
+}
+
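
For reference, the partial-word handling in both FIFO helpers is little-endian. Packing a three-byte tail for the TX FIFO works out as:

    /* buf = {0xAA, 0xBB, 0xCC}:
     *   byte 0: val |= 0xAA << 0    -> 0x000000AA
     *   byte 1: val |= 0xBB << 8    -> 0x0000BBAA
     *   byte 2: val |= 0xCC << 16   -> 0x00CCBBAA
     * matching what get_unaligned_le32() yields for full words; the RX
     * path reverses this with "*buf++ = val & 0xFF; val >>= 8;". */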
+/* One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller)
+ * block. This block is identical to the rest of the I2C blocks, except that
+ * it only supports master mode, it has registers moved around, and it needs
+ * some extra init to get it into I2C mode. The register moves are handled
+ * by i2c_readl and i2c_writel
+ */
+static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val = 0;
+ val = dvc_readl(i2c_dev, DVC_CTRL_REG3);
+ val |= DVC_CTRL_REG3_SW_PROG;
+ val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN;
+ dvc_writel(i2c_dev, val, DVC_CTRL_REG3);
+
+ val = dvc_readl(i2c_dev, DVC_CTRL_REG1);
+ val |= DVC_CTRL_REG1_INTR_EN;
+ dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
+}
+
+static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int err = 0;
+
+ clk_enable(i2c_dev->clk);
+
+ tegra_periph_reset_assert(i2c_dev->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(i2c_dev->clk);
+
+ if (i2c_dev->is_dvc)
+ tegra_dvc_init(i2c_dev);
+
+ val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN |
+ (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
+ i2c_writel(i2c_dev, val, I2C_CNFG);
+ i2c_writel(i2c_dev, 0, I2C_INT_MASK);
+ tegra_i2c_set_clk(i2c_dev, i2c_dev->last_bus_clk);
+
+ val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
+ 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ if (tegra_i2c_flush_fifos(i2c_dev))
+ err = -ETIMEDOUT;
+
+ clk_disable(i2c_dev->clk);
+
+ if (i2c_dev->irq_disabled) {
+ i2c_dev->irq_disabled = 0;
+ enable_irq(i2c_dev->irq);
+ }
+
+ return err;
+}
+
+static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+{
+ u32 status;
+ const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ struct tegra_i2c_dev *i2c_dev = dev_id;
+
+ status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+
+ if (status == 0) {
+ dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
+ i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
+ i2c_readl(i2c_dev, I2C_STATUS),
+ i2c_readl(i2c_dev, I2C_CNFG));
+ i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT;
+
+ if (!i2c_dev->irq_disabled) {
+ disable_irq_nosync(i2c_dev->irq);
+ i2c_dev->irq_disabled = 1;
+ }
+
+ complete(&i2c_dev->msg_complete);
+ goto err;
+ }
+
+ if (unlikely(status & status_err)) {
+ if (status & I2C_INT_NO_ACK)
+ i2c_dev->msg_err |= I2C_ERR_NO_ACK;
+ if (status & I2C_INT_ARBITRATION_LOST)
+ i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
+ complete(&i2c_dev->msg_complete);
+ goto err;
+ }
+
+ if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) {
+ if (i2c_dev->msg_buf_remaining)
+ tegra_i2c_empty_rx_fifo(i2c_dev);
+ else
+ BUG();
+ }
+
+ if (!i2c_dev->msg_read && (status & I2C_INT_TX_FIFO_DATA_REQ)) {
+ if (i2c_dev->msg_buf_remaining)
+ tegra_i2c_fill_tx_fifo(i2c_dev);
+ else
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
+ }
+
+ if (status & I2C_INT_PACKET_XFER_COMPLETE)
+ i2c_dev->msg_transfer_complete = 1;
+
+ if (i2c_dev->msg_transfer_complete && !i2c_dev->msg_buf_remaining)
+ complete(&i2c_dev->msg_complete);
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+ return IRQ_HANDLED;
+err:
+ /* An error occurred, mask all interrupts */
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST |
+ I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ |
+ I2C_INT_RX_FIFO_DATA_REQ);
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+ return IRQ_HANDLED;
+}
+
+static int tegra_i2c_xfer_msg(struct tegra_i2c_bus *i2c_bus,
+ struct i2c_msg *msg, int stop)
+{
+ struct tegra_i2c_dev *i2c_dev = i2c_bus->dev;
+ u32 packet_header;
+ u32 int_mask;
+ int ret;
+
+ tegra_i2c_flush_fifos(i2c_dev);
+ i2c_writel(i2c_dev, 0xFF, I2C_INT_STATUS);
+
+ if (msg->len == 0)
+ return -EINVAL;
+
+ i2c_dev->msg_buf = msg->buf;
+ i2c_dev->msg_buf_remaining = msg->len;
+ i2c_dev->msg_err = I2C_ERR_NONE;
+ i2c_dev->msg_transfer_complete = 0;
+ i2c_dev->msg_read = (msg->flags & I2C_M_RD);
+ INIT_COMPLETION(i2c_dev->msg_complete);
+
+ packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
+ PACKET_HEADER0_PROTOCOL_I2C |
+ (i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) |
+ (1 << PACKET_HEADER0_PACKET_ID_SHIFT);
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->len - 1;
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+ packet_header |= I2C_HEADER_IE_ENABLE;
+ if (!stop)
+ packet_header |= I2C_HEADER_REPEAT_START;
+ if (msg->flags & I2C_M_TEN)
+ packet_header |= I2C_HEADER_10BIT_ADDR;
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ packet_header |= I2C_HEADER_CONT_ON_NAK;
+ if (msg->flags & I2C_M_RD)
+ packet_header |= I2C_HEADER_READ;
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ if (!(msg->flags & I2C_M_RD))
+ tegra_i2c_fill_tx_fifo(i2c_dev);
+
+ int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ if (msg->flags & I2C_M_RD)
+ int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
+ else if (i2c_dev->msg_buf_remaining)
+ int_mask |= I2C_INT_TX_FIFO_DATA_REQ;
+ tegra_i2c_unmask_irq(i2c_dev, int_mask);
+ pr_debug("unmasked irq: %02x\n", i2c_readl(i2c_dev, I2C_INT_MASK));
+
+ ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT);
+ tegra_i2c_mask_irq(i2c_dev, int_mask);
+
+ if (WARN_ON(ret == 0)) {
+ dev_err(i2c_dev->dev, "i2c transfer timed out\n");
+
+ tegra_i2c_init(i2c_dev);
+ return -ETIMEDOUT;
+ }
+
+ pr_debug("transfer complete: %d %d %d\n", ret, completion_done(&i2c_dev->msg_complete), i2c_dev->msg_err);
+
+ if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
+ return 0;
+
+ tegra_i2c_init(i2c_dev);
+ if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ return 0;
+ return -EREMOTEIO;
+ }
+
+ return -EIO;
+}
+
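
The three writes to I2C_TX_FIFO at the top of tegra_i2c_xfer_msg() form the generic packet header. For a hypothetical 4-byte write to 7-bit address 0x50 on controller 0, with a STOP at the end, the words would be:

    u32 h0 = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) | /* no extended header */
             PACKET_HEADER0_PROTOCOL_I2C |
             (0 << PACKET_HEADER0_CONT_ID_SHIFT) |     /* controller id */
             (1 << PACKET_HEADER0_PACKET_ID_SHIFT);
    u32 h1 = 4 - 1;                                    /* payload length - 1 */
    u32 h2 = (0x50 << I2C_HEADER_SLAVE_ADDR_SHIFT) |
             I2C_HEADER_IE_ENABLE;  /* write; no REPEAT_START since stop == 1 */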
+static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct tegra_i2c_bus *i2c_bus = i2c_get_adapdata(adap);
+ struct tegra_i2c_dev *i2c_dev = i2c_bus->dev;
+ int i;
+ int ret = 0;
+
+ if (i2c_dev->is_suspended)
+ return -EBUSY;
+
+ rt_mutex_lock(&i2c_dev->dev_lock);
+
+ if (i2c_dev->last_mux != i2c_bus->mux) {
+ tegra_pinmux_set_safe_pinmux_table(i2c_dev->last_mux,
+ i2c_dev->last_mux_len);
+ tegra_pinmux_config_pinmux_table(i2c_bus->mux,
+ i2c_bus->mux_len);
+ i2c_dev->last_mux = i2c_bus->mux;
+ i2c_dev->last_mux_len = i2c_bus->mux_len;
+ }
+
+ if (i2c_dev->last_bus_clk != i2c_bus->bus_clk_rate) {
+ tegra_i2c_set_clk(i2c_dev, i2c_bus->bus_clk_rate);
+ i2c_dev->last_bus_clk = i2c_bus->bus_clk_rate;
+ }
+
+ clk_enable(i2c_dev->clk);
+ for (i = 0; i < num; i++) {
+ int stop = (i == (num - 1)) ? 1 : 0;
+ ret = tegra_i2c_xfer_msg(i2c_bus, &msgs[i], stop);
+ if (ret)
+ goto out;
+ }
+ ret = i;
+
+out:
+ clk_disable(i2c_dev->clk);
+
+ rt_mutex_unlock(&i2c_dev->dev_lock);
+
+ return ret;
+}
+
+static u32 tegra_i2c_func(struct i2c_adapter *adap)
+{
+	/* FIXME: For now keep it simple and don't support protocol
+	 * mangling features */
+ return I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm tegra_i2c_algo = {
+ .master_xfer = tegra_i2c_xfer,
+ .functionality = tegra_i2c_func,
+};
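+
+/*
+ * Usage sketch (hypothetical client, not part of this driver): a register
+ * read against a device on one of these busses reaches tegra_i2c_xfer()
+ * as two messages, joined by a repeat start since only the final message
+ * carries a stop:
+ *
+ *	struct i2c_msg msgs[] = {
+ *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
+ *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 4, .buf = data },
+ *	};
+ *	ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+ */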
+
+static int tegra_i2c_probe(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev;
+ struct tegra_i2c_platform_data *plat = pdev->dev.platform_data;
+ struct resource *res;
+ struct resource *iomem;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ void *base;
+ int irq;
+ int nbus;
+ int i = 0;
+ int ret = 0;
+
+ if (!plat) {
+ dev_err(&pdev->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ if (plat->bus_count <= 0 || plat->adapter_nr < 0) {
+ dev_err(&pdev->dev, "invalid platform data?\n");
+ return -ENODEV;
+ }
+
+ WARN_ON(plat->bus_count > TEGRA_I2C_MAX_BUS);
+ nbus = min(TEGRA_I2C_MAX_BUS, plat->bus_count);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -ENODEV;
+ }
+ iomem = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!iomem) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+	base = ioremap(iomem->start, resource_size(iomem));
+	if (!base) {
+		dev_err(&pdev->dev, "Can't ioremap I2C region\n");
+		ret = -ENOMEM;
+		goto err_release_region;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		ret = -ENODEV;
+		goto err_iounmap;
+	}
+	irq = res->start;
+
+	clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "missing controller clock\n");
+		ret = PTR_ERR(clk);
+		goto err_iounmap;
+	}
+
+	i2c_clk = clk_get(&pdev->dev, "i2c");
+	if (IS_ERR(i2c_clk)) {
+		dev_err(&pdev->dev, "missing bus clock\n");
+		ret = PTR_ERR(i2c_clk);
+		goto err_clk_put;
+	}
+
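+	/* struct tegra_i2c_dev is assumed to end in a one-element busses[]
+	 * array (declared earlier in this file), so nbus busses need
+	 * nbus - 1 extra elements' worth of space. */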
+ i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev) +
+ (nbus-1) * sizeof(struct tegra_i2c_bus), GFP_KERNEL);
+ if (!i2c_dev) {
+ ret = -ENOMEM;
+ goto err_i2c_clk_put;
+ }
+
+ i2c_dev->base = base;
+ i2c_dev->clk = clk;
+ i2c_dev->i2c_clk = i2c_clk;
+ i2c_dev->iomem = iomem;
+ i2c_dev->irq = irq;
+ i2c_dev->cont_id = pdev->id;
+ i2c_dev->dev = &pdev->dev;
+ i2c_dev->last_bus_clk = plat->bus_clk_rate[0] ?: 100000;
+ rt_mutex_init(&i2c_dev->dev_lock);
+
+ i2c_dev->is_dvc = plat->is_dvc;
+ init_completion(&i2c_dev->msg_complete);
+
+ platform_set_drvdata(pdev, i2c_dev);
+
+ ret = tegra_i2c_init(i2c_dev);
+ if (ret)
+ goto err_free;
+
+ ret = request_irq(i2c_dev->irq, tegra_i2c_isr, IRQF_DISABLED,
+ pdev->name, i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
+ goto err_free;
+ }
+
+ clk_enable(i2c_dev->i2c_clk);
+
+ for (i = 0; i < nbus; i++) {
+ struct tegra_i2c_bus *i2c_bus = &i2c_dev->busses[i];
+
+ i2c_bus->dev = i2c_dev;
+ i2c_bus->mux = plat->bus_mux[i];
+ i2c_bus->mux_len = plat->bus_mux_len[i];
+ i2c_bus->bus_clk_rate = plat->bus_clk_rate[i] ?: 100000;
+
+ i2c_bus->adapter.algo = &tegra_i2c_algo;
+ i2c_set_adapdata(&i2c_bus->adapter, i2c_bus);
+ i2c_bus->adapter.owner = THIS_MODULE;
+ i2c_bus->adapter.class = I2C_CLASS_HWMON;
+ strlcpy(i2c_bus->adapter.name, "Tegra I2C adapter",
+ sizeof(i2c_bus->adapter.name));
+ i2c_bus->adapter.dev.parent = &pdev->dev;
+ i2c_bus->adapter.nr = plat->adapter_nr + i;
+ ret = i2c_add_numbered_adapter(&i2c_bus->adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add I2C adapter\n");
+ goto err_del_bus;
+ }
+ i2c_dev->bus_count++;
+ }
+
+ return 0;
+
+err_del_bus:
+	while (i2c_dev->bus_count--)
+		i2c_del_adapter(&i2c_dev->busses[i2c_dev->bus_count].adapter);
+	clk_disable(i2c_dev->i2c_clk);
+	free_irq(i2c_dev->irq, i2c_dev);
+err_free:
+	kfree(i2c_dev);
+err_i2c_clk_put:
+	clk_put(i2c_clk);
+err_clk_put:
+	clk_put(clk);
+err_iounmap:
+	iounmap(base);
+err_release_region:
+	release_mem_region(iomem->start, resource_size(iomem));
+	return ret;
+}
+
+static int tegra_i2c_remove(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ while (i2c_dev->bus_count--)
+ i2c_del_adapter(&i2c_dev->busses[i2c_dev->bus_count].adapter);
+
+	free_irq(i2c_dev->irq, i2c_dev);
+	clk_disable(i2c_dev->i2c_clk);
+	clk_put(i2c_dev->i2c_clk);
+	clk_put(i2c_dev->clk);
+ release_mem_region(i2c_dev->iomem->start,
+ resource_size(i2c_dev->iomem));
+ iounmap(i2c_dev->base);
+ kfree(i2c_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ rt_mutex_lock(&i2c_dev->dev_lock);
+ i2c_dev->is_suspended = true;
+ rt_mutex_unlock(&i2c_dev->dev_lock);
+
+ return 0;
+}
+
+static int tegra_i2c_resume(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ int ret;
+
+ rt_mutex_lock(&i2c_dev->dev_lock);
+
+ ret = tegra_i2c_init(i2c_dev);
+
+ if (ret) {
+ rt_mutex_unlock(&i2c_dev->dev_lock);
+ return ret;
+ }
+
+ i2c_dev->is_suspended = false;
+
+ rt_mutex_unlock(&i2c_dev->dev_lock);
+
+ return 0;
+}
+#endif
+
+static struct platform_driver tegra_i2c_driver = {
+ .probe = tegra_i2c_probe,
+ .remove = tegra_i2c_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_i2c_suspend,
+ .resume = tegra_i2c_resume,
+#endif
+ .driver = {
+ .name = "tegra-i2c",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_i2c_init_driver(void)
+{
+ return platform_driver_register(&tegra_i2c_driver);
+}
+/*
+ * Some drivers (hdmi) depend on i2c busses already being present,
+ * so init at subsys time.
+ */
+subsys_initcall(tegra_i2c_init_driver);
+
+static void __exit tegra_i2c_exit_driver(void)
+{
+ platform_driver_unregister(&tegra_i2c_driver);
+}
+module_exit(tegra_i2c_exit_driver);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 0069d9703fda..cab10a00e414 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -328,6 +328,19 @@ config TOUCHSCREEN_MIGOR
To compile this driver as a module, choose M here: the
module will be called migor_ts.
+config TOUCHSCREEN_PANJIT_I2C
+ tristate "PANJIT I2C touchscreen driver"
+ depends on I2C
+ default n
+ help
+	  Say Y here to enable PANJIT I2C capacitive touchscreen support,
+	  covering devices such as the MGG1010AI06 and EGG1010AI06.
+
+	  If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will
+ be called panjit_i2c.
+
config TOUCHSCREEN_TOUCHRIGHT
tristate "Touchright serial touchscreen"
select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 28217e1dcafd..a8c08aa6ea66 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_TOUCHSCREEN_HP600) += hp680_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_HP7XX) += jornada720_ts.o
obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o
obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
+obj-$(CONFIG_TOUCHSCREEN_PANJIT_I2C) += panjit_i2c.o
obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o
diff --git a/drivers/input/touchscreen/panjit_i2c.c b/drivers/input/touchscreen/panjit_i2c.c
new file mode 100644
index 000000000000..16df9313a10f
--- /dev/null
+++ b/drivers/input/touchscreen/panjit_i2c.c
@@ -0,0 +1,323 @@
+/*
+ * drivers/input/touchscreen/panjit_i2c.c
+ *
+ * Touchscreen class input driver for Panjit touch panel using I2C bus
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/i2c/panjit_ts.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#define CSR 0x00
+ #define CSR_SCAN_EN (1 << 3)
+ #define CSR_SLEEP_EN (1 << 7)
+#define C_FLAG 0x01
+#define X1_H 0x03
+
+#define DRIVER_NAME "panjit_touch"
+
+struct pj_data {
+ struct input_dev *input_dev;
+ struct i2c_client *client;
+ int gpio_reset;
+};
+
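+/*
+ * Raw event record as read from the controller starting at X1_H: two
+ * big-endian (x, y) coordinate pairs followed by the finger count and a
+ * gesture code.
+ */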
+struct pj_event {
+ __be16 coord[2][2];
+ __u8 fingers;
+ __u8 gesture;
+};
+
+union pj_buff {
+ struct pj_event data;
+	unsigned char buff[sizeof(struct pj_event)];
+};
+
+static void pj_reset(struct pj_data *touch)
+{
+ if (touch->gpio_reset < 0)
+ return;
+
+ gpio_set_value(touch->gpio_reset, 1);
+ msleep(50);
+ gpio_set_value(touch->gpio_reset, 0);
+ msleep(50);
+}
+
+static irqreturn_t pj_irq(int irq, void *dev_id)
+{
+ struct pj_data *touch = dev_id;
+ struct i2c_client *client = touch->client;
+ union pj_buff event;
+ int ret, i;
+
+ ret = i2c_smbus_read_i2c_block_data(client, X1_H,
+ sizeof(event.buff), event.buff);
+ if (WARN_ON(ret < 0)) {
+ dev_err(&client->dev, "error %d reading event data\n", ret);
+ return IRQ_NONE;
+ }
+ ret = i2c_smbus_write_byte_data(client, C_FLAG, 0);
+ if (WARN_ON(ret < 0)) {
+ dev_err(&client->dev, "error %d clearing interrupt\n", ret);
+ return IRQ_NONE;
+ }
+
+ input_report_key(touch->input_dev, BTN_TOUCH,
+ (event.data.fingers == 1 || event.data.fingers == 2));
+ input_report_key(touch->input_dev, BTN_2, (event.data.fingers == 2));
+
+ if (!event.data.fingers || (event.data.fingers > 2))
+ goto out;
+
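+	/* multi-touch protocol type A: report each contact's position and
+	 * a 1-based tracking id, each contact terminated by
+	 * input_mt_sync() */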
+ for (i = 0; i < event.data.fingers; i++) {
+ input_report_abs(touch->input_dev, ABS_MT_POSITION_X,
+ __be16_to_cpu(event.data.coord[i][0]));
+ input_report_abs(touch->input_dev, ABS_MT_POSITION_Y,
+ __be16_to_cpu(event.data.coord[i][1]));
+ input_report_abs(touch->input_dev, ABS_MT_TRACKING_ID, i + 1);
+ input_mt_sync(touch->input_dev);
+ }
+
+out:
+ input_sync(touch->input_dev);
+ return IRQ_HANDLED;
+}
+
+static int pj_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct panjit_i2c_ts_platform_data *pdata = client->dev.platform_data;
+ struct pj_data *touch = NULL;
+ struct input_dev *input_dev = NULL;
+ int ret = 0;
+
+ touch = kzalloc(sizeof(struct pj_data), GFP_KERNEL);
+ if (!touch) {
+ dev_err(&client->dev, "%s: no memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ touch->gpio_reset = -EINVAL;
+
+ if (pdata) {
+ ret = gpio_request(pdata->gpio_reset, "panjit_reset");
+ if (!ret) {
+ ret = gpio_direction_output(pdata->gpio_reset, 1);
+ if (ret < 0)
+ gpio_free(pdata->gpio_reset);
+ }
+
+ if (!ret)
+ touch->gpio_reset = pdata->gpio_reset;
+ else
+ dev_warn(&client->dev, "unable to configure GPIO\n");
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&client->dev, "%s: no memory\n", __func__);
+ kfree(touch);
+ return -ENOMEM;
+ }
+
+ touch->client = client;
+ i2c_set_clientdata(client, touch);
+
+ pj_reset(touch);
+
+ /* clear interrupt */
+ ret = i2c_smbus_write_byte_data(touch->client, C_FLAG, 0);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: clear interrupt failed\n",
+ __func__);
+ goto fail_i2c_or_register;
+ }
+
+ /* enable scanning */
+ ret = i2c_smbus_write_byte_data(touch->client, CSR, CSR_SCAN_EN);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: enable interrupt failed\n",
+ __func__);
+ goto fail_i2c_or_register;
+ }
+
+ touch->input_dev = input_dev;
+ touch->input_dev->name = DRIVER_NAME;
+
+ set_bit(EV_SYN, touch->input_dev->evbit);
+ set_bit(EV_KEY, touch->input_dev->evbit);
+ set_bit(EV_ABS, touch->input_dev->evbit);
+ set_bit(BTN_TOUCH, touch->input_dev->keybit);
+ set_bit(BTN_2, touch->input_dev->keybit);
+
+ /* expose multi-touch capabilities */
+	set_bit(ABS_MT_POSITION_X, touch->input_dev->absbit);
+	set_bit(ABS_MT_POSITION_Y, touch->input_dev->absbit);
+	set_bit(ABS_X, touch->input_dev->absbit);
+	set_bit(ABS_Y, touch->input_dev->absbit);
+
+ /* all coordinates are reported in 0..4095 */
+ input_set_abs_params(touch->input_dev, ABS_X, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_Y, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_HAT0X, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_HAT0Y, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_HAT1X, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_HAT1Y, 0, 4095, 0, 0);
+
+ input_set_abs_params(touch->input_dev, ABS_MT_POSITION_X, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
+ input_set_abs_params(touch->input_dev, ABS_MT_TRACKING_ID, 0, 2, 1, 0);
+
+ ret = input_register_device(touch->input_dev);
+ if (ret) {
+ dev_err(&client->dev, "%s: input_register_device failed\n",
+ __func__);
+ goto fail_i2c_or_register;
+ }
+
+ /* get the irq */
+ ret = request_threaded_irq(touch->client->irq, NULL, pj_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ DRIVER_NAME, touch);
+ if (ret) {
+ dev_err(&client->dev, "%s: request_irq(%d) failed\n",
+ __func__, touch->client->irq);
+ goto fail_irq;
+ }
+
+ dev_info(&client->dev, "%s: initialized\n", __func__);
+ return 0;
+
+fail_irq:
+ input_unregister_device(touch->input_dev);
+
+fail_i2c_or_register:
+ if (touch->gpio_reset >= 0)
+ gpio_free(touch->gpio_reset);
+
+ input_free_device(input_dev);
+ kfree(touch);
+ return ret;
+}
+
+static int pj_suspend(struct i2c_client *client, pm_message_t state)
+{
+ struct pj_data *touch = i2c_get_clientdata(client);
+ int ret;
+
+ if (WARN_ON(!touch))
+ return -EINVAL;
+
+ disable_irq(client->irq);
+
+ /* disable scanning and enable deep sleep */
+ ret = i2c_smbus_write_byte_data(client, CSR, CSR_SLEEP_EN);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: sleep enable fail\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pj_resume(struct i2c_client *client)
+{
+ struct pj_data *touch = i2c_get_clientdata(client);
+ int ret = 0;
+
+ if (WARN_ON(!touch))
+ return -EINVAL;
+
+ pj_reset(touch);
+
+	/* clear pending interrupts, then enable scanning (leaving sleep) */
+ ret = i2c_smbus_write_byte_data(client, C_FLAG, 0);
+ if (ret >= 0)
+ ret = i2c_smbus_write_byte_data(client, CSR, CSR_SCAN_EN);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: scan enable fail\n", __func__);
+ return ret;
+ }
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static int pj_remove(struct i2c_client *client)
+{
+ struct pj_data *touch = i2c_get_clientdata(client);
+
+ if (!touch)
+ return -EINVAL;
+
+ free_irq(touch->client->irq, touch);
+ if (touch->gpio_reset >= 0)
+ gpio_free(touch->gpio_reset);
+ input_unregister_device(touch->input_dev);
+ input_free_device(touch->input_dev);
+ kfree(touch);
+ return 0;
+}
+
+static const struct i2c_device_id panjit_ts_id[] = {
+ { DRIVER_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver panjit_driver = {
+ .probe = pj_probe,
+ .remove = pj_remove,
+ .suspend = pj_suspend,
+ .resume = pj_resume,
+ .id_table = panjit_ts_id,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init panjit_init(void)
+{
+ int e;
+
+ e = i2c_add_driver(&panjit_driver);
+ if (e != 0) {
+		pr_err("%s: failed to register with I2C bus, error %d\n",
+		       __func__, e);
+ }
+ return e;
+}
+
+static void __exit panjit_exit(void)
+{
+ i2c_del_driver(&panjit_driver);
+}
+
+module_init(panjit_init);
+module_exit(panjit_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Panjit I2C touch driver");
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f6e4d0475351..e3b374110897 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -553,6 +553,7 @@ config VIDEO_VIVI
source "drivers/media/video/davinci/Kconfig"
source "drivers/media/video/omap/Kconfig"
+source "drivers/media/video/tegra/Kconfig"
source "drivers/media/video/bt8xx/Kconfig"
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 40f98fba5f88..399ff510d79c 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -179,6 +179,7 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-y += davinci/
obj-$(CONFIG_ARCH_OMAP) += omap/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/tegra/Kconfig b/drivers/media/video/tegra/Kconfig
new file mode 100644
index 000000000000..ae77e8994dc8
--- /dev/null
+++ b/drivers/media/video/tegra/Kconfig
@@ -0,0 +1,10 @@
+source "drivers/media/video/tegra/avp/Kconfig"
+
+config TEGRA_CAMERA
+	bool "Enable support for Tegra camera/ISP hardware"
+	depends on ARCH_TEGRA
+	default y
+	help
+	  Enables support for the Tegra camera interface.
+
+	  If unsure, say Y.
diff --git a/drivers/media/video/tegra/Makefile b/drivers/media/video/tegra/Makefile
new file mode 100644
index 000000000000..68b5c42b0e7a
--- /dev/null
+++ b/drivers/media/video/tegra/Makefile
@@ -0,0 +1,2 @@
+obj-y += avp/
+obj-$(CONFIG_TEGRA_CAMERA) += tegra_camera.o
diff --git a/drivers/media/video/tegra/avp/Kconfig b/drivers/media/video/tegra/avp/Kconfig
new file mode 100644
index 000000000000..fdd208510fcb
--- /dev/null
+++ b/drivers/media/video/tegra/avp/Kconfig
@@ -0,0 +1,25 @@
+config TEGRA_RPC
+ bool "Enable support for Tegra RPC"
+ depends on ARCH_TEGRA
+ default y
+ help
+	  Enables support for the RPC mechanism necessary for the Tegra
+	  multimedia framework. It is used both to communicate locally on
+	  the CPU between multiple multimedia components and to communicate
+	  with the AVP for offloading media decode.
+
+	  Exports the local Tegra RPC interface on device node
+	  /dev/tegra_rpc. Also provides Tegra fd-based semaphores needed by
+	  the Tegra multimedia framework.
+
+	  If unsure, say Y.
+
+config TEGRA_AVP
+ bool "Enable support for the AVP multimedia offload engine"
+ depends on ARCH_TEGRA && TEGRA_RPC
+ default y
+ help
+	  Enables support for the multimedia offload engine used by the
+	  Tegra multimedia framework.
+
+	  If unsure, say Y.
diff --git a/drivers/media/video/tegra/avp/Makefile b/drivers/media/video/tegra/avp/Makefile
new file mode 100644
index 000000000000..6d8be11c3f81
--- /dev/null
+++ b/drivers/media/video/tegra/avp/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_TEGRA_RPC) += tegra_rpc.o
+obj-$(CONFIG_TEGRA_RPC) += trpc_local.o
+obj-$(CONFIG_TEGRA_RPC) += trpc_sema.o
+obj-$(CONFIG_TEGRA_AVP) += avp.o
+obj-$(CONFIG_TEGRA_AVP) += avp_svc.o
+obj-$(CONFIG_TEGRA_AVP) += headavp.o
diff --git a/drivers/media/video/tegra/avp/avp.c b/drivers/media/video/tegra/avp/avp.c
new file mode 100644
index 000000000000..ced838ac6e2b
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp.c
@@ -0,0 +1,1736 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/irq.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <mach/clk.h>
+#include <mach/io.h>
+#include <mach/iomap.h>
+#include <mach/nvmap.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#include "headavp.h"
+#include "avp_msg.h"
+#include "trpc.h"
+#include "avp.h"
+
+enum {
+ AVP_DBG_TRACE_XPC = 1U << 0,
+ AVP_DBG_TRACE_XPC_IRQ = 1U << 1,
+ AVP_DBG_TRACE_XPC_MSG = 1U << 2,
+ AVP_DBG_TRACE_XPC_CONN = 1U << 3,
+ AVP_DBG_TRACE_TRPC_MSG = 1U << 4,
+ AVP_DBG_TRACE_TRPC_CONN = 1U << 5,
+ AVP_DBG_TRACE_LIB = 1U << 6,
+};
+
+static u32 avp_debug_mask;
+module_param_named(debug_mask, avp_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define DBG(flag, args...) \
+ do { if (unlikely(avp_debug_mask & (flag))) pr_info(args); } while (0)
+
+#define TEGRA_AVP_NAME "tegra-avp"
+
+#define TEGRA_AVP_KERNEL_FW "nvrm_avp.bin"
+
+#define TEGRA_AVP_RESET_VECTOR_ADDR \
+ (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)
+
+#define TEGRA_AVP_RESUME_ADDR IO_ADDRESS(TEGRA_IRAM_BASE)
+
+#define FLOW_CTRL_HALT_COP_EVENTS IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
+#define FLOW_MODE_STOP (0x2 << 29)
+#define FLOW_MODE_NONE 0x0
+
+#define MBOX_FROM_AVP IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
+#define MBOX_TO_AVP IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)
+
+/* Layout of the mailbox registers:
+ * bit 31 - pending message interrupt enable (mailbox full, i.e. valid=1)
+ * bit 30 - message cleared interrupt enable (mailbox empty, i.e. valid=0)
+ * bit 29 - message valid. peer clears this bit after reading msg
+ * bits 27:0 - message data
+ */
+#define MBOX_MSG_PENDING_INT_EN (1 << 31)
+#define MBOX_MSG_READ_INT_EN (1 << 30)
+#define MBOX_MSG_VALID (1 << 29)
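+
+/*
+ * The 28 data bits carry a 16-byte-aligned address shifted right by four;
+ * the receiver shifts left by four to recover it. See tegra_avp_probe():
+ *
+ *	avp->msg = ((avp->msg_area_addr >> 4) |
+ *		    MBOX_MSG_VALID | MBOX_MSG_PENDING_INT_EN);
+ *
+ * and the matching "mbox_msg <<= 4" in avp_mbox_pending_isr().
+ */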
+
+#define AVP_MSG_MAX_CMD_LEN 16
+#define AVP_MSG_AREA_SIZE (AVP_MSG_MAX_CMD_LEN + TEGRA_RPC_MAX_MSG_LEN)
+
+struct avp_info {
+ struct clk *cop_clk;
+
+ int mbox_from_avp_pend_irq;
+
+ dma_addr_t msg_area_addr;
+ u32 msg;
+ void *msg_to_avp;
+ void *msg_from_avp;
+ struct mutex to_avp_lock;
+ struct mutex from_avp_lock;
+
+ struct work_struct recv_work;
+ struct workqueue_struct *recv_wq;
+
+ struct trpc_node *rpc_node;
+ struct miscdevice misc_dev;
+ bool opened;
+ struct mutex open_lock;
+
+ spinlock_t state_lock;
+ bool initialized;
+ bool shutdown;
+ bool suspending;
+ bool defer_remote;
+
+ struct mutex libs_lock;
+ struct list_head libs;
+ struct nvmap_client *nvmap_libs;
+
+ /* client for driver allocations, persistent */
+ struct nvmap_client *nvmap_drv;
+ struct nvmap_handle_ref *kernel_handle;
+ void *kernel_data;
+ unsigned long kernel_phys;
+
+ struct nvmap_handle_ref *iram_backup_handle;
+ void *iram_backup_data;
+ unsigned long iram_backup_phys;
+ unsigned long resume_addr;
+
+ struct trpc_endpoint *avp_ep;
+ struct rb_root endpoints;
+
+ struct avp_svc_info *avp_svc;
+};
+
+struct remote_info {
+ u32 loc_id;
+ u32 rem_id;
+ struct kref ref;
+
+ struct trpc_endpoint *trpc_ep;
+ struct rb_node rb_node;
+};
+
+struct lib_item {
+ struct list_head list;
+ u32 handle;
+ char name[TEGRA_AVP_LIB_MAX_NAME];
+};
+
+static struct avp_info *tegra_avp;
+
+static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len);
+static void avp_trpc_close(struct trpc_endpoint *ep);
+static void avp_trpc_show(struct seq_file *s, struct trpc_endpoint *ep);
+static void libs_cleanup(struct avp_info *avp);
+
+static struct trpc_ep_ops remote_ep_ops = {
+ .send = avp_trpc_send,
+ .close = avp_trpc_close,
+ .show = avp_trpc_show,
+};
+
+static struct remote_info *rinfo_alloc(struct avp_info *avp)
+{
+ struct remote_info *rinfo;
+
+ rinfo = kzalloc(sizeof(struct remote_info), GFP_KERNEL);
+ if (!rinfo)
+ return NULL;
+ kref_init(&rinfo->ref);
+ return rinfo;
+}
+
+static void _rinfo_release(struct kref *ref)
+{
+ struct remote_info *rinfo = container_of(ref, struct remote_info, ref);
+ kfree(rinfo);
+}
+
+static inline void rinfo_get(struct remote_info *rinfo)
+{
+ kref_get(&rinfo->ref);
+}
+
+static inline void rinfo_put(struct remote_info *rinfo)
+{
+ kref_put(&rinfo->ref, _rinfo_release);
+}
+
+static int remote_insert(struct avp_info *avp, struct remote_info *rinfo)
+{
+ struct rb_node **p;
+ struct rb_node *parent;
+ struct remote_info *tmp;
+
+ p = &avp->endpoints.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct remote_info, rb_node);
+
+ if (rinfo->loc_id < tmp->loc_id)
+ p = &(*p)->rb_left;
+ else if (rinfo->loc_id > tmp->loc_id)
+ p = &(*p)->rb_right;
+ else {
+ pr_info("%s: avp endpoint id=%x (%s) already exists\n",
+ __func__, rinfo->loc_id,
+ trpc_name(rinfo->trpc_ep));
+ return -EEXIST;
+ }
+ }
+ rb_link_node(&rinfo->rb_node, parent, p);
+ rb_insert_color(&rinfo->rb_node, &avp->endpoints);
+ rinfo_get(rinfo);
+ return 0;
+}
+
+static struct remote_info *remote_find(struct avp_info *avp, u32 local_id)
+{
+ struct rb_node *n = avp->endpoints.rb_node;
+ struct remote_info *rinfo;
+
+ while (n) {
+ rinfo = rb_entry(n, struct remote_info, rb_node);
+
+ if (local_id < rinfo->loc_id)
+ n = n->rb_left;
+ else if (local_id > rinfo->loc_id)
+ n = n->rb_right;
+ else
+ return rinfo;
+ }
+ return NULL;
+}
+
+static void remote_remove(struct avp_info *avp, struct remote_info *rinfo)
+{
+ rb_erase(&rinfo->rb_node, &avp->endpoints);
+ rinfo_put(rinfo);
+}
+
+/* test whether or not the trpc endpoint provided is a valid AVP node
+ * endpoint */
+static struct remote_info *validate_trpc_ep(struct avp_info *avp,
+ struct trpc_endpoint *ep)
+{
+ struct remote_info *tmp = trpc_priv(ep);
+ struct remote_info *rinfo;
+
+ if (!tmp)
+ return NULL;
+ rinfo = remote_find(avp, tmp->loc_id);
+ if (rinfo && rinfo == tmp && rinfo->trpc_ep == ep)
+ return rinfo;
+ return NULL;
+}
+
+static void avp_trpc_show(struct seq_file *s, struct trpc_endpoint *ep)
+{
+ struct avp_info *avp = tegra_avp;
+ struct remote_info *rinfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ rinfo = validate_trpc_ep(avp, ep);
+ if (!rinfo) {
+ seq_printf(s, " <unknown>\n");
+ goto out;
+ }
+ seq_printf(s, " loc_id:0x%x\n rem_id:0x%x\n",
+ rinfo->loc_id, rinfo->rem_id);
+out:
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+}
+
+static inline void mbox_writel(u32 val, void __iomem *mbox)
+{
+ writel(val, mbox);
+}
+
+static inline u32 mbox_readl(void __iomem *mbox)
+{
+ return readl(mbox);
+}
+
+static inline void msg_ack_remote(struct avp_info *avp, u32 cmd, u32 arg)
+{
+ struct msg_ack *ack = avp->msg_from_avp;
+
+ /* must make sure the arg is there first */
+ ack->arg = arg;
+ wmb();
+ ack->cmd = cmd;
+ wmb();
+}
+
+static inline u32 msg_recv_get_cmd(struct avp_info *avp)
+{
+ volatile u32 *cmd = avp->msg_from_avp;
+ rmb();
+ return *cmd;
+}
+
+static inline int __msg_write(struct avp_info *avp, void *hdr, size_t hdr_len,
+ void *buf, size_t len)
+{
+ memcpy(avp->msg_to_avp, hdr, hdr_len);
+ if (buf && len)
+ memcpy(avp->msg_to_avp + hdr_len, buf, len);
+ mbox_writel(avp->msg, MBOX_TO_AVP);
+ return 0;
+}
+
+static inline int msg_write(struct avp_info *avp, void *hdr, size_t hdr_len,
+ void *buf, size_t len)
+{
+ /* rem_ack is a pointer into shared memory that the AVP modifies */
+ volatile u32 *rem_ack = avp->msg_to_avp;
+ unsigned long endtime = jiffies + HZ;
+
+ /* the other side ack's the message by clearing the first word,
+ * wait for it to do so */
+ rmb();
+ while (*rem_ack != 0 && time_before(jiffies, endtime)) {
+ usleep_range(100, 2000);
+ rmb();
+ }
+ if (*rem_ack != 0)
+ return -ETIMEDOUT;
+ __msg_write(avp, hdr, hdr_len, buf, len);
+ return 0;
+}
+
+static inline int msg_check_ack(struct avp_info *avp, u32 cmd, u32 *arg)
+{
+ struct msg_ack ack;
+
+ rmb();
+ memcpy(&ack, avp->msg_to_avp, sizeof(ack));
+ if (ack.cmd != cmd)
+ return -ENOENT;
+ if (arg)
+ *arg = ack.arg;
+ return 0;
+}
+
+/* polls up to ~200ms (HZ / 5) for the remote's ack of a sent command */
+static int msg_wait_ack_locked(struct avp_info *avp, u32 cmd, u32 *arg)
+{
+ /* rem_ack is a pointer into shared memory that the AVP modifies */
+ volatile u32 *rem_ack = avp->msg_to_avp;
+ unsigned long endtime = jiffies + HZ / 5;
+ int ret;
+
+	do {
+		ret = msg_check_ack(avp, cmd, arg);
+		if (!ret)
+			break;
+		usleep_range(1000, 5000);
+	} while (time_before(jiffies, endtime));
+
+ /* if we timed out, try one more time */
+ if (ret)
+ ret = msg_check_ack(avp, cmd, arg);
+
+ /* clear out the ack */
+ *rem_ack = 0;
+ wmb();
+ return ret;
+}
+
+static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len)
+{
+ struct avp_info *avp = tegra_avp;
+ struct remote_info *rinfo;
+ struct msg_port_data msg;
+ int ret;
+ unsigned long flags;
+
+	DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: ep=%p priv=%p buf=%p len=%zu\n",
+	    __func__, ep, trpc_priv(ep), buf, len);
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (unlikely(avp->suspending && trpc_peer(ep) != avp->avp_ep)) {
+ ret = -EBUSY;
+ goto err_state_locked;
+ } else if (avp->shutdown) {
+ ret = -ENODEV;
+ goto err_state_locked;
+ }
+ rinfo = validate_trpc_ep(avp, ep);
+ if (!rinfo) {
+ ret = -ENOTTY;
+ goto err_state_locked;
+ }
+ rinfo_get(rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ msg.cmd = CMD_MESSAGE;
+ msg.port_id = rinfo->rem_id;
+ msg.msg_len = len;
+
+ mutex_lock(&avp->to_avp_lock);
+ ret = msg_write(avp, &msg, sizeof(msg), buf, len);
+ mutex_unlock(&avp->to_avp_lock);
+
+ DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: msg sent for %s (%x->%x) (%d)\n",
+ __func__, trpc_name(ep), rinfo->loc_id, rinfo->rem_id, ret);
+ rinfo_put(rinfo);
+ return ret;
+
+err_state_locked:
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return ret;
+}
+
+static int _send_disconnect(struct avp_info *avp, u32 port_id)
+{
+ struct msg_disconnect msg;
+ int ret;
+
+ msg.cmd = CMD_DISCONNECT;
+ msg.port_id = port_id;
+
+ mutex_lock(&avp->to_avp_lock);
+ ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ pr_err("%s: remote has not acked last message (%x)\n", __func__,
+ port_id);
+ goto err_msg_write;
+ }
+
+ ret = msg_wait_ack_locked(avp, CMD_ACK, NULL);
+ if (ret) {
+ pr_err("%s: remote end won't respond for %x\n", __func__,
+ port_id);
+ goto err_wait_ack;
+ }
+
+ DBG(AVP_DBG_TRACE_XPC_CONN, "%s: sent disconnect msg for %x\n",
+ __func__, port_id);
+
+err_wait_ack:
+err_msg_write:
+ mutex_unlock(&avp->to_avp_lock);
+ return ret;
+}
+
+/* Note: Assumes that the rinfo was previously successfully added to the
+ * endpoints rb_tree. The initial refcnt of 1 is inherited by the port when the
+ * trpc endpoint is created with the trpc_xxx functions. Thus, on close,
+ * we must drop that reference here.
+ * The avp->endpoints rb_tree keeps its own reference on rinfo objects.
+ *
+ * The try_connect function does not use this on error because it needs to
+ * split the close of trpc_ep port and the put.
+ */
+static inline void remote_close(struct remote_info *rinfo)
+{
+ trpc_close(rinfo->trpc_ep);
+ rinfo_put(rinfo);
+}
+
+static void avp_trpc_close(struct trpc_endpoint *ep)
+{
+ struct avp_info *avp = tegra_avp;
+ struct remote_info *rinfo;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (avp->shutdown) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return;
+ }
+
+ rinfo = validate_trpc_ep(avp, ep);
+ if (!rinfo) {
+ pr_err("%s: tried to close invalid port '%s' endpoint (%p)\n",
+ __func__, trpc_name(ep), ep);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return;
+ }
+ rinfo_get(rinfo);
+ remote_remove(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: closing '%s' (%x)\n", __func__,
+ trpc_name(ep), rinfo->rem_id);
+
+ ret = _send_disconnect(avp, rinfo->rem_id);
+ if (ret)
+ pr_err("%s: error while closing remote port '%s' (%x)\n",
+ __func__, trpc_name(ep), rinfo->rem_id);
+ remote_close(rinfo);
+ rinfo_put(rinfo);
+}
+
+/* takes and holds avp->from_avp_lock */
+static void recv_msg_lock(struct avp_info *avp)
+{
+ unsigned long flags;
+
+ mutex_lock(&avp->from_avp_lock);
+ spin_lock_irqsave(&avp->state_lock, flags);
+ avp->defer_remote = true;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+}
+
+/* MUST be called with avp->from_avp_lock held */
+static void recv_msg_unlock(struct avp_info *avp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ avp->defer_remote = false;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ mutex_unlock(&avp->from_avp_lock);
+}
+
+static int avp_node_try_connect(struct trpc_node *node,
+ struct trpc_node *src_node,
+ struct trpc_endpoint *from)
+{
+ struct avp_info *avp = tegra_avp;
+ const char *port_name = trpc_name(from);
+ struct remote_info *rinfo;
+ struct msg_connect msg;
+ int ret;
+ unsigned long flags;
+ int len;
+
+ DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: trying connect from %s\n", __func__,
+ port_name);
+
+ if (node != avp->rpc_node || node->priv != avp)
+ return -ENODEV;
+
+ len = strlen(port_name);
+ if (len > XPC_PORT_NAME_LEN) {
+		pr_err("%s: port name (%s) too long\n", __func__, port_name);
+ return -EINVAL;
+ }
+
+ ret = 0;
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (avp->suspending) {
+ ret = -EBUSY;
+ } else if (likely(src_node != avp->rpc_node)) {
+ /* only check for initialized when the source is not ourselves
+ * since we'll end up calling into here during initialization */
+ if (!avp->initialized)
+ ret = -ENODEV;
+ } else if (strncmp(port_name, "RPC_AVP_PORT", XPC_PORT_NAME_LEN)) {
+		/* we only allow connections to ourselves for the
+		 * cpu-to-avp port */
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ if (ret)
+ return ret;
+
+ rinfo = rinfo_alloc(avp);
+ if (!rinfo) {
+ pr_err("%s: cannot alloc mem for rinfo\n", __func__);
+ ret = -ENOMEM;
+ goto err_alloc_rinfo;
+ }
+ rinfo->loc_id = (u32)rinfo;
+
+ msg.cmd = CMD_CONNECT;
+ msg.port_id = rinfo->loc_id;
+ memcpy(msg.name, port_name, len);
+ memset(msg.name + len, 0, XPC_PORT_NAME_LEN - len);
+
+ /* when trying to connect to remote, we need to block remote
+ * messages until we get our ack and can insert it into our lists.
+ * Otherwise, we can get a message from the other side for a port
+ * that we haven't finished setting up.
+ *
+ * 'defer_remote' will force the irq handler to not process messages
+ * at irq context but to schedule work to do so. The work function will
+ * take the from_avp_lock and everything should stay consistent.
+ */
+ recv_msg_lock(avp);
+ mutex_lock(&avp->to_avp_lock);
+ ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ pr_err("%s: remote has not acked last message (%s)\n", __func__,
+ port_name);
+ mutex_unlock(&avp->to_avp_lock);
+ goto err_msg_write;
+ }
+ ret = msg_wait_ack_locked(avp, CMD_RESPONSE, &rinfo->rem_id);
+ mutex_unlock(&avp->to_avp_lock);
+
+ if (ret) {
+ pr_err("%s: remote end won't respond for '%s'\n", __func__,
+ port_name);
+ goto err_wait_ack;
+ }
+ if (!rinfo->rem_id) {
+ pr_err("%s: can't connect to '%s'\n", __func__, port_name);
+ ret = -ECONNREFUSED;
+ goto err_nack;
+ }
+
+ DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: got conn ack '%s' (%x <-> %x)\n",
+ __func__, port_name, rinfo->loc_id, rinfo->rem_id);
+
+ rinfo->trpc_ep = trpc_create_peer(node, from, &remote_ep_ops,
+ rinfo);
+ if (!rinfo->trpc_ep) {
+ pr_err("%s: cannot create peer for %s\n", __func__, port_name);
+ ret = -EINVAL;
+ goto err_create_peer;
+ }
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ ret = remote_insert(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ if (ret)
+ goto err_ep_insert;
+
+ recv_msg_unlock(avp);
+ return 0;
+
+err_ep_insert:
+ trpc_close(rinfo->trpc_ep);
+err_create_peer:
+ _send_disconnect(avp, rinfo->rem_id);
+err_nack:
+err_wait_ack:
+err_msg_write:
+ recv_msg_unlock(avp);
+ rinfo_put(rinfo);
+err_alloc_rinfo:
+ return ret;
+}
+
+static void process_disconnect_locked(struct avp_info *avp,
+ struct msg_data *raw_msg)
+{
+ struct msg_disconnect *disconn_msg = (struct msg_disconnect *)raw_msg;
+ unsigned long flags;
+ struct remote_info *rinfo;
+
+ DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got disconnect (%x)\n", __func__,
+ disconn_msg->port_id);
+
+ if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, disconn_msg,
+ sizeof(struct msg_disconnect));
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ rinfo = remote_find(avp, disconn_msg->port_id);
+ if (!rinfo) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ pr_warning("%s: got disconnect for unknown port %x\n",
+ __func__, disconn_msg->port_id);
+ goto ack;
+ }
+ rinfo_get(rinfo);
+ remote_remove(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ remote_close(rinfo);
+ rinfo_put(rinfo);
+ack:
+ msg_ack_remote(avp, CMD_ACK, 0);
+}
+
+static void process_connect_locked(struct avp_info *avp,
+ struct msg_data *raw_msg)
+{
+ struct msg_connect *conn_msg = (struct msg_connect *)raw_msg;
+ struct trpc_endpoint *trpc_ep;
+ struct remote_info *rinfo;
+ char name[XPC_PORT_NAME_LEN + 1];
+ int ret;
+ u32 local_port_id = 0;
+ unsigned long flags;
+
+ DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got connect (%x)\n", __func__,
+ conn_msg->port_id);
+ if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ conn_msg, sizeof(struct msg_connect));
+
+ rinfo = rinfo_alloc(avp);
+ if (!rinfo) {
+ pr_err("%s: cannot alloc mem for rinfo\n", __func__);
+ ret = -ENOMEM;
+ goto ack;
+ }
+ rinfo->loc_id = (u32)rinfo;
+ rinfo->rem_id = conn_msg->port_id;
+
+ memcpy(name, conn_msg->name, XPC_PORT_NAME_LEN);
+ name[XPC_PORT_NAME_LEN] = '\0';
+ trpc_ep = trpc_create_connect(avp->rpc_node, name, &remote_ep_ops,
+ rinfo, 0);
+ if (IS_ERR(trpc_ep)) {
+ pr_err("%s: remote requested unknown port '%s' (%d)\n",
+ __func__, name, (int)PTR_ERR(trpc_ep));
+ goto nack;
+ }
+ rinfo->trpc_ep = trpc_ep;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ ret = remote_insert(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ if (ret)
+ goto err_ep_insert;
+
+ local_port_id = rinfo->loc_id;
+ goto ack;
+
+err_ep_insert:
+ trpc_close(trpc_ep);
+nack:
+ rinfo_put(rinfo);
+ local_port_id = 0;
+ack:
+ msg_ack_remote(avp, CMD_RESPONSE, local_port_id);
+}
+
+static int process_message(struct avp_info *avp, struct msg_data *raw_msg,
+ gfp_t gfp_flags)
+{
+ struct msg_port_data *port_msg = (struct msg_port_data *)raw_msg;
+ struct remote_info *rinfo;
+ unsigned long flags;
+ int len;
+ int ret;
+
+ len = min(port_msg->msg_len, (u32)TEGRA_RPC_MAX_MSG_LEN);
+
+ if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG) {
+ pr_info("%s: got message cmd=%x port=%x len=%d\n", __func__,
+ port_msg->cmd, port_msg->port_id, port_msg->msg_len);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, port_msg,
+ sizeof(struct msg_port_data) + len);
+ }
+
+ if (len != port_msg->msg_len)
+ pr_err("%s: message sent is too long (%d bytes)\n", __func__,
+ port_msg->msg_len);
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ rinfo = remote_find(avp, port_msg->port_id);
+ if (rinfo) {
+ rinfo_get(rinfo);
+ trpc_get(rinfo->trpc_ep);
+ } else {
+ pr_err("%s: port %x not found\n", __func__, port_msg->port_id);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ ret = -ENOENT;
+ goto ack;
+ }
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ ret = trpc_send_msg(avp->rpc_node, rinfo->trpc_ep, port_msg->data,
+ len, gfp_flags);
+ if (ret == -ENOMEM) {
+ trpc_put(rinfo->trpc_ep);
+ rinfo_put(rinfo);
+ goto no_ack;
+ } else if (ret) {
+ pr_err("%s: cannot queue message for port %s/%x (%d)\n",
+ __func__, trpc_name(rinfo->trpc_ep), rinfo->loc_id,
+ ret);
+ } else {
+ DBG(AVP_DBG_TRACE_XPC_MSG, "%s: msg queued\n", __func__);
+ }
+
+ trpc_put(rinfo->trpc_ep);
+ rinfo_put(rinfo);
+ack:
+ msg_ack_remote(avp, CMD_ACK, 0);
+no_ack:
+ return ret;
+}
+
+static void process_avp_message(struct work_struct *work)
+{
+ struct avp_info *avp = container_of(work, struct avp_info, recv_work);
+ struct msg_data *msg = avp->msg_from_avp;
+
+ mutex_lock(&avp->from_avp_lock);
+ rmb();
+ switch (msg->cmd) {
+ case CMD_CONNECT:
+ process_connect_locked(avp, msg);
+ break;
+ case CMD_DISCONNECT:
+ process_disconnect_locked(avp, msg);
+ break;
+ case CMD_MESSAGE:
+ process_message(avp, msg, GFP_KERNEL);
+ break;
+ default:
+ pr_err("%s: unknown cmd (%x) received\n", __func__, msg->cmd);
+ break;
+ }
+ mutex_unlock(&avp->from_avp_lock);
+}
+
+static irqreturn_t avp_mbox_pending_isr(int irq, void *data)
+{
+ struct avp_info *avp = data;
+ struct msg_data *msg = avp->msg_from_avp;
+ u32 mbox_msg;
+ unsigned long flags;
+ int ret;
+
+ mbox_msg = mbox_readl(MBOX_FROM_AVP);
+ mbox_writel(0, MBOX_FROM_AVP);
+
+ DBG(AVP_DBG_TRACE_XPC_IRQ, "%s: got msg %x\n", __func__, mbox_msg);
+
+ /* XXX: re-use previous message? */
+ if (!(mbox_msg & MBOX_MSG_VALID)) {
+ WARN_ON(1);
+ goto done;
+ }
+
+ mbox_msg <<= 4;
+ if (mbox_msg == 0x2f00bad0UL) {
+ pr_info("%s: petting watchdog\n", __func__);
+ goto done;
+ }
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (avp->shutdown) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ goto done;
+ } else if (avp->defer_remote) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ goto defer;
+ }
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ rmb();
+ if (msg->cmd == CMD_MESSAGE) {
+ ret = process_message(avp, msg, GFP_ATOMIC);
+ if (ret != -ENOMEM)
+ goto done;
+ pr_info("%s: deferring message (%d)\n", __func__, ret);
+ }
+defer:
+ queue_work(avp->recv_wq, &avp->recv_work);
+done:
+ return IRQ_HANDLED;
+}
+
+static int avp_reset(struct avp_info *avp, unsigned long reset_addr)
+{
+ unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
+ dma_addr_t stub_data_phys;
+ unsigned long timeout;
+ int ret = 0;
+
+ writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+
+ _tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
+ _tegra_avp_boot_stub_data.jump_addr = reset_addr;
+ wmb();
+ stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
+ sizeof(_tegra_avp_boot_stub_data),
+ DMA_TO_DEVICE);
+
+ writel(stub_code_phys, TEGRA_AVP_RESET_VECTOR_ADDR);
+
+ tegra_periph_reset_assert(avp->cop_clk);
+ udelay(10);
+ tegra_periph_reset_deassert(avp->cop_clk);
+
+ writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);
+
+ /* the AVP firmware will reprogram its reset vector as the kernel
+ * starts, so a dead kernel can be detected by polling this value */
+ timeout = jiffies + msecs_to_jiffies(2000);
+ while (time_before(jiffies, timeout)) {
+ if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) != stub_code_phys)
+ break;
+ cpu_relax();
+ }
+ if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) == stub_code_phys)
+ ret = -EINVAL;
+ WARN_ON(ret);
+ dma_unmap_single(NULL, stub_data_phys,
+ sizeof(_tegra_avp_boot_stub_data),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
+static void avp_halt(struct avp_info *avp)
+{
+ /* ensure the AVP is halted */
+ writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
+ tegra_periph_reset_assert(avp->cop_clk);
+
+ /* set up the initial memory areas and mailbox contents */
+ *((u32 *)avp->msg_from_avp) = 0;
+ *((u32 *)avp->msg_to_avp) = 0xfeedf00d;
+ mbox_writel(0, MBOX_FROM_AVP);
+ mbox_writel(0, MBOX_TO_AVP);
+}
+
+/* Note: CPU_PORT server and AVP_PORT client are registered with the avp
+ * node, but are actually meant to be processed on our side (either
+ * by the svc thread for processing remote calls or by the client
+ * of the char dev for receiving replies for managing remote
+ * libraries/modules. */
+
+static int avp_init(struct avp_info *avp, const char *fw_file)
+{
+ const struct firmware *avp_fw;
+ int ret;
+ struct trpc_endpoint *ep;
+
+ avp->nvmap_libs = nvmap_create_client(nvmap_dev, "avp_libs");
+ if (IS_ERR(avp->nvmap_libs)) {
+ pr_err("%s: cannot create libs nvmap client\n", __func__);
+ ret = PTR_ERR(avp->nvmap_libs);
+ goto err_nvmap_create_libs_client;
+ }
+
+ /* put the address of the shared mem area into the mailbox for AVP
+ * to read out when its kernel boots. */
+ mbox_writel(avp->msg, MBOX_TO_AVP);
+
+ ret = request_firmware(&avp_fw, fw_file, avp->misc_dev.this_device);
+ if (ret) {
+ pr_err("%s: Cannot read firmware '%s'\n", __func__, fw_file);
+ goto err_req_fw;
+ }
+	pr_info("%s: read firmware from '%s' (%zu bytes)\n", __func__,
+		fw_file, avp_fw->size);
+ memcpy(avp->kernel_data, avp_fw->data, avp_fw->size);
+ memset(avp->kernel_data + avp_fw->size, 0, SZ_1M - avp_fw->size);
+ wmb();
+ release_firmware(avp_fw);
+
+ ret = avp_reset(avp, AVP_KERNEL_VIRT_BASE);
+ if (ret) {
+		pr_err("%s: cannot reset the AVP, aborting\n", __func__);
+ goto err_reset;
+ }
+
+ enable_irq(avp->mbox_from_avp_pend_irq);
+ /* Initialize the avp_svc *first*. This creates RPC_CPU_PORT to be
+ * ready for remote commands. Then, connect to the
+ * remote RPC_AVP_PORT to be able to send library load/unload and
+ * suspend commands to it */
+ ret = avp_svc_start(avp->avp_svc);
+ if (ret)
+ goto err_avp_svc_start;
+
+ ep = trpc_create_connect(avp->rpc_node, "RPC_AVP_PORT", NULL,
+ NULL, -1);
+ if (IS_ERR(ep)) {
+ pr_err("%s: can't connect to RPC_AVP_PORT server\n", __func__);
+ ret = PTR_ERR(ep);
+ goto err_rpc_avp_port;
+ }
+ avp->avp_ep = ep;
+
+ avp->initialized = true;
+ smp_wmb();
+ pr_info("%s: avp init done\n", __func__);
+ return 0;
+
+err_rpc_avp_port:
+ avp_svc_stop(avp->avp_svc);
+err_avp_svc_start:
+ disable_irq(avp->mbox_from_avp_pend_irq);
+err_reset:
+ avp_halt(avp);
+err_req_fw:
+ nvmap_client_put(avp->nvmap_libs);
+err_nvmap_create_libs_client:
+ avp->nvmap_libs = NULL;
+ return ret;
+}
+
+static void avp_uninit(struct avp_info *avp)
+{
+ unsigned long flags;
+ struct rb_node *n;
+ struct remote_info *rinfo;
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ avp->initialized = false;
+ avp->shutdown = true;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ disable_irq(avp->mbox_from_avp_pend_irq);
+ cancel_work_sync(&avp->recv_work);
+
+ avp_halt(avp);
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ while ((n = rb_first(&avp->endpoints)) != NULL) {
+ rinfo = rb_entry(n, struct remote_info, rb_node);
+ rinfo_get(rinfo);
+ remote_remove(avp, rinfo);
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ remote_close(rinfo);
+ rinfo_put(rinfo);
+
+ spin_lock_irqsave(&avp->state_lock, flags);
+ }
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ avp_svc_stop(avp->avp_svc);
+
+ if (avp->avp_ep) {
+ trpc_close(avp->avp_ep);
+ avp->avp_ep = NULL;
+ }
+
+ libs_cleanup(avp);
+
+ avp->shutdown = false;
+ smp_wmb();
+ pr_info("%s: avp teardown done\n", __func__);
+}
+
+/* returns the remote lib handle in lib->handle */
+static int _load_lib(struct avp_info *avp, struct tegra_avp_lib *lib)
+{
+ struct svc_lib_attach svc;
+ struct svc_lib_attach_resp resp;
+ const struct firmware *fw;
+ void *args;
+ struct nvmap_handle_ref *lib_handle;
+ void *lib_data;
+ unsigned long lib_phys;
+ int ret;
+
+ DBG(AVP_DBG_TRACE_LIB, "avp_lib: loading library '%s'\n", lib->name);
+
+ args = kmalloc(lib->args_len, GFP_KERNEL);
+ if (!args) {
+ pr_err("avp_lib: can't alloc mem for args (%d)\n",
+ lib->args_len);
+ return -ENOMEM;
+ }
+ if (copy_from_user(args, lib->args, lib->args_len)) {
+ pr_err("avp_lib: can't copy lib args\n");
+ ret = -EFAULT;
+ goto err_cp_args;
+ }
+
+ ret = request_firmware(&fw, lib->name, avp->misc_dev.this_device);
+ if (ret) {
+ pr_err("avp_lib: Cannot read firmware '%s'\n", lib->name);
+ goto err_req_fw;
+ }
+
+ lib_handle = nvmap_alloc(avp->nvmap_libs, fw->size, L1_CACHE_BYTES,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR(lib_handle)) {
+ pr_err("avp_lib: can't nvmap alloc for lib '%s'\n", lib->name);
+ ret = PTR_ERR(lib_handle);
+ goto err_nvmap_alloc;
+ }
+
+ lib_data = nvmap_mmap(lib_handle);
+ if (!lib_data) {
+ pr_err("avp_lib: can't nvmap map for lib '%s'\n", lib->name);
+ ret = -ENOMEM;
+ goto err_nvmap_mmap;
+ }
+
+ lib_phys = nvmap_pin(avp->nvmap_libs, lib_handle);
+ if (IS_ERR((void *)lib_phys)) {
+ pr_err("avp_lib: can't nvmap pin for lib '%s'\n", lib->name);
+ ret = PTR_ERR(lib_handle);
+ goto err_nvmap_pin;
+ }
+
+ memcpy(lib_data, fw->data, fw->size);
+
+ svc.svc_id = SVC_LIBRARY_ATTACH;
+ svc.address = lib_phys;
+ svc.args_len = lib->args_len;
+ svc.lib_size = fw->size;
+ svc.reason = lib->greedy ? AVP_LIB_REASON_ATTACH_GREEDY :
+ AVP_LIB_REASON_ATTACH;
+ memcpy(svc.args, args, lib->args_len);
+ wmb();
+
+ /* send message, wait for reply */
+ ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+ GFP_KERNEL);
+ if (ret)
+ goto err_send_msg;
+
+ ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
+ sizeof(resp), -1);
+ if (ret != sizeof(resp)) {
+ pr_err("avp_lib: Couldn't get lib load reply (%d)\n", ret);
+ goto err_recv_msg;
+ } else if (resp.err) {
+ pr_err("avp_lib: got remote error (%d) while loading lib %s\n",
+ resp.err, lib->name);
+ ret = -EPROTO;
+ goto err_recv_msg;
+ }
+ lib->handle = resp.lib_id;
+ ret = 0;
+ DBG(AVP_DBG_TRACE_LIB,
+ "avp_lib: Successfully loaded library %s (lib_id=%x)\n",
+ lib->name, resp.lib_id);
+
+ /* We free the memory here because by this point the AVP has already
+ * requested memory for the library for all the sections since it does
+ * its own relocation and memory management. So, our allocations were
+ * temporary to hand the library code over to the AVP.
+ */
+
+err_recv_msg:
+err_send_msg:
+ nvmap_unpin(avp->nvmap_libs, lib_handle);
+err_nvmap_pin:
+ nvmap_munmap(lib_handle, lib_data);
+err_nvmap_mmap:
+ nvmap_free(avp->nvmap_libs, lib_handle);
+err_nvmap_alloc:
+ release_firmware(fw);
+err_req_fw:
+err_cp_args:
+ kfree(args);
+ return ret;
+}
+
+static int send_unload_lib_msg(struct avp_info *avp, u32 handle,
+ const char *name)
+{
+ struct svc_lib_detach svc;
+ struct svc_lib_detach_resp resp;
+ int ret;
+
+ svc.svc_id = SVC_LIBRARY_DETACH;
+ svc.reason = AVP_LIB_REASON_DETACH;
+ svc.lib_id = handle;
+
+ ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("avp_lib: can't send unload message to avp for '%s'\n",
+ name);
+ goto err;
+ }
+
+ ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
+ sizeof(resp), -1);
+ if (ret != sizeof(resp)) {
+ pr_err("avp_lib: Couldn't get unload reply for '%s' (%d)\n",
+ name, ret);
+ } else if (resp.err) {
+ pr_err("avp_lib: remote error (%d) while unloading lib %s\n",
+ resp.err, name);
+ ret = -EPROTO;
+ } else
+ ret = 0;
+err:
+ return ret;
+}
+
+static struct lib_item *_find_lib_locked(struct avp_info *avp, u32 handle)
+{
+ struct lib_item *item;
+
+ list_for_each_entry(item, &avp->libs, list) {
+ if (item->handle == handle)
+ return item;
+ }
+ return NULL;
+}
+
+static int _insert_lib_locked(struct avp_info *avp, u32 handle, char *name)
+{
+ struct lib_item *item;
+
+ item = kzalloc(sizeof(struct lib_item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->handle = handle;
+ strlcpy(item->name, name, TEGRA_AVP_LIB_MAX_NAME);
+ list_add_tail(&item->list, &avp->libs);
+ return 0;
+}
+
+static void _delete_lib_locked(struct avp_info *avp, struct lib_item *item)
+{
+ list_del(&item->list);
+ kfree(item);
+}
+
+static int handle_load_lib_ioctl(struct avp_info *avp, unsigned long arg)
+{
+ struct tegra_avp_lib lib;
+ int ret;
+
+ if (copy_from_user(&lib, (void __user *)arg, sizeof(lib)))
+ return -EFAULT;
+ lib.name[TEGRA_AVP_LIB_MAX_NAME - 1] = '\0';
+
+ if (lib.args_len > TEGRA_AVP_LIB_MAX_ARGS) {
+ pr_err("%s: library args too long (%d)\n", __func__,
+ lib.args_len);
+ return -E2BIG;
+ }
+
+ mutex_lock(&avp->libs_lock);
+ ret = _load_lib(avp, &lib);
+ if (ret)
+ goto err_load_lib;
+
+ if (copy_to_user((void __user *)arg, &lib, sizeof(lib))) {
+ /* TODO: probably need to free the library from remote
+ * we just loaded */
+ ret = -EFAULT;
+ goto err_copy_to_user;
+ }
+ ret = _insert_lib_locked(avp, lib.handle, lib.name);
+ if (ret) {
+ pr_err("%s: can't insert lib (%d)\n", __func__, ret);
+ goto err_insert_lib;
+ }
+
+ mutex_unlock(&avp->libs_lock);
+ return 0;
+
+err_insert_lib:
+err_copy_to_user:
+ send_unload_lib_msg(avp, lib.handle, lib.name);
+err_load_lib:
+ mutex_unlock(&avp->libs_lock);
+ return ret;
+}
+
+static int handle_unload_lib_ioctl(struct avp_info *avp, unsigned long arg)
+{
+ struct lib_item *item;
+ int ret;
+
+ mutex_lock(&avp->libs_lock);
+ item = _find_lib_locked(avp, (u32)arg);
+ if (!item) {
+ pr_err("avp_lib: avp lib with handle 0x%x not found\n",
+ (u32)arg);
+ ret = -ENOENT;
+ goto err_find;
+ }
+ ret = send_unload_lib_msg(avp, item->handle, item->name);
+ if (!ret)
+ DBG(AVP_DBG_TRACE_LIB, "avp_lib: unloaded '%s'\n", item->name);
+ else
+ pr_err("avp_lib: can't unload lib '%s'/0x%x (%d)\n", item->name,
+ item->handle, ret);
+ _delete_lib_locked(avp, item);
+
+err_find:
+ mutex_unlock(&avp->libs_lock);
+ return ret;
+}
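+
+/*
+ * Userspace usage sketch (hypothetical, for illustration only; the
+ * library name is made up): loading and unloading a library through the
+ * misc device comes down to
+ *
+ *	struct tegra_avp_lib lib = { .name = "libexample.axf",
+ *				     .args = args, .args_len = args_len };
+ *	ioctl(fd, TEGRA_AVP_IOCTL_LOAD_LIB, &lib);
+ *	...
+ *	ioctl(fd, TEGRA_AVP_IOCTL_UNLOAD_LIB, lib.handle);
+ *
+ * where fd is the opened tegra-avp misc device.
+ */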
+
+static void libs_cleanup(struct avp_info *avp)
+{
+ struct lib_item *lib;
+ struct lib_item *lib_tmp;
+
+ mutex_lock(&avp->libs_lock);
+ list_for_each_entry_safe(lib, lib_tmp, &avp->libs, list) {
+ _delete_lib_locked(avp, lib);
+ }
+
+ nvmap_client_put(avp->nvmap_libs);
+ avp->nvmap_libs = NULL;
+ mutex_unlock(&avp->libs_lock);
+}
+
+static long tegra_avp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct avp_info *avp = tegra_avp;
+ int ret;
+
+ if (_IOC_TYPE(cmd) != TEGRA_AVP_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < TEGRA_AVP_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > TEGRA_AVP_IOCTL_MAX_NR)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case TEGRA_AVP_IOCTL_LOAD_LIB:
+ ret = handle_load_lib_ioctl(avp, arg);
+ break;
+ case TEGRA_AVP_IOCTL_UNLOAD_LIB:
+ ret = handle_unload_lib_ioctl(avp, arg);
+ break;
+ default:
+ pr_err("avp_lib: Unknown tegra_avp ioctl 0x%x\n", _IOC_NR(cmd));
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+static int tegra_avp_open(struct inode *inode, struct file *file)
+{
+ struct avp_info *avp = tegra_avp;
+ int ret = 0;
+
+ nonseekable_open(inode, file);
+
+ mutex_lock(&avp->open_lock);
+ /* only one userspace client at a time */
+ if (avp->opened) {
+ pr_err("%s: already have client, aborting\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = avp_init(avp, TEGRA_AVP_KERNEL_FW);
+ avp->opened = !ret;
+out:
+ mutex_unlock(&avp->open_lock);
+ return ret;
+}
+
+static int tegra_avp_release(struct inode *inode, struct file *file)
+{
+ struct avp_info *avp = tegra_avp;
+ int ret = 0;
+
+ pr_info("%s: release\n", __func__);
+ mutex_lock(&avp->open_lock);
+ if (!avp->opened) {
+ pr_err("%s: releasing while in invalid state\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ avp_uninit(avp);
+
+ avp->opened = false;
+out:
+ mutex_unlock(&avp->open_lock);
+ return ret;
+}
+
+static int avp_enter_lp0(struct avp_info *avp)
+{
+ volatile u32 *avp_suspend_done =
+ avp->iram_backup_data + TEGRA_IRAM_SIZE;
+ struct svc_enter_lp0 svc;
+ unsigned long endtime;
+ int ret;
+
+ svc.svc_id = SVC_ENTER_LP0;
+ svc.src_addr = (u32)TEGRA_IRAM_BASE;
+ svc.buf_addr = (u32)avp->iram_backup_phys;
+ svc.buf_size = TEGRA_IRAM_SIZE;
+
+ *avp_suspend_done = 0;
+ wmb();
+
+ ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: cannot send AVP suspend message\n", __func__);
+ return ret;
+ }
+
+ endtime = jiffies + msecs_to_jiffies(1000);
+ rmb();
+ while ((*avp_suspend_done == 0) && time_before(jiffies, endtime)) {
+ udelay(10);
+ rmb();
+ }
+
+ rmb();
+ if (*avp_suspend_done == 0) {
+ pr_err("%s: AVP failed to suspend\n", __func__);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static int tegra_avp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct avp_info *avp = tegra_avp;
+ unsigned long flags;
+ int ret;
+
+ pr_info("%s()+\n", __func__);
+ spin_lock_irqsave(&avp->state_lock, flags);
+ if (!avp->initialized) {
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+ return 0;
+ }
+ avp->suspending = true;
+ spin_unlock_irqrestore(&avp->state_lock, flags);
+
+ ret = avp_enter_lp0(avp);
+ if (ret)
+ goto err;
+
+ avp->resume_addr = readl(TEGRA_AVP_RESUME_ADDR);
+ if (!avp->resume_addr) {
+		pr_err("%s: AVP failed to set its resume address\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ disable_irq(avp->mbox_from_avp_pend_irq);
+
+ pr_info("avp_suspend: resume_addr=%lx\n", avp->resume_addr);
+ avp->resume_addr &= 0xfffffffeUL;
+ pr_info("%s()-\n", __func__);
+
+ return 0;
+
+err:
+ /* TODO: we need to kill the AVP so that when we come back
+ * it could be reinitialized.. We'd probably need to kill
+ * the users of it so they don't have the wrong state.
+ */
+ return ret;
+}
+
+static int tegra_avp_resume(struct platform_device *pdev)
+{
+ struct avp_info *avp = tegra_avp;
+ int ret = 0;
+
+ pr_info("%s()+\n", __func__);
+ smp_rmb();
+ if (!avp->initialized)
+ goto out;
+
+ BUG_ON(!avp->resume_addr);
+
+ avp_reset(avp, avp->resume_addr);
+ avp->resume_addr = 0;
+ avp->suspending = false;
+ smp_wmb();
+ enable_irq(avp->mbox_from_avp_pend_irq);
+
+ pr_info("%s()-\n", __func__);
+
+out:
+ return ret;
+}
+
+static const struct file_operations tegra_avp_fops = {
+ .owner = THIS_MODULE,
+ .open = tegra_avp_open,
+ .release = tegra_avp_release,
+ .unlocked_ioctl = tegra_avp_ioctl,
+};
+
+static struct trpc_node avp_trpc_node = {
+ .name = "avp-remote",
+ .type = TRPC_NODE_REMOTE,
+ .try_connect = avp_node_try_connect,
+};
+
+static int tegra_avp_probe(struct platform_device *pdev)
+{
+ void *msg_area;
+ struct avp_info *avp;
+ int ret = 0;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "mbox_from_avp_pending");
+ if (irq < 0) {
+ pr_err("%s: invalid platform data\n", __func__);
+ return -EINVAL;
+ }
+
+ avp = kzalloc(sizeof(struct avp_info), GFP_KERNEL);
+ if (!avp) {
+ pr_err("%s: cannot allocate avp_info\n", __func__);
+ return -ENOMEM;
+ }
+
+ avp->nvmap_drv = nvmap_create_client(nvmap_dev, "avp_core");
+ if (IS_ERR(avp->nvmap_drv)) {
+ pr_err("%s: cannot create drv nvmap client\n", __func__);
+ ret = PTR_ERR(avp->nvmap_drv);
+ goto err_nvmap_create_drv_client;
+ }
+
+ avp->kernel_handle = nvmap_alloc(avp->nvmap_drv, SZ_1M, SZ_1M,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR(avp->kernel_handle)) {
+ pr_err("%s: cannot create handle\n", __func__);
+ ret = PTR_ERR(avp->kernel_handle);
+ goto err_nvmap_alloc;
+ }
+
+ avp->kernel_data = nvmap_mmap(avp->kernel_handle);
+ if (!avp->kernel_data) {
+ pr_err("%s: cannot map kernel handle\n", __func__);
+ ret = -ENOMEM;
+ goto err_nvmap_mmap;
+ }
+
+ avp->kernel_phys = nvmap_pin(avp->nvmap_drv, avp->kernel_handle);
+ if (IS_ERR((void *)avp->kernel_phys)) {
+ pr_err("%s: cannot pin kernel handle\n", __func__);
+ ret = PTR_ERR((void *)avp->kernel_phys);
+ goto err_nvmap_pin;
+ }
+
+ /* allocate an extra 4 bytes at the end which AVP uses to signal to
+ * us that it is done suspending.
+ */
+ avp->iram_backup_handle =
+ nvmap_alloc(avp->nvmap_drv, TEGRA_IRAM_SIZE + 4,
+ L1_CACHE_BYTES, NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR(avp->iram_backup_handle)) {
+ pr_err("%s: cannot create handle for iram backup\n", __func__);
+ ret = PTR_ERR(avp->iram_backup_handle);
+ goto err_iram_nvmap_alloc;
+ }
+ avp->iram_backup_data = nvmap_mmap(avp->iram_backup_handle);
+ if (!avp->iram_backup_data) {
+ pr_err("%s: cannot map iram backup handle\n", __func__);
+ ret = -ENOMEM;
+ goto err_iram_nvmap_mmap;
+ }
+ avp->iram_backup_phys = nvmap_pin(avp->nvmap_drv,
+ avp->iram_backup_handle);
+ if (IS_ERR((void *)avp->iram_backup_phys)) {
+ pr_err("%s: cannot pin iram backup handle\n", __func__);
+ ret = PTR_ERR((void *)avp->iram_backup_phys);
+ goto err_iram_nvmap_pin;
+ }
+
+ avp->mbox_from_avp_pend_irq = irq;
+ avp->endpoints = RB_ROOT;
+ spin_lock_init(&avp->state_lock);
+ mutex_init(&avp->open_lock);
+ mutex_init(&avp->to_avp_lock);
+ mutex_init(&avp->from_avp_lock);
+ INIT_WORK(&avp->recv_work, process_avp_message);
+
+ mutex_init(&avp->libs_lock);
+ INIT_LIST_HEAD(&avp->libs);
+
+ avp->recv_wq = alloc_workqueue("avp-msg-recv",
+ WQ_NON_REENTRANT | WQ_HIGHPRI, 1);
+ if (!avp->recv_wq) {
+ pr_err("%s: can't create recve workqueue\n", __func__);
+ ret = -ENOMEM;
+ goto err_create_wq;
+ }
+
+ avp->cop_clk = clk_get(&pdev->dev, "cop");
+ if (IS_ERR(avp->cop_clk)) {
+ pr_err("%s: Couldn't get cop clock\n", TEGRA_AVP_NAME);
+ ret = -ENOENT;
+ goto err_get_cop_clk;
+ }
+
+ msg_area = dma_alloc_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2,
+ &avp->msg_area_addr, GFP_KERNEL);
+ if (!msg_area) {
+ pr_err("%s: cannot allocate msg_area\n", __func__);
+ ret = -ENOMEM;
+ goto err_alloc_msg_area;
+ }
+ memset(msg_area, 0, AVP_MSG_AREA_SIZE * 2);
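+ /* The mailbox word packs the message-area address and flags
+ * together: the DMA address (16-byte aligned) is shifted right by 4,
+ * and the freed low bits carry MBOX_MSG_VALID and the pending
+ * interrupt enable.
+ */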
+ avp->msg = ((avp->msg_area_addr >> 4) |
+ MBOX_MSG_VALID | MBOX_MSG_PENDING_INT_EN);
+ avp->msg_to_avp = msg_area;
+ avp->msg_from_avp = msg_area + AVP_MSG_AREA_SIZE;
+
+ avp_halt(avp);
+
+ avp_trpc_node.priv = avp;
+ ret = trpc_node_register(&avp_trpc_node);
+ if (ret) {
+ pr_err("%s: Can't register avp rpc node\n", __func__);
+ goto err_node_reg;
+ }
+ avp->rpc_node = &avp_trpc_node;
+
+ avp->avp_svc = avp_svc_init(pdev, avp->rpc_node);
+ if (IS_ERR(avp->avp_svc)) {
+ pr_err("%s: Cannot initialize avp_svc\n", __func__);
+ ret = PTR_ERR(avp->avp_svc);
+ goto err_avp_svc_init;
+ }
+
+ avp->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ avp->misc_dev.name = "tegra_avp";
+ avp->misc_dev.fops = &tegra_avp_fops;
+
+ ret = misc_register(&avp->misc_dev);
+ if (ret) {
+ pr_err("%s: Unable to register misc device!\n", TEGRA_AVP_NAME);
+ goto err_misc_reg;
+ }
+
+ ret = request_irq(irq, avp_mbox_pending_isr, 0, TEGRA_AVP_NAME, avp);
+ if (ret) {
+ pr_err("%s: cannot register irq handler\n", __func__);
+ goto err_req_irq_pend;
+ }
+ disable_irq(avp->mbox_from_avp_pend_irq);
+
+ tegra_avp = avp;
+
+ pr_info("%s: driver registered, kernel %lx(%p), msg area %lx/%lx\n",
+ __func__, avp->kernel_phys, avp->kernel_data,
+ (unsigned long)avp->msg_area_addr,
+ (unsigned long)avp->msg_area_addr + AVP_MSG_AREA_SIZE);
+
+ return 0;
+
+err_req_irq_pend:
+ misc_deregister(&avp->misc_dev);
+err_misc_reg:
+ avp_svc_destroy(avp->avp_svc);
+err_avp_svc_init:
+ trpc_node_unregister(avp->rpc_node);
+err_node_reg:
+ dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, msg_area,
+ avp->msg_area_addr);
+err_alloc_msg_area:
+ clk_put(avp->cop_clk);
+err_get_cop_clk:
+ destroy_workqueue(avp->recv_wq);
+err_create_wq:
+ nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
+err_iram_nvmap_pin:
+ nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
+err_iram_nvmap_mmap:
+ nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
+err_iram_nvmap_alloc:
+ nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
+err_nvmap_pin:
+ nvmap_munmap(avp->kernel_handle, avp->kernel_data);
+err_nvmap_mmap:
+ nvmap_free(avp->nvmap_drv, avp->kernel_handle);
+err_nvmap_alloc:
+ nvmap_client_put(avp->nvmap_drv);
+err_nvmap_create_drv_client:
+ kfree(avp);
+ tegra_avp = NULL;
+ return ret;
+}
+
+static int tegra_avp_remove(struct platform_device *pdev)
+{
+ struct avp_info *avp = tegra_avp;
+
+ if (!avp)
+ return 0;
+
+ mutex_lock(&avp->open_lock);
+ if (avp->opened) {
+ mutex_unlock(&avp->open_lock);
+ return -EBUSY;
+ }
+ /* ensure that no one can open the device while we tear down */
+ avp->opened = true;
+ mutex_unlock(&avp->open_lock);
+
+ misc_deregister(&avp->misc_dev);
+
+ avp_halt(avp);
+
+ avp_svc_destroy(avp->avp_svc);
+ trpc_node_unregister(avp->rpc_node);
+ dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, avp->msg_to_avp,
+ avp->msg_area_addr);
+ clk_put(avp->cop_clk);
+ destroy_workqueue(avp->recv_wq);
+ nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
+ nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
+ nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
+ nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
+ nvmap_munmap(avp->kernel_handle, avp->kernel_data);
+ nvmap_free(avp->nvmap_drv, avp->kernel_handle);
+ nvmap_client_put(avp->nvmap_drv);
+ kfree(avp);
+ tegra_avp = NULL;
+ return 0;
+}
+
+static struct platform_driver tegra_avp_driver = {
+ .probe = tegra_avp_probe,
+ .remove = tegra_avp_remove,
+ .suspend = tegra_avp_suspend,
+ .resume = tegra_avp_resume,
+ .driver = {
+ .name = TEGRA_AVP_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_avp_init(void)
+{
+ return platform_driver_register(&tegra_avp_driver);
+}
+
+static void __exit tegra_avp_exit(void)
+{
+ platform_driver_unregister(&tegra_avp_driver);
+}
+
+module_init(tegra_avp_init);
+module_exit(tegra_avp_exit);
diff --git a/drivers/media/video/tegra/avp/avp.h b/drivers/media/video/tegra/avp/avp.h
new file mode 100644
index 000000000000..4f2287743a06
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_AVP_H
+#define __MEDIA_VIDEO_TEGRA_AVP_H
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "trpc.h"
+
+struct avp_svc_info;
+
+struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
+ struct trpc_node *rpc_node);
+void avp_svc_destroy(struct avp_svc_info *avp_svc);
+int avp_svc_start(struct avp_svc_info *svc);
+void avp_svc_stop(struct avp_svc_info *svc);
+
+#endif
diff --git a/drivers/media/video/tegra/avp/avp_msg.h b/drivers/media/video/tegra/avp/avp_msg.h
new file mode 100644
index 000000000000..54d3a63793f1
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp_msg.h
@@ -0,0 +1,342 @@
+/* drivers/media/video/tegra/avp/avp_msg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MEDIA_VIDEO_TEGRA_AVP_MSG_H
+#define __MEDIA_VIDEO_TEGRA_AVP_MSG_H
+
+#include <linux/tegra_avp.h>
+#include <linux/types.h>
+
+/* Note: the port name string is not NUL terminated, so make sure to
+ * allocate appropriate space locally when operating on the string */
+#define XPC_PORT_NAME_LEN 16
+
+#define SVC_ARGS_MAX_LEN 220
+#define SVC_MAX_STRING_LEN 200
+
+#define AVP_ERR_ENOTSUP 0x2
+#define AVP_ERR_EINVAL 0x4
+#define AVP_ERR_ENOMEM 0x6
+#define AVP_ERR_EACCES 0x00030010
+
+enum {
+ SVC_NVMAP_CREATE = 0,
+ SVC_NVMAP_CREATE_RESPONSE = 1,
+ SVC_NVMAP_FREE = 3,
+ SVC_NVMAP_ALLOC = 4,
+ SVC_NVMAP_ALLOC_RESPONSE = 5,
+ SVC_NVMAP_PIN = 6,
+ SVC_NVMAP_PIN_RESPONSE = 7,
+ SVC_NVMAP_UNPIN = 8,
+ SVC_NVMAP_UNPIN_RESPONSE = 9,
+ SVC_NVMAP_GET_ADDRESS = 10,
+ SVC_NVMAP_GET_ADDRESS_RESPONSE = 11,
+ SVC_NVMAP_FROM_ID = 12,
+ SVC_NVMAP_FROM_ID_RESPONSE = 13,
+ SVC_MODULE_CLOCK = 14,
+ SVC_MODULE_CLOCK_RESPONSE = 15,
+ SVC_MODULE_RESET = 16,
+ SVC_MODULE_RESET_RESPONSE = 17,
+ SVC_POWER_REGISTER = 18,
+ SVC_POWER_UNREGISTER = 19,
+ SVC_POWER_STARVATION = 20,
+ SVC_POWER_BUSY_HINT = 21,
+ SVC_POWER_BUSY_HINT_MULTI = 22,
+ SVC_DFS_GETSTATE = 23,
+ SVC_DFS_GETSTATE_RESPONSE = 24,
+ SVC_POWER_RESPONSE = 25,
+ SVC_POWER_MAXFREQ = 26,
+ SVC_ENTER_LP0 = 27,
+ SVC_ENTER_LP0_RESPONSE = 28,
+ SVC_PRINTF = 29,
+ SVC_LIBRARY_ATTACH = 30,
+ SVC_LIBRARY_ATTACH_RESPONSE = 31,
+ SVC_LIBRARY_DETACH = 32,
+ SVC_LIBRARY_DETACH_RESPONSE = 33,
+ SVC_AVP_WDT_RESET = 34,
+ SVC_DFS_GET_CLK_UTIL = 35,
+ SVC_DFS_GET_CLK_UTIL_RESPONSE = 36,
+};
+
+struct svc_msg {
+ u32 svc_id;
+ u8 data[0];
+};
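+
+/* Every SVC message begins with a u32 svc_id so the receiver can demux
+ * on the first word; data[] is a zero-length array overlaying the
+ * id-specific payload that follows.
+ */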
+
+struct svc_common_resp {
+ u32 svc_id;
+ u32 err;
+};
+
+struct svc_printf {
+ u32 svc_id;
+ const char str[SVC_MAX_STRING_LEN];
+};
+
+struct svc_enter_lp0 {
+ u32 svc_id;
+ u32 src_addr;
+ u32 buf_addr;
+ u32 buf_size;
+};
+
+/* nvmap messages */
+struct svc_nvmap_create {
+ u32 svc_id;
+ u32 size;
+};
+
+struct svc_nvmap_create_resp {
+ u32 svc_id;
+ u32 handle_id;
+ u32 err;
+};
+
+enum {
+ AVP_NVMAP_HEAP_EXTERNAL = 1,
+ AVP_NVMAP_HEAP_GART = 2,
+ AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT = 3,
+ AVP_NVMAP_HEAP_IRAM = 4,
+};
+
+struct svc_nvmap_alloc {
+ u32 svc_id;
+ u32 handle_id;
+ u32 heaps[4];
+ u32 num_heaps;
+ u32 align;
+ u32 mapping_type;
+};
+
+struct svc_nvmap_free {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_pin {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_pin_resp {
+ u32 svc_id;
+ u32 addr;
+};
+
+struct svc_nvmap_unpin {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_from_id {
+ u32 svc_id;
+ u32 handle_id;
+};
+
+struct svc_nvmap_get_addr {
+ u32 svc_id;
+ u32 handle_id;
+ u32 offs;
+};
+
+struct svc_nvmap_get_addr_resp {
+ u32 svc_id;
+ u32 addr;
+};
+
+/* library management messages */
+enum {
+ AVP_LIB_REASON_ATTACH = 0,
+ AVP_LIB_REASON_DETACH = 1,
+ AVP_LIB_REASON_ATTACH_GREEDY = 2,
+};
+
+struct svc_lib_attach {
+ u32 svc_id;
+ u32 address;
+ u32 args_len;
+ u32 lib_size;
+ u8 args[SVC_ARGS_MAX_LEN];
+ u32 reason;
+};
+
+struct svc_lib_attach_resp {
+ u32 svc_id;
+ u32 err;
+ u32 lib_id;
+};
+
+struct svc_lib_detach {
+ u32 svc_id;
+ u32 reason;
+ u32 lib_id;
+};
+
+struct svc_lib_detach_resp {
+ u32 svc_id;
+ u32 err;
+};
+
+/* hw module management from the AVP side */
+enum {
+ AVP_MODULE_ID_AVP = 2,
+ AVP_MODULE_ID_VCP = 3,
+ AVP_MODULE_ID_BSEA = 27,
+ AVP_MODULE_ID_VDE = 28,
+ AVP_MODULE_ID_MPE = 29,
+};
+
+struct svc_module_ctrl {
+ u32 svc_id;
+ u32 module_id;
+ u32 client_id;
+ u8 enable;
+};
+
+/* power messages */
+struct svc_pwr_register {
+ u32 svc_id;
+ u32 client_id;
+ u32 unused;
+};
+
+struct svc_pwr_register_resp {
+ u32 svc_id;
+ u32 err;
+ u32 client_id;
+};
+
+struct svc_pwr_starve_hint {
+ u32 svc_id;
+ u32 dfs_clk_id;
+ u32 client_id;
+ u8 starving;
+};
+
+struct svc_pwr_busy_hint {
+ u32 svc_id;
+ u32 dfs_clk_id;
+ u32 client_id;
+ u32 boost_ms; /* duration */
+ u32 boost_freq; /* in khz */
+};
+
+struct svc_pwr_max_freq {
+ u32 svc_id;
+ u32 module_id;
+};
+
+struct svc_pwr_max_freq_resp {
+ u32 svc_id;
+ u32 freq;
+};
+
+/* dfs related messages */
+enum {
+ AVP_DFS_STATE_INVALID = 0,
+ AVP_DFS_STATE_DISABLED = 1,
+ AVP_DFS_STATE_STOPPED = 2,
+ AVP_DFS_STATE_CLOSED_LOOP = 3,
+ AVP_DFS_STATE_PROFILED_LOOP = 4,
+};
+
+struct svc_dfs_get_state_resp {
+ u32 svc_id;
+ u32 state;
+};
+
+enum {
+ AVP_DFS_CLK_CPU = 1,
+ AVP_DFS_CLK_AVP = 2,
+ AVP_DFS_CLK_SYSTEM = 3,
+ AVP_DFS_CLK_AHB = 4,
+ AVP_DFS_CLK_APB = 5,
+ AVP_DFS_CLK_VDE = 6,
+ /* external memory controller */
+ AVP_DFS_CLK_EMC = 7,
+};
+
+struct avp_clk_usage {
+ u32 min;
+ u32 max;
+ u32 curr_min;
+ u32 curr_max;
+ u32 curr;
+ u32 avg; /* average activity, as defined by the AVP firmware */
+};
+
+struct svc_dfs_get_clk_util {
+ u32 svc_id;
+ u32 dfs_clk_id;
+};
+
+/* all units are in kHz */
+struct svc_dfs_get_clk_util_resp {
+ u32 svc_id;
+ u32 err;
+ struct avp_clk_usage usage;
+};
+
+/************************/
+
+enum {
+ CMD_ACK = 0,
+ CMD_CONNECT = 2,
+ CMD_DISCONNECT = 3,
+ CMD_MESSAGE = 4,
+ CMD_RESPONSE = 5,
+};
+
+struct msg_data {
+ u32 cmd;
+ u8 data[0];
+};
+
+struct msg_ack {
+ u32 cmd;
+ u32 arg;
+};
+
+struct msg_connect {
+ u32 cmd;
+ u32 port_id;
+ /* not NUL terminated, just 0 padded */
+ char name[XPC_PORT_NAME_LEN];
+};
+
+struct msg_connect_reply {
+ u32 cmd;
+ u32 port_id;
+};
+
+struct msg_disconnect {
+ u32 cmd;
+ u32 port_id;
+};
+
+struct msg_disconnect_reply {
+ u32 cmd;
+ u32 ack;
+};
+
+struct msg_port_data {
+ u32 cmd;
+ u32 port_id;
+ u32 msg_len;
+ u8 data[0];
+};
+
+#endif
diff --git a/drivers/media/video/tegra/avp/avp_svc.c b/drivers/media/video/tegra/avp/avp_svc.c
new file mode 100644
index 000000000000..2eed2891e556
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp_svc.c
@@ -0,0 +1,732 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+
+#include <mach/clk.h>
+#include <mach/nvmap.h>
+
+#include "../../../../video/tegra/nvmap/nvmap.h"
+
+#include "avp_msg.h"
+#include "trpc.h"
+#include "avp.h"
+
+enum {
+ AVP_DBG_TRACE_SVC = 1U << 0,
+};
+
+static u32 debug_mask = 0;
+module_param_named(debug_mask, debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define DBG(flag, args...) \
+ do { if (unlikely(debug_mask & (flag))) pr_info(args); } while (0)
+
+enum {
+ CLK_REQUEST_VCP = 0,
+ CLK_REQUEST_BSEA = 1,
+ CLK_REQUEST_VDE = 2,
+ NUM_CLK_REQUESTS,
+};
+
+struct avp_module {
+ const char *name;
+ u32 clk_req;
+};
+
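+/* Sparse table indexed directly by AVP_MODULE_ID_*; unnamed slots mark
+ * module ids the kernel does not manage (see find_avp_module()).
+ */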
+static struct avp_module avp_modules[] = {
+ [AVP_MODULE_ID_VCP] = {
+ .name = "vcp",
+ .clk_req = CLK_REQUEST_VCP,
+ },
+ [AVP_MODULE_ID_BSEA] = {
+ .name = "bsea",
+ .clk_req = CLK_REQUEST_BSEA,
+ },
+ [AVP_MODULE_ID_VDE] = {
+ .name = "vde",
+ .clk_req = CLK_REQUEST_VDE,
+ },
+};
+#define NUM_AVP_MODULES ARRAY_SIZE(avp_modules)
+
+struct avp_clk {
+ struct clk *clk;
+ int refcnt;
+ struct avp_module *mod;
+};
+
+struct avp_svc_info {
+ struct avp_clk clks[NUM_CLK_REQUESTS];
+ /* used for dvfs */
+ struct clk *sclk;
+ struct clk *emcclk;
+
+ struct mutex clk_lock;
+
+ struct trpc_endpoint *cpu_ep;
+ struct task_struct *svc_thread;
+
+ /* client for remote allocations, for easy tear down */
+ struct nvmap_client *nvmap_remote;
+ struct trpc_node *rpc_node;
+};
+
+static void do_svc_nvmap_create(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_create *msg = (struct svc_nvmap_create *)_msg;
+ struct svc_nvmap_create_resp resp;
+ struct nvmap_handle_ref *handle;
+ u32 handle_id = 0;
+ u32 err = 0;
+
+ handle = nvmap_create_handle(avp_svc->nvmap_remote, msg->size);
+ if (unlikely(IS_ERR(handle))) {
+ pr_err("avp_svc: error creating handle (%d bytes) for remote\n",
+ msg->size);
+ err = AVP_ERR_ENOMEM;
+ } else
+ handle_id = (u32)nvmap_ref_to_id(handle);
+
+ resp.svc_id = SVC_NVMAP_CREATE_RESPONSE;
+ resp.err = err;
+ resp.handle_id = handle_id;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+ /* TODO: do we need to put the handle if send_msg failed? */
+}
+
+static void do_svc_nvmap_alloc(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_alloc *msg = (struct svc_nvmap_alloc *)_msg;
+ struct svc_common_resp resp;
+ struct nvmap_handle *handle;
+ u32 err = 0;
+ u32 heap_mask = 0;
+ int i;
+ size_t align;
+
+ handle = nvmap_get_handle_id(avp_svc->nvmap_remote, msg->handle_id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: unknown remote handle 0x%x\n", msg->handle_id);
+ err = AVP_ERR_EACCES;
+ goto out;
+ }
+
+ if (msg->num_heaps > 4) {
+ pr_err("avp_svc: invalid remote alloc request (%d heaps?!)\n",
+ msg->num_heaps);
+ /* TODO: should we error out instead? */
+ msg->num_heaps = 0;
+ }
+ if (msg->num_heaps == 0)
+ heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC | NVMAP_HEAP_SYSMEM;
+
+ for (i = 0; i < msg->num_heaps; i++) {
+ switch (msg->heaps[i]) {
+ case AVP_NVMAP_HEAP_EXTERNAL:
+ heap_mask |= NVMAP_HEAP_SYSMEM;
+ break;
+ case AVP_NVMAP_HEAP_GART:
+ heap_mask |= NVMAP_HEAP_IOVMM;
+ break;
+ case AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT:
+ heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
+ break;
+ case AVP_NVMAP_HEAP_IRAM:
+ heap_mask |= NVMAP_HEAP_CARVEOUT_IRAM;
+ break;
+ default:
+ break;
+ }
+ }
+
+ align = max_t(size_t, L1_CACHE_BYTES, msg->align);
+ err = nvmap_alloc_handle_id(avp_svc->nvmap_remote, msg->handle_id,
+ heap_mask, align, 0);
+ nvmap_handle_put(handle);
+ if (err) {
+ pr_err("avp_svc: can't allocate for handle 0x%x (%d)\n",
+ msg->handle_id, err);
+ err = AVP_ERR_ENOMEM;
+ }
+
+out:
+ resp.svc_id = SVC_NVMAP_ALLOC_RESPONSE;
+ resp.err = err;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_free(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_free *msg = (struct svc_nvmap_free *)_msg;
+
+ nvmap_free_handle_id(avp_svc->nvmap_remote, msg->handle_id);
+}
+
+static void do_svc_nvmap_pin(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_pin *msg = (struct svc_nvmap_pin *)_msg;
+ struct svc_nvmap_pin_resp resp;
+ struct nvmap_handle_ref *handle;
+ unsigned long addr = ~0UL;
+ unsigned long id = msg->handle_id;
+ int err;
+
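+ /* duplicate the handle into the remote client first so that the pin
+ * is owned by (and torn down with) avp_svc->nvmap_remote.
+ */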
+ handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote, id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: can't dup handle %lx\n", id);
+ goto out;
+ }
+ err = nvmap_pin_ids(avp_svc->nvmap_remote, 1, &id);
+ if (err) {
+ pr_err("avp_svc: can't pin for handle %lx (%d)\n", id, err);
+ goto out;
+ }
+ addr = nvmap_handle_address(avp_svc->nvmap_remote, id);
+
+out:
+ resp.svc_id = SVC_NVMAP_PIN_RESPONSE;
+ resp.addr = addr;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_unpin(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_unpin *msg = (struct svc_nvmap_unpin *)_msg;
+ struct svc_common_resp resp;
+ unsigned long id = msg->handle_id;
+
+ nvmap_unpin_ids(avp_svc->nvmap_remote, 1, &id);
+ nvmap_free_handle_id(avp_svc->nvmap_remote, id);
+
+ resp.svc_id = SVC_NVMAP_UNPIN_RESPONSE;
+ resp.err = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_from_id(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_from_id *msg = (struct svc_nvmap_from_id *)_msg;
+ struct svc_common_resp resp;
+ struct nvmap_handle_ref *handle;
+ int err = 0;
+
+ handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote,
+ msg->handle_id);
+ if (IS_ERR(handle)) {
+ pr_err("avp_svc: can't duplicate handle for id 0x%x (%d)\n",
+ msg->handle_id, (int)PTR_ERR(handle));
+ err = AVP_ERR_ENOMEM;
+ }
+
+ resp.svc_id = SVC_NVMAP_FROM_ID_RESPONSE;
+ resp.err = err;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_nvmap_get_addr(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_nvmap_get_addr *msg = (struct svc_nvmap_get_addr *)_msg;
+ struct svc_nvmap_get_addr_resp resp;
+
+ resp.svc_id = SVC_NVMAP_GET_ADDRESS_RESPONSE;
+ resp.addr = nvmap_handle_address(avp_svc->nvmap_remote, msg->handle_id);
+ resp.addr += msg->offs;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_pwr_register(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_pwr_register *msg = (struct svc_pwr_register *)_msg;
+ struct svc_pwr_register_resp resp;
+
+ resp.svc_id = SVC_POWER_RESPONSE;
+ resp.err = 0;
+ resp.client_id = msg->client_id;
+
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static struct avp_module *find_avp_module(struct avp_svc_info *avp_svc, u32 id)
+{
+ if (id < NUM_AVP_MODULES && avp_modules[id].name)
+ return &avp_modules[id];
+ return NULL;
+}
+
+static void do_svc_module_reset(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
+ struct svc_common_resp resp;
+ struct avp_module *mod;
+ struct avp_clk *aclk;
+
+ mod = find_avp_module(avp_svc, msg->module_id);
+ if (!mod) {
+ if (msg->module_id == AVP_MODULE_ID_AVP)
+ pr_err("avp_svc: AVP suicidal?!?!\n");
+ else
+ pr_err("avp_svc: Unknown module reset requested: %d\n",
+ msg->module_id);
+ /* other side doesn't handle errors for reset */
+ resp.err = 0;
+ goto send_response;
+ }
+
+ aclk = &avp_svc->clks[mod->clk_req];
+ tegra_periph_reset_assert(aclk->clk);
+ udelay(10);
+ tegra_periph_reset_deassert(aclk->clk);
+ resp.err = 0;
+
+send_response:
+ resp.svc_id = SVC_MODULE_RESET_RESPONSE;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_module_clock(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
+ struct svc_common_resp resp;
+ struct avp_module *mod;
+ struct avp_clk *aclk;
+
+ mod = find_avp_module(avp_svc, msg->module_id);
+ if (!mod) {
+ pr_err("avp_svc: unknown module clock requested: %d\n",
+ msg->module_id);
+ resp.err = AVP_ERR_EINVAL;
+ goto send_response;
+ }
+
+ mutex_lock(&avp_svc->clk_lock);
+ aclk = &avp_svc->clks[mod->clk_req];
+ if (msg->enable) {
+ if (aclk->refcnt++ == 0) {
+ clk_enable(avp_svc->emcclk);
+ clk_enable(avp_svc->sclk);
+ clk_enable(aclk->clk);
+ }
+ } else {
+ if (unlikely(aclk->refcnt == 0)) {
+ pr_err("avp_svc: unbalanced clock disable for '%s'\n",
+ aclk->mod->name);
+ } else if (--aclk->refcnt == 0) {
+ clk_disable(aclk->clk);
+ clk_disable(avp_svc->sclk);
+ clk_disable(avp_svc->emcclk);
+ }
+ }
+ mutex_unlock(&avp_svc->clk_lock);
+ resp.err = 0;
+
+send_response:
+ resp.svc_id = SVC_MODULE_CLOCK_RESPONSE;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_null_response(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len, u32 resp_svc_id)
+{
+ struct svc_common_resp resp;
+ resp.svc_id = resp_svc_id;
+ resp.err = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_dfs_get_state(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_dfs_get_state_resp resp;
+ resp.svc_id = SVC_DFS_GETSTATE_RESPONSE;
+ resp.state = AVP_DFS_STATE_STOPPED;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_dfs_get_clk_util(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_dfs_get_clk_util_resp resp;
+
+ resp.svc_id = SVC_DFS_GET_CLK_UTIL_RESPONSE;
+ resp.err = 0;
+ memset(&resp.usage, 0, sizeof(struct avp_clk_usage));
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_pwr_max_freq(struct avp_svc_info *avp_svc,
+ struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_pwr_max_freq_resp resp;
+
+ resp.svc_id = SVC_POWER_MAXFREQ;
+ resp.freq = 0;
+ trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
+ sizeof(resp), GFP_KERNEL);
+}
+
+static void do_svc_printf(struct avp_svc_info *avp_svc, struct svc_msg *_msg,
+ size_t len)
+{
+ struct svc_printf *msg = (struct svc_printf *)_msg;
+ char tmp_str[SVC_MAX_STRING_LEN];
+
+ /* ensure we null terminate the source */
+ strlcpy(tmp_str, msg->str, SVC_MAX_STRING_LEN);
+ pr_info("[AVP]: %s", tmp_str);
+}
+
+static int dispatch_svc_message(struct avp_svc_info *avp_svc,
+ struct svc_msg *msg,
+ size_t len)
+{
+ int ret = 0;
+
+ switch (msg->svc_id) {
+ case SVC_NVMAP_CREATE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_create\n", __func__);
+ do_svc_nvmap_create(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_ALLOC:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_alloc\n", __func__);
+ do_svc_nvmap_alloc(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_FREE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_free\n", __func__);
+ do_svc_nvmap_free(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_PIN:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_pin\n", __func__);
+ do_svc_nvmap_pin(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_UNPIN:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_unpin\n", __func__);
+ do_svc_nvmap_unpin(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_FROM_ID:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_from_id\n", __func__);
+ do_svc_nvmap_from_id(avp_svc, msg, len);
+ break;
+ case SVC_NVMAP_GET_ADDRESS:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_get_addr\n", __func__);
+ do_svc_nvmap_get_addr(avp_svc, msg, len);
+ break;
+ case SVC_POWER_REGISTER:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_register\n", __func__);
+ do_svc_pwr_register(avp_svc, msg, len);
+ break;
+ case SVC_POWER_UNREGISTER:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_unregister\n", __func__);
+ /* nothing to do */
+ break;
+ case SVC_POWER_BUSY_HINT_MULTI:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power_busy_hint_multi\n",
+ __func__);
+ /* nothing to do */
+ break;
+ case SVC_POWER_BUSY_HINT:
+ case SVC_POWER_STARVATION:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power busy/starve hint\n",
+ __func__);
+ do_svc_null_response(avp_svc, msg, len, SVC_POWER_RESPONSE);
+ break;
+ case SVC_POWER_MAXFREQ:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got power get_max_freq\n",
+ __func__);
+ do_svc_pwr_max_freq(avp_svc, msg, len);
+ break;
+ case SVC_DFS_GETSTATE:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got dfs_get_state\n", __func__);
+ do_svc_dfs_get_state(avp_svc, msg, len);
+ break;
+ case SVC_MODULE_RESET:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got module_reset\n", __func__);
+ do_svc_module_reset(avp_svc, msg, len);
+ break;
+ case SVC_MODULE_CLOCK:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock\n", __func__);
+ do_svc_module_clock(avp_svc, msg, len);
+ break;
+ case SVC_DFS_GET_CLK_UTIL:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got get_clk_util\n", __func__);
+ do_svc_dfs_get_clk_util(avp_svc, msg, len);
+ break;
+ case SVC_PRINTF:
+ DBG(AVP_DBG_TRACE_SVC, "%s: got remote printf\n", __func__);
+ do_svc_printf(avp_svc, msg, len);
+ break;
+ case SVC_AVP_WDT_RESET:
+ pr_err("avp_svc: AVP has been reset by watchdog\n");
+ break;
+ default:
+ pr_err("avp_svc: invalid SVC call 0x%x\n", msg->svc_id);
+ ret = -ENOMSG;
+ break;
+ }
+
+ return ret;
+}
+
+static int avp_svc_thread(void *data)
+{
+ struct avp_svc_info *avp_svc = data;
+ u8 buf[TEGRA_RPC_MAX_MSG_LEN];
+ struct svc_msg *msg = (struct svc_msg *)buf;
+ int ret;
+
+ BUG_ON(!avp_svc->cpu_ep);
+
+ ret = trpc_wait_peer(avp_svc->cpu_ep, -1);
+ if (ret) {
+ /* XXX: teardown?! */
+ pr_err("%s: no connection from AVP (%d)\n", __func__, ret);
+ goto err;
+ }
+
+ pr_info("%s: got remote peer\n", __func__);
+
+ while (!kthread_should_stop()) {
+ DBG(AVP_DBG_TRACE_SVC, "%s: waiting for message\n", __func__);
+ ret = trpc_recv_msg(avp_svc->rpc_node, avp_svc->cpu_ep, buf,
+ TEGRA_RPC_MAX_MSG_LEN, -1);
+ DBG(AVP_DBG_TRACE_SVC, "%s: got message\n", __func__);
+ if (ret < 0) {
+ pr_err("%s: couldn't receive msg\n", __func__);
+ /* XXX: port got closed? we should exit? */
+ goto err;
+ } else if (!ret) {
+ pr_err("%s: received msg of len 0?!\n", __func__);
+ continue;
+ }
+ dispatch_svc_message(avp_svc, msg, ret);
+ }
+
+err:
+ trpc_put(avp_svc->cpu_ep);
+ pr_info("%s: done\n", __func__);
+ return ret;
+}
+
+int avp_svc_start(struct avp_svc_info *avp_svc)
+{
+ struct trpc_endpoint *ep;
+ int ret;
+
+ avp_svc->nvmap_remote = nvmap_create_client(nvmap_dev, "avp_remote");
+ if (IS_ERR(avp_svc->nvmap_remote)) {
+ pr_err("%s: cannot create remote nvmap client\n", __func__);
+ ret = PTR_ERR(avp_svc->nvmap_remote);
+ goto err_nvmap_create_remote_client;
+ }
+
+ ep = trpc_create(avp_svc->rpc_node, "RPC_CPU_PORT", NULL, NULL);
+ if (IS_ERR(ep)) {
+ pr_err("%s: can't create RPC_CPU_PORT\n", __func__);
+ ret = PTR_ERR(ep);
+ goto err_cpu_port_create;
+ }
+
+ /* TODO: protect this */
+ avp_svc->cpu_ep = ep;
+
+ /* the service thread should get an extra reference for the port */
+ trpc_get(avp_svc->cpu_ep);
+ avp_svc->svc_thread = kthread_run(avp_svc_thread, avp_svc,
+ "avp_svc_thread");
+ if (IS_ERR_OR_NULL(avp_svc->svc_thread)) {
+ avp_svc->svc_thread = NULL;
+ pr_err("%s: can't create svc thread\n", __func__);
+ ret = -ENOMEM;
+ goto err_kthread;
+ }
+ return 0;
+
+err_kthread:
+ trpc_close(avp_svc->cpu_ep);
+ trpc_put(avp_svc->cpu_ep);
+ avp_svc->cpu_ep = NULL;
+err_cpu_port_create:
+ nvmap_client_put(avp_svc->nvmap_remote);
+err_nvmap_create_remote_client:
+ avp_svc->nvmap_remote = NULL;
+ return ret;
+}
+
+void avp_svc_stop(struct avp_svc_info *avp_svc)
+{
+ int ret;
+ int i;
+
+ trpc_close(avp_svc->cpu_ep);
+ ret = kthread_stop(avp_svc->svc_thread);
+ if (ret == -EINTR) {
+ /* the thread never started, so drop its extra reference */
+ trpc_put(avp_svc->cpu_ep);
+ }
+ avp_svc->cpu_ep = NULL;
+
+ nvmap_client_put(avp_svc->nvmap_remote);
+ avp_svc->nvmap_remote = NULL;
+
+ mutex_lock(&avp_svc->clk_lock);
+ for (i = 0; i < NUM_CLK_REQUESTS; i++) {
+ struct avp_clk *aclk = &avp_svc->clks[i];
+ BUG_ON(aclk->refcnt < 0);
+ if (aclk->refcnt > 0) {
+ pr_info("%s: remote left clock '%s' on\n", __func__,
+ aclk->mod->name);
+ clk_disable(aclk->clk);
+ /* sclk/emcclk was enabled once for every clock */
+ clk_disable(avp_svc->sclk);
+ clk_disable(avp_svc->emcclk);
+ }
+ aclk->refcnt = 0;
+ }
+ mutex_unlock(&avp_svc->clk_lock);
+}
+
+struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
+ struct trpc_node *rpc_node)
+{
+ struct avp_svc_info *avp_svc;
+ int ret;
+ int i;
+ int cnt = 0;
+
+ BUG_ON(!rpc_node);
+
+ avp_svc = kzalloc(sizeof(struct avp_svc_info), GFP_KERNEL);
+ if (!avp_svc) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ BUILD_BUG_ON(NUM_CLK_REQUESTS > BITS_PER_LONG);
+
+ for (i = 0; i < NUM_AVP_MODULES; i++) {
+ struct avp_module *mod = &avp_modules[i];
+ struct clk *clk;
+ if (!mod->name)
+ continue;
+ BUG_ON(mod->clk_req >= NUM_CLK_REQUESTS ||
+ cnt++ >= NUM_CLK_REQUESTS);
+
+ clk = clk_get(&pdev->dev, mod->name);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("avp_svc: Couldn't get required clocks\n");
+ goto err_get_clks;
+ }
+ avp_svc->clks[mod->clk_req].clk = clk;
+ avp_svc->clks[mod->clk_req].mod = mod;
+ avp_svc->clks[mod->clk_req].refcnt = 0;
+ }
+
+ avp_svc->sclk = clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(avp_svc->sclk)) {
+ pr_err("avp_svc: Couldn't get sclk for dvfs\n");
+ ret = -ENOENT;
+ goto err_get_clks;
+ }
+
+ avp_svc->emcclk = clk_get(&pdev->dev, "emc");
+ if (IS_ERR(avp_svc->emcclk)) {
+ pr_err("avp_svc: Couldn't get emcclk for dvfs\n");
+ ret = -ENOENT;
+ goto err_get_clks;
+ }
+
+ /*
+ * The emc is a shared clock, it will be set to the highest
+ * requested rate from any user. Set the rate to ULONG_MAX to
+ * always request the max rate whenever this request is enabled
+ */
+ clk_set_rate(avp_svc->emcclk, ULONG_MAX);
+
+ avp_svc->rpc_node = rpc_node;
+
+ mutex_init(&avp_svc->clk_lock);
+
+ return avp_svc;
+
+err_get_clks:
+ for (i = 0; i < NUM_CLK_REQUESTS; i++)
+ if (avp_svc->clks[i].clk)
+ clk_put(avp_svc->clks[i].clk);
+ if (!IS_ERR_OR_NULL(avp_svc->sclk))
+ clk_put(avp_svc->sclk);
+ if (!IS_ERR_OR_NULL(avp_svc->emcclk))
+ clk_put(avp_svc->emcclk);
+err_alloc:
+ return ERR_PTR(ret);
+}
+
+void avp_svc_destroy(struct avp_svc_info *avp_svc)
+{
+ int i;
+
+ for (i = 0; i < NUM_CLK_REQUESTS; i++)
+ clk_put(avp_svc->clks[i].clk);
+ clk_put(avp_svc->sclk);
+ clk_put(avp_svc->emcclk);
+
+ kfree(avp_svc);
+}
diff --git a/drivers/media/video/tegra/avp/headavp.S b/drivers/media/video/tegra/avp/headavp.S
new file mode 100644
index 000000000000..5304067f0d83
--- /dev/null
+++ b/drivers/media/video/tegra/avp/headavp.S
@@ -0,0 +1,66 @@
+/*
+ * arch/arm/mach-tegra/headavp.S
+ *
+ * AVP kernel launcher stub; programs the AVP MMU and jumps to the
+ * kernel code. Must use ONLY ARMv4 instructions, and must be compiled
+ * in ARM mode.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include "headavp.h"
+
+#define PTE0_COMPARE 0
+/* the default translation will translate any VA within
+ * 0x0010:0000..0x001f:ffff to the (megabyte-aligned) value written to
+ * _tegra_avp_boot_stub_data.map_phys_addr
+ */
+#define PTE0_DEFAULT (AVP_KERNEL_VIRT_BASE | 0x3ff0)
+
+#define PTE0_TRANSLATE 4
+
+#define TRANSLATE_DATA (1 << 11)
+#define TRANSLATE_CODE (1 << 10)
+#define TRANSLATE_WR (1 << 9)
+#define TRANSLATE_RD (1 << 8)
+#define TRANSLATE_HIT (1 << 7)
+#define TRANSLATE_EN (1 << 2)
+
+#define TRANSLATE_OPT (TRANSLATE_DATA | TRANSLATE_CODE | TRANSLATE_WR | \
+ TRANSLATE_RD | TRANSLATE_HIT)
+
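+/* The CPU side fills in _tegra_avp_boot_stub_data below before starting
+ * the AVP. ldmia loads it as r0 = MMU TLB base, r1 = kernel entry
+ * point (the 0xdeadbeef placeholder), r2 = PTE0 compare value, and
+ * r3 = physical load address (the 0xdeadd00d placeholder). The stub
+ * clears the low 12 bits of r3, ORs in the translate options, writes
+ * the single translation entry, and jumps to the kernel via r1.
+ */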
+ENTRY(_tegra_avp_boot_stub)
+ adr r4, _tegra_avp_boot_stub_data
+ ldmia r4, {r0-r3}
+ str r2, [r0, #PTE0_COMPARE]
+ bic r3, r3, #0xff0
+ bic r3, r3, #0x00f
+ orr r3, r3, #TRANSLATE_OPT
+ orr r3, r3, #TRANSLATE_EN
+ str r3, [r0, #PTE0_TRANSLATE]
+ bx r1
+ b .
+ENDPROC(_tegra_avp_boot_stub)
+ .type _tegra_avp_boot_stub_data, %object
+ENTRY(_tegra_avp_boot_stub_data)
+ .long AVP_MMU_TLB_BASE
+ .long 0xdeadbeef
+ .long PTE0_DEFAULT
+ .long 0xdeadd00d
+ .size _tegra_avp_boot_stub_data, . - _tegra_avp_boot_stub_data
diff --git a/drivers/media/video/tegra/avp/headavp.h b/drivers/media/video/tegra/avp/headavp.h
new file mode 100644
index 000000000000..2bcc3297bfa4
--- /dev/null
+++ b/drivers/media/video/tegra/avp/headavp.h
@@ -0,0 +1,41 @@
+/*
+ * arch/arm/mach-tegra/headavp.h
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MACH_TEGRA_HEADAVP_H
+#define _MACH_TEGRA_HEADAVP_H
+
+#define AVP_MMU_TLB_BASE 0xF000F000
+#define AVP_KERNEL_VIRT_BASE 0x00100000
+
+#ifndef __ASSEMBLY__
+
+struct tegra_avp_boot_stub_data {
+ unsigned long mmu_tlb_base;
+ unsigned long jump_addr;
+ unsigned long map_virt_addr;
+ unsigned long map_phys_addr;
+};
+
+extern void _tegra_avp_boot_stub(void);
+extern struct tegra_avp_boot_stub_data _tegra_avp_boot_stub_data;
+
+#endif
+
+#endif
diff --git a/drivers/media/video/tegra/avp/tegra_rpc.c b/drivers/media/video/tegra/avp/tegra_rpc.c
new file mode 100644
index 000000000000..6110d0bd066c
--- /dev/null
+++ b/drivers/media/video/tegra/avp/tegra_rpc.c
@@ -0,0 +1,796 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original NVRM code from NVIDIA, and a partial rewrite by:
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "trpc.h"
+
+struct trpc_port;
+struct trpc_endpoint {
+ struct list_head msg_list;
+ wait_queue_head_t msg_waitq;
+
+ struct trpc_endpoint *out;
+ struct trpc_port *port;
+
+ struct trpc_node *owner;
+
+ struct completion *connect_done;
+ bool ready;
+ struct trpc_ep_ops *ops;
+ void *priv;
+};
+
+struct trpc_port {
+ char name[TEGRA_RPC_MAX_NAME_LEN];
+
+ /* protects peer and closed state */
+ spinlock_t lock;
+ struct trpc_endpoint peers[2];
+ bool closed;
+
+ /* private */
+ struct kref ref;
+ struct rb_node rb_node;
+};
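+
+/* A port is a named rendezvous point between exactly two endpoints:
+ * peers[0] is made by trpc_create() and peers[1] by trpc_create_peer(),
+ * with each endpoint's 'out' pointing at the opposite peer. The kref
+ * lives on the port, each live endpoint holding a reference.
+ */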
+
+enum {
+ TRPC_TRACE_MSG = 1U << 0,
+ TRPC_TRACE_CONN = 1U << 1,
+ TRPC_TRACE_PORT = 1U << 2,
+};
+
+static u32 trpc_debug_mask = 0;
+module_param_named(debug_mask, trpc_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+#define DBG(flag, args...) \
+ do { if (trpc_debug_mask & (flag)) pr_info(args); } while (0)
+
+struct tegra_rpc_info {
+ struct kmem_cache *msg_cache;
+
+ spinlock_t ports_lock;
+ struct rb_root ports;
+
+ struct list_head node_list;
+ struct mutex node_lock;
+};
+
+struct trpc_msg {
+ struct list_head list;
+
+ size_t len;
+ u8 payload[TEGRA_RPC_MAX_MSG_LEN];
+};
+
+static struct tegra_rpc_info *tegra_rpc;
+static struct dentry *trpc_debug_root;
+
+static struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep);
+
+/* a few accessors for the outside world to keep the trpc_endpoint struct
+ * definition private to this module */
+void *trpc_priv(struct trpc_endpoint *ep)
+{
+ return ep->priv;
+}
+
+struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep)
+{
+ return ep->out;
+}
+
+const char *trpc_name(struct trpc_endpoint *ep)
+{
+ return ep->port->name;
+}
+
+static inline bool is_connected(struct trpc_port *port)
+{
+ return port->peers[0].ready && port->peers[1].ready;
+}
+
+static inline bool is_closed(struct trpc_port *port)
+{
+ return port->closed;
+}
+
+static void rpc_port_free(struct tegra_rpc_info *info, struct trpc_port *port)
+{
+ struct trpc_msg *msg;
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ struct list_head *list = &port->peers[i].msg_list;
+ while (!list_empty(list)) {
+ msg = list_first_entry(list, struct trpc_msg, list);
+ list_del(&msg->list);
+ kmem_cache_free(info->msg_cache, msg);
+ }
+ }
+ kfree(port);
+}
+
+static void _rpc_port_release(struct kref *kref)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = container_of(kref, struct trpc_port, ref);
+ unsigned long flags;
+
+ DBG(TRPC_TRACE_PORT, "%s: releasing port '%s' (%p)\n", __func__,
+ port->name, port);
+ spin_lock_irqsave(&info->ports_lock, flags);
+ rb_erase(&port->rb_node, &info->ports);
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ rpc_port_free(info, port);
+}
+
+/* note that the refcount is actually on the port and not on the endpoint */
+void trpc_put(struct trpc_endpoint *ep)
+{
+ kref_put(&ep->port->ref, _rpc_port_release);
+}
+
+void trpc_get(struct trpc_endpoint *ep)
+{
+ kref_get(&ep->port->ref);
+}
+
+/* Searches the rb_tree for a port with the provided name. If one is not
+ * found, the new port is inserted; otherwise the existing port is returned.
+ * Must be called with the ports_lock held */
+static struct trpc_port *rpc_port_find_insert(struct tegra_rpc_info *info,
+ struct trpc_port *port)
+{
+ struct rb_node **p;
+ struct rb_node *parent;
+ struct trpc_port *tmp;
+ int ret = 0;
+
+ p = &info->ports.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct trpc_port, rb_node);
+
+ ret = strncmp(port->name, tmp->name, TEGRA_RPC_MAX_NAME_LEN);
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else if (ret > 0)
+ p = &(*p)->rb_right;
+ else
+ return tmp;
+ }
+ rb_link_node(&port->rb_node, parent, p);
+ rb_insert_color(&port->rb_node, &info->ports);
+ DBG(TRPC_TRACE_PORT, "%s: inserted port '%s' (%p)\n", __func__,
+ port->name, port);
+ return port;
+}
+
+static int nodes_try_connect(struct tegra_rpc_info *info,
+ struct trpc_node *src,
+ struct trpc_endpoint *from)
+{
+ struct trpc_node *node;
+ int ret;
+
+ mutex_lock(&info->node_lock);
+ list_for_each_entry(node, &info->node_list, list) {
+ if (!node->try_connect)
+ continue;
+ ret = node->try_connect(node, src, from);
+ if (!ret) {
+ mutex_unlock(&info->node_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&info->node_lock);
+ return -ECONNREFUSED;
+}
+
+static struct trpc_port *rpc_port_alloc(const char *name)
+{
+ struct trpc_port *port;
+ int i;
+
+ port = kzalloc(sizeof(struct trpc_port), GFP_KERNEL);
+ if (!port) {
+ pr_err("%s: can't alloc rpc_port\n", __func__);
+ return NULL;
+ }
+ BUILD_BUG_ON(2 != ARRAY_SIZE(port->peers));
+
+ spin_lock_init(&port->lock);
+ kref_init(&port->ref);
+ strlcpy(port->name, name, TEGRA_RPC_MAX_NAME_LEN);
+ for (i = 0; i < 2; i++) {
+ struct trpc_endpoint *ep = port->peers + i;
+ INIT_LIST_HEAD(&ep->msg_list);
+ init_waitqueue_head(&ep->msg_waitq);
+ ep->port = port;
+ }
+ port->peers[0].out = &port->peers[1];
+ port->peers[1].out = &port->peers[0];
+
+ return port;
+}
+
+/* must be holding the ports lock */
+static inline void handle_port_connected(struct trpc_port *port)
+{
+ int i;
+
+ DBG(TRPC_TRACE_CONN, "tegra_rpc: port '%s' connected\n", port->name);
+
+ for (i = 0; i < 2; i++)
+ if (port->peers[i].connect_done)
+ complete(port->peers[i].connect_done);
+}
+
+static inline void _ready_ep(struct trpc_endpoint *ep,
+ struct trpc_node *owner,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ ep->ready = true;
+ ep->owner = owner;
+ ep->ops = ops;
+ ep->priv = priv;
+}
+
+/* this keeps a reference on the port */
+static struct trpc_endpoint *_create_peer(struct tegra_rpc_info *info,
+ struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ struct trpc_port *port = ep->port;
+ struct trpc_endpoint *peer = ep->out;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ BUG_ON(port->closed);
+ if (peer->ready || !ep->ready) {
+ peer = NULL;
+ goto out;
+ }
+ _ready_ep(peer, owner, ops, priv);
+ if (WARN_ON(!is_connected(port)))
+ pr_warning("%s: created peer but no connection established?!\n",
+ __func__);
+ else
+ handle_port_connected(port);
+ trpc_get(peer);
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return peer;
+}
+
+/* Exported code. This is our interface to the outside world */
+struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
+ struct trpc_ep_ops *ops, void *priv)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *ep;
+ struct trpc_port *new_port;
+ struct trpc_port *port;
+ unsigned long flags;
+
+ BUG_ON(!owner);
+
+ /* we always allocate a new port even if one already might exist. This
+ * is slightly inefficient, but it allows us to do the allocation
+ * without holding our ports_lock spinlock. */
+ new_port = rpc_port_alloc(name);
+ if (!new_port) {
+ pr_err("%s: can't allocate memory for '%s'\n", __func__, name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ port = rpc_port_find_insert(info, new_port);
+ if (port != new_port) {
+ rpc_port_free(info, new_port);
+ /* There was already a port by that name in the rb_tree,
+ * so just try to create its peer[1], i.e. peer for peer[0]
+ */
+ ep = _create_peer(info, owner, &port->peers[0], ops, priv);
+ if (!ep) {
+ pr_err("%s: port '%s' is not in a connectable state\n",
+ __func__, port->name);
+ ep = ERR_PTR(-EINVAL);
+ }
+ goto out;
+ }
+ /* don't need to grab the individual port lock here since we must be
+ * holding the ports_lock to add the new element, and never dropped
+ * it, and thus no one could have gotten a reference to this port
+ * and thus the state couldn't have been touched */
+ ep = &port->peers[0];
+ _ready_ep(ep, owner, ops, priv);
+out:
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ return ep;
+}
+
+struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *peer;
+ unsigned long flags;
+
+ BUG_ON(!owner);
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ peer = _create_peer(info, owner, ep, ops, priv);
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+ return peer;
+}
+
+/* timeout < 0: wait forever
+ * timeout == 0: return immediately
+ * timeout > 0: wait up to 'timeout' msecs before giving up
+ */
+int trpc_connect(struct trpc_endpoint *from, long timeout)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = from->port;
+ struct trpc_node *src = from->owner;
+ int ret;
+ bool no_retry = !timeout;
+ unsigned long endtime = jiffies + msecs_to_jiffies(timeout);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* XXX: add state for connections and ports to prevent invalid
+ * states like multiple connections, etc. ? */
+ if (unlikely(is_closed(port))) {
+ ret = -ECONNRESET;
+ pr_err("%s: can't connect to %s, closed\n", __func__,
+ port->name);
+ goto out;
+ } else if (is_connected(port)) {
+ ret = 0;
+ goto out;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ do {
+ ret = nodes_try_connect(info, src, from);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (is_connected(port)) {
+ ret = 0;
+ goto out;
+ } else if (no_retry) {
+ goto out;
+ } else if (signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ usleep_range(5000, 20000);
+ } while (timeout < 0 || time_before(jiffies, endtime));
+
+ return -ETIMEDOUT;
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+/* convenience function for doing this common pattern in a single call */
+struct trpc_endpoint *trpc_create_connect(struct trpc_node *src,
+ char *name,
+ struct trpc_ep_ops *ops,
+ void *priv,
+ long timeout)
+{
+ struct trpc_endpoint *ep;
+ int ret;
+
+ ep = trpc_create(src, name, ops, priv);
+ if (IS_ERR(ep))
+ return ep;
+
+ ret = trpc_connect(ep, timeout);
+ if (ret) {
+ trpc_close(ep);
+ return ERR_PTR(ret);
+ }
+
+ return ep;
+}
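+
+/* Typical caller-side usage (a sketch; "RPC_AVP_PORT" and the one
+ * second timeout are illustrative, not fixed by this API):
+ *
+ *	ep = trpc_create_connect(node, "RPC_AVP_PORT", &ops, priv, 1000);
+ *	if (IS_ERR(ep))
+ *		return PTR_ERR(ep);
+ *	ret = trpc_send_msg(node, ep, &msg, sizeof(msg), GFP_KERNEL);
+ */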
+
+void trpc_close(struct trpc_endpoint *ep)
+{
+ struct trpc_port *port = ep->port;
+ struct trpc_endpoint *peer = ep->out;
+ bool need_close_op = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ BUG_ON(!ep->ready);
+ ep->ready = false;
+ port->closed = true;
+ if (peer->ready) {
+ need_close_op = true;
+ /* the peer may be waiting for a message */
+ wake_up_all(&peer->msg_waitq);
+ if (peer->connect_done)
+ complete(peer->connect_done);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ if (need_close_op && peer->ops && peer->ops->close)
+ peer->ops->close(peer);
+ trpc_put(ep);
+}
+
+int trpc_wait_peer(struct trpc_endpoint *ep, long timeout)
+{
+ struct trpc_port *port = ep->port;
+ DECLARE_COMPLETION_ONSTACK(event);
+ int ret;
+ unsigned long flags;
+
+ if (timeout < 0)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else if (timeout > 0)
+ timeout = msecs_to_jiffies(timeout);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (ep->connect_done) {
+ ret = -EBUSY;
+ goto done;
+ } else if (is_connected(port)) {
+ ret = 0;
+ goto done;
+ } else if (is_closed(port)) {
+ ret = -ECONNRESET;
+ goto done;
+ } else if (!timeout) {
+ ret = -EAGAIN;
+ goto done;
+ }
+ ep->connect_done = &event;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ret = wait_for_completion_interruptible_timeout(&event, timeout);
+
+ spin_lock_irqsave(&port->lock, flags);
+ ep->connect_done = NULL;
+
+ if (is_connected(port)) {
+ ret = 0;
+ } else {
+ if (is_closed(port))
+ ret = -ECONNRESET;
+ else if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ }
+
+done:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+static inline int _ep_id(struct trpc_endpoint *ep)
+{
+ return ep - ep->port->peers;
+}
+
+static int queue_msg(struct trpc_node *src, struct trpc_endpoint *from,
+ void *buf, size_t len, gfp_t gfp_flags)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_endpoint *peer = from->out;
+ struct trpc_port *port = from->port;
+ struct trpc_msg *msg;
+ unsigned long flags;
+ int ret;
+
+ BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);
+ /* shouldn't be enqueueing to the endpoint */
+ BUG_ON(peer->ops && peer->ops->send);
+
+ DBG(TRPC_TRACE_MSG, "%s: queueing message for %s.%d\n", __func__,
+ port->name, _ep_id(peer));
+
+ msg = kmem_cache_alloc(info->msg_cache, gfp_flags);
+ if (!msg) {
+ pr_err("%s: can't alloc memory for msg\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(msg->payload, buf, len);
+ msg->len = len;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (is_closed(port)) {
+ pr_err("%s: cannot send message for closed port %s.%d\n",
+ __func__, port->name, _ep_id(peer));
+ ret = -ECONNRESET;
+ goto err;
+ } else if (!is_connected(port)) {
+ pr_err("%s: cannot send message for unconnected port %s.%d\n",
+ __func__, port->name, _ep_id(peer));
+ ret = -ENOTCONN;
+ goto err;
+ }
+
+ list_add_tail(&msg->list, &peer->msg_list);
+ if (peer->ops && peer->ops->notify_recv)
+ peer->ops->notify_recv(peer);
+ wake_up_all(&peer->msg_waitq);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&port->lock, flags);
+ kmem_cache_free(info->msg_cache, msg);
+ return ret;
+}
+
+/* Returns -ENOMEM if failed to allocate memory for the message. */
+int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *from,
+ void *buf, size_t len, gfp_t gfp_flags)
+{
+ struct trpc_endpoint *peer = from->out;
+ struct trpc_port *port = from->port;
+
+ BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);
+
+ DBG(TRPC_TRACE_MSG, "%s: sending message from %s.%d to %s.%d\n",
+ __func__, port->name, _ep_id(from), port->name, _ep_id(peer));
+
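+ /* remote endpoints provide a send op that pushes the message across
+ * the transport directly; otherwise the message is copied into the
+ * peer's local queue.
+ */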
+ if (peer->ops && peer->ops->send) {
+ might_sleep();
+ return peer->ops->send(peer, buf, len);
+ } else {
+ might_sleep_if(gfp_flags & __GFP_WAIT);
+ return queue_msg(src, from, buf, len, gfp_flags);
+ }
+}
+
+static inline struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep)
+{
+ struct trpc_msg *msg = NULL;
+
+ if (!list_empty(&ep->msg_list)) {
+ msg = list_first_entry(&ep->msg_list, struct trpc_msg, list);
+ list_del_init(&msg->list);
+ }
+
+ return msg;
+}
+
+static bool __should_wake(struct trpc_endpoint *ep)
+{
+ struct trpc_port *port = ep->port;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ret = !list_empty(&ep->msg_list) || is_closed(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
+ void *buf, size_t buf_len, long timeout)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+ struct trpc_port *port = ep->port;
+ struct trpc_msg *msg;
+ size_t len;
+ long ret;
+ unsigned long flags;
+
+ BUG_ON(buf_len > TEGRA_RPC_MAX_MSG_LEN);
+
+ spin_lock_irqsave(&port->lock, flags);
+ /* we allow closed ports to finish receiving already-queued messages */
+ msg = dequeue_msg_locked(ep);
+ if (msg) {
+ goto got_msg;
+ } else if (is_closed(port)) {
+ ret = -ECONNRESET;
+ goto out;
+ } else if (!is_connected(port)) {
+ ret = -ENOTCONN;
+ goto out;
+ }
+
+ if (timeout == 0) {
+ ret = 0;
+ goto out;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else {
+ timeout = msecs_to_jiffies(timeout);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ DBG(TRPC_TRACE_MSG, "%s: waiting for message for %s.%d\n", __func__,
+ port->name, _ep_id(ep));
+
+ ret = wait_event_interruptible_timeout(ep->msg_waitq, __should_wake(ep),
+ timeout);
+
+ DBG(TRPC_TRACE_MSG, "%s: woke up for %s\n", __func__, port->name);
+ spin_lock_irqsave(&port->lock, flags);
+ msg = dequeue_msg_locked(ep);
+ if (!msg) {
+ if (is_closed(port))
+ ret = -ECONNRESET;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ else if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ else
+ pr_err("%s: error (%d) while receiving msg for '%s'\n",
+ __func__, (int)ret, port->name);
+ goto out;
+ }
+
+got_msg:
+ spin_unlock_irqrestore(&port->lock, flags);
+ len = min(buf_len, msg->len);
+ memcpy(buf, msg->payload, len);
+ kmem_cache_free(info->msg_cache, msg);
+ return len;
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+int trpc_node_register(struct trpc_node *node)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+
+ if (!info)
+ return -ENOMEM;
+
+ pr_info("%s: Adding '%s' to node list\n", __func__, node->name);
+
+ mutex_lock(&info->node_lock);
+ if (node->type == TRPC_NODE_LOCAL)
+ list_add(&node->list, &info->node_list);
+ else
+ list_add_tail(&node->list, &info->node_list);
+ mutex_unlock(&info->node_lock);
+ return 0;
+}
+
+void trpc_node_unregister(struct trpc_node *node)
+{
+ struct tegra_rpc_info *info = tegra_rpc;
+
+ mutex_lock(&info->node_lock);
+ list_del(&node->list);
+ mutex_unlock(&info->node_lock);
+}
+
+static int trpc_debug_ports_show(struct seq_file *s, void *data)
+{
+ struct tegra_rpc_info *info = s->private;
+ struct rb_node *n;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&info->ports_lock, flags);
+ for (n = rb_first(&info->ports); n; n = rb_next(n)) {
+ struct trpc_port *port = rb_entry(n, struct trpc_port, rb_node);
+ seq_printf(s, "port: %s\n closed:%s\n", port->name,
+ port->closed ? "yes" : "no");
+
+ spin_lock(&port->lock);
+ for (i = 0; i < ARRAY_SIZE(port->peers); i++) {
+ struct trpc_endpoint *ep = &port->peers[i];
+ seq_printf(s, " peer%d: %s\n ready:%s\n", i,
+ ep->owner ? ep->owner->name : "<none>",
+ ep->ready ? "yes" : "no");
+ if (ep->ops && ep->ops->show)
+ ep->ops->show(s, ep);
+ }
+ spin_unlock(&port->lock);
+ }
+ spin_unlock_irqrestore(&info->ports_lock, flags);
+
+ return 0;
+}
+
+static int trpc_debug_ports_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, trpc_debug_ports_show, inode->i_private);
+}
+
+static struct file_operations trpc_debug_ports_fops = {
+ .open = trpc_debug_ports_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void trpc_debug_init(struct tegra_rpc_info *info)
+{
+ trpc_debug_root = debugfs_create_dir("tegra_rpc", NULL);
+ if (IS_ERR_OR_NULL(trpc_debug_root)) {
+ pr_err("%s: couldn't create debug files\n", __func__);
+ return;
+ }
+
+ debugfs_create_file("ports", 0664, trpc_debug_root, info,
+ &trpc_debug_ports_fops);
+}
+
+static int __init tegra_rpc_init(void)
+{
+ struct tegra_rpc_info *rpc_info;
+ int ret;
+
+ rpc_info = kzalloc(sizeof(struct tegra_rpc_info), GFP_KERNEL);
+ if (!rpc_info) {
+ pr_err("%s: error allocating rpc_info\n", __func__);
+ return -ENOMEM;
+ }
+
+ rpc_info->ports = RB_ROOT;
+ spin_lock_init(&rpc_info->ports_lock);
+ INIT_LIST_HEAD(&rpc_info->node_list);
+ mutex_init(&rpc_info->node_lock);
+
+ rpc_info->msg_cache = KMEM_CACHE(trpc_msg, 0);
+ if (!rpc_info->msg_cache) {
+ pr_err("%s: unable to create message cache\n", __func__);
+ ret = -ENOMEM;
+ goto err_kmem_cache;
+ }
+
+ trpc_debug_init(rpc_info);
+ tegra_rpc = rpc_info;
+
+ return 0;
+
+err_kmem_cache:
+ kfree(rpc_info);
+ return ret;
+}
+
+subsys_initcall(tegra_rpc_init);
diff --git a/drivers/media/video/tegra/avp/trpc.h b/drivers/media/video/tegra/avp/trpc.h
new file mode 100644
index 000000000000..e7b0d2d55788
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARM_MACH_TEGRA_RPC_H
+#define __ARM_MACH_TEGRA_RPC_H
+
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/tegra_rpc.h>
+
+struct trpc_endpoint;
+struct trpc_ep_ops {
+ /* send is allowed to sleep */
+ int (*send)(struct trpc_endpoint *ep, void *buf, size_t len);
+ /* notify_recv is NOT allowed to sleep */
+ void (*notify_recv)(struct trpc_endpoint *ep);
+ /* close is allowed to sleep */
+ void (*close)(struct trpc_endpoint *ep);
+ /* not allowed to sleep, not allowed to call back into trpc */
+ void (*show)(struct seq_file *s, struct trpc_endpoint *ep);
+};
+
+enum {
+ TRPC_NODE_LOCAL,
+ TRPC_NODE_REMOTE,
+};
+
+struct trpc_node {
+ struct list_head list;
+ const char *name;
+ int type;
+ void *priv;
+
+ int (*try_connect)(struct trpc_node *node,
+ struct trpc_node *src,
+ struct trpc_endpoint *from);
+};
+
+struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep);
+void *trpc_priv(struct trpc_endpoint *ep);
+const char *trpc_name(struct trpc_endpoint *ep);
+
+void trpc_put(struct trpc_endpoint *ep);
+void trpc_get(struct trpc_endpoint *ep);
+
+int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *ep, void *buf,
+ size_t len, gfp_t gfp_flags);
+int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
+ void *buf, size_t len, long timeout);
+struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
+ struct trpc_ep_ops *ops, void *priv);
+struct trpc_endpoint *trpc_create_connect(struct trpc_node *src, char *name,
+ struct trpc_ep_ops *ops, void *priv,
+ long timeout);
+int trpc_connect(struct trpc_endpoint *from, long timeout);
+struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
+ struct trpc_endpoint *ep,
+ struct trpc_ep_ops *ops,
+ void *priv);
+void trpc_close(struct trpc_endpoint *ep);
+int trpc_wait_peer(struct trpc_endpoint *ep, long timeout);
+
+int trpc_node_register(struct trpc_node *node);
+void trpc_node_unregister(struct trpc_node *node);
+
+#endif
diff --git a/drivers/media/video/tegra/avp/trpc_local.c b/drivers/media/video/tegra/avp/trpc_local.c
new file mode 100644
index 000000000000..5a941a78fc40
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_local.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * Based on original NVRM code from NVIDIA, and a partial rewrite by
+ * Gary King <gking@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tegra_rpc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "trpc.h"
+#include "trpc_sema.h"
+
+struct rpc_info {
+ struct trpc_endpoint *rpc_ep;
+ struct file *sema_file;
+};
+
+/* port names reserved for system functions, i.e. communicating with the
+ * AVP */
+static const char reserved_ports[][TEGRA_RPC_MAX_NAME_LEN] = {
+ "RPC_AVP_PORT",
+ "RPC_CPU_PORT",
+};
+static int num_reserved_ports = ARRAY_SIZE(reserved_ports);
+
+static void rpc_notify_recv(struct trpc_endpoint *ep);
+
+/* TODO: do we need to do anything when port is closed from the other side? */
+static struct trpc_ep_ops ep_ops = {
+ .notify_recv = rpc_notify_recv,
+};
+
+static struct trpc_node rpc_node = {
+ .name = "local",
+ .type = TRPC_NODE_LOCAL,
+};
+
+static void rpc_notify_recv(struct trpc_endpoint *ep)
+{
+ struct rpc_info *info = trpc_priv(ep);
+
+ if (WARN_ON(!info))
+ return;
+ if (info->sema_file)
+ trpc_sema_signal(info->sema_file);
+}
+
+static int local_rpc_open(struct inode *inode, struct file *file)
+{
+ struct rpc_info *info;
+
+ info = kzalloc(sizeof(struct rpc_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ nonseekable_open(inode, file);
+ file->private_data = info;
+ return 0;
+}
+
+static int local_rpc_release(struct inode *inode, struct file *file)
+{
+ struct rpc_info *info = file->private_data;
+
+ if (info->rpc_ep)
+ trpc_close(info->rpc_ep);
+ if (info->sema_file)
+ fput(info->sema_file);
+ kfree(info);
+ file->private_data = NULL;
+ return 0;
+}
+
+static int __get_port_desc(struct tegra_rpc_port_desc *desc,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned int size = _IOC_SIZE(cmd);
+
+ if (size != sizeof(struct tegra_rpc_port_desc))
+ return -EINVAL;
+ if (copy_from_user(desc, (void __user *)arg, sizeof(*desc)))
+ return -EFAULT;
+
+ desc->name[TEGRA_RPC_MAX_NAME_LEN - 1] = '\0';
+ return 0;
+}
+
+static char uniq_name[] = "aaaaaaaa+";
+static const int uniq_len = sizeof(uniq_name) - 1;
+static DEFINE_MUTEX(uniq_lock);
+
+static void _gen_port_name(char *new_name)
+{
+ int i;
+
+ mutex_lock(&uniq_lock);
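+	/* advance the name as a little-endian counter over 'a'..'y'; a
+	 * digit hitting 'z' wraps back to 'a' and carries into the next
+	 * position */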
+ for (i = 0; i < uniq_len - 1; i++) {
+ ++uniq_name[i];
+ if (uniq_name[i] != 'z')
+ break;
+ uniq_name[i] = 'a';
+ }
+ strlcpy(new_name, uniq_name, TEGRA_RPC_MAX_NAME_LEN);
+ mutex_unlock(&uniq_lock);
+}
+
+static int _validate_port_name(const char *name)
+{
+ int i;
+
+ for (i = 0; i < num_reserved_ports; i++)
+ if (!strncmp(name, reserved_ports[i], TEGRA_RPC_MAX_NAME_LEN))
+ return -EINVAL;
+ return 0;
+}
+
+static long local_rpc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct rpc_info *info = file->private_data;
+ struct tegra_rpc_port_desc desc;
+ struct trpc_endpoint *ep;
+ int ret = 0;
+
+ if (_IOC_TYPE(cmd) != TEGRA_RPC_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < TEGRA_RPC_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > TEGRA_RPC_IOCTL_MAX_NR) {
+ ret = -ENOTTY;
+ goto err;
+ }
+
+ switch (cmd) {
+ case TEGRA_RPC_IOCTL_PORT_CREATE:
+ if (info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = __get_port_desc(&desc, cmd, arg);
+ if (ret)
+ goto err;
+ if (desc.name[0]) {
+ ret = _validate_port_name(desc.name);
+ if (ret)
+ goto err;
+ } else {
+ _gen_port_name(desc.name);
+ }
+ if (desc.notify_fd != -1) {
+ /* grab a reference to the trpc_sema fd */
+ info->sema_file = trpc_sema_get_from_fd(desc.notify_fd);
+ if (IS_ERR(info->sema_file)) {
+ ret = PTR_ERR(info->sema_file);
+ info->sema_file = NULL;
+ goto err;
+ }
+ }
+ ep = trpc_create(&rpc_node, desc.name, &ep_ops, info);
+ if (IS_ERR(ep)) {
+ ret = PTR_ERR(ep);
+ if (info->sema_file)
+ fput(info->sema_file);
+ info->sema_file = NULL;
+ goto err;
+ }
+ info->rpc_ep = ep;
+ break;
+ case TEGRA_RPC_IOCTL_PORT_GET_NAME:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (copy_to_user((void __user *)arg,
+ trpc_name(info->rpc_ep),
+ TEGRA_RPC_MAX_NAME_LEN)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ case TEGRA_RPC_IOCTL_PORT_CONNECT:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = trpc_connect(info->rpc_ep, (long)arg);
+ if (ret) {
+ pr_err("%s: can't connect to '%s' (%d)\n", __func__,
+ trpc_name(info->rpc_ep), ret);
+ goto err;
+ }
+ break;
+ case TEGRA_RPC_IOCTL_PORT_LISTEN:
+ if (!info->rpc_ep) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = trpc_wait_peer(info->rpc_ep, (long)arg);
+ if (ret) {
+ pr_err("%s: error waiting for peer for '%s' (%d)\n",
+ __func__, trpc_name(info->rpc_ep), ret);
+ goto err;
+ }
+ break;
+ default:
+ pr_err("%s: unknown cmd %d\n", __func__, _IOC_NR(cmd));
+		ret = -ENOTTY;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (ret && ret != -ERESTARTSYS)
+ pr_err("tegra_rpc: pid=%d ioctl=%x/%lx (%x) ret=%d\n",
+ current->pid, cmd, arg, _IOC_NR(cmd), ret);
+ return (long)ret;
+}
+
+static ssize_t local_rpc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct rpc_info *info = file->private_data;
+ u8 data[TEGRA_RPC_MAX_MSG_LEN];
+ int ret;
+
+	if (!info || !info->rpc_ep)
+		return -EINVAL;
+	else if (count > TEGRA_RPC_MAX_MSG_LEN)
+		return -EINVAL;
+
+ if (copy_from_user(data, buf, count))
+ return -EFAULT;
+
+ ret = trpc_send_msg(&rpc_node, info->rpc_ep, data, count,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t local_rpc_read(struct file *file, char __user *buf, size_t max,
+ loff_t *ppos)
+{
+ struct rpc_info *info = file->private_data;
+ int ret;
+ u8 data[TEGRA_RPC_MAX_MSG_LEN];
+
+	if (!info || !info->rpc_ep)
+		return -EINVAL;
+	if (max > TEGRA_RPC_MAX_MSG_LEN)
+		return -EINVAL;
+
+ ret = trpc_recv_msg(&rpc_node, info->rpc_ep, data,
+ TEGRA_RPC_MAX_MSG_LEN, 0);
+ if (ret == 0)
+ return 0;
+ else if (ret < 0)
+ return ret;
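+	/* the message was already dequeued by trpc_recv_msg; if it does
+	 * not fit in the caller's buffer it is dropped */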
+ else if (ret > max)
+ return -ENOSPC;
+ else if (copy_to_user(buf, data, ret))
+ return -EFAULT;
+
+ return ret;
+}
+
+static const struct file_operations local_rpc_misc_fops = {
+ .owner = THIS_MODULE,
+ .open = local_rpc_open,
+ .release = local_rpc_release,
+ .unlocked_ioctl = local_rpc_ioctl,
+ .write = local_rpc_write,
+ .read = local_rpc_read,
+};
+
+static struct miscdevice local_rpc_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tegra_rpc",
+ .fops = &local_rpc_misc_fops,
+};
+
+int __init rpc_local_init(void)
+{
+ int ret;
+
+ ret = trpc_sema_init();
+ if (ret) {
+ pr_err("%s: error in trpc_sema_init\n", __func__);
+ goto err_sema_init;
+ }
+
+ ret = misc_register(&local_rpc_misc_device);
+ if (ret) {
+ pr_err("%s: can't register misc device\n", __func__);
+ goto err_misc;
+ }
+
+ ret = trpc_node_register(&rpc_node);
+ if (ret) {
+ pr_err("%s: can't register rpc node\n", __func__);
+ goto err_node_reg;
+ }
+ return 0;
+
+err_node_reg:
+ misc_deregister(&local_rpc_misc_device);
+err_misc:
+err_sema_init:
+ return ret;
+}
+
+module_init(rpc_local_init);
diff --git a/drivers/media/video/tegra/avp/trpc_sema.c b/drivers/media/video/tegra/avp/trpc_sema.c
new file mode 100644
index 000000000000..b8772573d956
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_sema.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tegra_sema.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "trpc_sema.h"
+
+struct trpc_sema {
+ wait_queue_head_t wq;
+ spinlock_t lock;
+ int count;
+};
+
+static int rpc_sema_minor = -1;
+
+static inline bool is_trpc_sema_file(struct file *file)
+{
+ dev_t rdev = file->f_dentry->d_inode->i_rdev;
+
+ if (MAJOR(rdev) == MISC_MAJOR && MINOR(rdev) == rpc_sema_minor)
+ return true;
+ return false;
+}
+
+struct file *trpc_sema_get_from_fd(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (unlikely(file == NULL)) {
+ pr_err("%s: fd %d is invalid\n", __func__, fd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!is_trpc_sema_file(file)) {
+ pr_err("%s: fd (%d) is not a trpc_sema file\n", __func__, fd);
+ fput(file);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return file;
+}
+
+int trpc_sema_signal(struct file *file)
+{
+ struct trpc_sema *info = file->private_data;
+ unsigned long flags;
+
+ if (!info)
+ return -EINVAL;
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->count++;
+ wake_up_interruptible_all(&info->wq);
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
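+/* wait for the semaphore count to become positive and consume one signal.
+ * On -EINTR the remaining time (in ms) is passed back via *timeleft so
+ * the caller can restart the wait. */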
+static int trpc_sema_wait(struct trpc_sema *info, long *timeleft)
+{
+ unsigned long flags;
+ int ret = 0;
+ unsigned long endtime;
+ long timeout = *timeleft;
+
+ *timeleft = 0;
+ if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else if (timeout > 0) {
+ timeout = msecs_to_jiffies(timeout);
+ endtime = jiffies + timeout;
+ }
+
+again:
+ if (timeout)
+ ret = wait_event_interruptible_timeout(info->wq,
+ info->count > 0,
+ timeout);
+ spin_lock_irqsave(&info->lock, flags);
+ if (info->count > 0) {
+ info->count--;
+ ret = 0;
+ } else if (ret == 0 || timeout == 0) {
+ ret = -ETIMEDOUT;
+ } else if (ret < 0) {
+ ret = -EINTR;
+ if (timeout != MAX_SCHEDULE_TIMEOUT &&
+ time_before(jiffies, endtime))
+ *timeleft = jiffies_to_msecs(endtime - jiffies);
+ else
+ *timeleft = 0;
+ } else {
+ /* we woke up but someone else got the semaphore and we have
+ * time left, try again */
+ timeout = ret;
+ spin_unlock_irqrestore(&info->lock, flags);
+ goto again;
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+ return ret;
+}
+
+static int trpc_sema_open(struct inode *inode, struct file *file)
+{
+ struct trpc_sema *info;
+
+ info = kzalloc(sizeof(struct trpc_sema), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ nonseekable_open(inode, file);
+ init_waitqueue_head(&info->wq);
+ spin_lock_init(&info->lock);
+ file->private_data = info;
+ return 0;
+}
+
+static int trpc_sema_release(struct inode *inode, struct file *file)
+{
+ struct trpc_sema *info = file->private_data;
+
+ file->private_data = NULL;
+ kfree(info);
+ return 0;
+}
+
+static long trpc_sema_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct trpc_sema *info = file->private_data;
+ int ret;
+ long timeout;
+
+ if (_IOC_TYPE(cmd) != TEGRA_SEMA_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < TEGRA_SEMA_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > TEGRA_SEMA_IOCTL_MAX_NR)
+ return -ENOTTY;
+ else if (!info)
+ return -EINVAL;
+
+ switch (cmd) {
+ case TEGRA_SEMA_IOCTL_WAIT:
+ if (copy_from_user(&timeout, (void __user *)arg, sizeof(long)))
+ return -EFAULT;
+ ret = trpc_sema_wait(info, &timeout);
+ if (ret != -EINTR)
+ break;
+ if (copy_to_user((void __user *)arg, &timeout, sizeof(long)))
+ ret = -EFAULT;
+ break;
+ case TEGRA_SEMA_IOCTL_SIGNAL:
+ ret = trpc_sema_signal(file);
+ break;
+ default:
+ pr_err("%s: Unknown tegra_sema ioctl 0x%x\n", __func__,
+ _IOC_NR(cmd));
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations trpc_sema_misc_fops = {
+ .owner = THIS_MODULE,
+ .open = trpc_sema_open,
+ .release = trpc_sema_release,
+ .unlocked_ioctl = trpc_sema_ioctl,
+};
+
+static struct miscdevice trpc_sema_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "tegra_sema",
+ .fops = &trpc_sema_misc_fops,
+};
+
+int __init trpc_sema_init(void)
+{
+ int ret;
+
+ if (rpc_sema_minor >= 0) {
+ pr_err("%s: trpc_sema already registered\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = misc_register(&trpc_sema_misc_device);
+ if (ret) {
+ pr_err("%s: can't register misc device\n", __func__);
+ return ret;
+ }
+
+ rpc_sema_minor = trpc_sema_misc_device.minor;
+ pr_info("%s: registered misc dev %d:%d\n", __func__, MISC_MAJOR,
+ rpc_sema_minor);
+
+ return 0;
+}
diff --git a/drivers/media/video/tegra/avp/trpc_sema.h b/drivers/media/video/tegra/avp/trpc_sema.h
new file mode 100644
index 000000000000..566bbdbe739e
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_sema.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARM_MACH_TEGRA_RPC_SEMA_H
+#define __ARM_MACH_TEGRA_RPC_SEMA_H
+
+#include <linux/types.h>
+#include <linux/fs.h>
+
+struct file *trpc_sema_get_from_fd(int fd);
+int trpc_sema_signal(struct file *file);
+int __init trpc_sema_init(void);
+
+#endif
diff --git a/drivers/media/video/tegra/tegra_camera.c b/drivers/media/video/tegra/tegra_camera.c
new file mode 100644
index 000000000000..f310d0f5619f
--- /dev/null
+++ b/drivers/media/video/tegra/tegra_camera.c
@@ -0,0 +1,368 @@
+/*
+ * drivers/media/video/tegra/tegra_camera.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <mach/iomap.h>
+#include <mach/clk.h>
+
+#include <media/tegra_camera.h>
+
+/* Eventually this should handle all clock and reset calls for the isp, vi,
+ * vi_sensor, and csi modules, replacing nvrm and nvos completely for camera
+ */
+#define TEGRA_CAMERA_NAME "tegra_camera"
+static DEFINE_MUTEX(tegra_camera_lock);
+
+struct tegra_camera_block {
+ int (*enable) (void);
+ int (*disable) (void);
+ bool is_enabled;
+};
+
+static struct clk *isp_clk;
+static struct clk *vi_clk;
+static struct clk *vi_sensor_clk;
+static struct clk *csus_clk;
+static struct clk *csi_clk;
+static struct regulator *tegra_camera_regulator_csi;
+
+static int tegra_camera_enable_isp(void)
+{
+ return clk_enable(isp_clk);
+}
+
+static int tegra_camera_disable_isp(void)
+{
+ clk_disable(isp_clk);
+ return 0;
+}
+
+static int tegra_camera_enable_vi(void)
+{
+ clk_enable(vi_clk);
+ clk_enable(vi_sensor_clk);
+ clk_enable(csus_clk);
+ return 0;
+}
+
+static int tegra_camera_disable_vi(void)
+{
+ clk_disable(vi_clk);
+ clk_disable(vi_sensor_clk);
+ clk_disable(csus_clk);
+ return 0;
+}
+
+static int tegra_camera_enable_csi(void)
+{
+ int ret;
+
+ ret = regulator_enable(tegra_camera_regulator_csi);
+ if (ret)
+ return ret;
+ clk_enable(csi_clk);
+ return 0;
+}
+
+static int tegra_camera_disable_csi(void)
+{
+ int ret;
+
+ ret = regulator_disable(tegra_camera_regulator_csi);
+ if (ret)
+ return ret;
+ clk_disable(csi_clk);
+ return 0;
+}
+
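+/* per-module enable/disable hooks, indexed by TEGRA_CAMERA_MODULE_* id */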
+static struct tegra_camera_block tegra_camera_block[] = {
+ [TEGRA_CAMERA_MODULE_ISP] = {tegra_camera_enable_isp,
+ tegra_camera_disable_isp, false},
+ [TEGRA_CAMERA_MODULE_VI] = {tegra_camera_enable_vi,
+ tegra_camera_disable_vi, false},
+ [TEGRA_CAMERA_MODULE_CSI] = {tegra_camera_enable_csi,
+ tegra_camera_disable_csi, false},
+};
+
+#define TEGRA_CAMERA_VI_CLK_SEL_INTERNAL 0
+#define TEGRA_CAMERA_VI_CLK_SEL_EXTERNAL (1<<24)
+#define TEGRA_CAMERA_PD2VI_CLK_SEL_VI_SENSOR_CLK (1<<25)
+#define TEGRA_CAMERA_PD2VI_CLK_SEL_PD2VI_CLK 0
+
+static int tegra_camera_clk_set_rate(struct tegra_camera_clk_info *info)
+{
+ u32 offset;
+ struct clk *clk;
+
+ if (info->id != TEGRA_CAMERA_MODULE_VI) {
+ pr_err("%s: Set rate only aplies to vi module %d\n", __func__,
+ info->id);
+ return -EINVAL;
+ }
+
+ switch (info->clk_id) {
+ case TEGRA_CAMERA_VI_CLK:
+ clk = vi_clk;
+ offset = 0x148;
+ break;
+ case TEGRA_CAMERA_VI_SENSOR_CLK:
+ clk = vi_sensor_clk;
+ offset = 0x1a8;
+ break;
+ default:
+ pr_err("%s: invalid clk id for set rate %d\n", __func__,
+ info->clk_id);
+ return -EINVAL;
+ }
+
+ clk_set_rate(clk, info->rate);
+
+ if (info->clk_id == TEGRA_CAMERA_VI_CLK) {
+ u32 val;
+ void __iomem *car = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+ void __iomem *apb_misc = IO_ADDRESS(TEGRA_APB_MISC_BASE);
+
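+		/* select the clock source in the clock/reset controller and
+		 * set bit 0 of the APB_MISC 0x42c register; the magic values
+		 * follow the NVIDIA reference (NvRM) code */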
+ writel(0x2, car + offset);
+
+ val = readl(apb_misc + 0x42c);
+ writel(val | 0x1, apb_misc + 0x42c);
+ }
+
+ info->rate = clk_get_rate(clk);
+ return 0;
+}
+
+static int tegra_camera_reset(uint id)
+{
+ struct clk *clk;
+
+ switch (id) {
+ case TEGRA_CAMERA_MODULE_VI:
+ clk = vi_clk;
+ break;
+ case TEGRA_CAMERA_MODULE_ISP:
+ clk = isp_clk;
+ break;
+ case TEGRA_CAMERA_MODULE_CSI:
+ clk = csi_clk;
+ break;
+ default:
+ return -EINVAL;
+ }
+ tegra_periph_reset_assert(clk);
+ udelay(10);
+ tegra_periph_reset_deassert(clk);
+
+ return 0;
+}
+
+static long tegra_camera_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ uint id;
+
+ /* first element of arg must be u32 with id of module to talk to */
+ if (copy_from_user(&id, (const void __user *)arg, sizeof(uint))) {
+ pr_err("%s: Failed to copy arg from user", __func__);
+ return -EFAULT;
+ }
+
+ if (id >= ARRAY_SIZE(tegra_camera_block)) {
+ pr_err("%s: Invalid id to tegra isp ioctl%d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case TEGRA_CAMERA_IOCTL_ENABLE:
+ {
+ int ret = 0;
+
+ mutex_lock(&tegra_camera_lock);
+ if (!tegra_camera_block[id].is_enabled) {
+ ret = tegra_camera_block[id].enable();
+ tegra_camera_block[id].is_enabled = true;
+ }
+ mutex_unlock(&tegra_camera_lock);
+ return ret;
+ }
+ case TEGRA_CAMERA_IOCTL_DISABLE:
+ {
+ int ret = 0;
+
+ mutex_lock(&tegra_camera_lock);
+ if (tegra_camera_block[id].is_enabled) {
+ ret = tegra_camera_block[id].disable();
+ tegra_camera_block[id].is_enabled = false;
+ }
+ mutex_unlock(&tegra_camera_lock);
+ return ret;
+ }
+ case TEGRA_CAMERA_IOCTL_CLK_SET_RATE:
+ {
+ struct tegra_camera_clk_info info;
+ int ret;
+
+ if (copy_from_user(&info, (const void __user *)arg,
+ sizeof(struct tegra_camera_clk_info))) {
+ pr_err("%s: Failed to copy arg from user\n", __func__);
+ return -EFAULT;
+ }
+ ret = tegra_camera_clk_set_rate(&info);
+ if (ret)
+ return ret;
+ if (copy_to_user((void __user *)arg, &info,
+ sizeof(struct tegra_camera_clk_info))) {
+ pr_err("%s: Failed to copy arg to user\n", __func__);
+ return -EFAULT;
+ }
+ return 0;
+ }
+ case TEGRA_CAMERA_IOCTL_RESET:
+ return tegra_camera_reset(id);
+ default:
+ pr_err("%s: Unknown tegra_camera ioctl.\n", TEGRA_CAMERA_NAME);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tegra_camera_release(struct inode *inode, struct file *file)
+{
+ int i;
+
+	/* take the lock so a concurrent ioctl can't race the teardown */
+	mutex_lock(&tegra_camera_lock);
+	for (i = 0; i < ARRAY_SIZE(tegra_camera_block); i++) {
+		if (tegra_camera_block[i].is_enabled) {
+			tegra_camera_block[i].disable();
+			tegra_camera_block[i].is_enabled = false;
+		}
+	}
+	mutex_unlock(&tegra_camera_lock);
+
+ return 0;
+}
+
+static const struct file_operations tegra_camera_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = tegra_camera_ioctl,
+ .release = tegra_camera_release,
+};
+
+static struct miscdevice tegra_camera_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = TEGRA_CAMERA_NAME,
+ .fops = &tegra_camera_fops,
+};
+
+static int tegra_camera_clk_get(struct platform_device *pdev, const char *name,
+ struct clk **clk)
+{
+ *clk = clk_get(&pdev->dev, name);
+	if (IS_ERR_OR_NULL(*clk)) {
+		int err = *clk ? PTR_ERR(*clk) : -ENOENT;
+
+		pr_err("%s: unable to get clock for %s\n", __func__, name);
+		*clk = NULL;
+		return err;
+	}
+ return 0;
+}
+
+static int tegra_camera_probe(struct platform_device *pdev)
+{
+ int err;
+
+ pr_info("%s: probe\n", TEGRA_CAMERA_NAME);
+ tegra_camera_regulator_csi = regulator_get(&pdev->dev, "vcsi");
+ if (IS_ERR_OR_NULL(tegra_camera_regulator_csi)) {
+ pr_err("%s: Couldn't get regulator vcsi\n", TEGRA_CAMERA_NAME);
+		return tegra_camera_regulator_csi ?
+			PTR_ERR(tegra_camera_regulator_csi) : -ENODEV;
+ }
+
+ err = misc_register(&tegra_camera_device);
+ if (err) {
+ pr_err("%s: Unable to register misc device!\n",
+ TEGRA_CAMERA_NAME);
+ goto misc_register_err;
+ }
+
+ err = tegra_camera_clk_get(pdev, "isp", &isp_clk);
+ if (err)
+		goto isp_clk_get_err;
+ err = tegra_camera_clk_get(pdev, "vi", &vi_clk);
+ if (err)
+ goto vi_clk_get_err;
+ err = tegra_camera_clk_get(pdev, "vi_sensor", &vi_sensor_clk);
+ if (err)
+ goto vi_sensor_clk_get_err;
+ err = tegra_camera_clk_get(pdev, "csus", &csus_clk);
+ if (err)
+ goto csus_clk_get_err;
+ err = tegra_camera_clk_get(pdev, "csi", &csi_clk);
+ if (err)
+ goto csi_clk_get_err;
+
+ return 0;
+
+csi_clk_get_err:
+ clk_put(csus_clk);
+csus_clk_get_err:
+ clk_put(vi_sensor_clk);
+vi_sensor_clk_get_err:
+ clk_put(vi_clk);
+vi_clk_get_err:
+	clk_put(isp_clk);
+isp_clk_get_err:
+	misc_deregister(&tegra_camera_device);
+misc_register_err:
+ regulator_put(tegra_camera_regulator_csi);
+ return err;
+}
+
+static int tegra_camera_remove(struct platform_device *pdev)
+{
+ clk_put(isp_clk);
+ clk_put(vi_clk);
+ clk_put(vi_sensor_clk);
+ clk_put(csus_clk);
+ clk_put(csi_clk);
+
+ regulator_put(tegra_camera_regulator_csi);
+ misc_deregister(&tegra_camera_device);
+ return 0;
+}
+
+static struct platform_driver tegra_camera_driver = {
+ .probe = tegra_camera_probe,
+ .remove = tegra_camera_remove,
+ .driver = { .name = TEGRA_CAMERA_NAME }
+};
+
+static int __init tegra_camera_init(void)
+{
+ return platform_driver_register(&tegra_camera_driver);
+}
+
+static void __exit tegra_camera_exit(void)
+{
+ platform_driver_unregister(&tegra_camera_driver);
+}
+
+module_init(tegra_camera_init);
+module_exit(tegra_camera_exit);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index db51ea1c6082..0d762688effe 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -542,8 +542,8 @@ config MFD_JZ4740_ADC
This driver is necessary for jz4740-battery and jz4740-hwmon driver.
config MFD_TPS6586X
- tristate "TPS6586x Power Management chips"
- depends on I2C && GPIOLIB
+ bool "TPS6586x Power Management chips"
+ depends on I2C && GPIOLIB && GENERIC_HARDIRQS
select MFD_CORE
help
If you say yes here you get support for the TPS6586X series of
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 4cde31e6a252..ab667f296897 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -15,6 +15,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -29,9 +31,62 @@
#define TPS6586X_GPIOSET1 0x5d
#define TPS6586X_GPIOSET2 0x5e
+/* interrupt control registers */
+#define TPS6586X_INT_ACK1 0xb5
+#define TPS6586X_INT_ACK2 0xb6
+#define TPS6586X_INT_ACK3 0xb7
+#define TPS6586X_INT_ACK4 0xb8
+
+/* interrupt mask registers */
+#define TPS6586X_INT_MASK1 0xb0
+#define TPS6586X_INT_MASK2 0xb1
+#define TPS6586X_INT_MASK3 0xb2
+#define TPS6586X_INT_MASK4 0xb3
+#define TPS6586X_INT_MASK5 0xb4
+
/* device id */
#define TPS6586X_VERSIONCRC 0xcd
-#define TPS658621A_VERSIONCRC 0x15
+
+struct tps6586x_irq_data {
+ u8 mask_reg;
+ u8 mask_mask;
+};
+
+#define TPS6586X_IRQ(_reg, _mask) \
+ { \
+ .mask_reg = (_reg) - TPS6586X_INT_MASK1, \
+ .mask_mask = (_mask), \
+ }
+
+static const struct tps6586x_irq_data tps6586x_irqs[] = {
+ [TPS6586X_INT_PLDO_0] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 0),
+ [TPS6586X_INT_PLDO_1] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 1),
+ [TPS6586X_INT_PLDO_2] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 2),
+ [TPS6586X_INT_PLDO_3] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 3),
+ [TPS6586X_INT_PLDO_4] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 4),
+ [TPS6586X_INT_PLDO_5] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 5),
+ [TPS6586X_INT_PLDO_6] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 6),
+ [TPS6586X_INT_PLDO_7] = TPS6586X_IRQ(TPS6586X_INT_MASK1, 1 << 7),
+ [TPS6586X_INT_COMP_DET] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 0),
+ [TPS6586X_INT_ADC] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 1),
+ [TPS6586X_INT_PLDO_8] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 2),
+ [TPS6586X_INT_PLDO_9] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 3),
+ [TPS6586X_INT_PSM_0] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 4),
+ [TPS6586X_INT_PSM_1] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 5),
+ [TPS6586X_INT_PSM_2] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 6),
+ [TPS6586X_INT_PSM_3] = TPS6586X_IRQ(TPS6586X_INT_MASK2, 1 << 7),
+ [TPS6586X_INT_RTC_ALM1] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 4),
+ [TPS6586X_INT_ACUSB_OVP] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 0x03),
+ [TPS6586X_INT_USB_DET] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 2),
+ [TPS6586X_INT_AC_DET] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 3),
+ [TPS6586X_INT_BAT_DET] = TPS6586X_IRQ(TPS6586X_INT_MASK3, 1 << 0),
+ [TPS6586X_INT_CHG_STAT] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 0xfc),
+ [TPS6586X_INT_CHG_TEMP] = TPS6586X_IRQ(TPS6586X_INT_MASK3, 0x06),
+ [TPS6586X_INT_PP] = TPS6586X_IRQ(TPS6586X_INT_MASK3, 0xf0),
+ [TPS6586X_INT_RESUME] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 5),
+ [TPS6586X_INT_LOW_SYS] = TPS6586X_IRQ(TPS6586X_INT_MASK5, 1 << 6),
+ [TPS6586X_INT_RTC_ALM2] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 1),
+};
struct tps6586x {
struct mutex lock;
@@ -39,6 +94,12 @@ struct tps6586x {
struct i2c_client *client;
struct gpio_chip gpio;
+ struct irq_chip irq_chip;
+ struct mutex irq_lock;
+ int irq_base;
+ u32 irq_en;
+ u8 mask_cache[5];
+ u8 mask_reg[5];
};
static inline int __tps6586x_read(struct i2c_client *client,
@@ -262,6 +323,129 @@ static int tps6586x_remove_subdevs(struct tps6586x *tps6586x)
return device_for_each_child(tps6586x->dev, NULL, __remove_subdev);
}
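+/*
+ * irq_chip callbacks: enable/disable only update the cached mask bits
+ * under irq_lock; the actual I2C writes happen in bus_sync_unlock, since
+ * I2C transfers can sleep and must not run in atomic context.
+ */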
+static void tps6586x_irq_lock(unsigned int irq)
+{
+ struct tps6586x *tps6586x = get_irq_chip_data(irq);
+
+ mutex_lock(&tps6586x->irq_lock);
+}
+
+static void tps6586x_irq_enable(unsigned int irq)
+{
+ struct tps6586x *tps6586x = get_irq_chip_data(irq);
+ unsigned int __irq = irq - tps6586x->irq_base;
+ const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
+
+ tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
+ tps6586x->irq_en |= (1 << __irq);
+}
+
+static void tps6586x_irq_disable(unsigned int irq)
+{
+ struct tps6586x *tps6586x = get_irq_chip_data(irq);
+
+ unsigned int __irq = irq - tps6586x->irq_base;
+ const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
+
+ tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
+ tps6586x->irq_en &= ~(1 << __irq);
+}
+
+static void tps6586x_irq_sync_unlock(unsigned int irq)
+{
+ struct tps6586x *tps6586x = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tps6586x->mask_reg); i++) {
+ if (tps6586x->mask_reg[i] != tps6586x->mask_cache[i]) {
+ if (!WARN_ON(tps6586x_write(tps6586x->dev,
+ TPS6586X_INT_MASK1 + i,
+ tps6586x->mask_reg[i])))
+ tps6586x->mask_cache[i] = tps6586x->mask_reg[i];
+ }
+ }
+
+ mutex_unlock(&tps6586x->irq_lock);
+}
+
+static irqreturn_t tps6586x_irq(int irq, void *data)
+{
+ struct tps6586x *tps6586x = data;
+ u32 acks;
+ int ret = 0;
+
+ ret = tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1,
+ sizeof(acks), (uint8_t *)&acks);
+
+ if (ret < 0) {
+ dev_err(tps6586x->dev, "failed to read interrupt status\n");
+ return IRQ_NONE;
+ }
+
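+	/* the four ACK registers were read LSB-first into 'acks'; reading
+	 * them acknowledges the pending interrupt bits in the PMIC */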
+ acks = le32_to_cpu(acks);
+
+ while (acks) {
+ int i = __ffs(acks);
+
+ if (tps6586x->irq_en & (1 << i))
+ handle_nested_irq(tps6586x->irq_base + i);
+
+ acks &= ~(1 << i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
+ int irq_base)
+{
+ int i, ret;
+ u8 tmp[4];
+
+ if (!irq_base) {
+		dev_warn(tps6586x->dev, "no IRQ base specified, IRQ support disabled\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&tps6586x->irq_lock);
+ for (i = 0; i < 5; i++) {
+ tps6586x->mask_cache[i] = 0xff;
+ tps6586x->mask_reg[i] = 0xff;
+ tps6586x_write(tps6586x->dev, TPS6586X_INT_MASK1 + i, 0xff);
+ }
+
+ tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1, sizeof(tmp), tmp);
+
+ tps6586x->irq_base = irq_base;
+
+ tps6586x->irq_chip.name = "tps6586x";
+ tps6586x->irq_chip.enable = tps6586x_irq_enable;
+ tps6586x->irq_chip.disable = tps6586x_irq_disable;
+ tps6586x->irq_chip.bus_lock = tps6586x_irq_lock;
+ tps6586x->irq_chip.bus_sync_unlock = tps6586x_irq_sync_unlock;
+
+ for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
+ int __irq = i + tps6586x->irq_base;
+ set_irq_chip_data(__irq, tps6586x);
+ set_irq_chip_and_handler(__irq, &tps6586x->irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(__irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(__irq, IRQF_VALID);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT,
+ "tps6586x", tps6586x);
+
+ if (!ret) {
+ device_init_wakeup(tps6586x->dev, 1);
+ enable_irq_wake(irq);
+ }
+
+ return ret;
+}
+
static int __devinit tps6586x_add_subdevs(struct tps6586x *tps6586x,
struct tps6586x_platform_data *pdata)
{
@@ -306,10 +490,7 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
return -EIO;
}
- if (ret != TPS658621A_VERSIONCRC) {
- dev_err(&client->dev, "Unsupported chip ID: %x\n", ret);
- return -ENODEV;
- }
+ dev_info(&client->dev, "VERSIONCRC is %02x\n", ret);
tps6586x = kzalloc(sizeof(struct tps6586x), GFP_KERNEL);
if (tps6586x == NULL)
@@ -321,6 +502,15 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
mutex_init(&tps6586x->lock);
+ if (client->irq) {
+ ret = tps6586x_irq_init(tps6586x, client->irq,
+ pdata->irq_base);
+ if (ret) {
+ dev_err(&client->dev, "IRQ init failed: %d\n", ret);
+ goto err_irq_init;
+ }
+ }
+
ret = tps6586x_add_subdevs(tps6586x, pdata);
if (ret) {
dev_err(&client->dev, "add devices failed: %d\n", ret);
@@ -332,12 +522,20 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
return 0;
err_add_devs:
+ if (client->irq)
+ free_irq(client->irq, tps6586x);
+err_irq_init:
kfree(tps6586x);
return ret;
}
static int __devexit tps6586x_i2c_remove(struct i2c_client *client)
{
+ struct tps6586x *tps6586x = i2c_get_clientdata(client);
+
+ if (client->irq)
+ free_irq(client->irq, tps6586x);
+
return 0;
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 6909a54c39be..45055c46d954 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -253,8 +253,13 @@ static int mmc_read_ext_csd(struct mmc_card *card)
ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
/* Cards with density > 2GiB are sector addressed */
- if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
+ if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) {
+ unsigned boot_sectors;
+ /* size is in 256K chunks, i.e. 512 sectors each */
+ boot_sectors = ext_csd[EXT_CSD_BOOT_SIZE_MULTI] * 512;
+ card->ext_csd.sectors -= boot_sectors;
mmc_card_set_blockaddr(card);
+ }
}
switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index f332c52968b7..6b8db0465370 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -258,11 +258,13 @@ static int mmc_sdio_switch_hs(struct mmc_card *card, int enable)
int ret;
u8 speed;
- if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
- return 0;
+ if (!(card->host->caps & MMC_CAP_FORCE_HS)) {
+ if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
+ return 0;
- if (!card->cccr.high_speed)
- return 0;
+ if (!card->cccr.high_speed)
+ return 0;
+ }
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (ret)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 68d12794cfd9..44a476d3f24e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -392,6 +392,12 @@ config MMC_TMIO
This provides support for the SD/MMC cell found in TC6393XB,
T7L66XB and also HTC ASIC3
+config MMC_SDHCI_TEGRA
+ tristate "Tegra SD/MMC Controller Support"
+ depends on ARCH_TEGRA && MMC_SDHCI
+ help
+ This selects the Tegra SD/MMC controller.
+
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
depends on PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 840bcb52d82f..ba4c798b7cf6 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
+obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
obj-$(CONFIG_MMC_OMAP) += omap.o
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 000000000000..361c8e780683
--- /dev/null
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,226 @@
+/*
+ * drivers/mmc/host/sdhci-tegra.c
+ *
+ * Copyright (C) 2009 Palm, Inc.
+ * Author: Yvonne Yip <y@palm.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+
+#include <mach/sdhci.h>
+
+#include "sdhci.h"
+
+#define DRIVER_NAME "sdhci-tegra"
+
+struct tegra_sdhci_host {
+ struct sdhci_host *sdhci;
+ struct clk *clk;
+};
+
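+/* card-detect GPIO edge: let the sdhci core rescan the slot */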
+static irqreturn_t carddetect_irq(int irq, void *data)
+{
+ struct sdhci_host *sdhost = (struct sdhci_host *)data;
+
+ sdhci_card_detect_callback(sdhost);
+ return IRQ_HANDLED;
+}
+
+static int tegra_sdhci_enable_dma(struct sdhci_host *host)
+{
+ return 0;
+}
+
+static struct sdhci_ops tegra_sdhci_ops = {
+ .enable_dma = tegra_sdhci_enable_dma,
+};
+
+static int __devinit tegra_sdhci_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct tegra_sdhci_platform_data *plat;
+ struct sdhci_host *sdhci;
+ struct tegra_sdhci_host *host;
+ struct resource *res;
+ int irq;
+ void __iomem *ioaddr;
+
+ plat = pdev->dev.platform_data;
+ if (plat == NULL)
+ return -ENXIO;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL)
+ return -ENODEV;
+
+ irq = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -ENODEV;
+
+	ioaddr = ioremap(res->start, resource_size(res));
+	if (!ioaddr)
+		return -ENOMEM;
+
+ sdhci = sdhci_alloc_host(&pdev->dev, sizeof(struct tegra_sdhci_host));
+ if (IS_ERR(sdhci)) {
+ rc = PTR_ERR(sdhci);
+ goto err_unmap;
+ }
+
+ host = sdhci_priv(sdhci);
+ host->sdhci = sdhci;
+
+ host->clk = clk_get(&pdev->dev, plat->clk_id);
+ if (IS_ERR(host->clk)) {
+ rc = PTR_ERR(host->clk);
+ goto err_free_host;
+ }
+
+ rc = clk_enable(host->clk);
+ if (rc != 0)
+ goto err_clkput;
+
+ sdhci->hw_name = "tegra";
+ sdhci->ops = &tegra_sdhci_ops;
+ sdhci->irq = irq;
+ sdhci->ioaddr = ioaddr;
+ sdhci->version = SDHCI_SPEC_200;
+ sdhci->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_ENABLE_INTERRUPT_AT_BLOCK_GAP |
+ SDHCI_QUIRK_BROKEN_WRITE_PROTECT |
+ SDHCI_QUIRK_BROKEN_CTRL_HISPD |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_8_BIT_DATA |
+ SDHCI_QUIRK_NO_VERSION_REG |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_NO_SDIO_IRQ;
+
+ if (plat->force_hs != 0)
+ sdhci->quirks |= SDHCI_QUIRK_FORCE_HIGH_SPEED_MODE;
+
+ rc = sdhci_add_host(sdhci);
+ if (rc)
+ goto err_clk_disable;
+
+ platform_set_drvdata(pdev, host);
+
+ if (plat->cd_gpio != -1) {
+ rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ mmc_hostname(sdhci->mmc), sdhci);
+
+ if (rc)
+ goto err_remove_host;
+ }
+
+ if (plat->board_probe)
+ plat->board_probe(pdev->id, sdhci->mmc);
+
+ printk(KERN_INFO "sdhci%d: initialized irq %d ioaddr %p\n", pdev->id,
+ sdhci->irq, sdhci->ioaddr);
+
+ return 0;
+
+err_remove_host:
+ sdhci_remove_host(sdhci, 1);
+err_clk_disable:
+ clk_disable(host->clk);
+err_clkput:
+ clk_put(host->clk);
+err_free_host:
+ if (sdhci)
+ sdhci_free_host(sdhci);
+err_unmap:
+	iounmap(ioaddr);
+
+ return rc;
+}
+
+static int tegra_sdhci_remove(struct platform_device *pdev)
+{
+ struct tegra_sdhci_host *host = platform_get_drvdata(pdev);
+ if (host) {
+ struct tegra_sdhci_platform_data *plat;
+ plat = pdev->dev.platform_data;
+ if (plat && plat->board_probe)
+ plat->board_probe(pdev->id, host->sdhci->mmc);
+
+ sdhci_remove_host(host->sdhci, 0);
+ sdhci_free_host(host->sdhci);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_sdhci_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_sdhci_host *host = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = sdhci_suspend_host(host->sdhci, state);
+ if (ret)
+ pr_err("%s: failed, error = %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int tegra_sdhci_resume(struct platform_device *pdev)
+{
+ struct tegra_sdhci_host *host = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = sdhci_resume_host(host->sdhci);
+ if (ret)
+ pr_err("%s: failed, error = %d\n", __func__, ret);
+
+ return ret;
+}
+#else
+#define tegra_sdhci_suspend NULL
+#define tegra_sdhci_resume NULL
+#endif
+
+static struct platform_driver tegra_sdhci_driver = {
+ .probe = tegra_sdhci_probe,
+ .remove = tegra_sdhci_remove,
+ .suspend = tegra_sdhci_suspend,
+ .resume = tegra_sdhci_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_sdhci_init(void)
+{
+ return platform_driver_register(&tegra_sdhci_driver);
+}
+
+static void __exit tegra_sdhci_exit(void)
+{
+ platform_driver_unregister(&tegra_sdhci_driver);
+}
+
+module_init(tegra_sdhci_init);
+module_exit(tegra_sdhci_exit);
+
+MODULE_DESCRIPTION("Tegra SDHCI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 401527d273b5..61670e1534b0 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -24,6 +24,7 @@
#include <linux/leds.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include "sdhci.h"
@@ -1029,6 +1030,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
out:
+
host->clock = clock;
}
@@ -1177,8 +1179,6 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->bus_width == MMC_BUS_WIDTH_4)
ctrl |= SDHCI_CTRL_4BITBUS;
- else
- ctrl &= ~SDHCI_CTRL_4BITBUS;
if (ios->timing == MMC_TIMING_SD_HS &&
!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
@@ -1211,16 +1211,22 @@ static int sdhci_get_ro(struct mmc_host *mmc)
spin_lock_irqsave(&host->lock, flags);
- if (host->flags & SDHCI_DEVICE_DEAD)
+ if (host->flags & SDHCI_DEVICE_DEAD) {
present = 0;
- else
+ } else if (!(host->quirks & SDHCI_QUIRK_BROKEN_WRITE_PROTECT)) {
present = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ present = !(present & SDHCI_WRITE_PROTECT);
+ } else if (host->ops->get_ro) {
+ present = host->ops->get_ro(host);
+ } else {
+ present = 0;
+ }
spin_unlock_irqrestore(&host->lock, flags);
if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
return !!(present & SDHCI_WRITE_PROTECT);
- return !(present & SDHCI_WRITE_PROTECT);
+ return present;
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1239,6 +1245,16 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
else
sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
+
+ if (host->quirks & SDHCI_QUIRK_ENABLE_INTERRUPT_AT_BLOCK_GAP) {
+ u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+ if (enable)
+ gap_ctrl |= 0x8;
+ else
+ gap_ctrl &= ~0x8;
+ writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+ }
+
out:
mmiowb();
@@ -1252,19 +1268,10 @@ static const struct mmc_host_ops sdhci_ops = {
.enable_sdio_irq = sdhci_enable_sdio_irq,
};
-/*****************************************************************************\
- * *
- * Tasklets *
- * *
-\*****************************************************************************/
-
-static void sdhci_tasklet_card(unsigned long param)
+void sdhci_card_detect_callback(struct sdhci_host *host)
{
- struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host*)param;
-
spin_lock_irqsave(&host->lock, flags);
if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
@@ -1286,6 +1293,22 @@ static void sdhci_tasklet_card(unsigned long param)
mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
+EXPORT_SYMBOL_GPL(sdhci_card_detect_callback);
+
+/*****************************************************************************\
+ * *
+ * Tasklets *
+ * *
+\*****************************************************************************/
+
+static void sdhci_tasklet_card(unsigned long param)
+{
+ struct sdhci_host *host;
+
+ host = (struct sdhci_host *)param;
+
+ sdhci_card_detect_callback(host);
+}
static void sdhci_tasklet_finish(unsigned long param)
{
@@ -1397,7 +1420,8 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
host->cmd->error = -EILSEQ;
if (host->cmd->error) {
- tasklet_schedule(&host->finish_tasklet);
+ if (intmask & SDHCI_INT_RESPONSE)
+ tasklet_schedule(&host->finish_tasklet);
return;
}
@@ -1610,19 +1634,22 @@ out:
int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
{
- int ret;
+ int ret = 0;
+ struct mmc_host *mmc = host->mmc;
sdhci_disable_card_detection(host);
- ret = mmc_suspend_host(host->mmc);
- if (ret)
- return ret;
+ if (mmc->card && (mmc->card->type != MMC_TYPE_SDIO))
+ ret = mmc_suspend_host(host->mmc);
- free_irq(host->irq, host);
+ sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
if (host->vmmc)
ret = regulator_disable(host->vmmc);
+ if (host->irq)
+ disable_irq(host->irq);
+
return ret;
}
@@ -1630,7 +1657,8 @@ EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
- int ret;
+ int ret = 0;
+ struct mmc_host *mmc = host->mmc;
if (host->vmmc) {
int ret = regulator_enable(host->vmmc);
@@ -1644,15 +1672,15 @@ int sdhci_resume_host(struct sdhci_host *host)
host->ops->enable_dma(host);
}
- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
- mmc_hostname(host->mmc), host);
- if (ret)
- return ret;
+ if (host->irq)
+ enable_irq(host->irq);
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
mmiowb();
- ret = mmc_resume_host(host->mmc);
+ if (mmc->card && (mmc->card->type != MMC_TYPE_SDIO))
+ ret = mmc_resume_host(host->mmc);
+
sdhci_enable_card_detection(host);
return ret;
@@ -1705,9 +1733,12 @@ int sdhci_add_host(struct sdhci_host *host)
sdhci_reset(host, SDHCI_RESET_ALL);
- host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
- host->version = (host->version & SDHCI_SPEC_VER_MASK)
- >> SDHCI_SPEC_VER_SHIFT;
+ if (!(host->quirks & SDHCI_QUIRK_NO_VERSION_REG)) {
+ host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
+ host->version = (host->version & SDHCI_SPEC_VER_MASK)
+ >> SDHCI_SPEC_VER_SHIFT;
+ }
+
if (host->version > SDHCI_SPEC_200) {
printk(KERN_ERR "%s: Unknown controller version (%d). "
"You may experience problems.\n", mmc_hostname(mmc),
@@ -1818,17 +1849,30 @@ int sdhci_add_host(struct sdhci_host *host)
else
mmc->f_min = host->max_clk / 256;
mmc->f_max = host->max_clk;
- mmc->caps |= MMC_CAP_SDIO_IRQ;
+ mmc->caps = 0;
+
+ if (host->quirks & SDHCI_QUIRK_8_BIT_DATA)
+ mmc->caps |= MMC_CAP_8_BIT_DATA;
if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
mmc->caps |= MMC_CAP_4_BIT_DATA;
- if (caps & SDHCI_CAN_DO_HISPD)
+ if (!(host->quirks & SDHCI_QUIRK_NO_SDIO_IRQ))
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+
+ if (caps & SDHCI_CAN_DO_HISPD) {
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
+ }
+
+ if (host->quirks & SDHCI_QUIRK_FORCE_HIGH_SPEED_MODE)
+ mmc->caps |= MMC_CAP_FORCE_HS;
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
mmc->caps |= MMC_CAP_NEEDS_POLL;
+ mmc->caps |= MMC_CAP_ERASE;
+
mmc->ocr_avail = 0;
if (caps & SDHCI_CAN_VDD_330)
mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
@@ -1868,10 +1912,14 @@ int sdhci_add_host(struct sdhci_host *host)
* of bytes. When doing hardware scatter/gather, each entry cannot
* be larger than 64 KiB though.
*/
- if (host->flags & SDHCI_USE_ADMA)
- mmc->max_seg_size = 65536;
- else
+ if (host->flags & SDHCI_USE_ADMA) {
+ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+ mmc->max_seg_size = 0xffff;
+ else
+ mmc->max_seg_size = 65536;
+ } else {
mmc->max_seg_size = mmc->max_req_size;
+ }
/*
* Maximum block size. This varies from controller to controller and
@@ -1895,7 +1943,7 @@ int sdhci_add_host(struct sdhci_host *host)
* Maximum block count.
*/
mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
-
+
/*
* Init tasklets.
*/
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index d316bc79b636..7af27866c4ed 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -66,6 +66,7 @@
#define SDHCI_HOST_CONTROL 0x28
#define SDHCI_CTRL_LED 0x01
#define SDHCI_CTRL_4BITBUS 0x02
+#define SDHCI_CTRL_8BITBUS 0x20
#define SDHCI_CTRL_HISPD 0x04
#define SDHCI_CTRL_DMA_MASK 0x18
#define SDHCI_CTRL_SDMA 0x00
@@ -185,7 +186,7 @@ struct sdhci_host {
/* Data set by hardware interface driver */
const char *hw_name; /* Hardware bus name */
- unsigned int quirks; /* Deviations from spec. */
+ u64 quirks; /* Deviations from spec. */
/* Controller doesn't honor resets unless we touch the clock register */
#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
@@ -247,6 +248,22 @@ struct sdhci_host {
#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
+/* Controller write protect bit is broken. Assume no write protection */
+#define SDHCI_QUIRK_BROKEN_WRITE_PROTECT (1<<30)
+/* Controller needs INTERRUPT_AT_BLOCK_GAP enabled to detect card interrupts */
+#define SDHCI_QUIRK_ENABLE_INTERRUPT_AT_BLOCK_GAP (1LL<<31)
+/* Controller should not program HIGH_SPEED_EN after switching to high speed */
+#define SDHCI_QUIRK_BROKEN_CTRL_HISPD (1LL<<32)
+/* Controller supports 8-bit data width */
+#define SDHCI_QUIRK_8_BIT_DATA (1LL<<33)
+/* Controller has no version register */
+#define SDHCI_QUIRK_NO_VERSION_REG (1LL<<34)
+/* Controller treats ADMA descriptors with length 0000h incorrectly */
+#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1LL<<35)
+/* Controller should not use SDIO IRQ */
+#define SDHCI_QUIRK_NO_SDIO_IRQ (1LL<<36)
+/* Controller should only use high-speed mode */
+#define SDHCI_QUIRK_FORCE_HIGH_SPEED_MODE (1LL<<37)
int irq; /* Device IRQ */
void __iomem * ioaddr; /* Mapped address */
@@ -320,6 +337,7 @@ struct sdhci_ops {
void (*set_clock)(struct sdhci_host *host, unsigned int clock);
int (*enable_dma)(struct sdhci_host *host);
+ int (*get_ro)(struct sdhci_host *host);
unsigned int (*get_max_clock)(struct sdhci_host *host);
unsigned int (*get_min_clock)(struct sdhci_host *host);
unsigned int (*get_timeout_clock)(struct sdhci_host *host);
@@ -412,6 +430,7 @@ static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
extern struct sdhci_host *sdhci_alloc_host(struct device *dev,
size_t priv_size);
extern void sdhci_free_host(struct sdhci_host *host);
+extern void sdhci_card_detect_callback(struct sdhci_host *host);
static inline void *sdhci_priv(struct sdhci_host *host)
{
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 35081ce77fbd..943d90f08c08 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -78,6 +78,12 @@ config MTD_DATAFLASH_OTP
other key product data. The second half is programmed with a
unique-to-each-chip bit pattern at the factory.
+config MTD_NAND_TEGRA
+ tristate "Support for NAND Controller on NVIDIA Tegra"
+ depends on ARCH_TEGRA
+ help
+ Enables NAND flash support for NVIDIA's Tegra family of chips.
+
config MTD_M25P80
tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
depends on SPI_MASTER && EXPERIMENTAL
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index f3226b1d38fc..4793bcfe6211 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
+obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
diff --git a/drivers/mtd/devices/tegra_nand.c b/drivers/mtd/devices/tegra_nand.c
new file mode 100644
index 000000000000..6982a74ce65b
--- /dev/null
+++ b/drivers/mtd/devices/tegra_nand.c
@@ -0,0 +1,1605 @@
+/*
+ * drivers/mtd/devices/tegra_nand.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Derived from: drivers/mtd/nand/nand_base.c
+ * drivers/mtd/nand/pxa3xx.c
+ *
+ * TODO:
+ * - Add support for 16bit bus width
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <mach/nand.h>
+
+#include "tegra_nand.h"
+
+#define DRIVER_NAME "tegra_nand"
+#define DRIVER_DESC "Nvidia Tegra NAND Flash Controller driver"
+
+#define MAX_DMA_SZ SZ_64K
+#define ECC_BUF_SZ SZ_1K
+
+/* FIXME: is this right?!
+ * NvRM code says it should be 128 bytes, but that seems awfully small
+ */
+
+/* #define TEGRA_NAND_DEBUG */
+/* #define TEGRA_NAND_DEBUG_PEDANTIC */
+
+#ifdef TEGRA_NAND_DEBUG
+#define TEGRA_DBG(fmt, args...) \
+ do { pr_info(fmt, ##args); } while (0)
+#else
+#define TEGRA_DBG(fmt, args...)
+#endif
+
+/* TODO: will vary with devices, move into appropriate device specific header */
+#define SCAN_TIMING_VAL 0x3f0bd214
+#define SCAN_TIMING2_VAL 0xb
+
+/* TODO: pull in the register defs (fields, masks, etc) from Nvidia files
+ * so we don't have to redefine them */
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL, };
+#endif
+
+struct tegra_nand_chip {
+ spinlock_t lock;
+ uint32_t chipsize;
+ int num_chips;
+ int curr_chip;
+
+ /* addr >> chip_shift == chip number */
+ uint32_t chip_shift;
+ /* (addr >> page_shift) & page_mask == page number within chip */
+ uint32_t page_shift;
+ uint32_t page_mask;
+ /* column within page */
+ uint32_t column_mask;
+ /* addr >> block_shift == block number (across the whole mtd dev, not
+ * just a single chip. */
+ uint32_t block_shift;
+
+ void *priv;
+};
+
+struct tegra_nand_info {
+ struct tegra_nand_chip chip;
+ struct mtd_info mtd;
+ struct tegra_nand_platform *plat;
+ struct device *dev;
+ struct mtd_partition *parts;
+
+ /* synchronizes access to accessing the actual NAND controller */
+ struct mutex lock;
+
+
+ void *oob_dma_buf;
+ dma_addr_t oob_dma_addr;
+ /* ecc error vector info (offset into page and data mask to apply */
+ void *ecc_buf;
+ dma_addr_t ecc_addr;
+ /* ecc error status (page number, err_cnt) */
+ uint32_t *ecc_errs;
+ uint32_t num_ecc_errs;
+ uint32_t max_ecc_errs;
+ spinlock_t ecc_lock;
+
+ uint32_t command_reg;
+ uint32_t config_reg;
+ uint32_t dmactrl_reg;
+
+ struct completion cmd_complete;
+ struct completion dma_complete;
+
+ /* bad block bitmap: 1 == good, 0 == bad/unknown */
+ unsigned long *bb_bitmap;
+
+ struct clk *clk;
+};
+#define MTD_TO_INFO(mtd) container_of((mtd), struct tegra_nand_info, mtd)
+
+/* 64-byte oob block info for a large page (== 2KB) device
+ *
+ * OOB flash layout for Tegra with Reed-Solomon 4 symbol correct ECC:
+ * Skipped bytes(4)
+ * Main area Ecc(36)
+ * Tag data(20)
+ * Tag data Ecc(4)
+ *
+ * Yaffs2 will use 16 tag bytes.
+ */
+
+static struct nand_ecclayout tegra_nand_oob_64 = {
+ .eccbytes = 36,
+ .eccpos = {
+ 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ },
+ .oobavail = 20,
+ .oobfree = {
+ { .offset = 40,
+ .length = 20,
+ },
+ },
+};
+
+static struct nand_flash_dev *
+find_nand_flash_device(int dev_id)
+{
+ struct nand_flash_dev *dev = &nand_flash_ids[0];
+
+ while (dev->name && dev->id != dev_id)
+ dev++;
+ return dev->name ? dev : NULL;
+}
+
+static struct nand_manufacturers *
+find_nand_flash_vendor(int vendor_id)
+{
+ struct nand_manufacturers *vendor = &nand_manuf_ids[0];
+
+ while (vendor->id && vendor->id != vendor_id)
+ vendor++;
+ return vendor->id ? vendor : NULL;
+}
+
+#define REG_NAME(name) { name, #name }
+static struct {
+ uint32_t addr;
+ char *name;
+} reg_names[] = {
+ REG_NAME(COMMAND_REG),
+ REG_NAME(STATUS_REG),
+ REG_NAME(ISR_REG),
+ REG_NAME(IER_REG),
+ REG_NAME(CONFIG_REG),
+ REG_NAME(TIMING_REG),
+ REG_NAME(RESP_REG),
+ REG_NAME(TIMING2_REG),
+ REG_NAME(CMD_REG1),
+ REG_NAME(CMD_REG2),
+ REG_NAME(ADDR_REG1),
+ REG_NAME(ADDR_REG2),
+ REG_NAME(DMA_MST_CTRL_REG),
+ REG_NAME(DMA_CFG_A_REG),
+ REG_NAME(DMA_CFG_B_REG),
+ REG_NAME(FIFO_CTRL_REG),
+ REG_NAME(DATA_BLOCK_PTR_REG),
+ REG_NAME(TAG_PTR_REG),
+ REG_NAME(ECC_PTR_REG),
+ REG_NAME(DEC_STATUS_REG),
+ REG_NAME(HWSTATUS_CMD_REG),
+ REG_NAME(HWSTATUS_MASK_REG),
+ { 0, NULL },
+};
+#undef REG_NAME
+
+
+static int
+dump_nand_regs(void)
+{
+ int i = 0;
+
+ TEGRA_DBG("%s: dumping registers\n", __func__);
+ while (reg_names[i].name != NULL) {
+ TEGRA_DBG("%s = 0x%08x\n", reg_names[i].name, readl(reg_names[i].addr));
+ i++;
+ }
+ TEGRA_DBG("%s: end of reg dump\n", __func__);
+ return 1;
+}
+
+
+static inline void
+enable_ints(struct tegra_nand_info *info, uint32_t mask)
+{
+ (void)info;
+ writel(readl(IER_REG) | mask, IER_REG);
+}
+
+
+static inline void
+disable_ints(struct tegra_nand_info *info, uint32_t mask)
+{
+ (void)info;
+ writel(readl(IER_REG) & ~mask, IER_REG);
+}
+
+
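+/* Worked example (actual values depend on the chip detected): for a
+ * 2KB-page, 128KB-block device, page_shift == 11, block_shift == 17 and
+ * column_mask == 0x7ff, so offset 0x21804 splits into page 0x43,
+ * column 0x4, within block 1. */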
+static inline void
+split_addr(struct tegra_nand_info *info, loff_t offset, int *chipnr, uint32_t *page,
+ uint32_t *column)
+{
+ *chipnr = (int)(offset >> info->chip.chip_shift);
+ *page = (offset >> info->chip.page_shift) & info->chip.page_mask;
+ *column = offset & info->chip.column_mask;
+}
+
+
+static irqreturn_t
+tegra_nand_irq(int irq, void *dev_id)
+{
+ struct tegra_nand_info *info = dev_id;
+ uint32_t isr;
+ uint32_t ier;
+ uint32_t dma_ctrl;
+ uint32_t tmp;
+
+ isr = readl(ISR_REG);
+ ier = readl(IER_REG);
+ dma_ctrl = readl(DMA_MST_CTRL_REG);
+#ifdef DEBUG_DUMP_IRQ
+ pr_info("IRQ: ISR=0x%08x IER=0x%08x DMA_IS=%d DMA_IE=%d\n",
+ isr, ier, !!(dma_ctrl & (1 << 20)), !!(dma_ctrl & (1 << 28)));
+#endif
+ if (isr & ISR_CMD_DONE) {
+ if (likely(!(readl(COMMAND_REG) & COMMAND_GO)))
+ complete(&info->cmd_complete);
+ else
+ pr_err("tegra_nand_irq: Spurious cmd done irq!\n");
+ }
+
+ if (isr & ISR_ECC_ERR) {
+ /* always want to read the decode status so xfers don't stall. */
+ tmp = readl(DEC_STATUS_REG);
+
+ /* was ECC check actually enabled */
+ if ((ier & IER_ECC_ERR)) {
+ unsigned long flags;
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ info->ecc_errs[info->num_ecc_errs++] = tmp;
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+ }
+ }
+
+ if ((dma_ctrl & DMA_CTRL_IS_DMA_DONE) &&
+ (dma_ctrl & DMA_CTRL_IE_DMA_DONE)) {
+ complete(&info->dma_complete);
+ writel(dma_ctrl, DMA_MST_CTRL_REG);
+ }
+
+ if ((isr & ISR_UND) && (ier & IER_UND))
+ pr_err("%s: fifo underrun.\n", __func__);
+
+ if ((isr & ISR_OVR) && (ier & IER_OVR))
+ pr_err("%s: fifo overrun.\n", __func__);
+
+ /* clear ALL interrupts?! */
+ writel(isr & 0xfffc, ISR_REG);
+
+ return IRQ_HANDLED;
+}
+
+static inline int
+tegra_nand_is_cmd_done(struct tegra_nand_info *info)
+{
+ return (readl(COMMAND_REG) & COMMAND_GO) ? 0 : 1;
+}
+
+static int
+tegra_nand_wait_cmd_done(struct tegra_nand_info *info)
+{
+ uint32_t timeout = (2 * HZ); /* TODO: make this realistic */
+ int ret;
+
+ ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
+
+#ifdef TEGRA_NAND_DEBUG_PEDANTIC
+ BUG_ON(!ret && dump_nand_regs());
+#endif
+
+ return ret ? 0 : -ETIMEDOUT;
+}
+
+static inline void
+select_chip(struct tegra_nand_info *info, int chipnr)
+{
+ BUG_ON(chipnr != -1 && chipnr >= info->plat->max_chips);
+ info->chip.curr_chip = chipnr;
+}
+
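+/* Program the hardware status monitor. Judging by the register names,
+ * the controller issues NAND_CMD_STATUS on its own and compares the
+ * masked response against the expected value to track ready/busy. */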
+static void
+cfg_hwstatus_mon(struct tegra_nand_info *info)
+{
+ uint32_t val;
+
+ val = (HWSTATUS_RDSTATUS_MASK(1) |
+ HWSTATUS_RDSTATUS_EXP_VAL(0) |
+ HWSTATUS_RBSY_MASK(NAND_STATUS_READY) |
+ HWSTATUS_RBSY_EXP_VAL(NAND_STATUS_READY));
+ writel(NAND_CMD_STATUS, HWSTATUS_CMD_REG);
+ writel(val, HWSTATUS_MASK_REG);
+}
+
+/* Tells the NAND controller to initiate the command. */
+static int
+tegra_nand_go(struct tegra_nand_info *info)
+{
+ BUG_ON(!tegra_nand_is_cmd_done(info));
+
+ INIT_COMPLETION(info->cmd_complete);
+ writel(info->command_reg | COMMAND_GO, COMMAND_REG);
+
+ if (unlikely(tegra_nand_wait_cmd_done(info))) {
+ /* TODO: abort command if needed? */
+ pr_err("%s: Timeout while waiting for command\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* TODO: maybe wait for dma here? */
+ return 0;
+}
+
+static void
+tegra_nand_prep_readid(struct tegra_nand_info *info)
+{
+ info->command_reg = (COMMAND_CLE | COMMAND_ALE | COMMAND_PIO | COMMAND_RX |
+ COMMAND_ALE_BYTE_SIZE(0) | COMMAND_TRANS_SIZE(3) |
+ (COMMAND_CE(info->chip.curr_chip)));
+ writel(NAND_CMD_READID, CMD_REG1);
+ writel(0, CMD_REG2);
+ writel(0, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(0, CONFIG_REG);
+}
+
+static int
+tegra_nand_cmd_readid(struct tegra_nand_info *info, uint32_t *chip_id)
+{
+ int err;
+
+#ifdef TEGRA_NAND_DEBUG_PEDANTIC
+ BUG_ON(info->chip.curr_chip == -1);
+#endif
+
+ tegra_nand_prep_readid(info);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ return err;
+
+ *chip_id = readl(RESP_REG);
+ return 0;
+}
+
+
+/* assumes right locks are held */
+static int
+nand_cmd_get_status(struct tegra_nand_info *info, uint32_t *status)
+{
+ int err;
+
+ info->command_reg = (COMMAND_CLE | COMMAND_PIO | COMMAND_RX |
+ COMMAND_RBSY_CHK | (COMMAND_CE(info->chip.curr_chip)));
+ writel(NAND_CMD_STATUS, CMD_REG1);
+ writel(0, CMD_REG2);
+ writel(0, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ err = tegra_nand_go(info);
+ if (err != 0)
+ return err;
+
+ *status = readl(RESP_REG) & 0xff;
+ return 0;
+}
+
+
+/* must be called with lock held */
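+/* A block is treated as bad when the bad block marker bytes (the first
+ * two OOB bytes of either of the block's first two pages) read back as
+ * anything other than 0xffff. */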
+static int
+check_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t block = offs >> info->chip.block_shift;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ int ret = 0;
+ int i;
+
+ if (info->bb_bitmap[BIT_WORD(block)] & BIT_MASK(block))
+ return 0;
+
+ offs &= ~(mtd->erasesize - 1);
+
+ /* Only set COM_BSY. */
+ /* TODO: should come from board file */
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ split_addr(info, offs, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ column = mtd->writesize & 0xffff; /* force to be the offset of OOB */
+
+ /* check first two pages of the block */
+ for (i = 0; i < 2; ++i) {
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
+ COMMAND_ALE_BYTE_SIZE(4) | COMMAND_RX | COMMAND_PIO |
+ COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID | COMMAND_RBSY_CHK |
+ COMMAND_SEC_CMD;
+ writel(NAND_CMD_READ0, CMD_REG1);
+ writel(NAND_CMD_READSTART, CMD_REG2);
+
+ writel(column | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+
+ /* poison RESP_REG with a sentinel so a silently failed read shows up */
+ writel(0xaa55aa55, RESP_REG);
+ ret = tegra_nand_go(info);
+ if (ret != 0) {
+ pr_info("baaaaaad\n");
+ goto out;
+ }
+
+ if ((readl(RESP_REG) & 0xffff) != 0xffff) {
+ ret = 1;
+ goto out;
+ }
+
+ /* Note: The assumption here is that we cannot cross a chip
+ * boundary since we are only looking at the first 2 pages in
+ * a block, i.e. erasesize > writesize ALWAYS */
+ page++;
+ }
+
+out:
+ /* update the bitmap if the block is good */
+ if (ret == 0)
+ set_bit(block, info->bb_bitmap);
+ return ret;
+}
+
+
+static int
+tegra_nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ int ret;
+
+ if (offs >= mtd->size)
+ return -EINVAL;
+
+ mutex_lock(&info->lock);
+ ret = check_block_isbad(mtd, offs);
+ mutex_unlock(&info->lock);
+
+#if 0
+ if (ret > 0)
+ pr_info("block @ 0x%llx is bad.\n", offs);
+ else if (ret < 0)
+ pr_err("error checking block @ 0x%llx for badness.\n", offs);
+#endif
+
+ return ret;
+}
+
+
+static int
+tegra_nand_block_markbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t block = offs >> info->chip.block_shift;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ int ret = 0;
+ int i;
+
+ if (offs >= mtd->size)
+ return -EINVAL;
+
+ pr_info("tegra_nand: setting block %d bad\n", block);
+
+ mutex_lock(&info->lock);
+ offs &= ~(mtd->erasesize - 1);
+
+ /* mark the block bad in our bitmap */
+ clear_bit(block, info->bb_bitmap);
+ mtd->ecc_stats.badblocks++;
+
+ /* Only set COM_BSY. */
+ /* TODO: should come from board file */
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ split_addr(info, offs, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ column = mtd->writesize & 0xffff; /* force to be the offset of OOB */
+
+ /* write to first two pages in the block */
+ for (i = 0; i < 2; ++i) {
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
+ COMMAND_ALE_BYTE_SIZE(4) | COMMAND_TX | COMMAND_PIO |
+ COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID | COMMAND_RBSY_CHK |
+ COMMAND_AFT_DAT | COMMAND_SEC_CMD;
+ writel(NAND_CMD_SEQIN, CMD_REG1);
+ writel(NAND_CMD_PAGEPROG, CMD_REG2);
+
+ writel(column | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+
+ writel(0x0, RESP_REG);
+ ret = tegra_nand_go(info);
+ if (ret != 0)
+ goto out;
+
+ /* TODO: check if the program op worked? */
+ page++;
+ }
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+
+static int
+tegra_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ uint32_t num_blocks;
+ uint32_t offs;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint32_t status = 0;
+
+ TEGRA_DBG("tegra_nand_erase: addr=0x%08llx len=%lld\n", instr->addr,
+ instr->len);
+
+ if ((instr->addr + instr->len) > mtd->size) {
+ pr_err("tegra_nand_erase: Can't erase past end of device\n");
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ if (instr->addr & (mtd->erasesize - 1)) {
+ pr_err("tegra_nand_erase: addr=0x%08llx not block-aligned\n",
+ instr->addr);
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ if (instr->len & (mtd->erasesize - 1)) {
+ pr_err("tegra_nand_erase: len=%lld not block-aligned\n",
+ instr->len);
+ instr->state = MTD_ERASE_FAILED;
+ return -EINVAL;
+ }
+
+ instr->fail_addr = 0xffffffff;
+
+ mutex_lock(&info->lock);
+
+ instr->state = MTD_ERASING;
+
+ offs = instr->addr;
+ num_blocks = instr->len >> info->chip.block_shift;
+
+ select_chip(info, -1);
+
+ while (num_blocks--) {
+ split_addr(info, offs, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ TEGRA_DBG("tegra_nand_erase: addr=0x%08x, page=0x%08x\n", offs, page);
+
+ if (check_block_isbad(mtd, offs)) {
+ pr_info("%s: skipping bad block @ 0x%08x\n", __func__, offs);
+ goto next_block;
+ }
+
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
+ COMMAND_ALE_BYTE_SIZE(2) | COMMAND_RBSY_CHK | COMMAND_SEC_CMD;
+ writel(NAND_CMD_ERASE1, CMD_REG1);
+ writel(NAND_CMD_ERASE2, CMD_REG2);
+
+ writel(page & 0xffffff, ADDR_REG1);
+ writel(0, ADDR_REG2);
+ writel(CONFIG_COM_BSY, CONFIG_REG);
+
+ if (tegra_nand_go(info) != 0) {
+ instr->fail_addr = offs;
+ goto out_err;
+ }
+
+ /* TODO: do we want a timeout here? */
+ if ((nand_cmd_get_status(info, &status) != 0) ||
+ (status & NAND_STATUS_FAIL) ||
+ ((status & NAND_STATUS_READY) != NAND_STATUS_READY)) {
+ instr->fail_addr = offs;
+ pr_info("%s: erase failed @ 0x%08x (stat=0x%08x)\n",
+ __func__, offs, status);
+ goto out_err;
+ }
+next_block:
+ offs += mtd->erasesize;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mutex_unlock(&info->lock);
+ mtd_erase_callback(instr);
+ return 0;
+
+out_err:
+ instr->state = MTD_ERASE_FAILED;
+ mutex_unlock(&info->lock);
+ return -EIO;
+}
+
+
+static inline void
+dump_mtd_oob_ops(struct mtd_oob_ops *ops)
+{
+ pr_info("%s: oob_ops: mode=%s len=0x%x ooblen=0x%x "
+ "ooboffs=0x%x dat=0x%p oob=0x%p\n", __func__,
+ (ops->mode == MTD_OOB_AUTO ? "MTD_OOB_AUTO" :
+ (ops->mode == MTD_OOB_PLACE ? "MTD_OOB_PLACE" : "MTD_OOB_RAW")),
+ ops->len, ops->ooblen, ops->ooboffs, ops->datbuf, ops->oobbuf);
+}
+
+static int
+tegra_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ pr_debug("%s: read: from=0x%llx len=0x%x\n", __func__, from, len);
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = len;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = mtd->read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
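+/* An erased (all-0xFF) page carries no valid ECC, so the hardware flags
+ * it as an ECC failure. If every byte we read back is 0xFF, drop the
+ * recorded errors rather than reporting a false failure. */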
+static void
+correct_ecc_errors_on_blank_page(struct tegra_nand_info *info, u8 *datbuf,
+ u8 *oobbuf, unsigned int a_len, unsigned int b_len)
+{
+ int i;
+ int all_ff = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ if (info->num_ecc_errs) {
+ if (datbuf) {
+ for (i = 0; i < a_len; i++)
+ if (datbuf[i] != 0xFF)
+ all_ff = 0;
+ }
+ if (oobbuf) {
+ for (i = 0; i < b_len; i++)
+ if (oobbuf[i] != 0xFF)
+ all_ff = 0;
+ }
+ if (all_ff)
+ info->num_ecc_errs = 0;
+ }
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+}
+
+static void
+update_ecc_counts(struct tegra_nand_info *info, int check_oob)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&info->ecc_lock, flags);
+ for (i = 0; i < info->num_ecc_errs; ++i) {
+ /* correctable */
+ info->mtd.ecc_stats.corrected +=
+ DEC_STATUS_ERR_CNT(info->ecc_errs[i]);
+
+ /* uncorrectable */
+ if (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_A)
+ info->mtd.ecc_stats.failed++;
+ if (check_oob && (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_B))
+ info->mtd.ecc_stats.failed++;
+ }
+ info->num_ecc_errs = 0;
+ spin_unlock_irqrestore(&info->ecc_lock, flags);
+}
+
+static inline void
+clear_regs(struct tegra_nand_info *info)
+{
+ info->command_reg = 0;
+ info->config_reg = 0;
+ info->dmactrl_reg = 0;
+}
+
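+/* Build the shadow command/config/dma-control values and program the
+ * DMA pointer registers for a single page transfer (rx selects read
+ * vs. program, do_ecc enables hardware ECC). The caller still writes
+ * CONFIG_REG/DMA_MST_CTRL_REG and starts the command via
+ * tegra_nand_go(). */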
+static void
+prep_transfer_dma(struct tegra_nand_info *info, int rx, int do_ecc, uint32_t page,
+ uint32_t column, dma_addr_t data_dma,
+ uint32_t data_len, dma_addr_t oob_dma, uint32_t oob_len)
+{
+ uint32_t tag_sz = oob_len;
+
+#if 0
+ pr_info("%s: rx=%d ecc=%d page=%d col=%d data_dma=0x%x "
+ "data_len=0x%08x oob_dma=0x%x ooblen=%d\n", __func__,
+ rx, do_ecc, page, column, data_dma, data_len, oob_dma,
+ oob_len);
+#endif
+
+ info->command_reg =
+ COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
+ COMMAND_ALE_BYTE_SIZE(4) | COMMAND_SEC_CMD | COMMAND_RBSY_CHK |
+ COMMAND_TRANS_SIZE(8);
+
+ info->config_reg = (CONFIG_PAGE_SIZE_SEL(3) | CONFIG_PIPELINE_EN |
+ CONFIG_COM_BSY);
+
+ info->dmactrl_reg = (DMA_CTRL_DMA_GO |
+ DMA_CTRL_DMA_PERF_EN | DMA_CTRL_IE_DMA_DONE |
+ DMA_CTRL_IS_DMA_DONE | DMA_CTRL_BURST_SIZE(4));
+
+ if (rx) {
+ if (do_ecc)
+ info->config_reg |= CONFIG_HW_ERR_CORRECTION;
+ info->command_reg |= COMMAND_RX;
+ info->dmactrl_reg |= DMA_CTRL_REUSE_BUFFER;
+ writel(NAND_CMD_READ0, CMD_REG1);
+ writel(NAND_CMD_READSTART, CMD_REG2);
+ } else {
+ info->command_reg |= (COMMAND_TX | COMMAND_AFT_DAT);
+ info->dmactrl_reg |= DMA_CTRL_DIR; /* DMA_RD == TX */
+ writel(NAND_CMD_SEQIN, CMD_REG1);
+ writel(NAND_CMD_PAGEPROG, CMD_REG2);
+ }
+
+ if (data_len) {
+ if (do_ecc)
+ info->config_reg |=
+ CONFIG_HW_ECC | CONFIG_ECC_SEL | CONFIG_TVALUE(0) |
+ CONFIG_SKIP_SPARE | CONFIG_SKIP_SPARE_SEL(0);
+ info->command_reg |= COMMAND_A_VALID;
+ info->dmactrl_reg |= DMA_CTRL_DMA_EN_A;
+ writel(DMA_CFG_BLOCK_SIZE(data_len - 1), DMA_CFG_A_REG);
+ writel(data_dma, DATA_BLOCK_PTR_REG);
+ } else {
+ column = info->mtd.writesize;
+ if (do_ecc)
+ column += info->mtd.ecclayout->oobfree[0].offset;
+ writel(0, DMA_CFG_A_REG);
+ writel(0, DATA_BLOCK_PTR_REG);
+ }
+
+ if (oob_len) {
+ oob_len = info->mtd.oobavail;
+ tag_sz = info->mtd.oobavail;
+ if (do_ecc) {
+ tag_sz += 4; /* size of tag ecc */
+ if (rx)
+ oob_len += 4; /* size of tag ecc */
+ info->config_reg |= CONFIG_ECC_EN_TAG;
+ }
+ if (data_len && rx)
+ oob_len += 4; /* num of skipped bytes */
+
+ info->command_reg |= COMMAND_B_VALID;
+ info->config_reg |= CONFIG_TAG_BYTE_SIZE(tag_sz - 1);
+ info->dmactrl_reg |= DMA_CTRL_DMA_EN_B;
+ writel(DMA_CFG_BLOCK_SIZE(oob_len - 1), DMA_CFG_B_REG);
+ writel(oob_dma, TAG_PTR_REG);
+ } else {
+ writel(0, DMA_CFG_B_REG);
+ writel(0, TAG_PTR_REG);
+ }
+
+ writel((column & 0xffff) | ((page & 0xffff) << 16), ADDR_REG1);
+ writel((page >> 16) & 0xff, ADDR_REG2);
+}
+
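+/* Map a buffer for DMA. MTD can hand us buffers from the vmalloc area,
+ * where virt_to_page()/dma_map_single() are invalid, so resolve the
+ * backing page with vmalloc_to_page() instead; such buffers must not
+ * cross a page boundary (hence the WARN_ON). */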
+static dma_addr_t
+tegra_nand_dma_map(struct device *dev, void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct page *page;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+ if (virt_addr_valid(addr))
+ page = virt_to_page(addr);
+ else {
+ if (WARN_ON(size + offset > PAGE_SIZE))
+ return ~0;
+ page = vmalloc_to_page(addr);
+ }
+ return dma_map_page(dev, page, offset, size, dir);
+}
+
+/* if mode == RAW, then we read data only, with no ECC
+ * if mode == PLACE, we read ONLY the OOB data from a raw offset into the spare
+ * area (ooboffs).
+ * if mode == AUTO, we read main data and the OOB data from the oobfree areas as
+ * specified by nand_ecclayout.
+ */
+static int
+do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ struct mtd_ecc_stats old_ecc_stats;
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint8_t *datbuf = ops->datbuf;
+ uint8_t *oobbuf = ops->oobbuf;
+ uint32_t len = datbuf ? ops->len : 0;
+ uint32_t ooblen = oobbuf ? ops->ooblen : 0;
+ uint32_t oobsz;
+ uint32_t page_count;
+ int err;
+ int do_ecc = 1;
+ dma_addr_t datbuf_dma_addr = 0;
+
+#if 0
+ dump_mtd_oob_ops(ops);
+#endif
+
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ /* TODO: Worry about reads from non-page boundaries later */
+ if (unlikely(from & info->chip.column_mask)) {
+ pr_err("%s: Unaligned read (from 0x%llx) not supported\n",
+ __func__, from);
+ return -EINVAL;
+ }
+
+ if (likely(ops->mode == MTD_OOB_AUTO)) {
+ oobsz = mtd->oobavail;
+ } else {
+ oobsz = mtd->oobsize;
+ do_ecc = 0;
+ }
+
+ if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
+ pr_err("%s: can't read OOB from multiple pages (%d > %d)\n", __func__,
+ ops->ooblen, oobsz);
+ return -EINVAL;
+ } else if (ops->oobbuf) {
+ page_count = 1;
+ } else {
+ page_count = max((uint32_t)(ops->len / mtd->writesize), (uint32_t)1);
+ }
+
+ mutex_lock(&info->lock);
+
+ memcpy(&old_ecc_stats, &mtd->ecc_stats, sizeof(old_ecc_stats));
+
+ if (do_ecc) {
+ enable_ints(info, IER_ECC_ERR);
+ writel(info->ecc_addr, ECC_PTR_REG);
+ } else
+ disable_ints(info, IER_ECC_ERR);
+
+ split_addr(info, from, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ /* reset it to point back to beginning of page */
+ from -= column;
+
+ while (page_count--) {
+ int a_len = min(mtd->writesize - column, len);
+ int b_len = min(oobsz, ooblen);
+
+#if 0
+ pr_info("%s: chip:=%d page=%d col=%d\n", __func__, chipnr,
+ page, column);
+#endif
+
+ clear_regs(info);
+ if (datbuf)
+ datbuf_dma_addr = tegra_nand_dma_map(info->dev, datbuf, a_len, DMA_FROM_DEVICE);
+
+ prep_transfer_dma(info, 1, do_ecc, page, column, datbuf_dma_addr,
+ a_len, info->oob_dma_addr,
+ b_len);
+ writel(info->config_reg, CONFIG_REG);
+ writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
+
+ INIT_COMPLETION(info->dma_complete);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ goto out_err;
+
+ if (!wait_for_completion_timeout(&info->dma_complete, 2*HZ)) {
+ pr_err("%s: dma completion timeout\n", __func__);
+ dump_nand_regs();
+ err = -ETIMEDOUT;
+ goto out_err;
+ }
+
+ /*pr_info("tegra_read_oob: DMA complete\n");*/
+
+ /* if we are here, transfer is done */
+ if (datbuf)
+ dma_unmap_page(info->dev, datbuf_dma_addr, a_len, DMA_FROM_DEVICE);
+
+ if (oobbuf) {
+ uint32_t ofs = datbuf && oobbuf ? 4 : 0; /* skipped bytes */
+ memcpy(oobbuf, info->oob_dma_buf + ofs, b_len);
+ }
+
+ correct_ecc_errors_on_blank_page(info, datbuf, oobbuf, a_len, b_len);
+
+ if (datbuf) {
+ len -= a_len;
+ datbuf += a_len;
+ ops->retlen += a_len;
+ }
+
+ if (oobbuf) {
+ ooblen -= b_len;
+ oobbuf += b_len;
+ ops->oobretlen += b_len;
+ }
+
+ update_ecc_counts(info, oobbuf != NULL);
+
+ if (!page_count)
+ break;
+
+ from += mtd->writesize;
+ column = 0;
+
+ split_addr(info, from, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ }
+
+ disable_ints(info, IER_ECC_ERR);
+
+ if (mtd->ecc_stats.failed != old_ecc_stats.failed)
+ err = -EBADMSG;
+ else if (mtd->ecc_stats.corrected != old_ecc_stats.corrected)
+ err = -EUCLEAN;
+ else
+ err = 0;
+
+ mutex_unlock(&info->lock);
+ return err;
+
+out_err:
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ disable_ints(info, IER_ECC_ERR);
+ mutex_unlock(&info->lock);
+ return err;
+}
+
+/* just does some parameter checking and calls do_read_oob */
+static int
+tegra_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ if (ops->datbuf && unlikely((from + ops->len) > mtd->size)) {
+ pr_err("%s: Can't read past end of device.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->oobbuf && !ops->ooblen)) {
+ pr_err("%s: Reading 0 bytes from OOB is meaningless\n", __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->mode != MTD_OOB_AUTO)) {
+ if (ops->oobbuf && ops->datbuf) {
+ pr_err("%s: can't read OOB + Data in non-AUTO mode.\n",
+ __func__);
+ return -EINVAL;
+ }
+ if ((ops->mode == MTD_OOB_RAW) && !ops->datbuf) {
+ pr_err("%s: Raw mode only supports reading data area.\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return do_read_oob(mtd, from, ops);
+}
+
+static int
+tegra_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ pr_debug("%s: write: to=0x%llx len=0x%x\n", __func__, to, len);
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.oobbuf = NULL;
+ ret = mtd->write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static int
+do_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ int chipnr;
+ uint32_t page;
+ uint32_t column;
+ uint8_t *datbuf = ops->datbuf;
+ uint8_t *oobbuf = ops->oobbuf;
+ uint32_t len = datbuf ? ops->len : 0;
+ uint32_t ooblen = oobbuf ? ops->ooblen : 0;
+ uint32_t oobsz;
+ uint32_t page_count;
+ int err;
+ int do_ecc = 1;
+ dma_addr_t datbuf_dma_addr = 0;
+
+#if 0
+ dump_mtd_oob_ops(ops);
+#endif
+
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ if (!ops->len)
+ return 0;
+
+
+ if (likely(ops->mode == MTD_OOB_AUTO)) {
+ oobsz = mtd->oobavail;
+ } else {
+ oobsz = mtd->oobsize;
+ do_ecc = 0;
+ }
+
+ if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
+ pr_err("%s: can't write OOB to multiple pages (%d > %d)\n",
+ __func__, ops->ooblen, oobsz);
+ return -EINVAL;
+ } else if (ops->oobbuf) {
+ page_count = 1;
+ } else
+ page_count = max((uint32_t)(ops->len / mtd->writesize), (uint32_t)1);
+
+ mutex_lock(&info->lock);
+
+ split_addr(info, to, &chipnr, &page, &column);
+ select_chip(info, chipnr);
+
+ while (page_count--) {
+ int a_len = min(mtd->writesize, len);
+ int b_len = min(oobsz, ooblen);
+
+ if (datbuf)
+ datbuf_dma_addr = tegra_nand_dma_map(info->dev, datbuf, a_len, DMA_TO_DEVICE);
+ if (oobbuf)
+ memcpy(info->oob_dma_buf, oobbuf, b_len);
+
+ clear_regs(info);
+ prep_transfer_dma(info, 0, do_ecc, page, column, datbuf_dma_addr,
+ a_len, info->oob_dma_addr, b_len);
+
+ writel(info->config_reg, CONFIG_REG);
+ writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
+
+ INIT_COMPLETION(info->dma_complete);
+ err = tegra_nand_go(info);
+ if (err != 0)
+ goto out_err;
+
+ if (!wait_for_completion_timeout(&info->dma_complete, 2*HZ)) {
+ pr_err("%s: dma completion timeout\n", __func__);
+ dump_nand_regs();
+ err = -ETIMEDOUT;
+ goto out_err;
+ }
+
+ if (datbuf) {
+ dma_unmap_page(info->dev, datbuf_dma_addr, a_len, DMA_TO_DEVICE);
+ len -= a_len;
+ datbuf += a_len;
+ ops->retlen += a_len;
+ }
+ if (oobbuf) {
+ ooblen -= b_len;
+ oobbuf += b_len;
+ ops->oobretlen += b_len;
+ }
+
+ if (!page_count)
+ break;
+
+ to += mtd->writesize;
+ column = 0;
+
+ split_addr(info, to, &chipnr, &page, &column);
+ if (chipnr != info->chip.curr_chip)
+ select_chip(info, chipnr);
+ }
+
+ mutex_unlock(&info->lock);
+ return err;
+
+out_err:
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ mutex_unlock(&info->lock);
+ return err;
+}
+
+static int
+tegra_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+
+ if (unlikely(to & info->chip.column_mask)) {
+ pr_err("%s: Unaligned write (to 0x%llx) not supported\n",
+ __func__, to);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->oobbuf && !ops->ooblen)) {
+ pr_err("%s: Writing 0 bytes to OOB is meaningless\n", __func__);
+ return -EINVAL;
+ }
+
+ return do_write_oob(mtd, to, ops);
+}
+
+static int
+tegra_nand_suspend(struct mtd_info *mtd)
+{
+ return 0;
+}
+
+static void
+tegra_nand_resume(struct mtd_info *mtd)
+{
+}
+
+static int
+scan_bad_blocks(struct tegra_nand_info *info)
+{
+ struct mtd_info *mtd = &info->mtd;
+ int num_blocks = mtd->size >> info->chip.block_shift;
+ uint32_t block;
+ int is_bad = 0;
+
+ for (block = 0; block < num_blocks; ++block) {
+ /* make sure the bit is cleared, meaning it's bad/unknown before
+ * we check. */
+ clear_bit(block, info->bb_bitmap);
+ is_bad = mtd->block_isbad(mtd, block << info->chip.block_shift);
+
+ if (is_bad == 0)
+ set_bit(block, info->bb_bitmap);
+ else if (is_bad > 0)
+ pr_info("block 0x%08x is bad.\n", block);
+ else {
+ pr_err("Fatal error (%d) while scanning for "
+ "bad blocks\n", is_bad);
+ return is_bad;
+ }
+ }
+ return 0;
+}
+
+static void
+set_chip_timing(struct tegra_nand_info *info)
+{
+ struct tegra_nand_chip_parms *chip_parms = &info->plat->chip_parms[0];
+ uint32_t tmp;
+
+ /* TODO: Actually search the chip_parms list for the correct device. */
+ /* TODO: Get the appropriate frequency from the clock subsystem */
+#define NAND_CLK_FREQ 108000
+#define CNT(t) (((((t) * NAND_CLK_FREQ) + 1000000 - 1) / 1000000) - 1)
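+ /* CNT() converts a nanosecond figure into controller clocks, rounded
+ * up, minus one (the timing fields appear to be N+1 encoded). At the
+ * assumed 108MHz clock, CNT(26) = ((26 * 108000 + 999999) / 1000000) - 1 = 2. */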
+ tmp = (TIMING_TRP_RESP(CNT(chip_parms->timing.trp_resp)) |
+ TIMING_TWB(CNT(chip_parms->timing.twb)) |
+ TIMING_TCR_TAR_TRR(CNT(chip_parms->timing.tcr_tar_trr)) |
+ TIMING_TWHR(CNT(chip_parms->timing.twhr)) |
+ TIMING_TCS(CNT(chip_parms->timing.tcs)) |
+ TIMING_TWH(CNT(chip_parms->timing.twh)) |
+ TIMING_TWP(CNT(chip_parms->timing.twp)) |
+ TIMING_TRH(CNT(chip_parms->timing.trh)) |
+ TIMING_TRP(CNT(chip_parms->timing.trp)));
+ writel(tmp, TIMING_REG);
+ writel(TIMING2_TADL(CNT(chip_parms->timing.tadl)), TIMING2_REG);
+#undef CNT
+#undef NAND_CLK_FREQ
+}
+
+/* Scans for nand flash devices, identifies them, and fills in the
+ * device info. */
+static int
+tegra_nand_scan(struct mtd_info *mtd, int maxchips)
+{
+ struct tegra_nand_info *info = MTD_TO_INFO(mtd);
+ struct nand_flash_dev *dev_info;
+ struct nand_manufacturers *vendor_info;
+ uint32_t tmp;
+ uint32_t dev_id;
+ uint32_t vendor_id;
+ uint32_t dev_parms;
+ uint32_t mlc_parms;
+ int cnt;
+ int err = 0;
+
+ writel(SCAN_TIMING_VAL, TIMING_REG);
+ writel(SCAN_TIMING2_VAL, TIMING2_REG);
+ writel(0, CONFIG_REG);
+
+ select_chip(info, 0);
+ err = tegra_nand_cmd_readid(info, &tmp);
+ if (err != 0)
+ goto out_error;
+
+ vendor_id = tmp & 0xff;
+ dev_id = (tmp >> 8) & 0xff;
+ mlc_parms = (tmp >> 16) & 0xff;
+ dev_parms = (tmp >> 24) & 0xff;
+
+ dev_info = find_nand_flash_device(dev_id);
+ if (dev_info == NULL) {
+ pr_err("%s: unknown flash device id (0x%02x) found.\n", __func__,
+ dev_id);
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ vendor_info = find_nand_flash_vendor(vendor_id);
+ if (vendor_info == NULL) {
+ pr_err("%s: unknown flash vendor id (0x%02x) found.\n", __func__,
+ vendor_id);
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ /* loop through and see if we can find more devices */
+ for (cnt = 1; cnt < info->plat->max_chips; ++cnt) {
+ select_chip(info, cnt);
+ /* TODO: figure out what to do about errors here */
+ err = tegra_nand_cmd_readid(info, &tmp);
+ if (err != 0)
+ goto out_error;
+ if ((dev_id != ((tmp >> 8) & 0xff)) ||
+ (vendor_id != (tmp & 0xff)))
+ break;
+ }
+
+ pr_info("%s: %d NAND chip(s) found (vend=0x%02x, dev=0x%02x) (%s %s)\n",
+ DRIVER_NAME, cnt, vendor_id, dev_id, vendor_info->name,
+ dev_info->name);
+ info->chip.num_chips = cnt;
+ info->chip.chipsize = dev_info->chipsize << 20;
+ mtd->size = info->chip.num_chips * info->chip.chipsize;
+
+ /* format of 4th id byte returned by READ ID
+ * bit 7 = rsvd
+ * bit 6 = bus width. 1 == 16bit, 0 == 8bit
+ * bits 5:4 = data block size. 64KB * (2^val)
+ * bit 3 = rsvd
+ * bit 2 = spare area size / 512 bytes. 0 == 8bytes, 1 == 16bytes
+ * bits 1:0 = page size. 1KB * (2^val)
+ */
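+ /* Example: a hypothetical 4th id byte of 0x15 would decode as a 2KB
+ * page (bits 1:0 == 1), 16 spare bytes per 512 (bit 2 == 1), a 128KB
+ * erase block (bits 5:4 == 1), and an 8-bit bus (bit 6 == 0). */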
+
+ /* TODO: we should reconcile the information read from chip and
+ * the data given to us in tegra_nand_platform->chip_parms??
+ * platform data will give us timing information. */
+
+ /* page_size */
+ tmp = dev_parms & 0x3;
+ mtd->writesize = 1024 << tmp;
+ info->chip.column_mask = mtd->writesize - 1;
+
+ /* Note: See the oob layout description for why we only support 2KB pages. */
+ if (mtd->writesize > 2048) {
+ pr_err("%s: Large page devices with pagesize > 2kb are NOT "
+ "supported\n", __func__);
+ goto out_error;
+ } else if (mtd->writesize < 2048) {
+ pr_err("%s: Small page devices are NOT supported\n", __func__);
+ goto out_error;
+ }
+
+ /* spare area, must be at least 64 bytes */
+ tmp = (dev_parms >> 2) & 0x1;
+ tmp = (8 << tmp) * (mtd->writesize / 512);
+ if (tmp < 64) {
+ pr_err("%s: Spare area (%d bytes) too small\n", __func__, tmp);
+ goto out_error;
+ }
+ mtd->oobsize = tmp;
+ mtd->oobavail = tegra_nand_oob_64.oobavail;
+
+ /* data block size (erase size) (w/o spare) */
+ tmp = (dev_parms >> 4) & 0x3;
+ mtd->erasesize = (64 * 1024) << tmp;
+ info->chip.block_shift = ffs(mtd->erasesize) - 1;
+
+ /* used to select the appropriate chip/page in case multiple devices
+ * are connected */
+ info->chip.chip_shift = ffs(info->chip.chipsize) - 1;
+ info->chip.page_shift = ffs(mtd->writesize) - 1;
+ info->chip.page_mask =
+ (info->chip.chipsize >> info->chip.page_shift) - 1;
+
+ /* now fill in the rest of the mtd fields */
+ mtd->ecclayout = &tegra_nand_oob_64;
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+
+ mtd->erase = tegra_nand_erase;
+ mtd->lock = NULL;
+ mtd->point = NULL;
+ mtd->unpoint = NULL;
+ mtd->read = tegra_nand_read;
+ mtd->write = tegra_nand_write;
+ mtd->read_oob = tegra_nand_read_oob;
+ mtd->write_oob = tegra_nand_write_oob;
+
+ mtd->resume = tegra_nand_resume;
+ mtd->suspend = tegra_nand_suspend;
+ mtd->block_isbad = tegra_nand_block_isbad;
+ mtd->block_markbad = tegra_nand_block_markbad;
+
+ /* TODO: should take vendor_id/device_id */
+ set_chip_timing(info);
+
+ return 0;
+
+out_error:
+ pr_err("%s: NAND device scan aborted due to error(s).\n", __func__);
+ return err;
+}
+
+static int __devinit
+tegra_nand_probe(struct platform_device *pdev)
+{
+ struct tegra_nand_platform *plat = pdev->dev.platform_data;
+ struct tegra_nand_info *info = NULL;
+ struct tegra_nand_chip *chip = NULL;
+ struct mtd_info *mtd = NULL;
+ int err = 0;
+ uint64_t num_erase_blocks;
+
+ pr_debug("%s: probing (%p)\n", __func__, pdev);
+
+ if (!plat) {
+ pr_err("%s: no platform device info\n", __func__);
+ return -EINVAL;
+ } else if (!plat->chip_parms) {
+ pr_err("%s: no platform nand parms\n", __func__);
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(struct tegra_nand_info), GFP_KERNEL);
+ if (!info) {
+ pr_err("%s: no memory for flash info\n", __func__);
+ return -ENOMEM;
+ }
+
+ info->dev = &pdev->dev;
+ info->plat = plat;
+
+ platform_set_drvdata(pdev, info);
+
+ init_completion(&info->cmd_complete);
+ init_completion(&info->dma_complete);
+
+ mutex_init(&info->lock);
+ spin_lock_init(&info->ecc_lock);
+
+ chip = &info->chip;
+ chip->priv = &info->mtd;
+ chip->curr_chip = -1;
+
+ mtd = &info->mtd;
+ mtd->name = dev_name(&pdev->dev);
+ mtd->priv = &info->chip;
+ mtd->owner = THIS_MODULE;
+
+ /* HACK: allocate a dma buffer to hold 1 page oob data */
+ info->oob_dma_buf = dma_alloc_coherent(NULL, 64,
+ &info->oob_dma_addr, GFP_KERNEL);
+ if (!info->oob_dma_buf) {
+ err = -ENOMEM;
+ goto out_free_info;
+ }
+
+ /* this will store the ecc error vector info */
+ info->ecc_buf = dma_alloc_coherent(NULL, ECC_BUF_SZ, &info->ecc_addr,
+ GFP_KERNEL);
+ if (!info->ecc_buf) {
+ err = -ENOMEM;
+ goto out_free_dma_buf;
+ }
+
+ /* grab the irq */
+ if (!(pdev->resource[0].flags & IORESOURCE_IRQ)) {
+ pr_err("NAND IRQ resource not defined\n");
+ err = -EINVAL;
+ goto out_free_ecc_buf;
+ }
+
+ err = request_irq(pdev->resource[0].start, tegra_nand_irq,
+ IRQF_SHARED, DRIVER_NAME, info);
+ if (err) {
+ pr_err("Unable to request IRQ %d (%d)\n",
+ pdev->resource[0].start, err);
+ goto out_free_ecc_buf;
+ }
+
+ /* TODO: configure pinmux here?? */
+ info->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->clk)) {
+ err = PTR_ERR(info->clk);
+ goto out_dis_irq;
+ }
+ clk_set_rate(info->clk, 108000000);
+
+ cfg_hwstatus_mon(info);
+
+ /* clear all pending interrupts */
+ writel(readl(ISR_REG), ISR_REG);
+
+ /* clear dma interrupt */
+ writel(DMA_CTRL_IS_DMA_DONE, DMA_MST_CTRL_REG);
+
+ /* enable interrupts */
+ disable_ints(info, 0xffffffff);
+ enable_ints(info, IER_ERR_TRIG_VAL(4) | IER_UND | IER_OVR | IER_CMD_DONE |
+ IER_ECC_ERR | IER_GIE);
+
+ if (tegra_nand_scan(mtd, plat->max_chips)) {
+ err = -ENXIO;
+ goto out_dis_irq;
+ }
+ pr_info("%s: NVIDIA Tegra NAND controller @ base=0x%08x irq=%d.\n",
+ DRIVER_NAME, TEGRA_NAND_PHYS, pdev->resource[0].start);
+
+ /* allocate memory to hold the ecc error info */
+ info->max_ecc_errs = MAX_DMA_SZ / mtd->writesize;
+ info->ecc_errs = kmalloc(info->max_ecc_errs * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!info->ecc_errs) {
+ err = -ENOMEM;
+ goto out_dis_irq;
+ }
+
+ /* alloc the bad block bitmap */
+ num_erase_blocks = mtd->size;
+ do_div(num_erase_blocks, mtd->erasesize);
+ info->bb_bitmap = kzalloc(BITS_TO_LONGS(num_erase_blocks) *
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!info->bb_bitmap) {
+ err = -ENOMEM;
+ goto out_free_ecc;
+ }
+
+ err = scan_bad_blocks(info);
+ if (err != 0)
+ goto out_free_bbbmap;
+
+#if 0
+ dump_nand_regs();
+#endif
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(mtd, part_probes, &info->parts, 0);
+ if (err > 0) {
+ err = add_mtd_partitions(mtd, info->parts, err);
+ } else if (err <= 0 && plat->parts) {
+ err = add_mtd_partitions(mtd, plat->parts, plat->nr_parts);
+ } else
+#endif
+ err = add_mtd_device(mtd);
+ if (err != 0)
+ goto out_free_bbbmap;
+
+ dev_set_drvdata(&pdev->dev, info);
+
+ pr_debug("%s: probe done.\n", __func__);
+ return 0;
+
+out_free_bbbmap:
+ kfree(info->bb_bitmap);
+
+out_free_ecc:
+ kfree(info->ecc_errs);
+
+out_dis_irq:
+ disable_ints(info, 0xffffffff);
+ free_irq(pdev->resource[0].start, info);
+
+out_free_ecc_buf:
+ dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, info->ecc_addr);
+
+out_free_dma_buf:
+ dma_free_coherent(NULL, 64, info->oob_dma_buf,
+ info->oob_dma_addr);
+
+out_free_info:
+ platform_set_drvdata(pdev, NULL);
+ kfree(info);
+
+ return err;
+}
+
+static int __devexit
+tegra_nand_remove(struct platform_device *pdev)
+{
+ struct tegra_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (info) {
+ free_irq(pdev->resource[0].start, info);
+ kfree(info->bb_bitmap);
+ kfree(info->ecc_errs);
+ dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, info->ecc_addr);
+ /* matches the 64-byte allocation made in probe */
+ dma_free_coherent(NULL, 64, info->oob_dma_buf,
+ info->oob_dma_addr);
+ kfree(info);
+ }
+
+ return 0;
+}
+
+static struct platform_driver tegra_nand_driver = {
+ .probe = tegra_nand_probe,
+ .remove = __devexit_p(tegra_nand_remove),
+ .suspend = NULL,
+ .resume = NULL,
+ .driver = {
+ .name = "tegra_nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init
+tegra_nand_init(void)
+{
+ return platform_driver_register(&tegra_nand_driver);
+}
+
+static void __exit
+tegra_nand_exit(void)
+{
+ platform_driver_unregister(&tegra_nand_driver);
+}
+
+module_init(tegra_nand_init);
+module_exit(tegra_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/mtd/devices/tegra_nand.h b/drivers/mtd/devices/tegra_nand.h
new file mode 100644
index 000000000000..cc310d577631
--- /dev/null
+++ b/drivers/mtd/devices/tegra_nand.h
@@ -0,0 +1,147 @@
+/*
+ * drivers/mtd/devices/tegra_nand.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTD_DEV_TEGRA_NAND_H
+#define __MTD_DEV_TEGRA_NAND_H
+
+#include <mach/io.h>
+
+#define __BITMASK0(len) ((1 << (len)) - 1)
+#define __BITMASK(start, len) (__BITMASK0(len) << (start))
+#define REG_BIT(bit) (1 << (bit))
+#define REG_FIELD(val, start, len) (((val) & __BITMASK0(len)) << (start))
+#define REG_FIELD_MASK(start, len) (~(__BITMASK((start), (len))))
+#define REG_GET_FIELD(val, start, len) (((val) >> (start)) & __BITMASK0(len))
+
+/* tegra nand registers... */
+#define TEGRA_NAND_PHYS 0x70008000
+#define TEGRA_NAND_BASE IO_TO_VIRT(TEGRA_NAND_PHYS)
+#define COMMAND_REG (TEGRA_NAND_BASE + 0x00)
+#define STATUS_REG (TEGRA_NAND_BASE + 0x04)
+#define ISR_REG (TEGRA_NAND_BASE + 0x08)
+#define IER_REG (TEGRA_NAND_BASE + 0x0c)
+#define CONFIG_REG (TEGRA_NAND_BASE + 0x10)
+#define TIMING_REG (TEGRA_NAND_BASE + 0x14)
+#define RESP_REG (TEGRA_NAND_BASE + 0x18)
+#define TIMING2_REG (TEGRA_NAND_BASE + 0x1c)
+#define CMD_REG1 (TEGRA_NAND_BASE + 0x20)
+#define CMD_REG2 (TEGRA_NAND_BASE + 0x24)
+#define ADDR_REG1 (TEGRA_NAND_BASE + 0x28)
+#define ADDR_REG2 (TEGRA_NAND_BASE + 0x2c)
+#define DMA_MST_CTRL_REG (TEGRA_NAND_BASE + 0x30)
+#define DMA_CFG_A_REG (TEGRA_NAND_BASE + 0x34)
+#define DMA_CFG_B_REG (TEGRA_NAND_BASE + 0x38)
+#define FIFO_CTRL_REG (TEGRA_NAND_BASE + 0x3c)
+#define DATA_BLOCK_PTR_REG (TEGRA_NAND_BASE + 0x40)
+#define TAG_PTR_REG (TEGRA_NAND_BASE + 0x44)
+#define ECC_PTR_REG (TEGRA_NAND_BASE + 0x48)
+#define DEC_STATUS_REG (TEGRA_NAND_BASE + 0x4c)
+#define HWSTATUS_CMD_REG (TEGRA_NAND_BASE + 0x50)
+#define HWSTATUS_MASK_REG (TEGRA_NAND_BASE + 0x54)
+#define LL_CONFIG_REG (TEGRA_NAND_BASE + 0x58)
+#define LL_PTR_REG (TEGRA_NAND_BASE + 0x5c)
+#define LL_STATUS_REG (TEGRA_NAND_BASE + 0x60)
+
+/* nand_command bits */
+#define COMMAND_GO REG_BIT(31)
+#define COMMAND_CLE REG_BIT(30)
+#define COMMAND_ALE REG_BIT(29)
+#define COMMAND_PIO REG_BIT(28)
+#define COMMAND_TX REG_BIT(27)
+#define COMMAND_RX REG_BIT(26)
+#define COMMAND_SEC_CMD REG_BIT(25)
+#define COMMAND_AFT_DAT REG_BIT(24)
+#define COMMAND_TRANS_SIZE(val) REG_FIELD((val), 20, 4)
+#define COMMAND_A_VALID REG_BIT(19)
+#define COMMAND_B_VALID REG_BIT(18)
+#define COMMAND_RD_STATUS_CHK REG_BIT(17)
+#define COMMAND_RBSY_CHK REG_BIT(16)
+#define COMMAND_CE(val) REG_BIT(8 + ((val) & 0x7))
+#define COMMAND_CLE_BYTE_SIZE(val) REG_FIELD((val), 4, 2)
+#define COMMAND_ALE_BYTE_SIZE(val) REG_FIELD((val), 0, 4)
+
+/* nand isr bits */
+#define ISR_UND REG_BIT(7)
+#define ISR_OVR REG_BIT(6)
+#define ISR_CMD_DONE REG_BIT(5)
+#define ISR_ECC_ERR REG_BIT(4)
+
+/* nand ier bits */
+#define IER_ERR_TRIG_VAL(val) REG_FIELD((val), 16, 4)
+#define IER_UND REG_BIT(7)
+#define IER_OVR REG_BIT(6)
+#define IER_CMD_DONE REG_BIT(5)
+#define IER_ECC_ERR REG_BIT(4)
+#define IER_GIE REG_BIT(0)
+
+/* nand config bits */
+#define CONFIG_HW_ECC REG_BIT(31)
+#define CONFIG_ECC_SEL REG_BIT(30)
+#define CONFIG_HW_ERR_CORRECTION REG_BIT(29)
+#define CONFIG_PIPELINE_EN REG_BIT(28)
+#define CONFIG_ECC_EN_TAG REG_BIT(27)
+#define CONFIG_TVALUE(val) REG_FIELD((val), 24, 2)
+#define CONFIG_SKIP_SPARE REG_BIT(23)
+#define CONFIG_COM_BSY REG_BIT(22)
+#define CONFIG_BUS_WIDTH REG_BIT(21)
+#define CONFIG_PAGE_SIZE_SEL(val) REG_FIELD((val), 16, 3)
+#define CONFIG_SKIP_SPARE_SEL(val) REG_FIELD((val), 14, 2)
+#define CONFIG_TAG_BYTE_SIZE(val) REG_FIELD((val), 0, 8)
+
+/* nand timing bits */
+#define TIMING_TRP_RESP(val) REG_FIELD((val), 28, 4)
+#define TIMING_TWB(val) REG_FIELD((val), 24, 4)
+#define TIMING_TCR_TAR_TRR(val) REG_FIELD((val), 20, 4)
+#define TIMING_TWHR(val) REG_FIELD((val), 16, 4)
+#define TIMING_TCS(val) REG_FIELD((val), 14, 2)
+#define TIMING_TWH(val) REG_FIELD((val), 12, 2)
+#define TIMING_TWP(val) REG_FIELD((val), 8, 4)
+#define TIMING_TRH(val) REG_FIELD((val), 4, 2)
+#define TIMING_TRP(val) REG_FIELD((val), 0, 4)
+
+/* nand timing2 bits */
+#define TIMING2_TADL(val) REG_FIELD((val), 0, 4)
+
+/* nand dma_mst_ctrl bits */
+#define DMA_CTRL_DMA_GO REG_BIT(31)
+#define DMA_CTRL_DIR REG_BIT(30)
+#define DMA_CTRL_DMA_PERF_EN REG_BIT(29)
+#define DMA_CTRL_IE_DMA_DONE REG_BIT(28)
+#define DMA_CTRL_REUSE_BUFFER REG_BIT(27)
+#define DMA_CTRL_BURST_SIZE(val) REG_FIELD((val), 24, 3)
+#define DMA_CTRL_IS_DMA_DONE REG_BIT(20)
+#define DMA_CTRL_DMA_EN_A REG_BIT(2)
+#define DMA_CTRL_DMA_EN_B REG_BIT(1)
+
+/* nand dma_cfg_a/cfg_b bits */
+#define DMA_CFG_BLOCK_SIZE(val) REG_FIELD((val), 0, 16)
+
+/* nand dec_status bits */
+#define DEC_STATUS_ERR_PAGE_NUM(val) REG_GET_FIELD((val), 24, 8)
+#define DEC_STATUS_ERR_CNT(val) REG_GET_FIELD((val), 16, 8)
+#define DEC_STATUS_ECC_FAIL_A REG_BIT(1)
+#define DEC_STATUS_ECC_FAIL_B REG_BIT(0)
+
+/* nand hwstatus_mask bits */
+#define HWSTATUS_RDSTATUS_MASK(val) REG_FIELD((val), 24, 8)
+#define HWSTATUS_RDSTATUS_EXP_VAL(val) REG_FIELD((val), 16, 8)
+#define HWSTATUS_RBSY_MASK(val) REG_FIELD((val), 8, 8)
+#define HWSTATUS_RBSY_EXP_VAL(val) REG_FIELD((val), 0, 8)
+
+#endif
+
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 51237fbb1bbb..6d20b0454a1d 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -231,8 +231,7 @@ static int tps6586x_dvm_voltages[] = {
};
#define TPS6586X_REGULATOR(_id, vdata, _ops, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
-{ \
+ ereg0, ebit0, ereg1, ebit1) \
.desc = { \
.name = "REG-" #_id, \
.ops = &tps6586x_regulator_##_ops, \
@@ -248,18 +247,26 @@ static int tps6586x_dvm_voltages[] = {
.enable_bit[0] = (ebit0), \
.enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
.enable_bit[1] = (ebit1), \
- .voltages = tps6586x_##vdata##_voltages, \
-}
+ .voltages = tps6586x_##vdata##_voltages,
+
+#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
+ .go_reg = TPS6586X_##goreg, \
+ .go_bit = (gobit),
#define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
+{ \
TPS6586X_REGULATOR(_id, vdata, ldo_ops, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1, 0, 0)
+ ereg0, ebit0, ereg1, ebit1) \
+}
#define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
+{ \
TPS6586X_REGULATOR(_id, vdata, dvm_ops, vreg, shift, nbits, \
- ereg0, ebit0, ereg1, ebit1, goreg, gobit)
+ ereg0, ebit0, ereg1, ebit1) \
+ TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
+}
static struct tps6586x_regulator tps6586x_regulator[] = {
TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0),
@@ -267,11 +274,11 @@ static struct tps6586x_regulator tps6586x_regulator[] = {
TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6),
TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4),
TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5),
- TPS6586X_LDO(LDO_8, ldo, SUPPLYV1, 5, 3, ENC, 6, END, 6),
+ TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6),
TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
- TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, ENE, 7, ENE, 7),
+ TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
- TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 1, END, 1),
+ TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6),
TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6),
@@ -290,6 +297,10 @@ static inline int tps6586x_regulator_preinit(struct device *parent,
uint8_t val1, val2;
int ret;
+ if (ri->enable_reg[0] == ri->enable_reg[1] &&
+ ri->enable_bit[0] == ri->enable_bit[1])
+ return 0;
+
ret = tps6586x_read(parent, ri->enable_reg[0], &val1);
if (ret)
return ret;
@@ -298,14 +309,14 @@ static inline int tps6586x_regulator_preinit(struct device *parent,
if (ret)
return ret;
- if (!(val2 & ri->enable_bit[1]))
+ if (!(val2 & (1 << ri->enable_bit[1])))
return 0;
/*
* The regulator is on, but it's enabled with the bit we don't
* want to use, so we switch the enable bits
*/
- if (!(val1 & ri->enable_bit[0])) {
+ if (!(val1 & (1 << ri->enable_bit[0]))) {
ret = tps6586x_set_bits(parent, ri->enable_reg[0],
1 << ri->enable_bit[0]);
if (ret)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 48ca7132cc05..241c7f73a410 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -286,6 +286,13 @@ config RTC_DRV_DM355EVM
help
Supports the RTC firmware in the MSP430 on the DM355 EVM.
+config RTC_DRV_TPS6586X
+ tristate "TI TPS6586X RTC"
+ depends on I2C
+ select MFD_TPS6586X
+ help
+ This driver supports the RTC on the TI TPS6586X PMIC.
+
config RTC_DRV_TWL92330
boolean "TI TWL92330/Menelaus"
depends on MENELAUS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 0f207b3b5833..acd8426c3d8e 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
new file mode 100644
index 000000000000..9ab93cb9de0e
--- /dev/null
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -0,0 +1,325 @@
+/*
+ * drivers/rtc/rtc-tps6586x.c
+ *
+ * RTC driver for TI TPS6586x
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps6586x.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define TPS_EPOCH 2009
+
+#define RTC_CTRL 0xc0
+# define RTC_ENABLE (1 << 5) /* enables tick updates */
+# define RTC_HIRES (1 << 4) /* selects 1 kHz vs 32 kHz updates */
+#define RTC_ALARM1_HI 0xc1
+#define RTC_COUNT4 0xc6
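+/* The RTC maintains a 40-bit up-counter read MSB-first from five
+ * consecutive registers starting at RTC_COUNT4. In low-res mode it
+ * ticks at 1024 Hz, so seconds == counter >> 10. */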
+
+struct tps6586x_rtc {
+ unsigned long epoch_start;
+ int irq;
+ bool irq_en;
+ struct rtc_device *rtc;
+};
+
+static inline struct device *to_tps6586x_dev(struct device *dev)
+{
+ return dev->parent;
+}
+
+static int tps6586x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long long ticks = 0;
+ unsigned long seconds;
+ u8 buff[5];
+ int err;
+ int i;
+
+ err = tps6586x_reads(tps_dev, RTC_COUNT4, sizeof(buff), buff);
+ if (err < 0) {
+ dev_err(dev, "failed to read counter\n");
+ return err;
+ }
+
+ for (i = 0; i < sizeof(buff); i++) {
+ ticks <<= 8;
+ ticks |= buff[i];
+ }
+
+ seconds = ticks >> 10;
+
+ seconds += rtc->epoch_start;
+ rtc_time_to_tm(seconds, tm);
+ return rtc_valid_tm(tm);
+}
+
+static int tps6586x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long long ticks;
+ unsigned long seconds;
+ u8 buff[5];
+ int err;
+
+ rtc_tm_to_time(tm, &seconds);
+
+ if (WARN_ON(seconds < rtc->epoch_start)) {
+ dev_err(dev, "requested time unsupported\n");
+ return -EINVAL;
+ }
+
+ seconds -= rtc->epoch_start;
+
+ ticks = seconds << 10;
+ buff[0] = (ticks >> 32) & 0xff;
+ buff[1] = (ticks >> 24) & 0xff;
+ buff[2] = (ticks >> 16) & 0xff;
+ buff[3] = (ticks >> 8) & 0xff;
+ buff[4] = ticks & 0xff;
+
+ err = tps6586x_clr_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
+ if (err < 0) {
+ dev_err(dev, "failed to clear RTC_ENABLE\n");
+ return err;
+ }
+
+ err = tps6586x_writes(tps_dev, RTC_COUNT4, sizeof(buff), buff);
+ if (err < 0) {
+ dev_err(dev, "failed to program new time\n");
+ return err;
+ }
+
+ err = tps6586x_set_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
+ if (err < 0) {
+ dev_err(dev, "failed to set RTC_ENABLE\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int tps6586x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long seconds;
+ unsigned long ticks;
+ u8 buff[3];
+ int err;
+
+ if (rtc->irq == -1)
+ return -EIO;
+
+ rtc_tm_to_time(&alrm->time, &seconds);
+
+ if (WARN_ON(alrm->enabled && (seconds < rtc->epoch_start))) {
+ dev_err(dev, "can't set alarm to requested time\n");
+ return -EINVAL;
+ }
+
+ if (rtc->irq_en && (rtc->irq != -1)) {
+ disable_irq(rtc->irq);
+ rtc->irq_en = false;
+ }
+
+ seconds -= rtc->epoch_start;
+ ticks = (seconds << 10) & 0xffffff;
+
+ buff[0] = (ticks >> 16) & 0xff;
+ buff[1] = (ticks >> 8) & 0xff;
+ buff[2] = ticks & 0xff;
+
+ err = tps6586x_writes(tps_dev, RTC_ALARM1_HI, sizeof(buff), buff);
+ if (err) {
+ dev_err(tps_dev, "unable to program alarm\n");
+ return err;
+ }
+
+ if (alrm->enabled && (rtc->irq != -1)) {
+ enable_irq(rtc->irq);
+ rtc->irq_en = true;
+ }
+
+ return err;
+}
+
+static int tps6586x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+ struct device *tps_dev = to_tps6586x_dev(dev);
+ unsigned long ticks;
+ unsigned long seconds;
+ u8 buff[3];
+ int err;
+
+ err = tps6586x_reads(tps_dev, RTC_ALARM1_HI, sizeof(buff), buff);
+ if (err)
+ return err;
+
+ ticks = (buff[0] << 16) | (buff[1] << 8) | buff[2];
+ seconds = ticks >> 10;
+ seconds += rtc->epoch_start;
+
+ rtc_time_to_tm(seconds, &alrm->time);
+ alrm->enabled = rtc->irq_en;
+
+ return 0;
+}
+
+static int tps6586x_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+
+ if (rtc->irq == -1)
+ return -EIO;
+
+ enabled = !!enabled;
+ if (enabled == rtc->irq_en)
+ return 0;
+
+ if (enabled)
+ enable_irq(rtc->irq);
+ else
+ disable_irq(rtc->irq);
+
+ rtc->irq_en = enabled;
+ return 0;
+}
+
+static const struct rtc_class_ops tps6586x_rtc_ops = {
+ .read_time = tps6586x_rtc_read_time,
+ .set_time = tps6586x_rtc_set_time,
+ .set_alarm = tps6586x_rtc_set_alarm,
+ .read_alarm = tps6586x_rtc_read_alarm,
+ .update_irq_enable = tps6586x_rtc_update_irq_enable,
+};
+
+static irqreturn_t tps6586x_rtc_irq(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static int __devinit tps6586x_rtc_probe(struct platform_device *pdev)
+{
+ struct tps6586x_rtc_platform_data *pdata = pdev->dev.platform_data;
+ struct device *tps_dev = to_tps6586x_dev(&pdev->dev);
+ struct tps6586x_rtc *rtc;
+ int err;
+
+ rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->irq = -1;
+ if (!pdata || (pdata->irq < 0))
+ dev_warn(&pdev->dev, "no IRQ specified, wakeup is disabled\n");
+
+ rtc->epoch_start = mktime(TPS_EPOCH, 1, 1, 0, 0, 0);
+
+ rtc->rtc = rtc_device_register("tps6586x-rtc", &pdev->dev,
+ &tps6586x_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc->rtc)) {
+ err = PTR_ERR(rtc->rtc);
+ goto fail;
+ }
+
+ /* disable high-res mode, enable tick counting */
+ err = tps6586x_update(tps_dev, RTC_CTRL,
+ (RTC_ENABLE | RTC_HIRES), RTC_ENABLE);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to start counter\n");
+ goto fail;
+ }
+
+ dev_set_drvdata(&pdev->dev, rtc);
+ if (pdata && (pdata->irq >= 0)) {
+ rtc->irq = pdata->irq;
+ err = request_threaded_irq(pdata->irq, NULL, tps6586x_rtc_irq,
+ IRQF_ONESHOT, "tps6586x-rtc",
+ &pdev->dev);
+ if (err) {
+ dev_warn(&pdev->dev, "unable to request IRQ\n");
+ rtc->irq = -1;
+ } else {
+ device_init_wakeup(&pdev->dev, 1);
+ disable_irq(rtc->irq);
+ enable_irq_wake(rtc->irq);
+ }
+ }
+
+ return 0;
+
+fail:
+ if (!IS_ERR_OR_NULL(rtc->rtc))
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ return err;
+}
+
+static int __devexit tps6586x_rtc_remove(struct platform_device *pdev)
+{
+ struct tps6586x_rtc *rtc = dev_get_drvdata(&pdev->dev);
+
+ if (rtc->irq != -1)
+ free_irq(rtc->irq, &pdev->dev);
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ return 0;
+}
+
+static struct platform_driver tps6586x_rtc_driver = {
+ .driver = {
+ .name = "tps6586x-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6586x_rtc_probe,
+ .remove = __devexit_p(tps6586x_rtc_remove),
+};
+
+static int __init tps6586x_rtc_init(void)
+{
+ return platform_driver_register(&tps6586x_rtc_driver);
+}
+module_init(tps6586x_rtc_init);
+
+static void __exit tps6586x_rtc_exit(void)
+{
+ platform_driver_unregister(&tps6586x_rtc_driver);
+}
+module_exit(tps6586x_rtc_exit);
+
+MODULE_DESCRIPTION("TI TPS6586x RTC driver");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 12900f7083b0..3053d8d8cd89 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -542,6 +542,14 @@ config SERIAL_S5PV210
help
Serial port support for Samsung's S5P Family of SoC's
+config SERIAL_TEGRA
+ boolean "High speed serial support for NVIDIA Tegra SoCs"
+ depends on ARCH_TEGRA && TEGRA_SYSTEM_DMA
+ select SERIAL_CORE
+ help
+ Support for the on-chip UARTs on NVIDIA Tegra SoCs, providing
+ /dev/ttyHSx, where x is determined by the number of UARTs on the
+ platform.
config SERIAL_MAX3100
tristate "MAX3100 support"
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 1ca4fd599ffe..f0faee6ec05a 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o
obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
+obj-$(CONFIG_SERIAL_TEGRA) += tegra_hsuart.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
diff --git a/drivers/serial/tegra_hsuart.c b/drivers/serial/tegra_hsuart.c
new file mode 100644
index 000000000000..09f5f454683c
--- /dev/null
+++ b/drivers/serial/tegra_hsuart.c
@@ -0,0 +1,1319 @@
+/*
+ * drivers/serial/tegra_hsuart.c
+ *
+ * High-speed serial driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (C) 2009 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*#define DEBUG 1*/
+/*#define VERBOSE_DEBUG 1*/
+
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/termios.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <mach/dma.h>
+#include <mach/clk.h>
+
+#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
+
+#define BYTES_TO_ALIGN(x) ((unsigned long)(ALIGN((x), sizeof(u32))) - \
+ (unsigned long)(x))
+
+#define UART_RX_DMA_BUFFER_SIZE (2048*4)
+
+#define UART_LSR_FIFOE 0x80
+#define UART_IER_EORD 0x20
+#define UART_MCR_RTS_EN 0x40
+#define UART_MCR_CTS_EN 0x20
+#define UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
+ UART_LSR_PE | UART_LSR_FE)
+
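+/* Build-time debug knobs: set to 1 to force PIO and bypass DMA. */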
+#define TX_FORCE_PIO 0
+#define RX_FORCE_PIO 0
+
+static const int dma_req_sel[] = {
+ TEGRA_DMA_REQ_SEL_UARTA,
+ TEGRA_DMA_REQ_SEL_UARTB,
+ TEGRA_DMA_REQ_SEL_UARTC,
+ TEGRA_DMA_REQ_SEL_UARTD,
+ TEGRA_DMA_REQ_SEL_UARTE,
+};
+
+#define TEGRA_TX_PIO 1
+#define TEGRA_TX_DMA 2
+
+#define TEGRA_UART_MIN_DMA 16
+#define TEGRA_UART_FIFO_SIZE 8
+
+/* The Tx FIFO trigger level encoding in the Tegra UART is the
+ * reverse of a conventional UART. */
+#define TEGRA_UART_TX_TRIG_16B 0x00
+#define TEGRA_UART_TX_TRIG_8B 0x10
+#define TEGRA_UART_TX_TRIG_4B 0x20
+#define TEGRA_UART_TX_TRIG_1B 0x30
+
+struct tegra_uart_port {
+ struct uart_port uport;
+ char port_name[32];
+
+ /* Module info */
+ unsigned long size;
+ struct clk *clk;
+ unsigned int baud;
+
+ /* Register shadow */
+ unsigned char fcr_shadow;
+ unsigned char mcr_shadow;
+ unsigned char lcr_shadow;
+ unsigned char ier_shadow;
+ bool use_cts_control;
+ bool rts_active;
+
+ int tx_in_progress;
+ unsigned int tx_bytes;
+
+ dma_addr_t xmit_dma_addr;
+
+ /* TX DMA */
+ struct tegra_dma_req tx_dma_req;
+ struct tegra_dma_channel *tx_dma;
+ struct work_struct tx_work;
+
+ /* RX DMA */
+ struct tegra_dma_req rx_dma_req;
+ struct tegra_dma_channel *rx_dma;
+
+ bool use_rx_dma;
+ bool use_tx_dma;
+
+ bool rx_timeout;
+ int rx_in_progress;
+};
+
+static inline u8 uart_readb(struct tegra_uart_port *t, unsigned long reg)
+{
+ u8 val = readb(t->uport.membase + (reg << t->uport.regshift));
+ dev_vdbg(t->uport.dev, "%s: %p %03lx = %02x\n", __func__,
+ t->uport.membase, reg << t->uport.regshift, val);
+ return val;
+}
+
+static inline void uart_writeb(struct tegra_uart_port *t, u8 val,
+ unsigned long reg)
+{
+ dev_vdbg(t->uport.dev, "%s: %p %03lx %02x\n",
+ __func__, t->uport.membase, reg << t->uport.regshift, val);
+ writeb(val, t->uport.membase + (reg << t->uport.regshift));
+}
+
+static inline void uart_writel(struct tegra_uart_port *t, u32 val,
+ unsigned long reg)
+{
+ dev_vdbg(t->uport.dev, "%s: %p %03lx %08x\n",
+ __func__, t->uport.membase, reg << t->uport.regshift, val);
+ writel(val, t->uport.membase + (reg << t->uport.regshift));
+}
+
+static void tegra_set_baudrate(struct tegra_uart_port *t, unsigned int baud);
+static void tegra_set_mctrl(struct uart_port *u, unsigned int mctrl);
+static void do_handle_rx_pio(struct tegra_uart_port *t);
+static void do_handle_rx_dma(struct tegra_uart_port *t);
+static void set_rts(struct tegra_uart_port *t, bool active);
+static void set_dtr(struct tegra_uart_port *t, bool active);
+
+static void fill_tx_fifo(struct tegra_uart_port *t, int max_bytes)
+{
+ int i;
+ struct circ_buf *xmit = &t->uport.state->xmit;
+
+ for (i = 0; i < max_bytes; i++) {
+ BUG_ON(uart_circ_empty(xmit));
+ uart_writeb(t, xmit->buf[xmit->tail], UART_TX);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ t->uport.icount.tx++;
+ }
+}
+
+static void tegra_start_pio_tx(struct tegra_uart_port *t, unsigned int bytes)
+{
+ if (bytes > TEGRA_UART_FIFO_SIZE)
+ bytes = TEGRA_UART_FIFO_SIZE;
+
+ t->fcr_shadow &= ~UART_FCR_T_TRIG_11;
+ t->fcr_shadow |= TEGRA_UART_TX_TRIG_8B;
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+ t->tx_in_progress = TEGRA_TX_PIO;
+ t->tx_bytes = bytes;
+ t->ier_shadow |= UART_IER_THRI;
+ uart_writeb(t, t->ier_shadow, UART_IER);
+}
+
+static void tegra_start_dma_tx(struct tegra_uart_port *t, unsigned long bytes)
+{
+ struct circ_buf *xmit;
+ xmit = &t->uport.state->xmit;
+
+ dma_sync_single_for_device(t->uport.dev, t->xmit_dma_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ t->fcr_shadow &= ~UART_FCR_T_TRIG_11;
+ t->fcr_shadow |= TEGRA_UART_TX_TRIG_4B;
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+
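+ /* The DMA engine moves whole 32-bit words, so round the length down
+ * to a multiple of 4; tegra_start_next_tx() drains any unaligned
+ * tail through the PIO path. */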
+ t->tx_bytes = bytes & ~(sizeof(u32)-1);
+ t->tx_dma_req.source_addr = t->xmit_dma_addr + xmit->tail;
+ t->tx_dma_req.size = t->tx_bytes;
+
+ t->tx_in_progress = TEGRA_TX_DMA;
+
+ tegra_dma_enqueue_req(t->tx_dma, &t->tx_dma_req);
+}
+
+/* Called with u->lock taken */
+static void tegra_start_next_tx(struct tegra_uart_port *t)
+{
+ unsigned long tail;
+ unsigned long count;
+
+ struct circ_buf *xmit;
+
+ xmit = &t->uport.state->xmit;
+ tail = (unsigned long)&xmit->buf[xmit->tail];
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+
+ dev_vdbg(t->uport.dev, "+%s %lu %d\n", __func__, count,
+ t->tx_in_progress);
+
+ if (count == 0)
+ goto out;
+
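+ /* Short transfers go out by PIO. For DMA, first emit enough bytes
+ * by PIO to word-align the buffer tail, then hand the rest to the
+ * DMA engine. */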
+ if (!t->use_tx_dma || count < TEGRA_UART_MIN_DMA)
+ tegra_start_pio_tx(t, count);
+ else if (BYTES_TO_ALIGN(tail) > 0)
+ tegra_start_pio_tx(t, BYTES_TO_ALIGN(tail));
+ else
+ tegra_start_dma_tx(t, count);
+
+out:
+ dev_vdbg(t->uport.dev, "-%s", __func__);
+}
+
+/* Called by serial core driver with u->lock taken. */
+static void tegra_start_tx(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+ struct circ_buf *xmit;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ xmit = &u->state->xmit;
+
+ if (!uart_circ_empty(xmit) && !t->tx_in_progress)
+ tegra_start_next_tx(t);
+}
+
+static int tegra_start_dma_rx(struct tegra_uart_port *t)
+{
+ wmb();
+ if (tegra_dma_enqueue_req(t->rx_dma, &t->rx_dma_req)) {
+ dev_err(t->uport.dev, "Could not enqueue Rx DMA req\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void tegra_rx_dma_threshold_callback(struct tegra_dma_req *req)
+{
+ struct tegra_uart_port *t = req->dev;
+ struct uart_port *u = &t->uport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&u->lock, flags);
+
+ do_handle_rx_dma(t);
+
+ spin_unlock_irqrestore(&u->lock, flags);
+}
+
+/* Callers are expected to hold the UART lock when this function is called.
+ *
+ * It runs in two contexts:
+ *
+ * 1. DMA ISR - the DMA ISR triggers the threshold complete callback, which
+ * calls the dequeue API, which in turn calls this callback. The UART lock
+ * is taken during the call to the threshold callback.
+ *
+ * 2. UART ISR - the UART ISR calls the dequeue API, which in turn calls
+ * this callback. In this case, the UART ISR takes the UART lock.
+ */
+static void tegra_rx_dma_complete_callback(struct tegra_dma_req *req)
+{
+ struct tegra_uart_port *t = req->dev;
+ struct uart_port *u = &t->uport;
+ struct tty_struct *tty = u->state->port.tty;
+
+ /* If we are here, DMA is stopped */
+
+ dev_dbg(t->uport.dev, "%s: %d %d\n", __func__, req->bytes_transferred,
+ req->status);
+ if (req->bytes_transferred) {
+ t->uport.icount.rx += req->bytes_transferred;
+ tty_insert_flip_string(tty,
+ ((unsigned char *)(req->virt_addr)),
+ req->bytes_transferred);
+ }
+
+ do_handle_rx_pio(t);
+
+ /* Push the read data later in caller place. */
+ if (req->status == -TEGRA_DMA_REQ_ERROR_ABORTED)
+ return;
+
+ spin_unlock(&u->lock);
+ tty_flip_buffer_push(u->state->port.tty);
+ spin_lock(&u->lock);
+}
+
+/* Lock already taken */
+static void do_handle_rx_dma(struct tegra_uart_port *t)
+{
+ struct uart_port *u = &t->uport;
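+
+ /* Drop RTS while the DMA buffer is swapped so the remote end pauses
+ * and the FIFO does not overrun between dequeue and re-enqueue. */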
+ if (t->rts_active)
+ set_rts(t, false);
+ tegra_dma_dequeue(t->rx_dma);
+ tty_flip_buffer_push(u->state->port.tty);
+ /* enqueue the request again */
+ tegra_start_dma_rx(t);
+ if (t->rts_active)
+ set_rts(t, true);
+}
+
+static char do_decode_rx_error(struct tegra_uart_port *t, u8 lsr)
+{
+ char flag = TTY_NORMAL;
+
+ if (unlikely(lsr & UART_LSR_ANY)) {
+ if (lsr & UART_LSR_OE) {
+ /* Overrun error */
+ flag |= TTY_OVERRUN;
+ t->uport.icount.overrun++;
+ dev_err(t->uport.dev, "Got overrun errors\n");
+ } else if (lsr & UART_LSR_PE) {
+ /* Parity error */
+ flag |= TTY_PARITY;
+ t->uport.icount.parity++;
+ dev_err(t->uport.dev, "Got Parity errors\n");
+ } else if (lsr & UART_LSR_FE) {
+ flag |= TTY_FRAME;
+ t->uport.icount.frame++;
+ dev_err(t->uport.dev, "Got frame errors\n");
+ } else if (lsr & UART_LSR_BI) {
+ dev_err(t->uport.dev, "Got Break\n");
+ t->uport.icount.brk++;
+ /* If FIFO read error without any data, reset Rx FIFO */
+ if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE)) {
+ unsigned char fcr = t->fcr_shadow;
+ fcr |= UART_FCR_CLEAR_RCVR;
+ uart_writeb(t, fcr, UART_FCR);
+ }
+ }
+ }
+ return flag;
+}
+
+static void do_handle_rx_pio(struct tegra_uart_port *t)
+{
+ int count = 0;
+ do {
+ char flag = TTY_NORMAL;
+ unsigned char lsr = 0;
+ unsigned char ch;
+
+
+ lsr = uart_readb(t, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ break;
+
+ flag = do_decode_rx_error(t, lsr);
+ ch = uart_readb(t, UART_RX);
+ t->uport.icount.rx++;
+ count++;
+
+ if (!uart_handle_sysrq_char(&t->uport, ch))
+ uart_insert_char(&t->uport, lsr, UART_LSR_OE, ch, flag);
+ } while (1);
+
+ dev_dbg(t->uport.dev, "PIO received %d bytes\n", count);
+
+ return;
+}
+
+static void do_handle_modem_signal(struct uart_port *u)
+{
+ unsigned char msr;
+ struct tegra_uart_port *t;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ msr = uart_readb(t, UART_MSR);
+ if (msr & UART_MSR_CTS)
+ dev_dbg(u->dev, "CTS triggered\n");
+ if (msr & UART_MSR_DSR)
+ dev_dbg(u->dev, "DSR enabled\n");
+ if (msr & UART_MSR_DCD)
+ dev_dbg(u->dev, "CD enabled\n");
+ if (msr & UART_MSR_RI)
+ dev_dbg(u->dev, "RI enabled\n");
+ return;
+}
+
+static void do_handle_tx_pio(struct tegra_uart_port *t)
+{
+ struct circ_buf *xmit = &t->uport.state->xmit;
+
+ fill_tx_fifo(t, t->tx_bytes);
+
+ t->tx_in_progress = 0;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&t->uport);
+
+ tegra_start_next_tx(t);
+ return;
+}
+
+static void tegra_tx_dma_complete_work(struct work_struct *work)
+{
+ struct tegra_uart_port *t =
+ container_of(work, struct tegra_uart_port, tx_work);
+ struct tegra_dma_req *req = &t->tx_dma_req;
+ unsigned long flags;
+ int timeout = 20;
+
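+ /* The DMA completion callback fires when the last word has been
+ * pushed into the FIFO, not when it has left the shifter; poll the
+ * LSR until both THRE and TEMT report empty before starting the
+ * next transfer. */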
+ while ((uart_readb(t, UART_LSR) & TX_EMPTY_STATUS) != TX_EMPTY_STATUS) {
+ timeout--;
+ if (timeout == 0) {
+ dev_err(t->uport.dev,
+ "timed out waiting for TX FIFO to empty\n");
+ return;
+ }
+ msleep(1);
+ }
+
+ spin_lock_irqsave(&t->uport.lock, flags);
+
+ t->tx_in_progress = 0;
+
+ if (req->status != -TEGRA_DMA_REQ_ERROR_ABORTED)
+ tegra_start_next_tx(t);
+
+ spin_unlock_irqrestore(&t->uport.lock, flags);
+}
+
+static void tegra_tx_dma_complete_callback(struct tegra_dma_req *req)
+{
+ struct tegra_uart_port *t = req->dev;
+ struct circ_buf *xmit = &t->uport.state->xmit;
+ int count = req->bytes_transferred;
+ unsigned long flags;
+
+ dev_vdbg(t->uport.dev, "%s: %d\n", __func__, count);
+
+ spin_lock_irqsave(&t->uport.lock, flags);
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&t->uport);
+
+ schedule_work(&t->tx_work);
+
+ spin_unlock_irqrestore(&t->uport.lock, flags);
+}
+
+static irqreturn_t tegra_uart_isr(int irq, void *data)
+{
+ struct tegra_uart_port *t = data;
+ struct uart_port *u = &t->uport;
+ unsigned char iir;
+ unsigned char ier;
+ bool is_rx_int = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&u->lock, flags);
+ t = container_of(u, struct tegra_uart_port, uport);
+ while (1) {
+ iir = uart_readb(t, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ if (likely(t->use_rx_dma) && is_rx_int) {
+ do_handle_rx_dma(t);
+
+ if (t->rx_in_progress) {
+ ier = t->ier_shadow;
+ ier |= (UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
+ t->ier_shadow = ier;
+ uart_writeb(t, ier, UART_IER);
+ }
+ }
+ spin_unlock_irqrestore(&u->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(u->dev, "tegra_uart_isr iir = 0x%x (%d)\n", iir,
+ (iir >> 1) & 0x7);
+ switch ((iir >> 1) & 0x7) {
+ case 0: /* Modem signal change interrupt */
+ do_handle_modem_signal(u);
+ break;
+ case 1: /* Transmit interrupt only triggered when using PIO */
+ t->ier_shadow &= ~UART_IER_THRI;
+ uart_writeb(t, t->ier_shadow, UART_IER);
+ do_handle_tx_pio(t);
+ break;
+ case 4: /* End of data */
+ case 6: /* Rx timeout */
+ case 2: /* Receive */
+ if (likely(t->use_rx_dma)) {
+ if (!is_rx_int) {
+ is_rx_int = true;
+ /* Disable interrupts */
+ ier = t->ier_shadow;
+ ier |= UART_IER_RDI;
+ uart_writeb(t, ier, UART_IER);
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
+ t->ier_shadow = ier;
+ uart_writeb(t, ier, UART_IER);
+ }
+ } else {
+ do_handle_rx_pio(t);
+
+ spin_unlock_irqrestore(&u->lock, flags);
+ tty_flip_buffer_push(u->state->port.tty);
+ spin_lock_irqsave(&u->lock, flags);
+ }
+ break;
+ case 3: /* Receive error */
+ /* FIXME how to handle this? Why do we get here */
+ do_decode_rx_error(t, uart_readb(t, UART_LSR));
+ break;
+ case 5: /* reserved, nothing to handle */
+ case 7: /* reserved, nothing to handle */
+ break;
+ }
+ }
+}
+
+static void tegra_stop_rx(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+ unsigned char ier;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+
+ if (t->rts_active)
+ set_rts(t, false);
+
+ if (t->rx_in_progress) {
+ ier = t->ier_shadow;
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
+ t->ier_shadow = ier;
+ uart_writeb(t, ier, UART_IER);
+ t->rx_in_progress = 0;
+ }
+ if (t->use_rx_dma && t->rx_dma) {
+ tegra_dma_dequeue(t->rx_dma);
+ tty_flip_buffer_push(u->state->port.tty);
+ }
+
+ return;
+}
+
+static void tegra_uart_hw_deinit(struct tegra_uart_port *t)
+{
+ unsigned char fcr;
+ unsigned long flags;
+
+ flush_work(&t->tx_work);
+
+ /* Disable interrupts */
+ uart_writeb(t, 0, UART_IER);
+
+ /* Wait for the transmit shift register to drain */
+ while (!(uart_readb(t, UART_LSR) & UART_LSR_TEMT))
+ cpu_relax();
+ udelay(200);
+
+ spin_lock_irqsave(&t->uport.lock, flags);
+
+ /* Reset the Rx and Tx FIFOs */
+ fcr = t->fcr_shadow;
+ fcr |= UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR;
+ uart_writeb(t, fcr, UART_FCR);
+
+ udelay(200);
+
+ clk_disable(t->clk);
+ t->baud = 0;
+
+ spin_unlock_irqrestore(&t->uport.lock, flags);
+}
+
+static void tegra_uart_free_rx_dma(struct tegra_uart_port *t)
+{
+ if (!t->use_rx_dma)
+ return;
+
+ tegra_dma_free_channel(t->rx_dma);
+ t->rx_dma = NULL;
+
+ if (likely(t->rx_dma_req.dest_addr))
+ dma_free_coherent(t->uport.dev, t->rx_dma_req.size,
+ t->rx_dma_req.virt_addr, t->rx_dma_req.dest_addr);
+ t->rx_dma_req.dest_addr = 0;
+ t->rx_dma_req.virt_addr = NULL;
+
+ t->use_rx_dma = false;
+}
+
+static int tegra_uart_hw_init(struct tegra_uart_port *t)
+{
+ unsigned char fcr;
+ unsigned char ier;
+
+ dev_vdbg(t->uport.dev, "+tegra_uart_hw_init\n");
+
+ t->fcr_shadow = 0;
+ t->mcr_shadow = 0;
+ t->lcr_shadow = 0;
+ t->ier_shadow = 0;
+ t->baud = 0;
+
+ clk_enable(t->clk);
+
+ /* Reset the UART controller to clear all previous status.*/
+ tegra_periph_reset_assert(t->clk);
+ udelay(100);
+ tegra_periph_reset_deassert(t->clk);
+ udelay(100);
+
+ t->rx_in_progress = 0;
+
+ /* Reset the FIFO twice with some delay to make sure that the FIFOs are
+ * really flushed. The wait is needed as the clearing needs to cross
+ * multiple clock domains.
+ */
+ t->fcr_shadow = UART_FCR_ENABLE_FIFO;
+
+ fcr = t->fcr_shadow;
+ fcr |= UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR;
+ uart_writeb(t, fcr, UART_FCR);
+
+ udelay(100);
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+ udelay(100);
+
+ /* Set the trigger level.
+ *
+ * For PIO mode:
+ *
+ * For receive, this will interrupt the CPU after that many bytes have
+ * been received; any remaining bytes are signalled by the receive
+ * timeout interrupt.
+ *
+ * The Rx high watermark is set to 4.
+ *
+ * For transmit, if the transmit interrupt is enabled, this will
+ * interrupt the CPU when the number of entries in the FIFO reaches the
+ * low watermark.
+ *
+ * The Tx low watermark is set to 8.
+ *
+ * For DMA mode:
+ *
+ * Set the Tx trigger to 4. This should match the DMA burst size
+ * programmed in the DMA registers.
+ */
+ t->fcr_shadow |= UART_FCR_R_TRIG_01;
+ t->fcr_shadow |= TEGRA_UART_TX_TRIG_8B;
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+
+ if (t->use_rx_dma) {
+ /* initialize the UART for a simple default configuration
+ * so that the receive DMA buffer may be enqueued */
+ t->lcr_shadow = 3; /* no parity, 1 stop bit, 8 data bits */
+ tegra_set_baudrate(t, 9600);
+ t->fcr_shadow |= UART_FCR_DMA_SELECT;
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+ if (tegra_start_dma_rx(t)) {
+ dev_err(t->uport.dev, "Rx DMA enqueue failed\n");
+ tegra_uart_free_rx_dma(t);
+ t->fcr_shadow &= ~UART_FCR_DMA_SELECT;
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+ }
+ } else {
+ uart_writeb(t, t->fcr_shadow, UART_FCR);
+ }
+
+ t->rx_in_progress = 1;
+
+ /*
+ * Enable IE_RXS for the receive status interrupts like line errors.
+ * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
+ *
+ * If using DMA mode, enable EORD instead of the receive interrupt;
+ * it fires after the UART is done with the receive rather than when
+ * the FIFO "threshold" is reached.
+ *
+ * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs
+ * when the data is sitting in the FIFO and cannot be transferred to
+ * the DMA because the 4-byte DMA size alignment is not met. EORD is
+ * triggered when the incoming data stream pauses for four character
+ * times.
+ *
+ * For pauses in the data which are not aligned to 4 bytes, we get
+ * both EORD and RX_TIMEOUT - SW sees RX_TIMEOUT first, then EORD.
+ *
+ * Don't get confused, believe in the magic of nvidia hw...:-)
+ */
+ ier = 0;
+ ier |= UART_IER_RLSI | UART_IER_RTOIE;
+ if (t->use_rx_dma)
+ ier |= UART_IER_EORD;
+ else
+ ier |= UART_IER_RDI;
+ t->ier_shadow = ier;
+ uart_writeb(t, ier, UART_IER);
+
+ dev_vdbg(t->uport.dev, "-tegra_uart_hw_init\n");
+ return 0;
+}
+
+static int tegra_uart_init_rx_dma(struct tegra_uart_port *t)
+{
+ dma_addr_t rx_dma_phys;
+ void *rx_dma_virt;
+
+ t->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_CONTINUOUS);
+ if (!t->rx_dma) {
+ dev_err(t->uport.dev, "%s: failed to allocate RX DMA.\n", __func__);
+ return -ENODEV;
+ }
+
+ t->rx_dma_req.size = UART_RX_DMA_BUFFER_SIZE;
+ rx_dma_virt = dma_alloc_coherent(t->uport.dev,
+ t->rx_dma_req.size, &rx_dma_phys, GFP_KERNEL);
+ if (!rx_dma_virt) {
+ dev_err(t->uport.dev, "DMA buffers allocate failed\n");
+ goto fail;
+ }
+ t->rx_dma_req.dest_addr = rx_dma_phys;
+ t->rx_dma_req.virt_addr = rx_dma_virt;
+
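+ /* The source is the fixed RX FIFO register: byte-wide accesses that
+ * wrap on a 4-byte boundary, draining into the linear DMA buffer. */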
+ t->rx_dma_req.source_addr = (unsigned long)t->uport.mapbase;
+ t->rx_dma_req.source_wrap = 4;
+ t->rx_dma_req.dest_wrap = 0;
+ t->rx_dma_req.to_memory = 1;
+ t->rx_dma_req.source_bus_width = 8;
+ t->rx_dma_req.dest_bus_width = 32;
+ t->rx_dma_req.req_sel = dma_req_sel[t->uport.line];
+ t->rx_dma_req.complete = tegra_rx_dma_complete_callback;
+ t->rx_dma_req.threshold = tegra_rx_dma_threshold_callback;
+ t->rx_dma_req.dev = t;
+
+ return 0;
+fail:
+ tegra_uart_free_rx_dma(t);
+ return -ENODEV;
+}
+
+static int tegra_startup(struct uart_port *u)
+{
+ struct tegra_uart_port *t = container_of(u,
+ struct tegra_uart_port, uport);
+ int ret = 0;
+
+ snprintf(t->port_name, sizeof(t->port_name), "tegra_uart_%d", u->line);
+
+ t->use_tx_dma = false;
+ if (!TX_FORCE_PIO) {
+ t->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
+ if (t->tx_dma)
+ t->use_tx_dma = true;
+ else
+ pr_err("%s: failed to allocate TX DMA.\n", __func__);
+ }
+ if (t->use_tx_dma) {
+ t->tx_dma_req.instance = u->line;
+ t->tx_dma_req.complete = tegra_tx_dma_complete_callback;
+ t->tx_dma_req.to_memory = 0;
+
+ t->tx_dma_req.dest_addr = (unsigned long)t->uport.mapbase;
+ t->tx_dma_req.dest_wrap = 4;
+ t->tx_dma_req.source_wrap = 0;
+ t->tx_dma_req.source_bus_width = 32;
+ t->tx_dma_req.dest_bus_width = 8;
+ t->tx_dma_req.req_sel = dma_req_sel[t->uport.line];
+ t->tx_dma_req.dev = t;
+ t->tx_dma_req.size = 0;
+ t->xmit_dma_addr = dma_map_single(t->uport.dev,
+ t->uport.state->xmit.buf, UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+ }
+ t->tx_in_progress = 0;
+
+ t->use_rx_dma = false;
+ if (!RX_FORCE_PIO) {
+ if (!tegra_uart_init_rx_dma(t))
+ t->use_rx_dma = true;
+ }
+
+ ret = tegra_uart_hw_init(t);
+ if (ret)
+ goto fail;
+
+ dev_dbg(u->dev, "Requesting IRQ %d\n", u->irq);
+ msleep(1);
+
+ ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED,
+ t->port_name, t);
+ if (ret) {
+ dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
+ goto fail;
+ }
+ dev_dbg(u->dev,"Started UART port %d\n", u->line);
+
+ return 0;
+fail:
+ dev_err(u->dev, "Tegra UART startup failed\n");
+ return ret;
+}
+
+static void tegra_shutdown(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ dev_vdbg(u->dev, "+tegra_shutdown\n");
+
+ tegra_uart_hw_deinit(t);
+
+ t->rx_in_progress = 0;
+ t->tx_in_progress = 0;
+
+ tegra_uart_free_rx_dma(t);
+ if (t->use_tx_dma) {
+ tegra_dma_free_channel(t->tx_dma);
+ t->tx_dma = NULL;
+ t->use_tx_dma = false;
+ dma_unmap_single(t->uport.dev, t->xmit_dma_addr, UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+ t->xmit_dma_addr = 0;
+ }
+
+ free_irq(u->irq, t);
+ dev_vdbg(u->dev, "-tegra_shutdown\n");
+}
+
+static unsigned int tegra_get_mctrl(struct uart_port *u)
+{
+ /* RI - Ring detector is active.
+ * CD/DCD/CAR - Carrier detect is always active. For some reason
+ * Linux has several names for carrier detect.
+ * DSR - Data Set Ready is reported active, as the hardware doesn't
+ * support it.
+ * CTS - Clear To Send. Always reported active, as the hardware
+ * handles CTS automatically.
+ */
+ return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void set_rts(struct tegra_uart_port *t, bool active)
+{
+ unsigned char mcr;
+ mcr = t->mcr_shadow;
+ if (active)
+ mcr |= UART_MCR_RTS;
+ else
+ mcr &= ~UART_MCR_RTS;
+ if (mcr != t->mcr_shadow) {
+ uart_writeb(t, mcr, UART_MCR);
+ t->mcr_shadow = mcr;
+ }
+ return;
+}
+
+static void set_dtr(struct tegra_uart_port *t, bool active)
+{
+ unsigned char mcr;
+ mcr = t->mcr_shadow;
+ if (active)
+ mcr |= UART_MCR_DTR;
+ else
+ mcr &= ~UART_MCR_DTR;
+ if (mcr != t->mcr_shadow) {
+ uart_writeb(t, mcr, UART_MCR);
+ t->mcr_shadow = mcr;
+ }
+ return;
+}
+
+static void tegra_set_mctrl(struct uart_port *u, unsigned int mctrl)
+{
+ unsigned char mcr;
+ struct tegra_uart_port *t;
+
+ dev_dbg(u->dev, "tegra_set_mctrl called with %d\n", mctrl);
+ t = container_of(u, struct tegra_uart_port, uport);
+
+ mcr = t->mcr_shadow;
+ if (mctrl & TIOCM_RTS) {
+ t->rts_active = true;
+ set_rts(t, true);
+ } else {
+ t->rts_active = false;
+ set_rts(t, false);
+ }
+
+ if (mctrl & TIOCM_DTR)
+ set_dtr(t, true);
+ else
+ set_dtr(t, false);
+ return;
+}
+
+static void tegra_break_ctl(struct uart_port *u, int break_ctl)
+{
+ struct tegra_uart_port *t;
+ unsigned char lcr;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ lcr = t->lcr_shadow;
+ if (break_ctl)
+ lcr |= UART_LCR_SBC;
+ else
+ lcr &= ~UART_LCR_SBC;
+ uart_writeb(t, lcr, UART_LCR);
+ t->lcr_shadow = lcr;
+}
+
+static int tegra_request_port(struct uart_port *u)
+{
+ return 0;
+}
+
+static void tegra_release_port(struct uart_port *u)
+{
+
+}
+
+static unsigned int tegra_tx_empty(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+ unsigned int ret = 0;
+ unsigned long flags;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ dev_vdbg(u->dev, "+tegra_tx_empty\n");
+
+ spin_lock_irqsave(&u->lock, flags);
+ if (!t->tx_in_progress)
+ ret = TIOCSER_TEMT;
+ spin_unlock_irqrestore(&u->lock, flags);
+
+ dev_vdbg(u->dev, "-tegra_tx_empty\n");
+ return ret;
+}
+
+static void tegra_stop_tx(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+
+ if (t->use_tx_dma)
+ tegra_dma_dequeue_req(t->tx_dma, &t->tx_dma_req);
+
+ return;
+}
+
+static void tegra_enable_ms(struct uart_port *u)
+{
+}
+
+#define UART_CLOCK_ACCURACY 5
+
+static void tegra_set_baudrate(struct tegra_uart_port *t, unsigned int baud)
+{
+ unsigned long rate;
+ unsigned int divisor;
+ unsigned char lcr;
+
+ if (t->baud == baud)
+ return;
+
+ rate = clk_get_rate(t->clk);
+
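+ /* Standard 16x oversampling divisor, rounded to the nearest
+ * integer: divisor = (rate / 16 + baud / 2) / baud. */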
+ divisor = (rate / 16 + baud / 2) / baud;
+
+ lcr = t->lcr_shadow;
+ lcr |= UART_LCR_DLAB;
+ uart_writeb(t, lcr, UART_LCR);
+
+ uart_writel(t, divisor & 0xFF, UART_TX);
+ uart_writel(t, ((divisor >> 8) & 0xFF), UART_IER);
+
+ lcr &= ~UART_LCR_DLAB;
+ uart_writeb(t, lcr, UART_LCR);
+
+ t->baud = baud;
+ dev_dbg(t->uport.dev, "Baud %u clock freq %lu and divisor of %u\n",
+ baud, rate, divisor);
+}
+
+static void tegra_set_termios(struct uart_port *u, struct ktermios *termios,
+ struct ktermios *oldtermios)
+{
+ struct tegra_uart_port *t;
+ unsigned int baud;
+ unsigned long flags;
+ unsigned int lcr;
+ unsigned int c_cflag = termios->c_cflag;
+ unsigned char mcr;
+
+ t = container_of(u, struct tegra_uart_port, uport);
+ dev_vdbg(t->uport.dev, "+tegra_set_termios\n");
+
+ spin_lock_irqsave(&u->lock, flags);
+
+ /* Changing configuration, it is safe to stop any rx now */
+ if (t->rts_active)
+ set_rts(t, false);
+
+ /* Baud rate */
+ baud = uart_get_baud_rate(u, termios, oldtermios, 200, 4000000);
+ tegra_set_baudrate(t, baud);
+
+ /* Parity */
+ lcr = t->lcr_shadow;
+ lcr &= ~UART_LCR_PARITY;
+ if (PARENB == (c_cflag & PARENB)) {
+ if (CMSPAR == (c_cflag & CMSPAR)) {
+ /* FIXME What is space parity? */
+ /* data |= SPACE_PARITY; */
+ } else if (c_cflag & PARODD) {
+ lcr |= UART_LCR_PARITY;
+ lcr &= ~UART_LCR_EPAR;
+ lcr &= ~UART_LCR_SPAR;
+ } else {
+ lcr |= UART_LCR_PARITY;
+ lcr |= UART_LCR_EPAR;
+ lcr &= ~UART_LCR_SPAR;
+ }
+ }
+
+ lcr &= ~UART_LCR_WLEN8;
+ switch (c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ break;
+ default:
+ lcr |= UART_LCR_WLEN8;
+ break;
+ }
+
+ /* Stop bits */
+ if (termios->c_cflag & CSTOPB)
+ lcr |= UART_LCR_STOP;
+ else
+ lcr &= ~UART_LCR_STOP;
+
+ uart_writeb(t, lcr, UART_LCR);
+ t->lcr_shadow = lcr;
+
+ /* Flow control */
+ if (termios->c_cflag & CRTSCTS) {
+ mcr = t->mcr_shadow;
+ mcr |= UART_MCR_CTS_EN;
+ mcr &= ~UART_MCR_RTS_EN;
+ t->mcr_shadow = mcr;
+ uart_writeb(t, mcr, UART_MCR);
+ t->use_cts_control = true;
+ /* if top layer has asked to set rts active then do so here */
+ if (t->rts_active)
+ set_rts(t, true);
+ } else {
+ mcr = t->mcr_shadow;
+ mcr &= ~UART_MCR_CTS_EN;
+ mcr &= ~UART_MCR_RTS_EN;
+ t->mcr_shadow = mcr;
+ uart_writeb(t, mcr, UART_MCR);
+ t->use_cts_control = false;
+ }
+
+ /* update the port timeout based on new settings */
+ uart_update_timeout(u, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&u->lock, flags);
+ dev_vdbg(t->uport.dev, "-tegra_set_termios\n");
+ return;
+}
+
+/*
+ * Flush any TX data submitted for DMA. Called when the TX circular
+ * buffer is reset.
+ */
+static void tegra_flush_buffer(struct uart_port *u)
+{
+ struct tegra_uart_port *t;
+
+ dev_vdbg(u->dev, "%s called", __func__);
+
+ t = container_of(u, struct tegra_uart_port, uport);
+
+ if (t->use_tx_dma) {
+ tegra_dma_dequeue_req(t->tx_dma, &t->tx_dma_req);
+ t->tx_dma_req.size = 0;
+ }
+ return;
+}
+
+
+static void tegra_pm(struct uart_port *u, unsigned int state,
+ unsigned int oldstate)
+{
+
+}
+
+static const char *tegra_type(struct uart_port *u)
+{
+ return NULL;
+}
+
+static struct uart_ops tegra_uart_ops = {
+ .tx_empty = tegra_tx_empty,
+ .set_mctrl = tegra_set_mctrl,
+ .get_mctrl = tegra_get_mctrl,
+ .stop_tx = tegra_stop_tx,
+ .start_tx = tegra_start_tx,
+ .stop_rx = tegra_stop_rx,
+ .flush_buffer = tegra_flush_buffer,
+ .enable_ms = tegra_enable_ms,
+ .break_ctl = tegra_break_ctl,
+ .startup = tegra_startup,
+ .shutdown = tegra_shutdown,
+ .set_termios = tegra_set_termios,
+ .pm = tegra_pm,
+ .type = tegra_type,
+ .request_port = tegra_request_port,
+ .release_port = tegra_release_port,
+};
+
+static int tegra_uart_probe(struct platform_device *pdev);
+static int __devexit tegra_uart_remove(struct platform_device *pdev);
+static int tegra_uart_suspend(struct platform_device *pdev, pm_message_t state);
+static int tegra_uart_resume(struct platform_device *pdev);
+
+static struct platform_driver tegra_uart_platform_driver = {
+ .remove = tegra_uart_remove,
+ .probe = tegra_uart_probe,
+ .suspend = tegra_uart_suspend,
+ .resume = tegra_uart_resume,
+ .driver = {
+ .name = "tegra_uart"
+ }
+};
+
+static struct uart_driver tegra_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "tegra_uart",
+ .dev_name = "ttyHS",
+ .cons = 0,
+ .nr = 5,
+};
+
+static int tegra_uart_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_uart_port *t = platform_get_drvdata(pdev);
+ struct uart_port *u;
+
+ if (pdev->id < 0 || pdev->id >= tegra_uart_driver.nr)
+ pr_err("Invalid Uart instance (%d)\n", pdev->id);
+
+ u = &t->uport;
+ uart_suspend_port(&tegra_uart_driver, u);
+
+ flush_work(&t->tx_work);
+ return 0;
+}
+
+static int tegra_uart_resume(struct platform_device *pdev)
+{
+ struct tegra_uart_port *t = platform_get_drvdata(pdev);
+ struct uart_port *u;
+
+ if (pdev->id < 0 || pdev->id >= tegra_uart_driver.nr)
+ pr_err("Invalid Uart instance (%d)\n", pdev->id);
+
+ u = &t->uport;
+ uart_resume_port(&tegra_uart_driver, u);
+ return 0;
+}
+
+
+
+static int __devexit tegra_uart_remove(struct platform_device *pdev)
+{
+ struct tegra_uart_port *t = platform_get_drvdata(pdev);
+ struct uart_port *u;
+
+ if (pdev->id < 0 || pdev->id >= tegra_uart_driver.nr)
+ pr_err("Invalid Uart instance (%d)\n", pdev->id);
+
+ u = &t->uport;
+ uart_remove_one_port(&tegra_uart_driver, u);
+
+ platform_set_drvdata(pdev, NULL);
+
+ pr_info("Unregistered UART port %s%d\n",
+ tegra_uart_driver.dev_name, u->line);
+ kfree(t);
+ return 0;
+}
+
+static int tegra_uart_probe(struct platform_device *pdev)
+{
+ struct tegra_uart_port *t;
+ struct uart_port *u;
+ struct resource *resource;
+ int ret;
+ char name[64];
+ if (pdev->id < 0 || pdev->id >= tegra_uart_driver.nr) {
+ pr_err("Invalid Uart instance (%d)\n", pdev->id);
+ return -ENODEV;
+ }
+
+ t = kzalloc(sizeof(struct tegra_uart_port), GFP_KERNEL);
+ if (!t) {
+ pr_err("%s: Failed to allocate memory\n", __func__);
+ return -ENOMEM;
+ }
+ u = &t->uport;
+ u->dev = &pdev->dev;
+ platform_set_drvdata(pdev, t);
+ u->line = pdev->id;
+ u->ops = &tegra_uart_ops;
+ u->type = ~PORT_UNKNOWN;
+ u->fifosize = 32;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource)) {
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ u->mapbase = resource->start;
+ u->membase = IO_ADDRESS(u->mapbase);
+ if (unlikely(!u->membase)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ u->irq = platform_get_irq(pdev, 0);
+ if (unlikely(u->irq < 0)) {
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ u->regshift = 2;
+
+ t->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR_OR_NULL(t->clk)) {
+ dev_err(&pdev->dev, "Couldn't get the clock\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = uart_add_one_port(&tegra_uart_driver, u);
+ if (ret) {
+ pr_err("%s: Failed(%d) to add uart port %s%d\n",
+ __func__, ret, tegra_uart_driver.dev_name, u->line);
+ kfree(t);
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+ }
+
+ snprintf(name, sizeof(name), "tegra_hsuart_%d", u->line);
+ pr_info("Registered UART port %s%d\n",
+ tegra_uart_driver.dev_name, u->line);
+
+ INIT_WORK(&t->tx_work, tegra_tx_dma_complete_work);
+ return ret;
+fail:
+ kfree(t);
+ return ret;
+}
+
+static int __init tegra_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&tegra_uart_driver);
+ if (unlikely(ret)) {
+ pr_err("Could not register %s driver\n",
+ tegra_uart_driver.driver_name);
+ return ret;
+ }
+
+ ret = platform_driver_register(&tegra_uart_platform_driver);
+ if (unlikely(ret)) {
+ pr_err("Could not register the UART platfrom "
+ "driver\n");
+ uart_unregister_driver(&tegra_uart_driver);
+ return ret;
+ }
+
+ pr_info("Initialized tegra uart driver\n");
+ return 0;
+}
+
+static void __exit tegra_uart_exit(void)
+{
+ pr_info("Unloading tegra uart driver\n");
+ platform_driver_unregister(&tegra_uart_platform_driver);
+ uart_unregister_driver(&tegra_uart_driver);
+}
+
+module_init(tegra_uart_init);
+module_exit(tegra_uart_exit);
+MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 91c2f4f3af10..9fdb309defbb 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -298,6 +298,13 @@ config SPI_STMP3XXX
help
SPI driver for Freescale STMP37xx/378x SoC SSP interface
+config SPI_TEGRA
+ tristate "Nvidia Tegra SPI controller"
+ depends on ARCH_TEGRA
+ select TEGRA_SYSTEM_DMA
+ help
+ SPI driver for NVIDIA Tegra SoCs
+
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
depends on GENERIC_GPIO && CPU_TX49XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index e9cbd18217a0..b6573d8ea823 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
+obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c
new file mode 100644
index 000000000000..842ac14f745d
--- /dev/null
+++ b/drivers/spi/spi_tegra.c
@@ -0,0 +1,676 @@
+/*
+ * Driver for the NVIDIA Tegra SPI controller.
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <linux/spi/spi.h>
+
+#include <mach/dma.h>
+
+#define SLINK_COMMAND 0x000
+#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
+#define SLINK_BOTH_EN (1 << 10)
+#define SLINK_CS_SW (1 << 11)
+#define SLINK_CS_VALUE (1 << 12)
+#define SLINK_CS_POLARITY (1 << 13)
+#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
+#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
+#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
+#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
+#define SLINK_IDLE_SDA_MASK (3 << 16)
+#define SLINK_CS_POLARITY1 (1 << 20)
+#define SLINK_CK_SDA (1 << 21)
+#define SLINK_CS_POLARITY2 (1 << 22)
+#define SLINK_CS_POLARITY3 (1 << 23)
+#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
+#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
+#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
+#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
+#define SLINK_IDLE_SCLK_MASK (3 << 24)
+#define SLINK_M_S (1 << 28)
+#define SLINK_WAIT (1 << 29)
+#define SLINK_GO (1 << 30)
+#define SLINK_ENB (1 << 31)
+
+#define SLINK_COMMAND2 0x004
+#define SLINK_LSBFE (1 << 0)
+#define SLINK_SSOE (1 << 1)
+#define SLINK_SPIE (1 << 4)
+#define SLINK_BIDIROE (1 << 6)
+#define SLINK_MODFEN (1 << 7)
+#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
+#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
+#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
+#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
+#define SLINK_FIFO_REFILLS_0 (0 << 22)
+#define SLINK_FIFO_REFILLS_1 (1 << 22)
+#define SLINK_FIFO_REFILLS_2 (2 << 22)
+#define SLINK_FIFO_REFILLS_3 (3 << 22)
+#define SLINK_FIFO_REFILLS_MASK (3 << 22)
+#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
+#define SLINK_SPC0 (1 << 29)
+#define SLINK_TXEN (1 << 30)
+#define SLINK_RXEN (1 << 31)
+
+#define SLINK_STATUS 0x008
+#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
+#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
+#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define SLINK_MODF (1 << 16)
+#define SLINK_RX_UNF (1 << 18)
+#define SLINK_TX_OVF (1 << 19)
+#define SLINK_TX_FULL (1 << 20)
+#define SLINK_TX_EMPTY (1 << 21)
+#define SLINK_RX_FULL (1 << 22)
+#define SLINK_RX_EMPTY (1 << 23)
+#define SLINK_TX_UNF (1 << 24)
+#define SLINK_RX_OVF (1 << 25)
+#define SLINK_TX_FLUSH (1 << 26)
+#define SLINK_RX_FLUSH (1 << 27)
+#define SLINK_SCLK (1 << 28)
+#define SLINK_ERR (1 << 29)
+#define SLINK_RDY (1 << 30)
+#define SLINK_BSY (1 << 31)
+
+#define SLINK_MAS_DATA 0x010
+#define SLINK_SLAVE_DATA 0x014
+
+#define SLINK_DMA_CTL 0x018
+#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
+#define SLINK_TX_TRIG_1 (0 << 16)
+#define SLINK_TX_TRIG_4 (1 << 16)
+#define SLINK_TX_TRIG_8 (2 << 16)
+#define SLINK_TX_TRIG_16 (3 << 16)
+#define SLINK_TX_TRIG_MASK (3 << 16)
+#define SLINK_RX_TRIG_1 (0 << 18)
+#define SLINK_RX_TRIG_4 (1 << 18)
+#define SLINK_RX_TRIG_8 (2 << 18)
+#define SLINK_RX_TRIG_16 (3 << 18)
+#define SLINK_RX_TRIG_MASK (3 << 18)
+#define SLINK_PACKED (1 << 20)
+#define SLINK_PACK_SIZE_4 (0 << 21)
+#define SLINK_PACK_SIZE_8 (1 << 21)
+#define SLINK_PACK_SIZE_16 (2 << 21)
+#define SLINK_PACK_SIZE_32 (3 << 21)
+#define SLINK_PACK_SIZE_MASK (3 << 21)
+#define SLINK_IE_TXC (1 << 26)
+#define SLINK_IE_RXC (1 << 27)
+#define SLINK_DMA_EN (1 << 31)
+
+#define SLINK_STATUS2 0x01c
+#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f) >> 16)
+
+#define SLINK_TX_FIFO 0x100
+#define SLINK_RX_FIFO 0x180
+
+static const unsigned long spi_tegra_req_sels[] = {
+ TEGRA_DMA_REQ_SEL_SL2B1,
+ TEGRA_DMA_REQ_SEL_SL2B2,
+ TEGRA_DMA_REQ_SEL_SL2B3,
+ TEGRA_DMA_REQ_SEL_SL2B4,
+};
+
+#define BB_LEN 32
+
+struct spi_tegra_data {
+ struct spi_master *master;
+ struct platform_device *pdev;
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ unsigned long phys;
+
+ u32 cur_speed;
+
+ struct list_head queue;
+ struct spi_transfer *cur;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned cur_bytes_per_word;
+
+ /* The tegra spi controller has a bug which causes the first word
+ * in PIO transactions to be garbage. Since packed DMA transactions
+ * require transfers to be 4 byte aligned we need a bounce buffer
+ * for the generic case.
+ */
+ struct tegra_dma_req rx_dma_req;
+ struct tegra_dma_channel *rx_dma;
+ u32 *rx_bb;
+ dma_addr_t rx_bb_phys;
+ bool is_suspended;
+ unsigned long save_slink_cmd;
+};
+
+
+static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
+ unsigned long val,
+ unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+}
+
+static void spi_tegra_go(struct spi_tegra_data *tspi)
+{
+ unsigned long val;
+
+ wmb();
+
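+ /* DMA_BLOCK_SIZE is programmed as the transfer length in words
+ * minus one. */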
+ val = spi_tegra_readl(tspi, SLINK_DMA_CTL);
+ val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN;
+ val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1);
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+
+ tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
+
+ val |= SLINK_DMA_EN;
+ spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
+}
+
+static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned len = min(t->len - tspi->cur_pos, BB_LEN *
+ tspi->cur_bytes_per_word);
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos;
+ int i, j;
+ unsigned long val;
+
+ val = spi_tegra_readl(tspi, SLINK_COMMAND);
+ val &= ~SLINK_WORD_SIZE(~0);
+ val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1);
+ spi_tegra_writel(tspi, val, SLINK_COMMAND);
+
+ for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
+ val = 0;
+ for (j = 0; j < tspi->cur_bytes_per_word; j++)
+ val |= tx_buf[i + j] << j * 8;
+
+ spi_tegra_writel(tspi, val, SLINK_TX_FIFO);
+ }
+
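+ /* The FIFO runs unpacked: each 32-bit entry carries one SPI word,
+ * so RX DMA reads back four bytes per word regardless of
+ * bits_per_word. */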
+ tspi->rx_dma_req.size = len / tspi->cur_bytes_per_word * 4;
+
+ return len;
+}
+
+static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned len = tspi->cur_len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos;
+ int i, j;
+ unsigned long val;
+
+ for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
+ val = tspi->rx_bb[i / tspi->cur_bytes_per_word];
+ for (j = 0; j < tspi->cur_bytes_per_word; j++)
+ rx_buf[i + j] = (val >> (j * 8)) & 0xff;
+ }
+
+ return len;
+}
+
+static void spi_tegra_start_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u8 bits_per_word;
+ unsigned long val;
+
+ speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word;
+
+ tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1;
+
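+ /* cur_speed == 0 marks the controller idle with its clock gated;
+ * enable the clock again on the first transfer of a new run. */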
+ if (speed != tspi->cur_speed)
+ clk_set_rate(tspi->clk, speed);
+
+ if (tspi->cur_speed == 0)
+ clk_enable(tspi->clk);
+
+ tspi->cur_speed = speed;
+
+ val = spi_tegra_readl(tspi, SLINK_COMMAND2);
+ val &= ~(SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN);
+ if (t->rx_buf)
+ val |= SLINK_RXEN;
+ if (t->tx_buf)
+ val |= SLINK_TXEN;
+ val |= SLINK_SS_EN_CS(spi->chip_select);
+ val |= SLINK_SPIE;
+ val |= SLINK_SS_SETUP(3);
+ spi_tegra_writel(tspi, val, SLINK_COMMAND2);
+
+ val = spi_tegra_readl(tspi, SLINK_COMMAND);
+ val &= ~SLINK_BIT_LENGTH(~0);
+ val |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ /* FIXME: should probably control CS manually so that we can be sure
+ * it does not go low between transfer and to support delay_usecs
+ * correctly.
+ */
+ val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW;
+
+ if (spi->mode & SPI_CPHA)
+ val |= SLINK_CK_SDA;
+
+ if (spi->mode & SPI_CPOL)
+ val |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ val |= SLINK_IDLE_SCLK_DRIVE_LOW;
+
+ val |= SLINK_M_S;
+
+ spi_tegra_writel(tspi, val, SLINK_COMMAND);
+
+ spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS);
+
+ tspi->cur = t;
+ tspi->cur_pos = 0;
+ tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t);
+
+ spi_tegra_go(tspi);
+}
+
+static void spi_tegra_start_message(struct spi_device *spi,
+ struct spi_message *m)
+{
+ struct spi_transfer *t;
+
+ m->actual_length = 0;
+ m->status = 0;
+
+ t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
+ spi_tegra_start_transfer(spi, t);
+}
+
+static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
+{
+ struct spi_tegra_data *tspi = req->dev;
+ unsigned long flags;
+ struct spi_message *m;
+ struct spi_device *spi;
+ int timeout = 0;
+ unsigned long val;
+
+ /* the SPI controller may come back with both the BSY and RDY bits
+ * set. In this case we need to wait for the BSY bit to clear so
+ * that we are sure the DMA is finished. 1000 reads was empirically
+ * determined to be long enough.
+ */
+ while (timeout++ < 1000) {
+ if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY))
+ break;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+
+ val = spi_tegra_readl(tspi, SLINK_STATUS);
+ val |= SLINK_RDY;
+ spi_tegra_writel(tspi, val, SLINK_STATUS);
+
+ m = list_first_entry(&tspi->queue, struct spi_message, queue);
+
+ if (timeout >= 1000)
+ m->status = -EIO;
+
+ spi = m->state;
+
+ tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur);
+ m->actual_length += tspi->cur_pos;
+
+ if (tspi->cur_pos < tspi->cur->len) {
+ tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur);
+ spi_tegra_go(tspi);
+ } else if (!list_is_last(&tspi->cur->transfer_list,
+ &m->transfers)) {
+ tspi->cur = list_first_entry(&tspi->cur->transfer_list,
+ struct spi_transfer,
+ transfer_list);
+ spi_tegra_start_transfer(spi, tspi->cur);
+ } else {
+ list_del(&m->queue);
+
+ m->complete(m->context);
+
+ if (!list_empty(&tspi->queue)) {
+ m = list_first_entry(&tspi->queue, struct spi_message,
+ queue);
+ spi = m->state;
+ spi_tegra_start_message(spi, m);
+ } else {
+ clk_disable(tspi->clk);
+ tspi->cur_speed = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+}
+
+static int spi_tegra_setup(struct spi_device *spi)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned long cs_bit;
+ unsigned long val;
+ unsigned long flags;
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+
+ switch (spi->chip_select) {
+ case 0:
+ cs_bit = SLINK_CS_POLARITY;
+ break;
+
+ case 1:
+ cs_bit = SLINK_CS_POLARITY1;
+ break;
+
+ case 2:
+ cs_bit = SLINK_CS_POLARITY2;
+ break;
+
+ case 3:
+ cs_bit = SLINK_CS_POLARITY3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+
+ val = spi_tegra_readl(tspi, SLINK_COMMAND);
+ if (spi->mode & SPI_CS_HIGH)
+ val |= cs_bit;
+ else
+ val &= ~cs_bit;
+ spi_tegra_writel(tspi, val, SLINK_COMMAND);
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ return 0;
+}
+
+static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
+ struct spi_transfer *t;
+ unsigned long flags;
+ int was_empty;
+
+ if (list_empty(&m->transfers) || !m->complete)
+ return -EINVAL;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word > 32)
+ return -EINVAL;
+
+ if (t->len == 0)
+ return -EINVAL;
+
+ if (!t->rx_buf && !t->tx_buf)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+
+ if (WARN_ON(tspi->is_suspended)) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return -EBUSY;
+ }
+
+ m->state = spi;
+
+ was_empty = list_empty(&tspi->queue);
+ list_add_tail(&m->queue, &tspi->queue);
+
+ if (was_empty)
+ spi_tegra_start_message(spi, m);
+
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ return 0;
+}
+
+static int __init spi_tegra_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ struct resource *r;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof *tspi);
+ if (master == NULL) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ master->bus_num = pdev->id;
+
+ master->setup = spi_tegra_setup;
+ master->transfer = spi_tegra_transfer;
+ master->num_chipselect = 4;
+
+ dev_set_drvdata(&pdev->dev, master);
+ tspi = spi_master_get_devdata(master);
+ tspi->master = master;
+ tspi->pdev = pdev;
+ spin_lock_init(&tspi->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto err0;
+ }
+
+ if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ dev_name(&pdev->dev))) {
+ ret = -EBUSY;
+ goto err0;
+ }
+
+ tspi->phys = r->start;
+ tspi->base = ioremap(r->start, r->end - r->start + 1);
+ if (!tspi->base) {
+ dev_err(&pdev->dev, "can't ioremap iomem\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ tspi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR_OR_NULL(tspi->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = tspi->clk ? PTR_ERR(tspi->clk) : -ENOENT;
+ goto err2;
+ }
+
+ INIT_LIST_HEAD(&tspi->queue);
+
+ tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
+ TEGRA_DMA_SHARED);
+ if (!tspi->rx_dma) {
+ dev_err(&pdev->dev, "can not allocate rx dma channel\n");
+ ret = -ENODEV;
+ goto err3;
+ }
+
+ tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
+ &tspi->rx_bb_phys, GFP_KERNEL);
+ if (!tspi->rx_bb) {
+ dev_err(&pdev->dev, "can not allocate rx bounce buffer\n");
+ ret = -ENOMEM;
+ goto err4;
+ }
+
+ tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
+ tspi->rx_dma_req.to_memory = 1;
+ tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys;
+ tspi->rx_dma_req.dest_bus_width = 32;
+ tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO;
+ tspi->rx_dma_req.source_bus_width = 32;
+ tspi->rx_dma_req.source_wrap = 4;
+ tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
+ tspi->rx_dma_req.dev = tspi;
+
+ ret = spi_register_master(master);
+
+ if (ret < 0)
+ goto err5;
+
+ return ret;
+
+err5:
+ dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
+ tspi->rx_bb, tspi->rx_bb_phys);
+err4:
+ tegra_dma_free_channel(tspi->rx_dma);
+err3:
+ clk_put(tspi->clk);
+err2:
+ iounmap(tspi->base);
+err1:
+ release_mem_region(r->start, (r->end - r->start) + 1);
+err0:
+ spi_master_put(master);
+ return ret;
+}
+
+static int __devexit spi_tegra_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ struct resource *r;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+
+ tegra_dma_free_channel(tspi->rx_dma);
+
+ dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
+ tspi->rx_bb, tspi->rx_bb_phys);
+
+ clk_put(tspi->clk);
+ iounmap(tspi->base);
+
+ spi_master_put(master);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, (r->end - r->start) + 1);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_tegra_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ unsigned long flags;
+ unsigned limit = 500;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+ spin_lock_irqsave(&tspi->lock, flags);
+ tspi->is_suspended = true;
+ WARN_ON(!list_empty(&tspi->queue));
+
+ while (!list_empty(&tspi->queue) && limit--) {
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&tspi->lock, flags);
+ }
+
+ tspi->save_slink_cmd = spi_tegra_readl(tspi, SLINK_COMMAND);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return 0;
+}
+
+static int spi_tegra_resume(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct spi_tegra_data *tspi;
+ unsigned long flags;
+
+ master = dev_get_drvdata(&pdev->dev);
+ tspi = spi_master_get_devdata(master);
+ spin_lock_irqsave(&tspi->lock, flags);
+ clk_enable(tspi->clk);
+ spi_tegra_writel(tspi, tspi->save_slink_cmd, SLINK_COMMAND);
+ clk_disable(tspi->clk);
+ tspi->cur_speed = 0;
+ tspi->is_suspended = false;
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return 0;
+}
+#endif
+
+MODULE_ALIAS("platform:spi_tegra");
+
+static struct platform_driver spi_tegra_driver = {
+ .driver = {
+ .name = "spi_tegra",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(spi_tegra_remove),
+#ifdef CONFIG_PM
+ .suspend = spi_tegra_suspend,
+ .resume = spi_tegra_resume,
+#endif
+};
+
+static int __init spi_tegra_init(void)
+{
+ return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe);
+}
+module_init(spi_tegra_init);
+
+static void __exit spi_tegra_exit(void)
+{
+ platform_driver_unregister(&spi_tegra_driver);
+}
+module_exit(spi_tegra_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 4aa00e6e57ad..69e8a096c35a 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -61,6 +61,7 @@ config USB_ARCH_HAS_EHCI
default y if PPC_83xx
default y if SOC_AU1200
default y if ARCH_IXP4XX
+ default y if ARCH_TEGRA
default y if ARCH_W90X900
default y if ARCH_AT91SAM9G45
default y if ARCH_MXC
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index cd27f9bde2c8..bd1751e37f1d 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -156,7 +156,7 @@ config USB_ATMEL_USBA
config USB_GADGET_FSL_USB2
boolean "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC || ARCH_MXC || ARCH_TEGRA
select USB_GADGET_DUALSPEED
help
Some of Freescale PowerPC processors have a High Speed
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 27283df37d09..f6f822d2841a 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -22,6 +22,9 @@ fsl_usb2_udc-objs := fsl_udc_core.o
ifeq ($(CONFIG_ARCH_MXC),y)
fsl_usb2_udc-objs += fsl_mxc_udc.o
endif
+ifeq ($(CONFIG_ARCH_TEGRA),y)
+fsl_usb2_udc-objs += fsl_tegra_udc.o
+endif
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
diff --git a/drivers/usb/gadget/fsl_tegra_udc.c b/drivers/usb/gadget/fsl_tegra_udc.c
new file mode 100644
index 000000000000..6091d1f7ea0c
--- /dev/null
+++ b/drivers/usb/gadget/fsl_tegra_udc.c
@@ -0,0 +1,97 @@
+/*
+ * Description:
+ * Helper functions to support the tegra USB controller
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/fsl_devices.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <mach/usb_phy.h>
+
+static struct tegra_usb_phy *phy;
+static struct clk *udc_clk;
+static void *udc_base;
+
+int fsl_udc_clk_init(struct platform_device *pdev)
+{
+ struct resource *res;
+ int err;
+ int instance;
+ struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+
+
+ udc_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(udc_clk)) {
+ dev_err(&pdev->dev, "Can't get udc clock\n");
+ return PTR_ERR(udc_clk);
+ }
+
+ clk_enable(udc_clk);
+
+ /* we have to remap the registers ourselves as fsl_udc does not
+ * export them for us.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENXIO;
+ goto err0;
+ }
+ udc_base = ioremap(res->start, resource_size(res));
+ if (!udc_base) {
+ err = -ENOMEM;
+ goto err0;
+ }
+
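+ /* A platform device id of -1 denotes a single instance; map it to
+ * controller 0. */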
+ instance = pdev->id;
+ if (instance == -1)
+ instance = 0;
+
+ phy = tegra_usb_phy_open(instance, udc_base, pdata->phy_config,
+ TEGRA_USB_PHY_MODE_DEVICE);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "Can't open phy\n");
+ err = PTR_ERR(phy);
+ goto err1;
+ }
+
+ tegra_usb_phy_power_on(phy);
+
+ return 0;
+err1:
+ iounmap(udc_base);
+err0:
+ clk_disable(udc_clk);
+ clk_put(udc_clk);
+ return err;
+}
+
+void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+}
+
+void fsl_udc_clk_release(void)
+{
+ tegra_usb_phy_close(phy);
+
+ iounmap(udc_base);
+
+ clk_disable(udc_clk);
+ clk_put(udc_clk);
+}
+
+void fsl_udc_clk_suspend(void)
+{
+ tegra_usb_phy_power_off(phy);
+ clk_disable(udc_clk);
+}
+
+void fsl_udc_clk_resume(void)
+{
+ clk_enable(udc_clk);
+ tegra_usb_phy_power_on(phy);
+}
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 08a9a62a39e3..2fab37a2a094 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -48,13 +48,22 @@
#include "fsl_usb2_udc.h"
+#ifdef CONFIG_ARCH_TEGRA
+#define DRIVER_DESC "NVidia Tegra High-Speed USB SOC Device Controller driver"
+#else
#define DRIVER_DESC "Freescale High-Speed USB SOC Device Controller driver"
+#endif
#define DRIVER_AUTHOR "Li Yang/Jiang Bo"
#define DRIVER_VERSION "Apr 20, 2007"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+#define STATUS_BUFFER_SIZE 8
+#ifdef CONFIG_ARCH_TEGRA
+static const char driver_name[] = "fsl-tegra-udc";
+#else
static const char driver_name[] = "fsl-usb2-udc";
+#endif
static const char driver_desc[] = DRIVER_DESC;
static struct usb_dr_device *dr_regs;
@@ -75,6 +84,7 @@ fsl_ep0_desc = {
};
static void fsl_ep_fifo_flush(struct usb_ep *_ep);
+static int reset_queues(struct fsl_udc *udc);
#ifdef CONFIG_PPC32
#define fsl_readl(addr) in_le32(addr)
@@ -84,6 +94,25 @@ static void fsl_ep_fifo_flush(struct usb_ep *_ep);
#define fsl_writel(val32, addr) writel(val32, addr)
#endif
+/*
+ * High speed test mode packet(53 bytes).
+ * See USB 2.0 spec, section 7.1.20.
+ */
+static const u8 fsl_udc_test_packet[53] = {
+ /* JKJKJKJK x9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK x8 */
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ /* JJJJKKKK x8 */
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ /* JJJJJJJKKKKKKK x8 */
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ /* JJJJJJJK x8 */
+ 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+ /* JKKKKKKK x10, JK */
+ 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+};
+
/********************************************************************
* Internal Used Function
********************************************************************/
@@ -175,14 +204,43 @@ static void nuke(struct fsl_ep *ep, int status)
Internal Hardware related function
------------------------------------------------------------------*/
+#define FSL_UDC_RESET_TIMEOUT 1000
+static int dr_controller_reset(struct fsl_udc *udc)
+{
+ unsigned int tmp;
+ unsigned long timeout;
+
+ /* Stop and reset the usb controller */
+ tmp = fsl_readl(&dr_regs->usbcmd);
+ tmp &= ~USB_CMD_RUN_STOP;
+ fsl_writel(tmp, &dr_regs->usbcmd);
+
+ tmp = fsl_readl(&dr_regs->usbcmd);
+ tmp |= USB_CMD_CTRL_RESET;
+ fsl_writel(tmp, &dr_regs->usbcmd);
+
+ /* Wait for reset to complete */
+ timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
+ while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
+ if (time_after(jiffies, timeout)) {
+ ERR("udc reset timeout!\n");
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+ return 0;
+}
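+
+/*
+ * Note: FSL_UDC_RESET_TIMEOUT is added to jiffies directly, so the
+ * wall-clock timeout depends on CONFIG_HZ (10 s at HZ=100, 1 s at HZ=1000).
+ * An HZ-independent variant (a sketch, not in this patch) would use:
+ *
+ *	timeout = jiffies + msecs_to_jiffies(1000);
+ */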
+
static int dr_controller_setup(struct fsl_udc *udc)
{
unsigned int tmp, portctrl;
-#ifndef CONFIG_ARCH_MXC
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
unsigned int ctrl;
#endif
+#ifdef CONFIG_ARCH_TEGRA
unsigned long timeout;
-#define FSL_UDC_RESET_TIMEOUT 1000
+#endif
+ int status;
/* Config PHY interface */
portctrl = fsl_readl(&dr_regs->portsc1);
@@ -205,31 +263,29 @@ static int dr_controller_setup(struct fsl_udc *udc)
}
fsl_writel(portctrl, &dr_regs->portsc1);
- /* Stop and reset the usb controller */
- tmp = fsl_readl(&dr_regs->usbcmd);
- tmp &= ~USB_CMD_RUN_STOP;
- fsl_writel(tmp, &dr_regs->usbcmd);
+ status = dr_controller_reset(udc);
+ if (status)
+ return status;
- tmp = fsl_readl(&dr_regs->usbcmd);
- tmp |= USB_CMD_CTRL_RESET;
- fsl_writel(tmp, &dr_regs->usbcmd);
+ /* Set the controller as device mode */
+ tmp = fsl_readl(&dr_regs->usbmode);
+ tmp |= USB_MODE_CTRL_MODE_DEVICE;
+ /* Disable Setup Lockout */
+ tmp |= USB_MODE_SETUP_LOCK_OFF;
+ fsl_writel(tmp, &dr_regs->usbmode);
- /* Wait for reset to complete */
+#ifdef CONFIG_ARCH_TEGRA
+ /* Wait for controller to switch to device mode */
timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
- while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
+ while ((fsl_readl(&dr_regs->usbmode) & USB_MODE_CTRL_MODE_DEVICE) !=
+ USB_MODE_CTRL_MODE_DEVICE) {
if (time_after(jiffies, timeout)) {
- ERR("udc reset timeout!\n");
+ ERR("udc device mode setup timeout!\n");
return -ETIMEDOUT;
}
cpu_relax();
}
-
- /* Set the controller as device mode */
- tmp = fsl_readl(&dr_regs->usbmode);
- tmp |= USB_MODE_CTRL_MODE_DEVICE;
- /* Disable Setup Lockout */
- tmp |= USB_MODE_SETUP_LOCK_OFF;
- fsl_writel(tmp, &dr_regs->usbmode);
+#endif
/* Clear the setup status */
fsl_writel(0, &dr_regs->usbsts);
@@ -243,7 +299,7 @@ static int dr_controller_setup(struct fsl_udc *udc)
fsl_readl(&dr_regs->endpointlistaddr));
/* Config control enable i/o output, cpu endian register */
-#ifndef CONFIG_ARCH_MXC
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
ctrl = __raw_readl(&usb_sys_regs->control);
ctrl |= USB_CTRL_IOENB;
__raw_writel(ctrl, &usb_sys_regs->control);
@@ -267,6 +323,12 @@ static int dr_controller_setup(struct fsl_udc *udc)
static void dr_controller_run(struct fsl_udc *udc)
{
u32 temp;
+#ifdef CONFIG_ARCH_TEGRA
+ unsigned long timeout;
+#define FSL_UDC_RUN_TIMEOUT 1000
+#endif
+ /* Clear stopped bit */
+ udc->stopped = 0;
/* Enable DR irq reg */
temp = USB_INTR_INT_EN | USB_INTR_ERR_INT_EN
@@ -275,9 +337,6 @@ static void dr_controller_run(struct fsl_udc *udc)
fsl_writel(temp, &dr_regs->usbintr);
- /* Clear stopped bit */
- udc->stopped = 0;
-
/* Set the controller as device mode */
temp = fsl_readl(&dr_regs->usbmode);
temp |= USB_MODE_CTRL_MODE_DEVICE;
@@ -288,6 +347,19 @@ static void dr_controller_run(struct fsl_udc *udc)
temp |= USB_CMD_RUN_STOP;
fsl_writel(temp, &dr_regs->usbcmd);
+#ifdef CONFIG_ARCH_TEGRA
+ /* Wait for controller to start */
+ timeout = jiffies + FSL_UDC_RUN_TIMEOUT;
+ while ((fsl_readl(&dr_regs->usbcmd) & USB_CMD_RUN_STOP) !=
+ USB_CMD_RUN_STOP) {
+ if (time_after(jiffies, timeout)) {
+ ERR("udc start timeout!\n");
+ return;
+ }
+ cpu_relax();
+ }
+#endif
+
return;
}
@@ -616,6 +688,9 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
? (1 << (ep_index(ep) + 16))
: (1 << (ep_index(ep)));
+ /* Flush all the dTD structs out to memory */
+ wmb();
+
/* check if the pipe is empty */
if (!(list_empty(&ep->queue))) {
/* Add td to the end */
@@ -623,6 +698,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
lastreq->tail->next_td_ptr =
cpu_to_le32(req->head->td_dma & DTD_ADDR_MASK);
+ wmb();
/* Read prime bit, if 1 goto done */
if (fsl_readl(&dr_regs->endpointprime) & bitmask)
goto out;
@@ -673,7 +749,7 @@ out:
* @is_last: return flag if it is the last dTD of the request
* return: pointer to the built dTD */
static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
- dma_addr_t *dma, int *is_last)
+ dma_addr_t *dma, int *is_last, gfp_t gfp_flags)
{
u32 swap_temp;
struct ep_td_struct *dtd;
@@ -682,7 +758,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
*length = min(req->req.length - req->req.actual,
(unsigned)EP_MAX_LENGTH_TRANSFER);
- dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma);
+ dtd = dma_pool_alloc(udc_controller->td_pool, gfp_flags, dma);
if (dtd == NULL)
return dtd;
@@ -732,7 +808,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
}
/* Generate dtd chain for a request */
-static int fsl_req_to_dtd(struct fsl_req *req)
+static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags)
{
unsigned count;
int is_last;
@@ -741,7 +817,7 @@ static int fsl_req_to_dtd(struct fsl_req *req)
dma_addr_t dma;
do {
- dtd = fsl_build_dtd(req, &count, &dma, &is_last);
+ dtd = fsl_build_dtd(req, &count, &dma, &is_last, gfp_flags);
if (dtd == NULL)
return -ENOMEM;
@@ -770,9 +846,11 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
struct fsl_req *req = container_of(_req, struct fsl_req, req);
- struct fsl_udc *udc;
+ struct fsl_udc *udc = ep->udc;
unsigned long flags;
+ enum dma_data_direction dir;
int is_iso = 0;
+ int status;
/* catch various bogus parameters */
if (!_req || !req->req.complete || !req->req.buf
@@ -780,17 +858,27 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
VDBG("%s, bad params", __func__);
return -EINVAL;
}
- if (unlikely(!_ep || !ep->desc)) {
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (unlikely(!ep->desc)) {
VDBG("%s, bad ep", __func__);
+ spin_unlock_irqrestore(&udc->lock, flags);
return -EINVAL;
}
+
if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
- if (req->req.length > ep->ep.maxpacket)
+ if (req->req.length > ep->ep.maxpacket) {
+ spin_unlock_irqrestore(&udc->lock, flags);
return -EMSGSIZE;
+ }
is_iso = 1;
}
- udc = ep->udc;
+ dir = ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
@@ -798,18 +886,12 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
/* map virtual address to hardware */
if (req->req.dma == DMA_ADDR_INVALID) {
- req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
- req->req.buf,
- req->req.length, ep_is_in(ep)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
+ req->req.dma = dma_map_single(udc->gadget.dev.parent,
+ req->req.buf, req->req.length, dir);
req->mapped = 1;
} else {
- dma_sync_single_for_device(ep->udc->gadget.dev.parent,
- req->req.dma, req->req.length,
- ep_is_in(ep)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
+ dma_sync_single_for_device(udc->gadget.dev.parent,
+ req->req.dma, req->req.length, dir);
req->mapped = 0;
}
@@ -817,16 +899,23 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
req->req.actual = 0;
req->dtd_count = 0;
- spin_lock_irqsave(&udc->lock, flags);
/* build dtds and push them to device queue */
- if (!fsl_req_to_dtd(req)) {
- fsl_queue_td(ep, req);
- } else {
+ status = fsl_req_to_dtd(req, gfp_flags);
+ if (status)
+ goto err_unmap;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+	/* re-check that the endpoint has not been disabled meanwhile */
+ if (unlikely(!ep->desc)) {
spin_unlock_irqrestore(&udc->lock, flags);
- return -ENOMEM;
+ status = -EINVAL;
+ goto err_unmap;
}
+ fsl_queue_td(ep, req);
+
/* Update ep0 state */
if ((ep_index(ep) == 0))
udc->ep0_state = DATA_STATE_XMIT;
@@ -837,6 +926,15 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
+
+err_unmap:
+ if (req->mapped) {
+ dma_unmap_single(udc->gadget.dev.parent,
+ req->req.dma, req->req.length, dir);
+ req->req.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ }
+ return status;
}
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
@@ -1073,7 +1171,38 @@ static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct fsl_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
+
VDBG("VBUS %s", is_active ? "on" : "off");
+
+ if (udc->transceiver) {
+ if (udc->vbus_active && !is_active) {
+ /* reset all internal Queues and inform client driver */
+ reset_queues(udc);
+ /* stop the controller and turn off the clocks */
+ dr_controller_stop(udc);
+ dr_controller_reset(udc);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ fsl_udc_clk_suspend();
+ udc->vbus_active = 0;
+ udc->usb_state = USB_STATE_DEFAULT;
+ } else if (!udc->vbus_active && is_active) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ fsl_udc_clk_resume();
+ /* setup the controller in the device mode */
+ dr_controller_setup(udc);
+ /* setup EP0 for setup packet */
+ ep0_setup(udc);
+ /* initialize the USB and EP states */
+ udc->usb_state = USB_STATE_ATTACHED;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = 0;
+ udc->vbus_active = 1;
+ /* start the controller */
+ dr_controller_run(udc);
+ }
+ return 0;
+ }
+
udc->vbus_active = (is_active != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
@@ -1166,7 +1295,7 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
req->req.complete = NULL;
req->dtd_count = 0;
- if (fsl_req_to_dtd(req) == 0)
+ if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0)
fsl_queue_td(ep, req);
else
return -ENOMEM;
@@ -1244,7 +1373,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
req->dtd_count = 0;
/* prime the data phase */
- if ((fsl_req_to_dtd(req) == 0))
+ if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
fsl_queue_td(ep, req);
else /* no mem */
goto stall;
@@ -1256,6 +1385,107 @@ stall:
ep0stall(udc);
}
+static void udc_test_mode(struct fsl_udc *udc, u32 test_mode)
+{
+ struct fsl_req *req;
+ struct fsl_ep *ep;
+ u32 portsc, bitmask;
+ unsigned long timeout;
+
+ /* Ack the ep0 IN */
+ if (ep0_prime_status(udc, EP_DIR_IN))
+ ep0stall(udc);
+
+ /* get the ep0 */
+ ep = &udc->eps[0];
+ bitmask = ep_is_in(ep)
+ ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep)));
+
+ timeout = jiffies + HZ;
+ /* Wait until ep0 IN endpoint txfr is complete */
+ while (!(fsl_readl(&dr_regs->endptcomplete) & bitmask)) {
+ if (time_after(jiffies, timeout)) {
+ pr_err("Timeout for Ep0 IN Ack\n");
+ break;
+ }
+ cpu_relax();
+ }
+
+ switch (test_mode << PORTSCX_PTC_BIT_POS) {
+ case PORTSCX_PTC_JSTATE:
+ VDBG("TEST_J\n");
+ break;
+ case PORTSCX_PTC_KSTATE:
+ VDBG("TEST_K\n");
+ break;
+ case PORTSCX_PTC_SEQNAK:
+ VDBG("TEST_SE0_NAK\n");
+ break;
+ case PORTSCX_PTC_PACKET:
+ VDBG("TEST_PACKET\n");
+
+ /* get the ep and configure for IN direction */
+ ep = &udc->eps[0];
+ udc->ep0_dir = USB_DIR_IN;
+
+ /* Initialize ep0 status request structure */
+ req = container_of(fsl_alloc_request(NULL, GFP_ATOMIC),
+ struct fsl_req, req);
+	/* allocate a small amount of memory to get a valid address */
+ req->req.buf = kmalloc(sizeof(fsl_udc_test_packet), GFP_ATOMIC);
+ req->req.dma = virt_to_phys(req->req.buf);
+
+	/* Fill in the request structure */
+ memcpy(req->req.buf, fsl_udc_test_packet, sizeof(fsl_udc_test_packet));
+ req->ep = ep;
+ req->req.length = sizeof(fsl_udc_test_packet);
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->req.complete = NULL;
+ req->dtd_count = 0;
+ req->mapped = 0;
+
+ dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+ req->req.dma, req->req.length,
+ ep_is_in(ep)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+
+ /* prime the data phase */
+ if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
+ fsl_queue_td(ep, req);
+ else /* no mem */
+ goto stall;
+
+ list_add_tail(&req->queue, &ep->queue);
+ udc->ep0_state = DATA_STATE_XMIT;
+ break;
+ case PORTSCX_PTC_FORCE_EN:
+ VDBG("TEST_FORCE_EN\n");
+ break;
+ default:
+ ERR("udc unknown test mode[%d]!\n", test_mode);
+ goto stall;
+ }
+
+ /* read the portsc register */
+ portsc = fsl_readl(&dr_regs->portsc1);
+ /* set the test mode selector */
+ portsc |= test_mode << PORTSCX_PTC_BIT_POS;
+ fsl_writel(portsc, &dr_regs->portsc1);
+
+	/*
+	 * The device must be power cycled to exit test mode.
+	 * See USB 2.0 spec, section 9.4.9 for test mode operation via
+	 * Set Feature, and section 7.1.20 for the test modes themselves.
+	 */
+	pr_info("udc entering test mode, power cycle the device to exit\n");
+ return;
+stall:
+ ep0stall(udc);
+}
+
static void setup_received_irq(struct fsl_udc *udc,
struct usb_ctrlrequest *setup)
{
@@ -1289,7 +1519,17 @@ static void setup_received_irq(struct fsl_udc *udc,
{
int rc = -EOPNOTSUPP;
- if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+ if (setup->bRequestType == USB_RECIP_DEVICE &&
+ wValue == USB_DEVICE_TEST_MODE) {
+ /*
+ * If the feature selector is TEST_MODE, then the most
+ * significant byte of wIndex is used to specify the specific
+ * test mode and the lower byte of wIndex must be zero.
+ */
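+			/*
+			 * Example: wIndex = 0x0400 selects test mode
+			 * 0x0400 >> 8 = 0x04, i.e. TEST_PACKET per USB 2.0
+			 * table 9-7.
+			 */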
+ udc_test_mode(udc, wIndex >> 8);
+ return;
+
+ } else if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
== (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
int pipe = get_pipe_by_windex(wIndex);
struct fsl_ep *ep;
@@ -1516,7 +1756,12 @@ static void dtd_complete_irq(struct fsl_udc *udc)
if (!bit_pos)
return;
+#ifdef CONFIG_ARCH_TEGRA
+ /* XXX what's going on here */
+ for (i = 0; i < udc->max_ep; i++) {
+#else
for (i = 0; i < udc->max_ep * 2; i++) {
+#endif
ep_num = i >> 1;
direction = i % 2;
@@ -1664,6 +1909,15 @@ static void reset_irq(struct fsl_udc *udc)
/* Write 1s to the flush register */
fsl_writel(0xffffffff, &dr_regs->endptflush);
+#if defined(CONFIG_ARCH_TEGRA)
+ /* When the bus reset is seen on Tegra, the PORTSCX_PORT_RESET bit
+ * is not set */
+ VDBG("Bus reset");
+	/* Reset all the queues, including XD, dTD, EP queue
+	 * head and TR Queue */
+ reset_queues(udc);
+ udc->usb_state = USB_STATE_DEFAULT;
+#else
if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
VDBG("Bus reset");
/* Reset all the queues, include XD, dTD, EP queue
@@ -1685,6 +1939,7 @@ static void reset_irq(struct fsl_udc *udc)
dr_controller_run(udc);
udc->usb_state = USB_STATE_ATTACHED;
}
+#endif
}
/*
@@ -1697,10 +1952,14 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
irqreturn_t status = IRQ_NONE;
unsigned long flags;
+ spin_lock_irqsave(&udc->lock, flags);
+
/* Disable ISR for OTG host mode */
- if (udc->stopped)
+ if (udc->stopped) {
+ spin_unlock_irqrestore(&udc->lock, flags);
return IRQ_NONE;
- spin_lock_irqsave(&udc->lock, flags);
+ }
+
irq_src = fsl_readl(&dr_regs->usbsts) & fsl_readl(&dr_regs->usbintr);
/* Clear notification bits */
fsl_writel(irq_src, &dr_regs->usbsts);
@@ -1801,10 +2060,13 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
}
/* Enable DR IRQ reg and Set usbcmd reg Run bit */
- dr_controller_run(udc_controller);
- udc_controller->usb_state = USB_STATE_ATTACHED;
- udc_controller->ep0_state = WAIT_FOR_SETUP;
- udc_controller->ep0_dir = 0;
+ if (!udc_controller->transceiver) {
+ dr_controller_run(udc_controller);
+ udc_controller->usb_state = USB_STATE_ATTACHED;
+ udc_controller->ep0_state = WAIT_FOR_SETUP;
+ udc_controller->ep0_dir = 0;
+ }
+
printk(KERN_INFO "%s: bind to driver %s\n",
udc_controller->gadget.name, driver->driver.name);
@@ -1828,9 +2090,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
if (!driver || driver != udc_controller->driver || !driver->unbind)
return -EINVAL;
- if (udc_controller->transceiver)
- otg_set_peripheral(udc_controller->transceiver, NULL);
-
/* stop DR, disable intr */
dr_controller_stop(udc_controller);
@@ -1869,7 +2128,11 @@ EXPORT_SYMBOL(usb_gadget_unregister_driver);
#include <linux/seq_file.h>
+#ifdef CONFIG_ARCH_TEGRA
+static const char proc_filename[] = "driver/fsl_tegra_udc";
+#else
static const char proc_filename[] = "driver/fsl_usb2_udc";
+#endif
static int fsl_proc_read(char *page, char **start, off_t off, int count,
int *eof, void *_dev)
@@ -2051,7 +2314,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
size -= t;
next += t;
-#ifndef CONFIG_ARCH_MXC
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
tmp_reg = usb_sys_regs->snoop1;
t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
size -= t;
@@ -2139,8 +2402,10 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
static void fsl_udc_release(struct device *dev)
{
complete(udc_controller->done);
+#ifndef CONFIG_ARCH_TEGRA
dma_free_coherent(dev->parent, udc_controller->ep_qh_size,
udc_controller->ep_qh, udc_controller->ep_qh_dma);
+#endif
kfree(udc_controller);
}
@@ -2166,6 +2431,13 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
return -1;
}
+#ifdef CONFIG_ARCH_TEGRA
+ /* Tegra uses hardware queue heads */
+ size = udc->max_ep * sizeof(struct ep_queue_head);
+ udc->ep_qh = (struct ep_queue_head *)((u8 *)dr_regs + QH_OFFSET);
+ udc->ep_qh_dma = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start +
+ QH_OFFSET;
+#else
/* initialized QHs, take care of alignment */
size = udc->max_ep * sizeof(struct ep_queue_head);
if (size < QH_ALIGNMENT)
@@ -2181,6 +2453,7 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
kfree(udc->eps);
return -1;
}
+#endif
udc->ep_qh_size = size;
@@ -2189,8 +2462,17 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
struct fsl_req, req);
/* allocate a small amount of memory to get valid address */
- udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
- udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
+ udc->status_req->req.buf = dma_alloc_coherent(&pdev->dev,
+ STATUS_BUFFER_SIZE, &udc->status_req->req.dma,
+ GFP_KERNEL);
+ if (!udc->status_req->req.buf) {
+ ERR("alloc status_req buffer failed\n");
+#ifndef CONFIG_ARCH_TEGRA
+ dma_free_coherent(&pdev->dev, size, udc->ep_qh, udc->ep_qh_dma);
+#endif
+ kfree(udc->eps);
+ return -ENOMEM;
+ }
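+
+	/*
+	 * Using dma_alloc_coherent() rather than kmalloc() + virt_to_phys()
+	 * matters here: virt_to_phys() bypasses the DMA API, so the buffer
+	 * would be neither guaranteed cache-coherent nor mapped through any
+	 * IOMMU. The matching dma_free_coherent() is in fsl_udc_remove().
+	 */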
udc->resume_state = USB_STATE_NOTATTACHED;
udc->usb_state = USB_STATE_POWERED;
@@ -2245,6 +2527,9 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
int ret = -ENODEV;
unsigned int i;
u32 dccparams;
+#if defined(CONFIG_ARCH_TEGRA)
+ struct resource *res_sys = NULL;
+#endif
if (strcmp(pdev->name, driver_name)) {
VDBG("Wrong device");
@@ -2279,7 +2564,21 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
goto err_release_mem_region;
}
-#ifndef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_TEGRA)
+	/* If the PHY registers are not provided as a separate aperture,
+	 * use the registers inside the controller aperture. */
+ res_sys = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_sys) {
+ usb_sys_regs = ioremap(res_sys->start, resource_size(res_sys));
+ if (!usb_sys_regs)
+ goto err_release_mem_region;
+ } else {
+ usb_sys_regs = (struct usb_sys_interface *)
+ ((u32)dr_regs + USB_DR_SYS_OFFSET);
+ }
+#endif
+
+#if !defined(CONFIG_ARCH_MXC) && !defined(CONFIG_ARCH_TEGRA)
usb_sys_regs = (struct usb_sys_interface *)
((u32)dr_regs + USB_DR_SYS_OFFSET);
#endif
@@ -2374,6 +2673,25 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
goto err_unregister;
}
create_proc_file();
+
+#ifdef CONFIG_USB_OTG_UTILS
+ udc_controller->transceiver = otg_get_transceiver();
+ if (udc_controller->transceiver) {
+ dr_controller_stop(udc_controller);
+ dr_controller_reset(udc_controller);
+ fsl_udc_clk_suspend();
+ udc_controller->vbus_active = 0;
+ udc_controller->usb_state = USB_STATE_DEFAULT;
+ otg_set_peripheral(udc_controller->transceiver, &udc_controller->gadget);
+ }
+#else
+#ifdef CONFIG_ARCH_TEGRA
+ /* Power down the phy if cable is not connected */
+ if (!(fsl_readl(&usb_sys_regs->vbus_wakeup) & USB_SYS_VBUS_STATUS))
+ fsl_udc_clk_suspend();
+#endif
+#endif
+
return 0;
err_unregister:
@@ -2405,13 +2723,18 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
return -ENODEV;
udc_controller->done = &done;
+ if (udc_controller->transceiver)
+ otg_set_peripheral(udc_controller->transceiver, NULL);
+
fsl_udc_clk_release();
/* DR has been stopped in usb_gadget_unregister_driver() */
remove_proc_file();
/* Free allocated memory */
- kfree(udc_controller->status_req->req.buf);
+ dma_free_coherent(&pdev->dev, STATUS_BUFFER_SIZE,
+ udc_controller->status_req->req.buf,
+ udc_controller->status_req->req.dma);
kfree(udc_controller->status_req);
kfree(udc_controller->eps);
@@ -2433,6 +2756,10 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
-----------------------------------------------------------------*/
static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
{
+ if (udc_controller->transceiver &&
+ udc_controller->transceiver->state != OTG_STATE_B_PERIPHERAL)
+ return 0;
+
dr_controller_stop(udc_controller);
return 0;
}
@@ -2443,6 +2770,10 @@ static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
*-----------------------------------------------------------------*/
static int fsl_udc_resume(struct platform_device *pdev)
{
+ if (udc_controller->transceiver &&
+ udc_controller->transceiver->state != OTG_STATE_B_PERIPHERAL)
+ return 0;
+
/* Enable DR irq reg and set controller Run */
if (udc_controller->stopped) {
dr_controller_setup(udc_controller);
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 20aeceed48c7..8d5bd2fe7475 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -84,6 +84,15 @@ struct usb_dr_host {
};
/* non-EHCI USB system interface registers (Big Endian) */
+#ifdef CONFIG_ARCH_TEGRA
+struct usb_sys_interface {
+ u32 suspend_ctrl;
+ u32 vbus_sensors;
+ u32 vbus_wakeup;
+ u32 vbus_alt_status;
+ u32 legacy_ctrl;
+};
+#else
struct usb_sys_interface {
u32 snoop1;
u32 snoop2;
@@ -93,6 +102,7 @@ struct usb_sys_interface {
u8 res[236];
u32 control; /* General Purpose Control Register */
};
+#endif
/* ep0 transfer state */
#define WAIT_FOR_SETUP 0
@@ -420,10 +430,19 @@ struct ep_td_struct {
/* Alignment requirements; must be a power of two */
#define DTD_ALIGNMENT 0x20
#define QH_ALIGNMENT 2048
+#define QH_OFFSET 0x1000
/* Controller dma boundary */
#define UDC_DMA_BOUNDARY 0x1000
+#define USB_SYS_VBUS_ASESSION_INT_EN 0x10000
+#define USB_SYS_VBUS_ASESSION_CHANGED 0x20000
+#define USB_SYS_VBUS_ASESSION 0x40000
+#define USB_SYS_VBUS_WAKEUP_ENABLE 0x40000000
+#define USB_SYS_VBUS_WAKEUP_INT_ENABLE 0x100
+#define USB_SYS_VBUS_WAKEUP_INT_STATUS 0x200
+#define USB_SYS_VBUS_STATUS 0x400
+
/*-------------------------------------------------------------------------*/
/* ### driver private data
@@ -564,10 +583,12 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
struct platform_device;
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_ARCH_TEGRA)
int fsl_udc_clk_init(struct platform_device *pdev);
void fsl_udc_clk_finalize(struct platform_device *pdev);
void fsl_udc_clk_release(void);
+void fsl_udc_clk_suspend(void);
+void fsl_udc_clk_resume(void);
#else
static inline int fsl_udc_clk_init(struct platform_device *pdev)
{
@@ -579,6 +600,12 @@ static inline void fsl_udc_clk_finalize(struct platform_device *pdev)
static inline void fsl_udc_clk_release(void)
{
}
+static inline void fsl_udc_clk_suspend(void)
+{
+}
+static inline void fsl_udc_clk_resume(void)
+{
+}
#endif
#endif
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2d926cec0725..10f6ab5f9150 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -418,6 +418,14 @@ config USB_HWA_HCD
To compile this driver a module, choose M here: the module
will be called "hwa-hc".
+config USB_TEGRA_HCD
+ boolean "NVIDIA Tegra HCD support"
+ depends on USB && ARCH_TEGRA && USB_EHCI_HCD
+ select USB_EHCI_ROOT_HUB_TT
+ help
+ This driver enables support for the internal USB Host Controller
+ found in NVIDIA Tegra SoCs. The Tegra controller is EHCI compliant.
+
config USB_IMX21_HCD
tristate "iMX21 HCD support"
depends on USB && ARM && MACH_MX21
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index df7b194d1499..74cc97b80c4b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -259,7 +259,8 @@ static int ehci_reset (struct ehci_hcd *ehci)
command |= CMD_RESET;
dbg_cmd (ehci, "reset", command);
- ehci_writel(ehci, command, &ehci->regs->command);
+ if (!ehci->controller_resets_phy)
+ ehci_writel(ehci, command, &ehci->regs->command);
ehci_to_hcd(ehci)->state = HC_STATE_HALT;
ehci->next_statechange = jiffies;
retval = handshake (ehci, &ehci->regs->command,
@@ -1197,6 +1198,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_atmel_driver
#endif
+#ifdef CONFIG_ARCH_TEGRA
+#include "ehci-tegra.c"
+#define PLATFORM_DRIVER tegra_ehci_driver
+#endif
+
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
!defined(XILINX_OF_PLATFORM_DRIVER)
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 796ea0c8900f..e33e78cbde00 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -900,6 +900,7 @@ static int ehci_hub_control (
/* whoever resets must GetPortStatus to complete it!! */
if ((temp & PORT_RESET)
+ && !ehci->port_reset_no_wait
&& time_after_eq(jiffies,
ehci->reset_done[wIndex])) {
status |= USB_PORT_STAT_C_RESET << 16;
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 233c288e3f93..95bd514c0d06 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -838,6 +838,7 @@ qh_make (
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
qh->start = NO_FRAME;
+ qh->stamp = ehci->periodic_stamp;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
@@ -1008,6 +1009,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
qh_get(qh);
qh->xacterrs = 0;
qh->qh_state = QH_STATE_LINKED;
+ wmb();
/* qtd completions reported later by interrupt */
}
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a92526d6e5ae..fa442c5ec16b 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -2261,6 +2261,7 @@ scan_periodic (struct ehci_hcd *ehci)
}
clock &= mod - 1;
clock_frame = clock >> 3;
+ ++ehci->periodic_stamp;
for (;;) {
union ehci_shadow q, *q_p;
@@ -2289,10 +2290,14 @@ restart:
temp.qh = qh_get (q.qh);
type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next);
q = q.qh->qh_next;
- modified = qh_completions (ehci, temp.qh);
- if (unlikely(list_empty(&temp.qh->qtd_list) ||
- temp.qh->needs_rescan))
- intr_deschedule (ehci, temp.qh);
+ if (temp.qh->stamp != ehci->periodic_stamp) {
+ modified = qh_completions(ehci, temp.qh);
+ if (!modified)
+ temp.qh->stamp = ehci->periodic_stamp;
+ if (unlikely(list_empty(&temp.qh->qtd_list) ||
+ temp.qh->needs_rescan))
+ intr_deschedule(ehci, temp.qh);
+ }
qh_put (temp.qh);
break;
case Q_TYPE_FSTN:
@@ -2427,6 +2432,7 @@ restart:
free_cached_lists(ehci);
ehci->clock_frame = clock_frame;
}
+ ++ehci->periodic_stamp;
} else {
now_uframe++;
now_uframe &= mod - 1;
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
new file mode 100644
index 000000000000..262bbb01228b
--- /dev/null
+++ b/drivers/usb/host/ehci-tegra.c
@@ -0,0 +1,685 @@
+/*
+ * EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2009 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/tegra_usb.h>
+#include <linux/irq.h>
+#include <linux/usb/otg.h>
+#include <mach/usb_phy.h>
+
+#define TEGRA_USB_USBCMD_REG_OFFSET 0x140
+#define TEGRA_USB_USBCMD_RESET (1 << 1)
+#define TEGRA_USB_USBMODE_REG_OFFSET 0x1a8
+#define TEGRA_USB_USBMODE_HOST (3 << 0)
+#define TEGRA_USB_PORTSC1_PTC(x) (((x) & 0xf) << 16)
+
+struct tegra_ehci_context {
+ bool valid;
+ u32 command;
+ u32 frame_list;
+ u32 async_next;
+ u32 txfilltunning;
+ u32 otgsc;
+ enum tegra_usb_phy_port_speed port_speed;
+};
+
+struct tegra_ehci_hcd {
+ struct ehci_hcd *ehci;
+ struct tegra_usb_phy *phy;
+ struct clk *clk;
+ struct otg_transceiver *transceiver;
+ int host_resumed;
+ int bus_suspended;
+ int port_resuming;
+ struct tegra_ehci_context context;
+ int power_down_on_bus_suspend;
+};
+
+static void tegra_ehci_power_up(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ clk_enable(tegra->clk);
+ tegra_usb_phy_power_on(tegra->phy);
+ tegra->host_resumed = 1;
+}
+
+static void tegra_ehci_power_down(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ tegra->host_resumed = 0;
+ tegra_usb_phy_power_off(tegra->phy);
+ clk_disable(tegra->clk);
+}
+
+static int tegra_ehci_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ u32 __iomem *status_reg;
+ u32 temp;
+ unsigned long flags;
+ int retval = 0;
+
+ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+	/*
+	 * ehci_hub_control() implements USB_PORT_FEAT_ENABLE by writing the
+	 * register read value straight back, which also clears the other
+	 * write-one-to-clear status bits. Handle it here instead, masking
+	 * out those bits so that only the port-enable bit is cleared.
+	 */
+ if (typeReq == ClearPortFeature && wValue == USB_PORT_FEAT_ENABLE) {
+ temp = ehci_readl(ehci, status_reg);
+ ehci_writel(ehci, (temp & ~PORT_RWC_BITS) & ~PORT_PE, status_reg);
+ goto done;
+ }
+
+ else if (typeReq == GetPortStatus) {
+ temp = ehci_readl(ehci, status_reg);
+ if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
+ /* resume completed */
+ tegra->port_resuming = 0;
+ tegra_usb_phy_postresume(tegra->phy);
+ }
+ }
+
+ else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+		/* After the above check the port must be connected.
+		 * Set the appropriate wake bits so the phy can enter
+		 * low-power mode where the hostpc feature is available.
+		 */
+ temp &= ~PORT_WKCONN_E;
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+ if (handshake(ehci, status_reg, PORT_SUSPEND,
+ PORT_SUSPEND, 5000))
+ pr_err("%s: timeout waiting for PORT_SUSPEND\n", __func__);
+ goto done;
+ }
+
+	/*
+	 * The Tegra host controller times the resume operation itself and
+	 * clears the bit once the port switches to the HS or FS idle state.
+	 * This differs from standard EHCI, where the host controller driver
+	 * must time the resume duration and clear the bit itself.
+	 */
+ else if (typeReq == ClearPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_RESET) || !(temp & PORT_PE)) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ if (!(temp & PORT_SUSPEND))
+ goto done;
+
+ tegra_usb_phy_preresume(tegra->phy);
+
+ /* reschedule root hub polling during resume signaling */
+ ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
+ /* check the port again */
+ mod_timer(&ehci_to_hcd(ehci)->rh_timer,
+ ehci->reset_done[wIndex-1]);
+
+ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+ /* start resume signalling */
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&ehci->lock, flags);
+
+		/* poll PORT_RESUME until the controller clears it */
+ if (handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
+ pr_err("%s: timeout waiting for PORT_RESUME\n", __func__);
+
+		/* poll PORT_SUSPEND until the controller clears it */
+ if (handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
+ pr_err("%s: timeout waiting for PORT_SUSPEND\n", __func__);
+
+ tegra->port_resuming = 1;
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ /* Handle the hub control events here */
+ return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+done:
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return retval;
+}
+
+static void tegra_ehci_restart(struct usb_hcd *hcd)
+{
+ unsigned int temp;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ /* reset the ehci controller */
+ ehci->controller_resets_phy = 0;
+ ehci_reset(ehci);
+ ehci->controller_resets_phy = 1;
+
+ /* Set to Host mode by setting bit 0-1 of USB device mode register */
+ temp = readl(hcd->regs + TEGRA_USB_USBMODE_REG_OFFSET);
+ writel((temp | TEGRA_USB_USBMODE_HOST),
+ (hcd->regs + TEGRA_USB_USBMODE_REG_OFFSET));
+
+ /* setup the frame list and Async q heads */
+ ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
+ ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
+ /* setup the command register and set the controller in RUN mode */
+ ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+ ehci->command |= CMD_RUN;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+ down_write(&ehci_cf_port_reset_rwsem);
+ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+ /* flush posted writes */
+ ehci_readl(ehci, &ehci->regs->command);
+ up_write(&ehci_cf_port_reset_rwsem);
+}
+
+static int tegra_usb_suspend(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ struct ehci_regs __iomem *hw = tegra->ehci->regs;
+ struct tegra_ehci_context *context = &tegra->context;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tegra->ehci->lock, flags);
+
+ context->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
+
+ if (context->port_speed > TEGRA_USB_PHY_PORT_HIGH) {
+		/* If no device is connected, or the speed is invalid,
+		 * don't save the context */
+ context->valid = false;
+ } else {
+ context->command = readl(&hw->command);
+ context->frame_list = readl(&hw->frame_list);
+ context->async_next = readl(&hw->async_next);
+ context->txfilltunning = readl(&hw->reserved[2]);
+ context->otgsc = readl(&hw->reserved[18]);
+ context->valid = true;
+ }
+
+ ehci_halt(tegra->ehci);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ spin_unlock_irqrestore(&tegra->ehci->lock, flags);
+
+ tegra_ehci_power_down(ehci_to_hcd(tegra->ehci));
+ return 0;
+}
+
+static int tegra_usb_resume(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ struct tegra_ehci_context *context = &tegra->context;
+ struct ehci_regs __iomem *hw = tegra->ehci->regs;
+ unsigned long val;
+
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ tegra_ehci_power_up(ehci_to_hcd(tegra->ehci));
+
+ if (!context->valid) {
+ /* Wait for the phy to detect new devices
+ * before we restart the controller */
+ msleep(10);
+ goto restart;
+ }
+
+ /* Restore register context */
+ writel(TEGRA_USB_USBMODE_HOST, &hw->reserved[19]);
+ writel(context->otgsc, &hw->reserved[18]);
+ writel(context->txfilltunning, &hw->reserved[2]);
+ writel(context->async_next, &hw->async_next);
+ writel(context->frame_list, &hw->frame_list);
+ writel(context->command, &hw->command);
+
+ /* Enable Port Power */
+ val = readl(&hw->port_status[0]);
+ val |= PORT_POWER;
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+
+ /* Program the field PTC in PORTSC based on the saved speed mode */
+ val = readl(&hw->port_status[0]);
+ val &= ~(TEGRA_USB_PORTSC1_PTC(~0));
+ if (context->port_speed == TEGRA_USB_PHY_PORT_HIGH)
+ val |= TEGRA_USB_PORTSC1_PTC(5);
+ else if (context->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
+ val |= TEGRA_USB_PORTSC1_PTC(6);
+ else if (context->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
+ val |= TEGRA_USB_PORTSC1_PTC(7);
+ writel(val, &hw->port_status[0]);
+ udelay(10);
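+
+	/*
+	 * PTC values 5/6/7 appear to be the controller's force-enable test
+	 * modes for HS/FS/LS respectively, so this briefly forces the port
+	 * state machine to the saved speed before test mode is cleared below.
+	 */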
+
+ /* Disable test mode by setting PTC field to NORMAL_OP */
+ val = readl(&hw->port_status[0]);
+ val &= ~(TEGRA_USB_PORTSC1_PTC(~0));
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+
+ /* Poll until CCS is enabled */
+ if (handshake(tegra->ehci, &hw->port_status[0], PORT_CONNECT,
+ PORT_CONNECT, 2000)) {
+ pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
+ goto restart;
+ }
+
+ /* Poll until PE is enabled */
+ if (handshake(tegra->ehci, &hw->port_status[0], PORT_PE,
+ PORT_PE, 2000)) {
+ pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
+ goto restart;
+ }
+
+ /* Clear the PCI status, to avoid an interrupt taken upon resume */
+ val = readl(&hw->status);
+ val |= STS_PCD;
+ writel(val, &hw->status);
+
+ /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
+ val = readl(&hw->port_status[0]);
+ if ((val & PORT_POWER) && (val & PORT_PE)) {
+ val |= PORT_SUSPEND;
+ writel(val, &hw->port_status[0]);
+
+ /* Wait until port suspend completes */
+ if (handshake(tegra->ehci, &hw->port_status[0], PORT_SUSPEND,
+ PORT_SUSPEND, 1000)) {
+ pr_err("%s: timeout waiting for PORT_SUSPEND\n",
+ __func__);
+ goto restart;
+ }
+ }
+
+ return 0;
+
+restart:
+ tegra_ehci_restart(hcd);
+ return 0;
+}
+
+static int tegra_ehci_reset(struct usb_hcd *hcd)
+{
+ unsigned long temp;
+ int usec = 250*1000; /* see ehci_reset */
+
+ temp = readl(hcd->regs + TEGRA_USB_USBCMD_REG_OFFSET);
+ temp |= TEGRA_USB_USBCMD_RESET;
+ writel(temp, hcd->regs + TEGRA_USB_USBCMD_REG_OFFSET);
+
+ do {
+ temp = readl(hcd->regs + TEGRA_USB_USBCMD_REG_OFFSET);
+ if (!(temp & TEGRA_USB_USBCMD_RESET))
+ break;
+ udelay(1);
+ usec--;
+ } while (usec);
+
+ if (!usec)
+ return -ETIMEDOUT;
+
+ /* Set to Host mode by setting bit 0-1 of USB device mode register */
+ temp = readl(hcd->regs + TEGRA_USB_USBMODE_REG_OFFSET);
+ writel((temp | TEGRA_USB_USBMODE_HOST),
+ (hcd->regs + TEGRA_USB_USBMODE_REG_OFFSET));
+
+ return 0;
+}
+
+static void tegra_ehci_shutdown(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+	/* ehci_shutdown() touches the USB controller registers, so make
+	 * sure the controller is clocked and powered */
+ if (!tegra->host_resumed)
+ tegra_ehci_power_up(hcd);
+
+ /* call ehci shut down */
+ ehci_shutdown(hcd);
+}
+
+static int tegra_ehci_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ /* EHCI registers start at offset 0x100 */
+ ehci->caps = hcd->regs + 0x100;
+ ehci->regs = hcd->regs + 0x100 +
+ HC_LENGTH(readl(&ehci->caps->hc_capbase));
+
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = readl(&ehci->caps->hcs_params);
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ /* data structure init */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ hcd->has_tt = 1;
+ ehci->sbrn = 0x20;
+
+ ehci_reset(ehci);
+
+	/*
+	 * Resetting the controller has the side effect of resetting the PHY,
+	 * so once the PHY is set up the controller must not be reset again.
+	 * The controller_resets_phy flag makes ehci_reset() skip the reset;
+	 * tegra_ehci_restart() clears it temporarily when a full reset is
+	 * actually intended.
+	 */
+ ehci->controller_resets_phy = 1;
+ ehci->port_reset_no_wait = 1;
+
+ ehci_port_power(ehci, 1);
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int tegra_ehci_bus_suspend(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ int error_status = 0;
+
+ error_status = ehci_bus_suspend(hcd);
+ if (!error_status && tegra->power_down_on_bus_suspend) {
+ tegra_usb_suspend(hcd);
+ tegra->bus_suspended = 1;
+ }
+
+ return error_status;
+}
+
+static int tegra_ehci_bus_resume(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ if (tegra->bus_suspended && tegra->power_down_on_bus_suspend) {
+ tegra_usb_resume(hcd);
+ tegra->bus_suspended = 0;
+ }
+
+ tegra_usb_phy_preresume(tegra->phy);
+ tegra->port_resuming = 1;
+ return ehci_bus_resume(hcd);
+}
+#endif
+
+static const struct hc_driver tegra_ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Tegra EHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ .flags = HCD_USB2 | HCD_MEMORY,
+
+ .reset = tegra_ehci_setup,
+ .irq = ehci_irq,
+
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = tegra_ehci_shutdown,
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+ .get_frame_number = ehci_get_frame,
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = tegra_ehci_hub_control,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+#ifdef CONFIG_PM
+ .bus_suspend = tegra_ehci_bus_suspend,
+ .bus_resume = tegra_ehci_bus_resume,
+#endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+};
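+
+/*
+ * Everything not overridden above (IRQ handling, URB queueing, root-hub
+ * status) is the stock EHCI implementation; only reset, shutdown,
+ * hub_control and the bus suspend/resume paths carry Tegra-specific
+ * behavior.
+ */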
+
+static int tegra_ehci_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct tegra_ehci_hcd *tegra;
+ struct tegra_ehci_platform_data *pdata;
+ struct tegra_utmip_config *config;
+ int err = 0;
+ int irq;
+ int instance = pdev->id;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "Platform data missing\n");
+ return -EINVAL;
+ }
+
+ tegra = kzalloc(sizeof(struct tegra_ehci_hcd), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ err = -ENOMEM;
+ goto fail_hcd;
+ }
+
+ platform_set_drvdata(pdev, tegra);
+
+ tegra->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tegra->clk)) {
+ dev_err(&pdev->dev, "Can't get ehci clock\n");
+ err = PTR_ERR(tegra->clk);
+ goto fail_clk;
+ }
+
+ err = clk_enable(tegra->clk);
+ if (err)
+ goto fail_clken;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto fail_io;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = ioremap(res->start, resource_size(res));
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+ err = -ENOMEM;
+ goto fail_io;
+ }
+
+ config = pdata->phy_config;
+
+ tegra->phy = tegra_usb_phy_open(instance, hcd->regs, config,
+ TEGRA_USB_PHY_MODE_HOST);
+ if (IS_ERR(tegra->phy)) {
+ dev_err(&pdev->dev, "Failed to open USB phy\n");
+ err = -ENXIO;
+ goto fail_phy;
+ }
+
+ err = tegra_ehci_reset(hcd);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to reset controller\n");
+ goto fail;
+ }
+
+ tegra_usb_phy_power_on(tegra->phy);
+ tegra->host_resumed = 1;
+ tegra->power_down_on_bus_suspend = pdata->power_down_on_bus_suspend;
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ set_irq_flags(irq, IRQF_VALID);
+
+ ehci = hcd_to_ehci(hcd);
+ tegra->ehci = ehci;
+
+#ifdef CONFIG_USB_OTG_UTILS
+ if (pdata->operating_mode == TEGRA_USB_OTG) {
+ tegra->transceiver = otg_get_transceiver();
+ if (tegra->transceiver)
+ otg_set_host(tegra->transceiver, &hcd->self);
+ }
+#endif
+
+ err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto fail;
+ }
+
+ return err;
+
+fail:
+#ifdef CONFIG_USB_OTG_UTILS
+ if (tegra->transceiver) {
+ otg_set_host(tegra->transceiver, NULL);
+ otg_put_transceiver(tegra->transceiver);
+ }
+#endif
+ tegra_usb_phy_close(tegra->phy);
+fail_phy:
+ iounmap(hcd->regs);
+fail_io:
+ clk_disable(tegra->clk);
+fail_clken:
+ clk_put(tegra->clk);
+fail_clk:
+ usb_put_hcd(hcd);
+fail_hcd:
+ kfree(tegra);
+ return err;
+}
+
+#ifdef CONFIG_PM
+static int tegra_ehci_resume(struct platform_device *pdev)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (tegra->bus_suspended)
+ return 0;
+
+ return tegra_usb_resume(hcd);
+}
+
+static int tegra_ehci_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (tegra->bus_suspended)
+ return 0;
+
+ if (time_before(jiffies, tegra->ehci->next_statechange))
+ msleep(10);
+
+ return tegra_usb_suspend(hcd);
+}
+#endif
+
+static int tegra_ehci_remove(struct platform_device *pdev)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (tegra == NULL || hcd == NULL)
+ return -EINVAL;
+
+#ifdef CONFIG_USB_OTG_UTILS
+ if (tegra->transceiver) {
+ otg_set_host(tegra->transceiver, NULL);
+ otg_put_transceiver(tegra->transceiver);
+ }
+#endif
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+
+ tegra_usb_phy_close(tegra->phy);
+ iounmap(hcd->regs);
+
+ clk_disable(tegra->clk);
+ clk_put(tegra->clk);
+
+ kfree(tegra);
+ return 0;
+}
+
+static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver tegra_ehci_driver = {
+ .probe = tegra_ehci_probe,
+ .remove = tegra_ehci_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_ehci_suspend,
+ .resume = tegra_ehci_resume,
+#endif
+ .shutdown = tegra_ehci_hcd_shutdown,
+ .driver = {
+ .name = "tegra-ehci",
+ }
+};
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index bde823f704e9..530540a4bdd4 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -117,6 +117,7 @@ struct ehci_hcd { /* one per controller */
struct timer_list watchdog;
unsigned long actions;
unsigned stamp;
+ unsigned periodic_stamp;
unsigned random_frame;
unsigned long next_statechange;
ktime_t last_periodic_enable;
@@ -131,6 +132,8 @@ struct ehci_hcd { /* one per controller */
unsigned need_io_watchdog:1;
unsigned broken_periodic:1;
unsigned fs_i_thresh:1; /* Intel iso scheduling */
+ unsigned controller_resets_phy:1;
+ unsigned port_reset_no_wait:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 3b1289572d72..2240602fc81b 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -67,4 +67,12 @@ config NOP_USB_XCEIV
built-in with usb ip or which are autonomous and doesn't require any
phy programming such as ISP1x04 etc.
+config USB_TEGRA_OTG
+ boolean "Tegra OTG Driver"
+ depends on USB && ARCH_TEGRA
+ select USB_OTG_UTILS
+ help
+ Enable this driver on boards which use the internal VBUS and ID
+ sensing of the Tegra USB PHY.
+
endif # USB || OTG
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index aeb49a8ec412..fbf2a25a2e8c 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_USB_OTG_UTILS) += otg.o
# transceiver drivers
obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
+obj-$(CONFIG_USB_TEGRA_OTG) += tegra-otg.o
obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
diff --git a/drivers/usb/otg/tegra-otg.c b/drivers/usb/otg/tegra-otg.c
new file mode 100644
index 000000000000..542a184824a3
--- /dev/null
+++ b/drivers/usb/otg/tegra-otg.c
@@ -0,0 +1,393 @@
+/*
+ * drivers/usb/otg/tegra-otg.c
+ *
+ * OTG transceiver driver for Tegra UTMI phy
+ *
+ * Copyright (C) 2010 NVIDIA Corp.
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_device.h>
+#include <linux/tegra_usb.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#define USB_PHY_WAKEUP 0x408
+#define USB_ID_INT_EN (1 << 0)
+#define USB_ID_INT_STATUS (1 << 1)
+#define USB_ID_STATUS (1 << 2)
+#define USB_ID_PIN_WAKEUP_EN (1 << 6)
+#define USB_VBUS_WAKEUP_EN (1 << 30)
+#define USB_VBUS_INT_EN (1 << 8)
+#define USB_VBUS_INT_STATUS (1 << 9)
+#define USB_VBUS_STATUS (1 << 10)
+#define USB_INTS (USB_VBUS_INT_STATUS | USB_ID_INT_STATUS)
+
+struct tegra_otg_data {
+ struct otg_transceiver otg;
+ unsigned long int_status;
+ spinlock_t lock;
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ struct platform_device *host;
+ struct platform_device *pdev;
+};
+
+static inline unsigned long otg_readl(struct tegra_otg_data *tegra,
+ unsigned int offset)
+{
+ return readl(tegra->regs + offset);
+}
+
+static inline void otg_writel(struct tegra_otg_data *tegra, unsigned long val,
+ unsigned int offset)
+{
+ writel(val, tegra->regs + offset);
+}
+
+static const char *tegra_state_name(enum usb_otg_state state)
+{
+ if (state == OTG_STATE_A_HOST)
+ return "HOST";
+ if (state == OTG_STATE_B_PERIPHERAL)
+ return "PERIPHERAL";
+ if (state == OTG_STATE_A_SUSPEND)
+ return "SUSPEND";
+ return "INVALID";
+}
+
+void tegra_start_host(struct tegra_otg_data *tegra)
+{
+ int retval;
+ struct platform_device *pdev;
+ struct platform_device *host = tegra->host;
+ void *platform_data;
+
+ pdev = platform_device_alloc(host->name, host->id);
+ if (!pdev)
+ return;
+
+ if (host->resource) {
+ retval = platform_device_add_resources(pdev, host->resource,
+ host->num_resources);
+ if (retval)
+ goto error;
+ }
+
+ pdev->dev.dma_mask = host->dev.dma_mask;
+ pdev->dev.coherent_dma_mask = host->dev.coherent_dma_mask;
+
+ platform_data = kmalloc(sizeof(struct tegra_ehci_platform_data), GFP_KERNEL);
+ if (!platform_data)
+ goto error;
+
+ memcpy(platform_data, host->dev.platform_data,
+ sizeof(struct tegra_ehci_platform_data));
+ pdev->dev.platform_data = platform_data;
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto error_add;
+
+ tegra->pdev = pdev;
+ return;
+
+error_add:
+ kfree(platform_data);
+error:
+	pr_err("%s: failed to add the host controller device\n", __func__);
+ platform_device_put(pdev);
+}
+
+void tegra_stop_host(struct tegra_otg_data *tegra)
+{
+ if (tegra->pdev) {
+ platform_device_unregister(tegra->pdev);
+ tegra->pdev = NULL;
+ }
+}
+
+static irqreturn_t tegra_otg_irq_thread(int irq, void *data)
+{
+ struct tegra_otg_data *tegra = data;
+ struct otg_transceiver *otg = &tegra->otg;
+ enum usb_otg_state from = otg->state;
+ enum usb_otg_state to = OTG_STATE_UNDEFINED;
+ unsigned long flags;
+ unsigned long status;
+
+ clk_enable(tegra->clk);
+
+ status = otg_readl(tegra, USB_PHY_WAKEUP);
+
+ spin_lock_irqsave(&tegra->lock, flags);
+
+ if (tegra->int_status & USB_ID_INT_STATUS) {
+ if (status & USB_ID_STATUS)
+ to = OTG_STATE_A_SUSPEND;
+ else
+ to = OTG_STATE_A_HOST;
+ } else if (tegra->int_status & USB_VBUS_INT_STATUS) {
+ if (status & USB_VBUS_STATUS)
+ to = OTG_STATE_B_PERIPHERAL;
+ else
+ to = OTG_STATE_A_SUSPEND;
+ }
+
+ tegra->int_status = 0;
+
+ spin_unlock_irqrestore(&tegra->lock, flags);
+
+ otg->state = to;
+
+ dev_info(tegra->otg.dev, "%s --> %s", tegra_state_name(from),
+ tegra_state_name(to));
+
+ if (to == OTG_STATE_A_SUSPEND) {
+ if (from == OTG_STATE_A_HOST && tegra->host)
+ tegra_stop_host(tegra);
+ else if (from == OTG_STATE_B_PERIPHERAL && otg->gadget)
+ usb_gadget_vbus_disconnect(otg->gadget);
+ } else if (to == OTG_STATE_B_PERIPHERAL && otg->gadget) {
+ if (from == OTG_STATE_A_SUSPEND)
+ usb_gadget_vbus_connect(otg->gadget);
+ } else if (to == OTG_STATE_A_HOST && tegra->host) {
+ if (from == OTG_STATE_A_SUSPEND)
+ tegra_start_host(tegra);
+ }
+
+ clk_disable(tegra->clk);
+
+ return IRQ_HANDLED;
+
+}
+
+static irqreturn_t tegra_otg_irq(int irq, void *data)
+{
+ struct tegra_otg_data *tegra = data;
+ unsigned long val;
+
+ clk_enable(tegra->clk);
+
+ spin_lock(&tegra->lock);
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+
+	/* keep only the status bits whose matching interrupt enables are set */
+ val = (val & (val << 1)) & USB_INTS;
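+	/*
+	 * Worked example: USB_VBUS_INT_EN is bit 8 and USB_VBUS_INT_STATUS
+	 * is bit 9, so (val << 1) moves each enable bit into its status
+	 * position; the AND keeps a status bit only if its enable was set,
+	 * and USB_INTS masks everything else off.
+	 */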
+
+ tegra->int_status |= val;
+
+ spin_unlock(&tegra->lock);
+
+ clk_disable(tegra->clk);
+
+ return (val) ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+static int tegra_otg_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ struct tegra_otg_data *tegra;
+ unsigned long val;
+
+ tegra = container_of(otg, struct tegra_otg_data, otg);
+ otg->gadget = gadget;
+
+ clk_enable(tegra->clk);
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+ val &= ~(USB_VBUS_INT_STATUS | USB_ID_INT_STATUS);
+
+ if (gadget)
+ val |= (USB_VBUS_INT_EN | USB_VBUS_WAKEUP_EN);
+ else
+ val &= ~(USB_VBUS_INT_EN | USB_VBUS_WAKEUP_EN);
+
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+ clk_disable(tegra->clk);
+
+ return 0;
+}
+
+static int tegra_otg_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host)
+{
+ struct tegra_otg_data *tegra;
+ unsigned long val;
+
+ tegra = container_of(otg, struct tegra_otg_data, otg);
+ otg->host = host;
+
+ clk_enable(tegra->clk);
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+ val &= ~(USB_VBUS_INT_STATUS | USB_ID_INT_STATUS);
+
+ if (host)
+ val |= USB_ID_INT_EN | USB_ID_PIN_WAKEUP_EN;
+ else
+ val &= ~(USB_ID_INT_EN | USB_ID_PIN_WAKEUP_EN);
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+ clk_disable(tegra->clk);
+
+ return 0;
+}
+
+static int tegra_otg_set_power(struct otg_transceiver *otg, unsigned mA)
+{
+ return 0;
+}
+
+static int tegra_otg_set_suspend(struct otg_transceiver *otg, int suspend)
+{
+ return 0;
+}
+
+static int tegra_otg_probe(struct platform_device *pdev)
+{
+ struct tegra_otg_data *tegra;
+ struct resource *res;
+ unsigned long val;
+ int err;
+
+ tegra = kzalloc(sizeof(struct tegra_otg_data), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ tegra->otg.dev = &pdev->dev;
+ tegra->otg.label = "tegra-otg";
+ tegra->otg.state = OTG_STATE_UNDEFINED;
+ tegra->otg.set_host = tegra_otg_set_host;
+ tegra->otg.set_peripheral = tegra_otg_set_peripheral;
+ tegra->otg.set_suspend = tegra_otg_set_suspend;
+ tegra->otg.set_power = tegra_otg_set_power;
+ tegra->host = pdev->dev.platform_data;
+ spin_lock_init(&tegra->lock);
+
+ platform_set_drvdata(pdev, tegra);
+
+ tegra->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tegra->clk)) {
+ dev_err(&pdev->dev, "Can't get otg clock\n");
+ err = PTR_ERR(tegra->clk);
+ goto err_clk;
+ }
+
+ err = clk_enable(tegra->clk);
+ if (err)
+ goto err_clken;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto err_io;
+ }
+ tegra->regs = ioremap(res->start, resource_size(res));
+ if (!tegra->regs) {
+ err = -ENOMEM;
+ goto err_io;
+ }
+
+ val = otg_readl(tegra, USB_PHY_WAKEUP);
+
+ val &= ~(USB_VBUS_INT_STATUS | USB_VBUS_INT_EN |
+ USB_ID_INT_STATUS | USB_ID_INT_EN |
+ USB_VBUS_WAKEUP_EN | USB_ID_PIN_WAKEUP_EN);
+
+ otg_writel(tegra, val, USB_PHY_WAKEUP);
+
+ tegra->otg.state = OTG_STATE_A_SUSPEND;
+
+ err = otg_set_transceiver(&tegra->otg);
+ if (err) {
+ dev_err(&pdev->dev, "can't register transceiver (%d)\n", err);
+ goto err_otg;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENXIO;
+ goto err_irq;
+ }
+ tegra->irq = res->start;
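+	/*
+	 * Split-handler pattern: tegra_otg_irq() runs in hard IRQ context
+	 * and only acks the controller and latches the pending bits into
+	 * tegra->int_status; returning IRQ_WAKE_THREAD then runs
+	 * tegra_otg_irq_thread(), which performs the potentially sleeping
+	 * host/gadget state transitions.
+	 */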
+ err = request_threaded_irq(tegra->irq, tegra_otg_irq,
+ tegra_otg_irq_thread,
+ IRQF_SHARED, "tegra-otg", tegra);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register IRQ\n");
+ goto err_irq;
+ }
+
+ dev_info(&pdev->dev, "otg transceiver registered\n");
+ return 0;
+
+err_irq:
+ otg_set_transceiver(NULL);
+err_otg:
+ iounmap(tegra->regs);
+err_io:
+ clk_disable(tegra->clk);
+err_clken:
+ clk_put(tegra->clk);
+err_clk:
+ platform_set_drvdata(pdev, NULL);
+ kfree(tegra);
+ return err;
+}
+
+static int __exit tegra_otg_remove(struct platform_device *pdev)
+{
+ struct tegra_otg_data *tegra = platform_get_drvdata(pdev);
+
+ free_irq(tegra->irq, tegra);
+ otg_set_transceiver(NULL);
+ iounmap(tegra->regs);
+ clk_disable(tegra->clk);
+ clk_put(tegra->clk);
+ platform_set_drvdata(pdev, NULL);
+ kfree(tegra);
+
+ return 0;
+}
+
+static struct platform_driver tegra_otg_driver = {
+ .driver = {
+ .name = "tegra-otg",
+ },
+ .remove = __exit_p(tegra_otg_remove),
+ .probe = tegra_otg_probe,
+};
+
+static int __init tegra_otg_init(void)
+{
+ return platform_driver_register(&tegra_otg_driver);
+}
+subsys_initcall(tegra_otg_init);
+
+static void __exit tegra_otg_exit(void)
+{
+ platform_driver_unregister(&tegra_otg_driver);
+}
+module_exit(tegra_otg_exit);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 8b31fdfefc98..5176baf14aa6 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2256,6 +2256,7 @@ config FB_JZ4740
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
+source "drivers/video/tegra/Kconfig"
source "drivers/video/backlight/Kconfig"
source "drivers/video/display/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 485e8ed1318c..14200e4e1abd 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_FB_MB862XX) += mb862xx/
obj-$(CONFIG_FB_MSM) += msm/
obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
+obj-y += tegra/
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 563a98b88e9b..0841e6b27e5c 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -973,6 +973,92 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
DPRINTK("========================================\n");
}
+void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
+{
+ unsigned char *block;
+ unsigned char *dtd_block;
+ struct fb_videomode *mode, *m;
+ int num = 0, i, first = 1;
+
+ if (edid == NULL)
+ return;
+
+ if (!edid_checksum(edid))
+ return;
+
+ if (edid[0] != 0x2)
+ return;
+
+ mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
+ if (mode == NULL)
+ return;
+
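+	/*
+	 * CEA-861 extension block layout: byte 0 is the tag (0x02), byte 2
+	 * is the offset of the first Detailed Timing Descriptor, and bytes
+	 * 4 up to that offset hold the data block collection.  Each data
+	 * block starts with a header byte of (tag << 5) | length; tag 0x2
+	 * is a Video Data Block whose payload bytes are CEA/VIC mode
+	 * indices (bit 7 of an SVD marks a native mode; such values fall
+	 * outside the bound check below and are simply skipped).  Note the
+	 * mode buffer allocated above caps parsing at 50 modes.
+	 */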
+ block = edid + 0x4;
+ dtd_block = edid + edid[0x2];
+
+ DPRINTK(" Short Video Modes\n");
+ while (block < dtd_block) {
+ unsigned tag = block[0] >> 5;
+ unsigned len = block[0] & 0x1f;
+
+ block++;
+ if (dtd_block - block < len)
+ break;
+
+ if (tag == 0x2) {
+ for (i = 0; i < len; i++) {
+ unsigned m = block[i];
+ if (m > 0 && m < CEA_MODEDB_SIZE) {
+ memcpy(&mode[num], &cea_modes[m],
+ sizeof(mode[num]));
+ DPRINTK(" %d: %dx%d @ %d\n", m,
+ cea_modes[m].xres, cea_modes[m].yres,
+ cea_modes[m].refresh);
+
+ num++;
+ }
+ }
+ }
+
+ block += len;
+ }
+
+ DPRINTK(" Extended Detailed Timings\n");
+
+ for (i = 0; i < (128 - edid[0x2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
+ i++, dtd_block += DETAILED_TIMING_DESCRIPTION_SIZE) {
+ if (!(dtd_block[0] == 0x00 && dtd_block[1] == 0x00)) {
+ get_detailed_timing(dtd_block, &mode[num]);
+ if (first) {
+ mode[num].flag |= FB_MODE_IS_FIRST;
+ first = 0;
+ }
+ num++;
+ }
+ }
+
+ /* Yikes, EDID data is totally useless */
+ if (!num) {
+ kfree(mode);
+ return;
+ }
+
+ m = kzalloc((specs->modedb_len + num) *
+ sizeof(struct fb_videomode), GFP_KERNEL);
+
+ if (!m) {
+ kfree(mode);
+ return;
+ }
+
+ memmove(m, specs->modedb, specs->modedb_len * sizeof(struct fb_videomode));
+ memmove(m + specs->modedb_len, mode, num * sizeof(struct fb_videomode));
+ kfree(mode);
+ kfree(specs->modedb);
+ specs->modedb = m;
+ specs->modedb_len = specs->modedb_len + num;
+}
+
/*
* VESA Generalized Timing Formula (GTF)
*/
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 0a4dbdc1693a..209e6be1163e 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -402,6 +402,459 @@ const struct fb_videomode vesa_modes[] = {
FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
};
EXPORT_SYMBOL(vesa_modes);
+
+const struct fb_videomode cea_modes[CEA_MODEDB_SIZE] = {
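+	/*
+	 * Index 0 is a placeholder: CEA/VIC codes are 1-based.  Pixclock
+	 * values use the fbdev convention of a pixel period in
+	 * picoseconds, e.g. 13468 ps corresponds to a 74.25 MHz pixel
+	 * clock.
+	 */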
+ {},
+ /* 1: 640x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 640, .yres = 480, .pixclock = 39721,
+ .left_margin = 48, .right_margin = 16,
+ .upper_margin = 33, .lower_margin = 1,
+ .hsync_len = 96, .vsync_len = 2,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 2: 720x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 720, .yres = 480, .pixclock = 37037,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 3: 720x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 720, .yres = 480, .pixclock = 37037,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 4: 1280x720p @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 110,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 5: 1920x1080i @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 6: 720(1440)x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 7: 720(1440)x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 8: 720(1440)x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 240, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 9: 720(1440)x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 240, .pixclock = 37037,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 10: 2880x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 11: 2880x480i @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 12: 2880x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 240, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 13: 2880x240p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 240, .pixclock = 18518,
+ .left_margin = 228, .right_margin = 76,
+ .upper_margin = 15, .lower_margin = 5,
+ .hsync_len = 248, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 14: 1440x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 120, .right_margin = 32,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 124, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 15: 1440x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 120, .right_margin = 32,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 124, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 16: 1920x1080p @ 59.94Hz/60Hz */
+ {.refresh = 60, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 17: 720x576p @ 50Hz */
+ {.refresh = 50, .xres = 720, .yres = 576, .pixclock = 37037,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 18: 720x576p @ 50Hz */
+ {.refresh = 50, .xres = 720, .yres = 576, .pixclock = 37037,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 19: 1280x720p @ 50Hz */
+ {.refresh = 50, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 440,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 20: 1920x1080i @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 21: 720(1440)x576i @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 22: 720(1440)x576i @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 23: 720(1440)x288p @ 50Hz */
+ {.refresh = 49, .xres = 1440, .yres = 288, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 24: 720(1440)x288p @ 50Hz */
+ {.refresh = 49, .xres = 1440, .yres = 288, .pixclock = 37037,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 25: 2880x576i @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 26: 2880x576i @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 27: 2880x288p @ 50Hz */
+ {.refresh = 49, .xres = 2880, .yres = 288, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 28: 2880x288p @ 50Hz */
+ {.refresh = 49, .xres = 2880, .yres = 288, .pixclock = 18518,
+ .left_margin = 276, .right_margin = 48,
+ .upper_margin = 19, .lower_margin = 4,
+ .hsync_len = 252, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 29: 1440x576p @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 136, .right_margin = 24,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 128, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 30: 1440x576p @ 50Hz */
+ {.refresh = 50, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 136, .right_margin = 24,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 128, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 31: 1920x1080p @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 32: 1920x1080p @ 23.97Hz/24Hz */
+ {.refresh = 24, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 638,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 33: 1920x1080p @ 25Hz */
+ {.refresh = 25, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 34: 1920x1080p @ 29.97Hz/30Hz */
+ {.refresh = 30, .xres = 1920, .yres = 1080, .pixclock = 13468,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 35: 2880x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 9259,
+ .left_margin = 240, .right_margin = 64,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 248, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 36: 2880x480p @ 59.94Hz/60Hz */
+ {.refresh = 59, .xres = 2880, .yres = 480, .pixclock = 9259,
+ .left_margin = 240, .right_margin = 64,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 248, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 37: 2880x576p @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 9259,
+ .left_margin = 272, .right_margin = 48,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 256, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 38: 2880x576p @ 50Hz */
+ {.refresh = 50, .xres = 2880, .yres = 576, .pixclock = 9259,
+ .left_margin = 272, .right_margin = 48,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 256, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 39: 1920x1080i @ 50Hz */
+ {.refresh = 50, .xres = 1920, .yres = 1080, .pixclock = 13888,
+ .left_margin = 184, .right_margin = 32,
+ .upper_margin = 57, .lower_margin = 2,
+ .hsync_len = 168, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 40: 1920x1080i @ 100Hz */
+ {.refresh = 100, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 41: 1280x720p @ 100Hz */
+ {.refresh = 100, .xres = 1280, .yres = 720, .pixclock = 6734,
+ .left_margin = 220, .right_margin = 440,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 42: 720x576p @ 100Hz */
+ {.refresh = 100, .xres = 720, .yres = 576, .pixclock = 18518,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 43: 720x576p @ 100Hz */
+ {.refresh = 100, .xres = 720, .yres = 576, .pixclock = 18518,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 44: 720(1440)x576i @ 100Hz */
+ {.refresh = 100, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 45: 720(1440)x576i @ 100Hz */
+ {.refresh = 100, .xres = 1440, .yres = 576, .pixclock = 18518,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 46: 1920x1080i @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1920, .yres = 1080, .pixclock = 6734,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 15, .lower_margin = 2,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 47: 1280x720p @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1280, .yres = 720, .pixclock = 6734,
+ .left_margin = 220, .right_margin = 110,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 48: 720x480p @ 119.88/120Hz */
+ {.refresh = 119, .xres = 720, .yres = 480, .pixclock = 18518,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 49: 720x480p @ 119.88/120Hz */
+ {.refresh = 119, .xres = 720, .yres = 480, .pixclock = 18518,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 50: 720(1440)x480i @ 119.88/120Hz */
+ {.refresh = 119, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 51: 720(1440)x480i @ 119.88/120Hz */
+ {.refresh = 119, .xres = 1440, .yres = 480, .pixclock = 18518,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 52: 720x576p @ 200Hz */
+ {.refresh = 200, .xres = 720, .yres = 576, .pixclock = 9259,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 53: 720x576p @ 200Hz */
+ {.refresh = 200, .xres = 720, .yres = 576, .pixclock = 9259,
+ .left_margin = 68, .right_margin = 12,
+ .upper_margin = 39, .lower_margin = 5,
+ .hsync_len = 64, .vsync_len = 5,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 54: 720(1440)x576i @ 200Hz */
+ {.refresh = 200, .xres = 1440, .yres = 576, .pixclock = 9259,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 55: 720(1440)x576i @ 200Hz */
+ {.refresh = 200, .xres = 1440, .yres = 576, .pixclock = 9259,
+ .left_margin = 138, .right_margin = 24,
+ .upper_margin = 19, .lower_margin = 2,
+ .hsync_len = 126, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 56: 720x480p @ 239.76/240Hz */
+ {.refresh = 239, .xres = 720, .yres = 480, .pixclock = 9259,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 57: 720x480p @ 239.76/240Hz */
+ {.refresh = 239, .xres = 720, .yres = 480, .pixclock = 9259,
+ .left_margin = 60, .right_margin = 16,
+ .upper_margin = 30, .lower_margin = 9,
+ .hsync_len = 62, .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 58: 720(1440)x480i @ 239.76/240Hz */
+ {.refresh = 239, .xres = 1440, .yres = 480, .pixclock = 9259,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 59: 720(1440)x480i @ 239.76/240Hz */
+ {.refresh = 239, .xres = 1440, .yres = 480, .pixclock = 9259,
+ .left_margin = 114, .right_margin = 38,
+ .upper_margin = 15, .lower_margin = 4,
+ .hsync_len = 124, .vsync_len = 3,
+ .sync = 0,
+ .vmode = FB_VMODE_INTERLACED},
+ /* 60: 1280x720p @ 23.97Hz/24Hz */
+ {.refresh = 24, .xres = 1280, .yres = 720, .pixclock = 16835,
+ .left_margin = 220, .right_margin = 1760,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 61: 1280x720p @ 25Hz */
+ {.refresh = 25, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 2420,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 62: 1280x720p @ 29.97Hz/30Hz */
+ {.refresh = 30, .xres = 1280, .yres = 720, .pixclock = 13468,
+ .left_margin = 220, .right_margin = 1760,
+ .upper_margin = 20, .lower_margin = 5,
+ .hsync_len = 40, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 63: 1920x1080p @ 119.88/120Hz */
+ {.refresh = 120, .xres = 1920, .yres = 1080, .pixclock = 3367,
+ .left_margin = 148, .right_margin = 88,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+ /* 64: 1920x1080p @ 100Hz */
+ {.refresh = 100, .xres = 1920, .yres = 1080, .pixclock = 3367,
+ .left_margin = 148, .right_margin = 528,
+ .upper_margin = 36, .lower_margin = 4,
+ .hsync_len = 44, .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED},
+};
+EXPORT_SYMBOL(cea_modes);
#endif /* CONFIG_FB_MODE_HELPERS */
/**
@@ -923,7 +1376,7 @@ int fb_add_videomode(const struct fb_videomode *mode, struct list_head *head)
if (!modelist)
return -ENOMEM;
modelist->mode = *mode;
- list_add(&modelist->list, head);
+ list_add_tail(&modelist->list, head);
}
return 0;
}
diff --git a/drivers/video/tegra/Kconfig b/drivers/video/tegra/Kconfig
new file mode 100644
index 000000000000..2b8160877688
--- /dev/null
+++ b/drivers/video/tegra/Kconfig
@@ -0,0 +1,65 @@
+if ARCH_TEGRA
+
+comment "NVIDIA Tegra Display Driver options"
+
+config TEGRA_GRHOST
+ tristate "Tegra graphics host driver"
+ depends on TEGRA_IOVMM
+ default n
+ help
+ Driver for the Tegra graphics host hardware.
+
+config TEGRA_DC
+	tristate "Tegra Display Controller"
+ depends on ARCH_TEGRA
+ select FB_MODE_HELPERS
+ select I2C
+ help
+ Tegra display controller support.
+
+config FB_TEGRA
+ tristate "Tegra Framebuffer driver"
+ depends on TEGRA_DC && FB = y
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ default FB
+ help
+ Framebuffer device support for the Tegra display controller.
+
+config TEGRA_NVMAP
+ bool "Tegra GPU memory management driver (nvmap)"
+ default y
+ help
+ Say Y here to include the memory management driver for the Tegra
+	  GPU, multimedia and display subsystems.
+
+config NVMAP_RECLAIM_UNPINNED_VM
+ bool "Virtualize IOVMM memory in nvmap"
+ depends on TEGRA_NVMAP && TEGRA_IOVMM
+ default y
+ help
+	  Say Y here to enable nvmap to reclaim I/O virtual memory after
+	  it has been unpinned and re-use it for other handles. This can
+	  provide a larger I/O virtual address space than the hardware
+	  would normally support, at a slight cost in performance.
+
+config NVMAP_ALLOW_SYSMEM
+ bool "Allow physical system memory to be used by nvmap"
+ depends on TEGRA_NVMAP
+ default y
+ help
+ Say Y here to allow nvmap to use physical system memory (i.e.,
+ shared with the operating system but not translated through
+ an IOVMM device) for allocations.
+
+config NVMAP_HIGHMEM_ONLY
+ bool "Use only HIGHMEM for nvmap"
+ depends on TEGRA_NVMAP && (NVMAP_ALLOW_SYSMEM || TEGRA_IOVMM) && HIGHMEM
+ default n
+ help
+ Say Y here to restrict nvmap system memory allocations (both
+ physical system memory and IOVMM) to just HIGHMEM pages.
+
+endif
+
diff --git a/drivers/video/tegra/Makefile b/drivers/video/tegra/Makefile
new file mode 100644
index 000000000000..ef9e709303df
--- /dev/null
+++ b/drivers/video/tegra/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_TEGRA_GRHOST) += host/
+obj-$(CONFIG_TEGRA_DC) += dc/
+obj-$(CONFIG_FB_TEGRA) += fb.o
+obj-$(CONFIG_TEGRA_NVMAP) += nvmap/
diff --git a/drivers/video/tegra/dc/Makefile b/drivers/video/tegra/dc/Makefile
new file mode 100644
index 000000000000..eb39d5d28e92
--- /dev/null
+++ b/drivers/video/tegra/dc/Makefile
@@ -0,0 +1,4 @@
+obj-y += dc.o
+obj-y += rgb.o
+obj-y += hdmi.o
+obj-y += edid.o
\ No newline at end of file
diff --git a/drivers/video/tegra/dc/dc.c b/drivers/video/tegra/dc/dc.c
new file mode 100644
index 000000000000..2819a93d8442
--- /dev/null
+++ b/drivers/video/tegra/dc/dc.c
@@ -0,0 +1,1309 @@
+/*
+ * drivers/video/tegra/dc/dc.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <mach/clk.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <mach/mc.h>
+#include <mach/nvhost.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+
+static int no_vsync;
+
+module_param_named(no_vsync, no_vsync, int, S_IRUGO | S_IWUSR);
+
+struct tegra_dc *tegra_dcs[TEGRA_MAX_DC];
+
+DEFINE_MUTEX(tegra_dc_lock);
+
+static inline int tegra_dc_fmt_bpp(int fmt)
+{
+ switch (fmt) {
+ case TEGRA_WIN_FMT_P1:
+ return 1;
+
+ case TEGRA_WIN_FMT_P2:
+ return 2;
+
+ case TEGRA_WIN_FMT_P4:
+ return 4;
+
+ case TEGRA_WIN_FMT_P8:
+ return 8;
+
+ case TEGRA_WIN_FMT_B4G4R4A4:
+ case TEGRA_WIN_FMT_B5G5R5A:
+ case TEGRA_WIN_FMT_B5G6R5:
+ case TEGRA_WIN_FMT_AB5G5R5:
+ return 16;
+
+ case TEGRA_WIN_FMT_B8G8R8A8:
+ case TEGRA_WIN_FMT_R8G8B8A8:
+ case TEGRA_WIN_FMT_B6x2G6x2R6x2A8:
+ case TEGRA_WIN_FMT_R6x2G6x2B6x2A8:
+ return 32;
+
+	/* for planar formats, return the bpp of the 8-bit Y plane */
+ case TEGRA_WIN_FMT_YCbCr420P:
+ case TEGRA_WIN_FMT_YUV420P:
+ case TEGRA_WIN_FMT_YCbCr422P:
+ case TEGRA_WIN_FMT_YUV422P:
+ return 8;
+
+ case TEGRA_WIN_FMT_YCbCr422:
+ case TEGRA_WIN_FMT_YUV422:
+ case TEGRA_WIN_FMT_YCbCr422R:
+ case TEGRA_WIN_FMT_YUV422R:
+ case TEGRA_WIN_FMT_YCbCr422RA:
+ case TEGRA_WIN_FMT_YUV422RA:
+ /* FIXME: need to know the bpp of these formats */
+ return 0;
+ }
+ return 0;
+}
+
+static inline bool tegra_dc_is_yuv_planar(int fmt)
+{
+ switch (fmt) {
+ case TEGRA_WIN_FMT_YUV420P:
+ case TEGRA_WIN_FMT_YCbCr420P:
+ case TEGRA_WIN_FMT_YCbCr422P:
+ case TEGRA_WIN_FMT_YUV422P:
+ return true;
+ }
+ return false;
+}
+
+#define DUMP_REG(a) do { \
+ snprintf(buff, sizeof(buff), "%-32s\t%03x\t%08lx\n", \
+ #a, a, tegra_dc_readl(dc, a)); \
+ print(data, buff); \
+ } while (0)
+
+static void _dump_regs(struct tegra_dc *dc, void *data,
+		       void (*print)(void *data, const char *str))
+{
+ int i;
+ char buff[256];
+
+ tegra_dc_io_start(dc);
+
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE);
+ DUMP_REG(DC_CMD_INT_STATUS);
+ DUMP_REG(DC_CMD_INT_MASK);
+ DUMP_REG(DC_CMD_INT_ENABLE);
+ DUMP_REG(DC_CMD_INT_TYPE);
+ DUMP_REG(DC_CMD_INT_POLARITY);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE1);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE2);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE3);
+ DUMP_REG(DC_CMD_STATE_ACCESS);
+ DUMP_REG(DC_CMD_STATE_CONTROL);
+ DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_CMD_REG_ACT_CONTROL);
+
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
+ DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
+ DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY);
+ DUMP_REG(DC_DISP_MEM_HIGH_PRIORITY_TIMER);
+ DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
+ DUMP_REG(DC_DISP_REF_TO_SYNC);
+ DUMP_REG(DC_DISP_SYNC_WIDTH);
+ DUMP_REG(DC_DISP_BACK_PORCH);
+ DUMP_REG(DC_DISP_DISP_ACTIVE);
+ DUMP_REG(DC_DISP_FRONT_PORCH);
+ DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
+ DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
+ DUMP_REG(DC_DISP_M0_CONTROL);
+ DUMP_REG(DC_DISP_M1_CONTROL);
+ DUMP_REG(DC_DISP_DI_CONTROL);
+ DUMP_REG(DC_DISP_PP_CONTROL);
+ DUMP_REG(DC_DISP_PP_SELECT_A);
+ DUMP_REG(DC_DISP_PP_SELECT_B);
+ DUMP_REG(DC_DISP_PP_SELECT_C);
+ DUMP_REG(DC_DISP_PP_SELECT_D);
+ DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
+ DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
+ DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
+ DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
+ DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
+ DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
+ DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
+ DUMP_REG(DC_DISP_BORDER_COLOR);
+ DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
+ DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
+ DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
+ DUMP_REG(DC_DISP_CURSOR_POSITION);
+ DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
+ DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
+ DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0C_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
+ DUMP_REG(DC_DISP_DAC_CRT_CTRL);
+ DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
+
+
+ for (i = 0; i < 3; i++) {
+ print(data, "\n");
+ snprintf(buff, sizeof(buff), "WINDOW %c:\n", 'A' + i);
+ print(data, buff);
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << i,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_WIN_WIN_OPTIONS);
+ DUMP_REG(DC_WIN_BYTE_SWAP);
+ DUMP_REG(DC_WIN_BUFFER_CONTROL);
+ DUMP_REG(DC_WIN_COLOR_DEPTH);
+ DUMP_REG(DC_WIN_POSITION);
+ DUMP_REG(DC_WIN_SIZE);
+ DUMP_REG(DC_WIN_PRESCALED_SIZE);
+ DUMP_REG(DC_WIN_H_INITIAL_DDA);
+ DUMP_REG(DC_WIN_V_INITIAL_DDA);
+ DUMP_REG(DC_WIN_DDA_INCREMENT);
+ DUMP_REG(DC_WIN_LINE_STRIDE);
+ DUMP_REG(DC_WIN_BUF_STRIDE);
+ DUMP_REG(DC_WIN_UV_BUF_STRIDE);
+ DUMP_REG(DC_WIN_BLEND_NOKEY);
+ DUMP_REG(DC_WIN_BLEND_1WIN);
+ DUMP_REG(DC_WIN_BLEND_2WIN_X);
+ DUMP_REG(DC_WIN_BLEND_2WIN_Y);
+ DUMP_REG(DC_WIN_BLEND_3WIN_XY);
+ DUMP_REG(DC_WINBUF_START_ADDR);
+ DUMP_REG(DC_WINBUF_START_ADDR_U);
+ DUMP_REG(DC_WINBUF_START_ADDR_V);
+ DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
+ DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
+ DUMP_REG(DC_WINBUF_UFLOW_STATUS);
+ DUMP_REG(DC_WIN_CSC_YOF);
+ DUMP_REG(DC_WIN_CSC_KYRGB);
+ DUMP_REG(DC_WIN_CSC_KUR);
+ DUMP_REG(DC_WIN_CSC_KVR);
+ DUMP_REG(DC_WIN_CSC_KUG);
+ DUMP_REG(DC_WIN_CSC_KVG);
+ DUMP_REG(DC_WIN_CSC_KUB);
+ DUMP_REG(DC_WIN_CSC_KVB);
+ }
+
+ tegra_dc_io_end(dc);
+}
+
+#undef DUMP_REG
+
+#ifdef DEBUG
+static void dump_regs_print(void *data, const char *str)
+{
+ struct tegra_dc *dc = data;
+ dev_dbg(&dc->ndev->dev, "%s", str);
+}
+
+static void dump_regs(struct tegra_dc *dc)
+{
+ _dump_regs(dc, dc, dump_regs_print);
+}
+#else
+
+static void dump_regs(struct tegra_dc *dc) {}
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+static void dbg_regs_print(void *data, const char *str)
+{
+ struct seq_file *s = data;
+
+ seq_printf(s, "%s", str);
+}
+
+static int dbg_dc_show(struct seq_file *s, void *unused)
+{
+ struct tegra_dc *dc = s->private;
+
+ _dump_regs(dc, s, dbg_regs_print);
+
+ return 0;
+}
+
+
+static int dbg_dc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_dc_show, inode->i_private);
+}
+
+static const struct file_operations dbg_fops = {
+ .open = dbg_dc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void tegra_dc_dbg_add(struct tegra_dc *dc)
+{
+ char name[32];
+
+ snprintf(name, sizeof(name), "tegra_dc%d_regs", dc->ndev->id);
+ (void) debugfs_create_file(name, S_IRUGO, NULL, dc, &dbg_fops);
+}
+#else
+static void tegra_dc_dbg_add(struct tegra_dc *dc) {}
+
+#endif
+
+
+static int tegra_dc_add(struct tegra_dc *dc, int index)
+{
+ int ret = 0;
+
+ mutex_lock(&tegra_dc_lock);
+ if (index >= TEGRA_MAX_DC) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (tegra_dcs[index] != NULL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ tegra_dcs[index] = dc;
+
+out:
+ mutex_unlock(&tegra_dc_lock);
+
+ return ret;
+}
+
+struct tegra_dc *tegra_dc_get_dc(unsigned idx)
+{
+ if (idx < TEGRA_MAX_DC)
+ return tegra_dcs[idx];
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(tegra_dc_get_dc);
+
+struct tegra_dc_win *tegra_dc_get_window(struct tegra_dc *dc, unsigned win)
+{
+ if (win >= dc->n_windows)
+ return NULL;
+
+ return &dc->windows[win];
+}
+EXPORT_SYMBOL(tegra_dc_get_window);
+
+static int get_topmost_window(u32 *depths, unsigned long *wins)
+{
+ int idx, best = -1;
+
+ for_each_set_bit(idx, wins, DC_N_WINDOWS) {
+ if (best == -1 || depths[idx] < depths[best])
+ best = idx;
+ }
+ clear_bit(best, wins);
+ return best;
+}
+
+static u32 blend_topwin(u32 flags)
+{
+ if (flags & TEGRA_WIN_FLAG_BLEND_COVERAGE)
+ return BLEND(NOKEY, ALPHA, 0xff, 0xff);
+ else if (flags & TEGRA_WIN_FLAG_BLEND_PREMULT)
+ return BLEND(NOKEY, PREMULT, 0xff, 0xff);
+ else
+ return BLEND(NOKEY, FIX, 0xff, 0xff);
+}
+
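+/*
+ * There are two possible partner windows for the two-window blend
+ * registers; xy (0 or 1) selects which of the windows other than idx
+ * this result is computed against, matching the X/Y register pair
+ * written in tegra_dc_set_blending().
+ */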
+static u32 blend_2win(int idx, unsigned long behind_mask, u32* flags, int xy)
+{
+ int other;
+
+ for (other = 0; other < DC_N_WINDOWS; other++) {
+ if (other != idx && (xy-- == 0))
+ break;
+ }
+ if (BIT(other) & behind_mask)
+ return blend_topwin(flags[idx]);
+ else if (flags[other])
+ return BLEND(NOKEY, DEPENDANT, 0x00, 0x00);
+ else
+ return BLEND(NOKEY, FIX, 0x00, 0x00);
+}
+
+static u32 blend_3win(int idx, unsigned long behind_mask, u32* flags)
+{
+ unsigned long infront_mask;
+ int first;
+
+ infront_mask = ~(behind_mask | BIT(idx));
+ infront_mask &= (BIT(DC_N_WINDOWS) - 1);
+ first = ffs(infront_mask) - 1;
+
+ if (!infront_mask)
+ return blend_topwin(flags[idx]);
+ else if (behind_mask && first != -1 && flags[first])
+ return BLEND(NOKEY, DEPENDANT, 0x00, 0x00);
+ else
+ return BLEND(NOKEY, FIX, 0x0, 0x0);
+}
+
+static void tegra_dc_set_blending(struct tegra_dc *dc, struct tegra_dc_blend *blend)
+{
+ unsigned long mask = BIT(DC_N_WINDOWS) - 1;
+
+ while (mask) {
+ int idx = get_topmost_window(blend->z, &mask);
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << idx,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+ tegra_dc_writel(dc, BLEND(NOKEY, FIX, 0xff, 0xff),
+ DC_WIN_BLEND_NOKEY);
+ tegra_dc_writel(dc, BLEND(NOKEY, FIX, 0xff, 0xff),
+ DC_WIN_BLEND_1WIN);
+ tegra_dc_writel(dc, blend_2win(idx, mask, blend->flags, 0),
+ DC_WIN_BLEND_2WIN_X);
+ tegra_dc_writel(dc, blend_2win(idx, mask, blend->flags, 1),
+ DC_WIN_BLEND_2WIN_Y);
+ tegra_dc_writel(dc, blend_3win(idx, mask, blend->flags),
+ DC_WIN_BLEND_3WIN_XY);
+ }
+}
+
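+/*
+ * Program the per-window YUV-to-RGB colour space converter.  The
+ * fixed-point coefficients below (e.g. 0x012a ~ 1.164, 0x0198 ~ 1.596)
+ * correspond to the standard BT.601 limited-range transform; the exact
+ * register encoding is defined by the DC hardware documentation.
+ */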
+static void tegra_dc_set_csc(struct tegra_dc *dc)
+{
+ tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
+ tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
+ tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
+ tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
+ tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
+ tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
+ tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
+}
+
+static void tegra_dc_set_scaling_filter(struct tegra_dc *dc)
+{
+ unsigned i;
+ unsigned v0 = 128;
+ unsigned v1 = 0;
+ /* linear horizontal and vertical filters */
+ for (i = 0; i < 16; i++) {
+ tegra_dc_writel(dc, (v1 << 16) | (v0 << 8),
+ DC_WIN_H_FILTER_P(i));
+
+ tegra_dc_writel(dc, v0,
+ DC_WIN_V_FILTER_P(i));
+ v0 -= 8;
+ v1 += 8;
+ }
+}
+
+/* does not support updating windows on multiple dcs in one call */
+int tegra_dc_update_windows(struct tegra_dc_win *windows[], int n)
+{
+ struct tegra_dc *dc;
+ unsigned long update_mask = GENERAL_ACT_REQ;
+ unsigned long val;
+ bool update_blend = false;
+ int i;
+
+ dc = windows[0]->dc;
+
+ mutex_lock(&dc->lock);
+
+ if (!dc->enabled) {
+ mutex_unlock(&dc->lock);
+ return -EFAULT;
+ }
+
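+	/*
+	 * Window state is normally written to the ASSEMBLY register copy
+	 * and latched into ACTIVE at a frame boundary; the no_vsync debug
+	 * parameter writes the ACTIVE copy directly, taking effect
+	 * immediately at the cost of tear-free updates.
+	 */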
+ if (no_vsync)
+ tegra_dc_writel(dc, WRITE_MUX_ACTIVE | READ_MUX_ACTIVE, DC_CMD_STATE_ACCESS);
+ else
+ tegra_dc_writel(dc, WRITE_MUX_ASSEMBLY | READ_MUX_ASSEMBLY, DC_CMD_STATE_ACCESS);
+
+ for (i = 0; i < n; i++) {
+ struct tegra_dc_win *win = windows[i];
+ unsigned h_dda;
+ unsigned v_dda;
+ bool yuvp = tegra_dc_is_yuv_planar(win->fmt);
+
+ if (win->z != dc->blend.z[win->idx]) {
+ dc->blend.z[win->idx] = win->z;
+ update_blend = true;
+ }
+ if ((win->flags & TEGRA_WIN_BLEND_FLAGS_MASK) !=
+ dc->blend.flags[win->idx]) {
+ dc->blend.flags[win->idx] =
+ win->flags & TEGRA_WIN_BLEND_FLAGS_MASK;
+ update_blend = true;
+ }
+
+ tegra_dc_writel(dc, WINDOW_A_SELECT << win->idx,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ if (!no_vsync)
+ update_mask |= WIN_A_ACT_REQ << win->idx;
+
+ if (!(win->flags & TEGRA_WIN_FLAG_ENABLED)) {
+ tegra_dc_writel(dc, 0, DC_WIN_WIN_OPTIONS);
+ continue;
+ }
+
+ tegra_dc_writel(dc, win->fmt, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+ tegra_dc_writel(dc,
+ V_POSITION(win->out_y) | H_POSITION(win->out_x),
+ DC_WIN_POSITION);
+ tegra_dc_writel(dc,
+ V_SIZE(win->out_h) | H_SIZE(win->out_w),
+ DC_WIN_SIZE);
+ tegra_dc_writel(dc,
+ V_PRESCALED_SIZE(win->h) |
+ H_PRESCALED_SIZE(win->w * tegra_dc_fmt_bpp(win->fmt) / 8),
+ DC_WIN_PRESCALED_SIZE);
+
+ h_dda = ((win->w - 1) * 0x1000) / max_t(int, win->out_w - 1, 1);
+ v_dda = ((win->h - 1) * 0x1000) / max_t(int, win->out_h - 1, 1);
+ tegra_dc_writel(dc, V_DDA_INC(v_dda) | H_DDA_INC(h_dda),
+ DC_WIN_DDA_INCREMENT);
+ tegra_dc_writel(dc, 0, DC_WIN_H_INITIAL_DDA);
+ tegra_dc_writel(dc, 0, DC_WIN_V_INITIAL_DDA);
+
+ tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+ tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_dc_writel(dc, (unsigned long)win->phys_addr,
+ DC_WINBUF_START_ADDR);
+
+ if (!yuvp) {
+ tegra_dc_writel(dc, win->stride, DC_WIN_LINE_STRIDE);
+ } else {
+ tegra_dc_writel(dc,
+ (unsigned long)win->phys_addr +
+ (unsigned long)win->offset_u,
+ DC_WINBUF_START_ADDR_U);
+ tegra_dc_writel(dc,
+ (unsigned long)win->phys_addr +
+ (unsigned long)win->offset_v,
+ DC_WINBUF_START_ADDR_V);
+ tegra_dc_writel(dc,
+ LINE_STRIDE(win->stride) |
+ UV_LINE_STRIDE(win->stride_uv),
+ DC_WIN_LINE_STRIDE);
+ }
+
+ tegra_dc_writel(dc, win->x * tegra_dc_fmt_bpp(win->fmt) / 8,
+ DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, win->y, DC_WINBUF_ADDR_V_OFFSET);
+
+ val = WIN_ENABLE;
+ if (yuvp)
+ val |= CSC_ENABLE;
+ else if (tegra_dc_fmt_bpp(win->fmt) < 24)
+ val |= COLOR_EXPAND;
+
+ if (win->w != win->out_w)
+ val |= H_FILTER_ENABLE;
+ if (win->h != win->out_h)
+ val |= V_FILTER_ENABLE;
+
+ tegra_dc_writel(dc, val, DC_WIN_WIN_OPTIONS);
+
+ win->dirty = no_vsync ? 0 : 1;
+ }
+
+ if (update_blend) {
+ tegra_dc_set_blending(dc, &dc->blend);
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ if (!no_vsync)
+ dc->windows[i].dirty = 1;
+ update_mask |= WIN_A_ACT_REQ << i;
+ }
+ }
+
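+	/*
+	 * The *_UPDATE bits of DC_CMD_STATE_CONTROL sit eight bits above
+	 * the corresponding *_ACT_REQ bits, so (update_mask << 8) latches
+	 * the assembly registers first; the ACT_REQ write below then
+	 * requests activation at the next frame.
+	 */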
+ tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+
+ if (!no_vsync) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ val |= FRAME_END_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_ENABLE);
+
+ val = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ val |= FRAME_END_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_MASK);
+ }
+
+ tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_update_windows);
+
+u32 tegra_dc_get_syncpt_id(const struct tegra_dc *dc)
+{
+ return dc->syncpt_id;
+}
+EXPORT_SYMBOL(tegra_dc_get_syncpt_id);
+
+u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc)
+{
+ u32 max;
+
+ mutex_lock(&dc->lock);
+ max = nvhost_syncpt_incr_max(&dc->ndev->host->syncpt, dc->syncpt_id, 1);
+ dc->syncpt_max = max;
+ mutex_unlock(&dc->lock);
+
+ return max;
+}
+
+void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, u32 val)
+{
+ mutex_lock(&dc->lock);
+ while (dc->syncpt_min < val) {
+ dc->syncpt_min++;
+ nvhost_syncpt_cpu_incr(&dc->ndev->host->syncpt, dc->syncpt_id);
+ }
+ mutex_unlock(&dc->lock);
+}
+
+static bool tegra_dc_windows_are_clean(struct tegra_dc_win *windows[],
+ int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (windows[i]->dirty)
+ return false;
+ }
+
+ return true;
+}
+
+/* does not support syncing windows on multiple dcs in one call */
+int tegra_dc_sync_windows(struct tegra_dc_win *windows[], int n)
+{
+ if (n < 1 || n > DC_N_WINDOWS)
+ return -EINVAL;
+
+ if (!windows[0]->dc->enabled)
+ return -EFAULT;
+
+ return wait_event_interruptible_timeout(windows[0]->dc->wq,
+ tegra_dc_windows_are_clean(windows, n),
+ HZ);
+}
+EXPORT_SYMBOL(tegra_dc_sync_windows);
+
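+/*
+ * The shift clock is derived from the parent as rate * 2 / div with
+ * div >= 2, i.e. in half-rate steps.  Illustrative example: a 216 MHz
+ * parent and a 74.25 MHz target give
+ * div = DIV_ROUND_CLOSEST(432 MHz, 74.25 MHz) = 6, for a rounded pixel
+ * clock of 72 MHz.
+ */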
+static unsigned long tegra_dc_pclk_round_rate(struct tegra_dc *dc, int pclk)
+{
+ unsigned long rate;
+ unsigned long div;
+
+ rate = clk_get_rate(dc->clk);
+
+ div = DIV_ROUND_CLOSEST(rate * 2, pclk);
+
+ if (div < 2)
+ return 0;
+
+ return rate * 2 / div;
+}
+
+void tegra_dc_setup_clk(struct tegra_dc *dc, struct clk *clk)
+{
+ int pclk;
+
+ if (dc->out->type == TEGRA_DC_OUT_HDMI) {
+ unsigned long rate;
+ struct clk *pll_d_out0_clk =
+ clk_get_sys(NULL, "pll_d_out0");
+ struct clk *pll_d_clk =
+ clk_get_sys(NULL, "pll_d");
+
+ if (dc->mode.pclk > 70000000)
+ rate = 594000000;
+ else
+ rate = 216000000;
+
+ if (rate != clk_get_rate(pll_d_clk))
+ clk_set_rate(pll_d_clk, rate);
+
+ if (clk_get_parent(clk) != pll_d_out0_clk)
+ clk_set_parent(clk, pll_d_out0_clk);
+ }
+
+ pclk = tegra_dc_pclk_round_rate(dc, dc->mode.pclk);
+ tegra_dvfs_set_rate(clk, pclk);
+}
+
+static int tegra_dc_program_mode(struct tegra_dc *dc, struct tegra_dc_mode *mode)
+{
+ unsigned long val;
+ unsigned long rate;
+ unsigned long div;
+ unsigned long pclk;
+
+ tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+ tegra_dc_writel(dc, mode->h_ref_to_sync | (mode->v_ref_to_sync << 16),
+ DC_DISP_REF_TO_SYNC);
+ tegra_dc_writel(dc, mode->h_sync_width | (mode->v_sync_width << 16),
+ DC_DISP_SYNC_WIDTH);
+ tegra_dc_writel(dc, mode->h_back_porch | (mode->v_back_porch << 16),
+ DC_DISP_BACK_PORCH);
+ tegra_dc_writel(dc, mode->h_active | (mode->v_active << 16),
+ DC_DISP_DISP_ACTIVE);
+ tegra_dc_writel(dc, mode->h_front_porch | (mode->v_front_porch << 16),
+ DC_DISP_FRONT_PORCH);
+
+ tegra_dc_writel(dc, DE_SELECT_ACTIVE | DE_CONTROL_NORMAL,
+ DC_DISP_DATA_ENABLE_OPTIONS);
+
+ /* TODO: MIPI/CRT/HDMI clock cals */
+
+ val = DISP_DATA_FORMAT_DF1P1C;
+
+ if (dc->out->align == TEGRA_DC_ALIGN_MSB)
+ val |= DISP_DATA_ALIGNMENT_MSB;
+ else
+ val |= DISP_DATA_ALIGNMENT_LSB;
+
+ if (dc->out->order == TEGRA_DC_ORDER_RED_BLUE)
+ val |= DISP_DATA_ORDER_RED_BLUE;
+ else
+ val |= DISP_DATA_ORDER_BLUE_RED;
+
+ tegra_dc_writel(dc, val, DC_DISP_DISP_INTERFACE_CONTROL);
+
+ rate = clk_get_rate(dc->clk);
+
+ pclk = tegra_dc_pclk_round_rate(dc, mode->pclk);
+ if (pclk < (mode->pclk / 100 * 99) ||
+ pclk > (mode->pclk / 100 * 109)) {
+ dev_err(&dc->ndev->dev,
+ "can't divide %ld clock to %d -1/+9%% %ld %d %d\n",
+ rate, mode->pclk,
+ pclk, (mode->pclk / 100 * 99),
+ (mode->pclk / 100 * 109));
+ return -EINVAL;
+ }
+
+ div = (rate * 2 / pclk) - 2;
+
+ tegra_dc_writel(dc, 0x00010001,
+ DC_DISP_SHIFT_CLOCK_OPTIONS);
+ tegra_dc_writel(dc, PIXEL_CLK_DIVIDER_PCD1 | SHIFT_CLK_DIVIDER(div),
+ DC_DISP_DISP_CLOCK_CONTROL);
+
+ return 0;
+}
+
+
+int tegra_dc_set_mode(struct tegra_dc *dc, const struct tegra_dc_mode *mode)
+{
+ memcpy(&dc->mode, mode, sizeof(dc->mode));
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_set_mode);
+
+static void tegra_dc_set_out(struct tegra_dc *dc, struct tegra_dc_out *out)
+{
+ dc->out = out;
+
+ if (out->n_modes > 0)
+ tegra_dc_set_mode(dc, &dc->out->modes[0]);
+
+ switch (out->type) {
+ case TEGRA_DC_OUT_RGB:
+ dc->out_ops = &tegra_dc_rgb_ops;
+ break;
+
+ case TEGRA_DC_OUT_HDMI:
+ dc->out_ops = &tegra_dc_hdmi_ops;
+ break;
+
+ default:
+ dc->out_ops = NULL;
+ break;
+ }
+
+ if (dc->out_ops && dc->out_ops->init)
+ dc->out_ops->init(dc);
+}
+
+
+static irqreturn_t tegra_dc_irq(int irq, void *ptr)
+{
+ struct tegra_dc *dc = ptr;
+ unsigned long status;
+ unsigned long val;
+ int i;
+
+ status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+ tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+
+ if (status & FRAME_END_INT) {
+ int completed = 0;
+ int dirty = 0;
+
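+		/*
+		 * A window's *_UPDATE bit self-clears once its assembly
+		 * state has been latched, so a clear bit here means that
+		 * window's update completed with this frame.
+		 */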
+ val = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ if (!(val & (WIN_A_UPDATE << i))) {
+ dc->windows[i].dirty = 0;
+ completed = 1;
+ } else {
+ dirty = 1;
+ }
+ }
+
+ if (!dirty) {
+ val = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ val &= ~FRAME_END_INT;
+ tegra_dc_writel(dc, val, DC_CMD_INT_ENABLE);
+ }
+
+ if (completed)
+ wake_up(&dc->wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_dc_set_color_control(struct tegra_dc *dc)
+{
+ u32 color_control;
+
+ switch (dc->out->depth) {
+ case 3:
+ color_control = BASE_COLOR_SIZE111;
+ break;
+
+ case 6:
+ color_control = BASE_COLOR_SIZE222;
+ break;
+
+ case 8:
+ color_control = BASE_COLOR_SIZE332;
+ break;
+
+ case 9:
+ color_control = BASE_COLOR_SIZE333;
+ break;
+
+ case 12:
+ color_control = BASE_COLOR_SIZE444;
+ break;
+
+ case 15:
+ color_control = BASE_COLOR_SIZE555;
+ break;
+
+ case 16:
+ color_control = BASE_COLOR_SIZE565;
+ break;
+
+ case 18:
+ color_control = BASE_COLOR_SIZE666;
+ break;
+
+ default:
+ color_control = BASE_COLOR_SIZE888;
+ break;
+ }
+
+ tegra_dc_writel(dc, color_control, DC_DISP_DISP_COLOR_CONTROL);
+}
+
+static void tegra_dc_init(struct tegra_dc *dc)
+{
+ u32 disp_syncpt;
+ u32 vblank_syncpt;
+ int i;
+
+ tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+ if (dc->ndev->id == 0) {
+ disp_syncpt = NVSYNCPT_DISP0;
+ vblank_syncpt = NVSYNCPT_VBLANK0;
+
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0A,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0B,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0C,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1B,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHC,
+ TEGRA_MC_PRIO_HIGH);
+ } else if (dc->ndev->id == 1) {
+ disp_syncpt = NVSYNCPT_DISP1;
+ vblank_syncpt = NVSYNCPT_VBLANK1;
+
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0AB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0BB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY0CB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAY1BB,
+ TEGRA_MC_PRIO_MED);
+ tegra_mc_set_priority(TEGRA_MC_CLIENT_DISPLAYHCB,
+ TEGRA_MC_PRIO_HIGH);
+ }
+ tegra_dc_writel(dc, 0x00000100 | vblank_syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+ tegra_dc_writel(dc, 0x00004700, DC_CMD_INT_TYPE);
+ tegra_dc_writel(dc, 0x0001c700, DC_CMD_INT_POLARITY);
+ tegra_dc_writel(dc, 0x00202020, DC_DISP_MEM_HIGH_PRIORITY);
+ tegra_dc_writel(dc, 0x00010101, DC_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ tegra_dc_writel(dc, 0x00000002, DC_CMD_INT_MASK);
+ tegra_dc_writel(dc, 0x00000000, DC_CMD_INT_ENABLE);
+
+ tegra_dc_writel(dc, 0x00000000, DC_DISP_BORDER_COLOR);
+
+ tegra_dc_set_color_control(dc);
+ for (i = 0; i < DC_N_WINDOWS; i++) {
+ tegra_dc_writel(dc, WINDOW_A_SELECT << i,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+ tegra_dc_set_csc(dc);
+ tegra_dc_set_scaling_filter(dc);
+ }
+
+
+ dc->syncpt_id = disp_syncpt;
+
+ dc->syncpt_min = dc->syncpt_max =
+ nvhost_syncpt_read(&dc->ndev->host->syncpt, disp_syncpt);
+
+ if (dc->mode.pclk)
+ tegra_dc_program_mode(dc, &dc->mode);
+}
+
+static bool _tegra_dc_enable(struct tegra_dc *dc)
+{
+ if (dc->mode.pclk == 0)
+ return false;
+
+ tegra_dc_io_start(dc);
+
+ if (dc->out && dc->out->enable)
+ dc->out->enable();
+
+ tegra_dc_setup_clk(dc, dc->clk);
+
+ clk_enable(dc->clk);
+ clk_enable(dc->emc_clk);
+ enable_irq(dc->irq);
+
+ tegra_dc_init(dc);
+
+ if (dc->out_ops && dc->out_ops->enable)
+ dc->out_ops->enable(dc);
+
+ /* force a full blending update */
+ dc->blend.z[0] = -1;
+
+ return true;
+}
+
+void tegra_dc_enable(struct tegra_dc *dc)
+{
+ mutex_lock(&dc->lock);
+
+ if (!dc->enabled)
+ dc->enabled = _tegra_dc_enable(dc);
+
+ mutex_unlock(&dc->lock);
+}
+
+static void _tegra_dc_disable(struct tegra_dc *dc)
+{
+ disable_irq(dc->irq);
+
+ if (dc->out_ops && dc->out_ops->disable)
+ dc->out_ops->disable(dc);
+
+ clk_disable(dc->emc_clk);
+ clk_disable(dc->clk);
+ tegra_dvfs_set_rate(dc->clk, 0);
+
+ if (dc->out && dc->out->disable)
+ dc->out->disable();
+
+ /* flush any pending syncpt waits */
+ while (dc->syncpt_min < dc->syncpt_max) {
+ dc->syncpt_min++;
+ nvhost_syncpt_cpu_incr(&dc->ndev->host->syncpt, dc->syncpt_id);
+ }
+
+ tegra_dc_io_end(dc);
+}
+
+
+void tegra_dc_disable(struct tegra_dc *dc)
+{
+ mutex_lock(&dc->lock);
+
+ if (dc->enabled) {
+ dc->enabled = false;
+ _tegra_dc_disable(dc);
+ }
+
+ mutex_unlock(&dc->lock);
+}
+
+static int tegra_dc_probe(struct nvhost_device *ndev)
+{
+ struct tegra_dc *dc;
+ struct clk *clk;
+ struct clk *emc_clk;
+ struct resource *res;
+ struct resource *base_res;
+ struct resource *fb_mem = NULL;
+ int ret = 0;
+ void __iomem *base;
+ int irq;
+ int i;
+ unsigned long emc_clk_rate;
+
+ if (!ndev->dev.platform_data) {
+ dev_err(&ndev->dev, "no platform data\n");
+ return -ENOENT;
+ }
+
+ dc = kzalloc(sizeof(struct tegra_dc), GFP_KERNEL);
+ if (!dc) {
+ dev_err(&ndev->dev, "can't allocate memory for tegra_dc\n");
+ return -ENOMEM;
+ }
+
+ irq = nvhost_get_irq_byname(ndev, "irq");
+ if (irq <= 0) {
+ dev_err(&ndev->dev, "no irq\n");
+ ret = -ENOENT;
+ goto err_free;
+ }
+
+ res = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "regs");
+ if (!res) {
+ dev_err(&ndev->dev, "no mem resource\n");
+ ret = -ENOENT;
+ goto err_free;
+ }
+
+ base_res = request_mem_region(res->start, resource_size(res), ndev->name);
+ if (!base_res) {
+ dev_err(&ndev->dev, "request_mem_region failed\n");
+ ret = -EBUSY;
+ goto err_free;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&ndev->dev, "registers can't be mapped\n");
+ ret = -EBUSY;
+ goto err_release_resource_reg;
+ }
+
+ fb_mem = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "fbmem");
+
+ clk = clk_get(&ndev->dev, NULL);
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_err(&ndev->dev, "can't get clock\n");
+ ret = -ENOENT;
+ goto err_iounmap_reg;
+ }
+
+ emc_clk = clk_get(&ndev->dev, "emc");
+ if (IS_ERR_OR_NULL(emc_clk)) {
+ dev_err(&ndev->dev, "can't get emc clock\n");
+ ret = -ENOENT;
+ goto err_put_clk;
+ }
+
+ dc->clk = clk;
+ dc->emc_clk = emc_clk;
+ dc->base_res = base_res;
+ dc->base = base;
+ dc->irq = irq;
+ dc->ndev = ndev;
+ dc->pdata = ndev->dev.platform_data;
+
+ /*
+	 * The emc clock is shared; its rate is set to satisfy the
+	 * requirements of every user on the bus.
+ */
+ emc_clk_rate = dc->pdata->emc_clk_rate;
+ clk_set_rate(emc_clk, emc_clk_rate ? emc_clk_rate : ULONG_MAX);
+
+ if (dc->pdata->flags & TEGRA_DC_FLAG_ENABLED)
+ dc->enabled = true;
+
+ mutex_init(&dc->lock);
+ init_waitqueue_head(&dc->wq);
+
+ dc->n_windows = DC_N_WINDOWS;
+ for (i = 0; i < dc->n_windows; i++) {
+ dc->windows[i].idx = i;
+ dc->windows[i].dc = dc;
+ }
+
+ if (request_irq(irq, tegra_dc_irq, IRQF_DISABLED,
+ dev_name(&ndev->dev), dc)) {
+ dev_err(&ndev->dev, "request_irq %d failed\n", irq);
+ ret = -EBUSY;
+ goto err_put_emc_clk;
+ }
+
+	/* hack to balance the enable_irq calls in _tegra_dc_enable() */
+ disable_irq(dc->irq);
+
+ ret = tegra_dc_add(dc, ndev->id);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "can't add dc\n");
+ goto err_free_irq;
+ }
+
+ nvhost_set_drvdata(ndev, dc);
+
+ if (dc->pdata->default_out)
+ tegra_dc_set_out(dc, dc->pdata->default_out);
+ else
+ dev_err(&ndev->dev, "No default output specified. Leaving output disabled.\n");
+
+ if (dc->enabled)
+ _tegra_dc_enable(dc);
+
+ tegra_dc_dbg_add(dc);
+
+ dev_info(&ndev->dev, "probed\n");
+
+ if (dc->pdata->fb) {
+ if (dc->pdata->fb->bits_per_pixel == -1) {
+ unsigned long fmt;
+ tegra_dc_writel(dc,
+ WINDOW_A_SELECT << dc->pdata->fb->win,
+ DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ fmt = tegra_dc_readl(dc, DC_WIN_COLOR_DEPTH);
+ dc->pdata->fb->bits_per_pixel =
+ tegra_dc_fmt_bpp(fmt);
+ }
+
+ dc->fb = tegra_fb_register(ndev, dc, dc->pdata->fb, fb_mem);
+ if (IS_ERR_OR_NULL(dc->fb))
+ dc->fb = NULL;
+ }
+
+ if (dc->out_ops && dc->out_ops->detect)
+ dc->out_ops->detect(dc);
+
+ return 0;
+
+err_free_irq:
+ free_irq(irq, dc);
+err_put_emc_clk:
+ clk_put(emc_clk);
+err_put_clk:
+ clk_put(clk);
+err_iounmap_reg:
+ iounmap(base);
+ if (fb_mem)
+ release_resource(fb_mem);
+err_release_resource_reg:
+ release_resource(base_res);
+err_free:
+ kfree(dc);
+
+ return ret;
+}
+
+static int tegra_dc_remove(struct nvhost_device *ndev)
+{
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+ if (dc->fb) {
+ tegra_fb_unregister(dc->fb);
+ if (dc->fb_mem)
+ release_resource(dc->fb_mem);
+ }
+
+
+ if (dc->enabled)
+ _tegra_dc_disable(dc);
+
+ free_irq(dc->irq, dc);
+ clk_put(dc->emc_clk);
+ clk_put(dc->clk);
+ iounmap(dc->base);
+	release_resource(dc->base_res);
+ kfree(dc);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dc_suspend(struct nvhost_device *ndev, pm_message_t state)
+{
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+ dev_info(&ndev->dev, "suspend\n");
+
+ mutex_lock(&dc->lock);
+
+ if (dc->out_ops && dc->out_ops->suspend)
+ dc->out_ops->suspend(dc);
+
+ if (dc->enabled) {
+ tegra_fb_suspend(dc->fb);
+ _tegra_dc_disable(dc);
+ }
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+
+static int tegra_dc_resume(struct nvhost_device *ndev)
+{
+ struct tegra_dc *dc = nvhost_get_drvdata(ndev);
+
+ dev_info(&ndev->dev, "resume\n");
+
+ mutex_lock(&dc->lock);
+ if (dc->enabled)
+ _tegra_dc_enable(dc);
+
+ if (dc->out_ops && dc->out_ops->resume)
+ dc->out_ops->resume(dc);
+ mutex_unlock(&dc->lock);
+
+ return 0;
+}
+
+#endif
+
+static int suspend_set(const char *val, struct kernel_param *kp)
+{
+ if (!strcmp(val, "dump"))
+ dump_regs(tegra_dcs[0]);
+#ifdef CONFIG_PM
+ else if (!strcmp(val, "suspend"))
+ tegra_dc_suspend(tegra_dcs[0]->ndev, PMSG_SUSPEND);
+ else if (!strcmp(val, "resume"))
+ tegra_dc_resume(tegra_dcs[0]->ndev);
+#endif
+
+ return 0;
+}
+
+static int suspend_get(char *buffer, struct kernel_param *kp)
+{
+ return 0;
+}
+
+int suspend;
+
+module_param_call(suspend, suspend_set, suspend_get, &suspend, 0644);
+
+struct nvhost_driver tegra_dc_driver = {
+ .driver = {
+ .name = "tegradc",
+ .owner = THIS_MODULE,
+ },
+ .probe = tegra_dc_probe,
+ .remove = tegra_dc_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_dc_suspend,
+ .resume = tegra_dc_resume,
+#endif
+};
+
+static int __init tegra_dc_module_init(void)
+{
+ return nvhost_driver_register(&tegra_dc_driver);
+}
+
+static void __exit tegra_dc_module_exit(void)
+{
+ nvhost_driver_unregister(&tegra_dc_driver);
+}
+
+module_init(tegra_dc_module_init);
+module_exit(tegra_dc_module_exit);
diff --git a/drivers/video/tegra/dc/dc_priv.h b/drivers/video/tegra/dc/dc_priv.h
new file mode 100644
index 000000000000..253d03f057d7
--- /dev/null
+++ b/drivers/video/tegra/dc/dc_priv.h
@@ -0,0 +1,140 @@
+/*
+ * drivers/video/tegra/dc/dc_priv.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_PRIV_H
+#define __DRIVERS_VIDEO_TEGRA_DC_DC_PRIV_H
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include "../host/dev.h"
+
+struct tegra_dc;
+
+struct tegra_dc_blend {
+ unsigned z[DC_N_WINDOWS];
+ unsigned flags[DC_N_WINDOWS];
+};
+
+struct tegra_dc_out_ops {
+ /* initialize output. dc clocks are not on at this point */
+ int (*init)(struct tegra_dc *dc);
+ /* destroy output. dc clocks are not on at this point */
+ void (*destroy)(struct tegra_dc *dc);
+ /* detect connected display. can sleep.*/
+ bool (*detect)(struct tegra_dc *dc);
+ /* enable output. dc clocks are on at this point */
+ void (*enable)(struct tegra_dc *dc);
+ /* disable output. dc clocks are on at this point */
+ void (*disable)(struct tegra_dc *dc);
+
+ /* suspend output. dc clocks are on at this point */
+ void (*suspend)(struct tegra_dc *dc);
+ /* resume output. dc clocks are on at this point */
+ void (*resume)(struct tegra_dc *dc);
+};
+
+struct tegra_dc {
+ struct list_head list;
+
+ struct nvhost_device *ndev;
+ struct tegra_dc_platform_data *pdata;
+
+ struct resource *base_res;
+ void __iomem *base;
+ int irq;
+
+ struct clk *clk;
+ struct clk *emc_clk;
+
+ bool enabled;
+
+ struct tegra_dc_out *out;
+ struct tegra_dc_out_ops *out_ops;
+ void *out_data;
+
+ struct tegra_dc_mode mode;
+
+ struct tegra_dc_win windows[DC_N_WINDOWS];
+ struct tegra_dc_blend blend;
+ int n_windows;
+
+ wait_queue_head_t wq;
+
+ struct mutex lock;
+
+ struct resource *fb_mem;
+ struct tegra_fb_info *fb;
+
+ u32 syncpt_id;
+ u32 syncpt_min;
+ u32 syncpt_max;
+};
+
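+/* bracket register accesses with io_start/io_end to keep the host1x
+ * display module powered
+ */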
+static inline void tegra_dc_io_start(struct tegra_dc *dc)
+{
+ nvhost_module_busy(&dc->ndev->host->mod);
+}
+
+static inline void tegra_dc_io_end(struct tegra_dc *dc)
+{
+ nvhost_module_idle(&dc->ndev->host->mod);
+}
+
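+/* DC registers are indexed by 32-bit word, so the offset is scaled by
+ * 4 to form a byte address
+ */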
+static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
+ unsigned long reg)
+{
+ BUG_ON(!nvhost_module_powered(&dc->ndev->host->mod));
+ return readl(dc->base + reg * 4);
+}
+
+static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long val,
+ unsigned long reg)
+{
+ BUG_ON(!nvhost_module_powered(&dc->ndev->host->mod));
+ writel(val, dc->base + reg * 4);
+}
+
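+/* write a table laid out as (register, value) pairs: entry 2i is the
+ * register offset and entry 2i + 1 the value to write
+ */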
+static inline void _tegra_dc_write_table(struct tegra_dc *dc, const u32 *table,
+ unsigned len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ tegra_dc_writel(dc, table[i * 2 + 1], table[i * 2]);
+}
+
+#define tegra_dc_write_table(dc, table) \
+ _tegra_dc_write_table(dc, table, ARRAY_SIZE(table) / 2)
+
+static inline void tegra_dc_set_outdata(struct tegra_dc *dc, void *data)
+{
+ dc->out_data = data;
+}
+
+static inline void *tegra_dc_get_outdata(struct tegra_dc *dc)
+{
+ return dc->out_data;
+}
+
+void tegra_dc_setup_clk(struct tegra_dc *dc, struct clk *clk);
+
+extern struct tegra_dc_out_ops tegra_dc_rgb_ops;
+extern struct tegra_dc_out_ops tegra_dc_hdmi_ops;
+
+#endif
diff --git a/drivers/video/tegra/dc/dc_reg.h b/drivers/video/tegra/dc/dc_reg.h
new file mode 100644
index 000000000000..bd1750b78e44
--- /dev/null
+++ b/drivers/video/tegra/dc/dc_reg.h
@@ -0,0 +1,415 @@
+/*
+ * drivers/video/tegra/dc/dc_reg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_DC_REG_H
+#define __DRIVERS_VIDEO_TEGRA_DC_DC_REG_H
+
+#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT 0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT 0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
+#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
+#define DC_CMD_DISPLAY_COMMAND 0x032
+#define DISP_COMMAND_RAISE (1 << 0)
+#define DISP_CTRL_MODE_STOP (0 << 5)
+#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
+#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DISP_COMMAND_RAISE_VECTOR(x) (((x) & 0x1f) << 22)
+#define DISP_COMMAND_RAISE_CHANNEL_ID(x) (((x) & 0xf) << 27)
+
+#define DC_CMD_SIGNAL_RAISE 0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
+#define PW0_ENABLE (1 << 0)
+#define PW1_ENABLE (1 << 2)
+#define PW2_ENABLE (1 << 4)
+#define PW3_ENABLE (1 << 6)
+#define PW4_ENABLE (1 << 8)
+#define PM0_ENABLE (1 << 16)
+#define PM1_ENABLE (1 << 18)
+#define SPI_ENABLE (1 << 24)
+#define HSPI_ENABLE (1 << 25)
+
+#define DC_CMD_INT_STATUS 0x037
+#define DC_CMD_INT_MASK 0x038
+#define DC_CMD_INT_ENABLE 0x039
+#define DC_CMD_INT_TYPE 0x03a
+#define DC_CMD_INT_POLARITY 0x03b
+#define CTXSW_INT (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define V_BLANK_INT (1 << 2)
+#define H_BLANK_INT (1 << 3)
+#define V_PULSE3_INT (1 << 4)
+#define SPI_BUSY_INT (1 << 7)
+#define WIN_A_UF_INT (1 << 8)
+#define WIN_B_UF_INT (1 << 9)
+#define WIN_C_UF_INT (1 << 10)
+#define MSF_INT (1 << 12)
+#define SSF_INT (1 << 13)
+#define WIN_A_OF_INT (1 << 14)
+#define WIN_B_OF_INT (1 << 15)
+#define WIN_C_OF_INT (1 << 16)
+#define GPIO_0_INT (1 << 18)
+#define GPIO_1_INT (1 << 19)
+#define GPIO_2_INT (1 << 20)
+
+#define DC_CMD_SIGNAL_RAISE1 0x03c
+#define DC_CMD_SIGNAL_RAISE2 0x03d
+#define DC_CMD_SIGNAL_RAISE3 0x03e
+#define DC_CMD_STATE_ACCESS 0x040
+#define READ_MUX_ASSEMBLY (0 << 0)
+#define READ_MUX_ACTIVE (1 << 0)
+#define WRITE_MUX_ASSEMBLY (0 << 2)
+#define WRITE_MUX_ACTIVE (1 << 2)
+
+#define DC_CMD_STATE_CONTROL 0x041
+#define GENERAL_ACT_REQ (1 << 0)
+#define WIN_A_ACT_REQ (1 << 1)
+#define WIN_B_ACT_REQ (1 << 2)
+#define WIN_C_ACT_REQ (1 << 3)
+#define GENERAL_UPDATE (1 << 8)
+#define WIN_A_UPDATE (1 << 9)
+#define WIN_B_UPDATE (1 << 10)
+#define WIN_C_UPDATE (1 << 11)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
+#define WINDOW_A_SELECT (1 << 4)
+#define WINDOW_B_SELECT (1 << 5)
+#define WINDOW_C_SELECT (1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL 0x043
+
+#define DC_COM_CRC_CONTROL 0x300
+#define DC_COM_CRC_CHECKSUM 0x301
+#define DC_COM_PIN_OUTPUT_ENABLE0 0x302
+#define DC_COM_PIN_OUTPUT_ENABLE1 0x303
+#define DC_COM_PIN_OUTPUT_ENABLE2 0x304
+#define DC_COM_PIN_OUTPUT_ENABLE3 0x305
+#define DC_COM_PIN_OUTPUT_POLARITY0 0x306
+#define DC_COM_PIN_OUTPUT_POLARITY1 0x307
+#define DC_COM_PIN_OUTPUT_POLARITY2 0x308
+#define DC_COM_PIN_OUTPUT_POLARITY3 0x309
+#define DC_COM_PIN_OUTPUT_DATA0 0x30a
+#define DC_COM_PIN_OUTPUT_DATA1 0x30b
+#define DC_COM_PIN_OUTPUT_DATA2 0x30c
+#define DC_COM_PIN_OUTPUT_DATA3 0x30d
+#define DC_COM_PIN_INPUT_ENABLE0 0x30e
+#define DC_COM_PIN_INPUT_ENABLE1 0x30f
+#define DC_COM_PIN_INPUT_ENABLE2 0x310
+#define DC_COM_PIN_INPUT_ENABLE3 0x311
+#define DC_COM_PIN_INPUT_DATA0 0x312
+#define DC_COM_PIN_INPUT_DATA1 0x313
+#define DC_COM_PIN_OUTPUT_SELECT0 0x314
+#define DC_COM_PIN_OUTPUT_SELECT1 0x315
+#define DC_COM_PIN_OUTPUT_SELECT2 0x316
+#define DC_COM_PIN_OUTPUT_SELECT3 0x317
+#define DC_COM_PIN_OUTPUT_SELECT4 0x318
+#define DC_COM_PIN_OUTPUT_SELECT5 0x319
+#define DC_COM_PIN_OUTPUT_SELECT6 0x31a
+#define DC_COM_PIN_MISC_CONTROL 0x31b
+#define DC_COM_PM0_CONTROL 0x31c
+#define DC_COM_PM0_DUTY_CYCLE 0x31d
+#define DC_COM_PM1_CONTROL 0x31e
+#define DC_COM_PM1_DUTY_CYCLE 0x31f
+#define DC_COM_SPI_CONTROL 0x320
+#define DC_COM_SPI_START_BYTE 0x321
+#define DC_COM_HSPI_WRITE_DATA_AB 0x322
+#define DC_COM_HSPI_WRITE_DATA_CD 0x323
+#define DC_COM_HSPI_CS_DC 0x324
+#define DC_COM_SCRATCH_REGISTER_A 0x325
+#define DC_COM_SCRATCH_REGISTER_B 0x326
+#define DC_COM_GPIO_CTRL 0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
+#define H_PULSE_0_ENABLE (1 << 8)
+#define H_PULSE_1_ENABLE (1 << 10)
+#define H_PULSE_2_ENABLE (1 << 12)
+#define V_PULSE_0_ENABLE (1 << 16)
+#define V_PULSE_1_ENABLE (1 << 18)
+#define V_PULSE_2_ENABLE (1 << 19)
+#define V_PULSE_3_ENABLE (1 << 20)
+#define M0_ENABLE (1 << 24)
+#define M1_ENABLE (1 << 26)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
+#define DI_ENABLE (1 << 16)
+#define PP_ENABLE (1 << 18)
+
+#define DC_DISP_DISP_WIN_OPTIONS 0x402
+#define CURSOR_ENABLE (1 << 16)
+#define TVO_ENABLE (1 << 28)
+#define DSI_ENABLE (1 << 29)
+#define HDMI_ENABLE (1 << 30)
+
+#define DC_DISP_MEM_HIGH_PRIORITY 0x403
+#define DC_DISP_MEM_HIGH_PRIORITY_TIMER 0x404
+#define DC_DISP_DISP_TIMING_OPTIONS 0x405
+#define VSYNC_H_POSITION(x) ((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC 0x406
+#define DC_DISP_SYNC_WIDTH 0x407
+#define DC_DISP_BACK_PORCH 0x408
+#define DC_DISP_DISP_ACTIVE 0x409
+#define DC_DISP_FRONT_PORCH 0x40a
+#define DC_DISP_H_PULSE0_CONTROL 0x40b
+#define DC_DISP_H_PULSE0_POSITION_A 0x40c
+#define DC_DISP_H_PULSE0_POSITION_B 0x40d
+#define DC_DISP_H_PULSE0_POSITION_C 0x40e
+#define DC_DISP_H_PULSE0_POSITION_D 0x40f
+#define DC_DISP_H_PULSE1_CONTROL 0x410
+#define DC_DISP_H_PULSE1_POSITION_A 0x411
+#define DC_DISP_H_PULSE1_POSITION_B 0x412
+#define DC_DISP_H_PULSE1_POSITION_C 0x413
+#define DC_DISP_H_PULSE1_POSITION_D 0x414
+#define DC_DISP_H_PULSE2_CONTROL 0x415
+#define DC_DISP_H_PULSE2_POSITION_A 0x416
+#define DC_DISP_H_PULSE2_POSITION_B 0x417
+#define DC_DISP_H_PULSE2_POSITION_C 0x418
+#define DC_DISP_H_PULSE2_POSITION_D 0x419
+#define DC_DISP_V_PULSE0_CONTROL 0x41a
+#define DC_DISP_V_PULSE0_POSITION_A 0x41b
+#define DC_DISP_V_PULSE0_POSITION_B 0x41c
+#define DC_DISP_V_PULSE0_POSITION_C 0x41d
+#define DC_DISP_V_PULSE1_CONTROL 0x41e
+#define DC_DISP_V_PULSE1_POSITION_A 0x41f
+#define DC_DISP_V_PULSE1_POSITION_B 0x420
+#define DC_DISP_V_PULSE1_POSITION_C 0x421
+#define DC_DISP_V_PULSE2_CONTROL 0x422
+#define DC_DISP_V_PULSE2_POSITION_A 0x423
+#define DC_DISP_V_PULSE3_CONTROL 0x424
+#define DC_DISP_V_PULSE3_POSITION_A 0x425
+#define DC_DISP_M0_CONTROL 0x426
+#define DC_DISP_M1_CONTROL 0x427
+#define DC_DISP_DI_CONTROL 0x428
+#define DC_DISP_PP_CONTROL 0x429
+#define DC_DISP_PP_SELECT_A 0x42a
+#define DC_DISP_PP_SELECT_B 0x42b
+#define DC_DISP_PP_SELECT_C 0x42c
+#define DC_DISP_PP_SELECT_D 0x42d
+
+#define PULSE_MODE_NORMAL (0 << 3)
+#define PULSE_MODE_ONE_CLOCK (1 << 3)
+#define PULSE_POLARITY_HIGH (0 << 4)
+#define PULSE_POLARITY_LOW (1 << 4)
+#define PULSE_QUAL_ALWAYS (0 << 6)
+#define PULSE_QUAL_VACTIVE (2 << 6)
+#define PULSE_QUAL_VACTIVE1 (3 << 6)
+#define PULSE_LAST_START_A (0 << 8)
+#define PULSE_LAST_END_A (1 << 8)
+#define PULSE_LAST_START_B (2 << 8)
+#define PULSE_LAST_END_B (3 << 8)
+#define PULSE_LAST_START_C (4 << 8)
+#define PULSE_LAST_END_C (5 << 8)
+#define PULSE_LAST_START_D (6 << 8)
+#define PULSE_LAST_END_D (7 << 8)
+
+#define PULSE_START(x) ((x) & 0xfff)
+#define PULSE_END(x) (((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL 0x42e
+#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8)
+#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
+#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8)
+#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8)
+#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8)
+#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8)
+#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8)
+#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8)
+#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
+#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
+#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
+#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
+#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
+#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f
+#define DISP_DATA_FORMAT_DF1P1C (0 << 0)
+#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
+#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
+#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
+#define DISP_DATA_FORMAT_DF2S (5 << 0)
+#define DISP_DATA_FORMAT_DF3S (6 << 0)
+#define DISP_DATA_FORMAT_DFSPI (7 << 0)
+#define DISP_DATA_FORMAT_DF1P3C24B (8 << 0)
+#define DISP_DATA_FORMAT_DF1P3C18B (9 << 0)
+#define DISP_DATA_ALIGNMENT_MSB (0 << 8)
+#define DISP_DATA_ALIGNMENT_LSB (1 << 8)
+#define DISP_DATA_ORDER_RED_BLUE (0 << 9)
+#define DISP_DATA_ORDER_BLUE_RED (1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL 0x430
+#define BASE_COLOR_SIZE666 (0 << 0)
+#define BASE_COLOR_SIZE111 (1 << 0)
+#define BASE_COLOR_SIZE222 (2 << 0)
+#define BASE_COLOR_SIZE333 (3 << 0)
+#define BASE_COLOR_SIZE444 (4 << 0)
+#define BASE_COLOR_SIZE555 (5 << 0)
+#define BASE_COLOR_SIZE565 (6 << 0)
+#define BASE_COLOR_SIZE332 (7 << 0)
+#define BASE_COLOR_SIZE888 (8 << 0)
+
+#define DITHER_CONTROL_DISABLE (0 << 8)
+#define DITHER_CONTROL_ORDERED (2 << 8)
+#define DITHER_CONTROL_ERRDIFF (3 << 8)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
+#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
+#define DE_SELECT_ACTIVE_BLANK 0x0
+#define DE_SELECT_ACTIVE 0x1
+#define DE_SELECT_ACTIVE_IS 0x2
+#define DE_CONTROL_ONECLK (0 << 2)
+#define DE_CONTROL_NORMAL (1 << 2)
+#define DE_CONTROL_EARLY_EXT (2 << 2)
+#define DE_CONTROL_EARLY (3 << 2)
+#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433
+#define DC_DISP_LCD_SPI_OPTIONS 0x434
+#define DC_DISP_BORDER_COLOR 0x435
+#define DC_DISP_COLOR_KEY0_LOWER 0x436
+#define DC_DISP_COLOR_KEY0_UPPER 0x437
+#define DC_DISP_COLOR_KEY1_LOWER 0x438
+#define DC_DISP_COLOR_KEY1_UPPER 0x439
+#define DC_DISP_CURSOR_FOREGROUND 0x43c
+#define DC_DISP_CURSOR_BACKGROUND 0x43d
+#define DC_DISP_CURSOR_START_ADDR 0x43e
+#define DC_DISP_CURSOR_START_ADDR_NS 0x43f
+#define DC_DISP_CURSOR_POSITION 0x440
+#define DC_DISP_CURSOR_POSITION_NS 0x441
+#define DC_DISP_INIT_SEQ_CONTROL 0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446
+#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482
+#define DC_DISP_MCCIF_DISPLAY0C_HYST 0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484
+#define DC_DISP_DAC_CRT_CTRL 0x4c0
+#define DC_DISP_DISP_MISC_CONTROL 0x4c1
+
+#define DC_WIN_COLOR_PALETTE(x) (0x500 + (x))
+
+#define DC_WIN_PALETTE_COLOR_EXT 0x600
+#define DC_WIN_H_FILTER_P(x) (0x601 + (x))
+#define DC_WIN_CSC_YOF 0x611
+#define DC_WIN_CSC_KYRGB 0x612
+#define DC_WIN_CSC_KUR 0x613
+#define DC_WIN_CSC_KVR 0x614
+#define DC_WIN_CSC_KUG 0x615
+#define DC_WIN_CSC_KVG 0x616
+#define DC_WIN_CSC_KUB 0x617
+#define DC_WIN_CSC_KVB 0x618
+#define DC_WIN_V_FILTER_P(x) (0x619 + (x))
+#define DC_WIN_WIN_OPTIONS 0x700
+#define H_DIRECTION_INCREMENT (0 << 0)
+#define H_DIRECTION_DECREMENTT (1 << 0)
+#define V_DIRECTION_INCREMENT (0 << 2)
+#define V_DIRECTION_DECREMENTT (1 << 2)
+#define COLOR_EXPAND (1 << 6)
+#define H_FILTER_ENABLE (1 << 8)
+#define V_FILTER_ENABLE (1 << 10)
+#define CP_ENABLE (1 << 16)
+#define CSC_ENABLE (1 << 18)
+#define DV_ENABLE (1 << 20)
+#define WIN_ENABLE (1 << 30)
+
+#define DC_WIN_BYTE_SWAP 0x701
+#define BYTE_SWAP_NOSWAP 0
+#define BYTE_SWAP_SWAP2 1
+#define BYTE_SWAP_SWAP4 2
+#define BYTE_SWAP_SWAP4HW 3
+
+#define DC_WIN_BUFFER_CONTROL 0x702
+#define BUFFER_CONTROL_HOST 0
+#define BUFFER_CONTROL_VI 1
+#define BUFFER_CONTROL_EPP 2
+#define BUFFER_CONTROL_MPEGE 3
+#define BUFFER_CONTROL_SB2D 4
+
+#define DC_WIN_COLOR_DEPTH 0x703
+
+#define DC_WIN_POSITION 0x704
+#define H_POSITION(x) (((x) & 0xfff) << 0)
+#define V_POSITION(x) (((x) & 0xfff) << 16)
+
+#define DC_WIN_SIZE 0x705
+#define H_SIZE(x) (((x) & 0xfff) << 0)
+#define V_SIZE(x) (((x) & 0xfff) << 16)
+
+#define DC_WIN_PRESCALED_SIZE 0x706
+#define H_PRESCALED_SIZE(x) (((x) & 0x3fff) << 0)
+#define V_PRESCALED_SIZE(x) (((x) & 0xfff) << 16)
+
+#define DC_WIN_H_INITIAL_DDA 0x707
+#define DC_WIN_V_INITIAL_DDA 0x708
+#define DC_WIN_DDA_INCREMENT 0x709
+#define H_DDA_INC(x) (((x) & 0xffff) << 0)
+#define V_DDA_INC(x) (((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE 0x70a
+#define LINE_STRIDE(x) (x)
+#define UV_LINE_STRIDE(x) (((x) & 0xffff) << 16)
+#define DC_WIN_BUF_STRIDE 0x70b
+#define DC_WIN_UV_BUF_STRIDE 0x70c
+#define DC_WIN_BUFFER_ADDR_MODE 0x70d
+#define DC_WIN_DV_CONTROL 0x70e
+#define DC_WIN_BLEND_NOKEY 0x70f
+#define DC_WIN_BLEND_1WIN 0x710
+#define DC_WIN_BLEND_2WIN_X 0x711
+#define DC_WIN_BLEND_2WIN_Y 0x712
+#define DC_WIN_BLEND_3WIN_XY 0x713
+#define CKEY_NOKEY (0 << 0)
+#define CKEY_KEY0 (1 << 0)
+#define CKEY_KEY1 (2 << 0)
+#define CKEY_KEY01 (3 << 0)
+#define BLEND_CONTROL_FIX (0 << 2)
+#define BLEND_CONTROL_ALPHA (1 << 2)
+#define BLEND_CONTROL_DEPENDANT (2 << 2)
+#define BLEND_CONTROL_PREMULT (3 << 2)
+#define BLEND_WEIGHT0(x) (((x) & 0xff) << 8)
+#define BLEND_WEIGHT1(x) (((x) & 0xff) << 16)
+#define BLEND(key, control, weight0, weight1) \
+ (CKEY_ ## key | BLEND_CONTROL_ ## control | \
+ BLEND_WEIGHT0(weight0) | BLEND_WEIGHT1(weight1))
+
+#define DC_WIN_HP_FETCH_CONTROL 0x714
+#define DC_WINBUF_START_ADDR 0x800
+#define DC_WINBUF_START_ADDR_NS 0x801
+#define DC_WINBUF_START_ADDR_U 0x802
+#define DC_WINBUF_START_ADDR_U_NS 0x803
+#define DC_WINBUF_START_ADDR_V 0x804
+#define DC_WINBUF_START_ADDR_V_NS 0x805
+#define DC_WINBUF_ADDR_H_OFFSET 0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807
+#define DC_WINBUF_ADDR_V_OFFSET 0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
+#define DC_WINBUF_UFLOW_STATUS 0x80a
+
+#endif
diff --git a/drivers/video/tegra/dc/edid.c b/drivers/video/tegra/dc/edid.c
new file mode 100644
index 000000000000..812a0087a96d
--- /dev/null
+++ b/drivers/video/tegra/dc/edid.c
@@ -0,0 +1,276 @@
+/*
+ * drivers/video/tegra/dc/edid.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DEBUG
+
+#include <linux/debugfs.h>
+#include <linux/fb.h>
+#include <linux/i2c.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+
+#include "edid.h"
+
+struct tegra_edid {
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ int bus;
+
+ u8 *data;
+ unsigned len;
+};
+
+#if defined(DEBUG) || defined(CONFIG_DEBUG_FS)
+static int tegra_edid_show(struct seq_file *s, void *unused)
+{
+ struct tegra_edid *edid = s->private;
+ int i;
+
+ for (i = 0; i < edid->len; i++) {
+ if (i % 16 == 0)
+ seq_printf(s, "edid[%03x] =", i);
+
+ seq_printf(s, " %02x", edid->data[i]);
+
+ if (i % 16 == 15)
+ seq_printf(s, "\n");
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_edid_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra_edid_show, inode->i_private);
+}
+
+static const struct file_operations tegra_edid_debug_fops = {
+ .open = tegra_edid_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void tegra_edid_debug_add(struct tegra_edid *edid)
+{
+ char name[] = "edidX";
+
+ snprintf(name, sizeof(name), "edid%1d", edid->bus);
+ debugfs_create_file(name, S_IRUGO, NULL, edid, &tegra_edid_debug_fops);
+}
+#else
+void tegra_edid_debug_add(struct tegra_edid *edid)
+{
+}
+#endif
+
+#ifdef DEBUG
+static char tegra_edid_dump_buff[16 * 1024];
+
+static void tegra_edid_dump(struct tegra_edid *edid)
+{
+ struct seq_file s;
+ int i;
+ char c;
+
+ memset(&s, 0x0, sizeof(s));
+
+ s.buf = tegra_edid_dump_buff;
+ s.size = sizeof(tegra_edid_dump_buff);
+ s.private = edid;
+
+ tegra_edid_show(&s, NULL);
+
+ i = 0;
+ while (i < s.count) {
+ if ((s.count - i) > 256) {
+ c = s.buf[i + 256];
+ s.buf[i + 256] = 0;
+ printk("%s", s.buf + i);
+ s.buf[i + 256] = c;
+ } else {
+ printk("%s", s.buf + i);
+ }
+ i += 256;
+ }
+}
+#else
+static void tegra_edid_dump(struct tegra_edid *edid)
+{
+}
+#endif
+
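+/* Read one 128-byte EDID block over DDC.  Blocks beyond the first two
+ * live in a higher E-DDC segment, selected by writing the segment
+ * pointer to address 0x30 before the offset write and data read at
+ * address 0x50.
+ */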
+int tegra_edid_read_block(struct tegra_edid *edid, int block, u8 *data)
+{
+ u8 block_buf[] = {block >> 1};
+ u8 cmd_buf[] = {(block & 0x1) * 128};
+ int status;
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0x30,
+ .flags = 0,
+ .len = 1,
+ .buf = block_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .len = 1,
+ .buf = cmd_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 128,
+ .buf = data,
+ }};
+ struct i2c_msg *m;
+ int msg_len;
+
+ if (block > 1) {
+ msg_len = 3;
+ m = msg;
+ } else {
+ msg_len = 2;
+ m = &msg[1];
+ }
+
+ status = i2c_transfer(edid->client->adapter, m, msg_len);
+
+ if (status < 0)
+ return status;
+
+ if (status != msg_len)
+ return -EIO;
+
+ return 0;
+}
+
+int tegra_edid_get_monspecs(struct tegra_edid *edid, struct fb_monspecs *specs)
+{
+ int i;
+ int ret;
+ int extension_blocks;
+
+ ret = tegra_edid_read_block(edid, 0, edid->data);
+ if (ret)
+ return ret;
+
+ memset(specs, 0x0, sizeof(struct fb_monspecs));
+ fb_edid_to_monspecs(edid->data, specs);
+ if (specs->modedb == NULL)
+ return -EINVAL;
+
+ extension_blocks = edid->data[0x7e];
+
+ for (i = 1; i <= extension_blocks; i++) {
+ ret = tegra_edid_read_block(edid, i, edid->data + i * 128);
+ if (ret < 0)
+ break;
+
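+ /* a tag of 0x02 marks a CEA-861 extension block */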
+ if (edid->data[i * 128] == 0x2)
+ fb_edid_add_monspecs(edid->data + i * 128, specs);
+ }
+
+ edid->len = i * 128;
+
+ tegra_edid_dump(edid);
+
+ return 0;
+}
+
+struct tegra_edid *tegra_edid_create(int bus)
+{
+ struct tegra_edid *edid;
+ struct i2c_adapter *adapter;
+ int err;
+
+ edid = kzalloc(sizeof(struct tegra_edid), GFP_KERNEL);
+ if (!edid)
+ return ERR_PTR(-ENOMEM);
+
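+ /* room for the base EDID block plus up to 255 extensions:
+ * 256 * 128 bytes = 32K
+ */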
+ edid->data = vmalloc(SZ_32K);
+ if (!edid->data) {
+ err = -ENOMEM;
+ goto free_edid;
+ }
+ strlcpy(edid->info.type, "tegra_edid", sizeof(edid->info.type));
+ edid->bus = bus;
+ edid->info.addr = 0x50;
+ edid->info.platform_data = edid;
+
+ adapter = i2c_get_adapter(bus);
+ if (!adapter) {
+ pr_err("can't get adapter for bus %d\n", bus);
+ err = -EBUSY;
+ goto free_edid;
+ }
+
+ edid->client = i2c_new_device(adapter, &edid->info);
+ i2c_put_adapter(adapter);
+
+ if (!edid->client) {
+ pr_err("can't create new device\n");
+ err = -EBUSY;
+ goto free_edid;
+ }
+
+ tegra_edid_debug_add(edid);
+
+ return edid;
+
+free_edid:
+ vfree(edid->data);
+ kfree(edid);
+
+ return ERR_PTR(err);
+}
+
+void tegra_edid_destroy(struct tegra_edid *edid)
+{
+ i2c_release_client(edid->client);
+ vfree(edid->data);
+ kfree(edid);
+}
+
+static const struct i2c_device_id tegra_edid_id[] = {
+ { "tegra_edid", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, tegra_edid_id);
+
+static struct i2c_driver tegra_edid_driver = {
+ .id_table = tegra_edid_id,
+ .driver = {
+ .name = "tegra_edid",
+ },
+};
+
+static int __init tegra_edid_init(void)
+{
+ return i2c_add_driver(&tegra_edid_driver);
+}
+
+static void __exit tegra_edid_exit(void)
+{
+ i2c_del_driver(&tegra_edid_driver);
+}
+
+module_init(tegra_edid_init);
+module_exit(tegra_edid_exit);
diff --git a/drivers/video/tegra/dc/edid.h b/drivers/video/tegra/dc/edid.h
new file mode 100644
index 000000000000..821da90a8b4f
--- /dev/null
+++ b/drivers/video/tegra/dc/edid.h
@@ -0,0 +1,31 @@
+/*
+ * drivers/video/tegra/dc/edid.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_EDID_H
+#define __DRIVERS_VIDEO_TEGRA_DC_EDID_H
+
+#include <linux/i2c.h>
+#include <linux/wait.h>
+
+struct tegra_edid;
+
+struct tegra_edid *tegra_edid_create(int bus);
+void tegra_edid_destroy(struct tegra_edid *edid);
+
+int tegra_edid_get_monspecs(struct tegra_edid *edid, struct fb_monspecs *specs);
+
+#endif
diff --git a/drivers/video/tegra/dc/hdmi.c b/drivers/video/tegra/dc/hdmi.c
new file mode 100644
index 000000000000..878cca51283a
--- /dev/null
+++ b/drivers/video/tegra/dc/hdmi.c
@@ -0,0 +1,1102 @@
+/*
+ * drivers/video/tegra/dc/hdmi.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <mach/clk.h>
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <mach/nvhost.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+#include "hdmi_reg.h"
+#include "hdmi.h"
+#include "edid.h"
+
+/* datasheet claims this will always be 216MHz */
+#define HDMI_AUDIOCLK_FREQ 216000000
+
+#define HDMI_REKEY_DEFAULT 56
+
+struct tegra_dc_hdmi_data {
+ struct tegra_dc *dc;
+ struct tegra_edid *edid;
+ struct delayed_work work;
+
+ struct resource *base_res;
+ void __iomem *base;
+ struct clk *clk;
+
+ struct clk *disp1_clk;
+ struct clk *disp2_clk;
+
+ spinlock_t suspend_lock;
+ bool suspended;
+ bool hpd_pending;
+};
+
+const struct fb_videomode tegra_dc_hdmi_supported_modes[] = {
+ /* 1280x720p 60hz: EIA/CEA-861-B Format 4 */
+ {
+ .xres = 1280,
+ .yres = 720,
+ .pixclock = KHZ2PICOS(74250),
+ .hsync_len = 40, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 220, /* h_back_porch */
+ .upper_margin = 20, /* v_back_porch */
+ .right_margin = 110, /* h_front_porch */
+ .lower_margin = 5, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+
+ /* 720x480p 59.94hz: EIA/CEA-861-B Formats 2 & 3 */
+ {
+ .xres = 720,
+ .yres = 480,
+ .pixclock = KHZ2PICOS(27000),
+ .hsync_len = 62, /* h_sync_width */
+ .vsync_len = 6, /* v_sync_width */
+ .left_margin = 60, /* h_back_porch */
+ .upper_margin = 30, /* v_back_porch */
+ .right_margin = 16, /* h_front_porch */
+ .lower_margin = 9, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+
+ /* 640x480p 60hz: EIA/CEA-861-B Format 1 */
+ {
+ .xres = 640,
+ .yres = 480,
+ .pixclock = KHZ2PICOS(25200),
+ .hsync_len = 96, /* h_sync_width */
+ .vsync_len = 2, /* v_sync_width */
+ .left_margin = 48, /* h_back_porch */
+ .upper_margin = 33, /* v_back_porch */
+ .right_margin = 16, /* h_front_porch */
+ .lower_margin = 10, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+
+ /* 720x576p 50hz EIA/CEA-861-B Formats 17 & 18 */
+ {
+ .xres = 720,
+ .yres = 576,
+ .pixclock = KHZ2PICOS(27000),
+ .hsync_len = 64, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 68, /* h_back_porch */
+ .upper_margin = 39, /* v_back_porch */
+ .right_margin = 12, /* h_front_porch */
+ .lower_margin = 5, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+
+ /* 1920x1080p 59.94/60hz EIA/CEA-861-B Format 16 */
+ {
+ .xres = 1920,
+ .yres = 1080,
+ .pixclock = KHZ2PICOS(148500),
+ .hsync_len = 44, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 148, /* h_back_porch */
+ .upper_margin = 36, /* v_back_porch */
+ .right_margin = 88, /* h_front_porch */
+ .lower_margin = 4, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+};
+
+struct tegra_hdmi_audio_config {
+ unsigned pix_clock;
+ unsigned n;
+ unsigned cts;
+};
+
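+/* Audio Clock Regeneration parameter tables, one per audio sample rate.
+ * The sink recovers the audio clock as 128 * fs = pix_clock * N / CTS,
+ * so each (fs, pix_clock) pair needs its own N and CTS values.
+ */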
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
+ {25200000, 4096, 25250},
+ {27000000, 4096, 27000},
+ {54000000, 4096, 54000},
+ {74250000, 4096, 74250},
+ {148500000, 4096, 148500},
+ {0, 0, 0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
+ {25200000, 14112, 63125},
+ {27000000, 6272, 30000},
+ {54000000, 6272, 60000},
+ {74250000, 6272, 82500},
+ {148500000, 6272, 165000},
+ {0, 0, 0},
+};
+
+const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
+ {25200000, 6144, 25250},
+ {27000000, 6144, 27000},
+ {54000000, 6144, 54000},
+ {74250000, 6144, 74250},
+ {148500000, 6144, 148500},
+ {0, 0, 0},
+};
+
+static const struct tegra_hdmi_audio_config
+*tegra_hdmi_get_audio_config(unsigned audio_freq, unsigned pix_clock)
+{
+ const struct tegra_hdmi_audio_config *table;
+
+ switch (audio_freq) {
+ case 32000:
+ table = tegra_hdmi_audio_32k;
+ break;
+
+ case 44100:
+ table = tegra_hdmi_audio_44_1k;
+ break;
+
+ case 48000:
+ table = tegra_hdmi_audio_48k;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ while (table->pix_clock) {
+ if (table->pix_clock == pix_clock)
+ return table;
+ table++;
+ }
+
+ return NULL;
+}
+
+static inline unsigned long tegra_hdmi_readl(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long reg)
+{
+ return readl(hdmi->base + reg * 4);
+}
+
+static inline void tegra_hdmi_writel(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, hdmi->base + reg * 4);
+}
+
+static inline void tegra_hdmi_clrsetbits(struct tegra_dc_hdmi_data *hdmi,
+ unsigned long reg, unsigned long clr,
+ unsigned long set)
+{
+ unsigned long val = tegra_hdmi_readl(hdmi, reg);
+ val &= ~clr;
+ val |= set;
+ tegra_hdmi_writel(hdmi, val, reg);
+}
+
+#define DUMP_REG(a) do { \
+ printk("HDMI %-32s\t%03x\t%08lx\n", \
+ #a, a, tegra_hdmi_readl(hdmi, a)); \
+ } while (0)
+
+#ifdef DEBUG
+static void hdmi_dumpregs(struct tegra_dc_hdmi_data *hdmi)
+{
+ DUMP_REG(HDMI_CTXSW);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
+ DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST2);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST3);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST4);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST5);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST6);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST7);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST8);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST9);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTA);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTB);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTC);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTD);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTE);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INSTF);
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
+ DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
+ DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
+ DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_SCRATCH);
+ DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
+}
+#endif
+
+#define PIXCLOCK_TOLERANCE 200
+
+static bool tegra_dc_hdmi_mode_equal(const struct fb_videomode *mode1,
+ const struct fb_videomode *mode2)
+{
+ return mode1->xres == mode2->xres &&
+ mode1->yres == mode2->yres &&
+ mode1->vmode == mode2->vmode;
+}
+
+static bool tegra_dc_hdmi_mode_filter(struct fb_videomode *mode)
+{
+ int i;
+ int clocks;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_dc_hdmi_supported_modes); i++) {
+ if (tegra_dc_hdmi_mode_equal(&tegra_dc_hdmi_supported_modes[i],
+ mode)) {
+ memcpy(mode, &tegra_dc_hdmi_supported_modes[i], sizeof(*mode));
+ mode->flag = FB_MODE_IS_DETAILED;
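+ /* recompute the refresh rate from the pixel clock and the
+ * total clocks per frame (htotal * vtotal)
+ */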
+ clocks = (mode->left_margin + mode->xres + mode->right_margin + mode->hsync_len) *
+ (mode->upper_margin + mode->yres + mode->lower_margin + mode->vsync_len);
+ mode->refresh = (PICOS2KHZ(mode->pixclock) * 1000) / clocks;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool tegra_dc_hdmi_hpd(struct tegra_dc *dc)
+{
+ int sense;
+ int level;
+
+ level = gpio_get_value(dc->out->hotplug_gpio);
+
+ sense = dc->out->flags & TEGRA_DC_OUT_HOTPLUG_MASK;
+
+ return (sense == TEGRA_DC_OUT_HOTPLUG_HIGH && level) ||
+ (sense == TEGRA_DC_OUT_HOTPLUG_LOW && !level);
+}
+
+static bool tegra_dc_hdmi_detect(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ struct fb_monspecs specs;
+ int err;
+
+ if (!tegra_dc_hdmi_hpd(dc))
+ return false;
+
+ err = tegra_edid_get_monspecs(hdmi->edid, &specs);
+ if (err < 0) {
+ dev_err(&dc->ndev->dev, "error reading edid\n");
+ return false;
+ }
+
+ /* monitors like to lie about these but they are still useful for
+ * detecting aspect ratios
+ */
+ dc->out->h_size = specs.max_x * 1000;
+ dc->out->v_size = specs.max_y * 1000;
+
+ tegra_fb_update_monspecs(dc->fb, &specs, tegra_dc_hdmi_mode_filter);
+ dev_info(&dc->ndev->dev, "display detected\n");
+ return true;
+}
+
+static void tegra_dc_hdmi_detect_worker(struct work_struct *work)
+{
+ struct tegra_dc_hdmi_data *hdmi =
+ container_of(to_delayed_work(work), struct tegra_dc_hdmi_data, work);
+ struct tegra_dc *dc = hdmi->dc;
+
+ if (!tegra_dc_hdmi_detect(dc)) {
+ tegra_dc_disable(dc);
+ tegra_fb_update_monspecs(dc->fb, NULL, NULL);
+ }
+}
+
+static irqreturn_t tegra_dc_hdmi_irq(int irq, void *ptr)
+{
+ struct tegra_dc *dc = ptr;
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->suspend_lock, flags);
+ if (hdmi->suspended) {
+ hdmi->hpd_pending = true;
+ } else {
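+ /* debounce plug events for 100ms; handle unplugs immediately */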
+ if (tegra_dc_hdmi_hpd(dc))
+ schedule_delayed_work(&hdmi->work, msecs_to_jiffies(100));
+ else
+ schedule_delayed_work(&hdmi->work, msecs_to_jiffies(0));
+ }
+ spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_dc_hdmi_suspend(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->suspend_lock, flags);
+ hdmi->suspended = true;
+ spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+}
+
+static void tegra_dc_hdmi_resume(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->suspend_lock, flags);
+ hdmi->suspended = false;
+ if (hdmi->hpd_pending) {
+ if (tegra_dc_hdmi_hpd(dc))
+ schedule_delayed_work(&hdmi->work, msecs_to_jiffies(100));
+ else
+ schedule_delayed_work(&hdmi->work, msecs_to_jiffies(0));
+ hdmi->hpd_pending = false;
+ }
+ spin_unlock_irqrestore(&hdmi->suspend_lock, flags);
+}
+
+static int tegra_dc_hdmi_init(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi;
+ struct resource *res;
+ struct resource *base_res;
+ void __iomem *base;
+ struct clk *clk = NULL;
+ struct clk *disp1_clk = NULL;
+ struct clk *disp2_clk = NULL;
+ int err;
+
+ hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ res = nvhost_get_resource_byname(dc->ndev, IORESOURCE_MEM, "hdmi_regs");
+ if (!res) {
+ dev_err(&dc->ndev->dev, "hdmi: no mem resource\n");
+ err = -ENOENT;
+ goto err_free_hdmi;
+ }
+
+ base_res = request_mem_region(res->start, resource_size(res), dc->ndev->name);
+ if (!base_res) {
+ dev_err(&dc->ndev->dev, "hdmi: request_mem_region failed\n");
+ err = -EBUSY;
+ goto err_free_hdmi;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&dc->ndev->dev, "hdmi: registers can't be mapped\n");
+ err = -EBUSY;
+ goto err_release_resource_reg;
+ }
+
+ clk = clk_get(&dc->ndev->dev, "hdmi");
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't get clock\n");
+ err = -ENOENT;
+ goto err_iounmap_reg;
+ }
+
+ disp1_clk = clk_get_sys("tegradc.0", NULL);
+ if (IS_ERR_OR_NULL(disp1_clk)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't get disp1 clock\n");
+ err = -ENOENT;
+ goto err_put_clock;
+ }
+
+ disp2_clk = clk_get_sys("tegradc.1", NULL);
+ if (IS_ERR_OR_NULL(disp2_clk)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't get disp2 clock\n");
+ err = -ENOENT;
+ goto err_put_clock;
+ }
+
+ /* TODO: support non-hotplug */
+ if (request_irq(gpio_to_irq(dc->out->hotplug_gpio), tegra_dc_hdmi_irq,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ dev_name(&dc->ndev->dev), dc)) {
+ dev_err(&dc->ndev->dev, "hdmi: request_irq %d failed\n",
+ gpio_to_irq(dc->out->hotplug_gpio));
+ err = -EBUSY;
+ goto err_put_clock;
+ }
+
+ hdmi->edid = tegra_edid_create(dc->out->dcc_bus);
+ if (IS_ERR_OR_NULL(hdmi->edid)) {
+ dev_err(&dc->ndev->dev, "hdmi: can't create edid\n");
+ err = PTR_ERR(hdmi->edid);
+ goto err_free_irq;
+ }
+
+ INIT_DELAYED_WORK(&hdmi->work, tegra_dc_hdmi_detect_worker);
+
+ hdmi->dc = dc;
+ hdmi->base = base;
+ hdmi->base_res = base_res;
+ hdmi->clk = clk;
+ hdmi->disp1_clk = disp1_clk;
+ hdmi->disp2_clk = disp2_clk;
+ hdmi->suspended = false;
+ hdmi->hpd_pending = false;
+ spin_lock_init(&hdmi->suspend_lock);
+
+ dc->out->depth = 24;
+
+ tegra_dc_set_outdata(dc, hdmi);
+
+ return 0;
+
+err_free_irq:
+ free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
+err_put_clock:
+ if (!IS_ERR_OR_NULL(disp2_clk))
+ clk_put(disp2_clk);
+ if (!IS_ERR_OR_NULL(disp1_clk))
+ clk_put(disp1_clk);
+ if (!IS_ERR_OR_NULL(clk))
+ clk_put(clk);
+err_iounmap_reg:
+ iounmap(base);
+err_release_resource_reg:
+ release_resource(base_res);
+err_free_hdmi:
+ kfree(hdmi);
+ return err;
+}
+
+static void tegra_dc_hdmi_destroy(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+ free_irq(gpio_to_irq(dc->out->hotplug_gpio), dc);
+ cancel_delayed_work_sync(&hdmi->work);
+ iounmap(hdmi->base);
+ release_resource(hdmi->base_res);
+ clk_put(hdmi->clk);
+ clk_put(hdmi->disp1_clk);
+ clk_put(hdmi->disp2_clk);
+ tegra_edid_destroy(hdmi->edid);
+
+ kfree(hdmi);
+}
+
+static void tegra_dc_hdmi_setup_audio_fs_tables(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ int i;
+ unsigned freqs[] = {
+ 32000,
+ 44100,
+ 48000,
+ 88200,
+ 96000,
+ 176400,
+ 192000,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ unsigned f = freqs[i];
+ unsigned eight_half;
+ unsigned delta;
+
+ if (f > 96000)
+ delta = 2;
+ else if (f > 48000)
+ delta = 6;
+ else
+ delta = 9;
+
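+ /* AUDIO_FS windows the measured audio clock count around the
+ * nominal 8 * audioclk / (128 * fs) for this rate, +/- delta
+ */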
+ eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+ tegra_hdmi_writel(hdmi, AUDIO_FS_LOW(eight_half - delta) |
+ AUDIO_FS_HIGH(eight_half + delta),
+ HDMI_NV_PDISP_AUDIO_FS(i));
+ }
+}
+
+static int tegra_dc_hdmi_setup_audio(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ const struct tegra_hdmi_audio_config *config;
+ unsigned long audio_n;
+ unsigned audio_freq = 44100; /* TODO: find some way of configuring this */
+
+ tegra_hdmi_writel(hdmi,
+ AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+ AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
+ AUDIO_CNTRL0_SOURCE_SELECT_AUTO,
+ HDMI_NV_PDISP_AUDIO_CNTRL0);
+
+ config = tegra_hdmi_get_audio_config(audio_freq, dc->mode.pclk);
+ if (!config) {
+ dev_err(&dc->ndev->dev,
+ "hdmi: can't set audio to %d at %d pix_clock",
+ audio_freq, dc->mode.pclk);
+ return -EINVAL;
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+ audio_n = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNALTE |
+ AUDIO_N_VALUE(config->n - 1);
+ tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N);
+
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+ tegra_hdmi_writel(hdmi, SPARE_HW_CTS | SPARE_FORCE_SW_CTS |
+ SPARE_CTS_RESET_VAL(1),
+ HDMI_NV_PDISP_HDMI_SPARE);
+
+ audio_n &= ~AUDIO_N_RESETF;
+ tegra_hdmi_writel(hdmi, audio_n, HDMI_NV_PDISP_AUDIO_N);
+
+ tegra_dc_hdmi_setup_audio_fs_tables(dc);
+
+ return 0;
+}
+
+static void tegra_dc_hdmi_write_infopack(struct tegra_dc *dc, int header_reg,
+ u8 type, u8 version, void *data, int len)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ u32 subpack[2]; /* extra byte for zero padding of subpack */
+ int i;
+ u8 csum;
+
+ /* the first byte of data is a checksum chosen so that the header
+ * and payload bytes sum to zero mod 256
+ */
+ csum = type + version + len - 1;
+ for (i = 1; i < len; i++)
+ csum += ((u8 *)data)[i];
+ ((u8 *)data)[0] = 0x100 - csum;
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_HEADER_TYPE(type) |
+ INFOFRAME_HEADER_VERSION(version) |
+ INFOFRAME_HEADER_LEN(len - 1),
+ header_reg);
+
+ /* The audio infoframe only has one set of subpack registers.  The
+ * HDMI block pads the rest of the data as per the spec, so we have
+ * to fix up the length before filling in the subpacks.
+ */
+ if (header_reg == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
+ len = 6;
+
+ /* each subpack is 7 bytes, divided into:
+ * subpack_low - bytes 0 - 3
+ * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
+ */
+ for (i = 0; i < len; i++) {
+ int subpack_idx = i % 7;
+
+ if (subpack_idx == 0)
+ memset(subpack, 0x0, sizeof(subpack));
+
+ ((u8 *)subpack)[subpack_idx] = ((u8 *)data)[i];
+
+ if (subpack_idx == 6 || (i + 1 == len)) {
+ int reg = header_reg + 1 + (i / 7) * 2;
+
+ tegra_hdmi_writel(hdmi, subpack[0], reg);
+ tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
+ }
+ }
+}
+
+static void tegra_dc_hdmi_setup_avi_infoframe(struct tegra_dc *dc, bool dvi)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ struct hdmi_avi_infoframe avi;
+
+ if (dvi) {
+ tegra_hdmi_writel(hdmi, 0x0,
+ HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ return;
+ }
+
+ memset(&avi, 0x0, sizeof(avi));
+
+ avi.r = HDMI_AVI_R_SAME;
+
+ if (dc->mode.v_active == 480) {
+ if (dc->mode.h_active == 640) {
+ avi.m = HDMI_AVI_M_4_3;
+ avi.vic = 1;
+ } else {
+ avi.m = HDMI_AVI_M_16_9;
+ avi.vic = 3;
+ }
+ } else if (dc->mode.v_active == 576) {
+ /* CEA modes 17 and 18 differ only in the physical size of the
+ * screen, so we have to calculate the physical aspect ratio:
+ * 4:3 gives h * 10 / v == 13, 16:9 gives 17, so 14 is a safe
+ * threshold
+ */
+ if ((dc->out->h_size * 10) / dc->out->v_size > 14) {
+ avi.m = HDMI_AVI_M_16_9;
+ avi.vic = 18;
+ } else {
+ avi.m = HDMI_AVI_M_16_9;
+ avi.vic = 17;
+ }
+ } else if (dc->mode.v_active == 720) {
+ avi.m = HDMI_AVI_M_16_9;
+ if (dc->mode.h_front_porch == 110)
+ avi.vic = 4; /* 60 Hz */
+ else
+ avi.vic = 19; /* 50 Hz */
+ } else if (dc->mode.v_active == 1080) {
+ avi.m = HDMI_AVI_M_16_9;
+ if (dc->mode.h_front_porch == 88)
+ avi.vic = 16; /* 60 Hz */
+ else if (dc->mode.h_front_porch == 528)
+ avi.vic = 31; /* 50 Hz */
+ else
+ avi.vic = 32; /* 24 Hz */
+ } else {
+ avi.m = HDMI_AVI_M_16_9;
+ avi.vic = 0;
+ }
+
+ tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
+ HDMI_INFOFRAME_TYPE_AVI,
+ HDMI_AVI_VERSION,
+ &avi, sizeof(avi));
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+ HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_dc_hdmi_setup_audio_infoframe(struct tegra_dc *dc, bool dvi)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ struct hdmi_audio_infoframe audio;
+
+ if (dvi) {
+ tegra_hdmi_writel(hdmi, 0x0,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ return;
+ }
+
+ memset(&audio, 0x0, sizeof(audio));
+
+ audio.cc = HDMI_AUDIO_CC_2;
+ tegra_dc_hdmi_write_infopack(dc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
+ HDMI_INFOFRAME_TYPE_AUDIO,
+ HDMI_AUDIO_VERSION,
+ &audio, sizeof(audio));
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_dc_hdmi_enable(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+ int pulse_start;
+ int dispclk_div_8_2;
+ int pll0;
+ int pll1;
+ int ds;
+ int retries;
+ int rekey;
+ int err;
+ unsigned long val;
+ bool dvi = false;
+
+ /* enable power, clocks, resets, etc. */
+
+ /* The upstream DC needs to be clocked for accesses to HDMI to not
+ * hard lock the system. Because we don't know if HDMI is connected
+ * to disp1 or disp2 we need to enable both until we set the DC mux.
+ */
+ clk_enable(hdmi->disp1_clk);
+ clk_enable(hdmi->disp2_clk);
+ tegra_dc_setup_clk(dc, hdmi->clk);
+ clk_set_rate(hdmi->clk, dc->mode.pclk);
+
+ clk_enable(hdmi->clk);
+ tegra_periph_reset_assert(hdmi->clk);
+ mdelay(1);
+ tegra_periph_reset_deassert(hdmi->clk);
+
+ /* TODO: copy HDCP keys from KFUSE to HDMI */
+
+ /* Program display timing registers: handled by dc */
+
+ /* program HDMI registers and SOR sequencer */
+
+ tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS);
+ tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+ DC_DISP_DISP_COLOR_CONTROL);
+
+ /* video_preamble uses h_pulse2 */
+ pulse_start = dc->mode.h_ref_to_sync + dc->mode.h_sync_width +
+ dc->mode.h_back_porch - 10;
+ tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ tegra_dc_writel(dc,
+ PULSE_MODE_NORMAL |
+ PULSE_POLARITY_HIGH |
+ PULSE_QUAL_VACTIVE |
+ PULSE_LAST_END_A,
+ DC_DISP_H_PULSE2_CONTROL);
+ tegra_dc_writel(dc, PULSE_START(pulse_start) | PULSE_END(pulse_start + 8),
+ DC_DISP_H_PULSE2_POSITION_A);
+
+ tegra_hdmi_writel(hdmi,
+ VSYNC_WINDOW_END(0x210) |
+ VSYNC_WINDOW_START(0x200) |
+ VSYNC_WINDOW_ENABLE,
+ HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+ tegra_hdmi_writel(hdmi,
+ (dc->ndev->id ? HDMI_SRC_DISPLAYB : HDMI_SRC_DISPLAYA) |
+ ARM_VIDEO_RANGE_LIMITED,
+ HDMI_NV_PDISP_INPUT_CONTROL);
+
+ clk_disable(hdmi->disp1_clk);
+ clk_disable(hdmi->disp2_clk);
+
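+ /* SOR reference clock divider: the clock rate in MHz as an 8.2
+ * fixed-point value (the 4x scaling supplies the two fraction
+ * bits)
+ */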
+ dispclk_div_8_2 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+ tegra_hdmi_writel(hdmi,
+ SOR_REFCLK_DIV_INT(dispclk_div_8_2 >> 2) |
+ SOR_REFCLK_DIV_FRAC(dispclk_div_8_2),
+ HDMI_NV_PDISP_SOR_REFCLK);
+
+ err = tegra_dc_hdmi_setup_audio(dc);
+ if (err < 0)
+ dvi = true;
+
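+ /* limit audio/control packets to what fits in the horizontal
+ * blanking interval, in 32 pixel units, leaving room for the
+ * HDCP rekey window and an 18 pixel margin
+ */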
+ rekey = HDMI_REKEY_DEFAULT;
+ val = HDMI_CTRL_REKEY(rekey);
+ val |= HDMI_CTRL_MAX_AC_PACKET((dc->mode.h_sync_width +
+ dc->mode.h_back_porch +
+ dc->mode.h_front_porch -
+ rekey - 18) / 32);
+ if (!dvi)
+ val |= HDMI_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_HDMI_CTRL);
+
+ if (dvi)
+ tegra_hdmi_writel(hdmi, 0x0,
+ HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ else
+ tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
+ HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+
+ tegra_dc_hdmi_setup_avi_infoframe(dc, dvi);
+ tegra_dc_hdmi_setup_audio_infoframe(dc, dvi);
+
+ /* TMDS CONFIG */
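+ /* start from a known PLL0 value (presumably the reset default)
+ * and clear the power-down and calibration fields before
+ * reconfiguring
+ */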
+ pll0 = 0x200033f;
+ pll1 = 0;
+
+ pll0 &= ~SOR_PLL_PWR & ~SOR_PLL_VCOPD & ~SOR_PLL_PDBG & ~SOR_PLL_PDPORT & ~SOR_PLL_PULLDOWN &
+ ~SOR_PLL_VCOCAP(~0) & ~SOR_PLL_ICHPMP(~0);
+ pll0 |= SOR_PLL_RESISTORSEL;
+
+ if (dc->mode.pclk <= 27000000)
+ pll0 |= SOR_PLL_VCOCAP(0);
+ else if (dc->mode.pclk <= 74250000)
+ pll0 |= SOR_PLL_VCOCAP(1);
+ else
+ pll0 |= SOR_PLL_VCOCAP(3);
+
+ if (dc->mode.v_active == 1080) {
+ pll0 |= SOR_PLL_ICHPMP(1) | SOR_PLL_TX_REG_LOAD(3) |
+ SOR_PLL_BG_V17_S(3);
+ pll1 |= SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN;
+ } else {
+ pll0 |= SOR_PLL_ICHPMP(2);
+ }
+
+ tegra_hdmi_writel(hdmi, pll0, HDMI_NV_PDISP_SOR_PLL0);
+ tegra_hdmi_writel(hdmi, pll1, HDMI_NV_PDISP_SOR_PLL1);
+
+ if (pll1 & SOR_PLL_PE_EN) {
+ tegra_hdmi_writel(hdmi,
+ PE_CURRENT0(0xf) |
+ PE_CURRENT1(0xf) |
+ PE_CURRENT2(0xf) |
+ PE_CURRENT3(0xf),
+ HDMI_NV_PDISP_PE_CURRENT);
+ }
+
+ /* enable SOR */
+ if (dc->mode.v_active == 1080)
+ ds = DRIVE_CURRENT_13_500_mA;
+ else
+ ds = DRIVE_CURRENT_5_250_mA;
+
+ tegra_hdmi_writel(hdmi,
+ DRIVE_CURRENT_LANE0(ds) |
+ DRIVE_CURRENT_LANE1(ds) |
+ DRIVE_CURRENT_LANE2(ds) |
+ DRIVE_CURRENT_LANE3(ds) |
+ DRIVE_CURRENT_FUSE_OVERRIDE,
+ HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+
+ tegra_hdmi_writel(hdmi,
+ SOR_SEQ_CTL_PU_PC(0) |
+ SOR_SEQ_PU_PC_ALT(0) |
+ SOR_SEQ_PD_PC(8) |
+ SOR_SEQ_PD_PC_ALT(8),
+ HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+ val = SOR_SEQ_INST_WAIT_TIME(1) |
+ SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+ SOR_SEQ_INST_HALT |
+ SOR_SEQ_INST_PIN_A_LOW |
+ SOR_SEQ_INST_PIN_B_LOW |
+ SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST0);
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_SEQ_INST8);
+
+ val = 0x1c800;
+ val &= ~SOR_CSTM_ROTCLK(~0);
+ val |= SOR_CSTM_ROTCLK(2);
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_CSTM);
+
+ tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* start SOR */
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_TRIGGER,
+ HDMI_NV_PDISP_SOR_PWR);
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_DONE,
+ HDMI_NV_PDISP_SOR_PWR);
+
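+ /* wait for the new SOR power setting to latch */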
+ retries = 1000;
+ do {
+ BUG_ON(--retries < 0);
+ val = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+ } while (val & SOR_PWR_SETTING_NEW_PENDING);
+
+ tegra_hdmi_writel(hdmi,
+ SOR_STATE_ASY_CRCMODE_COMPLETE |
+ SOR_STATE_ASY_OWNER_HEAD0 |
+ SOR_STATE_ASY_SUBOWNER_BOTH |
+ SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+ /* TODO: to look at hsync polarity */
+ SOR_STATE_ASY_HSYNCPOL_POS |
+ SOR_STATE_ASY_VSYNCPOL_POS |
+ SOR_STATE_ASY_DEPOL_POS,
+ HDMI_NV_PDISP_SOR_STATE2);
+
+ val = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+ tegra_hdmi_writel(hdmi, val, HDMI_NV_PDISP_SOR_STATE1);
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, val | SOR_STATE_ATTACHED,
+ HDMI_NV_PDISP_SOR_STATE1);
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+ tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
+ DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_dc_hdmi_disable(struct tegra_dc *dc)
+{
+ struct tegra_dc_hdmi_data *hdmi = tegra_dc_get_outdata(dc);
+
+ tegra_periph_reset_assert(hdmi->clk);
+ clk_disable(hdmi->clk);
+}
+
+struct tegra_dc_out_ops tegra_dc_hdmi_ops = {
+ .init = tegra_dc_hdmi_init,
+ .destroy = tegra_dc_hdmi_destroy,
+ .enable = tegra_dc_hdmi_enable,
+ .disable = tegra_dc_hdmi_disable,
+ .detect = tegra_dc_hdmi_detect,
+ .suspend = tegra_dc_hdmi_suspend,
+ .resume = tegra_dc_hdmi_resume,
+};
diff --git a/drivers/video/tegra/dc/hdmi.h b/drivers/video/tegra/dc/hdmi.h
new file mode 100644
index 000000000000..0189f08719fe
--- /dev/null
+++ b/drivers/video/tegra/dc/hdmi.h
@@ -0,0 +1,183 @@
+/*
+ * drivers/video/tegra/dc/hdmi.h
+ *
+ * generic, non-Tegra-specific HDMI declarations
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_HDMI_H
+#define __DRIVERS_VIDEO_TEGRA_DC_HDMI_H
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HDMI_INFOFRAME_TYPE_AVI 0x82
+#define HDMI_INFOFRAME_TYPE_SPD 0x83
+#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
+#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
+#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
+
+/* all fields little endian */
+struct hdmi_avi_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ unsigned s:2; /* scan information */
+ unsigned b:2; /* bar info data valid */
+ unsigned a:1; /* active info present */
+ unsigned y:2; /* RGB or YCbCr */
+ unsigned res1:1;
+
+ /* PB2 */
+ unsigned r:4; /* active format aspect ratio */
+ unsigned m:2; /* picture aspect ratio */
+ unsigned c:2; /* colorimetry */
+
+ /* PB3 */
+ unsigned sc:2; /* scan information */
+ unsigned q:2; /* quantization range */
+ unsigned ec:3; /* extended colorimetry */
+ unsigned itc:1; /* IT content */
+
+ /* PB4 */
+ unsigned vic:7; /* video format id code */
+ unsigned res4:1;
+
+ /* PB5 */
+ unsigned pr:4; /* pixel repetition factor */
+ unsigned cn:2; /* IT content type */
+ unsigned yq:2; /* ycc quantization range */
+
+ /* PB6-7 */
+ u16 top_bar_end_line;
+
+ /* PB8-9 */
+ u16 bot_bar_start_line;
+
+ /* PB10-11 */
+ u16 left_bar_end_pixel;
+
+ /* PB12-13 */
+ u16 right_bar_start_pixel;
+} __attribute__((packed));
+
+#define HDMI_AVI_VERSION 0x02
+
+#define HDMI_AVI_Y_RGB 0x0
+#define HDMI_AVI_Y_YCBCR_422 0x1
+#define HDMI_AVI_Y_YCBCR_444 0x2
+
+#define HDMI_AVI_B_VERT 0x1
+#define HDMI_AVI_B_HORIZ 0x2
+
+#define HDMI_AVI_S_NONE 0x0
+#define HDMI_AVI_S_OVERSCAN 0x1
+#define HDMI_AVI_S_UNDERSCAN 0x2
+
+#define HDMI_AVI_C_NONE 0x0
+#define HDMI_AVI_C_SMPTE 0x1
+#define HDMI_AVI_C_ITU_R 0x2
+#define HDMI_AVI_C_EXTENDED 0x4
+
+#define HDMI_AVI_M_4_3 0x1
+#define HDMI_AVI_M_16_9 0x2
+
+#define HDMI_AVI_R_SAME 0x8
+#define HDMI_AVI_R_4_3_CENTER 0x9
+#define HDMI_AVI_R_16_9_CENTER 0xa
+#define HDMI_AVI_R_14_9_CENTER 0xb
+
+/* all fields little endian */
+struct hdmi_audio_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ unsigned cc:3; /* channel count */
+ unsigned res1:1;
+ unsigned ct:4; /* coding type */
+
+ /* PB2 */
+ unsigned ss:2; /* sample size */
+ unsigned sf:3; /* sample frequency */
+ unsigned res2:3;
+
+ /* PB3 */
+ unsigned cxt:5; /* coding extension type */
+ unsigned res3:3;
+
+ /* PB4 */
+ u8 ca; /* channel/speaker allocation */
+
+ /* PB5 */
+ unsigned res5:3;
+ unsigned lsv:4; /* level shift value */
+ unsigned dm_inh:1; /* downmix inhibit */
+
+ /* PB6-10 reserved */
+ u8 res6;
+ u8 res7;
+ u8 res8;
+ u8 res9;
+ u8 res10;
+} __attribute__((packed));
+
+#define HDMI_AUDIO_VERSION 0x01
+
+#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CC_2 0x1
+#define HDMI_AUDIO_CC_3 0x2
+#define HDMI_AUDIO_CC_4 0x3
+#define HDMI_AUDIO_CC_5 0x4
+#define HDMI_AUDIO_CC_6 0x5
+#define HDMI_AUDIO_CC_7 0x6
+#define HDMI_AUDIO_CC_8 0x7
+
+#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CT_PCM 0x1
+#define HDMI_AUDIO_CT_AC3 0x2
+#define HDMI_AUDIO_CT_MPEG1 0x3
+#define HDMI_AUDIO_CT_MP3 0x4
+#define HDMI_AUDIO_CT_MPEG2 0x5
+#define HDMI_AUDIO_CT_AAC_LC 0x6
+#define HDMI_AUDIO_CT_DTS 0x7
+#define HDMI_AUDIO_CT_ATRAC 0x8
+#define HDMI_AUDIO_CT_DSD 0x9
+#define HDMI_AUDIO_CT_E_AC3 0xa
+#define HDMI_AUDIO_CT_DTS_HD 0xb
+#define HDMI_AUDIO_CT_MLP 0xc
+#define HDMI_AUDIO_CT_DST 0xd
+#define HDMI_AUDIO_CT_WMA_PRO 0xe
+#define HDMI_AUDIO_CT_CXT 0xf
+
+#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SF_32K 0x1
+#define HDMI_AUDIO_SF_44_1K 0x2
+#define HDMI_AUDIO_SF_48K 0x3
+#define HDMI_AUDIO_SF_88_2K 0x4
+#define HDMI_AUDIO_SF_96K 0x5
+#define HDMI_AUDIO_SF_176_4K 0x6
+#define HDMI_AUDIO_SF_192K 0x7
+
+#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SS_16BIT 0x1
+#define HDMI_AUDIO_SS_20BIT 0x2
+#define HDMI_AUDIO_SS_24BIT 0x3
+
+#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
+#define HDMI_AUDIO_CXT_HE_AAC 0x1
+#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
+#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
+
+#endif
diff --git a/drivers/video/tegra/dc/hdmi_reg.h b/drivers/video/tegra/dc/hdmi_reg.h
new file mode 100644
index 000000000000..67d2b23a3d81
--- /dev/null
+++ b/drivers/video/tegra/dc/hdmi_reg.h
@@ -0,0 +1,430 @@
+/*
+ * drivers/video/tegra/dc/hdmi_reg.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_VIDEO_TEGRA_DC_HDMI_REG_H
+#define __DRIVERS_VIDEO_TEGRA_DC_HDMI_REG_H
+
+#define HDMI_CTXSW 0x00
+#define HDMI_NV_PDISP_SOR_STATE0 0x01
+#define SOR_STATE_UPDATE (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1 0x02
+#define SOR_STATE_ASY_HEAD_OPMODE_SLEEP (0 << 0)
+#define SOR_STATE_ASY_HEAD_OPMODE_SNOOZE (1 << 0)
+#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
+#define SOR_STATE_ASY_ORMODE_SAFE (0 << 2)
+#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2)
+#define SOR_STATE_ATTACHED (1 << 3)
+#define SOR_STATE_ARM_SHOW_VGA (1 << 4)
+
+#define HDMI_NV_PDISP_SOR_STATE2 0x03
+#define SOR_STATE_ASY_OWNER_NONE (0 << 0)
+#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0)
+#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4)
+#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4)
+#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6)
+#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6)
+#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8)
+#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12)
+#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12)
+#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13)
+#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13)
+#define SOR_STATE_ASY_DEPOL_POS (0 << 14)
+#define SOR_STATE_ASY_DEPOL_NEG (1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10
+#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI 0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+#define INFOFRAME_CTRL_OTHER (1 << 4)
+#define INFOFRAME_CTRL_SINGLE (1 << 8)
+
+#define INFOFRAME_HEADER_TYPE(x) ((x) & 0xff)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_LEN(x) (((x) & 0xf) << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a
+#define GENERIC_CTRL_ENABLE (1 << 0)
+#define GENERIC_CTRL_OTHER (1 << 4)
+#define GENERIC_CTRL_SINGLE (1 << 8)
+#define GENERIC_CTRL_HBLANK (1 << 12)
+#define GENERIC_CTRL_AUDIO (1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43
+#define ACR_SB3(x) (((x) & 0xff) << 8)
+#define ACR_SB2(x) (((x) & 0xff) << 16)
+#define ACR_SB1(x) (((x) & 0xff) << 24)
+#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
+
+#define ACR_SB6(x) (((x) & 0xff) << 0)
+#define ACR_SB5(x) (((x) & 0xff) << 8)
+#define ACR_SB4(x) (((x) & 0xff) << 16)
+#define ACR_ENABLE (1 << 31)
+#define ACR_SUBPACK_N(x) ((x) & 0xffffff)
+
+#define HDMI_NV_PDISP_HDMI_CTRL 0x44
+#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+#define HDMI_CTRL_AUDIO_LAYOUT (1 << 8)
+#define HDMI_CTRL_SAMPLE_FLAT (1 << 12)
+#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define HDMI_CTRL_ENABLE (1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46
+#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0)
+#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
+#define VSYNC_WINDOW_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0 0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1 0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e
+#define HDMI_NV_PDISP_HDMI_SPARE 0x4f
+#define SPARE_HW_CTS (1 << 0)
+#define SPARE_FORCE_SW_CTS (1 << 1)
+#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
+#define SPARE_ACR_PRIORITY_HIGH (0 << 31)
+#define SPARE_ACR_PRIORITY_LOW (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51
+#define HDMI_NV_PDISP_HDCPRIF_ROM_CTRL 0x53
+#define HDMI_NV_PDISP_SOR_CAP 0x54
+#define HDMI_NV_PDISP_SOR_PWR 0x55
+#define SOR_PWR_NORMAL_STATE_PD (0 << 0)
+#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
+#define SOR_PWR_NORMAL_START_NORMAL (0 << 1)
+#define SOR_PWR_NORMAL_START_ALT (1 << 1)
+#define SOR_PWR_SAFE_STATE_PD (0 << 16)
+#define SOR_PWR_SAFE_STATE_PU (1 << 16)
+#define SOR_PWR_SAFE_START_NORMAL (0 << 17)
+#define SOR_PWR_SAFE_START_ALT (1 << 17)
+#define SOR_PWR_HALT_DELAY (1 << 24)
+#define SOR_PWR_MODE (1 << 28)
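+/*
+ * Bit 31 is double-buffered: write SETTING_NEW_TRIGGER to latch a new
+ * setting, then SETTING_NEW_DONE; it reads back as SETTING_NEW_PENDING
+ * until the hardware has applied the setting (see the poll in hdmi.c).
+ */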
+#define SOR_PWR_SETTING_NEW_DONE (0 << 31)
+#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
+#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_TEST 0x56
+#define HDMI_NV_PDISP_SOR_PLL0 0x57
+#define SOR_PLL_PWR (1 << 0)
+#define SOR_PLL_PDBG (1 << 1)
+#define SOR_PLL_VCOPD (1 << 2)
+#define SOR_PLL_PDPORT (1 << 3)
+#define SOR_PLL_RESISTORSEL (1 << 4)
+#define SOR_PLL_PULLDOWN (1 << 5)
+#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12)
+#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16)
+#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0x3) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1 0x58
+#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
+#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
+#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20)
+#define SOR_PLL_PE_EN (1 << 28)
+#define SOR_PLL_HALF_FULL_PE (1 << 29)
+#define SOR_PLL_S_D_PIN_PE (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2 0x59
+#define HDMI_NV_PDISP_SOR_CSTM 0x5a
+#define SOR_CSTM_PD_TXDA_0 (1 << 0)
+#define SOR_CSTM_PD_TXDA_1 (1 << 1)
+#define SOR_CSTM_PD_TXDA_2 (1 << 2)
+#define SOR_CSTM_PD_TXDA_3 (1 << 3)
+#define SOR_CSTM_PD_TXDB_0 (1 << 4)
+#define SOR_CSTM_PD_TXDB_1 (1 << 5)
+#define SOR_CSTM_PD_TXDB_2 (1 << 6)
+#define SOR_CSTM_PD_TXDB_3 (1 << 7)
+#define SOR_CSTM_PD_TXCA (1 << 8)
+#define SOR_CSTM_PD_TXCB (1 << 9)
+#define SOR_CSTM_UPPER (1 << 11)
+#define SOR_CSTM_MODE(x) (((x) & 0x3) << 12)
+#define SOR_CSTM_LINKACTA (1 << 14)
+#define SOR_CSTM_LINKACTB (1 << 15)
+#define SOR_CSTM_LVDS_EN (1 << 16)
+#define SOR_CSTM_DUP_SYNC (1 << 17)
+#define SOR_CSTM_NEW_MODE (1 << 18)
+#define SOR_CSTM_BALANCED (1 << 19)
+#define SOR_CSTM_PLLDIV (1 << 21)
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+#define SOR_CSTM_ROTDAT(x) (((x) & 0x7) << 28)
+
+#define HDMI_NV_PDISP_SOR_LVDS 0x5b
+#define HDMI_NV_PDISP_SOR_CRCA 0x5c
+#define HDMI_NV_PDISP_SOR_CRCB 0x5d
+#define HDMI_NV_PDISP_SOR_BLANK 0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
+#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
+#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
+#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_PC(x) (((x) & 0xf) << 16)
+#define SOR_SEQ_STATUS (1 << 28)
+#define SOR_SEQ_SWITCH (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_SEQ_INST0 0x60
+#define HDMI_NV_PDISP_SOR_SEQ_INST1 0x61
+#define HDMI_NV_PDISP_SOR_SEQ_INST2 0x62
+#define HDMI_NV_PDISP_SOR_SEQ_INST3 0x63
+#define HDMI_NV_PDISP_SOR_SEQ_INST4 0x64
+#define HDMI_NV_PDISP_SOR_SEQ_INST5 0x65
+#define HDMI_NV_PDISP_SOR_SEQ_INST6 0x66
+#define HDMI_NV_PDISP_SOR_SEQ_INST7 0x67
+#define HDMI_NV_PDISP_SOR_SEQ_INST8 0x68
+#define HDMI_NV_PDISP_SOR_SEQ_INST9 0x69
+#define HDMI_NV_PDISP_SOR_SEQ_INSTA 0x6a
+#define HDMI_NV_PDISP_SOR_SEQ_INSTB 0x6b
+#define HDMI_NV_PDISP_SOR_SEQ_INSTC 0x6c
+#define HDMI_NV_PDISP_SOR_SEQ_INSTD 0x6d
+#define HDMI_NV_PDISP_SOR_SEQ_INSTE 0x6e
+#define HDMI_NV_PDISP_SOR_SEQ_INSTF 0x6f
+#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0)
+#define SOR_SEQ_INST_WAIT_UNITS_US (0 << 12)
+#define SOR_SEQ_INST_WAIT_UNITS_MS (1 << 12)
+#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
+#define SOR_SEQ_INST_HALT (1 << 15)
+#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+#define SOR_SEQ_INST_TRISTATE_IOS (1 << 24)
+#define SOR_SEQ_INST_BLACK_DATA (1 << 25)
+#define SOR_SEQ_INST_BLANK_DE (1 << 26)
+#define SOR_SEQ_INST_BLANK_H (1 << 27)
+#define SOR_SEQ_INST_BLANK_V (1 << 28)
+#define SOR_SEQ_INST_ASSERT_PLL_RESETV (1 << 29)
+#define SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30)
+#define SOR_SEQ_INST_PLL_PULLDOWN (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0 0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1 0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0 0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1 0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0 0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1 0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0 0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1 0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b
+#define HDMI_NV_PDISP_SOR_TRIG 0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e
+#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0)
+#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
+#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
+#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
+#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+#define DRIVE_CURRENT_1_500_mA 0x00
+#define DRIVE_CURRENT_1_875_mA 0x01
+#define DRIVE_CURRENT_2_250_mA 0x02
+#define DRIVE_CURRENT_2_625_mA 0x03
+#define DRIVE_CURRENT_3_000_mA 0x04
+#define DRIVE_CURRENT_3_375_mA 0x05
+#define DRIVE_CURRENT_3_750_mA 0x06
+#define DRIVE_CURRENT_4_125_mA 0x07
+#define DRIVE_CURRENT_4_500_mA 0x08
+#define DRIVE_CURRENT_4_875_mA 0x09
+#define DRIVE_CURRENT_5_250_mA 0x0a
+#define DRIVE_CURRENT_5_625_mA 0x0b
+#define DRIVE_CURRENT_6_000_mA 0x0c
+#define DRIVE_CURRENT_6_375_mA 0x0d
+#define DRIVE_CURRENT_6_750_mA 0x0e
+#define DRIVE_CURRENT_7_125_mA 0x0f
+#define DRIVE_CURRENT_7_500_mA 0x10
+#define DRIVE_CURRENT_7_875_mA 0x11
+#define DRIVE_CURRENT_8_250_mA 0x12
+#define DRIVE_CURRENT_8_625_mA 0x13
+#define DRIVE_CURRENT_9_000_mA 0x14
+#define DRIVE_CURRENT_9_375_mA 0x15
+#define DRIVE_CURRENT_9_750_mA 0x16
+#define DRIVE_CURRENT_10_125_mA 0x17
+#define DRIVE_CURRENT_10_500_mA 0x18
+#define DRIVE_CURRENT_10_875_mA 0x19
+#define DRIVE_CURRENT_11_250_mA 0x1a
+#define DRIVE_CURRENT_11_625_mA 0x1b
+#define DRIVE_CURRENT_12_000_mA 0x1c
+#define DRIVE_CURRENT_12_375_mA 0x1d
+#define DRIVE_CURRENT_12_750_mA 0x1e
+#define DRIVE_CURRENT_13_125_mA 0x1f
+#define DRIVE_CURRENT_13_500_mA 0x20
+#define DRIVE_CURRENT_13_875_mA 0x21
+#define DRIVE_CURRENT_14_250_mA 0x22
+#define DRIVE_CURRENT_14_625_mA 0x23
+#define DRIVE_CURRENT_15_000_mA 0x24
+#define DRIVE_CURRENT_15_375_mA 0x25
+#define DRIVE_CURRENT_15_750_mA 0x26
+#define DRIVE_CURRENT_16_125_mA 0x27
+#define DRIVE_CURRENT_16_500_mA 0x28
+#define DRIVE_CURRENT_16_875_mA 0x29
+#define DRIVE_CURRENT_17_250_mA 0x2a
+#define DRIVE_CURRENT_17_625_mA 0x2b
+#define DRIVE_CURRENT_18_000_mA 0x2c
+#define DRIVE_CURRENT_18_375_mA 0x2d
+#define DRIVE_CURRENT_18_750_mA 0x2e
+#define DRIVE_CURRENT_19_125_mA 0x2f
+#define DRIVE_CURRENT_19_500_mA 0x30
+#define DRIVE_CURRENT_19_875_mA 0x31
+#define DRIVE_CURRENT_20_250_mA 0x32
+#define DRIVE_CURRENT_20_625_mA 0x33
+#define DRIVE_CURRENT_21_000_mA 0x34
+#define DRIVE_CURRENT_21_375_mA 0x35
+#define DRIVE_CURRENT_21_750_mA 0x36
+#define DRIVE_CURRENT_22_125_mA 0x37
+#define DRIVE_CURRENT_22_500_mA 0x38
+#define DRIVE_CURRENT_22_875_mA 0x39
+#define DRIVE_CURRENT_23_250_mA 0x3a
+#define DRIVE_CURRENT_23_625_mA 0x3b
+#define DRIVE_CURRENT_24_000_mA 0x3c
+#define DRIVE_CURRENT_24_375_mA 0x3d
+#define DRIVE_CURRENT_24_750_mA 0x3e
+
+#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
+/* note: datasheet defines FS1..FS7. we have FS(0)..FS(6) */
+#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x))
+#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0)
+#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b
+#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0)
+#define AUDIO_CNTRL0_SOFT_RESET (1 << 8)
+#define AUDIO_CNTRL0_SOFT_RESET_ALL (1 << 12)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_UNKNOWN (1 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_32K (2 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_44_1K (0 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_48K (2 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_88_2K (8 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_96K (10 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_176_4K (12 << 16)
+#define AUDIO_CNTRL0_SAMPLING_FREQ_192K (14 << 16)
+#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N 0x8c
+#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0)
+#define AUDIO_N_RESETF (1 << 20)
+#define AUDIO_N_GENERATE_NORMAL (0 << 24)
+#define AUDIO_N_GENERATE_ALTERNATE (1 << 24)
+#define AUDIO_N_LOOKUP_ENABLE (1 << 28)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94
+#define HDMI_NV_PDISP_SOR_REFCLK 0x95
+#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL 0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL 0x97
+#define HDMI_SRC_DISPLAYA (0 << 0)
+#define HDMI_SRC_DISPLAYB (1 << 0)
+#define ARM_VIDEO_RANGE_FULL (0 << 1)
+#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH 0x98
+#define HDMI_NV_PDISP_PE_CURRENT 0x99
+#define PE_CURRENT0(x) (((x) & 0xf) << 0)
+#define PE_CURRENT1(x) (((x) & 0xf) << 8)
+#define PE_CURRENT2(x) (((x) & 0xf) << 16)
+#define PE_CURRENT3(x) (((x) & 0xf) << 24)
+
+#define HDMI_NV_PDISP_KEY_CTRL 0x9a
+#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
+
+#endif
diff --git a/drivers/video/tegra/dc/rgb.c b/drivers/video/tegra/dc/rgb.c
new file mode 100644
index 000000000000..b0652f10fe96
--- /dev/null
+++ b/drivers/video/tegra/dc/rgb.c
@@ -0,0 +1,92 @@
+/*
+ * drivers/video/tegra/dc/rgb.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include <mach/dc.h>
+
+#include "dc_reg.h"
+#include "dc_priv.h"
+
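+/* pin tables are (register, value) pairs consumed by tegra_dc_write_table() */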
+static const u32 tegra_dc_rgb_enable_pintable[] = {
+ DC_COM_PIN_OUTPUT_ENABLE0, 0x00000000,
+ DC_COM_PIN_OUTPUT_ENABLE1, 0x00000000,
+ DC_COM_PIN_OUTPUT_ENABLE2, 0x00000000,
+ DC_COM_PIN_OUTPUT_ENABLE3, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY0, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY1, 0x01000000,
+ DC_COM_PIN_OUTPUT_POLARITY2, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY3, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA0, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA1, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA2, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA3, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT0, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT1, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT2, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT3, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT4, 0x00210222,
+ DC_COM_PIN_OUTPUT_SELECT5, 0x00002200,
+ DC_COM_PIN_OUTPUT_SELECT6, 0x00020000,
+};
+
+static const u32 tegra_dc_rgb_disable_pintable[] = {
+ DC_COM_PIN_OUTPUT_ENABLE0, 0x55555555,
+ DC_COM_PIN_OUTPUT_ENABLE1, 0x55150005,
+ DC_COM_PIN_OUTPUT_ENABLE2, 0x55555555,
+ DC_COM_PIN_OUTPUT_ENABLE3, 0x55555555,
+ DC_COM_PIN_OUTPUT_POLARITY0, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY1, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY2, 0x00000000,
+ DC_COM_PIN_OUTPUT_POLARITY3, 0x00000000,
+ DC_COM_PIN_OUTPUT_DATA0, 0xaaaaaaaa,
+ DC_COM_PIN_OUTPUT_DATA1, 0xaaaaaaaa,
+ DC_COM_PIN_OUTPUT_DATA2, 0xaaaaaaaa,
+ DC_COM_PIN_OUTPUT_DATA3, 0xaaaaaaaa,
+ DC_COM_PIN_OUTPUT_SELECT0, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT1, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT2, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT3, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT4, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT5, 0x00000000,
+ DC_COM_PIN_OUTPUT_SELECT6, 0x00000000,
+};
+
+void tegra_dc_rgb_enable(struct tegra_dc *dc)
+{
+ tegra_dc_writel(dc, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE,
+ DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
+
+ tegra_dc_write_table(dc, tegra_dc_rgb_enable_pintable);
+}
+
+void tegra_dc_rgb_disable(struct tegra_dc *dc)
+{
+ tegra_dc_writel(dc, 0x00000000, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_write_table(dc, tegra_dc_rgb_disable_pintable);
+}
+
+struct tegra_dc_out_ops tegra_dc_rgb_ops = {
+ .enable = tegra_dc_rgb_enable,
+ .disable = tegra_dc_rgb_disable,
+};
diff --git a/drivers/video/tegra/fb.c b/drivers/video/tegra/fb.c
new file mode 100644
index 000000000000..cc26c5977a20
--- /dev/null
+++ b/drivers/video/tegra/fb.c
@@ -0,0 +1,807 @@
+/*
+ * drivers/video/tegra/fb.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ * Colin Cross <ccross@android.com>
+ * Travis Geiselbrecht <travis@palm.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/workqueue.h>
+
+#include <asm/atomic.h>
+
+#include <video/tegrafb.h>
+
+#include <mach/dc.h>
+#include <mach/fb.h>
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+
+#include "host/dev.h"
+#include "nvmap/nvmap.h"
+
+struct tegra_fb_info {
+ struct tegra_dc_win *win;
+ struct nvhost_device *ndev;
+ struct fb_info *info;
+ bool valid;
+
+ struct resource *fb_mem;
+
+ int xres;
+ int yres;
+
+ atomic_t in_use;
+ struct nvmap_client *user_nvmap;
+ struct nvmap_client *fb_nvmap;
+
+ struct workqueue_struct *flip_wq;
+};
+
+struct tegra_fb_flip_win {
+ struct tegra_fb_windowattr attr;
+ struct nvmap_handle_ref *handle;
+ dma_addr_t phys_addr;
+};
+
+struct tegra_fb_flip_data {
+ struct work_struct work;
+ struct tegra_fb_info *fb;
+ struct tegra_fb_flip_win win[TEGRA_FB_FLIP_N_WINDOWS];
+ u32 syncpt_max;
+};
+
+/* palette array used by the fbcon */
+static u32 pseudo_palette[16];
+
+static int tegra_fb_open(struct fb_info *info, int user)
+{
+ struct tegra_fb_info *tegra_fb = info->par;
+
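+ /* enforce a single concurrent opener via the in_use flag */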
+ if (atomic_xchg(&tegra_fb->in_use, 1))
+ return -EBUSY;
+
+ tegra_fb->user_nvmap = NULL;
+
+ return 0;
+}
+
+static int tegra_fb_release(struct fb_info *info, int user)
+{
+ struct tegra_fb_info *tegra_fb = info->par;
+
+ flush_workqueue(tegra_fb->flip_wq);
+
+ if (tegra_fb->user_nvmap) {
+ nvmap_client_put(tegra_fb->user_nvmap);
+ tegra_fb->user_nvmap = NULL;
+ }
+
+ WARN_ON(!atomic_xchg(&tegra_fb->in_use, 0));
+
+ return 0;
+}
+
+static int tegra_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if ((var->yres * var->xres * var->bits_per_pixel / 8 * 2) >
+ info->screen_size)
+ return -EINVAL;
+
+ /* double yres_virtual to allow double buffering through pan_display */
+ var->yres_virtual = var->yres * 2;
+
+ return 0;
+}
+
+static int tegra_fb_set_par(struct fb_info *info)
+{
+ struct tegra_fb_info *tegra_fb = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+
+ if (var->bits_per_pixel) {
+ /* we only support RGB ordering for now */
+ switch (var->bits_per_pixel) {
+ case 32:
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 16;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ tegra_fb->win->fmt = TEGRA_WIN_FMT_R8G8B8A8;
+ break;
+ case 16:
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ tegra_fb->win->fmt = TEGRA_WIN_FMT_B5G6R5;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ info->fix.line_length = var->xres * var->bits_per_pixel / 8;
+ tegra_fb->win->stride = info->fix.line_length;
+ tegra_fb->win->stride_uv = 0;
+ tegra_fb->win->offset_u = 0;
+ tegra_fb->win->offset_v = 0;
+ }
+
+ if (var->pixclock) {
+ struct tegra_dc_mode mode;
+
+ info->mode = (struct fb_videomode *)
+ fb_find_best_mode(var, &info->modelist);
+ if (!info->mode) {
+ dev_warn(&tegra_fb->ndev->dev, "can't match video mode\n");
+ return -EINVAL;
+ }
+
+ mode.pclk = PICOS2KHZ(info->mode->pixclock) * 1000;
+ mode.h_ref_to_sync = 1;
+ mode.v_ref_to_sync = 1;
+ mode.h_sync_width = info->mode->hsync_len;
+ mode.v_sync_width = info->mode->vsync_len;
+ mode.h_back_porch = info->mode->left_margin;
+ mode.v_back_porch = info->mode->upper_margin;
+ mode.h_active = info->mode->xres;
+ mode.v_active = info->mode->yres;
+ mode.h_front_porch = info->mode->right_margin;
+ mode.v_front_porch = info->mode->lower_margin;
+
+ tegra_dc_set_mode(tegra_fb->win->dc, &mode);
+
+ tegra_fb->win->w = info->mode->xres;
+ tegra_fb->win->h = info->mode->yres;
+ tegra_fb->win->out_w = info->mode->xres;
+ tegra_fb->win->out_h = info->mode->yres;
+ }
+ return 0;
+}
+
+static int tegra_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp, struct fb_info *info)
+{
+ struct fb_var_screeninfo *var = &info->var;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ u32 v;
+
+ if (regno >= 16)
+ return -EINVAL;
+
+ red = (red >> (16 - info->var.red.length));
+ green = (green >> (16 - info->var.green.length));
+ blue = (blue >> (16 - info->var.blue.length));
+
+ v = (red << var->red.offset) |
+ (green << var->green.offset) |
+ (blue << var->blue.offset);
+
+ ((u32 *)info->pseudo_palette)[regno] = v;
+ }
+
+ return 0;
+}
+
+static int tegra_fb_blank(int blank, struct fb_info *info)
+{
+ struct tegra_fb_info *tegra_fb = info->par;
+
+ switch (blank) {
+ case FB_BLANK_UNBLANK:
+ dev_dbg(&tegra_fb->ndev->dev, "unblank\n");
+ tegra_dc_enable(tegra_fb->win->dc);
+ return 0;
+
+ case FB_BLANK_POWERDOWN:
+ dev_dbg(&tegra_fb->ndev->dev, "blank\n");
+ flush_workqueue(tegra_fb->flip_wq);
+ tegra_dc_disable(tegra_fb->win->dc);
+ return 0;
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+void tegra_fb_suspend(struct tegra_fb_info *tegra_fb)
+{
+ flush_workqueue(tegra_fb->flip_wq);
+}
+
+static int tegra_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct tegra_fb_info *tegra_fb = info->par;
+ char __iomem *flush_start;
+ char __iomem *flush_end;
+ u32 addr;
+
+ flush_start = info->screen_base + (var->yoffset * info->fix.line_length);
+ flush_end = flush_start + (var->yres * info->fix.line_length);
+
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+
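+ /* recompute the scanout base address for the new pan offset */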
+ addr = info->fix.smem_start + (var->yoffset * info->fix.line_length) +
+ (var->xoffset * (var->bits_per_pixel/8));
+
+ tegra_fb->win->phys_addr = addr;
+ /* TODO: update virt_addr */
+
+ tegra_dc_update_windows(&tegra_fb->win, 1);
+ tegra_dc_sync_windows(&tegra_fb->win, 1);
+
+ if (WARN_ON(tegra_fb->win->cur_handle)) {
+ nvmap_unpin(tegra_fb->fb_nvmap, tegra_fb->win->cur_handle);
+ nvmap_free(tegra_fb->fb_nvmap, tegra_fb->win->cur_handle);
+ tegra_fb->win->cur_handle = NULL;
+ }
+
+ return 0;
+}
+
+static void tegra_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ cfb_fillrect(info, rect);
+}
+
+static void tegra_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region)
+{
+ cfb_copyarea(info, region);
+}
+
+static void tegra_fb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ cfb_imageblit(info, image);
+}
+
+/* TODO: implement ALLOC, FREE, BLANK ioctls */
+
+static int tegra_fb_set_nvmap_fd(struct tegra_fb_info *tegra_fb, int fd)
+{
+ struct nvmap_client *nvmap = NULL;
+
+ if (fd < 0)
+ return -EINVAL;
+
+ nvmap = nvmap_client_get_file(fd);
+ if (IS_ERR(nvmap))
+ return PTR_ERR(nvmap);
+
+ if (tegra_fb->user_nvmap)
+ nvmap_client_put(tegra_fb->user_nvmap);
+
+ tegra_fb->user_nvmap = nvmap;
+
+ return 0;
+}
+
+static int tegra_fb_pin_window(struct tegra_fb_info *tegra_fb,
+ struct tegra_fb_flip_win *flip_win)
+{
+ struct nvmap_handle_ref *win_dupe;
+ struct nvmap_handle *win_handle;
+ unsigned long buff_id = flip_win->attr.buff_id;
+
+ if (!buff_id)
+ return 0;
+
+ win_handle = nvmap_get_handle_id(tegra_fb->user_nvmap, buff_id);
+ if (win_handle == NULL) {
+ dev_err(&tegra_fb->ndev->dev, "%s: flip invalid "
+ "handle %08lx\n", current->comm, buff_id);
+ return -EPERM;
+ }
+
+ /* duplicate the new framebuffer's handle into the fb driver's
+ * nvmap context, to ensure that the handle won't be freed as
+ * long as it is in-use by the fb driver */
+ win_dupe = nvmap_duplicate_handle_id(tegra_fb->fb_nvmap, buff_id);
+ nvmap_handle_put(win_handle);
+
+ if (IS_ERR(win_dupe)) {
+ dev_err(&tegra_fb->ndev->dev, "couldn't duplicate handle\n");
+ return PTR_ERR(win_dupe);
+ }
+
+ flip_win->handle = win_dupe;
+
+ flip_win->phys_addr = nvmap_pin(tegra_fb->fb_nvmap, win_dupe);
+ if (IS_ERR((void *)flip_win->phys_addr)) {
+ dev_err(&tegra_fb->ndev->dev, "couldn't pin handle\n");
+ nvmap_free(tegra_fb->fb_nvmap, win_dupe);
+ return PTR_ERR((void *)flip_win->phys_addr);
+ }
+
+ return 0;
+}
+
+static int tegra_fb_set_windowattr(struct tegra_fb_info *tegra_fb,
+ struct tegra_dc_win *win,
+ const struct tegra_fb_flip_win *flip_win)
+{
+ if (flip_win->handle == NULL) {
+ win->flags = 0;
+ win->cur_handle = NULL;
+ return 0;
+ }
+
+ win->flags = TEGRA_WIN_FLAG_ENABLED;
+ if (flip_win->attr.blend == TEGRA_FB_WIN_BLEND_PREMULT)
+ win->flags |= TEGRA_WIN_FLAG_BLEND_PREMULT;
+ else if (flip_win->attr.blend == TEGRA_FB_WIN_BLEND_COVERAGE)
+ win->flags |= TEGRA_WIN_FLAG_BLEND_COVERAGE;
+ win->fmt = flip_win->attr.pixformat;
+ win->x = flip_win->attr.x;
+ win->y = flip_win->attr.y;
+ win->w = flip_win->attr.w;
+ win->h = flip_win->attr.h;
+ win->out_x = flip_win->attr.out_x;
+ win->out_y = flip_win->attr.out_y;
+ win->out_w = flip_win->attr.out_w;
+ win->out_h = flip_win->attr.out_h;
+ win->z = flip_win->attr.z;
+ win->cur_handle = flip_win->handle;
+
+ /* STOPSHIP verify that this won't read outside of the surface */
+ win->phys_addr = flip_win->phys_addr + flip_win->attr.offset;
+ win->offset_u = flip_win->attr.offset_u + flip_win->attr.offset;
+ win->offset_v = flip_win->attr.offset_v + flip_win->attr.offset;
+ win->stride = flip_win->attr.stride;
+ win->stride_uv = flip_win->attr.stride_uv;
+
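+ /* wait (bounded at 500 ms) for the client's pre-flip syncpoint */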
+ if ((s32)flip_win->attr.pre_syncpt_id >= 0) {
+ nvhost_syncpt_wait_timeout(&tegra_fb->ndev->host->syncpt,
+ flip_win->attr.pre_syncpt_id,
+ flip_win->attr.pre_syncpt_val,
+ msecs_to_jiffies(500));
+ }
+
+ return 0;
+}
+
+static void tegra_fb_flip_worker(struct work_struct *work)
+{
+ struct tegra_fb_flip_data *data =
+ container_of(work, struct tegra_fb_flip_data, work);
+ struct tegra_fb_info *tegra_fb = data->fb;
+ struct tegra_dc_win *win;
+ struct tegra_dc_win *wins[TEGRA_FB_FLIP_N_WINDOWS];
+ struct nvmap_handle_ref *unpin_handles[TEGRA_FB_FLIP_N_WINDOWS];
+ int i, nr_win = 0, nr_unpin = 0;
+
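+ /* gather the windows to program and the old buffers to unpin afterwards */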
+ for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
+ struct tegra_fb_flip_win *flip_win = &data->win[i];
+ int idx = flip_win->attr.index;
+ win = tegra_dc_get_window(tegra_fb->win->dc, idx);
+
+ if (!win)
+ continue;
+
+ if (win->flags && win->cur_handle)
+ unpin_handles[nr_unpin++] = win->cur_handle;
+
+ tegra_fb_set_windowattr(tegra_fb, win, &data->win[i]);
+
+ wins[nr_win++] = win;
+ }
+
+ tegra_dc_update_windows(wins, nr_win);
+ /* TODO: implement swapinterval here */
+ tegra_dc_sync_windows(wins, nr_win);
+
+ tegra_dc_incr_syncpt_min(tegra_fb->win->dc, data->syncpt_max);
+
+ /* unpin and deref previous front buffers */
+ for (i = 0; i < nr_unpin; i++) {
+ nvmap_unpin(tegra_fb->fb_nvmap, unpin_handles[i]);
+ nvmap_free(tegra_fb->fb_nvmap, unpin_handles[i]);
+ }
+
+ kfree(data);
+}
+
+static int tegra_fb_flip(struct tegra_fb_info *tegra_fb,
+ struct tegra_fb_flip_args *args)
+{
+ struct tegra_fb_flip_data *data;
+ struct tegra_fb_flip_win *flip_win;
+ u32 syncpt_max;
+ int i, err;
+
+ if (WARN_ON(!tegra_fb->user_nvmap))
+ return -EFAULT;
+
+ if (WARN_ON(!tegra_fb->ndev))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&tegra_fb->ndev->dev,
+ "can't allocate memory for flip\n");
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&data->work, tegra_fb_flip_worker);
+ data->fb = tegra_fb;
+
+ for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
+ flip_win = &data->win[i];
+
+ memcpy(&flip_win->attr, &args->win[i], sizeof(flip_win->attr));
+
+ err = tegra_fb_pin_window(tegra_fb, flip_win);
+ if (err < 0) {
+ dev_err(&tegra_fb->ndev->dev,
+ "error setting window attributes\n");
+ goto surf_err;
+ }
+ }
+
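+ /* reserve the syncpoint value that marks completion of this flip */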
+ syncpt_max = tegra_dc_incr_syncpt_max(tegra_fb->win->dc);
+ data->syncpt_max = syncpt_max;
+
+ queue_work(tegra_fb->flip_wq, &data->work);
+
+ args->post_syncpt_val = syncpt_max;
+ args->post_syncpt_id = tegra_dc_get_syncpt_id(tegra_fb->win->dc);
+
+ return 0;
+
+surf_err:
+ while (i--) {
+ if (data->win[i].handle) {
+ nvmap_unpin(tegra_fb->fb_nvmap,
+ data->win[i].handle);
+ nvmap_free(tegra_fb->fb_nvmap,
+ data->win[i].handle);
+ }
+ }
+ kfree(data);
+ return err;
+}
+
+/* TODO: implement private window ioctls to set overlay x,y */
+
+static int tegra_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ struct tegra_fb_info *tegra_fb = info->par;
+ struct tegra_fb_flip_args flip_args;
+ struct tegra_fb_modedb modedb;
+ struct fb_modelist *modelist;
+ int i;
+ int fd;
+ int ret;
+
+ switch (cmd) {
+ case FBIO_TEGRA_SET_NVMAP_FD:
+ if (copy_from_user(&fd, (void __user *)arg, sizeof(fd)))
+ return -EFAULT;
+
+ return tegra_fb_set_nvmap_fd(tegra_fb, fd);
+
+ case FBIO_TEGRA_FLIP:
+ if (copy_from_user(&flip_args, (void __user *)arg, sizeof(flip_args)))
+ return -EFAULT;
+
+ ret = tegra_fb_flip(tegra_fb, &flip_args);
+
+ if (copy_to_user((void __user *)arg, &flip_args, sizeof(flip_args)))
+ return -EFAULT;
+
+ return ret;
+
+ case FBIO_TEGRA_GET_MODEDB:
+ if (copy_from_user(&modedb, (void __user *)arg, sizeof(modedb)))
+ return -EFAULT;
+
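+		/* copy out as many modes as the caller's modedb buffer can hold */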
+ i = 0;
+ list_for_each_entry(modelist, &info->modelist, list) {
+ struct fb_var_screeninfo var;
+
+ if (i >= modedb.modedb_len)
+ break;
+ fb_videomode_to_var(&var, &modelist->mode);
+
+ if (copy_to_user((void __user *)&modedb.modedb[i],
+ &var, sizeof(var)))
+ return -EFAULT;
+ i++;
+ }
+ modedb.modedb_len = i;
+
+ if (copy_to_user((void __user *)arg, &modedb, sizeof(modedb)))
+ return -EFAULT;
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static struct fb_ops tegra_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = tegra_fb_open,
+ .fb_release = tegra_fb_release,
+ .fb_check_var = tegra_fb_check_var,
+ .fb_set_par = tegra_fb_set_par,
+ .fb_setcolreg = tegra_fb_setcolreg,
+ .fb_blank = tegra_fb_blank,
+ .fb_pan_display = tegra_fb_pan_display,
+ .fb_fillrect = tegra_fb_fillrect,
+ .fb_copyarea = tegra_fb_copyarea,
+ .fb_imageblit = tegra_fb_imageblit,
+ .fb_ioctl = tegra_fb_ioctl,
+};
+
+void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
+ struct fb_monspecs *specs,
+ bool (*mode_filter)(struct fb_videomode *mode))
+{
+ struct fb_event event;
+ struct fb_modelist *m;
+ int i;
+
+ mutex_lock(&fb_info->info->lock);
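+	/* drop the old mode database and rebuild the modelist from the new specs */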
+ fb_destroy_modedb(fb_info->info->monspecs.modedb);
+
+ fb_destroy_modelist(&fb_info->info->modelist);
+
+ if (specs == NULL) {
+ struct tegra_dc_mode mode;
+ memset(&fb_info->info->monspecs, 0x0,
+ sizeof(fb_info->info->monspecs));
+ memset(&mode, 0x0, sizeof(mode));
+ tegra_dc_set_mode(fb_info->win->dc, &mode);
+ mutex_unlock(&fb_info->info->lock);
+ return;
+ }
+
+ memcpy(&fb_info->info->monspecs, specs,
+ sizeof(fb_info->info->monspecs));
+
+ for (i = 0; i < specs->modedb_len; i++) {
+ if (mode_filter) {
+ if (mode_filter(&specs->modedb[i]))
+ fb_add_videomode(&specs->modedb[i],
+ &fb_info->info->modelist);
+ } else {
+ fb_add_videomode(&specs->modedb[i],
+ &fb_info->info->modelist);
+ }
+ }
+
+ if (list_empty(&fb_info->info->modelist)) {
+ struct tegra_dc_mode mode;
+ memset(&fb_info->info->var, 0x0, sizeof(fb_info->info->var));
+ memset(&mode, 0x0, sizeof(mode));
+ tegra_dc_set_mode(fb_info->win->dc, &mode);
+ } else {
+ /* in case the first mode was not matched */
+ m = list_first_entry(&fb_info->info->modelist, struct fb_modelist, list);
+ m->mode.flag |= FB_MODE_IS_FIRST;
+ fb_info->info->mode = (struct fb_videomode *)
+ fb_find_best_display(specs, &fb_info->info->modelist);
+
+ fb_videomode_to_var(&fb_info->info->var, fb_info->info->mode);
+ tegra_fb_set_par(fb_info->info);
+ }
+
+ event.info = fb_info->info;
+ fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
+ mutex_unlock(&fb_info->info->lock);
+}
+
+struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev,
+ struct tegra_dc *dc,
+ struct tegra_fb_data *fb_data,
+ struct resource *fb_mem)
+{
+ struct tegra_dc_win *win;
+ struct fb_info *info;
+ struct tegra_fb_info *tegra_fb;
+ void __iomem *fb_base = NULL;
+ unsigned long fb_size = 0;
+ unsigned long fb_phys = 0;
+ int ret = 0;
+
+ win = tegra_dc_get_window(dc, fb_data->win);
+ if (!win) {
+ dev_err(&ndev->dev, "dc does not have a window at index %d\n",
+ fb_data->win);
+ return ERR_PTR(-ENOENT);
+ }
+
+ info = framebuffer_alloc(sizeof(struct tegra_fb_info), &ndev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ tegra_fb = info->par;
+ tegra_fb->win = win;
+ tegra_fb->ndev = ndev;
+ tegra_fb->fb_mem = fb_mem;
+ tegra_fb->xres = fb_data->xres;
+ tegra_fb->yres = fb_data->yres;
+ tegra_fb->fb_nvmap = nvmap_create_client(nvmap_dev, "tegra-fb");
+ if (!tegra_fb->fb_nvmap) {
+ dev_err(&ndev->dev, "couldn't create nvmap client\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ atomic_set(&tegra_fb->in_use, 0);
+
+ tegra_fb->flip_wq = create_singlethread_workqueue(dev_name(&ndev->dev));
+ if (!tegra_fb->flip_wq) {
+ dev_err(&ndev->dev, "couldn't create flip work-queue\n");
+ ret = -ENOMEM;
+ goto err_delete_wq;
+ }
+
+ if (fb_mem) {
+ fb_size = resource_size(fb_mem);
+ fb_phys = fb_mem->start;
+ fb_base = ioremap_nocache(fb_phys, fb_size);
+ if (!fb_base) {
+ dev_err(&ndev->dev, "fb can't be mapped\n");
+ ret = -EBUSY;
+ goto err_delete_wq;
+ }
+ tegra_fb->valid = true;
+ }
+
+ info->fbops = &tegra_fb_ops;
+ info->pseudo_palette = pseudo_palette;
+ info->screen_base = fb_base;
+ info->screen_size = fb_size;
+
+ strlcpy(info->fix.id, "tegra_fb", sizeof(info->fix.id));
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.xpanstep = 1;
+ info->fix.ypanstep = 1;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.smem_start = fb_phys;
+ info->fix.smem_len = fb_size;
+
+ info->var.xres = fb_data->xres;
+ info->var.yres = fb_data->yres;
+ info->var.xres_virtual = fb_data->xres;
+ info->var.yres_virtual = fb_data->yres * 2;
+ info->var.bits_per_pixel = fb_data->bits_per_pixel;
+ info->var.activate = FB_ACTIVATE_VBL;
+ /* TODO: fill in the following by querying the DC */
+ info->var.height = -1;
+ info->var.width = -1;
+ info->var.pixclock = 0;
+ info->var.left_margin = 0;
+ info->var.right_margin = 0;
+ info->var.upper_margin = 0;
+ info->var.lower_margin = 0;
+ info->var.hsync_len = 0;
+ info->var.vsync_len = 0;
+ info->var.vmode = FB_VMODE_NONINTERLACED;
+
+ win->x = 0;
+ win->y = 0;
+ win->w = fb_data->xres;
+ win->h = fb_data->yres;
+ /* TODO: set to output res dc */
+ win->out_x = 0;
+ win->out_y = 0;
+ win->out_w = fb_data->xres;
+ win->out_h = fb_data->yres;
+ win->z = 0;
+ win->phys_addr = fb_phys;
+ win->virt_addr = fb_base;
+ win->offset_u = 0;
+ win->offset_v = 0;
+ win->stride = fb_data->xres * fb_data->bits_per_pixel / 8;
+ win->stride_uv = 0;
+ win->flags = TEGRA_WIN_FLAG_ENABLED;
+
+ if (fb_mem)
+ tegra_fb_set_par(info);
+
+ if (register_framebuffer(info)) {
+ dev_err(&ndev->dev, "failed to register framebuffer\n");
+ ret = -ENODEV;
+ goto err_iounmap_fb;
+ }
+
+ tegra_fb->info = info;
+
+ dev_info(&ndev->dev, "probed\n");
+
+ if (fb_data->flags & TEGRA_FB_FLIP_ON_PROBE) {
+ tegra_dc_update_windows(&tegra_fb->win, 1);
+ tegra_dc_sync_windows(&tegra_fb->win, 1);
+ }
+
+ return tegra_fb;
+
+err_iounmap_fb:
+ iounmap(fb_base);
+err_delete_wq:
+ destroy_workqueue(tegra_fb->flip_wq);
+err_put_client:
+ nvmap_client_put(tegra_fb->fb_nvmap);
+err_free:
+ framebuffer_release(info);
+err:
+ return ERR_PTR(ret);
+}
+
+void tegra_fb_unregister(struct tegra_fb_info *fb_info)
+{
+ struct fb_info *info = fb_info->info;
+
+ if (fb_info->win->cur_handle) {
+ nvmap_unpin(fb_info->fb_nvmap, fb_info->win->cur_handle);
+ nvmap_free(fb_info->fb_nvmap, fb_info->win->cur_handle);
+ }
+
+ if (fb_info->fb_nvmap)
+ nvmap_client_put(fb_info->fb_nvmap);
+
+ unregister_framebuffer(info);
+
+ flush_workqueue(fb_info->flip_wq);
+ destroy_workqueue(fb_info->flip_wq);
+
+ iounmap(info->screen_base);
+ framebuffer_release(info);
+}
diff --git a/drivers/video/tegra/host/Makefile b/drivers/video/tegra/host/Makefile
new file mode 100644
index 000000000000..c13f8348ed9c
--- /dev/null
+++ b/drivers/video/tegra/host/Makefile
@@ -0,0 +1,13 @@
+nvhost-objs = \
+ nvhost_acm.o \
+ nvhost_syncpt.o \
+ nvhost_cdma.o \
+ nvhost_cpuaccess.o \
+ nvhost_intr.o \
+ nvhost_channel.o \
+ nvhost_3dctx.o \
+ dev.o \
+ bus.o \
+ debug.o
+
+obj-$(CONFIG_TEGRA_GRHOST) += nvhost.o
diff --git a/drivers/video/tegra/host/bus.c b/drivers/video/tegra/host/bus.c
new file mode 100644
index 000000000000..a633950aeaff
--- /dev/null
+++ b/drivers/video/tegra/host/bus.c
@@ -0,0 +1,571 @@
+/*
+ * drivers/video/tegra/host/bus.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * based heavily on drivers/base/platform.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_runtime.h>
+
+#include <mach/nvhost.h>
+
+#include "dev.h"
+
+struct nvhost_master *nvhost;
+struct device nvhost_bus = {
+ .init_name = "nvhost",
+};
+
+struct resource *nvhost_get_resource(struct nvhost_device *dev,
+ unsigned int type, unsigned int num)
+{
+ int i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (type == resource_type(r) && num-- == 0)
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource);
+
+int nvhost_get_irq(struct nvhost_device *dev, unsigned int num)
+{
+ struct resource *r = nvhost_get_resource(dev, IORESOURCE_IRQ, num);
+
+ return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq);
+
+struct resource *nvhost_get_resource_byname(struct nvhost_device *dev,
+ unsigned int type,
+ const char *name)
+{
+ int i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (type == resource_type(r) && !strcmp(r->name, name))
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource_byname);
+
+int nvhost_get_irq_byname(struct nvhost_device *dev, const char *name)
+{
+ struct resource *r = nvhost_get_resource_byname(dev, IORESOURCE_IRQ,
+ name);
+
+ return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq_byname);
+
+static int nvhost_drv_probe(struct device *_dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ dev->host = nvhost;
+
+ return drv->probe(dev);
+}
+
+static int nvhost_drv_remove(struct device *_dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ return drv->remove(dev);
+}
+
+static void nvhost_drv_shutdown(struct device *_dev)
+{
+ struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ drv->shutdown(dev);
+}
+
+int nvhost_driver_register(struct nvhost_driver *drv)
+{
+ drv->driver.bus = &nvhost_bus_type;
+ if (drv->probe)
+ drv->driver.probe = nvhost_drv_probe;
+ if (drv->remove)
+ drv->driver.remove = nvhost_drv_remove;
+ if (drv->shutdown)
+ drv->driver.shutdown = nvhost_drv_shutdown;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(nvhost_driver_register);
+
+void nvhost_driver_unregister(struct nvhost_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(nvhost_driver_unregister);
+
+int nvhost_device_register(struct nvhost_device *dev)
+{
+ int i, ret = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ device_initialize(&dev->dev);
+
+ if (!dev->dev.parent)
+ dev->dev.parent = &nvhost_bus;
+
+ dev->dev.bus = &nvhost_bus_type;
+
+ if (dev->id != -1)
+ dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id);
+ else
+ dev_set_name(&dev->dev, "%s", dev->name);
+
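+	/* claim each memory/IO resource so regions can't be double-allocated */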
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *p, *r = &dev->resource[i];
+
+ if (r->name == NULL)
+ r->name = dev_name(&dev->dev);
+
+ p = r->parent;
+ if (!p) {
+ if (resource_type(r) == IORESOURCE_MEM)
+ p = &iomem_resource;
+ else if (resource_type(r) == IORESOURCE_IO)
+ p = &ioport_resource;
+ }
+
+ if (p && insert_resource(p, r)) {
+ pr_err("%s: failed to claim resource %d\n",
+ dev_name(&dev->dev), i);
+ ret = -EBUSY;
+ goto failed;
+ }
+ }
+
+ ret = device_add(&dev->dev);
+ if (ret == 0)
+ return ret;
+
+failed:
+ while (--i >= 0) {
+ struct resource *r = &dev->resource[i];
+ unsigned long type = resource_type(r);
+
+ if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ release_resource(r);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvhost_device_register);
+
+void nvhost_device_unregister(struct nvhost_device *dev)
+{
+ int i;
+ if (dev) {
+ device_del(&dev->dev);
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+ unsigned long type = resource_type(r);
+
+ if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ release_resource(r);
+ }
+
+ put_device(&dev->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(nvhost_device_unregister);
+
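+/* a driver matches any device whose name it prefixes, e.g. "display" matches "display0" */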
+static int nvhost_bus_match(struct device *_dev, struct device_driver *drv)
+{
+ struct nvhost_device *dev = to_nvhost_device(_dev);
+
+ pr_info("host1x: %s %s\n", dev->name, drv->name);
+ return !strncmp(dev->name, drv->name, strlen(drv->name));
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int nvhost_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+ struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+ struct nvhost_device *pdev = to_nvhost_device(dev);
+ int ret = 0;
+
+ if (dev->driver && pdrv->suspend)
+ ret = pdrv->suspend(pdev, mesg);
+
+ return ret;
+}
+
+static int nvhost_legacy_resume(struct device *dev)
+{
+ struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+ struct nvhost_device *pdev = to_nvhost_device(dev);
+ int ret = 0;
+
+ if (dev->driver && pdrv->resume)
+ ret = pdrv->resume(pdev);
+
+ return ret;
+}
+
+static int nvhost_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+static void nvhost_pm_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define nvhost_pm_prepare NULL
+#define nvhost_pm_complete NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+int __weak nvhost_pm_suspend(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend)
+ ret = drv->pm->suspend(dev);
+ } else {
+ ret = nvhost_legacy_suspend(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
+}
+
+int __weak nvhost_pm_suspend_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend_noirq)
+ ret = drv->pm->suspend_noirq(dev);
+ }
+
+ return ret;
+}
+
+int __weak nvhost_pm_resume(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume)
+ ret = drv->pm->resume(dev);
+ } else {
+ ret = nvhost_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+int __weak nvhost_pm_resume_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume_noirq)
+ ret = drv->pm->resume_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define nvhost_pm_suspend NULL
+#define nvhost_pm_resume NULL
+#define nvhost_pm_suspend_noirq NULL
+#define nvhost_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int nvhost_pm_freeze(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze)
+ ret = drv->pm->freeze(dev);
+ } else {
+ ret = nvhost_legacy_suspend(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_freeze_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze_noirq)
+ ret = drv->pm->freeze_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_thaw(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw)
+ ret = drv->pm->thaw(dev);
+ } else {
+ ret = nvhost_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_thaw_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw_noirq)
+ ret = drv->pm->thaw_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_poweroff(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff)
+ ret = drv->pm->poweroff(dev);
+ } else {
+ ret = nvhost_legacy_suspend(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_poweroff_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff_noirq)
+ ret = drv->pm->poweroff_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_restore(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore)
+ ret = drv->pm->restore(dev);
+ } else {
+ ret = nvhost_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int nvhost_pm_restore_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore_noirq)
+ ret = drv->pm->restore_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define nvhost_pm_freeze NULL
+#define nvhost_pm_thaw NULL
+#define nvhost_pm_poweroff NULL
+#define nvhost_pm_restore NULL
+#define nvhost_pm_freeze_noirq NULL
+#define nvhost_pm_thaw_noirq NULL
+#define nvhost_pm_poweroff_noirq NULL
+#define nvhost_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+#ifdef CONFIG_PM_RUNTIME
+
+int __weak nvhost_pm_runtime_suspend(struct device *dev)
+{
+ return pm_generic_runtime_suspend(dev);
+}
+
+int __weak nvhost_pm_runtime_resume(struct device *dev)
+{
+ return pm_generic_runtime_resume(dev);
+}
+
+int __weak nvhost_pm_runtime_idle(struct device *dev)
+{
+ return pm_generic_runtime_idle(dev);
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define nvhost_pm_runtime_suspend NULL
+#define nvhost_pm_runtime_resume NULL
+#define nvhost_pm_runtime_idle NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
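+/*
+ * Bus-level PM operations for the nvhost bus. Each callback forwards to
+ * the bound driver's dev_pm_ops entry when one is provided, with the
+ * suspend/resume/freeze/thaw/poweroff/restore paths falling back to the
+ * legacy nvhost_legacy_suspend()/nvhost_legacy_resume() hooks, so client
+ * drivers get standard system sleep and runtime PM behavior without
+ * per-driver glue.
+ */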
+static const struct dev_pm_ops nvhost_dev_pm_ops = {
+ .prepare = nvhost_pm_prepare,
+ .complete = nvhost_pm_complete,
+ .suspend = nvhost_pm_suspend,
+ .resume = nvhost_pm_resume,
+ .freeze = nvhost_pm_freeze,
+ .thaw = nvhost_pm_thaw,
+ .poweroff = nvhost_pm_poweroff,
+ .restore = nvhost_pm_restore,
+ .suspend_noirq = nvhost_pm_suspend_noirq,
+ .resume_noirq = nvhost_pm_resume_noirq,
+ .freeze_noirq = nvhost_pm_freeze_noirq,
+ .thaw_noirq = nvhost_pm_thaw_noirq,
+ .poweroff_noirq = nvhost_pm_poweroff_noirq,
+ .restore_noirq = nvhost_pm_restore_noirq,
+ .runtime_suspend = nvhost_pm_runtime_suspend,
+ .runtime_resume = nvhost_pm_runtime_resume,
+ .runtime_idle = nvhost_pm_runtime_idle,
+};
+
+struct bus_type nvhost_bus_type = {
+ .name = "nvhost",
+ .match = nvhost_bus_match,
+ .pm = &nvhost_dev_pm_ops,
+};
+EXPORT_SYMBOL(nvhost_bus_type);
+
+int nvhost_bus_register(struct nvhost_master *host)
+{
+ nvhost = host;
+
+ return 0;
+}
+
+
+int nvhost_bus_init(void)
+{
+ int err;
+
+ pr_info("host1x bus init\n");
+ err = device_register(&nvhost_bus);
+ if (err)
+ return err;
+
+ err = bus_register(&nvhost_bus_type);
+ if (err)
+ device_unregister(&nvhost_bus);
+
+ return err;
+}
+postcore_initcall(nvhost_bus_init);
+
diff --git a/drivers/video/tegra/host/debug.c b/drivers/video/tegra/host/debug.c
new file mode 100644
index 000000000000..c1cfd6ee229c
--- /dev/null
+++ b/drivers/video/tegra/host/debug.c
@@ -0,0 +1,270 @@
+/*
+ * drivers/video/tegra/host/debug.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/io.h>
+
+#include "dev.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+enum {
+ NVHOST_DBG_STATE_CMD = 0,
+ NVHOST_DBG_STATE_DATA = 1,
+};
+
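+/*
+ * Decode one host1x command word. The top nibble selects the opcode
+ * (SETCL, INCR, NONINCR, MASK, IMM, RESTART, GATHER, EXTEND or DONE);
+ * for opcodes that carry payload words, *count is set to the number of
+ * data words that follow and the caller switches to
+ * NVHOST_DBG_STATE_DATA until they have all been printed.
+ */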
+static int nvhost_debug_handle_cmd(struct seq_file *s, u32 val, int *count)
+{
+ unsigned mask;
+ unsigned subop;
+
+ switch (val >> 28) {
+ case 0x0:
+ mask = val & 0x3f;
+ if (mask) {
+ seq_printf(s, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
+ val >> 6 & 0x3ff, val >> 16 & 0xfff, mask);
+ *count = hweight8(mask);
+ return NVHOST_DBG_STATE_DATA;
+ } else {
+ seq_printf(s, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
+ return NVHOST_DBG_STATE_CMD;
+ }
+
+ case 0x1:
+ seq_printf(s, "INCR(offset=%03x, [", val >> 16 & 0xfff);
+ *count = val & 0xffff;
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x2:
+ seq_printf(s, "NONINCR(offset=%03x, [", val >> 16 & 0xfff);
+ *count = val & 0xffff;
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x3:
+ mask = val & 0xffff;
+ seq_printf(s, "MASK(offset=%03x, mask=%03x, [",
+ val >> 16 & 0xfff, mask);
+ *count = hweight16(mask);
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0x4:
+ seq_printf(s, "IMM(offset=%03x, data=%03x)\n",
+ val >> 16 & 0x3ff, val & 0xffff);
+ return NVHOST_DBG_STATE_CMD;
+
+ case 0x5:
+ seq_printf(s, "RESTART(offset=%08x)\n", val << 4);
+ return NVHOST_DBG_STATE_CMD;
+
+ case 0x6:
+ seq_printf(s, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+ val >> 16 & 0x3ff, val >> 15 & 0x1, val >> 14 & 0x1,
+ val & 0x3fff);
+ *count = 1;
+ return NVHOST_DBG_STATE_DATA;
+
+ case 0xe:
+ subop = val >> 24 & 0xf;
+ if (subop == 0)
+ seq_printf(s, "ACQUIRE_MLOCK(index=%d)\n", val & 0xff);
+ else if (subop == 1)
+ seq_printf(s, "RELEASE_MLOCK(index=%d)\n", val & 0xff);
+ else
+ seq_printf(s, "EXTEND_UNKNOWN(%08x)\n", val);
+
+ return NVHOST_DBG_STATE_CMD;
+
+ case 0xf:
+ seq_printf(s, "DONE()\n");
+ return NVHOST_DBG_STATE_CMD;
+
+ default:
+ return NVHOST_DBG_STATE_CMD;
+ }
+}
+
+static void nvhost_debug_handle_word(struct seq_file *s, int *state, int *count,
+ unsigned long addr, int channel, u32 val)
+{
+ switch (*state) {
+ case NVHOST_DBG_STATE_CMD:
+ if (addr)
+ seq_printf(s, "%d: %08x: %08x:", channel, addr, val);
+ else
+ seq_printf(s, "%d: %08x:", channel, val);
+
+ *state = nvhost_debug_handle_cmd(s, val, count);
+ if (*state == NVHOST_DBG_STATE_DATA && *count == 0) {
+ *state = NVHOST_DBG_STATE_CMD;
+ seq_printf(s, "])\n");
+ }
+ break;
+
+ case NVHOST_DBG_STATE_DATA:
+ (*count)--;
+ seq_printf(s, "%08x%s", val, *count > 0 ? ", " : "])\n");
+ if (*count == 0)
+ *state = NVHOST_DBG_STATE_CMD;
+ break;
+ }
+}
+
+
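+/*
+ * Dump the state of every channel: DMA put/get pointers, the syncpt or
+ * class the channel is currently blocked on (from CBSTAT/CBREAD), the
+ * in-flight gather buffer disassembled word by word, and finally the
+ * command FIFO contents read back through the CFPEEK debug window.
+ */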
+static int nvhost_debug_show(struct seq_file *s, void *unused)
+{
+ struct nvhost_master *m = s->private;
+ int i;
+
+ nvhost_module_busy(&m->mod);
+
+ for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+ void __iomem *regs = m->channels[i].aperture;
+ u32 dmaput, dmaget, dmactrl;
+ u32 cbstat, cbread;
+ u32 fifostat;
+ u32 val, base;
+ unsigned start, end;
+ unsigned wr_ptr, rd_ptr;
+ int state;
+ int count;
+ u32 phys_addr, size;
+
+ dmaput = readl(regs + HOST1X_CHANNEL_DMAPUT);
+ dmaget = readl(regs + HOST1X_CHANNEL_DMAGET);
+ dmactrl = readl(regs + HOST1X_CHANNEL_DMACTRL);
+ cbread = readl(m->aperture + HOST1X_SYNC_CBREAD(i));
+ cbstat = readl(m->aperture + HOST1X_SYNC_CBSTAT(i));
+
+ if (dmactrl != 0x0 || !m->channels[i].cdma.push_buffer.mapped) {
+ seq_printf(s, "%d: inactive\n\n", i);
+ continue;
+ }
+
+ switch (cbstat) {
+ case 0x00010008:
+ seq_printf(s, "%d: waiting on syncpt %d val %d\n",
+ i, cbread >> 24, cbread & 0xffffff);
+ break;
+
+ case 0x00010009:
+ base = cbread >> 15 & 0xf;
+
+ val = readl(m->aperture + HOST1X_SYNC_SYNCPT_BASE(base)) & 0xffff;
+ val += cbread & 0xffff;
+
+ seq_printf(s, "%d: waiting on syncpt %d val %d\n",
+ i, cbread >> 24, val);
+ break;
+
+ default:
+ seq_printf(s, "%d: active class %02x, offset %04x, val %08x\n",
+ i, cbstat >> 16, cbstat & 0xffff, cbread);
+ break;
+ }
+
+ nvhost_cdma_find_gather(&m->channels[i].cdma, dmaget, &phys_addr, &size);
+
+ /* If dmaget is in the pushbuffer (should always be?),
+ * check if we're executing a fetch, and if so dump
+ * it. */
+ if (size) {
+ u32 offset = dmaget - m->channels[i].cdma.push_buffer.phys;
+ u32 map_base = phys_addr & PAGE_MASK;
+ u32 map_size = (size * 4 + PAGE_SIZE - 1) & PAGE_MASK;
+ u32 map_offset = phys_addr - map_base;
+ void *map_addr = ioremap_nocache(map_base, map_size);
+
+ if (map_addr) {
+ u32 ii;
+
+ seq_printf(s, "\n%d: gather (%d words)\n", i, size);
+ state = NVHOST_DBG_STATE_CMD;
+ for (ii = 0; ii < size; ii++) {
+ val = readl(map_addr + map_offset + ii*sizeof(u32));
+ nvhost_debug_handle_word(s, &state, &count, phys_addr + ii * sizeof(u32), i, val);
+ }
+ iounmap(map_addr);
+ }
+ }
+
+ fifostat = readl(regs + HOST1X_CHANNEL_FIFOSTAT);
+ if ((fifostat & (1 << 10)) == 0) {
+ seq_printf(s, "\n%d: fifo:\n", i);
+ writel(0x0, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ writel(1 << 31 | i << 16, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ rd_ptr = readl(m->aperture + HOST1X_SYNC_CFPEEK_PTRS) & 0x1ff;
+ wr_ptr = readl(m->aperture + HOST1X_SYNC_CFPEEK_PTRS) >> 16 & 0x1ff;
+
+ start = readl(m->aperture + HOST1X_SYNC_CF_SETUP(i)) & 0x1ff;
+ end = (readl(m->aperture + HOST1X_SYNC_CF_SETUP(i)) >> 16) & 0x1ff;
+
+ state = NVHOST_DBG_STATE_CMD;
+
+ do {
+ writel(0x0, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ writel(1 << 31 | i << 16 | rd_ptr, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
+ val = readl(m->aperture + HOST1X_SYNC_CFPEEK_READ);
+
+ nvhost_debug_handle_word(s, &state, &count, 0, i, val);
+
+ if (rd_ptr == end)
+ rd_ptr = start;
+ else
+ rd_ptr++;
+ } while (rd_ptr != wr_ptr);
+
+ if (state == NVHOST_DBG_STATE_DATA)
+ seq_printf(s, ", ...])\n");
+ }
+
+ seq_printf(s, "\n");
+ }
+
+ nvhost_module_idle(&m->mod);
+ return 0;
+}
+
+
+static int nvhost_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvhost_debug_show, inode->i_private);
+}
+
+static const struct file_operations nvhost_debug_fops = {
+ .open = nvhost_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nvhost_debug_init(struct nvhost_master *master)
+{
+ debugfs_create_file("tegra_host", S_IRUGO, NULL, master, &nvhost_debug_fops);
+}
+#else
+void nvhost_debug_init(struct nvhost_master *master)
+{
+}
+
+#endif
+
diff --git a/drivers/video/tegra/host/dev.c b/drivers/video/tegra/host/dev.c
new file mode 100644
index 000000000000..20a4eda0fb53
--- /dev/null
+++ b/drivers/video/tegra/host/dev.c
@@ -0,0 +1,790 @@
+/*
+ * drivers/video/tegra/host/dev.c
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "dev.h"
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+
+#define DRIVER_NAME "tegra_grhost"
+#define IFACE_NAME "nvhost"
+
+static int nvhost_major = NVHOST_MAJOR;
+static int nvhost_minor = NVHOST_CHANNEL_BASE;
+
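+/*
+ * Per-open state for a channel device node. A submit arrives through
+ * write() as a nvhost_submit_hdr followed by cmdbuf and reloc records;
+ * cmdbufs_pending/relocs_pending track how much of that stream is still
+ * outstanding before a FLUSH ioctl is allowed to kick the hardware.
+ */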
+struct nvhost_channel_userctx {
+ struct nvhost_channel *ch;
+ struct nvhost_hwctx *hwctx;
+ u32 syncpt_id;
+ u32 syncpt_incrs;
+ u32 cmdbufs_pending;
+ u32 relocs_pending;
+ struct nvmap_handle_ref *gather_mem;
+ struct nvhost_op_pair *gathers;
+ int num_gathers;
+ int pinarray_size;
+ struct nvmap_pinarray_elem pinarray[NVHOST_MAX_HANDLES];
+ struct nvmap_handle *unpinarray[NVHOST_MAX_HANDLES];
+ struct nvmap_client *nvmap;
+};
+
+struct nvhost_ctrl_userctx {
+ struct nvhost_master *dev;
+ u32 mod_locks[NV_HOST1X_NB_MLOCKS];
+};
+
+static int nvhost_channelrelease(struct inode *inode, struct file *filp)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+
+ filp->private_data = NULL;
+
+ nvhost_putchannel(priv->ch, priv->hwctx);
+
+ if (priv->hwctx)
+ priv->ch->ctxhandler.put(priv->hwctx);
+
+ if (priv->gathers)
+ nvmap_munmap(priv->gather_mem, priv->gathers);
+
+ if (!IS_ERR_OR_NULL(priv->gather_mem))
+ nvmap_free(priv->ch->dev->nvmap, priv->gather_mem);
+
+ nvmap_client_put(priv->nvmap);
+ kfree(priv);
+ return 0;
+}
+
+static int nvhost_channelopen(struct inode *inode, struct file *filp)
+{
+ struct nvhost_channel_userctx *priv;
+ struct nvhost_channel *ch;
+ size_t gather_size;
+
+ ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
+ ch = nvhost_getchannel(ch);
+ if (!ch)
+ return -ENOMEM;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ nvhost_putchannel(ch, NULL);
+ return -ENOMEM;
+ }
+ filp->private_data = priv;
+ priv->ch = ch;
+ gather_size = sizeof(struct nvhost_op_pair) * NVHOST_MAX_GATHERS;
+ priv->gather_mem = nvmap_alloc(ch->dev->nvmap, gather_size, 32,
+ NVMAP_HANDLE_CACHEABLE);
+ if (IS_ERR(priv->gather_mem))
+ goto fail;
+
+ if (ch->ctxhandler.alloc) {
+ priv->hwctx = ch->ctxhandler.alloc(ch);
+ if (!priv->hwctx)
+ goto fail;
+ }
+
+ priv->gathers = (struct nvhost_op_pair *)nvmap_mmap(priv->gather_mem);
+
+ return 0;
+fail:
+ nvhost_channelrelease(inode, filp);
+ return -ENOMEM;
+}
+
+static void add_gather(struct nvhost_channel_userctx *ctx, int idx,
+ u32 mem_id, u32 words, u32 offset)
+{
+ struct nvmap_pinarray_elem *pin;
+ pin = &ctx->pinarray[ctx->pinarray_size++];
+ pin->patch_mem = (u32)nvmap_ref_to_handle(ctx->gather_mem);
+ pin->patch_offset = (idx * sizeof(struct nvhost_op_pair)) +
+ offsetof(struct nvhost_op_pair, op2);
+ pin->pin_mem = mem_id;
+ pin->pin_offset = offset;
+ ctx->gathers[idx].op1 = nvhost_opcode_gather(0, words);
+}
+
+static void reset_submit(struct nvhost_channel_userctx *ctx)
+{
+ ctx->cmdbufs_pending = 0;
+ ctx->relocs_pending = 0;
+}
+
+static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+ size_t remaining = count;
+ int err = 0;
+
+ while (remaining) {
+ size_t consumed;
+ if (!priv->relocs_pending && !priv->cmdbufs_pending) {
+ consumed = sizeof(struct nvhost_submit_hdr);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(&priv->syncpt_id, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ if (!priv->cmdbufs_pending) {
+ err = -EFAULT;
+ break;
+ }
+ /* leave room for ctx switch */
+ priv->num_gathers = 2;
+ priv->pinarray_size = 0;
+ } else if (priv->cmdbufs_pending) {
+ struct nvhost_cmdbuf cmdbuf;
+ consumed = sizeof(cmdbuf);
+ if (remaining < consumed)
+ break;
+ if (copy_from_user(&cmdbuf, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ add_gather(priv, priv->num_gathers++,
+ cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
+ priv->cmdbufs_pending--;
+ } else if (priv->relocs_pending) {
+ int numrelocs = remaining / sizeof(struct nvhost_reloc);
+ if (!numrelocs)
+ break;
+ numrelocs = min_t(int, numrelocs, priv->relocs_pending);
+ consumed = numrelocs * sizeof(struct nvhost_reloc);
+ if (copy_from_user(&priv->pinarray[priv->pinarray_size],
+ buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ priv->pinarray_size += numrelocs;
+ priv->relocs_pending -= numrelocs;
+ } else {
+ err = -EFAULT;
+ break;
+ }
+ remaining -= consumed;
+ buf += consumed;
+ }
+
+ if (err < 0) {
+ dev_err(&priv->ch->dev->pdev->dev, "channel write error\n");
+ reset_submit(priv);
+ return err;
+ }
+
+ return (count - remaining);
+}
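+
+/*
+ * For illustration only: the stream a client would write before issuing
+ * NVHOST_IOCTL_CHANNEL_FLUSH looks roughly like the sketch below. The
+ * header field names are assumptions inferred from the copy_from_user()
+ * above, which fills syncpt_id through relocs_pending in declaration
+ * order.
+ *
+ *	struct nvhost_submit_hdr hdr = { id, incrs, num_cmdbufs, num_relocs };
+ *	write(fd, &hdr, sizeof(hdr));
+ *	write(fd, cmdbufs, num_cmdbufs * sizeof(struct nvhost_cmdbuf));
+ *	write(fd, relocs, num_relocs * sizeof(struct nvhost_reloc));
+ */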
+
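+/*
+ * Turn the accumulated write() stream into a hardware submit: pin the
+ * referenced nvmap handles (patching gather addresses in place), splice
+ * in a context save/restore pair when the channel is switching hwctx
+ * owners, reserve the syncpt range, push everything to CDMA and return
+ * the fence value the client can wait on in args->value.
+ */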
+static int nvhost_ioctl_channel_flush(struct nvhost_channel_userctx *ctx,
+ struct nvhost_get_param_args *args)
+{
+ struct nvhost_cpuinterrupt ctxsw;
+ int gather_idx = 2;
+ int num_intrs = 0;
+ u32 syncval;
+ int num_unpin;
+ int err;
+
+ if (ctx->relocs_pending || ctx->cmdbufs_pending) {
+ reset_submit(ctx);
+ dev_err(&ctx->ch->dev->pdev->dev, "channel submit out of sync\n");
+ return -EFAULT;
+ }
+ if (!ctx->nvmap) {
+ dev_err(&ctx->ch->dev->pdev->dev, "no nvmap context set\n");
+ return -EFAULT;
+ }
+ if (ctx->num_gathers <= 2)
+ return 0;
+
+ /* keep module powered */
+ nvhost_module_busy(&ctx->ch->mod);
+
+ /* pin mem handles and patch physical addresses */
+ num_unpin = nvmap_pin_array(ctx->nvmap,
+ nvmap_ref_to_handle(ctx->gather_mem),
+ ctx->pinarray, ctx->pinarray_size,
+ ctx->unpinarray);
+ if (num_unpin < 0) {
+ dev_warn(&ctx->ch->dev->pdev->dev, "nvmap_pin_array failed: "
+ "%d\n", num_unpin);
+ nvhost_module_idle(&ctx->ch->mod);
+ return num_unpin;
+ }
+
+ /* get submit lock */
+ err = mutex_lock_interruptible(&ctx->ch->submitlock);
+ if (err) {
+ nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
+ nvhost_module_idle(&ctx->ch->mod);
+ return err;
+ }
+
+ /* context switch */
+ if (ctx->ch->cur_ctx != ctx->hwctx) {
+ struct nvhost_hwctx *hw = ctx->hwctx;
+ if (hw && hw->valid) {
+ gather_idx--;
+ ctx->gathers[gather_idx].op1 =
+ nvhost_opcode_gather(0, hw->restore_size);
+ ctx->gathers[gather_idx].op2 = hw->restore_phys;
+ ctx->syncpt_incrs += hw->restore_incrs;
+ }
+ hw = ctx->ch->cur_ctx;
+ if (hw) {
+ gather_idx--;
+ ctx->gathers[gather_idx].op1 =
+ nvhost_opcode_gather(0, hw->save_size);
+ ctx->gathers[gather_idx].op2 = hw->save_phys;
+ ctx->syncpt_incrs += hw->save_incrs;
+ num_intrs = 1;
+ ctxsw.syncpt_val = hw->save_incrs - 1;
+ ctxsw.intr_data = hw;
+ hw->valid = true;
+ ctx->ch->ctxhandler.get(hw);
+ }
+ ctx->ch->cur_ctx = ctx->hwctx;
+ }
+
+ /* add a setclass for modules that require it */
+ if (gather_idx == 2 && ctx->ch->desc->class) {
+ gather_idx--;
+ ctx->gathers[gather_idx].op1 =
+ nvhost_opcode_setclass(ctx->ch->desc->class, 0, 0);
+ ctx->gathers[gather_idx].op2 = NVHOST_OPCODE_NOOP;
+ }
+
+ /* get absolute sync value */
+ if (BIT(ctx->syncpt_id) & NVSYNCPTS_CLIENT_MANAGED)
+ syncval = nvhost_syncpt_set_max(&ctx->ch->dev->syncpt,
+ ctx->syncpt_id, ctx->syncpt_incrs);
+ else
+ syncval = nvhost_syncpt_incr_max(&ctx->ch->dev->syncpt,
+ ctx->syncpt_id, ctx->syncpt_incrs);
+
+ /* patch absolute syncpt value into interrupt triggers */
+ ctxsw.syncpt_val += syncval - ctx->syncpt_incrs;
+
+ nvhost_channel_submit(ctx->ch, ctx->nvmap, &ctx->gathers[gather_idx],
+ ctx->num_gathers - gather_idx, &ctxsw, num_intrs,
+ ctx->unpinarray, num_unpin,
+ ctx->syncpt_id, syncval);
+
+ /* schedule a submit complete interrupt */
+ nvhost_intr_add_action(&ctx->ch->dev->intr, ctx->syncpt_id, syncval,
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ctx->ch, NULL);
+
+ mutex_unlock(&ctx->ch->submitlock);
+ args->value = syncval;
+ return 0;
+}
+
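+/*
+ * Single entry point for all channel ioctls: arguments are staged in an
+ * on-stack buffer sized by _IOC_SIZE(cmd), copied in for _IOC_WRITE
+ * directions, dispatched on the command number, and copied back out for
+ * _IOC_READ directions.
+ */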
+static long nvhost_channelctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvhost_channel_userctx *priv = filp->private_data;
+ u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
+ int err = 0;
+
+ if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+ (_IOC_NR(cmd) == 0) ||
+ (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
+ return -EFAULT;
+
+ BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case NVHOST_IOCTL_CHANNEL_FLUSH:
+ err = nvhost_ioctl_channel_flush(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->desc->syncpts;
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->desc->waitbases;
+ break;
+ case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
+ ((struct nvhost_get_param_args *)buf)->value =
+ priv->ch->desc->modulemutexes;
+ break;
+ case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
+ {
+ int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
+ struct nvmap_client *new_client = nvmap_client_get_file(fd);
+
+ if (IS_ERR(new_client)) {
+ err = PTR_ERR(new_client);
+ break;
+ }
+
+ if (priv->nvmap)
+ nvmap_client_put(priv->nvmap);
+
+ priv->nvmap = new_client;
+ break;
+ }
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+ err = copy_to_user((void __user *)arg, buf,
+ _IOC_SIZE(cmd)) ? -EFAULT : 0;
+
+ return err;
+}
+
+static struct file_operations nvhost_channelops = {
+ .owner = THIS_MODULE,
+ .release = nvhost_channelrelease,
+ .open = nvhost_channelopen,
+ .write = nvhost_channelwrite,
+ .unlocked_ioctl = nvhost_channelctl
+};
+
+static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
+{
+ struct nvhost_ctrl_userctx *priv = filp->private_data;
+ int i;
+
+ filp->private_data = NULL;
+ if (priv->mod_locks[0])
+ nvhost_module_idle(&priv->dev->mod);
+ for (i = 1; i < NV_HOST1X_NB_MLOCKS; i++)
+ if (priv->mod_locks[i])
+ nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
+ kfree(priv);
+ return 0;
+}
+
+static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
+{
+ struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
+ struct nvhost_ctrl_userctx *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = host;
+ filp->private_data = priv;
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_read(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_read_args *args)
+{
+ if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+ return -EINVAL;
+ args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_incr(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_incr_args *args)
+{
+ if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+ return -EINVAL;
+ nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
+ return 0;
+}
+
+static int nvhost_ioctl_ctrl_syncpt_wait(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_syncpt_wait_args *args)
+{
+ u32 timeout;
+ if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+ return -EINVAL;
+ if (args->timeout == NVHOST_NO_TIMEOUT)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = (u32)msecs_to_jiffies(args->timeout);
+
+ return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
+ args->thresh, timeout);
+}
+
+static int nvhost_ioctl_ctrl_module_mutex(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_module_mutex_args *args)
+{
+ int err = 0;
+ if (args->id >= NV_HOST1X_NB_MLOCKS ||
+ args->lock > 1)
+ return -EINVAL;
+
+ if (args->lock && !ctx->mod_locks[args->id]) {
+ if (args->id == 0)
+ nvhost_module_busy(&ctx->dev->mod);
+ else
+ err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess, args->id);
+ if (!err)
+ ctx->mod_locks[args->id] = 1;
+ } else if (!args->lock && ctx->mod_locks[args->id]) {
+ if (args->id == 0)
+ nvhost_module_idle(&ctx->dev->mod);
+ else
+ nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
+ ctx->mod_locks[args->id] = 0;
+ }
+ return err;
+}
+
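+/*
+ * Read or write module registers on behalf of userspace, batching each
+ * block through a 64-word bounce buffer so arbitrarily large block_size
+ * requests never stage more than 256 bytes on the kernel stack.
+ */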
+static int nvhost_ioctl_ctrl_module_regrdwr(
+ struct nvhost_ctrl_userctx *ctx,
+ struct nvhost_ctrl_module_regrdwr_args *args)
+{
+ u32 num_offsets = args->num_offsets;
+ u32 *offsets = args->offsets;
+ void *values = args->values;
+ u32 vals[64];
+
+ if (!nvhost_access_module_regs(&ctx->dev->cpuaccess, args->id) ||
+ (num_offsets == 0))
+ return -EINVAL;
+
+ while (num_offsets--) {
+ u32 remaining = args->block_size;
+ u32 offs;
+ if (get_user(offs, offsets))
+ return -EFAULT;
+ offsets++;
+ while (remaining) {
+ u32 batch = min_t(u32, remaining, 64 * sizeof(u32));
+ if (args->write) {
+ if (copy_from_user(vals, values, batch))
+ return -EFAULT;
+ nvhost_write_module_regs(&ctx->dev->cpuaccess,
+ args->id, offs, batch, vals);
+ } else {
+ nvhost_read_module_regs(&ctx->dev->cpuaccess,
+ args->id, offs, batch, vals);
+ if (copy_to_user(values, vals, batch))
+ return -EFAULT;
+ }
+ remaining -= batch;
+ offs += batch;
+ values += batch;
+ }
+ }
+
+ return 0;
+}
+
+static long nvhost_ctrlctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvhost_ctrl_userctx *priv = filp->private_data;
+ u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
+ int err = 0;
+
+ if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+ (_IOC_NR(cmd) == 0) ||
+ (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
+ return -EFAULT;
+
+ BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case NVHOST_IOCTL_CTRL_SYNCPT_READ:
+ err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
+ err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
+ err = nvhost_ioctl_ctrl_syncpt_wait(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
+ err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
+ break;
+ case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
+ err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
+ err = copy_to_user((void __user *)arg, buf,
+ _IOC_SIZE(cmd)) ? -EFAULT : 0;
+
+ return err;
+}
+
+static struct file_operations nvhost_ctrlops = {
+ .owner = THIS_MODULE,
+ .release = nvhost_ctrlrelease,
+ .open = nvhost_ctrlopen,
+ .unlocked_ioctl = nvhost_ctrlctl
+};
+
+static void power_host(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+ struct nvhost_master *dev = container_of(mod, struct nvhost_master, mod);
+
+ if (action == NVHOST_POWER_ACTION_ON) {
+ nvhost_intr_configure(&dev->intr, clk_get_rate(mod->clk[0]));
+ } else if (action == NVHOST_POWER_ACTION_OFF) {
+ int i;
+ for (i = 0; i < NVHOST_NUMCHANNELS; i++)
+ nvhost_channel_suspend(&dev->channels[i]);
+ nvhost_syncpt_save(&dev->syncpt);
+ }
+}
+
+static int __devinit nvhost_user_init(struct nvhost_master *host)
+{
+ int i, err;
+ dev_t devno;
+
+ host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
+ if (IS_ERR(host->nvhost_class)) {
+ err = PTR_ERR(host->nvhost_class);
+ dev_err(&host->pdev->dev, "failed to create class\n");
+ goto fail;
+ }
+
+ if (nvhost_major) {
+ devno = MKDEV(nvhost_major, nvhost_minor);
+ err = register_chrdev_region(devno, NVHOST_NUMCHANNELS + 1, IFACE_NAME);
+ } else {
+ err = alloc_chrdev_region(&devno, nvhost_minor,
+ NVHOST_NUMCHANNELS + 1, IFACE_NAME);
+ nvhost_major = MAJOR(devno);
+ }
+ if (err < 0) {
+ dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
+ goto fail;
+ }
+
+ for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+ struct nvhost_channel *ch = &host->channels[i];
+
+ cdev_init(&ch->cdev, &nvhost_channelops);
+ ch->cdev.owner = THIS_MODULE;
+
+ devno = MKDEV(nvhost_major, nvhost_minor + i);
+ err = cdev_add(&ch->cdev, devno, 1);
+ if (err < 0) {
+ dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
+ goto fail;
+ }
+ ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
+ IFACE_NAME "-%s", ch->desc->name);
+ if (IS_ERR(ch->node)) {
+ err = PTR_ERR(ch->node);
+ dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
+ goto fail;
+ }
+ }
+
+ cdev_init(&host->cdev, &nvhost_ctrlops);
+ host->cdev.owner = THIS_MODULE;
+ devno = MKDEV(nvhost_major, nvhost_minor + NVHOST_NUMCHANNELS);
+ err = cdev_add(&host->cdev, devno, 1);
+ if (err < 0)
+ goto fail;
+ host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
+ IFACE_NAME "-ctrl");
+ if (IS_ERR(host->ctrl)) {
+ err = PTR_ERR(host->ctrl);
+ dev_err(&host->pdev->dev, "failed to create ctrl device\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
+static int __devinit nvhost_probe(struct platform_device *pdev)
+{
+ struct nvhost_master *host;
+ struct resource *regs, *intr0, *intr1;
+ int i, err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+ if (!regs || !intr0 || !intr1) {
+ dev_err(&pdev->dev, "missing required platform resources\n");
+ return -ENXIO;
+ }
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->pdev = pdev;
+
+ host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
+ if (!host->nvmap) {
+ dev_err(&pdev->dev, "unable to create nvmap client\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ host->reg_mem = request_mem_region(regs->start,
+ resource_size(regs), pdev->name);
+ if (!host->reg_mem) {
+ dev_err(&pdev->dev, "failed to get host register memory\n");
+ err = -ENXIO;
+ goto fail;
+ }
+ host->aperture = ioremap(regs->start, resource_size(regs));
+ if (!host->aperture) {
+ dev_err(&pdev->dev, "failed to remap host registers\n");
+ err = -ENXIO;
+ goto fail;
+ }
+ host->sync_aperture = host->aperture +
+ (NV_HOST1X_CHANNEL0_BASE +
+ HOST1X_CHANNEL_SYNC_REG_BASE);
+
+ for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+ struct nvhost_channel *ch = &host->channels[i];
+ err = nvhost_channel_init(ch, host, i);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init channel %d\n", i);
+ goto fail;
+ }
+ }
+
+ err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
+ if (err)
+ goto fail;
+ err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
+ if (err)
+ goto fail;
+ err = nvhost_user_init(host);
+ if (err)
+ goto fail;
+ err = nvhost_module_init(&host->mod, "host1x", power_host, NULL, &pdev->dev);
+ if (err)
+ goto fail;
+
+ platform_set_drvdata(pdev, host);
+
+ clk_enable(host->mod.clk[0]);
+ nvhost_syncpt_reset(&host->syncpt);
+ clk_disable(host->mod.clk[0]);
+
+ nvhost_bus_register(host);
+
+ nvhost_debug_init(host);
+
+ dev_info(&pdev->dev, "initialized\n");
+ return 0;
+
+fail:
+ if (host->nvmap)
+ nvmap_client_put(host->nvmap);
+ /* TODO: [ahatala 2010-05-04] */
+ kfree(host);
+ return err;
+}
+
+static int __exit nvhost_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct nvhost_master *host = platform_get_drvdata(pdev);
+ dev_info(&pdev->dev, "suspending\n");
+ nvhost_module_suspend(&host->mod);
+ clk_enable(host->mod.clk[0]);
+ nvhost_syncpt_save(&host->syncpt);
+ clk_disable(host->mod.clk[0]);
+ dev_info(&pdev->dev, "suspended\n");
+ return 0;
+}
+
+static int nvhost_resume(struct platform_device *pdev)
+{
+ struct nvhost_master *host = platform_get_drvdata(pdev);
+ dev_info(&pdev->dev, "resuming\n");
+ clk_enable(host->mod.clk[0]);
+ nvhost_syncpt_reset(&host->syncpt);
+ clk_disable(host->mod.clk[0]);
+ dev_info(&pdev->dev, "resumed\n");
+ return 0;
+}
+
+static struct platform_driver nvhost_driver = {
+ .remove = __exit_p(nvhost_remove),
+ .suspend = nvhost_suspend,
+ .resume = nvhost_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME
+ }
+};
+
+static int __init nvhost_mod_init(void)
+{
+ return platform_driver_probe(&nvhost_driver, nvhost_probe);
+}
+
+static void __exit nvhost_mod_exit(void)
+{
+ platform_driver_unregister(&nvhost_driver);
+}
+
+module_init(nvhost_mod_init);
+module_exit(nvhost_mod_exit);
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("Graphics host driver for Tegra products");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform-nvhost");
diff --git a/drivers/video/tegra/host/dev.h b/drivers/video/tegra/host/dev.h
new file mode 100644
index 000000000000..ae9847c2bd74
--- /dev/null
+++ b/drivers/video/tegra/host/dev.h
@@ -0,0 +1,52 @@
+/*
+ * drivers/video/tegra/host/dev.h
+ *
+ * Tegra Graphics Host Driver Entrypoint
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_DEV_H
+#define __NVHOST_DEV_H
+
+#include <linux/cdev.h>
+
+#include "nvhost_acm.h"
+#include "nvhost_syncpt.h"
+#include "nvhost_intr.h"
+#include "nvhost_cpuaccess.h"
+#include "nvhost_channel.h"
+#include "nvhost_hardware.h"
+
+#define NVHOST_MAJOR 0 /* dynamic */
+
+struct nvhost_master {
+ void __iomem *aperture;
+ void __iomem *sync_aperture;
+ struct resource *reg_mem;
+ struct platform_device *pdev;
+ struct class *nvhost_class;
+ struct cdev cdev;
+ struct device *ctrl;
+ struct nvhost_syncpt syncpt;
+ struct nvmap_client *nvmap;
+ struct nvhost_cpuaccess cpuaccess;
+ struct nvhost_intr intr;
+ struct nvhost_module mod;
+ struct nvhost_channel channels[NVHOST_NUMCHANNELS];
+};
+
+void nvhost_debug_init(struct nvhost_master *master);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_3dctx.c b/drivers/video/tegra/host/nvhost_3dctx.c
new file mode 100644
index 000000000000..1840d47b4f81
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_3dctx.c
@@ -0,0 +1,543 @@
+/*
+ * drivers/video/tegra/host/nvhost_3dctx.c
+ *
+ * Tegra Graphics Host 3d hardware context
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_hwctx.h"
+#include "dev.h"
+
+#include <linux/slab.h>
+
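+/*
+ * Register ranges captured at 3D context save and replayed at restore.
+ * DIRECT ranges are accessed at their register offsets; INDIRECT ranges
+ * go through an offset/data register pair, with the INDIRECT_OFFSET and
+ * INDIRECT_DATA entry types describing split pairs where the data count
+ * differs from the offset value programmed.
+ */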
+const struct hwctx_reginfo ctxsave_regs_3d[] = {
+ HWCTX_REGINFO(0xe00, 16, DIRECT),
+ HWCTX_REGINFO(0xe10, 16, DIRECT),
+ HWCTX_REGINFO(0xe20, 1, DIRECT),
+ HWCTX_REGINFO(0xe21, 1, DIRECT),
+ HWCTX_REGINFO(0xe22, 1, DIRECT),
+ HWCTX_REGINFO(0xe25, 1, DIRECT),
+ HWCTX_REGINFO(0xe26, 1, DIRECT),
+ HWCTX_REGINFO(0xe28, 2, DIRECT),
+ HWCTX_REGINFO(0xe2a, 1, DIRECT),
+ HWCTX_REGINFO(0x1, 1, DIRECT),
+ HWCTX_REGINFO(0x2, 1, DIRECT),
+ HWCTX_REGINFO(0xc, 2, DIRECT),
+ HWCTX_REGINFO(0xe, 2, DIRECT),
+ HWCTX_REGINFO(0x10, 2, DIRECT),
+ HWCTX_REGINFO(0x12, 2, DIRECT),
+ HWCTX_REGINFO(0x14, 2, DIRECT),
+ HWCTX_REGINFO(0x100, 32, DIRECT),
+ HWCTX_REGINFO(0x120, 1, DIRECT),
+ HWCTX_REGINFO(0x121, 1, DIRECT),
+ HWCTX_REGINFO(0x124, 1, DIRECT),
+ HWCTX_REGINFO(0x125, 1, DIRECT),
+ HWCTX_REGINFO(0x200, 1, DIRECT),
+ HWCTX_REGINFO(0x201, 1, DIRECT),
+ HWCTX_REGINFO(0x202, 1, DIRECT),
+ HWCTX_REGINFO(0x203, 1, DIRECT),
+ HWCTX_REGINFO(0x204, 1, DIRECT),
+ HWCTX_REGINFO(0x207, 1024, INDIRECT),
+ HWCTX_REGINFO(0x209, 1, DIRECT),
+ HWCTX_REGINFO(0x300, 64, DIRECT),
+ HWCTX_REGINFO(0x343, 1, DIRECT),
+ HWCTX_REGINFO(0x344, 1, DIRECT),
+ HWCTX_REGINFO(0x345, 1, DIRECT),
+ HWCTX_REGINFO(0x346, 1, DIRECT),
+ HWCTX_REGINFO(0x347, 1, DIRECT),
+ HWCTX_REGINFO(0x348, 1, DIRECT),
+ HWCTX_REGINFO(0x349, 1, DIRECT),
+ HWCTX_REGINFO(0x34a, 1, DIRECT),
+ HWCTX_REGINFO(0x34b, 1, DIRECT),
+ HWCTX_REGINFO(0x34c, 1, DIRECT),
+ HWCTX_REGINFO(0x34d, 1, DIRECT),
+ HWCTX_REGINFO(0x34e, 1, DIRECT),
+ HWCTX_REGINFO(0x34f, 1, DIRECT),
+ HWCTX_REGINFO(0x350, 1, DIRECT),
+ HWCTX_REGINFO(0x351, 1, DIRECT),
+ HWCTX_REGINFO(0x352, 1, DIRECT),
+ HWCTX_REGINFO(0x353, 1, DIRECT),
+ HWCTX_REGINFO(0x354, 1, DIRECT),
+ HWCTX_REGINFO(0x355, 1, DIRECT),
+ HWCTX_REGINFO(0x356, 1, DIRECT),
+ HWCTX_REGINFO(0x357, 1, DIRECT),
+ HWCTX_REGINFO(0x358, 1, DIRECT),
+ HWCTX_REGINFO(0x359, 1, DIRECT),
+ HWCTX_REGINFO(0x35a, 1, DIRECT),
+ HWCTX_REGINFO(0x35b, 1, DIRECT),
+ HWCTX_REGINFO(0x363, 1, DIRECT),
+ HWCTX_REGINFO(0x364, 1, DIRECT),
+ HWCTX_REGINFO(0x400, 2, DIRECT),
+ HWCTX_REGINFO(0x402, 1, DIRECT),
+ HWCTX_REGINFO(0x403, 1, DIRECT),
+ HWCTX_REGINFO(0x404, 1, DIRECT),
+ HWCTX_REGINFO(0x405, 1, DIRECT),
+ HWCTX_REGINFO(0x406, 1, DIRECT),
+ HWCTX_REGINFO(0x407, 1, DIRECT),
+ HWCTX_REGINFO(0x408, 1, DIRECT),
+ HWCTX_REGINFO(0x409, 1, DIRECT),
+ HWCTX_REGINFO(0x40a, 1, DIRECT),
+ HWCTX_REGINFO(0x40b, 1, DIRECT),
+ HWCTX_REGINFO(0x40c, 1, DIRECT),
+ HWCTX_REGINFO(0x40d, 1, DIRECT),
+ HWCTX_REGINFO(0x40e, 1, DIRECT),
+ HWCTX_REGINFO(0x40f, 1, DIRECT),
+ HWCTX_REGINFO(0x411, 1, DIRECT),
+ HWCTX_REGINFO(0x500, 1, DIRECT),
+ HWCTX_REGINFO(0x501, 1, DIRECT),
+ HWCTX_REGINFO(0x502, 1, DIRECT),
+ HWCTX_REGINFO(0x503, 1, DIRECT),
+ HWCTX_REGINFO(0x520, 32, DIRECT),
+ HWCTX_REGINFO(0x540, 64, INDIRECT),
+ HWCTX_REGINFO(0x600, 0, INDIRECT_OFFSET),
+ HWCTX_REGINFO(0x602, 16, INDIRECT_DATA),
+ HWCTX_REGINFO(0x603, 128, INDIRECT),
+ HWCTX_REGINFO(0x608, 4, DIRECT),
+ HWCTX_REGINFO(0x60e, 1, DIRECT),
+ HWCTX_REGINFO(0x700, 64, INDIRECT),
+ HWCTX_REGINFO(0x710, 16, DIRECT),
+ HWCTX_REGINFO(0x720, 32, DIRECT),
+ HWCTX_REGINFO(0x740, 1, DIRECT),
+ HWCTX_REGINFO(0x741, 1, DIRECT),
+ HWCTX_REGINFO(0x800, 0, INDIRECT_OFFSET),
+ HWCTX_REGINFO(0x802, 16, INDIRECT_DATA),
+ HWCTX_REGINFO(0x803, 512, INDIRECT),
+ HWCTX_REGINFO(0x805, 64, INDIRECT),
+ HWCTX_REGINFO(0x820, 32, DIRECT),
+ HWCTX_REGINFO(0x900, 64, INDIRECT),
+ HWCTX_REGINFO(0x902, 1, DIRECT),
+ HWCTX_REGINFO(0x903, 1, DIRECT),
+ HWCTX_REGINFO(0xa02, 1, DIRECT),
+ HWCTX_REGINFO(0xa03, 1, DIRECT),
+ HWCTX_REGINFO(0xa04, 1, DIRECT),
+ HWCTX_REGINFO(0xa05, 1, DIRECT),
+ HWCTX_REGINFO(0xa06, 1, DIRECT),
+ HWCTX_REGINFO(0xa07, 1, DIRECT),
+ HWCTX_REGINFO(0xa08, 1, DIRECT),
+ HWCTX_REGINFO(0xa09, 1, DIRECT),
+ HWCTX_REGINFO(0xa0a, 1, DIRECT),
+ HWCTX_REGINFO(0xa0b, 1, DIRECT),
+ HWCTX_REGINFO(0x205, 1024, INDIRECT)
+};
+
+
+/*** restore ***/
+
+static unsigned int context_restore_size = 0;
+
+static void restore_begin(u32 *ptr, u32 waitbase)
+{
+ /* set class to host */
+ ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+ /* increment sync point base */
+ ptr[1] = nvhost_class_host_incr_syncpt_base(waitbase, 1);
+ /* set class to 3D */
+ ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ /* program PSEQ_QUAD_ID */
+ ptr[3] = nvhost_opcode_imm(0x545, 0);
+}
+#define RESTORE_BEGIN_SIZE 4
+
+static void restore_end(u32 *ptr, u32 syncpt_id)
+{
+ /* syncpt increment to track restore gather. */
+ ptr[0] = nvhost_opcode_imm(0x0, ((1UL << 8) | (u8)(syncpt_id & 0xff)));
+}
+#define RESTORE_END_SIZE 1
+
+static void restore_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_incr(start_reg, count);
+}
+#define RESTORE_DIRECT_SIZE 1
+
+static void restore_indoffset(u32 *ptr, u32 offset_reg, u32 offset)
+{
+ ptr[0] = nvhost_opcode_imm(offset_reg, offset);
+}
+#define RESTORE_INDOFFSET_SIZE 1
+
+static void restore_inddata(u32 *ptr, u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_nonincr(data_reg, count);
+}
+#define RESTORE_INDDATA_SIZE 1
+
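+/*
+ * Drain 'count' register values from the channel output FIFO into the
+ * restore buffer, polling FIFOSTAT until entries are available and
+ * copying in four-word bursts where possible. 'pending' carries the
+ * number of FIFO entries already known to be available across calls.
+ */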
+static void restore_registers_from_fifo(u32 *ptr, unsigned int count,
+ struct nvhost_channel *channel,
+ unsigned int *pending)
+{
+ void __iomem *chan_regs = channel->aperture;
+ unsigned int entries = *pending;
+ while (count) {
+ unsigned int num;
+
+ while (!entries) {
+ /* query host for number of entries in fifo */
+ entries = nvhost_channel_fifostat_outfentries(
+ readl(chan_regs + HOST1X_CHANNEL_FIFOSTAT));
+ if (!entries)
+ cpu_relax();
+ /* TODO: [ahowe 2010-06-14] timeout */
+ }
+ num = min(entries, count);
+ entries -= num;
+ count -= num;
+
+ while (num & ~0x3) {
+ u32 arr[4];
+ arr[0] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[1] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[2] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ arr[3] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ memcpy(ptr, arr, 4*sizeof(u32));
+ ptr += 4;
+ num -= 4;
+ }
+ while (num--)
+ *ptr++ = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
+ }
+ *pending = entries;
+}
+
+static void setup_restore(u32 *ptr, u32 waitbase)
+{
+ const struct hwctx_reginfo *r;
+ const struct hwctx_reginfo *rend;
+
+ restore_begin(ptr, waitbase);
+ ptr += RESTORE_BEGIN_SIZE;
+
+ r = ctxsave_regs_3d;
+ rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+ for ( ; r != rend; ++r) {
+ u32 offset = r->offset;
+ u32 count = r->count;
+ switch (r->type) {
+ case HWCTX_REGINFO_DIRECT:
+ restore_direct(ptr, offset, count);
+ ptr += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT:
+ restore_indoffset(ptr, offset, 0);
+ ptr += RESTORE_INDOFFSET_SIZE;
+ restore_inddata(ptr, offset + 1, count);
+ ptr += RESTORE_INDDATA_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_OFFSET:
+ restore_indoffset(ptr, offset, count);
+ ptr += RESTORE_INDOFFSET_SIZE;
+ continue; /* INDIRECT_DATA follows with real count */
+ case HWCTX_REGINFO_INDIRECT_DATA:
+ restore_inddata(ptr, offset, count);
+ ptr += RESTORE_INDDATA_SIZE;
+ break;
+ }
+ ptr += count;
+ }
+
+ restore_end(ptr, NVSYNCPT_3D);
+ wmb();
+}
+
+/*** save ***/
+
+/* the same context save command sequence is used for all contexts. */
+static struct nvmap_handle_ref *context_save_buf = NULL;
+static u32 context_save_phys = 0;
+static u32 *context_save_ptr = NULL;
+static unsigned int context_save_size = 0;
+
+static void save_begin(u32 *ptr, u32 syncpt_id, u32 waitbase)
+{
+ /* set class to the unit to flush */
+ ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ /*
+ * Flush pipe and signal context read thread to start reading
+ * sync point increment
+ */
+ ptr[1] = nvhost_opcode_imm(0, 0x100 | syncpt_id);
+ ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+ /* wait for base+1 */
+ ptr[3] = nvhost_class_host_wait_syncpt_base(syncpt_id, waitbase, 1);
+ ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+ ptr[5] = nvhost_opcode_imm(0, syncpt_id);
+ ptr[6] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, 0, 0);
+}
+#define SAVE_BEGIN_SIZE 7
+
+static void save_direct(u32 *ptr, u32 start_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+ ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ start_reg, true);
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+#define SAVE_DIRECT_SIZE 3
+
+static void save_indoffset(u32 *ptr, u32 offset_reg, u32 offset)
+{
+ ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+ ptr[1] = nvhost_class_host_indoff_reg_write(NV_HOST_MODULE_GR3D,
+ offset_reg, true);
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, 1);
+ ptr[3] = offset;
+}
+#define SAVE_INDOFFSET_SIZE 4
+
+static inline void save_inddata(u32 *ptr, u32 data_reg, u32 count)
+{
+ ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
+ ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
+ data_reg, false);
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
+}
+#define SAVE_INDDATA_SIZE 3
+
+static void save_end(u32 *ptr, u32 syncpt_id, u32 waitbase)
+{
+ /* Wait for context read service */
+ ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
+ ptr[1] = nvhost_class_host_wait_syncpt_base(syncpt_id, waitbase, 3);
+ /* Increment syncpoint base */
+ ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
+ ptr[3] = nvhost_class_host_incr_syncpt_base(waitbase, 3);
+ /* set class back to the unit */
+ ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
+}
+#define SAVE_END_SIZE 5
+
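+/*
+ * Build the context save command stream and/or measure it: setup_save()
+ * is called once with ptr == NULL to size both the save and restore
+ * buffers, then again with the mapped save buffer to emit the actual
+ * opcodes. Using a single walk over ctxsave_regs_3d for both passes
+ * keeps the computed sizes and the emitted streams in sync.
+ */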
+static void __init setup_save(
+ u32 *ptr, unsigned int *words_save, unsigned int *words_restore,
+ u32 syncpt_id, u32 waitbase)
+{
+ const struct hwctx_reginfo *r;
+ const struct hwctx_reginfo *rend;
+ unsigned int save = SAVE_BEGIN_SIZE + SAVE_END_SIZE;
+ unsigned int restore = RESTORE_BEGIN_SIZE + RESTORE_END_SIZE;
+
+ if (ptr) {
+ save_begin(ptr, syncpt_id, waitbase);
+ ptr += SAVE_BEGIN_SIZE;
+ }
+
+ r = ctxsave_regs_3d;
+ rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+ for ( ; r != rend; ++r) {
+ u32 offset = r->offset;
+ u32 count = r->count;
+ switch (r->type) {
+ case HWCTX_REGINFO_DIRECT:
+ if (ptr) {
+ save_direct(ptr, offset, count);
+ ptr += SAVE_DIRECT_SIZE;
+ }
+ save += SAVE_DIRECT_SIZE;
+ restore += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT:
+ if (ptr) {
+ save_indoffset(ptr, offset, 0);
+ ptr += SAVE_INDOFFSET_SIZE;
+ save_inddata(ptr, offset + 1, count);
+ ptr += SAVE_INDDATA_SIZE;
+ }
+ save += SAVE_INDOFFSET_SIZE;
+ restore += RESTORE_INDOFFSET_SIZE;
+ save += SAVE_INDDATA_SIZE;
+ restore += RESTORE_INDDATA_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_OFFSET:
+ if (ptr) {
+ save_indoffset(ptr, offset, count);
+ ptr += SAVE_INDOFFSET_SIZE;
+ }
+ save += SAVE_INDOFFSET_SIZE;
+ restore += RESTORE_INDOFFSET_SIZE;
+ continue; /* INDIRECT_DATA follows with real count */
+ case HWCTX_REGINFO_INDIRECT_DATA:
+ if (ptr) {
+ save_inddata(ptr, offset, count);
+ ptr += SAVE_INDDATA_SIZE;
+ }
+ save += SAVE_INDDATA_SIZE;
+ restore += RESTORE_INDDATA_SIZE;
+ break;
+ }
+ if (ptr) {
+ memset(ptr, 0, count * 4);
+ ptr += count;
+ }
+ save += count;
+ restore += count;
+ }
+
+ if (ptr)
+ save_end(ptr, syncpt_id, waitbase);
+
+ if (words_save)
+ *words_save = save;
+ if (words_restore)
+ *words_restore = restore;
+ wmb();
+}
+
+/*** ctx3d ***/
+
+static struct nvhost_hwctx *ctx3d_alloc(struct nvhost_channel *ch)
+{
+ struct nvhost_hwctx *ctx;
+ struct nvmap_client *nvmap = ch->dev->nvmap;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ ctx->restore = nvmap_alloc(nvmap, context_restore_size * 4, 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+
+ if (IS_ERR_OR_NULL(ctx->restore)) {
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->save_cpu_data = nvmap_mmap(ctx->restore);
+ if (!ctx->save_cpu_data) {
+ nvmap_free(nvmap, ctx->restore);
+ kfree(ctx);
+ return NULL;
+ }
+
+ setup_restore(ctx->save_cpu_data, NVWAITBASE_3D);
+ ctx->channel = ch;
+ ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
+ ctx->restore_size = context_restore_size;
+ ctx->save = context_save_buf;
+ ctx->save_phys = context_save_phys;
+ ctx->save_size = context_save_size;
+ ctx->save_incrs = 3;
+ ctx->restore_incrs = 1;
+ ctx->valid = false;
+ kref_init(&ctx->ref);
+ return ctx;
+}
+
+static void ctx3d_free(struct kref *ref)
+{
+ struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
+ struct nvmap_client *nvmap = ctx->channel->dev->nvmap;
+
+ nvmap_munmap(ctx->restore, ctx->save_cpu_data);
+ nvmap_unpin(nvmap, ctx->restore);
+ nvmap_free(nvmap, ctx->restore);
+ kfree(ctx);
+}
+
+static void ctx3d_get(struct nvhost_hwctx *ctx)
+{
+ kref_get(&ctx->ref);
+}
+
+static void ctx3d_put(struct nvhost_hwctx *ctx)
+{
+ kref_put(&ctx->ref, ctx3d_free);
+}
+
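+/*
+ * Service half of a context save: walk the same register table the save
+ * stream was built from, skip over the restore-stream opcodes already
+ * emitted by setup_restore(), and fill the data slots between them with
+ * register values drained from the channel FIFO. The final syncpt
+ * increment signals that the restore buffer is now valid.
+ */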
+static void ctx3d_save_service(struct nvhost_hwctx *ctx)
+{
+ const struct hwctx_reginfo *r;
+ const struct hwctx_reginfo *rend;
+ unsigned int pending = 0;
+ u32 *ptr = (u32 *)ctx->save_cpu_data + RESTORE_BEGIN_SIZE;
+
+ BUG_ON(!ctx->save_cpu_data);
+
+ r = ctxsave_regs_3d;
+ rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
+ for ( ; r != rend; ++r) {
+ u32 count = r->count;
+ switch (r->type) {
+ case HWCTX_REGINFO_DIRECT:
+ ptr += RESTORE_DIRECT_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT:
+ ptr += RESTORE_INDOFFSET_SIZE + RESTORE_INDDATA_SIZE;
+ break;
+ case HWCTX_REGINFO_INDIRECT_OFFSET:
+ ptr += RESTORE_INDOFFSET_SIZE;
+ continue; /* INDIRECT_DATA follows with real count */
+ case HWCTX_REGINFO_INDIRECT_DATA:
+ ptr += RESTORE_INDDATA_SIZE;
+ break;
+ }
+ restore_registers_from_fifo(ptr, count, ctx->channel, &pending);
+ ptr += count;
+ }
+
+ BUG_ON((u32)((ptr + RESTORE_END_SIZE) - (u32*)ctx->save_cpu_data)
+ != context_restore_size);
+
+ wmb();
+ nvhost_syncpt_cpu_incr(&ctx->channel->dev->syncpt, NVSYNCPT_3D);
+}
+
+
+/*** nvhost_3dctx ***/
+
+int __init nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h)
+{
+ struct nvhost_channel *ch;
+ struct nvmap_client *nvmap;
+
+ ch = container_of(h, struct nvhost_channel, ctxhandler);
+ nvmap = ch->dev->nvmap;
+
+ setup_save(NULL, &context_save_size, &context_restore_size, 0, 0);
+
+ context_save_buf = nvmap_alloc(nvmap, context_save_size * 4, 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+
+ if (IS_ERR(context_save_buf)) {
+ int err = PTR_ERR(context_save_buf);
+ context_save_buf = NULL;
+ return err;
+ }
+
+ context_save_ptr = nvmap_mmap(context_save_buf);
+ if (!context_save_ptr) {
+ nvmap_free(nvmap, context_save_buf);
+ context_save_buf = NULL;
+ return -ENOMEM;
+ }
+
+ context_save_phys = nvmap_pin(nvmap, context_save_buf);
+ setup_save(context_save_ptr, NULL, NULL, NVSYNCPT_3D, NVWAITBASE_3D);
+
+ h->alloc = ctx3d_alloc;
+ h->get = ctx3d_get;
+ h->put = ctx3d_put;
+ h->save_service = ctx3d_save_service;
+ return 0;
+}
+
+/* TODO: [ahatala 2010-05-27] */
+int __init nvhost_mpectx_handler_init(struct nvhost_hwctx_handler *h)
+{
+ return 0;
+}
diff --git a/drivers/video/tegra/host/nvhost_acm.c b/drivers/video/tegra/host/nvhost_acm.c
new file mode 100644
index 000000000000..a4bbce2b6f1d
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_acm.c
@@ -0,0 +1,188 @@
+/*
+ * drivers/video/tegra/host/nvhost_acm.c
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_acm.h"
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <mach/powergate.h>
+#include <mach/clk.h>
+
+#define ACM_TIMEOUT (1 * HZ)
+
+#define DISABLE_3D_POWERGATING
+
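+/*
+ * Reference-counted clock and power management: the first busy()
+ * reference powers the module (and transitively its parent) up, while
+ * the last idle() reference only schedules a delayed powerdown, so
+ * short idle gaps between jobs do not bounce clocks or power
+ * partitions.
+ */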
+void nvhost_module_busy(struct nvhost_module *mod)
+{
+ mutex_lock(&mod->lock);
+ cancel_delayed_work(&mod->powerdown);
+ if ((atomic_inc_return(&mod->refcount) == 1) && !mod->powered) {
+ if (mod->parent)
+ nvhost_module_busy(mod->parent);
+ if (mod->powergate_id != -1) {
+ BUG_ON(mod->num_clks != 1);
+ tegra_powergate_sequence_power_up(
+ mod->powergate_id, mod->clk[0]);
+ } else {
+ int i;
+ for (i = 0; i < mod->num_clks; i++)
+ clk_enable(mod->clk[i]);
+ }
+ if (mod->func)
+ mod->func(mod, NVHOST_POWER_ACTION_ON);
+ mod->powered = true;
+ }
+ mutex_unlock(&mod->lock);
+}
+
+static void powerdown_handler(struct work_struct *work)
+{
+ struct nvhost_module *mod;
+ mod = container_of(to_delayed_work(work), struct nvhost_module, powerdown);
+ mutex_lock(&mod->lock);
+ if ((atomic_read(&mod->refcount) == 0) && mod->powered) {
+ int i;
+ if (mod->func)
+ mod->func(mod, NVHOST_POWER_ACTION_OFF);
+ for (i = 0; i < mod->num_clks; i++) {
+ clk_disable(mod->clk[i]);
+ }
+ if (mod->powergate_id != -1) {
+ tegra_periph_reset_assert(mod->clk[0]);
+ tegra_powergate_power_off(mod->powergate_id);
+ }
+ mod->powered = false;
+ if (mod->parent)
+ nvhost_module_idle(mod->parent);
+ }
+ mutex_unlock(&mod->lock);
+}
+
+void nvhost_module_idle_mult(struct nvhost_module *mod, int refs)
+{
+ bool kick = false;
+
+ mutex_lock(&mod->lock);
+ if (atomic_sub_return(refs, &mod->refcount) == 0) {
+ BUG_ON(!mod->powered);
+ schedule_delayed_work(&mod->powerdown, ACM_TIMEOUT);
+ kick = true;
+ }
+ mutex_unlock(&mod->lock);
+
+ if (kick)
+ wake_up(&mod->idle);
+}
+
+static const char *get_module_clk_id(const char *module, int index)
+{
+ if (index == 1 && strcmp(module, "gr2d") == 0)
+ return "epp";
+ else if (index == 0)
+ return module;
+ return NULL;
+}
+
+static int get_module_powergate_id(const char *module)
+{
+ if (strcmp(module, "gr3d") == 0)
+ return TEGRA_POWERGATE_3D;
+ else if (strcmp(module, "mpe") == 0)
+ return TEGRA_POWERGATE_MPE;
+ return -1;
+}
+
+int nvhost_module_init(struct nvhost_module *mod, const char *name,
+ nvhost_modulef func, struct nvhost_module *parent,
+ struct device *dev)
+{
+ int i = 0;
+ mod->name = name;
+
+ while (i < NVHOST_MODULE_MAX_CLOCKS) {
+ long rate;
+ mod->clk[i] = clk_get(dev, get_module_clk_id(name, i));
+ if (IS_ERR_OR_NULL(mod->clk[i]))
+ break;
+ rate = clk_round_rate(mod->clk[i], UINT_MAX);
+ if (rate < 0) {
+ pr_err("%s: can't get maximum rate for %s\n",
+ __func__, name);
+ break;
+ }
+ if (rate != clk_get_rate(mod->clk[i])) {
+ clk_set_rate(mod->clk[i], rate);
+ }
+ i++;
+ }
+
+ mod->num_clks = i;
+ mod->func = func;
+ mod->parent = parent;
+ mod->powered = false;
+ mod->powergate_id = get_module_powergate_id(name);
+
+#ifdef DISABLE_3D_POWERGATING
+ /*
+ * It is possible for the 3d block to generate an invalid memory
+ * request during the power up sequence in some cases. Workaround
+ * is to disable 3d block power gating.
+ */
+ if (mod->powergate_id == TEGRA_POWERGATE_3D) {
+ tegra_powergate_sequence_power_up(mod->powergate_id,
+ mod->clk[0]);
+ clk_disable(mod->clk[0]);
+ mod->powergate_id = -1;
+ }
+#endif
+
+ mutex_init(&mod->lock);
+ init_waitqueue_head(&mod->idle);
+ INIT_DELAYED_WORK(&mod->powerdown, powerdown_handler);
+
+ return 0;
+}
+
+static int is_module_idle(struct nvhost_module *mod)
+{
+ int count;
+ mutex_lock(&mod->lock);
+ count = atomic_read(&mod->refcount);
+ mutex_unlock(&mod->lock);
+ return (count == 0);
+}
+
+void nvhost_module_suspend(struct nvhost_module *mod)
+{
+ wait_event(mod->idle, is_module_idle(mod));
+ flush_delayed_work(&mod->powerdown);
+ BUG_ON(mod->powered);
+}
+
+void nvhost_module_deinit(struct nvhost_module *mod)
+{
+ int i;
+ nvhost_module_suspend(mod);
+ for (i = 0; i < mod->num_clks; i++)
+ clk_put(mod->clk[i]);
+}
diff --git a/drivers/video/tegra/host/nvhost_acm.h b/drivers/video/tegra/host/nvhost_acm.h
new file mode 100644
index 000000000000..57dcc2989113
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_acm.h
@@ -0,0 +1,76 @@
+/*
+ * drivers/video/tegra/host/nvhost_acm.h
+ *
+ * Tegra Graphics Host Automatic Clock Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_ACM_H
+#define __NVHOST_ACM_H
+
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+
+#define NVHOST_MODULE_MAX_CLOCKS 2
+
+struct nvhost_module;
+
+enum nvhost_power_action {
+ NVHOST_POWER_ACTION_OFF,
+ NVHOST_POWER_ACTION_ON,
+};
+
+typedef void (*nvhost_modulef)(struct nvhost_module *mod, enum nvhost_power_action action);
+
+struct nvhost_module {
+ const char *name;
+ nvhost_modulef func;
+ struct delayed_work powerdown;
+ struct clk *clk[NVHOST_MODULE_MAX_CLOCKS];
+ int num_clks;
+ struct mutex lock;
+ bool powered;
+ atomic_t refcount;
+ wait_queue_head_t idle;
+ struct nvhost_module *parent;
+ int powergate_id;
+};
+
+int nvhost_module_init(struct nvhost_module *mod, const char *name,
+ nvhost_modulef func, struct nvhost_module *parent,
+ struct device *dev);
+void nvhost_module_deinit(struct nvhost_module *mod);
+void nvhost_module_suspend(struct nvhost_module *mod);
+
+void nvhost_module_busy(struct nvhost_module *mod);
+void nvhost_module_idle_mult(struct nvhost_module *mod, int refs);
+
+static inline bool nvhost_module_powered(struct nvhost_module *mod)
+{
+ return mod->powered;
+}
+
+static inline void nvhost_module_idle(struct nvhost_module *mod)
+{
+ nvhost_module_idle_mult(mod, 1);
+}
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_cdma.c b/drivers/video/tegra/host/nvhost_cdma.c
new file mode 100644
index 000000000000..f27656bac07b
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cdma.c
@@ -0,0 +1,650 @@
+/*
+ * drivers/video/tegra/host/nvhost_cdma.c
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cdma.h"
+#include "dev.h"
+#include <asm/cacheflush.h>
+
+/*
+ * TODO:
+ * stats
+ * - for figuring out what to optimize further
+ * resizable push buffer & sync queue
+ * - some channels hardly need any, some channels (3d) could use more
+ */
+
+#define cdma_to_channel(cdma) container_of(cdma, struct nvhost_channel, cdma)
+#define cdma_to_dev(cdma) ((cdma_to_channel(cdma))->dev)
+#define cdma_to_nvmap(cdma) ((cdma_to_dev(cdma))->nvmap)
+#define pb_to_cdma(pb) container_of(pb, struct nvhost_cdma, push_buffer)
+
+/*
+ * push_buffer
+ *
+ * The push buffer is a circular array of words to be fetched by command DMA.
+ * Note that it works slightly differently to the sync queue; fence == cur
+ * means that the push buffer is full, not empty.
+ */
+
+/* 8 bytes per slot. (This number does not include the final RESTART.) */
+#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)
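+
+/*
+ * Worked example, assuming NVHOST_GATHER_QUEUE_SIZE == 512 (as defined in
+ * nvhost_cdma.h): PUSH_BUFFER_SIZE is 4096 bytes. After a reset, cur == 0
+ * and fence == 4088, leaving 511 free slots; pushing 511 slots advances
+ * cur to 4088 == fence, which reads as a full push buffer.
+ */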
+
+static void destroy_push_buffer(struct push_buffer *pb);
+
+/**
+ * Reset to empty push buffer
+ */
+static void reset_push_buffer(struct push_buffer *pb)
+{
+ pb->fence = PUSH_BUFFER_SIZE - 8;
+ pb->cur = 0;
+}
+
+/**
+ * Init push buffer resources
+ */
+static int init_push_buffer(struct push_buffer *pb)
+{
+ struct nvhost_cdma *cdma = pb_to_cdma(pb);
+ struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
+ pb->mem = NULL;
+ pb->mapped = NULL;
+ pb->phys = 0;
+ reset_push_buffer(pb);
+
+ /* allocate and map pushbuffer memory */
+ pb->mem = nvmap_alloc(nvmap, PUSH_BUFFER_SIZE + 4, 32,
+ NVMAP_HANDLE_WRITE_COMBINE);
+ if (IS_ERR_OR_NULL(pb->mem)) {
+ pb->mem = NULL;
+ goto fail;
+ }
+ pb->mapped = nvmap_mmap(pb->mem);
+ if (pb->mapped == NULL)
+ goto fail;
+
+ /* pin pushbuffer and get physical address */
+ pb->phys = nvmap_pin(nvmap, pb->mem);
+ if (pb->phys >= 0xfffff000) {
+ pb->phys = 0;
+ goto fail;
+ }
+
+ /* put the restart at the end of pushbuffer memory */
+ *(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) = nvhost_opcode_restart(pb->phys);
+
+ return 0;
+
+fail:
+ destroy_push_buffer(pb);
+ return -ENOMEM;
+}
+
+/**
+ * Clean up push buffer resources
+ */
+static void destroy_push_buffer(struct push_buffer *pb)
+{
+ struct nvhost_cdma *cdma = pb_to_cdma(pb);
+ struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
+ if (pb->mapped)
+ nvmap_munmap(pb->mem, pb->mapped);
+
+ if (pb->phys != 0)
+ nvmap_unpin(nvmap, pb->mem);
+
+ if (pb->mem)
+ nvmap_free(nvmap, pb->mem);
+
+ pb->mem = NULL;
+ pb->mapped = NULL;
+ pb->phys = 0;
+}
+
+/**
+ * Push two words to the push buffer
+ * Caller must ensure push buffer is not full
+ */
+static void push_to_push_buffer(struct push_buffer *pb, u32 op1, u32 op2)
+{
+ u32 cur = pb->cur;
+	u32 *p = (u32 *)((u32)pb->mapped + cur);
+ BUG_ON(cur == pb->fence);
+ *(p++) = op1;
+ *(p++) = op2;
+ pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
+}
+
+/**
+ * Pop a number of two word slots from the push buffer
+ * Caller must ensure push buffer is not empty
+ */
+static void pop_from_push_buffer(struct push_buffer *pb, unsigned int slots)
+{
+ pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
+}
+
+/**
+ * Return the number of two word slots free in the push buffer
+ */
+static u32 push_buffer_space(struct push_buffer *pb)
+{
+ return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
+}
+
+static u32 push_buffer_putptr(struct push_buffer *pb)
+{
+ return pb->phys + pb->cur;
+}
+
+
+/* Sync Queue
+ *
+ * The sync queue is a circular buffer of u32s interpreted as:
+ * 0: SyncPointID
+ * 1: SyncPointValue
+ * 2: NumSlots (how many pushbuffer slots to free)
+ * 3: NumHandles
+ * 4: nvmap client which pinned the handles
+ * 5..: NumHandles * nvmemhandle to unpin
+ *
+ * There's always one word unused, so (accounting for wrap):
+ * - Write == Read => queue empty
+ * - Write + 1 == Read => queue full
+ * The queue must not be left with less than SYNC_QUEUE_MIN_ENTRY words
+ * of space at the end of the array.
+ *
+ * We want to pass contiguous arrays of handles to nvmap_unpin_handles, so arrays
+ * that would wrap at the end of the buffer will be split into two (or more)
+ * entries.
+ */
+
+/* Number of words needed to store an entry containing one handle */
+#define SYNC_QUEUE_MIN_ENTRY (4 + (2 * sizeof(void *) / sizeof(u32)))
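+
+/*
+ * Worked example: on a 32-bit build, sizeof(void *) == sizeof(u32), so
+ * SYNC_QUEUE_MIN_ENTRY is 4 + 2 == 6 words: four header words, one word
+ * for the nvmap client pointer and one for a single handle pointer.
+ */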
+
+/**
+ * Reset to empty queue.
+ */
+static void reset_sync_queue(struct sync_queue *queue)
+{
+ queue->read = 0;
+ queue->write = 0;
+}
+
+/**
+ * Find the number of handles that can be stashed in the sync queue without
+ * waiting.
+ * 0 -> queue is full, must update to wait for some entries to be freed.
+ */
+static unsigned int sync_queue_space(struct sync_queue *queue)
+{
+ unsigned int read = queue->read;
+ unsigned int write = queue->write;
+ u32 size;
+
+ BUG_ON(read > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+ BUG_ON(write > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+
+ /*
+ * We can use all of the space up to the end of the buffer, unless the
+ * read position is within that space (the read position may advance
+ * asynchronously, but that can't take space away once we've seen it).
+ */
+ if (read > write) {
+ size = (read - 1) - write;
+ } else {
+ size = NVHOST_SYNC_QUEUE_SIZE - write;
+
+ /*
+ * If the read position is zero, it gets complicated. We can't
+ * use the last word in the buffer, because that would leave
+ * the queue empty.
+ * But also if we use too much we would not leave enough space
+ * for a single handle packet, and would have to wrap in
+ * add_to_sync_queue - also leaving write == read == 0,
+ * an empty queue.
+ */
+ if (read == 0)
+ size -= SYNC_QUEUE_MIN_ENTRY;
+ }
+
+ /*
+ * There must be room for an entry header and at least one handle,
+ * otherwise we report a full queue.
+ */
+ if (size < SYNC_QUEUE_MIN_ENTRY)
+ return 0;
+ /* Minimum entry stores one handle */
+ return (size - SYNC_QUEUE_MIN_ENTRY) + 1;
+}
+
+/**
+ * Add an entry to the sync queue.
+ */
+#define entry_size(_cnt)	((1 + (_cnt)) * sizeof(void *) / sizeof(u32))
+
+static void add_to_sync_queue(struct sync_queue *queue,
+ u32 sync_point_id, u32 sync_point_value,
+ u32 nr_slots, struct nvmap_client *user_nvmap,
+ struct nvmap_handle **handles, u32 nr_handles)
+{
+ u32 write = queue->write;
+ u32 *p = queue->buffer + write;
+	u32 size = 4 + entry_size(nr_handles);
+
+ BUG_ON(sync_point_id == NVSYNCPT_INVALID);
+ BUG_ON(sync_queue_space(queue) < nr_handles);
+
+ write += size;
+ BUG_ON(write > NVHOST_SYNC_QUEUE_SIZE);
+
+ *p++ = sync_point_id;
+ *p++ = sync_point_value;
+ *p++ = nr_slots;
+ *p++ = nr_handles;
+ BUG_ON(!user_nvmap);
+ *(struct nvmap_client **)p = nvmap_client_get(user_nvmap);
+
+ p = (u32 *)((void *)p + sizeof(struct nvmap_client *));
+
+ if (nr_handles)
+ memcpy(p, handles, nr_handles * sizeof(struct nvmap_handle *));
+
+ /* If there's not enough room for another entry, wrap to the start. */
+ if ((write + SYNC_QUEUE_MIN_ENTRY) > NVHOST_SYNC_QUEUE_SIZE) {
+ /*
+ * It's an error for the read position to be zero, as that
+ * would mean we emptied the queue while adding something.
+ */
+ BUG_ON(queue->read == 0);
+ write = 0;
+ }
+
+ queue->write = write;
+}
+
+/**
+ * Get a pointer to the next entry in the queue, or NULL if the queue is empty.
+ * Doesn't consume the entry.
+ */
+static u32 *sync_queue_head(struct sync_queue *queue)
+{
+ u32 read = queue->read;
+ u32 write = queue->write;
+
+ BUG_ON(read > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+ BUG_ON(write > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
+
+ if (read == write)
+ return NULL;
+ return queue->buffer + read;
+}
+
+/**
+ * Advances to the next queue entry, if you want to consume it.
+ */
+static void
+dequeue_sync_queue_head(struct sync_queue *queue)
+{
+ u32 read = queue->read;
+ u32 size;
+
+ BUG_ON(read == queue->write);
+
+ size = 4 + entry_size(queue->buffer[read + 3]);
+
+ read += size;
+ BUG_ON(read > NVHOST_SYNC_QUEUE_SIZE);
+
+ /* If there's not enough room for another entry, wrap to the start. */
+ if ((read + SYNC_QUEUE_MIN_ENTRY) > NVHOST_SYNC_QUEUE_SIZE)
+ read = 0;
+
+ queue->read = read;
+}
+
+
+/*** Cdma internal stuff ***/
+
+/**
+ * Kick channel DMA into action by writing its PUT offset (if it has changed)
+ */
+static void kick_cdma(struct nvhost_cdma *cdma)
+{
+ u32 put = push_buffer_putptr(&cdma->push_buffer);
+ if (put != cdma->last_put) {
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+ wmb();
+ writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT);
+ cdma->last_put = put;
+ }
+}
+
+/**
+ * Return the status of the cdma's sync queue or push buffer for the given event
+ * - sq empty: returns 1 for empty, 0 for not empty (as in "1 empty queue" :-)
+ * - sq space: returns the number of handles that can be stored in the queue
+ * - pb space: returns the number of free slots in the channel's push buffer
+ * Must be called with the cdma lock held.
+ */
+static unsigned int cdma_status(struct nvhost_cdma *cdma, enum cdma_event event)
+{
+ switch (event) {
+ case CDMA_EVENT_SYNC_QUEUE_EMPTY:
+ return sync_queue_head(&cdma->sync_queue) ? 0 : 1;
+ case CDMA_EVENT_SYNC_QUEUE_SPACE:
+ return sync_queue_space(&cdma->sync_queue);
+ case CDMA_EVENT_PUSH_BUFFER_SPACE:
+ return push_buffer_space(&cdma->push_buffer);
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Sleep (if necessary) until the requested event happens
+ * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
+ * - Returns 1
+ * - CDMA_EVENT_SYNC_QUEUE_SPACE : there is space in the sync queue.
+ * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
+ * - Return the amount of space (> 0)
+ * Must be called with the cdma lock held.
+ */
+static unsigned int wait_cdma(struct nvhost_cdma *cdma, enum cdma_event event)
+{
+ for (;;) {
+ unsigned int space = cdma_status(cdma, event);
+ if (space)
+ return space;
+
+ BUG_ON(cdma->event != CDMA_EVENT_NONE);
+ cdma->event = event;
+
+ mutex_unlock(&cdma->lock);
+ down(&cdma->sem);
+ mutex_lock(&cdma->lock);
+ }
+}
+
+/**
+ * For all sync queue entries that have already finished according to the
+ * current sync point registers:
+ * - unpin & unref their mems
+ * - pop their push buffer slots
+ * - remove them from the sync queue
+ * This is normally called from the host code's worker thread, but can be
+ * called manually if necessary.
+ * Must be called with the cdma lock held.
+ */
+static void update_cdma(struct nvhost_cdma *cdma)
+{
+ bool signal = false;
+ struct nvhost_master *dev = cdma_to_dev(cdma);
+
+ BUG_ON(!cdma->running);
+
+ /*
+ * Walk the sync queue, reading the sync point registers as necessary,
+ * to consume as many sync queue entries as possible without blocking
+ */
+ for (;;) {
+ u32 syncpt_id, syncpt_val;
+ unsigned int nr_slots, nr_handles;
+ struct nvmap_handle **handles;
+ struct nvmap_client *nvmap;
+ u32 *sync;
+
+ sync = sync_queue_head(&cdma->sync_queue);
+ if (!sync) {
+ if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+ signal = true;
+ break;
+ }
+
+ syncpt_id = *sync++;
+ syncpt_val = *sync++;
+
+ BUG_ON(syncpt_id == NVSYNCPT_INVALID);
+
+ /* Check whether this syncpt has completed, and bail if not */
+ if (!nvhost_syncpt_min_cmp(&dev->syncpt, syncpt_id, syncpt_val))
+ break;
+
+ nr_slots = *sync++;
+ nr_handles = *sync++;
+ nvmap = *(struct nvmap_client **)sync;
+ sync = ((void *)sync + sizeof(struct nvmap_client *));
+ handles = (struct nvmap_handle **)sync;
+
+ BUG_ON(!nvmap);
+
+ /* Unpin the memory */
+ nvmap_unpin_handles(nvmap, handles, nr_handles);
+
+ nvmap_client_put(nvmap);
+
+ /* Pop push buffer slots */
+ if (nr_slots) {
+ pop_from_push_buffer(&cdma->push_buffer, nr_slots);
+ if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
+ signal = true;
+ }
+
+ dequeue_sync_queue_head(&cdma->sync_queue);
+ if (cdma->event == CDMA_EVENT_SYNC_QUEUE_SPACE)
+ signal = true;
+ }
+
+ /* Wake up CdmaWait() if the requested event happened */
+ if (signal) {
+ cdma->event = CDMA_EVENT_NONE;
+ up(&cdma->sem);
+ }
+}
+
+/**
+ * Create a cdma
+ */
+int nvhost_cdma_init(struct nvhost_cdma *cdma)
+{
+ int err;
+
+ mutex_init(&cdma->lock);
+ sema_init(&cdma->sem, 0);
+ cdma->event = CDMA_EVENT_NONE;
+ cdma->running = false;
+ err = init_push_buffer(&cdma->push_buffer);
+ if (err)
+ return err;
+ reset_sync_queue(&cdma->sync_queue);
+ return 0;
+}
+
+/**
+ * Destroy a cdma
+ */
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma)
+{
+ BUG_ON(cdma->running);
+ destroy_push_buffer(&cdma->push_buffer);
+}
+
+static void start_cdma(struct nvhost_cdma *cdma)
+{
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+ if (cdma->running)
+ return;
+
+ cdma->last_put = push_buffer_putptr(&cdma->push_buffer);
+
+ writel(nvhost_channel_dmactrl(true, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ /* set base, put, end pointer (all of memory) */
+ writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
+ writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
+ writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);
+
+ /* reset GET */
+ writel(nvhost_channel_dmactrl(true, true, true),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ /* start the command DMA */
+ writel(nvhost_channel_dmactrl(false, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+	cdma->running = true;
+}
+
+void nvhost_cdma_stop(struct nvhost_cdma *cdma)
+{
+ void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
+
+ if (!cdma->running)
+ return;
+
+ mutex_lock(&cdma->lock);
+ wait_cdma(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
+ mutex_unlock(&cdma->lock);
+ writel(nvhost_channel_dmactrl(true, false, false),
+ chan_regs + HOST1X_CHANNEL_DMACTRL);
+
+ cdma->running = false;
+}
+
+/**
+ * Begin a cdma submit
+ */
+void nvhost_cdma_begin(struct nvhost_cdma *cdma)
+{
+ if (!cdma->running)
+ start_cdma(cdma);
+ mutex_lock(&cdma->lock);
+ cdma->slots_free = 0;
+ cdma->slots_used = 0;
+}
+
+/**
+ * Push two words into a push buffer slot
+ * Blocks as necessary if the push buffer is full.
+ */
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
+{
+ u32 slots_free = cdma->slots_free;
+ if (slots_free == 0) {
+ kick_cdma(cdma);
+ slots_free = wait_cdma(cdma, CDMA_EVENT_PUSH_BUFFER_SPACE);
+ }
+ cdma->slots_free = slots_free - 1;
+ cdma->slots_used++;
+ push_to_push_buffer(&cdma->push_buffer, op1, op2);
+}
+
+/**
+ * End a cdma submit
+ * Kick off DMA, add a contiguous block of memory handles to the sync queue,
+ * and a number of slots to be freed from the pushbuffer.
+ * Blocks as necessary if the sync queue is full.
+ * The handles for a submit must all be pinned at the same time, but they
+ * can be unpinned in smaller chunks.
+ */
+void nvhost_cdma_end(struct nvmap_client *user_nvmap, struct nvhost_cdma *cdma,
+ u32 sync_point_id, u32 sync_point_value,
+ struct nvmap_handle **handles, unsigned int nr_handles)
+{
+ kick_cdma(cdma);
+
+ while (nr_handles || cdma->slots_used) {
+ unsigned int count;
+ /*
+ * Wait until there's enough room in the
+ * sync queue to write something.
+ */
+ count = wait_cdma(cdma, CDMA_EVENT_SYNC_QUEUE_SPACE);
+
+ /*
+ * Add reloc entries to sync queue (as many as will fit)
+ * and unlock it
+ */
+ if (count > nr_handles)
+ count = nr_handles;
+ add_to_sync_queue(&cdma->sync_queue, sync_point_id,
+ sync_point_value, cdma->slots_used,
+ user_nvmap, handles, count);
+ /* NumSlots only goes in the first packet */
+ cdma->slots_used = 0;
+ handles += count;
+ nr_handles -= count;
+ }
+
+ mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Update cdma state according to current sync point values
+ */
+void nvhost_cdma_update(struct nvhost_cdma *cdma)
+{
+ mutex_lock(&cdma->lock);
+ update_cdma(cdma);
+ mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Manually spin until all CDMA has finished. Used if an async update
+ * cannot be scheduled for any reason.
+ */
+void nvhost_cdma_flush(struct nvhost_cdma *cdma)
+{
+ mutex_lock(&cdma->lock);
+ while (sync_queue_head(&cdma->sync_queue)) {
+ update_cdma(cdma);
+ mutex_unlock(&cdma->lock);
+ schedule();
+ mutex_lock(&cdma->lock);
+ }
+ mutex_unlock(&cdma->lock);
+}
+
+/**
+ * Find the currently executing gather in the push buffer and return
+ * its physical address and size.
+ */
+void nvhost_cdma_find_gather(struct nvhost_cdma *cdma, u32 dmaget, u32 *addr, u32 *size)
+{
+ u32 offset = dmaget - cdma->push_buffer.phys;
+
+ *addr = *size = 0;
+
+ if (offset >= 8 && offset < cdma->push_buffer.cur) {
+ u32 *p = cdma->push_buffer.mapped + (offset - 8) / 4;
+
+ /* Make sure we have a gather */
+ if ((p[0] >> 28) == 6) {
+ *addr = p[1];
+ *size = p[0] & 0x3fff;
+ }
+ }
+}
diff --git a/drivers/video/tegra/host/nvhost_cdma.h b/drivers/video/tegra/host/nvhost_cdma.h
new file mode 100644
index 000000000000..a7f17d0413d5
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cdma.h
@@ -0,0 +1,103 @@
+/*
+ * drivers/video/tegra/host/nvhost_cdma.h
+ *
+ * Tegra Graphics Host Command DMA
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CDMA_H
+#define __NVHOST_CDMA_H
+
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+
+#include "nvhost_acm.h"
+
+/*
+ * cdma
+ *
+ * This is in charge of a host command DMA channel.
+ * Sends ops to a push buffer, and takes responsibility for unpinning
+ * (& possibly freeing) of memory after those ops have completed.
+ * Producer:
+ * begin
+ * push - send ops to the push buffer
+ * end - start command DMA and enqueue handles to be unpinned
+ * Consumer:
+ * update - call to update sync queue and push buffer, unpin memory
+ */
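+
+/*
+ * Typical producer flow, sketched with the functions declared below:
+ *
+ *	nvhost_cdma_begin(cdma);
+ *	nvhost_cdma_push(cdma, op1, op2);  // once per opcode pair
+ *	nvhost_cdma_end(nvmap, cdma, syncpt_id, syncpt_val,
+ *			handles, nr_handles);  // kicks DMA, queues unpins
+ */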
+
+/* Size of the sync queue. If it is too small, we won't be able to queue up
+ * many command buffers. If it is too large, we waste memory. */
+#define NVHOST_SYNC_QUEUE_SIZE 8192
+
+/* Number of gathers we allow to be queued up per channel. Must be a
+ * power of two. Currently sized such that pushbuffer is 4KB (512*8B). */
+#define NVHOST_GATHER_QUEUE_SIZE 512
+
+struct push_buffer {
+ struct nvmap_handle_ref *mem; /* handle to pushbuffer memory */
+ u32 *mapped; /* mapped pushbuffer memory */
+ u32 phys; /* physical address of pushbuffer */
+ u32 fence; /* index we've written */
+ u32 cur; /* index to write to */
+};
+
+struct sync_queue {
+ unsigned int read; /* read position within buffer */
+ unsigned int write; /* write position within buffer */
+ u32 buffer[NVHOST_SYNC_QUEUE_SIZE]; /* queue data */
+};
+
+enum cdma_event {
+ CDMA_EVENT_NONE, /* not waiting for any event */
+ CDMA_EVENT_SYNC_QUEUE_EMPTY, /* wait for empty sync queue */
+ CDMA_EVENT_SYNC_QUEUE_SPACE, /* wait for space in sync queue */
+ CDMA_EVENT_PUSH_BUFFER_SPACE /* wait for space in push buffer */
+};
+
+struct nvhost_cdma {
+ struct mutex lock; /* controls access to shared state */
+ struct semaphore sem; /* signalled when event occurs */
+ enum cdma_event event; /* event that sem is waiting for */
+ unsigned int slots_used; /* pb slots used in current submit */
+ unsigned int slots_free; /* pb slots free in current submit */
+ unsigned int last_put; /* last value written to DMAPUT */
+ struct push_buffer push_buffer; /* channel's push buffer */
+ struct sync_queue sync_queue; /* channel's sync queue */
+ bool running;
+};
+
+int nvhost_cdma_init(struct nvhost_cdma *cdma);
+void nvhost_cdma_deinit(struct nvhost_cdma *cdma);
+void nvhost_cdma_stop(struct nvhost_cdma *cdma);
+void nvhost_cdma_begin(struct nvhost_cdma *cdma);
+void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2);
+void nvhost_cdma_end(struct nvmap_client *user_nvmap,
+ struct nvhost_cdma *cdma,
+ u32 sync_point_id, u32 sync_point_value,
+ struct nvmap_handle **handles, unsigned int nr_handles);
+void nvhost_cdma_update(struct nvhost_cdma *cdma);
+void nvhost_cdma_flush(struct nvhost_cdma *cdma);
+void nvhost_cdma_find_gather(struct nvhost_cdma *cdma, u32 dmaget,
+ u32 *addr, u32 *size);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_channel.c b/drivers/video/tegra/host/nvhost_channel.c
new file mode 100644
index 000000000000..40b67181c33d
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_channel.c
@@ -0,0 +1,249 @@
+/*
+ * drivers/video/tegra/host/nvhost_channel.c
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_channel.h"
+#include "dev.h"
+#include "nvhost_hwctx.h"
+
+#include <linux/platform_device.h>
+
+#define NVMODMUTEX_2D_FULL (1)
+#define NVMODMUTEX_2D_SIMPLE (2)
+#define NVMODMUTEX_2D_SB_A (3)
+#define NVMODMUTEX_2D_SB_B (4)
+#define NVMODMUTEX_3D (5)
+#define NVMODMUTEX_DISPLAYA (6)
+#define NVMODMUTEX_DISPLAYB (7)
+#define NVMODMUTEX_VI (8)
+#define NVMODMUTEX_DSI (9)
+
+static void power_2d(struct nvhost_module *mod, enum nvhost_power_action action);
+static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action);
+static void power_mpe(struct nvhost_module *mod, enum nvhost_power_action action);
+
+static const struct nvhost_channeldesc channelmap[] = {
+{
+ /* channel 0 */
+ .name = "display",
+ .syncpts = BIT(NVSYNCPT_DISP0) | BIT(NVSYNCPT_DISP1) |
+ BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
+ .modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
+},
+{
+ /* channel 1 */
+ .name = "gr3d",
+ .syncpts = BIT(NVSYNCPT_3D),
+ .waitbases = BIT(NVWAITBASE_3D),
+ .modulemutexes = BIT(NVMODMUTEX_3D),
+ .class = NV_GRAPHICS_3D_CLASS_ID,
+ .power = power_3d,
+},
+{
+ /* channel 2 */
+ .name = "gr2d",
+ .syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
+ .waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
+ .modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
+ BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
+ .power = power_2d,
+},
+{
+ /* channel 3 */
+ .name = "isp",
+ .syncpts = 0,
+},
+{
+ /* channel 4 */
+ .name = "vi",
+ .syncpts = BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
+ BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
+ BIT(NVSYNCPT_VI_ISP_4) | BIT(NVSYNCPT_VI_ISP_5),
+ .modulemutexes = BIT(NVMODMUTEX_VI),
+},
+{
+ /* channel 5 */
+ .name = "mpe",
+ .syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
+ BIT(NVSYNCPT_MPE_WR_SAFE),
+ .waitbases = BIT(NVWAITBASE_MPE),
+ .class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
+ .power = power_mpe,
+},
+{
+ /* channel 6 */
+ .name = "dsi",
+ .syncpts = BIT(NVSYNCPT_DSI),
+ .modulemutexes = BIT(NVMODMUTEX_DSI),
+}};
+
+static inline void __iomem *channel_aperture(void __iomem *p, int ndx)
+{
+ ndx += NVHOST_CHANNEL_BASE;
+ p += NV_HOST1X_CHANNEL0_BASE;
+ p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
+ return p;
+}
+
+int __init nvhost_channel_init(struct nvhost_channel *ch,
+ struct nvhost_master *dev, int index)
+{
+ BUILD_BUG_ON(NVHOST_NUMCHANNELS != ARRAY_SIZE(channelmap));
+
+ ch->dev = dev;
+ ch->desc = &channelmap[index];
+ ch->aperture = channel_aperture(dev->aperture, index);
+ mutex_init(&ch->reflock);
+ mutex_init(&ch->submitlock);
+
+ return nvhost_hwctx_handler_init(&ch->ctxhandler, ch->desc->name);
+}
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch)
+{
+ int err = 0;
+ mutex_lock(&ch->reflock);
+ if (ch->refcount == 0) {
+ err = nvhost_module_init(&ch->mod, ch->desc->name,
+ ch->desc->power, &ch->dev->mod,
+ &ch->dev->pdev->dev);
+ if (!err) {
+ err = nvhost_cdma_init(&ch->cdma);
+ if (err)
+ nvhost_module_deinit(&ch->mod);
+ }
+ }
+	if (!err)
+		ch->refcount++;
+ mutex_unlock(&ch->reflock);
+
+ return err ? NULL : ch;
+}
+
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx)
+{
+ if (ctx) {
+ mutex_lock(&ch->submitlock);
+ if (ch->cur_ctx == ctx)
+ ch->cur_ctx = NULL;
+ mutex_unlock(&ch->submitlock);
+ }
+
+ mutex_lock(&ch->reflock);
+ if (ch->refcount == 1) {
+ nvhost_module_deinit(&ch->mod);
+ /* cdma may already be stopped, that's ok */
+ nvhost_cdma_stop(&ch->cdma);
+ nvhost_cdma_deinit(&ch->cdma);
+ }
+ ch->refcount--;
+ mutex_unlock(&ch->reflock);
+}
+
+void nvhost_channel_suspend(struct nvhost_channel *ch)
+{
+ mutex_lock(&ch->reflock);
+ BUG_ON(nvhost_module_powered(&ch->mod));
+ nvhost_cdma_stop(&ch->cdma);
+ mutex_unlock(&ch->reflock);
+}
+
+void nvhost_channel_submit(struct nvhost_channel *ch,
+ struct nvmap_client *user_nvmap,
+ struct nvhost_op_pair *ops, int num_pairs,
+ struct nvhost_cpuinterrupt *intrs, int num_intrs,
+ struct nvmap_handle **unpins, int num_unpins,
+ u32 syncpt_id, u32 syncpt_val)
+{
+ int i;
+	struct nvhost_op_pair *p;
+
+ /* schedule interrupts */
+ for (i = 0; i < num_intrs; i++) {
+ nvhost_intr_add_action(&ch->dev->intr, syncpt_id, intrs[i].syncpt_val,
+ NVHOST_INTR_ACTION_CTXSAVE, intrs[i].intr_data, NULL);
+ }
+
+ /* begin a CDMA submit */
+ nvhost_cdma_begin(&ch->cdma);
+
+ /* push ops */
+ for (i = 0, p = ops; i < num_pairs; i++, p++)
+ nvhost_cdma_push(&ch->cdma, p->op1, p->op2);
+
+	/* end CDMA submit & stash pinned handles into sync queue for later cleanup */
+ nvhost_cdma_end(user_nvmap, &ch->cdma, syncpt_id, syncpt_val,
+ unpins, num_unpins);
+}
+
+static void power_2d(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+ /* TODO: [ahatala 2010-06-17] reimplement EPP hang war */
+ if (action == NVHOST_POWER_ACTION_OFF) {
+ /* TODO: [ahatala 2010-06-17] reset EPP */
+ }
+}
+
+static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+ struct nvhost_channel *ch = container_of(mod, struct nvhost_channel, mod);
+
+ if (action == NVHOST_POWER_ACTION_OFF) {
+ mutex_lock(&ch->submitlock);
+ if (ch->cur_ctx) {
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ struct nvhost_op_pair save;
+ struct nvhost_cpuinterrupt ctxsw;
+ u32 syncval;
+ void *ref;
+ syncval = nvhost_syncpt_incr_max(&ch->dev->syncpt,
+ NVSYNCPT_3D,
+ ch->cur_ctx->save_incrs);
+ save.op1 = nvhost_opcode_gather(0, ch->cur_ctx->save_size);
+ save.op2 = ch->cur_ctx->save_phys;
+ ctxsw.intr_data = ch->cur_ctx;
+ ctxsw.syncpt_val = syncval - 1;
+ ch->cur_ctx->valid = true;
+ ch->ctxhandler.get(ch->cur_ctx);
+ ch->cur_ctx = NULL;
+
+ nvhost_channel_submit(ch, ch->dev->nvmap,
+ &save, 1, &ctxsw, 1, NULL, 0,
+ NVSYNCPT_3D, syncval);
+
+ nvhost_intr_add_action(&ch->dev->intr, NVSYNCPT_3D,
+ syncval,
+ NVHOST_INTR_ACTION_WAKEUP,
+ &wq, &ref);
+ wait_event(wq,
+ nvhost_syncpt_min_cmp(&ch->dev->syncpt,
+ NVSYNCPT_3D, syncval));
+ nvhost_intr_put_ref(&ch->dev->intr, ref);
+ nvhost_cdma_update(&ch->cdma);
+ }
+ mutex_unlock(&ch->submitlock);
+ }
+}
+
+static void power_mpe(struct nvhost_module *mod, enum nvhost_power_action action)
+{
+}
diff --git a/drivers/video/tegra/host/nvhost_channel.h b/drivers/video/tegra/host/nvhost_channel.h
new file mode 100644
index 000000000000..c62d7397a192
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_channel.h
@@ -0,0 +1,89 @@
+/*
+ * drivers/video/tegra/host/nvhost_channel.h
+ *
+ * Tegra Graphics Host Channel
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CHANNEL_H
+#define __NVHOST_CHANNEL_H
+
+#include "nvhost_cdma.h"
+#include "nvhost_acm.h"
+#include "nvhost_hwctx.h"
+
+#include <linux/cdev.h>
+#include <linux/io.h>
+
+#define NVHOST_CHANNEL_BASE 0
+#define NVHOST_NUMCHANNELS (NV_HOST1X_CHANNELS - 1)
+#define NVHOST_MAX_GATHERS 512
+#define NVHOST_MAX_HANDLES 1280
+
+struct nvhost_master;
+
+struct nvhost_channeldesc {
+ const char *name;
+ nvhost_modulef power;
+ u32 syncpts;
+ u32 waitbases;
+ u32 modulemutexes;
+ u32 class;
+};
+
+struct nvhost_channel {
+ int refcount;
+ struct mutex reflock;
+ struct mutex submitlock;
+ void __iomem *aperture;
+ struct nvhost_master *dev;
+ const struct nvhost_channeldesc *desc;
+ struct nvhost_hwctx *cur_ctx;
+ struct device *node;
+ struct cdev cdev;
+ struct nvhost_hwctx_handler ctxhandler;
+ struct nvhost_module mod;
+ struct nvhost_cdma cdma;
+};
+
+struct nvhost_op_pair {
+ u32 op1;
+ u32 op2;
+};
+
+struct nvhost_cpuinterrupt {
+ u32 syncpt_val;
+ void *intr_data;
+};
+
+int nvhost_channel_init(
+ struct nvhost_channel *ch,
+ struct nvhost_master *dev, int index);
+
+void nvhost_channel_submit(struct nvhost_channel *ch,
+ struct nvmap_client *user_nvmap,
+ struct nvhost_op_pair *ops, int num_pairs,
+ struct nvhost_cpuinterrupt *intrs, int num_intrs,
+ struct nvmap_handle **unpins, int num_unpins,
+ u32 syncpt_id, u32 syncpt_val);
+
+struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch);
+void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx);
+void nvhost_channel_suspend(struct nvhost_channel *ch);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_cpuaccess.c b/drivers/video/tegra/host/nvhost_cpuaccess.c
new file mode 100644
index 000000000000..9114dad97783
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cpuaccess.c
@@ -0,0 +1,117 @@
+/*
+ * drivers/video/tegra/host/nvhost_cpuaccess.c
+ *
+ * Tegra Graphics Host Cpu Register Access
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_cpuaccess.h"
+#include "dev.h"
+#include <linux/string.h>
+
+#define cpuaccess_to_dev(ctx) container_of(ctx, struct nvhost_master, cpuaccess)
+
+int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
+ struct platform_device *pdev)
+{
+ int i;
+ for (i = 0; i < NVHOST_MODULE_NUM; i++) {
+ struct resource *mem;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, i+1);
+ if (!mem) {
+ dev_err(&pdev->dev, "missing module memory resource\n");
+ return -ENXIO;
+ }
+
+ ctx->regs[i] = ioremap(mem->start, resource_size(mem));
+ if (!ctx->regs[i]) {
+ dev_err(&pdev->dev, "failed to map module registers\n");
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx)
+{
+ int i;
+ for (i = 0; i < NVHOST_MODULE_NUM; i++) {
+ iounmap(ctx->regs[i]);
+ release_resource(ctx->reg_mem[i]);
+ }
+}
+
+int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *sync_regs = dev->sync_aperture;
+ u32 reg;
+
+	/* the mlock register reads 0 when the lock is acquired.
+	 * writing 0 clears the lock. */
+ nvhost_module_busy(&dev->mod);
+ reg = readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
+ if (reg) {
+ nvhost_module_idle(&dev->mod);
+ return -ERESTARTSYS;
+ }
+ return 0;
+}
+
+void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *sync_regs = dev->sync_aperture;
+ writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
+ nvhost_module_idle(&dev->mod);
+}
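+
+/*
+ * Illustrative pairing (the mutex index idx is hypothetical):
+ *
+ *	if (!nvhost_mutex_try_lock(ctx, idx)) {
+ *		...access the shared hardware...
+ *		nvhost_mutex_unlock(ctx, idx);
+ *	}
+ */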
+
+void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, void *values)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *p = ctx->regs[module] + offset;
+	u32 *out = (u32 *)values;
+ BUG_ON(size & 3);
+ size >>= 2;
+ nvhost_module_busy(&dev->mod);
+ while (size--) {
+ *(out++) = readl(p);
+ p += 4;
+ }
+ rmb();
+ nvhost_module_idle(&dev->mod);
+}
+
+void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, const void *values)
+{
+ struct nvhost_master *dev = cpuaccess_to_dev(ctx);
+ void __iomem *p = ctx->regs[module] + offset;
+	const u32 *in = (const u32 *)values;
+ BUG_ON(size & 3);
+ size >>= 2;
+ nvhost_module_busy(&dev->mod);
+ while (size--) {
+ writel(*(in++), p);
+ p += 4;
+ }
+ wmb();
+ nvhost_module_idle(&dev->mod);
+}
diff --git a/drivers/video/tegra/host/nvhost_cpuaccess.h b/drivers/video/tegra/host/nvhost_cpuaccess.h
new file mode 100644
index 000000000000..d7d6c99cd416
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_cpuaccess.h
@@ -0,0 +1,71 @@
+/*
+ * drivers/video/tegra/host/nvhost_cpuaccess.h
+ *
+ * Tegra Graphics Host Cpu Register Access
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_CPUACCESS_H
+#define __NVHOST_CPUACCESS_H
+
+#include "nvhost_hardware.h"
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+enum nvhost_module_id {
+ NVHOST_MODULE_DISPLAY_A = 0,
+ NVHOST_MODULE_DISPLAY_B,
+ NVHOST_MODULE_VI,
+ NVHOST_MODULE_ISP,
+ NVHOST_MODULE_MPE,
+#if 0
+ /* TODO: [ahatala 2010-07-02] find out if these are needed */
+ NVHOST_MODULE_FUSE,
+ NVHOST_MODULE_APB_MISC,
+ NVHOST_MODULE_CLK_RESET,
+#endif
+ NVHOST_MODULE_NUM
+};
+
+struct nvhost_cpuaccess {
+ struct resource *reg_mem[NVHOST_MODULE_NUM];
+ void __iomem *regs[NVHOST_MODULE_NUM];
+};
+
+int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
+ struct platform_device *pdev);
+
+void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx);
+
+int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx);
+
+void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx);
+
+static inline bool nvhost_access_module_regs(
+ struct nvhost_cpuaccess *ctx, u32 module)
+{
+ return (module < NVHOST_MODULE_NUM);
+}
+
+void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, void *values);
+
+void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
+ u32 offset, size_t size, const void *values);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_hardware.h b/drivers/video/tegra/host/nvhost_hardware.h
new file mode 100644
index 000000000000..f69f467dd64e
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_hardware.h
@@ -0,0 +1,233 @@
+/*
+ * drivers/video/tegra/host/nvhost_hardware.h
+ *
+ * Tegra Graphics Host Register Offsets
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HARDWARE_H
+#define __NVHOST_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+/* class ids */
+enum {
+ NV_HOST1X_CLASS_ID = 0x1,
+ NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20,
+ NV_GRAPHICS_3D_CLASS_ID = 0x60
+};
+
+
+/* channel registers */
+#define NV_HOST1X_CHANNELS 8
+#define NV_HOST1X_CHANNEL0_BASE 0
+#define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384
+
+
+#define HOST1X_CHANNEL_FIFOSTAT 0x00
+#define HOST1X_CHANNEL_INDDATA 0x0c
+#define HOST1X_CHANNEL_DMASTART 0x14
+#define HOST1X_CHANNEL_DMAPUT 0x18
+#define HOST1X_CHANNEL_DMAGET 0x1c
+#define HOST1X_CHANNEL_DMAEND 0x20
+#define HOST1X_CHANNEL_DMACTRL 0x24
+
+#define HOST1X_SYNC_CF_SETUP(x) (0x3080 + (4 * (x)))
+
+#define HOST1X_SYNC_SYNCPT_BASE(x) (0x3600 + (4 * (x)))
+
+#define HOST1X_SYNC_CBREAD(x) (0x3720 + (4 * (x)))
+#define HOST1X_SYNC_CFPEEK_CTRL 0x374c
+#define HOST1X_SYNC_CFPEEK_READ 0x3750
+#define HOST1X_SYNC_CFPEEK_PTRS 0x3754
+#define HOST1X_SYNC_CBSTAT(x) (0x3758 + (4 * (x)))
+
+static inline unsigned nvhost_channel_fifostat_outfentries(u32 reg)
+{
+ return (reg >> 24) & 0x1f;
+}
+
+static inline u32 nvhost_channel_dmactrl(bool stop, bool get_rst, bool init_get)
+{
+ u32 v = stop ? 1 : 0;
+ if (get_rst)
+ v |= 2;
+ if (init_get)
+ v |= 4;
+ return v;
+}
+
+
+/* sync registers */
+#define NV_HOST1X_SYNCPT_NB_PTS 32
+#define NV_HOST1X_SYNCPT_NB_BASES 8
+#define NV_HOST1X_NB_MLOCKS 16
+#define HOST1X_CHANNEL_SYNC_REG_BASE 12288
+
+enum {
+ HOST1X_SYNC_INTMASK = 0x4,
+ HOST1X_SYNC_INTC0MASK = 0x8,
+ HOST1X_SYNC_HINTSTATUS = 0x20,
+ HOST1X_SYNC_HINTMASK = 0x24,
+ HOST1X_SYNC_HINTSTATUS_EXT = 0x28,
+ HOST1X_SYNC_HINTMASK_EXT = 0x2c,
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS = 0x40,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_0 = 0x50,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_1 = 0x54,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE = 0x60,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0 = 0x68,
+ HOST1X_SYNC_USEC_CLK = 0x1a4,
+ HOST1X_SYNC_CTXSW_TIMEOUT_CFG = 0x1a8,
+ HOST1X_SYNC_IP_BUSY_TIMEOUT = 0x1bc,
+ HOST1X_SYNC_IP_READ_TIMEOUT_ADDR = 0x1c0,
+ HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR = 0x1c4,
+ HOST1X_SYNC_MLOCK_0 = 0x2c0,
+ HOST1X_SYNC_MLOCK_OWNER_0 = 0x340,
+ HOST1X_SYNC_SYNCPT_0 = 0x400,
+ HOST1X_SYNC_SYNCPT_INT_THRESH_0 = 0x500,
+ HOST1X_SYNC_SYNCPT_BASE_0 = 0x600,
+ HOST1X_SYNC_SYNCPT_CPU_INCR = 0x700
+};
+
+static inline bool nvhost_sync_hintstatus_ext_ip_read_int(u32 reg)
+{
+ return (reg & BIT(30)) != 0;
+}
+
+static inline bool nvhost_sync_hintstatus_ext_ip_write_int(u32 reg)
+{
+ return (reg & BIT(31)) != 0;
+}
+
+static inline bool nvhost_sync_mlock_owner_ch_owns(u32 reg)
+{
+ return (reg & BIT(0)) != 0;
+}
+
+static inline bool nvhost_sync_mlock_owner_cpu_owns(u32 reg)
+{
+ return (reg & BIT(1)) != 0;
+}
+
+static inline unsigned int nvhost_sync_mlock_owner_owner_chid(u32 reg)
+{
+ return (reg >> 8) & 0xf;
+}
+
+
+/* host class */
+enum {
+ NV_CLASS_HOST_INCR_SYNCPT = 0x0,
+ NV_CLASS_HOST_WAIT_SYNCPT = 0x8,
+ NV_CLASS_HOST_WAIT_SYNCPT_BASE = 0x9,
+ NV_CLASS_HOST_INCR_SYNCPT_BASE = 0xc,
+ NV_CLASS_HOST_INDOFF = 0x2d,
+ NV_CLASS_HOST_INDDATA = 0x2e
+};
+
+static inline u32 nvhost_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return (indx << 24) | (base_indx << 16) | offset;
+}
+
+static inline u32 nvhost_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return (base_indx << 24) | offset;
+}
+
+enum {
+ NV_HOST_MODULE_HOST1X = 0,
+ NV_HOST_MODULE_MPE = 1,
+ NV_HOST_MODULE_GR3D = 6
+};
+
+static inline u32 nvhost_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = (0xf << 28) | (mod_id << 18) | (offset << 2);
+ if (auto_inc)
+ v |= BIT(27);
+ return v;
+}
+
+static inline u32 nvhost_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = (mod_id << 18) | (offset << 2) | 1;
+ if (auto_inc)
+ v |= BIT(27);
+ return v;
+}
+
+
+/* cdma opcodes */
+static inline u32 nvhost_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 nvhost_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 nvhost_opcode_gather(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | count;
+}
+
+static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)
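+
+/*
+ * Encoding examples (hypothetical operands):
+ *	nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0)
+ *		== (0x60 << 6) == 0x00001800
+ *	nvhost_opcode_incr(0x500, 2)
+ *		== (1 << 28) | (0x500 << 16) | 2 == 0x15000002
+ */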
+
+
+
+#endif /* __NVHOST_HARDWARE_H */
+
diff --git a/drivers/video/tegra/host/nvhost_hwctx.h b/drivers/video/tegra/host/nvhost_hwctx.h
new file mode 100644
index 000000000000..4fce8d13d7fe
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_hwctx.h
@@ -0,0 +1,88 @@
+/*
+ * drivers/video/tegra/host/nvhost_hwctx.h
+ *
+ * Tegra Graphics Host Hardware Context Interface
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_HWCTX_H
+#define __NVHOST_HWCTX_H
+
+#include <linux/string.h>
+#include <linux/kref.h>
+
+#include <mach/nvhost.h>
+#include <mach/nvmap.h>
+
+struct nvhost_channel;
+
+struct nvhost_hwctx {
+ struct kref ref;
+
+ struct nvhost_channel *channel;
+ bool valid;
+
+ struct nvmap_handle_ref *save;
+ u32 save_phys;
+ u32 save_size;
+ u32 save_incrs;
+ void *save_cpu_data;
+
+ struct nvmap_handle_ref *restore;
+ u32 restore_phys;
+ u32 restore_size;
+ u32 restore_incrs;
+};
+
+struct nvhost_hwctx_handler {
+ struct nvhost_hwctx * (*alloc) (struct nvhost_channel *ch);
+ void (*get) (struct nvhost_hwctx *ctx);
+ void (*put) (struct nvhost_hwctx *ctx);
+ void (*save_service) (struct nvhost_hwctx *ctx);
+};
+
+int nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h);
+int nvhost_mpectx_handler_init(struct nvhost_hwctx_handler *h);
+
+static inline int nvhost_hwctx_handler_init(struct nvhost_hwctx_handler *h,
+ const char *module)
+{
+ if (strcmp(module, "gr3d") == 0)
+ return nvhost_3dctx_handler_init(h);
+ else if (strcmp(module, "mpe") == 0)
+ return nvhost_mpectx_handler_init(h);
+
+ return 0;
+}
+
+struct hwctx_reginfo {
+ unsigned int offset:12;
+ unsigned int count:16;
+ unsigned int type:2;
+};
+
+enum {
+ HWCTX_REGINFO_DIRECT = 0,
+ HWCTX_REGINFO_INDIRECT,
+ HWCTX_REGINFO_INDIRECT_OFFSET,
+ HWCTX_REGINFO_INDIRECT_DATA
+};
+
+#define HWCTX_REGINFO(offset, count, type) {offset, count, HWCTX_REGINFO_##type}
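+
+/*
+ * Example expansion (hypothetical register table entry):
+ *	HWCTX_REGINFO(0x40c, 1, DIRECT) == {0x40c, 1, HWCTX_REGINFO_DIRECT}
+ */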
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_intr.c b/drivers/video/tegra/host/nvhost_intr.c
new file mode 100644
index 000000000000..007aaed9909f
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_intr.c
@@ -0,0 +1,477 @@
+/*
+ * drivers/video/tegra/host/nvhost_intr.c
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_intr.h"
+#include "dev.h"
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#define intr_to_dev(x) container_of(x, struct nvhost_master, intr)
+
+
+/*** HW sync point threshold interrupt management ***/
+
+static void set_syncpt_threshold(void __iomem *sync_regs, u32 id, u32 thresh)
+{
+ thresh &= 0xffff;
+ writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
+}
+
+static void enable_syncpt_interrupt(void __iomem *sync_regs, u32 id)
+{
+ writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
+}
+
+
+/*** Wait list management ***/
+
+struct nvhost_waitlist {
+ struct list_head list;
+ struct kref refcount;
+ u32 thresh;
+ enum nvhost_intr_action action;
+ atomic_t state;
+ void *data;
+ int count;
+};
+
+enum waitlist_state
+{
+ WLS_PENDING,
+ WLS_REMOVED,
+ WLS_CANCELLED,
+ WLS_HANDLED
+};
+
+static void waiter_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct nvhost_waitlist, refcount));
+}
+
+/*
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ */
+static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
+ struct list_head *queue)
+{
+ struct nvhost_waitlist *pos;
+ u32 thresh = waiter->thresh;
+
+ list_for_each_entry_reverse(pos, queue, list)
+ if ((s32)(pos->thresh - thresh) <= 0) {
+ list_add(&waiter->list, &pos->list);
+ return false;
+ }
+
+ list_add(&waiter->list, queue);
+ return true;
+}
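+
+/*
+ * Note: thresholds are ordered by signed difference, so the ordering
+ * survives 32-bit wraparound of the sync point value. For example,
+ * (s32)(0xfffffffe - 0x00000001) < 0, so a waiter at 0xfffffffe sorts
+ * ahead of one at 0x00000001 after the counter wraps.
+ */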
+
+/*
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into lists by actions
+ */
+static void remove_completed_waiters(struct list_head *head, u32 sync,
+ struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+ struct list_head *dest;
+ struct nvhost_waitlist *waiter, *next, *prev;
+
+ list_for_each_entry_safe(waiter, next, head, list) {
+ if ((s32)(waiter->thresh - sync) > 0)
+ break;
+
+ dest = completed + waiter->action;
+
+ /* consolidate submit cleanups */
+ if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
+ && !list_empty(dest)) {
+ prev = list_entry(dest->prev,
+ struct nvhost_waitlist, list);
+ if (prev->data == waiter->data) {
+ prev->count++;
+ dest = NULL;
+ }
+ }
+
+ /* PENDING->REMOVED or CANCELLED->HANDLED */
+ if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ } else {
+ list_move_tail(&waiter->list, dest);
+ }
+ }
+}
+
+static void action_submit_complete(struct nvhost_waitlist *waiter)
+{
+ struct nvhost_channel *channel = waiter->data;
+ int nr_completed = waiter->count;
+
+ nvhost_cdma_update(&channel->cdma);
+ nvhost_module_idle_mult(&channel->mod, nr_completed);
+}
+
+static void action_ctxsave(struct nvhost_waitlist *waiter)
+{
+ struct nvhost_hwctx *hwctx = waiter->data;
+ struct nvhost_channel *channel = hwctx->channel;
+
+ channel->ctxhandler.save_service(hwctx);
+ channel->ctxhandler.put(hwctx);
+}
+
+static void action_wakeup(struct nvhost_waitlist *waiter)
+{
+ wait_queue_head_t *wq = waiter->data;
+
+ wake_up(wq);
+}
+
+static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
+{
+ wait_queue_head_t *wq = waiter->data;
+
+ wake_up_interruptible(wq);
+}
+
+typedef void (*action_handler)(struct nvhost_waitlist *waiter);
+
+static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
+ action_submit_complete,
+ action_ctxsave,
+ action_wakeup,
+ action_wakeup_interruptible,
+};
+
+static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+{
+ struct list_head *head = completed;
+ int i;
+
+ for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
+ action_handler handler = action_handlers[i];
+ struct nvhost_waitlist *waiter, *next;
+
+ list_for_each_entry_safe(waiter, next, head, list) {
+ list_del(&waiter->list);
+ handler(waiter);
+ WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != WLS_REMOVED);
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ }
+}
+
+
+/*** Interrupt service functions ***/
+
+/**
+ * Host1x interrupt service function
+ * Handles read / write failures
+ */
+static irqreturn_t host1x_isr(int irq, void *dev_id)
+{
+ struct nvhost_intr *intr = dev_id;
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ u32 stat;
+ u32 ext_stat;
+ u32 addr;
+
+ stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
+ ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
+
+ if (nvhost_sync_hintstatus_ext_ip_read_int(ext_stat)) {
+ addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
+ pr_err("Host read timeout at address %x\n", addr);
+ }
+
+ if (nvhost_sync_hintstatus_ext_ip_write_int(ext_stat)) {
+ addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
+ pr_err("Host write timeout at address %x\n", addr);
+ }
+
+ writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
+ writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers, in interrupt context
+ */
+static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
+{
+ struct nvhost_intr_syncpt *syncpt = dev_id;
+ unsigned int id = syncpt->id;
+ struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
+ syncpt[id]);
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+ writel(BIT(id),
+ sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
+ writel(BIT(id),
+ sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
+
+ return IRQ_WAKE_THREAD;
+}
+
+
+/**
+ * Sync point threshold interrupt service thread function
+ * Handles sync point threshold triggers, in thread context
+ */
+static irqreturn_t syncpt_thresh_fn(int irq, void *dev_id)
+{
+ struct nvhost_intr_syncpt *syncpt = dev_id;
+ unsigned int id = syncpt->id;
+ struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
+ syncpt[id]);
+ struct nvhost_master *dev = intr_to_dev(intr);
+ void __iomem *sync_regs = dev->sync_aperture;
+
+ struct list_head completed[NVHOST_INTR_ACTION_COUNT];
+ u32 sync;
+ unsigned int i;
+
+ for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
+ INIT_LIST_HEAD(completed + i);
+
+ sync = nvhost_syncpt_update_min(&dev->syncpt, id);
+
+ spin_lock(&syncpt->lock);
+
+ remove_completed_waiters(&syncpt->wait_head, sync, completed);
+
+ if (!list_empty(&syncpt->wait_head)) {
+ u32 thresh = list_first_entry(&syncpt->wait_head,
+ struct nvhost_waitlist, list)->thresh;
+
+ set_syncpt_threshold(sync_regs, id, thresh);
+ enable_syncpt_interrupt(sync_regs, id);
+ }
+
+ spin_unlock(&syncpt->lock);
+
+ run_handlers(completed);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * lazily request a syncpt's irq
+ */
+static int request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
+{
+ static DEFINE_MUTEX(mutex);
+	int err = 0;
+
+ mutex_lock(&mutex);
+ if (!syncpt->irq_requested) {
+ err = request_threaded_irq(syncpt->irq,
+ syncpt_thresh_isr, syncpt_thresh_fn,
+ 0, syncpt->thresh_irq_name, syncpt);
+ if (!err)
+ syncpt->irq_requested = 1;
+ }
+ mutex_unlock(&mutex);
+ return err;
+}
+
+
+/*** Main API ***/
+
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+ enum nvhost_intr_action action, void *data,
+ void **ref)
+{
+ struct nvhost_waitlist *waiter;
+ struct nvhost_intr_syncpt *syncpt;
+ void __iomem *sync_regs;
+ int queue_was_empty;
+ int err;
+
+ /* create and initialize a new waiter */
+ waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
+ if (!waiter)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&waiter->list);
+ kref_init(&waiter->refcount);
+ if (ref)
+ kref_get(&waiter->refcount);
+ waiter->thresh = thresh;
+ waiter->action = action;
+ atomic_set(&waiter->state, WLS_PENDING);
+ waiter->data = data;
+ waiter->count = 1;
+
+ BUG_ON(id >= NV_HOST1X_SYNCPT_NB_PTS);
+ syncpt = intr->syncpt + id;
+ sync_regs = intr_to_dev(intr)->sync_aperture;
+
+ spin_lock(&syncpt->lock);
+
+ /* lazily request irq for this sync point */
+ if (!syncpt->irq_requested) {
+ spin_unlock(&syncpt->lock);
+
+ err = request_syncpt_irq(syncpt);
+ if (err) {
+ kfree(waiter);
+ return err;
+ }
+
+ spin_lock(&syncpt->lock);
+ }
+
+ queue_was_empty = list_empty(&syncpt->wait_head);
+
+ if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
+ /* added at head of list - new threshold value */
+ set_syncpt_threshold(sync_regs, id, thresh);
+
+ /* added as first waiter - enable interrupt */
+ if (queue_was_empty)
+ enable_syncpt_interrupt(sync_regs, id);
+ }
+
+ spin_unlock(&syncpt->lock);
+
+ if (ref)
+ *ref = waiter;
+ return 0;
+}
+
+void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
+{
+ struct nvhost_waitlist *waiter = ref;
+
+ while (atomic_cmpxchg(&waiter->state,
+ WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
+ schedule();
+
+ kref_put(&waiter->refcount, waiter_release);
+}
+
+
+/*** Init & shutdown ***/
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
+{
+ unsigned int id;
+ struct nvhost_intr_syncpt *syncpt;
+ int err;
+
+ err = request_irq(irq_gen, host1x_isr, 0, "host_status", intr);
+ if (err)
+ goto fail;
+ intr->host1x_irq = irq_gen;
+ intr->host1x_isr_started = true;
+
+ for (id = 0, syncpt = intr->syncpt;
+ id < NV_HOST1X_SYNCPT_NB_PTS;
+ ++id, ++syncpt) {
+ syncpt->id = id;
+ syncpt->irq = irq_sync + id;
+ syncpt->irq_requested = 0;
+ spin_lock_init(&syncpt->lock);
+ INIT_LIST_HEAD(&syncpt->wait_head);
+ snprintf(syncpt->thresh_irq_name,
+ sizeof(syncpt->thresh_irq_name),
+ "%s", nvhost_syncpt_name(id));
+ }
+
+ return 0;
+
+fail:
+ nvhost_intr_deinit(intr);
+ return err;
+}
+
+void nvhost_intr_deinit(struct nvhost_intr *intr)
+{
+ unsigned int id;
+ struct nvhost_intr_syncpt *syncpt;
+
+ for (id = 0, syncpt = intr->syncpt;
+ id < NV_HOST1X_SYNCPT_NB_PTS;
+ ++id, ++syncpt) {
+ struct nvhost_waitlist *waiter, *next;
+ list_for_each_entry_safe(waiter, next, &syncpt->wait_head, list) {
+ if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED)
+ == WLS_CANCELLED) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ }
+
+		/* output diagnostics */
+		if (!list_empty(&syncpt->wait_head)) {
+			pr_err("%s: id=%u\n", __func__, id);
+			BUG();
+		}
+
+ if (syncpt->irq_requested)
+ free_irq(syncpt->irq, syncpt);
+ }
+
+ if (intr->host1x_isr_started) {
+ free_irq(intr->host1x_irq, intr);
+ intr->host1x_isr_started = false;
+ }
+}
+
+void nvhost_intr_configure(struct nvhost_intr *intr, u32 hz)
+{
+ void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+
+ /* write microsecond clock register: host clock cycles per usec */
+ writel(DIV_ROUND_UP(hz, 1000000), sync_regs + HOST1X_SYNC_USEC_CLK);
+
+ /* disable the ip_busy_timeout. this prevents write drops, etc.
+ * there's no real way to recover from a hung client anyway.
+ */
+ writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);
+
+ /* increase the auto-ack timeout to the maximum value. 2d will hang
+ * otherwise on ap20.
+ */
+ writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
+
+ /* disable interrupts for both cpu's */
+ writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_0);
+ writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_1);
+
+ /* despite the name, setting a bit in INTMASK enables that interrupt */
+ writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);
+
+ /* enable HOST_INT_C0MASK */
+ writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);
+
+ /* enable HINTMASK_EXT */
+ writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);
+
+ /* enable IP_READ_INT and IP_WRITE_INT */
+ writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);
+}
diff --git a/drivers/video/tegra/host/nvhost_intr.h b/drivers/video/tegra/host/nvhost_intr.h
new file mode 100644
index 000000000000..b546c54dde06
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_intr.h
@@ -0,0 +1,102 @@
+/*
+ * drivers/video/tegra/host/nvhost_intr.h
+ *
+ * Tegra Graphics Host Interrupt Management
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_INTR_H
+#define __NVHOST_INTR_H
+
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+
+#include "nvhost_hardware.h"
+
+struct nvhost_channel;
+
+enum nvhost_intr_action {
+ /**
+ * Perform cleanup after a submit has completed.
+ * 'data' points to a channel
+ */
+ NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,
+
+ /**
+ * Save a HW context.
+ * 'data' points to a context
+ */
+ NVHOST_INTR_ACTION_CTXSAVE,
+
+ /**
+ * Wake up a task.
+ * 'data' points to a wait_queue_head_t
+ */
+ NVHOST_INTR_ACTION_WAKEUP,
+
+ /**
+ * Wake up an interruptible task.
+ * 'data' points to a wait_queue_head_t
+ */
+ NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+
+ NVHOST_INTR_ACTION_COUNT
+};
+
+struct nvhost_intr_syncpt {
+ u8 id;
+ u8 irq_requested;
+ u16 irq;
+ spinlock_t lock;
+ struct list_head wait_head;
+ char thresh_irq_name[12];
+};
+
+struct nvhost_intr {
+ struct nvhost_intr_syncpt syncpt[NV_HOST1X_SYNCPT_NB_PTS];
+ int host1x_irq;
+ bool host1x_isr_started;
+};
+
+/**
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking api.
+ */
+int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
+ enum nvhost_intr_action action, void *data,
+ void **ref);
+
+/**
+ * Unreference an action submitted to nvhost_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from nvhost_intr_add_action()
+ */
+void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref);
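+
+/* Illustrative usage (not part of the driver): a client that wants to be
+ * woken when syncpoint `id` reaches `thresh` might do, assuming a wait
+ * queue `wq` that it owns:
+ *
+ *	void *ref;
+ *	int err = nvhost_intr_add_action(intr, id, thresh,
+ *					 NVHOST_INTR_ACTION_WAKEUP, &wq, &ref);
+ *	if (!err) {
+ *		wait_event(wq, fence_reached());
+ *		nvhost_intr_put_ref(intr, ref);
+ *	}
+ *
+ * Passing a non-NULL ref is what makes cancellation via
+ * nvhost_intr_put_ref() possible; fence_reached() is a stand-in for the
+ * caller's own completion check.
+ */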
+
+int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync);
+void nvhost_intr_deinit(struct nvhost_intr *intr);
+void nvhost_intr_configure(struct nvhost_intr *intr, u32 hz);
+
+#endif
diff --git a/drivers/video/tegra/host/nvhost_mpectx.c b/drivers/video/tegra/host/nvhost_mpectx.c
new file mode 100644
index 000000000000..a5812e7469a3
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_mpectx.c
@@ -0,0 +1,23 @@
+/*
+ * drivers/video/tegra/host/nvhost_mpectx.c
+ *
+ * Tegra Graphics Host MPE HW Context
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* Placeholder */
diff --git a/drivers/video/tegra/host/nvhost_syncpt.c b/drivers/video/tegra/host/nvhost_syncpt.c
new file mode 100644
index 000000000000..dd2ab0d379e0
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_syncpt.c
@@ -0,0 +1,256 @@
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.c
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "nvhost_syncpt.h"
+#include "dev.h"
+
+#define client_managed(id) (BIT(id) & NVSYNCPTS_CLIENT_MANAGED)
+#define syncpt_to_dev(sp) container_of(sp, struct nvhost_master, syncpt)
+#define SYNCPT_CHECK_PERIOD (2 * HZ)
+
+static bool check_max(struct nvhost_syncpt *sp, u32 id, u32 real)
+{
+ u32 max;
+ if (client_managed(id))
+ return true;
+ smp_rmb();
+ max = (u32)atomic_read(&sp->max_val[id]);
+ return ((s32)(max - real) >= 0);
+}
+
+/**
+ * Write the current syncpoint value back to hw.
+ */
+static void reset_syncpt(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ int min;
+ smp_rmb();
+ min = atomic_read(&sp->min_val[id]);
+ writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
+}
+
+/**
+ * Write the current waitbase value back to hw.
+ */
+static void reset_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ writel(sp->base_val[id],
+ dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
+}
+
+/**
+ * Read waitbase value from hw.
+ */
+static void read_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ sp->base_val[id] = readl(dev->sync_aperture +
+ (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
+}
+
+/**
+ * Resets syncpoint and waitbase values to sw shadows
+ */
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++)
+ reset_syncpt(sp, i);
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
+ reset_syncpt_wait_base(sp, i);
+ wmb();
+}
+
+/**
+ * Updates sw shadow state for client managed registers
+ */
+void nvhost_syncpt_save(struct nvhost_syncpt *sp)
+{
+ u32 i;
+
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
+ if (client_managed(i))
+ nvhost_syncpt_update_min(sp, i);
+ else
+ BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
+ }
+
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
+ read_syncpt_wait_base(sp, i);
+}
+
+/**
+ * Updates the last value read from hardware.
+ */
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ void __iomem *sync_regs = dev->sync_aperture;
+ u32 old, live;
+
+ do {
+ smp_rmb();
+ old = (u32)atomic_read(&sp->min_val[id]);
+ live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
+ } while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);
+
+ BUG_ON(!check_max(sp, id, live));
+
+ return live;
+}
+
+/**
+ * Get the current syncpoint value
+ */
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
+{
+ u32 val;
+
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+ val = nvhost_syncpt_update_min(sp, id);
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+ return val;
+}
+
+/**
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ struct nvhost_master *dev = syncpt_to_dev(sp);
+ BUG_ON(!nvhost_module_powered(&dev->mod));
+ BUG_ON(!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id));
+ writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
+ wmb();
+}
+
+/**
+ * Increment syncpoint value from cpu, updating cache
+ */
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
+{
+ nvhost_syncpt_incr_max(sp, id, 1);
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+ nvhost_syncpt_cpu_incr(sp, id);
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+}
+
+/**
+ * Main entrypoint for syncpoint value waits.
+ */
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
+ u32 thresh, u32 timeout)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ void *ref;
+ int err = 0;
+
+ BUG_ON(!check_max(sp, id, thresh));
+
+ /* first check cache */
+ if (nvhost_syncpt_min_cmp(sp, id, thresh))
+ return 0;
+
+ /* keep host alive */
+ nvhost_module_busy(&syncpt_to_dev(sp)->mod);
+
+ if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
+ /* try to read from register */
+ u32 val = nvhost_syncpt_update_min(sp, id);
+ if ((s32)(val - thresh) >= 0)
+ goto done;
+ }
+
+ if (!timeout) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* schedule a wakeup when the syncpoint value is reached */
+ err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
+ NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq, &ref);
+ if (err)
+ goto done;
+
+ err = -EAGAIN;
+ /* wait for the syncpoint, or timeout, or signal */
+ while (timeout) {
+ u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
+ int remain = wait_event_interruptible_timeout(wq,
+ nvhost_syncpt_min_cmp(sp, id, thresh),
+ check);
+ if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
+ err = 0;
+ break;
+ }
+ if (remain < 0) {
+ err = remain;
+ break;
+ }
+ if (timeout != NVHOST_NO_TIMEOUT)
+ timeout -= check;
+ if (timeout) {
+ dev_warn(&syncpt_to_dev(sp)->pdev->dev,
+ "syncpoint id %d (%s) stuck waiting %d\n",
+ id, nvhost_syncpt_name(id), thresh);
+ nvhost_syncpt_debug(sp);
+ }
+ }
+ nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);
+
+done:
+ nvhost_module_idle(&syncpt_to_dev(sp)->mod);
+ return err;
+}
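+
+/* Illustrative only (not part of the driver): a caller typically blocks on
+ * a fence value obtained when work was submitted, assuming `sp` and a
+ * jiffies-based timeout:
+ *
+ *	int err = nvhost_syncpt_wait_timeout(sp, NVSYNCPT_3D, fence,
+ *					     msecs_to_jiffies(2000));
+ *
+ * -EAGAIN means the fence did not signal in time; a zero timeout makes the
+ * call a non-blocking poll.
+ */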
+
+static const char *s_syncpt_names[32] = {
+ "", "", "", "", "", "", "", "", "", "", "", "",
+ "vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4", "vi_isp_5",
+ "2d_0", "2d_1",
+ "", "",
+ "3d", "mpe", "disp0", "disp1", "vblank0", "vblank1", "mpe_ebm_eof", "mpe_wr_safe",
+ "2d_tinyblt", "dsi"
+};
+
+const char *nvhost_syncpt_name(u32 id)
+{
+ BUG_ON(id >= ARRAY_SIZE(s_syncpt_names));
+ return s_syncpt_names[id];
+}
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
+{
+ u32 i;
+ for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
+ u32 max = nvhost_syncpt_read_max(sp, i);
+ if (!max)
+ continue;
+ dev_info(&syncpt_to_dev(sp)->pdev->dev,
+ "id %d (%s) min %d max %d\n",
+ i, nvhost_syncpt_name(i),
+ nvhost_syncpt_update_min(sp, i), max);
+ }
+}
diff --git a/drivers/video/tegra/host/nvhost_syncpt.h b/drivers/video/tegra/host/nvhost_syncpt.h
new file mode 100644
index 000000000000..f161f2051406
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_syncpt.h
@@ -0,0 +1,150 @@
+/*
+ * drivers/video/tegra/host/nvhost_syncpt.h
+ *
+ * Tegra Graphics Host Syncpoints
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVHOST_SYNCPT_H
+#define __NVHOST_SYNCPT_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#include "nvhost_hardware.h"
+
+#define NVSYNCPT_VI_ISP_0 (12)
+#define NVSYNCPT_VI_ISP_1 (13)
+#define NVSYNCPT_VI_ISP_2 (14)
+#define NVSYNCPT_VI_ISP_3 (15)
+#define NVSYNCPT_VI_ISP_4 (16)
+#define NVSYNCPT_VI_ISP_5 (17)
+#define NVSYNCPT_2D_0 (18)
+#define NVSYNCPT_2D_1 (19)
+#define NVSYNCPT_3D (22)
+#define NVSYNCPT_MPE (23)
+#define NVSYNCPT_DISP0 (24)
+#define NVSYNCPT_DISP1 (25)
+#define NVSYNCPT_VBLANK0 (26)
+#define NVSYNCPT_VBLANK1 (27)
+#define NVSYNCPT_MPE_EBM_EOF (28)
+#define NVSYNCPT_MPE_WR_SAFE (29)
+#define NVSYNCPT_DSI (31)
+#define NVSYNCPT_INVALID (-1)
+
+/*#define NVSYNCPT_2D_CHANNEL2_0 (20) */
+/*#define NVSYNCPT_2D_CHANNEL2_1 (21) */
+/*#define NVSYNCPT_2D_TINYBLT_WAR (30)*/
+/*#define NVSYNCPT_2D_TINYBLT_RESTORE_CLASS_ID (30)*/
+
+/* sync points that are wholly managed by the client */
+#define NVSYNCPTS_CLIENT_MANAGED ( \
+ BIT(NVSYNCPT_DISP0) | BIT(NVSYNCPT_DISP1) | BIT(NVSYNCPT_DSI) | \
+ BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_2) | \
+ BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | BIT(NVSYNCPT_VI_ISP_5) | \
+ BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
+ BIT(NVSYNCPT_2D_1))
+
+#define NVWAITBASE_2D_0 (1)
+#define NVWAITBASE_2D_1 (2)
+#define NVWAITBASE_3D (3)
+#define NVWAITBASE_MPE (4)
+
+struct nvhost_syncpt {
+ atomic_t min_val[NV_HOST1X_SYNCPT_NB_PTS];
+ atomic_t max_val[NV_HOST1X_SYNCPT_NB_PTS];
+ u32 base_val[NV_HOST1X_SYNCPT_NB_BASES];
+};
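+
+/* Shadow semantics, illustratively: max_val[id] counts the increments the
+ * CPU has promised so far (e.g. queued in submitted command buffers), while
+ * min_val[id] caches the value last read back from hardware. With min == 10
+ * and max == 12, two increments are still outstanding; min catching up to
+ * max means all submitted work has retired.
+ */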
+
+/**
+ * Updates the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_incr_max(struct nvhost_syncpt *sp,
+ u32 id, u32 incrs)
+{
+ return (u32)atomic_add_return(incrs, &sp->max_val[id]);
+}
+
+/**
+ * Sets the value sent to hardware.
+ */
+static inline u32 nvhost_syncpt_set_max(struct nvhost_syncpt *sp,
+ u32 id, u32 val)
+{
+ atomic_set(&sp->max_val[id], val);
+ smp_wmb();
+ return val;
+}
+
+static inline u32 nvhost_syncpt_read_max(struct nvhost_syncpt *sp, u32 id)
+{
+ smp_rmb();
+ return (u32)atomic_read(&sp->max_val[id]);
+}
+
+/**
+ * Returns true if syncpoint has reached threshold
+ */
+static inline bool nvhost_syncpt_min_cmp(struct nvhost_syncpt *sp,
+ u32 id, u32 thresh)
+{
+ u32 cur;
+ smp_rmb();
+ cur = (u32)atomic_read(&sp->min_val[id]);
+ return ((s32)(cur - thresh) >= 0);
+}
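+
+/* The (s32)(cur - thresh) >= 0 idiom above compares values on a wrapping
+ * 32-bit circle: e.g. cur = 0x00000001 and thresh = 0xfffffffe give
+ * (s32)0x3 >= 0, so cur is correctly treated as "after" thresh across the
+ * wrap. It stays valid while the two values are within 2^31 of each other.
+ */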
+
+/**
+ * Returns true if syncpoint min == max
+ */
+static inline bool nvhost_syncpt_min_eq_max(struct nvhost_syncpt *sp, u32 id)
+{
+ int min, max;
+ smp_rmb();
+ min = atomic_read(&sp->min_val[id]);
+ max = atomic_read(&sp->max_val[id]);
+ return (min == max);
+}
+
+void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id);
+
+u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_save(struct nvhost_syncpt *sp);
+
+void nvhost_syncpt_reset(struct nvhost_syncpt *sp);
+
+u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id);
+
+void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id);
+
+int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh,
+ u32 timeout);
+
+static inline int nvhost_syncpt_wait(struct nvhost_syncpt *sp, u32 id, u32 thresh)
+{
+ return nvhost_syncpt_wait_timeout(sp, id, thresh, MAX_SCHEDULE_TIMEOUT);
+}
+
+
+const char *nvhost_syncpt_name(u32 id);
+
+void nvhost_syncpt_debug(struct nvhost_syncpt *sp);
+
+#endif
diff --git a/drivers/video/tegra/nvmap/Makefile b/drivers/video/tegra/nvmap/Makefile
new file mode 100644
index 000000000000..59449abc3edc
--- /dev/null
+++ b/drivers/video/tegra/nvmap/Makefile
@@ -0,0 +1,6 @@
+obj-y += nvmap.o
+obj-y += nvmap_dev.o
+obj-y += nvmap_handle.o
+obj-y += nvmap_heap.o
+obj-y += nvmap_ioctl.o
+obj-$(CONFIG_NVMAP_RECLAIM_UNPINNED_VM) += nvmap_mru.o
\ No newline at end of file
diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c
new file mode 100644
index 000000000000..262f1b9b357d
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap.c
@@ -0,0 +1,725 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap.c
+ *
+ * Memory manager for Tegra GPU
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/rbtree.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* private nvmap_handle flag for pinning duplicate detection */
+#define NVMAP_HANDLE_VISITED (0x1ul << 31)
+
+/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
+static void map_iovmm_area(struct nvmap_handle *h)
+{
+ tegra_iovmm_addr_t va;
+ unsigned long i;
+
+ BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
+ BUG_ON(h->size & ~PAGE_MASK);
+ WARN_ON(!h->pgalloc.dirty);
+
+ for (va = h->pgalloc.area->iovm_start, i = 0;
+ va < (h->pgalloc.area->iovm_start + h->size);
+ i++, va += PAGE_SIZE) {
+ BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
+ tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va,
+ page_to_pfn(h->pgalloc.pages[i]));
+ }
+ h->pgalloc.dirty = false;
+}
+
+/* must be called inside nvmap_pin_lock, to ensure that an entire stream
+ * of pins will complete without racing with a second stream. handle should
+ * have nvmap_handle_get (or nvmap_validate_get) called before calling
+ * this function. */
+static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
+{
+ struct tegra_iovmm_area *area;
+ BUG_ON(!h->alloc);
+
+ if (atomic_inc_return(&h->pin) == 1) {
+ if (h->heap_pgalloc && !h->pgalloc.contig) {
+ area = nvmap_handle_iovmm(client, h);
+ if (!area) {
+ /* no race here, inside the pin mutex */
+ atomic_dec(&h->pin);
+ return -ENOMEM;
+ }
+ if (area != h->pgalloc.area)
+ h->pgalloc.dirty = true;
+ h->pgalloc.area = area;
+ }
+ }
+ return 0;
+}
+
+static int wait_pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
+{
+ int ret = 0;
+
+ ret = pin_locked(client, h);
+
+ if (ret) {
+ ret = wait_event_interruptible(client->share->pin_wait,
+ !pin_locked(client, h));
+ }
+
+ return ret ? -EINTR : 0;
+}
+
+/* doesn't need to be called inside nvmap_pin_lock, since this will only
+ * expand the available VM area */
+static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
+{
+ int ret = 0;
+
+ nvmap_mru_lock(client->share);
+
+ if (atomic_read(&h->pin) == 0) {
+ nvmap_err(client, "%s unpinning unpinned handle %p\n",
+ current->group_leader->comm, h);
+ nvmap_mru_unlock(client->share);
+ return 0;
+ }
+
+ BUG_ON(!h->alloc);
+
+ if (!atomic_dec_return(&h->pin)) {
+ if (h->heap_pgalloc && h->pgalloc.area) {
+ /* if a secure handle is clean (i.e., mapped into
+ * IOVMM), it needs to be zapped on unpin. */
+ if (h->secure && !h->pgalloc.dirty) {
+ tegra_iovmm_zap_vm(h->pgalloc.area);
+ h->pgalloc.dirty = true;
+ }
+ nvmap_mru_insert_locked(client->share, h);
+ ret = 1;
+ }
+ }
+
+ nvmap_mru_unlock(client->share);
+
+ nvmap_handle_put(h);
+ return ret;
+}
+
+static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
+{
+ struct nvmap_handle *h;
+ int w;
+
+ h = nvmap_validate_get(client, id);
+ if (unlikely(!h)) {
+ nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
+ current->group_leader->comm, (void *)id);
+ return 0;
+ }
+
+ nvmap_err(client, "%s unpinning unreferenced handle %p\n",
+ current->group_leader->comm, h);
+ WARN_ON(1);
+
+ w = handle_unpin(client, h);
+ nvmap_handle_put(h);
+ return w;
+}
+
+void nvmap_unpin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids)
+{
+ unsigned int i;
+ int do_wake = 0;
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle_ref *ref;
+
+ if (!ids[i])
+ continue;
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (ref) {
+ struct nvmap_handle *h = ref->handle;
+ int e = atomic_add_unless(&ref->pin, -1, 0);
+
+ nvmap_ref_unlock(client);
+
+ if (!e) {
+ nvmap_err(client, "%s unpinning unpinned "
+ "handle %08lx\n",
+ current->group_leader->comm, ids[i]);
+ } else {
+ do_wake |= handle_unpin(client, h);
+ }
+ } else {
+ nvmap_ref_unlock(client);
+ if (client->super)
+ do_wake |= handle_unpin_noref(client, ids[i]);
+ else
+ nvmap_err(client, "%s unpinning invalid "
+ "handle %08lx\n",
+ current->group_leader->comm, ids[i]);
+ }
+ }
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+}
+
+/* pins a list of handle_ref objects; same conditions apply as to
+ * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
+int nvmap_pin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids)
+{
+ int ret = 0;
+ int cnt = 0;
+ unsigned int i;
+ struct nvmap_handle **h = (struct nvmap_handle **)ids;
+ struct nvmap_handle_ref *ref;
+
+ /* to optimize for the common case (client provided valid handle
+ * references and the pin succeeds), increment the handle_ref pin
+ * count during validation. in error cases, the tree will need to
+ * be re-walked, since the handle_ref is discarded so that an
+ * allocation isn't required. if a handle_ref is not found,
+ * locally validate that the caller has permission to pin the handle;
+ * handle_refs are not created in this case, so it is possible that
+ * if the caller crashes after pinning a global handle, the handle
+ * will be permanently leaked. */
+ nvmap_ref_lock(client);
+ for (i = 0; i < nr && !ret; i++) {
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (ref) {
+ atomic_inc(&ref->pin);
+ nvmap_handle_get(h[i]);
+ } else {
+ struct nvmap_handle *verify;
+ nvmap_ref_unlock(client);
+ verify = nvmap_validate_get(client, ids[i]);
+ if (verify)
+ nvmap_warn(client, "%s pinning unreferenced "
+ "handle %p\n",
+ current->group_leader->comm, h[i]);
+ else
+ ret = -EPERM;
+ nvmap_ref_lock(client);
+ }
+ }
+ nvmap_ref_unlock(client);
+
+ nr = i;
+
+ if (ret)
+ goto out;
+
+ ret = mutex_lock_interruptible(&client->share->pin_lock);
+ if (WARN_ON(ret))
+ goto out;
+
+ for (cnt = 0; cnt < nr && !ret; cnt++) {
+ ret = wait_pin_locked(client, h[cnt]);
+ }
+ mutex_unlock(&client->share->pin_lock);
+
+ if (ret) {
+ int do_wake = 0;
+
+ for (i = 0; i < cnt; i++)
+ do_wake |= handle_unpin(client, h[i]);
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+
+ ret = -EINTR;
+ } else {
+ for (i = 0; i < nr; i++) {
+ if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
+ map_iovmm_area(h[i]);
+ }
+ }
+
+out:
+ if (ret) {
+ nvmap_ref_lock(client);
+ for (i = 0; i < nr; i++) {
+ ref = _nvmap_validate_id_locked(client, ids[i]);
+ if (!ref) {
+ nvmap_warn(client, "%s freed handle %p "
+ "during pinning\n",
+ current->group_leader->comm,
+ (void *)ids[i]);
+ continue;
+ }
+ atomic_dec(&ref->pin);
+ }
+ nvmap_ref_unlock(client);
+
+ for (i = cnt; i < nr; i++)
+ nvmap_handle_put(h[i]);
+ }
+
+ return ret;
+}
+
+static unsigned long handle_phys(struct nvmap_handle *h)
+{
+ u32 addr;
+
+ if (h->heap_pgalloc && h->pgalloc.contig) {
+ addr = page_to_phys(h->pgalloc.pages[0]);
+ } else if (h->heap_pgalloc) {
+ BUG_ON(!h->pgalloc.area);
+ addr = h->pgalloc.area->iovm_start;
+ } else {
+ addr = h->carveout->base;
+ }
+
+ return addr;
+}
+
+/* stores the physical address (+offset) of each handle relocation entry
+ * into its output location. see nvmap_pin_array for more details.
+ *
+ * each entry in arr (i.e., each relocation request) specifies two handles:
+ * the handle to pin (pin), and the handle where the address of pin should be
+ * written (patch). in pseudocode, this loop basically looks like:
+ *
+ * for (i = 0; i < nr; i++) {
+ * (pin, pin_offset, patch, patch_offset) = arr[i];
+ * patch[patch_offset] = address_of(pin) + pin_offset;
+ * }
+ */
+static int nvmap_reloc_pin_array(struct nvmap_client *client,
+ const struct nvmap_pinarray_elem *arr,
+ int nr, struct nvmap_handle *gather)
+{
+ struct nvmap_handle *last_patch = NULL;
+ unsigned int last_pfn = 0;
+ pte_t **pte;
+ void *addr;
+ int i;
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle *patch;
+ struct nvmap_handle *pin;
+ unsigned long reloc_addr;
+ unsigned long phys;
+ unsigned int pfn;
+
+ /* all of the handles were validated and referenced (via
+ * nvmap_handle_get) before this function was called, so the
+ * cast is safe here */
+ pin = (struct nvmap_handle *)arr[i].pin_mem;
+
+ if (arr[i].patch_mem == (unsigned long)last_patch) {
+ patch = last_patch;
+ } else if (arr[i].patch_mem == (unsigned long)gather) {
+ patch = gather;
+ } else {
+ if (last_patch)
+ nvmap_handle_put(last_patch);
+
+ patch = nvmap_get_handle_id(client, arr[i].patch_mem);
+ if (!patch) {
+ nvmap_free_pte(client->dev, pte);
+ return -EPERM;
+ }
+ last_patch = patch;
+ }
+
+ if (patch->heap_pgalloc) {
+ unsigned int page = arr[i].patch_offset >> PAGE_SHIFT;
+ phys = page_to_phys(patch->pgalloc.pages[page]);
+ phys += (arr[i].patch_offset & ~PAGE_MASK);
+ } else {
+ phys = patch->carveout->base + arr[i].patch_offset;
+ }
+
+ pfn = __phys_to_pfn(phys);
+ if (pfn != last_pfn) {
+ pgprot_t prot = nvmap_pgprot(patch, pgprot_kernel);
+ unsigned long kaddr = (unsigned long)addr;
+ set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
+ flush_tlb_kernel_page(kaddr);
+ last_pfn = pfn; /* remember the mapped page to skip redundant remaps */
+ }
+
+ reloc_addr = handle_phys(pin) + arr[i].pin_offset;
+ __raw_writel(reloc_addr, addr + (phys & ~PAGE_MASK));
+ }
+
+ nvmap_free_pte(client->dev, pte);
+
+ if (last_patch)
+ nvmap_handle_put(last_patch);
+
+ wmb();
+
+ return 0;
+}
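+
+/* Illustrative only: one relocation entry as consumed by the loop above.
+ * Assuming client handles `cmdbuf` (patched) and `surface` (pinned), an
+ * element might be built as:
+ *
+ *	struct nvmap_pinarray_elem elem = {
+ *		.patch_mem    = (unsigned long)cmdbuf,
+ *		.patch_offset = word_index * 4,
+ *		.pin_mem      = (unsigned long)surface,
+ *		.pin_offset   = 0,
+ *	};
+ *
+ * After relocation, word `word_index` of cmdbuf holds surface's address.
+ */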
+
+static int nvmap_validate_get_pin_array(struct nvmap_client *client,
+ const struct nvmap_pinarray_elem *arr,
+ int nr, struct nvmap_handle **h)
+{
+ int i;
+ int ret = 0;
+ int count = 0;
+
+ nvmap_ref_lock(client);
+
+ for (i = 0; i < nr; i++) {
+ struct nvmap_handle_ref *ref;
+
+ if (need_resched()) {
+ nvmap_ref_unlock(client);
+ schedule();
+ nvmap_ref_lock(client);
+ }
+
+ ref = _nvmap_validate_id_locked(client, arr[i].pin_mem);
+
+ if (!ref)
+ nvmap_warn(client, "falied to validate id\n");
+ else if (!ref->handle)
+ nvmap_warn(client, "id had no associated handle\n");
+ else if (!ref->handle->alloc)
+ nvmap_warn(client, "handle had no allocation\n");
+
+ if (!ref || !ref->handle || !ref->handle->alloc) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* a handle may be referenced multiple times in arr, but
+ * it will only be pinned once; this ensures that the
+ * minimum number of sync-queue slots in the host driver
+ * are dedicated to storing unpin lists, which allows
+ * for greater parallelism between the CPU and graphics
+ * processor */
+ if (ref->handle->flags & NVMAP_HANDLE_VISITED)
+ continue;
+
+ ref->handle->flags |= NVMAP_HANDLE_VISITED;
+
+ h[count] = nvmap_handle_get(ref->handle);
+ BUG_ON(!h[count]);
+ count++;
+ }
+
+ nvmap_ref_unlock(client);
+
+ if (ret) {
+ for (i = 0; i < count; i++) {
+ h[i]->flags &= ~NVMAP_HANDLE_VISITED;
+ nvmap_handle_put(h[i]);
+ }
+ }
+
+ return ret ?: count;
+}
+
+/* a typical mechanism host1x clients use for using the Tegra graphics
+ * processor is to build a command buffer which contains relocatable
+ * memory handle commands, and rely on the kernel to convert these in-place
+ * to addresses which are understood by the GPU hardware.
+ *
+ * this is implemented by having clients provide a sideband array
+ * of relocatable handles (+ offsets) and the location in the command
+ * buffer handle to patch with the GPU address when the client submits
+ * its command buffer to the host1x driver.
+ *
+ * the host driver also uses this relocation mechanism internally to
+ * relocate the client's (unpinned) command buffers into host-addressable
+ * memory.
+ *
+ * @client: nvmap_client which should be used for validation; should be
+ * owned by the process which is submitting command buffers
+ * @gather: special handle for relocated command buffer outputs used
+ * internally by the host driver. if this handle is encountered
+ * as an output handle in the relocation array, it is assumed
+ * to be a known-good output and is not validated.
+ * @arr: array of ((relocatable handle, offset), (output handle, offset))
+ * tuples.
+ * @nr: number of entries in arr
+ * @unique_arr: list of nvmap_handle objects which were pinned by
+ * nvmap_pin_array. must be unpinned by the caller after the
+ * command buffers referenced in gather have completed.
+ */
+int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
+ const struct nvmap_pinarray_elem *arr, int nr,
+ struct nvmap_handle **unique_arr)
+{
+ int count = 0;
+ int pinned = 0;
+ int ret = 0;
+ int i;
+
+ if (mutex_lock_interruptible(&client->share->pin_lock)) {
+ nvmap_warn(client, "%s interrupted when acquiring pin lock\n",
+ current->group_leader->comm);
+ return -EINTR;
+ }
+
+ count = nvmap_validate_get_pin_array(client, arr, nr, unique_arr);
+ if (count < 0) {
+ mutex_unlock(&client->share->pin_lock);
+ nvmap_warn(client, "failed to validate pin array\n");
+ return count;
+ }
+
+ for (i = 0; i < count; i++)
+ unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;
+
+ for (pinned = 0; pinned < count && !ret; pinned++)
+ ret = wait_pin_locked(client, unique_arr[pinned]);
+
+ mutex_unlock(&client->share->pin_lock);
+
+ if (!ret)
+ ret = nvmap_reloc_pin_array(client, arr, nr, gather);
+
+ if (WARN_ON(ret)) {
+ int do_wake = 0;
+
+ for (i = pinned; i < count; i++)
+ nvmap_handle_put(unique_arr[i]);
+
+ for (i = 0; i < pinned; i++)
+ do_wake |= handle_unpin(client, unique_arr[i]);
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+
+ return ret;
+ } else {
+ for (i = 0; i < count; i++) {
+ if (unique_arr[i]->heap_pgalloc &&
+ unique_arr[i]->pgalloc.dirty)
+ map_iovmm_area(unique_arr[i]);
+ }
+ }
+
+ return count;
+}
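+
+/* Illustrative only: callers pair nvmap_pin_array() with
+ * nvmap_unpin_handles() once the gathered command buffer has completed,
+ * assuming `unique` has room for `nr` handles:
+ *
+ *	int count = nvmap_pin_array(client, gather, arr, nr, unique);
+ *	if (count >= 0) {
+ *		... submit; wait for completion ...
+ *		nvmap_unpin_handles(client, unique, count);
+ *	}
+ */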
+
+unsigned long nvmap_pin(struct nvmap_client *client,
+ struct nvmap_handle_ref *ref)
+{
+ struct nvmap_handle *h;
+ unsigned long phys;
+ int ret = 0;
+
+ h = nvmap_handle_get(ref->handle);
+ if (WARN_ON(!h))
+ return -EINVAL;
+
+ atomic_inc(&ref->pin);
+
+ if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
+ ret = -EINTR;
+ } else {
+ ret = wait_pin_locked(client, h);
+ mutex_unlock(&client->share->pin_lock);
+ }
+
+ if (ret) {
+ atomic_dec(&ref->pin);
+ nvmap_handle_put(h);
+ } else {
+ if (h->heap_pgalloc && h->pgalloc.dirty)
+ map_iovmm_area(h);
+ phys = handle_phys(h);
+ }
+
+ return ret ?: phys;
+}
+
+unsigned long nvmap_handle_address(struct nvmap_client *c, unsigned long id)
+{
+ struct nvmap_handle *h;
+ unsigned long phys;
+
+ h = nvmap_get_handle_id(c, id);
+ if (!h)
+ return -EPERM;
+
+ phys = handle_phys(h);
+ nvmap_handle_put(h);
+
+ return phys;
+}
+
+void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
+{
+ atomic_dec(&ref->pin);
+ if (handle_unpin(client, ref->handle))
+ wake_up(&client->share->pin_wait);
+}
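+
+/* Illustrative only: nvmap_pin()/nvmap_unpin() bracket device access to a
+ * buffer, assuming `ref` came from nvmap_alloc() (declared below):
+ *
+ *	unsigned long phys = nvmap_pin(client, ref);
+ *	if (!IS_ERR_VALUE(phys)) {
+ *		... program the hardware with phys ...
+ *		nvmap_unpin(client, ref);
+ *	}
+ */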
+
+void nvmap_unpin_handles(struct nvmap_client *client,
+ struct nvmap_handle **h, int nr)
+{
+ int i;
+ int do_wake = 0;
+
+ for (i = 0; i < nr; i++) {
+ if (WARN_ON(!h[i]))
+ continue;
+ do_wake |= handle_unpin(client, h[i]);
+ }
+
+ if (do_wake)
+ wake_up(&client->share->pin_wait);
+}
+
+void *nvmap_mmap(struct nvmap_handle_ref *ref)
+{
+ struct nvmap_handle *h;
+ pgprot_t prot;
+ unsigned long adj_size;
+ unsigned long offs;
+ struct vm_struct *v;
+ void *p;
+
+ h = nvmap_handle_get(ref->handle);
+ if (!h)
+ return NULL;
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+
+ if (h->heap_pgalloc)
+ return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
+ -1, prot);
+
+ /* carveout - explicitly map the pfns into a vmalloc area */
+ adj_size = h->carveout->base & ~PAGE_MASK;
+ adj_size += h->size;
+ adj_size = PAGE_ALIGN(adj_size);
+
+ v = alloc_vm_area(adj_size);
+ if (!v) {
+ nvmap_handle_put(h);
+ return NULL;
+ }
+
+ p = v->addr + (h->carveout->base & ~PAGE_MASK);
+
+ for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
+ unsigned long addr = (unsigned long) v->addr + offs;
+ unsigned int pfn;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pfn = __phys_to_pfn(h->carveout->base + offs);
+ pgd = pgd_offset_k(addr);
+ pud = pud_alloc(&init_mm, pgd, addr);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd)
+ break;
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ break;
+ set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
+ flush_tlb_kernel_page(addr);
+ }
+
+ if (offs != adj_size) {
+ free_vm_area(v);
+ nvmap_handle_put(h);
+ return NULL;
+ }
+
+ /* leave the handle ref count incremented by 1, so that
+ * the handle will not be freed while the kernel mapping exists.
+ * nvmap_handle_put will be called by unmapping this address */
+ return p;
+}
+
+void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
+{
+ struct nvmap_handle *h;
+
+ if (!ref)
+ return;
+
+ h = ref->handle;
+
+ if (h->heap_pgalloc) {
+ vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
+ } else {
+ struct vm_struct *vm;
+ addr -= (h->carveout->base & ~PAGE_MASK);
+ vm = remove_vm_area(addr);
+ BUG_ON(!vm);
+ }
+
+ nvmap_handle_put(h);
+}
+
+struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
+ size_t align, unsigned int flags)
+{
+ const unsigned int default_heap = (NVMAP_HEAP_SYSMEM |
+ NVMAP_HEAP_CARVEOUT_GENERIC);
+ struct nvmap_handle_ref *r = NULL;
+ int err;
+
+ r = nvmap_create_handle(client, size);
+ if (IS_ERR(r))
+ return r;
+
+ err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
+ default_heap, align, flags);
+
+ if (err) {
+ nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+ return ERR_PTR(err);
+ }
+
+ return r;
+}
+
+void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
+{
+ nvmap_free_handle_id(client, nvmap_ref_to_id(r));
+}
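+
+/* Illustrative lifecycle for an in-kernel client (a sketch; the size and
+ * flags are examples only):
+ *
+ *	struct nvmap_handle_ref *r = nvmap_alloc(client, SZ_1M, PAGE_SIZE,
+ *						 NVMAP_HANDLE_WRITE_COMBINE);
+ *	void *va = IS_ERR(r) ? NULL : nvmap_mmap(r);
+ *	...
+ *	nvmap_munmap(r, va);
+ *	nvmap_free(client, r);
+ */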
diff --git a/drivers/video/tegra/nvmap/nvmap.h b/drivers/video/tegra/nvmap/nvmap.h
new file mode 100644
index 000000000000..9bb7da77a501
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap.h
@@ -0,0 +1,238 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap.h
+ *
+ * GPU memory management driver for Tegra
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
+#define __VIDEO_TEGRA_NVMAP_NVMAP_H
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <asm/atomic.h>
+
+#include <mach/nvmap.h>
+
+#include "nvmap_heap.h"
+
+#define nvmap_err(_client, _fmt, ...) \
+ dev_err(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_warn(_client, _fmt, ...) \
+ dev_warn(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_debug(_client, _fmt, ...) \
+ dev_dbg(nvmap_client_to_device(_client), \
+ "%s: "_fmt, __func__, ##__VA_ARGS__)
+
+#define nvmap_ref_to_id(_ref) ((unsigned long)(_ref)->handle)
+
+struct nvmap_device;
+struct page;
+struct tegra_iovmm_area;
+
+/* handles allocated using shared system memory (either IOVMM- or
+ * high-order page allocations) */
+struct nvmap_pgalloc {
+ struct page **pages;
+ struct tegra_iovmm_area *area;
+ struct list_head mru_list; /* MRU entry for IOVMM reclamation */
+ bool contig; /* contiguous system memory */
+ bool dirty; /* area is invalid and needs mapping */
+};
+
+struct nvmap_handle {
+ struct rb_node node; /* entry on global handle tree */
+ atomic_t ref; /* reference count (i.e., # of duplications) */
+ atomic_t pin; /* pin count */
+ unsigned long flags;
+ size_t size; /* padded (as-allocated) size */
+ size_t orig_size; /* original (as-requested) size */
+ struct nvmap_client *owner;
+ struct nvmap_device *dev;
+ union {
+ struct nvmap_pgalloc pgalloc;
+ struct nvmap_heap_block *carveout;
+ };
+ bool global; /* handle may be duplicated by other clients */
+ bool secure; /* zap IOVMM area on unpin */
+ bool heap_pgalloc; /* handle is page allocated (sysmem / iovmm) */
+ bool alloc; /* handle has memory allocated */
+ struct mutex lock;
+};
+
+struct nvmap_share {
+ struct tegra_iovmm_client *iovmm;
+ wait_queue_head_t pin_wait;
+ struct mutex pin_lock;
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+ spinlock_t mru_lock;
+ struct list_head *mru_lists;
+ int nr_mru;
+#endif
+};
+
+struct nvmap_carveout_commit {
+ size_t commit;
+ struct list_head list;
+};
+
+struct nvmap_client {
+ const char *name;
+ struct nvmap_device *dev;
+ struct nvmap_share *share;
+ struct rb_root handle_refs;
+ atomic_t iovm_commit;
+ size_t iovm_limit;
+ spinlock_t ref_lock;
+ bool super;
+ atomic_t count;
+ struct task_struct *task;
+ struct nvmap_carveout_commit carveout_commit[0];
+};
+
+/* handle_ref objects are client-local references to an nvmap_handle;
+ * they are distinct objects so that handles can be unpinned and
+ * unreferenced the correct number of times when a client abnormally
+ * terminates */
+struct nvmap_handle_ref {
+ struct nvmap_handle *handle;
+ struct rb_node node;
+ atomic_t dupes; /* number of times to free on file close */
+ atomic_t pin; /* number of times to unpin on free */
+};
+
+struct nvmap_vma_priv {
+ struct nvmap_handle *handle;
+ size_t offs;
+ atomic_t count; /* number of processes cloning the VMA */
+};
+
+static inline void nvmap_ref_lock(struct nvmap_client *priv)
+{
+ spin_lock(&priv->ref_lock);
+}
+
+static inline void nvmap_ref_unlock(struct nvmap_client *priv)
+{
+ spin_unlock(&priv->ref_lock);
+}
+
+struct device *nvmap_client_to_device(struct nvmap_client *client);
+
+pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);
+
+pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);
+
+void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);
+
+struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
+ size_t len, size_t align,
+ unsigned long usage,
+ unsigned int prot);
+
+unsigned long nvmap_carveout_usage(struct nvmap_client *c,
+ struct nvmap_heap_block *b);
+
+struct nvmap_carveout_node;
+void nvmap_carveout_commit_add(struct nvmap_client *client,
+ struct nvmap_carveout_node *node, size_t len);
+
+void nvmap_carveout_commit_subtract(struct nvmap_client *client,
+ struct nvmap_carveout_node *node,
+ size_t len);
+
+struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);
+
+struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
+ unsigned long handle);
+
+struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv,
+ unsigned long id);
+
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+ unsigned long id);
+
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+ size_t size);
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+ unsigned long id);
+
+
+int nvmap_alloc_handle_id(struct nvmap_client *client,
+ unsigned long id, unsigned int heap_mask,
+ size_t align, unsigned int flags);
+
+void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id);
+
+int nvmap_pin_ids(struct nvmap_client *client,
+ unsigned int nr, const unsigned long *ids);
+
+void nvmap_unpin_ids(struct nvmap_client *priv,
+ unsigned int nr, const unsigned long *ids);
+
+void _nvmap_handle_free(struct nvmap_handle *h);
+
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);
+
+void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);
+
+static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
+{
+ if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
+ pr_err("%s: %s getting a freed handle\n",
+ __func__, current->group_leader->comm);
+ if (atomic_read(&h->ref) <= 0)
+ return NULL;
+ }
+ return h;
+}
+
+static inline void nvmap_handle_put(struct nvmap_handle *h)
+{
+ int cnt = atomic_dec_return(&h->ref);
+
+ if (WARN_ON(cnt < 0)) {
+ pr_err("%s: %s put to negative references\n",
+ __func__, current->comm);
+ } else if (cnt == 0)
+ _nvmap_handle_free(h);
+}
+
+static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
+{
+ if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
+ return pgprot_dmacoherent(prot);
+ else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
+ return pgprot_writecombine(prot);
+ else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
+ return pgprot_inner_writeback(prot);
+ return prot;
+}
+
+int is_nvmap_vma(struct vm_area_struct *vma);
+
+#endif
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
new file mode 100644
index 000000000000..1961c714efe5
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -0,0 +1,1106 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_dev.c
+ *
+ * User-space interface to nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/backing-dev.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_ioctl.h"
+#include "nvmap_mru.h"
+
+#define NVMAP_NUM_PTES 64
+
+struct nvmap_carveout_node {
+ unsigned int heap_bit;
+ struct nvmap_heap *carveout;
+ int index;
+ struct list_head clients;
+ spinlock_t clients_lock;
+};
+
+struct nvmap_device {
+ struct vm_struct *vm_rgn;
+ pte_t *ptes[NVMAP_NUM_PTES];
+ unsigned long ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
+ unsigned int lastpte;
+ spinlock_t ptelock;
+
+ struct rb_root handles;
+ spinlock_t handle_lock;
+ wait_queue_head_t pte_wait;
+ struct miscdevice dev_super;
+ struct miscdevice dev_user;
+ struct nvmap_carveout_node *heaps;
+ int nr_carveouts;
+ struct nvmap_share iovmm_master;
+};
+
+struct nvmap_device *nvmap_dev;
+
+static struct backing_dev_info nvmap_bdi = {
+ .ra_pages = 0,
+ .capabilities = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
+ BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
+};
+
+static int nvmap_open(struct inode *inode, struct file *filp);
+static int nvmap_release(struct inode *inode, struct file *filp);
+static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
+static void nvmap_vma_open(struct vm_area_struct *vma);
+static void nvmap_vma_close(struct vm_area_struct *vma);
+static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+static const struct file_operations nvmap_user_fops = {
+ .owner = THIS_MODULE,
+ .open = nvmap_open,
+ .release = nvmap_release,
+ .unlocked_ioctl = nvmap_ioctl,
+ .mmap = nvmap_map,
+};
+
+static const struct file_operations nvmap_super_fops = {
+ .owner = THIS_MODULE,
+ .open = nvmap_open,
+ .release = nvmap_release,
+ .unlocked_ioctl = nvmap_ioctl,
+ .mmap = nvmap_map,
+};
+
+static struct vm_operations_struct nvmap_vma_ops = {
+ .open = nvmap_vma_open,
+ .close = nvmap_vma_close,
+ .fault = nvmap_vma_fault,
+};
+
+int is_nvmap_vma(struct vm_area_struct *vma)
+{
+ return vma->vm_ops == &nvmap_vma_ops;
+}
+
+struct device *nvmap_client_to_device(struct nvmap_client *client)
+{
+ if (client->super)
+ return client->dev->dev_super.this_device;
+ else
+ return client->dev->dev_user.this_device;
+}
+
+struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
+{
+ return &dev->iovmm_master;
+}
+
+/* allocates a PTE for the caller's use; returns the PTE pointer or
+ * an ERR_PTR-encoded errno. may be called from IRQ context */
+pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
+{
+ unsigned long flags;
+ unsigned long bit;
+
+ spin_lock_irqsave(&dev->ptelock, flags);
+ bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
+ if (bit == NVMAP_NUM_PTES) {
+ bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
+ if (bit == dev->lastpte)
+ bit = NVMAP_NUM_PTES;
+ }
+
+ if (bit == NVMAP_NUM_PTES) {
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev->lastpte = bit;
+ set_bit(bit, dev->ptebits);
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+
+ *vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
+ return &(dev->ptes[bit]);
+}
+
+/* allocates a PTE for the caller's use; returns the PTE pointer or
+ * an ERR_PTR-encoded errno. must be called from a sleepable context */
+pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
+{
+ int ret;
+ pte_t **pte;
+ ret = wait_event_interruptible(dev->pte_wait,
+ !IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));
+
+ if (ret == -ERESTARTSYS)
+ return ERR_PTR(-EINTR);
+
+ return pte;
+}
+
+/* frees a PTE */
+void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
+{
+ unsigned long addr;
+ unsigned int bit = pte - dev->ptes;
+ unsigned long flags;
+
+ if (WARN_ON(bit >= NVMAP_NUM_PTES))
+ return;
+
+ addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
+ set_pte_at(&init_mm, addr, *pte, 0);
+
+ spin_lock_irqsave(&dev->ptelock, flags);
+ clear_bit(bit, dev->ptebits);
+ spin_unlock_irqrestore(&dev->ptelock, flags);
+ wake_up(&dev->pte_wait);
+}
+
+/* verifies that the handle ref value "ref" is a valid handle ref for the
+ * file. caller must hold the file's ref_lock prior to calling this function */
+struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c,
+ unsigned long id)
+{
+ struct rb_node *n = c->handle_refs.rb_node;
+
+ while (n) {
+ struct nvmap_handle_ref *ref;
+ ref = rb_entry(n, struct nvmap_handle_ref, node);
+ if ((unsigned long)ref->handle == id)
+ return ref;
+ else if (id > (unsigned long)ref->handle)
+ n = n->rb_right;
+ else
+ n = n->rb_left;
+ }
+
+ return NULL;
+}
+
+struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle_ref *ref;
+ struct nvmap_handle *h = NULL;
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, id);
+ if (ref)
+ h = ref->handle;
+ if (h)
+ h = nvmap_handle_get(h);
+ nvmap_ref_unlock(client);
+ return h;
+}
+
+unsigned long nvmap_carveout_usage(struct nvmap_client *c,
+ struct nvmap_heap_block *b)
+{
+ struct nvmap_heap *h = nvmap_block_to_heap(b);
+ struct nvmap_carveout_node *n;
+ int i;
+
+ for (i = 0; i < c->dev->nr_carveouts; i++) {
+ n = &c->dev->heaps[i];
+ if (n->carveout == h)
+ return n->heap_bit;
+ }
+ return 0;
+}
+
+static int nvmap_flush_heap_block(struct nvmap_client *client,
+ struct nvmap_heap_block *block, size_t len)
+{
+ pte_t **pte;
+ void *addr;
+ unsigned long kaddr;
+ unsigned long phys = block->base;
+ unsigned long end = block->base + len;
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ kaddr = (unsigned long)addr;
+
+ while (phys < end) {
+ unsigned long next = (phys + PAGE_SIZE) & PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(phys);
+ void *base = (void *)kaddr + (phys & ~PAGE_MASK);
+
+ next = min(next, end);
+ set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, pgprot_kernel));
+ flush_tlb_kernel_page(kaddr);
+ __cpuc_flush_dcache_area(base, next - phys);
+ phys = next;
+ }
+
+ outer_flush_range(block->base, block->base + len);
+
+ nvmap_free_pte(client->dev, pte);
+ return 0;
+}
+
+void nvmap_carveout_commit_add(struct nvmap_client *client,
+ struct nvmap_carveout_node *node,
+ size_t len)
+{
+ unsigned long flags;
+
+ nvmap_ref_lock(client);
+ spin_lock_irqsave(&node->clients_lock, flags);
+ BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
+ client->carveout_commit[node->index].commit != 0);
+
+ client->carveout_commit[node->index].commit += len;
+ /* if this client isn't already on the list of nodes for this heap,
+ add it */
+ if (list_empty(&client->carveout_commit[node->index].list)) {
+ list_add(&client->carveout_commit[node->index].list,
+ &node->clients);
+ }
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+ nvmap_ref_unlock(client);
+}
+
+void nvmap_carveout_commit_subtract(struct nvmap_client *client,
+ struct nvmap_carveout_node *node,
+ size_t len)
+{
+ unsigned long flags;
+
+ if (!client)
+ return;
+
+ spin_lock_irqsave(&node->clients_lock, flags);
+ client->carveout_commit[node->index].commit -= len;
+ BUG_ON((ssize_t)client->carveout_commit[node->index].commit < 0);
+ /* if no more allocation in this carveout for this node, delete it */
+ if (!client->carveout_commit[node->index].commit)
+ list_del_init(&client->carveout_commit[node->index].list);
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+}
+
+static struct nvmap_client *get_client_from_carveout_commit(
+ struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
+{
+ struct nvmap_carveout_commit *first_commit = commit - node->index;
+ return (void *)first_commit - offsetof(struct nvmap_client,
+ carveout_commit);
+}
+
+struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
+ size_t len, size_t align,
+ unsigned long usage,
+ unsigned int prot)
+{
+ struct nvmap_carveout_node *co_heap;
+ struct nvmap_device *dev = client->dev;
+ int i;
+
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ struct nvmap_heap_block *block;
+ co_heap = &dev->heaps[i];
+
+ if (!(co_heap->heap_bit & usage))
+ continue;
+
+ block = nvmap_heap_alloc(co_heap->carveout, len, align, prot);
+ if (block) {
+ /* flush any stale data that may be left in the
+ * cache at the block's address, since the new
+ * block may be mapped uncached */
+ if (nvmap_flush_heap_block(client, block, len)) {
+ nvmap_heap_free(block);
+ return NULL;
+ } else
+ return block;
+ }
+ }
+
+ return NULL;
+}
+
+/* remove a handle from the device's tree of all handles; called
+ * when freeing handles. */
+int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
+{
+ spin_lock(&dev->handle_lock);
+
+ /* re-test inside the spinlock if the handle really has no clients;
+ * only remove the handle if it is unreferenced */
+ if (atomic_add_return(0, &h->ref) > 0) {
+ spin_unlock(&dev->handle_lock);
+ return -EBUSY;
+ }
+ smp_rmb();
+ BUG_ON(atomic_read(&h->ref) < 0);
+ BUG_ON(atomic_read(&h->pin) != 0);
+
+ rb_erase(&h->node, &dev->handles);
+
+ spin_unlock(&dev->handle_lock);
+ return 0;
+}
+
+/* adds a newly-created handle to the device master tree */
+void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+
+ spin_lock(&dev->handle_lock);
+ p = &dev->handles.rb_node;
+ while (*p) {
+ struct nvmap_handle *b;
+
+ parent = *p;
+ b = rb_entry(parent, struct nvmap_handle, node);
+ if (h > b)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&h->node, parent, p);
+ rb_insert_color(&h->node, &dev->handles);
+ spin_unlock(&dev->handle_lock);
+}
+
+/* validates that a handle is in the device master tree, and that the
+ * client has permission to access it */
+struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle *h = NULL;
+ struct rb_node *n;
+
+ spin_lock(&client->dev->handle_lock);
+
+ n = client->dev->handles.rb_node;
+
+ while (n) {
+ h = rb_entry(n, struct nvmap_handle, node);
+ if ((unsigned long)h == id) {
+ if (client->super || h->global || (h->owner == client))
+ h = nvmap_handle_get(h);
+ else
+ h = NULL;
+ spin_unlock(&client->dev->handle_lock);
+ return h;
+ }
+ if (id > (unsigned long)h)
+ n = n->rb_right;
+ else
+ n = n->rb_left;
+ }
+ spin_unlock(&client->dev->handle_lock);
+ return NULL;
+}
+
+struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
+ const char *name)
+{
+ struct nvmap_client *client;
+ int i;
+
+ if (WARN_ON(!dev))
+ return NULL;
+
+ client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
+ * dev->nr_carveouts), GFP_KERNEL);
+ if (!client)
+ return NULL;
+
+ client->name = name;
+ client->super = true;
+ client->dev = dev;
+ /* TODO: allocate unique IOVMM client for each nvmap client */
+ client->share = &dev->iovmm_master;
+ client->handle_refs = RB_ROOT;
+
+ atomic_set(&client->iovm_commit, 0);
+
+ client->iovm_limit = nvmap_mru_vm_size(client->share->iovmm);
+
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ INIT_LIST_HEAD(&client->carveout_commit[i].list);
+ client->carveout_commit[i].commit = 0;
+ }
+
+ get_task_struct(current);
+ client->task = current;
+
+ spin_lock_init(&client->ref_lock);
+ atomic_set(&client->count, 1);
+
+ return client;
+}
+
+static void destroy_client(struct nvmap_client *client)
+{
+ struct rb_node *n;
+ int i;
+
+ if (!client)
+ return;
+
+ while ((n = rb_first(&client->handle_refs))) {
+ struct nvmap_handle_ref *ref;
+ int pins, dupes;
+
+ ref = rb_entry(n, struct nvmap_handle_ref, node);
+ rb_erase(&ref->node, &client->handle_refs);
+
+ smp_rmb();
+ pins = atomic_read(&ref->pin);
+
+ mutex_lock(&ref->handle->lock);
+ if (ref->handle->owner == client)
+ ref->handle->owner = NULL;
+ mutex_unlock(&ref->handle->lock);
+
+ while (pins--)
+ nvmap_unpin_handles(client, &ref->handle, 1);
+
+ dupes = atomic_read(&ref->dupes);
+ while (dupes--)
+ nvmap_handle_put(ref->handle);
+
+ kfree(ref);
+ }
+
+ for (i = 0; i < client->dev->nr_carveouts; i++)
+ list_del(&client->carveout_commit[i].list);
+
+ if (client->task)
+ put_task_struct(client->task);
+
+ kfree(client);
+}
+
+struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
+{
+ if (WARN_ON(!client))
+ return NULL;
+
+ if (WARN_ON(!atomic_add_unless(&client->count, 1, 0)))
+ return NULL;
+
+ return client;
+}
+
+struct nvmap_client *nvmap_client_get_file(int fd)
+{
+ struct nvmap_client *client = ERR_PTR(-EFAULT);
+ struct file *f = fget(fd);
+ if (!f)
+ return ERR_PTR(-EINVAL);
+
+ if ((f->f_op == &nvmap_user_fops) || (f->f_op == &nvmap_super_fops)) {
+ client = f->private_data;
+ atomic_inc(&client->count);
+ }
+
+ fput(f);
+ return client;
+}
+
+void nvmap_client_put(struct nvmap_client *client)
+{
+ if (!client)
+ return;
+
+ if (!atomic_dec_return(&client->count))
+ destroy_client(client);
+}
+
+static int nvmap_open(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *miscdev = filp->private_data;
+ struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
+ struct nvmap_client *priv;
+ int ret;
+
+ ret = nonseekable_open(inode, filp);
+ if (unlikely(ret))
+ return ret;
+
+ BUG_ON(dev != nvmap_dev);
+ priv = nvmap_create_client(dev, "user");
+ if (!priv)
+ return -ENOMEM;
+
+ priv->super = (filp->f_op == &nvmap_super_fops);
+
+ filp->f_mapping->backing_dev_info = &nvmap_bdi;
+
+ filp->private_data = priv;
+ return 0;
+}
+
+static int nvmap_release(struct inode *inode, struct file *filp)
+{
+ nvmap_client_put(filp->private_data);
+ return 0;
+}
+
+static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
+{
+ struct nvmap_vma_priv *priv;
+
+ /* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
+ * will be stored in vm_private_data and faulted in. until the
+ * ioctl is made, the VMA is mapped no-access */
+ vma->vm_private_data = NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->offs = 0;
+ priv->handle = NULL;
+ atomic_set(&priv->count, 1);
+
+ vma->vm_flags |= VM_SHARED;
+ vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
+ vma->vm_ops = &nvmap_vma_ops;
+ vma->vm_private_data = priv;
+
+ return 0;
+}
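+
+/* illustrative user-space sequence for the two-step protocol above (a
+ * sketch against this driver's ioctl structs, not a complete program):
+ *
+ *	int fd = open("/dev/nvmap", O_RDWR);
+ *	void *va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *	struct nvmap_map_caller op = {
+ *		.handle = h,			// from NVMAP_IOC_CREATE
+ *		.offset = 0,
+ *		.length = len,
+ *		.addr   = (unsigned long)va,
+ *	};
+ *	ioctl(fd, NVMAP_IOC_MMAP, &op);	// faults are serviced from here on
+ */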
+
+static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ void __user *uarg = (void __user *)arg;
+
+ if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
+ return -ENOTTY;
+
+ if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
+ return -ENOTTY;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (err)
+ return -EFAULT;
+
+ switch (cmd) {
+ case NVMAP_IOC_CLAIM:
+		nvmap_warn(filp->private_data, "preserved handles not "
+			   "supported\n");
+ err = -ENODEV;
+ break;
+ case NVMAP_IOC_CREATE:
+ case NVMAP_IOC_FROM_ID:
+ err = nvmap_ioctl_create(filp, cmd, uarg);
+ break;
+
+ case NVMAP_IOC_GET_ID:
+ err = nvmap_ioctl_getid(filp, uarg);
+ break;
+
+ case NVMAP_IOC_PARAM:
+ err = nvmap_ioctl_get_param(filp, uarg);
+ break;
+
+ case NVMAP_IOC_UNPIN_MULT:
+ case NVMAP_IOC_PIN_MULT:
+ err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT, uarg);
+ break;
+
+ case NVMAP_IOC_ALLOC:
+ err = nvmap_ioctl_alloc(filp, uarg);
+ break;
+
+ case NVMAP_IOC_FREE:
+ err = nvmap_ioctl_free(filp, arg);
+ break;
+
+ case NVMAP_IOC_MMAP:
+ err = nvmap_map_into_caller_ptr(filp, uarg);
+ break;
+
+ case NVMAP_IOC_WRITE:
+ case NVMAP_IOC_READ:
+ err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg);
+ break;
+
+ case NVMAP_IOC_CACHE:
+ err = nvmap_ioctl_cache_maint(filp, uarg);
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+ return err;
+}
+
+/* to ensure that the backing store for the VMA isn't freed while a fork'd
+ * reference still exists, nvmap_vma_open increments the reference count on
+ * the handle, and nvmap_vma_close decrements it. alternatively, we could
+ * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
+ */
+static void nvmap_vma_open(struct vm_area_struct *vma)
+{
+ struct nvmap_vma_priv *priv;
+
+ priv = vma->vm_private_data;
+
+ BUG_ON(!priv);
+
+ atomic_inc(&priv->count);
+}
+
+static void nvmap_vma_close(struct vm_area_struct *vma)
+{
+ struct nvmap_vma_priv *priv = vma->vm_private_data;
+
+ if (priv && !atomic_dec_return(&priv->count)) {
+ if (priv->handle)
+ nvmap_handle_put(priv->handle);
+ kfree(priv);
+ }
+
+ vma->vm_private_data = NULL;
+}
+
+static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct nvmap_vma_priv *priv;
+ unsigned long offs;
+
+ offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
+ priv = vma->vm_private_data;
+ if (!priv || !priv->handle || !priv->handle->alloc)
+ return VM_FAULT_SIGBUS;
+
+ offs += priv->offs;
+ /* if the VMA was split for some reason, vm_pgoff will be the VMA's
+ * offset from the original VMA */
+ offs += (vma->vm_pgoff << PAGE_SHIFT);
+
+ if (offs >= priv->handle->size)
+ return VM_FAULT_SIGBUS;
+
+ if (!priv->handle->heap_pgalloc) {
+ unsigned long pfn;
+ BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
+ pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
+ vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ return VM_FAULT_NOPAGE;
+ } else {
+ struct page *page;
+ offs >>= PAGE_SHIFT;
+ page = priv->handle->pgalloc.pages[offs];
+ if (page)
+ get_page(page);
+ vmf->page = page;
+ return (page) ? 0 : VM_FAULT_SIGBUS;
+ }
+}
+
+static ssize_t attr_show_usage(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev);
+
+ return sprintf(buf, "%08x\n", node->heap_bit);
+}
+
+static struct device_attribute heap_attr_show_usage =
+ __ATTR(usage, S_IRUGO, attr_show_usage, NULL);
+
+static struct attribute *heap_extra_attrs[] = {
+ &heap_attr_show_usage.attr,
+ NULL,
+};
+
+static struct attribute_group heap_extra_attr_group = {
+ .attrs = heap_extra_attrs,
+};
+
+static void client_stringify(struct nvmap_client *client, struct seq_file *s)
+{
+ char task_comm[sizeof(client->task->comm)];
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%8s %16s %8u", client->name, task_comm,
+ client->task->pid);
+}
+
+static void allocations_stringify(struct nvmap_client *client,
+ struct seq_file *s)
+{
+ struct rb_node *n = rb_first(&client->handle_refs);
+ unsigned long long total = 0;
+
+ for (; n != NULL; n = rb_next(n)) {
+ struct nvmap_handle_ref *ref =
+ rb_entry(n, struct nvmap_handle_ref, node);
+ struct nvmap_handle *handle = ref->handle;
+ if (handle->alloc && !handle->heap_pgalloc) {
+ seq_printf(s, " %8u@%8lx ", handle->size,
+ handle->carveout->base);
+ total += handle->size;
+ }
+ }
+ seq_printf(s, " total: %llu\n", total);
+}
+
+static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
+{
+ struct nvmap_carveout_node *node = s->private;
+ struct nvmap_carveout_commit *commit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&node->clients_lock, flags);
+ list_for_each_entry(commit, &node->clients, list) {
+ struct nvmap_client *client =
+ get_client_from_carveout_commit(node, commit);
+ client_stringify(client, s);
+ allocations_stringify(client, s);
+ }
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+
+ return 0;
+}
+
+static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvmap_debug_allocations_show,
+ inode->i_private);
+}
+
+static const struct file_operations debug_allocations_fops = {
+ .open = nvmap_debug_allocations_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
+{
+ struct nvmap_carveout_node *node = s->private;
+ struct nvmap_carveout_commit *commit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&node->clients_lock, flags);
+ list_for_each_entry(commit, &node->clients, list) {
+ struct nvmap_client *client =
+ get_client_from_carveout_commit(node, commit);
+ client_stringify(client, s);
+ seq_printf(s, " %8u\n", commit->commit);
+ }
+ spin_unlock_irqrestore(&node->clients_lock, flags);
+
+ return 0;
+}
+
+static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvmap_debug_clients_show, inode->i_private);
+}
+
+static const struct file_operations debug_clients_fops = {
+ .open = nvmap_debug_clients_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int nvmap_probe(struct platform_device *pdev)
+{
+ struct nvmap_platform_data *plat = pdev->dev.platform_data;
+ struct nvmap_device *dev;
+ struct dentry *nvmap_debug_root;
+ unsigned int i;
+ int e;
+
+ if (!plat) {
+ dev_err(&pdev->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(nvmap_dev != NULL)) {
+ dev_err(&pdev->dev, "only one nvmap device may be present\n");
+ return -ENODEV;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&pdev->dev, "out of memory for device\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_user.minor = MISC_DYNAMIC_MINOR;
+ dev->dev_user.name = "nvmap";
+ dev->dev_user.fops = &nvmap_user_fops;
+ dev->dev_user.parent = &pdev->dev;
+
+ dev->dev_super.minor = MISC_DYNAMIC_MINOR;
+ dev->dev_super.name = "knvmap";
+ dev->dev_super.fops = &nvmap_super_fops;
+ dev->dev_super.parent = &pdev->dev;
+
+ dev->handles = RB_ROOT;
+
+ init_waitqueue_head(&dev->pte_wait);
+
+ init_waitqueue_head(&dev->iovmm_master.pin_wait);
+ mutex_init(&dev->iovmm_master.pin_lock);
+ dev->iovmm_master.iovmm =
+ tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL);
+ if (IS_ERR(dev->iovmm_master.iovmm)) {
+ e = PTR_ERR(dev->iovmm_master.iovmm);
+ dev_err(&pdev->dev, "couldn't create iovmm client\n");
+ goto fail;
+ }
+ dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE);
+ if (!dev->vm_rgn) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate remapping region\n");
+ goto fail;
+ }
+ e = nvmap_mru_init(&dev->iovmm_master);
+ if (e) {
+ dev_err(&pdev->dev, "couldn't initialize MRU lists\n");
+ goto fail;
+ }
+
+ spin_lock_init(&dev->ptelock);
+ spin_lock_init(&dev->handle_lock);
+
+ for (i = 0; i < NVMAP_NUM_PTES; i++) {
+ unsigned long addr;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
+ pgd = pgd_offset_k(addr);
+ pud = pud_alloc(&init_mm, pgd, addr);
+ if (!pud) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate page tables\n");
+ goto fail;
+ }
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate page tables\n");
+ goto fail;
+ }
+ dev->ptes[i] = pte_alloc_kernel(pmd, addr);
+ if (!dev->ptes[i]) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate page tables\n");
+ goto fail;
+ }
+ }
+
+ e = misc_register(&dev->dev_user);
+ if (e) {
+ dev_err(&pdev->dev, "unable to register miscdevice %s\n",
+ dev->dev_user.name);
+ goto fail;
+ }
+
+ e = misc_register(&dev->dev_super);
+ if (e) {
+ dev_err(&pdev->dev, "unable to register miscdevice %s\n",
+ dev->dev_super.name);
+ goto fail;
+ }
+
+ dev->nr_carveouts = 0;
+ dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
+ plat->nr_carveouts, GFP_KERNEL);
+ if (!dev->heaps) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
+ goto fail;
+ }
+
+ nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
+ if (IS_ERR_OR_NULL(nvmap_debug_root))
+ dev_err(&pdev->dev, "couldn't create debug files\n");
+
+ for (i = 0; i < plat->nr_carveouts; i++) {
+ struct nvmap_carveout_node *node = &dev->heaps[i];
+ const struct nvmap_platform_carveout *co = &plat->carveouts[i];
+ node->carveout = nvmap_heap_create(dev->dev_user.this_device,
+ co->name, co->base, co->size,
+ co->buddy_size, node);
+ if (!node->carveout) {
+ e = -ENOMEM;
+ dev_err(&pdev->dev, "couldn't create %s\n", co->name);
+ goto fail_heaps;
+ }
+ dev->nr_carveouts++;
+ spin_lock_init(&node->clients_lock);
+ node->index = i;
+ INIT_LIST_HEAD(&node->clients);
+ node->heap_bit = co->usage_mask;
+ if (nvmap_heap_create_group(node->carveout,
+ &heap_extra_attr_group))
+ dev_warn(&pdev->dev, "couldn't add extra attributes\n");
+
+ dev_info(&pdev->dev, "created carveout %s (%uKiB)\n",
+ co->name, co->size / 1024);
+
+ if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
+ struct dentry *heap_root =
+ debugfs_create_dir(co->name, nvmap_debug_root);
+ if (!IS_ERR_OR_NULL(heap_root)) {
+ debugfs_create_file("clients", 0664, heap_root,
+ node, &debug_clients_fops);
+ debugfs_create_file("allocations", 0664,
+ heap_root, node, &debug_allocations_fops);
+ }
+ }
+ }
+
+ platform_set_drvdata(pdev, dev);
+ nvmap_dev = dev;
+ return 0;
+fail_heaps:
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ struct nvmap_carveout_node *node = &dev->heaps[i];
+ nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
+ nvmap_heap_destroy(node->carveout);
+ }
+fail:
+ kfree(dev->heaps);
+ nvmap_mru_destroy(&dev->iovmm_master);
+ if (dev->dev_super.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&dev->dev_super);
+ if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&dev->dev_user);
+ if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
+ tegra_iovmm_free_client(dev->iovmm_master.iovmm);
+ if (dev->vm_rgn)
+ free_vm_area(dev->vm_rgn);
+ kfree(dev);
+ nvmap_dev = NULL;
+ return e;
+}
+
+static int nvmap_remove(struct platform_device *pdev)
+{
+ struct nvmap_device *dev = platform_get_drvdata(pdev);
+ struct rb_node *n;
+ struct nvmap_handle *h;
+ int i;
+
+ misc_deregister(&dev->dev_super);
+ misc_deregister(&dev->dev_user);
+
+ while ((n = rb_first(&dev->handles))) {
+ h = rb_entry(n, struct nvmap_handle, node);
+ rb_erase(&h->node, &dev->handles);
+ kfree(h);
+ }
+
+ if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
+ tegra_iovmm_free_client(dev->iovmm_master.iovmm);
+
+ nvmap_mru_destroy(&dev->iovmm_master);
+
+ for (i = 0; i < dev->nr_carveouts; i++) {
+ struct nvmap_carveout_node *node = &dev->heaps[i];
+ nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
+ nvmap_heap_destroy(node->carveout);
+ }
+ kfree(dev->heaps);
+
+ free_vm_area(dev->vm_rgn);
+ kfree(dev);
+ nvmap_dev = NULL;
+ return 0;
+}
+
+static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int nvmap_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver nvmap_driver = {
+ .probe = nvmap_probe,
+ .remove = nvmap_remove,
+ .suspend = nvmap_suspend,
+ .resume = nvmap_resume,
+
+ .driver = {
+ .name = "tegra-nvmap",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nvmap_init_driver(void)
+{
+ int e;
+
+ nvmap_dev = NULL;
+
+ e = nvmap_heap_init();
+ if (e)
+ goto fail;
+
+ e = platform_driver_register(&nvmap_driver);
+ if (e) {
+ nvmap_heap_deinit();
+ goto fail;
+ }
+
+fail:
+ return e;
+}
+fs_initcall(nvmap_init_driver);
+
+static void __exit nvmap_exit_driver(void)
+{
+ platform_driver_unregister(&nvmap_driver);
+ nvmap_heap_deinit();
+ nvmap_dev = NULL;
+}
+module_exit(nvmap_exit_driver);
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
new file mode 100644
index 000000000000..44f55b3f59ba
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -0,0 +1,518 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_handle.c
+ *
+ * Handle allocation and freeing routines for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/pgtable.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+#define NVMAP_SECURE_HEAPS (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM)
+#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
+#define GFP_NVMAP (__GFP_HIGHMEM | __GFP_NOWARN)
+#else
+#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+#endif
+/* handles may be arbitrarily large (16+MiB), and any handle allocated from
+ * the kernel (i.e., not a carveout handle) includes its array of pages. to
+ * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
+ * the array is allocated using vmalloc. */
+#define PAGELIST_VMALLOC_MIN (PAGE_SIZE * 2)
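+/* for example (illustrative arithmetic): with 4KiB pages and 4-byte
+ * pointers, a 16MiB handle needs 4096 struct page pointers -- 16KiB of
+ * metadata, far above the two-page threshold -- so its page array is
+ * vmalloc'd by altalloc() below. */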
+
+static inline void *altalloc(size_t len)
+{
+ if (len >= PAGELIST_VMALLOC_MIN)
+ return vmalloc(len);
+ else
+ return kmalloc(len, GFP_KERNEL);
+}
+
+static inline void altfree(void *ptr, size_t len)
+{
+ if (!ptr)
+ return;
+
+ if (len >= PAGELIST_VMALLOC_MIN)
+ vfree(ptr);
+ else
+ kfree(ptr);
+}
+
+void _nvmap_handle_free(struct nvmap_handle *h)
+{
+ struct nvmap_device *dev = h->dev;
+ unsigned int i, nr_page;
+
+ if (nvmap_handle_remove(dev, h) != 0)
+ return;
+
+ if (!h->alloc)
+ goto out;
+
+ if (!h->heap_pgalloc) {
+ nvmap_heap_free(h->carveout);
+ goto out;
+ }
+
+ nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);
+
+ BUG_ON(h->size & ~PAGE_MASK);
+ BUG_ON(!h->pgalloc.pages);
+
+ nvmap_mru_remove(nvmap_get_share_from_dev(dev), h);
+
+ if (h->pgalloc.area)
+ tegra_iovmm_free_vm(h->pgalloc.area);
+
+ for (i = 0; i < nr_page; i++)
+ __free_page(h->pgalloc.pages[i]);
+
+ altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));
+
+out:
+ kfree(h);
+}
+
+extern void __flush_dcache_page(struct address_space *, struct page *);
+
+static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
+{
+ struct page *page, *p, *e;
+ unsigned int order;
+ unsigned long base;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+ page = alloc_pages(gfp, order);
+
+ if (!page)
+ return NULL;
+
+ split_page(page, order);
+
+ e = page + (1 << order);
+ for (p = page + (size >> PAGE_SHIFT); p < e; p++)
+ __free_page(p);
+
+ e = page + (size >> PAGE_SHIFT);
+ for (p = page; p < e; p++)
+ __flush_dcache_page(page_mapping(p), p);
+
+ base = page_to_phys(page);
+ outer_flush_range(base, base + size);
+ return page;
+}
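+
+/* illustrative walk-through of nvmap_alloc_pages_exact(): a 20KiB (5-page)
+ * request rounds up to order 3 (8 pages); split_page() turns the block into
+ * eight independent order-0 pages, the three excess tail pages are freed,
+ * and the surviving run is flushed from both cache levels before being
+ * returned. */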
+
+static int handle_page_alloc(struct nvmap_client *client,
+ struct nvmap_handle *h, bool contiguous)
+{
+ size_t size = PAGE_ALIGN(h->size);
+ unsigned int nr_page = size >> PAGE_SHIFT;
+ pgprot_t prot;
+ unsigned int i = 0;
+ struct page **pages;
+
+ pages = altalloc(nr_page * sizeof(*pages));
+ if (!pages)
+ return -ENOMEM;
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+ if (nr_page == 1)
+ contiguous = true;
+#endif
+
+ h->pgalloc.area = NULL;
+ if (contiguous) {
+ struct page *page;
+ page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
+ if (!page)
+ goto fail;
+
+ for (i = 0; i < nr_page; i++)
+ pages[i] = nth_page(page, i);
+
+ } else {
+ for (i = 0; i < nr_page; i++) {
+ pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP, PAGE_SIZE);
+ if (!pages[i])
+ goto fail;
+ }
+
+#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+ h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
+ NULL, size, prot);
+ if (!h->pgalloc.area)
+ goto fail;
+
+ h->pgalloc.dirty = true;
+#endif
+ }
+
+ h->size = size;
+ h->pgalloc.pages = pages;
+ h->pgalloc.contig = contiguous;
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ return 0;
+
+fail:
+ while (i--)
+ __free_page(pages[i]);
+ altfree(pages, nr_page * sizeof(*pages));
+ return -ENOMEM;
+}
+
+static void alloc_handle(struct nvmap_client *client, size_t align,
+ struct nvmap_handle *h, unsigned int type)
+{
+ BUG_ON(type & (type - 1));
+
+ if (type & NVMAP_HEAP_CARVEOUT_MASK) {
+ struct nvmap_heap_block *b;
+ b = nvmap_carveout_alloc(client, h->size, align,
+ type, h->flags);
+ if (b) {
+ h->carveout = b;
+ h->heap_pgalloc = false;
+ h->alloc = true;
+ nvmap_carveout_commit_add(client,
+ nvmap_heap_to_arg(nvmap_block_to_heap(b)),
+ h->size);
+ }
+ } else if (type & NVMAP_HEAP_IOVMM) {
+ size_t reserved = PAGE_ALIGN(h->size);
+ int commit;
+ int ret;
+
+ BUG_ON(align > PAGE_SIZE);
+
+ /* increment the committed IOVM space prior to allocation
+ * to avoid race conditions with other threads simultaneously
+ * allocating. */
+ commit = atomic_add_return(reserved, &client->iovm_commit);
+
+ if (commit < client->iovm_limit)
+ ret = handle_page_alloc(client, h, false);
+ else
+ ret = -ENOMEM;
+
+ if (!ret) {
+ h->heap_pgalloc = true;
+ h->alloc = true;
+ } else {
+ atomic_sub(reserved, &client->iovm_commit);
+ }
+
+ } else if (type & NVMAP_HEAP_SYSMEM) {
+
+ if (handle_page_alloc(client, h, true) == 0) {
+ BUG_ON(!h->pgalloc.contig);
+ h->heap_pgalloc = true;
+ h->alloc = true;
+ }
+ }
+}
+
+/* small allocations will try to allocate from generic OS memory before
+ * any of the limited heaps, to increase the effective memory for graphics
+ * allocations, and to reduce fragmentation of the graphics heaps with
+ * sub-page splinters */
+static const unsigned int heap_policy_small[] = {
+ NVMAP_HEAP_CARVEOUT_IRAM,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+ NVMAP_HEAP_SYSMEM,
+#endif
+ NVMAP_HEAP_CARVEOUT_MASK,
+ NVMAP_HEAP_IOVMM,
+ 0,
+};
+
+static const unsigned int heap_policy_large[] = {
+ NVMAP_HEAP_CARVEOUT_IRAM,
+ NVMAP_HEAP_IOVMM,
+ NVMAP_HEAP_CARVEOUT_MASK,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+ NVMAP_HEAP_SYSMEM,
+#endif
+ 0,
+};
+
+int nvmap_alloc_handle_id(struct nvmap_client *client,
+ unsigned long id, unsigned int heap_mask,
+ size_t align, unsigned int flags)
+{
+ struct nvmap_handle *h = NULL;
+ const unsigned int *alloc_policy;
+ int nr_page;
+ int err = -ENOMEM;
+
+ align = max_t(size_t, align, L1_CACHE_BYTES);
+
+ /* can't do greater than page size alignment with page alloc */
+ if (align > PAGE_SIZE)
+ heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
+
+ h = nvmap_get_handle_id(client, id);
+
+ if (!h)
+ return -EINVAL;
+
+ if (h->alloc)
+ goto out;
+
+ nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ h->secure = !!(flags & NVMAP_HANDLE_SECURE);
+ h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
+
+ /* secure allocations can only be served from secure heaps */
+ if (h->secure)
+ heap_mask &= NVMAP_SECURE_HEAPS;
+
+ if (!heap_mask) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;
+
+ while (!h->alloc && *alloc_policy) {
+ unsigned int heap_type;
+
+ heap_type = *alloc_policy++;
+ heap_type &= heap_mask;
+
+ if (!heap_type)
+ continue;
+
+ heap_mask &= ~heap_type;
+
+ while (heap_type && !h->alloc) {
+ unsigned int heap;
+
+ /* iterate possible heaps MSB-to-LSB, since higher-
+ * priority carveouts will have higher usage masks */
+ heap = 1 << __fls(heap_type);
+ alloc_handle(client, align, h, heap);
+ heap_type &= ~heap;
+ }
+ }
+
+out:
+ err = (h->alloc) ? 0 : err;
+ nvmap_handle_put(h);
+ return err;
+}
+
+void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
+{
+ struct nvmap_handle_ref *ref;
+ struct nvmap_handle *h;
+ int pins;
+
+ nvmap_ref_lock(client);
+
+ ref = _nvmap_validate_id_locked(client, id);
+ if (!ref) {
+ nvmap_ref_unlock(client);
+ return;
+ }
+
+ BUG_ON(!ref->handle);
+ h = ref->handle;
+
+ if (atomic_dec_return(&ref->dupes)) {
+ nvmap_ref_unlock(client);
+ goto out;
+ }
+
+ smp_rmb();
+ pins = atomic_read(&ref->pin);
+ rb_erase(&ref->node, &client->handle_refs);
+
+ if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
+ atomic_sub(h->size, &client->iovm_commit);
+
+ if (h->alloc && !h->heap_pgalloc)
+ nvmap_carveout_commit_subtract(client,
+ nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
+ h->size);
+
+ nvmap_ref_unlock(client);
+
+ if (pins)
+ nvmap_err(client, "%s freeing pinned handle %p\n",
+ current->group_leader->comm, h);
+
+ while (pins--)
+ nvmap_unpin_handles(client, &ref->handle, 1);
+
+ if (h->owner == client)
+ h->owner = NULL;
+
+ kfree(ref);
+
+out:
+ BUG_ON(!atomic_read(&h->ref));
+ nvmap_handle_put(h);
+}
+
+static void add_handle_ref(struct nvmap_client *client,
+ struct nvmap_handle_ref *ref)
+{
+ struct rb_node **p, *parent = NULL;
+
+ nvmap_ref_lock(client);
+ p = &client->handle_refs.rb_node;
+ while (*p) {
+ struct nvmap_handle_ref *node;
+ parent = *p;
+ node = rb_entry(parent, struct nvmap_handle_ref, node);
+ if (ref->handle > node->handle)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&ref->node, parent, p);
+ rb_insert_color(&ref->node, &client->handle_refs);
+ nvmap_ref_unlock(client);
+}
+
+struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
+ size_t size)
+{
+ struct nvmap_handle *h;
+ struct nvmap_handle_ref *ref = NULL;
+
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref) {
+ kfree(h);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&h->ref, 1);
+ atomic_set(&h->pin, 0);
+ h->owner = client;
+ h->dev = client->dev;
+ BUG_ON(!h->owner);
+ h->size = h->orig_size = size;
+ h->flags = NVMAP_HANDLE_WRITE_COMBINE;
+ mutex_init(&h->lock);
+
+ nvmap_handle_add(client->dev, h);
+
+ atomic_set(&ref->dupes, 1);
+ ref->handle = h;
+ atomic_set(&ref->pin, 0);
+ add_handle_ref(client, ref);
+ return ref;
+}
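+
+/* sketch of typical in-kernel usage of nvmap_create_handle() together with
+ * nvmap_alloc_handle_id() from earlier in this file (illustrative only):
+ *
+ *	struct nvmap_handle_ref *r = nvmap_create_handle(client, SZ_1M);
+ *	if (!IS_ERR(r))
+ *		nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
+ *				      NVMAP_HEAP_SYSMEM, PAGE_SIZE,
+ *				      NVMAP_HANDLE_WRITE_COMBINE);
+ */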
+
+struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
+ unsigned long id)
+{
+ struct nvmap_handle_ref *ref = NULL;
+ struct nvmap_handle *h = NULL;
+
+ BUG_ON(!client || client->dev != nvmap_dev);
+ /* on success, the reference count for the handle should be
+ * incremented, so the success paths will not call nvmap_handle_put */
+ h = nvmap_validate_get(client, id);
+
+ if (!h) {
+ nvmap_debug(client, "%s duplicate handle failed\n",
+ current->group_leader->comm);
+ return ERR_PTR(-EPERM);
+ }
+
+ if (!h->alloc) {
+ nvmap_err(client, "%s duplicating unallocated handle\n",
+ current->group_leader->comm);
+ nvmap_handle_put(h);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nvmap_ref_lock(client);
+ ref = _nvmap_validate_id_locked(client, (unsigned long)h);
+
+ if (ref) {
+ /* handle already duplicated in client; just increment
+ * the reference count rather than re-duplicating it */
+ atomic_inc(&ref->dupes);
+ nvmap_ref_unlock(client);
+ return ref;
+ }
+
+ nvmap_ref_unlock(client);
+
+ /* verify that adding this handle to the process' access list
+ * won't exceed the IOVM limit */
+ if (h->heap_pgalloc && !h->pgalloc.contig && !client->super) {
+ int oc;
+ oc = atomic_add_return(h->size, &client->iovm_commit);
+ if (oc > client->iovm_limit) {
+ atomic_sub(h->size, &client->iovm_commit);
+ nvmap_handle_put(h);
+ nvmap_err(client, "duplicating %p in %s over-commits"
+ " IOVMM space\n", (void *)id,
+ current->group_leader->comm);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref) {
+ nvmap_handle_put(h);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (!h->heap_pgalloc)
+ nvmap_carveout_commit_add(client,
+ nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
+ h->size);
+
+ atomic_set(&ref->dupes, 1);
+ ref->handle = h;
+ atomic_set(&ref->pin, 0);
+ add_handle_ref(client, ref);
+ return ref;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.c b/drivers/video/tegra/nvmap/nvmap_heap.c
new file mode 100644
index 000000000000..abc72cc99720
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_heap.c
@@ -0,0 +1,812 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_heap.c
+ *
+ * GPU heap allocator.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <mach/nvmap.h>
+
+#include "nvmap_heap.h"
+
+/*
+ * "carveouts" are platform-defined regions of physically contiguous memory
+ * which are not managed by the OS. a platform may specify multiple carveouts,
+ * for either small special-purpose memory regions (like IRAM on Tegra SoCs)
+ * or reserved regions of main system memory.
+ *
+ * the carveout allocator returns allocations which are physically contiguous.
+ * to reduce external fragmentation, the allocation algorithm implemented in
+ * this file employs 3 strategies for keeping allocations of similar size
+ * grouped together inside the larger heap: the "small", "normal" and "huge"
+ * strategies. the size thresholds (in bytes) for determining which strategy
+ * to employ should be provided by the platform for each heap. it is possible
+ * for a platform to define a heap where only the "normal" strategy is used.
+ *
+ * o "normal" allocations use an address-order first-fit allocator (called
+ * BOTTOM_UP in the code below). each allocation is rounded up to be
+ * an integer multiple of the "small" allocation size.
+ *
+ * o "huge" allocations use an address-order last-fit allocator (called
+ * TOP_DOWN in the code below). like "normal" allocations, each allocation
+ * is rounded up to be an integer multiple of the "small" allocation size.
+ *
+ * o "small" allocations are treatedy differently: the heap manager maintains
+ * a pool of "small"-sized blocks internally from which allocations less
+ * than 1/2 of the "small" size are buddy-allocated. if a "small" allocation
+ * is requested and none of the buddy sub-heaps is able to service it,
+ * the heap manager will try to allocate a new buddy-heap.
+ *
+ * this allocator is intended to keep "splinters" colocated in the carveout,
+ * and to ensure that the minimum free block size in the carveout (i.e., the
+ * "small" threshold) is still a meaningful size.
+ *
+ */
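+
+/* worked example (illustrative numbers; see nvmap_heap_create() below for
+ * the small_alloc computation): a 32MiB carveout created with buddy_size =
+ * 128KiB gets small_alloc = max(2 * 128KiB, 32MiB / 256) = 256KiB. a 48KiB
+ * request (<= buddy_size / 2) is served by a buddy sub-heap; a 200KiB
+ * request is rounded up to 256KiB and placed bottom-up; a 1MiB request
+ * (already a 128KiB multiple) is placed top-down. */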
+
+#define MAX_BUDDY_NR 128 /* maximum buddies in a buddy allocator */
+
+enum direction {
+ TOP_DOWN,
+ BOTTOM_UP
+};
+
+enum block_type {
+ BLOCK_FIRST_FIT, /* block was allocated directly from the heap */
+ BLOCK_BUDDY, /* block was allocated from a buddy sub-heap */
+};
+
+struct heap_stat {
+ size_t free; /* total free size */
+ size_t free_largest; /* largest free block */
+ size_t free_count; /* number of free blocks */
+ size_t total; /* total size */
+ size_t largest; /* largest unique block */
+ size_t count; /* total number of blocks */
+};
+
+struct buddy_heap;
+
+struct buddy_block {
+ struct nvmap_heap_block block;
+ struct buddy_heap *heap;
+};
+
+struct list_block {
+ struct nvmap_heap_block block;
+ struct list_head all_list;
+ unsigned int mem_prot;
+ unsigned long orig_addr;
+ size_t size;
+ struct nvmap_heap *heap;
+ struct list_head free_list;
+};
+
+struct combo_block {
+ union {
+ struct list_block lb;
+ struct buddy_block bb;
+ };
+};
+
+struct buddy_bits {
+ unsigned int alloc:1;
+ unsigned int order:7; /* log2(MAX_BUDDY_NR); */
+};
+
+struct buddy_heap {
+ struct list_block *heap_base;
+ unsigned int nr_buddies;
+ struct list_head buddy_list;
+ struct buddy_bits bitmap[MAX_BUDDY_NR];
+};
+
+struct nvmap_heap {
+ struct list_head all_list;
+ struct list_head free_list;
+ struct mutex lock;
+ struct list_head buddy_list;
+ unsigned int min_buddy_shift;
+ unsigned int buddy_heap_size;
+ unsigned int small_alloc;
+ const char *name;
+ void *arg;
+ struct device dev;
+};
+
+static struct kmem_cache *buddy_heap_cache;
+static struct kmem_cache *block_cache;
+
+static inline struct nvmap_heap *parent_of(struct buddy_heap *heap)
+{
+ return heap->heap_base->heap;
+}
+
+static inline unsigned int order_of(size_t len, size_t min_shift)
+{
+ len = 2 * DIV_ROUND_UP(len, (1 << min_shift)) - 1;
+ return fls(len)-1;
+}
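+
+/* e.g. (illustrative): with min_shift = 12 (4KiB minimum buddies),
+ * order_of(12KiB, 12) computes DIV_ROUND_UP(12288, 4096) = 3 and then
+ * fls(2 * 3 - 1) - 1 = 2, so a 12KiB request occupies an order-2 (16KiB)
+ * buddy: the smallest power-of-two multiple of the minimum size that
+ * fits. */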
+
+/* accumulates usage statistics for the buddy heap into *stat; must be
+ * called while holding the parent heap's lock. */
+static void buddy_stat(struct buddy_heap *heap, struct heap_stat *stat)
+{
+ unsigned int index;
+ unsigned int shift = parent_of(heap)->min_buddy_shift;
+
+ for (index = 0; index < heap->nr_buddies;
+ index += (1 << heap->bitmap[index].order)) {
+ size_t curr = 1 << (heap->bitmap[index].order + shift);
+
+ stat->largest = max(stat->largest, curr);
+ stat->total += curr;
+ stat->count++;
+
+ if (!heap->bitmap[index].alloc) {
+ stat->free += curr;
+ stat->free_largest = max(stat->free_largest, curr);
+ stat->free_count++;
+ }
+ }
+}
+
+/* returns the base address of the heap and fills *stat with its usage
+ * statistics (including any free blocks in any buddy-heap suballocators);
+ * the heap lock is taken internally. */
+static unsigned long heap_stat(struct nvmap_heap *heap, struct heap_stat *stat)
+{
+ struct buddy_heap *bh;
+ struct list_block *l = NULL;
+ unsigned long base = -1ul;
+
+ memset(stat, 0, sizeof(*stat));
+ mutex_lock(&heap->lock);
+ list_for_each_entry(l, &heap->all_list, all_list) {
+ stat->total += l->size;
+ stat->largest = max(l->size, stat->largest);
+ stat->count++;
+ base = min(base, l->orig_addr);
+ }
+
+ list_for_each_entry(bh, &heap->buddy_list, buddy_list) {
+ buddy_stat(bh, stat);
+ /* the total counts are double-counted for buddy heaps
+ * since the blocks allocated for buddy heaps exist in the
+ * all_list; subtract out the doubly-added stats */
+ stat->total -= bh->heap_base->size;
+ stat->count--;
+ }
+
+ list_for_each_entry(l, &heap->free_list, free_list) {
+ stat->free += l->size;
+ stat->free_count++;
+ stat->free_largest = max(l->size, stat->free_largest);
+ }
+ mutex_unlock(&heap->lock);
+
+ return base;
+}
+
+static ssize_t heap_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t heap_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static struct device_attribute heap_stat_total_max =
+ __ATTR(total_max, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_total_count =
+ __ATTR(total_count, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_total_size =
+ __ATTR(total_size, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_max =
+ __ATTR(free_max, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_count =
+ __ATTR(free_count, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_free_size =
+ __ATTR(free_size, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_stat_base =
+ __ATTR(base, S_IRUGO, heap_stat_show, NULL);
+
+static struct device_attribute heap_attr_name =
+ __ATTR(name, S_IRUGO, heap_name_show, NULL);
+
+static struct attribute *heap_stat_attrs[] = {
+ &heap_stat_total_max.attr,
+ &heap_stat_total_count.attr,
+ &heap_stat_total_size.attr,
+ &heap_stat_free_max.attr,
+ &heap_stat_free_count.attr,
+ &heap_stat_free_size.attr,
+ &heap_stat_base.attr,
+ &heap_attr_name.attr,
+ NULL,
+};
+
+static struct attribute_group heap_stat_attr_group = {
+ .attrs = heap_stat_attrs,
+};
+
+static ssize_t heap_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+ return sprintf(buf, "%s\n", heap->name);
+}
+
+static ssize_t heap_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+ struct heap_stat stat;
+ unsigned long base;
+
+ base = heap_stat(heap, &stat);
+
+ if (attr == &heap_stat_total_max)
+ return sprintf(buf, "%u\n", stat.largest);
+ else if (attr == &heap_stat_total_count)
+ return sprintf(buf, "%u\n", stat.count);
+ else if (attr == &heap_stat_total_size)
+ return sprintf(buf, "%u\n", stat.total);
+ else if (attr == &heap_stat_free_max)
+ return sprintf(buf, "%u\n", stat.free_largest);
+ else if (attr == &heap_stat_free_count)
+ return sprintf(buf, "%u\n", stat.free_count);
+ else if (attr == &heap_stat_free_size)
+ return sprintf(buf, "%u\n", stat.free);
+ else if (attr == &heap_stat_base)
+ return sprintf(buf, "%08lx\n", base);
+ else
+ return -EINVAL;
+}
+
+static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
+ size_t size, size_t align,
+ unsigned int mem_prot)
+{
+ unsigned int index = 0;
+ unsigned int min_shift = parent_of(heap)->min_buddy_shift;
+ unsigned int order = order_of(size, min_shift);
+ unsigned int align_mask;
+ unsigned int best = heap->nr_buddies;
+ struct buddy_block *b;
+
+ if (heap->heap_base->mem_prot != mem_prot)
+ return NULL;
+
+ align = max(align, (size_t)(1 << min_shift));
+ align_mask = (align >> min_shift) - 1;
+
+ for (index = 0; index < heap->nr_buddies;
+ index += (1 << heap->bitmap[index].order)) {
+
+ if (heap->bitmap[index].alloc || (index & align_mask) ||
+ (heap->bitmap[index].order < order))
+ continue;
+
+ if (best == heap->nr_buddies ||
+ heap->bitmap[index].order < heap->bitmap[best].order)
+ best = index;
+
+ if (heap->bitmap[best].order == order)
+ break;
+ }
+
+ if (best == heap->nr_buddies)
+ return NULL;
+
+ b = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!b)
+ return NULL;
+
+ while (heap->bitmap[best].order != order) {
+ unsigned int buddy;
+ heap->bitmap[best].order--;
+ buddy = best ^ (1 << heap->bitmap[best].order);
+ heap->bitmap[buddy].order = heap->bitmap[best].order;
+ heap->bitmap[buddy].alloc = 0;
+ }
+ heap->bitmap[best].alloc = 1;
+ b->block.base = heap->heap_base->block.base + (best << min_shift);
+ b->heap = heap;
+ b->block.type = BLOCK_BUDDY;
+ return &b->block;
+}
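+
+/* illustrative split sequence for the loop above: carving an order-0 block
+ * out of a free order-2 block at index 0 halves it twice -- first into
+ * buddies {0, 2} at order 1, then {0, 1} at order 0 -- with each buddy
+ * index computed as best ^ (1 << order). index 0 is then marked allocated;
+ * indices 1 and 2 remain free for later requests. */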
+
+static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
+{
+ struct buddy_block *b = container_of(block, struct buddy_block, block);
+ struct buddy_heap *h = b->heap;
+ unsigned int min_shift = parent_of(h)->min_buddy_shift;
+ unsigned int index;
+
+ index = (block->base - h->heap_base->block.base) >> min_shift;
+ h->bitmap[index].alloc = 0;
+
+ for (;;) {
+ unsigned int buddy = index ^ (1 << h->bitmap[index].order);
+ if (buddy >= h->nr_buddies || h->bitmap[buddy].alloc ||
+ h->bitmap[buddy].order != h->bitmap[index].order)
+ break;
+
+ h->bitmap[buddy].order++;
+ h->bitmap[index].order++;
+ index = min(buddy, index);
+ }
+
+ kmem_cache_free(block_cache, b);
+ if ((1 << h->bitmap[0].order) == h->nr_buddies)
+ return h;
+
+ return NULL;
+}
+
+static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
+ size_t len, size_t align,
+ unsigned int mem_prot)
+{
+ struct list_block *b = NULL;
+ struct list_block *i = NULL;
+ struct list_block *rem = NULL;
+ unsigned long fix_base;
+ enum direction dir;
+
+ /* since pages are only mappable with one cache attribute,
+ * and most allocations from carveout heaps are DMA coherent
+ * (i.e., non-cacheable), round cacheable allocations up to
+ * a page boundary to ensure that the physical pages will
+ * only be mapped one way. */
+ if (mem_prot == NVMAP_HANDLE_CACHEABLE ||
+ mem_prot == NVMAP_HANDLE_INNER_CACHEABLE) {
+ align = max_t(size_t, align, PAGE_SIZE);
+ len = PAGE_ALIGN(len);
+ }
+
+ dir = (len <= heap->small_alloc) ? BOTTOM_UP : TOP_DOWN;
+
+ if (dir == BOTTOM_UP) {
+ list_for_each_entry(i, &heap->free_list, free_list) {
+ size_t fix_size;
+ fix_base = ALIGN(i->block.base, align);
+ fix_size = i->size - (fix_base - i->block.base);
+
+ if (fix_size >= len) {
+ b = i;
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry_reverse(i, &heap->free_list, free_list) {
+ if (i->size >= len) {
+ fix_base = i->block.base + i->size - len;
+ fix_base &= ~(align-1);
+ if (fix_base >= i->block.base) {
+ b = i;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!b)
+ return NULL;
+
+ if (b->block.base != fix_base) {
+ rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!rem) {
+ b->orig_addr = b->block.base;
+ b->block.base = fix_base;
+ b->size -= (b->block.base - b->orig_addr);
+ goto out;
+ }
+
+ rem->block.type = BLOCK_FIRST_FIT;
+ rem->block.base = b->block.base;
+ rem->orig_addr = rem->block.base;
+ rem->size = fix_base - rem->block.base;
+ b->block.base = fix_base;
+ b->orig_addr = fix_base;
+ b->size -= rem->size;
+ list_add_tail(&rem->all_list, &heap->all_list);
+ list_add_tail(&rem->free_list, &b->free_list);
+ }
+
+ b->orig_addr = b->block.base;
+
+ if (b->size > len) {
+ rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!rem)
+ goto out;
+
+ rem->block.type = BLOCK_FIRST_FIT;
+ rem->block.base = b->block.base + len;
+ rem->size = b->size - len;
+ BUG_ON(rem->size > b->size);
+ rem->orig_addr = rem->block.base;
+ b->size = len;
+ list_add_tail(&rem->all_list, &heap->all_list);
+ list_add(&rem->free_list, &b->free_list);
+ }
+
+out:
+ list_del(&b->free_list);
+ b->heap = heap;
+ b->mem_prot = mem_prot;
+ return &b->block;
+}
+
+#ifdef DEBUG_FREE_LIST
+static void freelist_debug(struct nvmap_heap *heap, const char *title,
+ struct list_block *token)
+{
+ int i;
+ struct list_block *n;
+
+	dev_dbg(&heap->dev, "%s\n", title);
+ i = 0;
+ list_for_each_entry(n, &heap->free_list, free_list) {
+		dev_dbg(&heap->dev, "\t%d [%p..%p]%s\n", i, (void *)n->orig_addr,
+ (void *)(n->orig_addr + n->size),
+ (n == token) ? "<--" : "");
+ i++;
+ }
+}
+#else
+#define freelist_debug(_heap, _title, _token) do { } while (0)
+#endif
+
+static void do_heap_free(struct nvmap_heap_block *block)
+{
+ struct list_block *b = container_of(block, struct list_block, block);
+ struct list_block *n = NULL;
+ struct nvmap_heap *heap = b->heap;
+
+ BUG_ON(b->block.base > b->orig_addr);
+ b->size += (b->block.base - b->orig_addr);
+ b->block.base = b->orig_addr;
+
+ freelist_debug(heap, "free list before", b);
+
+ list_for_each_entry(n, &heap->free_list, free_list) {
+ if (n->block.base > b->block.base)
+ break;
+ }
+
+ list_add_tail(&b->free_list, &n->free_list);
+ BUG_ON(list_empty(&b->all_list));
+
+ freelist_debug(heap, "free list pre-merge", b);
+
+ if (!list_is_last(&b->free_list, &heap->free_list)) {
+ n = list_first_entry(&b->free_list, struct list_block, free_list);
+ if (n->block.base == b->block.base + b->size) {
+ list_del(&n->all_list);
+ list_del(&n->free_list);
+ BUG_ON(b->orig_addr >= n->orig_addr);
+ b->size += n->size;
+ kmem_cache_free(block_cache, n);
+ }
+ }
+
+ if (b->free_list.prev != &heap->free_list) {
+ n = list_entry(b->free_list.prev, struct list_block, free_list);
+ if (n->block.base + n->size == b->block.base) {
+ list_del(&b->all_list);
+ list_del(&b->free_list);
+ BUG_ON(n->orig_addr >= b->orig_addr);
+ n->size += b->size;
+			kmem_cache_free(block_cache, b);
+			/* b was merged into its predecessor; keep the
+			 * trailing freelist_debug() token valid */
+			b = n;
+ }
+ }
+
+ freelist_debug(heap, "free list after", b);
+}
+
+static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
+ size_t len, size_t align,
+ unsigned int mem_prot)
+{
+ struct buddy_heap *bh;
+ struct nvmap_heap_block *b = NULL;
+
+ list_for_each_entry(bh, &h->buddy_list, buddy_list) {
+ b = buddy_alloc(bh, len, align, mem_prot);
+ if (b)
+ return b;
+ }
+
+ /* no buddy heaps could service this allocation: try to create a new
+ * buddy heap instead */
+ bh = kmem_cache_zalloc(buddy_heap_cache, GFP_KERNEL);
+ if (!bh)
+ return NULL;
+
+ b = do_heap_alloc(h, h->buddy_heap_size, h->buddy_heap_size, mem_prot);
+ if (!b) {
+ kmem_cache_free(buddy_heap_cache, bh);
+ return NULL;
+ }
+
+ bh->heap_base = container_of(b, struct list_block, block);
+ bh->nr_buddies = h->buddy_heap_size >> h->min_buddy_shift;
+ bh->bitmap[0].alloc = 0;
+ bh->bitmap[0].order = order_of(h->buddy_heap_size, h->min_buddy_shift);
+ list_add_tail(&bh->buddy_list, &h->buddy_list);
+ return buddy_alloc(bh, len, align, mem_prot);
+}
+
+/* nvmap_heap_alloc: allocates a block of memory of len bytes, aligned to
+ * align bytes. */
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h, size_t len,
+ size_t align, unsigned int prot)
+{
+ struct nvmap_heap_block *b;
+
+ mutex_lock(&h->lock);
+ if (len <= h->buddy_heap_size / 2) {
+ b = do_buddy_alloc(h, len, align, prot);
+ } else {
+ if (h->buddy_heap_size)
+ len = ALIGN(len, h->buddy_heap_size);
+ align = max(align, (size_t)L1_CACHE_BYTES);
+ b = do_heap_alloc(h, len, align, prot);
+ }
+ mutex_unlock(&h->lock);
+ return b;
+}
+
+/* nvmap_heap_free: frees block b */
+void nvmap_heap_free(struct nvmap_heap_block *b)
+{
+ struct buddy_heap *bh = NULL;
+ struct nvmap_heap *h;
+
+ if (b->type == BLOCK_BUDDY) {
+ struct buddy_block *bb;
+ bb = container_of(b, struct buddy_block, block);
+ h = bb->heap->heap_base->heap;
+ } else {
+ struct list_block *lb;
+ lb = container_of(b, struct list_block, block);
+ h = lb->heap;
+ }
+
+ mutex_lock(&h->lock);
+ if (b->type == BLOCK_BUDDY)
+ bh = do_buddy_free(b);
+ else
+ do_heap_free(b);
+
+ if (bh) {
+ list_del(&bh->buddy_list);
+ mutex_unlock(&h->lock);
+ nvmap_heap_free(&bh->heap_base->block);
+ kmem_cache_free(buddy_heap_cache, bh);
+ } else
+ mutex_unlock(&h->lock);
+}
+
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
+{
+ if (b->type == BLOCK_BUDDY) {
+ struct buddy_block *bb;
+ bb = container_of(b, struct buddy_block, block);
+ return parent_of(bb->heap);
+ } else {
+ struct list_block *lb;
+ lb = container_of(b, struct list_block, block);
+ return lb->heap;
+ }
+}
+
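+/* nothing to free here: heap lifetime is managed by nvmap_heap_destroy(),
+ * but the driver core warns if a registered device has no release() */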
+static void heap_release(struct device *heap)
+{
+}
+
+/* nvmap_heap_create: create a heap object of len bytes, starting from
+ * address base.
+ *
+ * if buddy_size is >= NVMAP_HEAP_MIN_BUDDY_SIZE, then allocations <= 1/2
+ * of the buddy heap size will use a buddy sub-allocator, where each buddy
+ * heap is buddy_size bytes (should be a power of 2). all other allocations
+ * will be rounded up to be a multiple of buddy_size bytes.
+ */
+struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
+ unsigned long base, size_t len,
+ size_t buddy_size, void *arg)
+{
+ struct nvmap_heap *h = NULL;
+ struct list_block *l = NULL;
+
+ if (WARN_ON(buddy_size && buddy_size < NVMAP_HEAP_MIN_BUDDY_SIZE)) {
+ dev_warn(parent, "%s: buddy_size %u too small\n", __func__,
+ buddy_size);
+ buddy_size = 0;
+ } else if (WARN_ON(buddy_size >= len)) {
+ dev_warn(parent, "%s: buddy_size %u too large\n", __func__,
+ buddy_size);
+ buddy_size = 0;
+ } else if (WARN_ON(buddy_size & (buddy_size - 1))) {
+ dev_warn(parent, "%s: buddy_size %u not a power of 2\n",
+ __func__, buddy_size);
+ buddy_size = 1 << (ilog2(buddy_size) + 1);
+ }
+
+ if (WARN_ON(buddy_size && (base & (buddy_size - 1)))) {
+ unsigned long orig = base;
+ dev_warn(parent, "%s: base address %p not aligned to "
+ "buddy_size %u\n", __func__, (void *)base, buddy_size);
+ base = ALIGN(base, buddy_size);
+ len -= (base - orig);
+ }
+
+ if (WARN_ON(buddy_size && (len & (buddy_size - 1)))) {
+ dev_warn(parent, "%s: length %u not aligned to "
+ "buddy_size %u\n", __func__, len, buddy_size);
+ len &= ~(buddy_size - 1);
+ }
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h) {
+ dev_err(parent, "%s: out of memory\n", __func__);
+ goto fail_alloc;
+ }
+
+ l = kmem_cache_zalloc(block_cache, GFP_KERNEL);
+ if (!l) {
+ dev_err(parent, "%s: out of memory\n", __func__);
+ goto fail_alloc;
+ }
+
+ dev_set_name(&h->dev, "heap-%s", name);
+ h->name = name;
+ h->arg = arg;
+ h->dev.parent = parent;
+ h->dev.driver = NULL;
+ h->dev.release = heap_release;
+ if (device_register(&h->dev)) {
+ dev_err(parent, "%s: failed to register %s\n", __func__,
+ dev_name(&h->dev));
+ goto fail_alloc;
+ }
+ if (sysfs_create_group(&h->dev.kobj, &heap_stat_attr_group)) {
+ dev_err(&h->dev, "%s: failed to create attributes\n", __func__);
+ goto fail_register;
+ }
+ h->small_alloc = max(2 * buddy_size, len / 256);
+ h->buddy_heap_size = buddy_size;
+ if (buddy_size)
+ h->min_buddy_shift = ilog2(buddy_size / MAX_BUDDY_NR);
+ INIT_LIST_HEAD(&h->free_list);
+ INIT_LIST_HEAD(&h->buddy_list);
+ INIT_LIST_HEAD(&h->all_list);
+ mutex_init(&h->lock);
+ l->block.base = base;
+ l->block.type = BLOCK_FIRST_FIT;
+ l->size = len;
+ l->orig_addr = base;
+ list_add_tail(&l->free_list, &h->free_list);
+ list_add_tail(&l->all_list, &h->all_list);
+ return h;
+
+fail_register:
+ device_unregister(&h->dev);
+fail_alloc:
+ if (l)
+ kmem_cache_free(block_cache, l);
+ kfree(h);
+ return NULL;
+}
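+
+/* sketch of a typical instantiation (illustrative values; the nvmap core
+ * does this for each platform carveout in nvmap_probe()):
+ *
+ *	heap = nvmap_heap_create(dev, "generic-0", 0x18000000,
+ *				 32 << 20, SZ_128K, node);
+ */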
+
+void *nvmap_heap_device_to_arg(struct device *dev)
+{
+ struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
+ return heap->arg;
+}
+
+void *nvmap_heap_to_arg(struct nvmap_heap *heap)
+{
+ return heap->arg;
+}
+
+/* nvmap_heap_destroy: frees all resources in heap */
+void nvmap_heap_destroy(struct nvmap_heap *heap)
+{
+ WARN_ON(!list_empty(&heap->buddy_list));
+
+ sysfs_remove_group(&heap->dev.kobj, &heap_stat_attr_group);
+ device_unregister(&heap->dev);
+
+ while (!list_empty(&heap->buddy_list)) {
+ struct buddy_heap *b;
+ b = list_first_entry(&heap->buddy_list, struct buddy_heap,
+ buddy_list);
+		list_del(&b->buddy_list);
+ nvmap_heap_free(&b->heap_base->block);
+ kmem_cache_free(buddy_heap_cache, b);
+ }
+
+ WARN_ON(!list_is_singular(&heap->all_list));
+ while (!list_empty(&heap->all_list)) {
+ struct list_block *l;
+ l = list_first_entry(&heap->all_list, struct list_block,
+ all_list);
+ list_del(&l->all_list);
+ kmem_cache_free(block_cache, l);
+ }
+
+ kfree(heap);
+}
+
+/* nvmap_heap_create_group: adds the attribute_group grp to the heap kobject */
+int nvmap_heap_create_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp)
+{
+ return sysfs_create_group(&heap->dev.kobj, grp);
+}
+
+/* nvmap_heap_remove_group: removes the attribute_group grp */
+void nvmap_heap_remove_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp)
+{
+ sysfs_remove_group(&heap->dev.kobj, grp);
+}
+
+int __init nvmap_heap_init(void)
+{
+ BUG_ON(buddy_heap_cache != NULL);
+ buddy_heap_cache = KMEM_CACHE(buddy_heap, 0);
+ if (!buddy_heap_cache) {
+ pr_err("%s: unable to create buddy heap cache\n", __func__);
+ return -ENOMEM;
+ }
+
+ block_cache = KMEM_CACHE(combo_block, 0);
+ if (!block_cache) {
+ kmem_cache_destroy(buddy_heap_cache);
+ pr_err("%s: unable to create block cache\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void nvmap_heap_deinit(void)
+{
+ if (buddy_heap_cache)
+ kmem_cache_destroy(buddy_heap_cache);
+ if (block_cache)
+ kmem_cache_destroy(block_cache);
+
+ block_cache = NULL;
+ buddy_heap_cache = NULL;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_heap.h b/drivers/video/tegra/nvmap/nvmap_heap.h
new file mode 100644
index 000000000000..40ee4ba02cb2
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_heap.h
@@ -0,0 +1,64 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_heap.h
+ *
+ * GPU heap allocator.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __NVMAP_HEAP_H
+#define __NVMAP_HEAP_H
+
+struct device;
+struct nvmap_heap;
+struct attribute_group;
+
+struct nvmap_heap_block {
+ unsigned long base;
+ unsigned int type;
+};
+
+#define NVMAP_HEAP_MIN_BUDDY_SIZE 8192
+
+struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
+ unsigned long base, size_t len,
+				     size_t buddy_size, void *arg);
+
+void nvmap_heap_destroy(struct nvmap_heap *heap);
+
+void *nvmap_heap_device_to_arg(struct device *dev);
+
+void *nvmap_heap_to_arg(struct nvmap_heap *heap);
+
+struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *heap, size_t len,
+ size_t align, unsigned int prot);
+
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b);
+
+void nvmap_heap_free(struct nvmap_heap_block *block);
+
+int nvmap_heap_create_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp);
+
+void nvmap_heap_remove_group(struct nvmap_heap *heap,
+ const struct attribute_group *grp);
+
+int __init nvmap_heap_init(void);
+
+void nvmap_heap_deinit(void);
+
+#endif
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c
new file mode 100644
index 000000000000..b943065a44c0
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.c
@@ -0,0 +1,630 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_ioctl.c
+ *
+ * User-space interface to nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/tlbflush.h>
+
+#include <mach/iovmm.h>
+#include <mach/nvmap.h>
+
+#include "nvmap_ioctl.h"
+#include "nvmap.h"
+
+static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
+ int is_read, unsigned long h_offs,
+ unsigned long sys_addr, unsigned long h_stride,
+ unsigned long sys_stride, unsigned long elem_size,
+ unsigned long count);
+
+static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+ unsigned long start, unsigned long end, unsigned int op);
+
+
+int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
+{
+ struct nvmap_pin_handle op;
+ struct nvmap_handle *h;
+ unsigned long on_stack[16];
+ unsigned long *refs;
+ unsigned long __user *output;
+ unsigned int i;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.count)
+ return -EINVAL;
+
+ if (op.count > 1) {
+		size_t bytes = op.count * sizeof(*refs);
+
+ if (op.count > ARRAY_SIZE(on_stack))
+ refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
+ else
+ refs = on_stack;
+
+ if (!refs)
+ return -ENOMEM;
+
+		if (copy_from_user(refs, (void __user *)op.handles, bytes)) {
+ err = -EFAULT;
+ goto out;
+ }
+ } else {
+ refs = on_stack;
+ on_stack[0] = (unsigned long)op.handles;
+ }
+
+ if (is_pin)
+ err = nvmap_pin_ids(filp->private_data, op.count, refs);
+ else
+ nvmap_unpin_ids(filp->private_data, op.count, refs);
+
+ /* skip the output stage on unpin */
+ if (err || !is_pin)
+ goto out;
+
+ /* it is guaranteed that if nvmap_pin_ids returns 0 that
+ * all of the handle_ref objects are valid, so dereferencing
+ * directly here is safe */
+ if (op.count > 1)
+ output = (unsigned long __user *)op.addr;
+ else {
+ struct nvmap_pin_handle __user *tmp = arg;
+ output = (unsigned long __user *)&(tmp->addr);
+ }
+
+ if (!output)
+ goto out;
+
+ for (i = 0; i < op.count && !err; i++) {
+ unsigned long addr;
+
+ h = (struct nvmap_handle *)refs[i];
+
+ if (h->heap_pgalloc && h->pgalloc.contig)
+ addr = page_to_phys(h->pgalloc.pages[0]);
+ else if (h->heap_pgalloc)
+ addr = h->pgalloc.area->iovm_start;
+ else
+ addr = h->carveout->base;
+
+ err = put_user(addr, &output[i]);
+ }
+
+ if (err)
+ nvmap_unpin_ids(filp->private_data, op.count, refs);
+
+out:
+ if (refs != on_stack)
+ kfree(refs);
+
+ return err;
+}
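+
+/* illustrative user-space pinning sequence (a sketch; field types per
+ * struct nvmap_pin_handle in mach/nvmap.h):
+ *
+ *	unsigned long handles[2] = { h0, h1 };
+ *	unsigned long addrs[2];
+ *	struct nvmap_pin_handle op = {
+ *		.handles = handles,
+ *		.addr    = addrs,
+ *		.count   = 2,
+ *	};
+ *	ioctl(fd, NVMAP_IOC_PIN_MULT, &op);
+ *	// on success, addrs[] holds the bus address of each handle
+ */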
+
+int nvmap_ioctl_getid(struct file *filp, void __user *arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_create_handle op;
+ struct nvmap_handle *h = NULL;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle)
+ return -EINVAL;
+
+ h = nvmap_get_handle_id(client, op.handle);
+
+ if (!h)
+ return -EPERM;
+
+ op.id = (__u32)h;
+ if (client == h->owner)
+ h->global = true;
+
+ nvmap_handle_put(h);
+
+ return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
+}
+
+int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
+{
+ struct nvmap_alloc_handle op;
+ struct nvmap_client *client = filp->private_data;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle)
+ return -EINVAL;
+
+ if (op.align & (op.align - 1))
+ return -EINVAL;
+
+ /* user-space handles are aligned to page boundaries, to prevent
+ * data leakage. */
+ op.align = max_t(size_t, op.align, PAGE_SIZE);
+
+ return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
+ op.align, op.flags);
+}
+
+int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
+{
+ struct nvmap_create_handle op;
+ struct nvmap_handle_ref *ref = NULL;
+ struct nvmap_client *client = filp->private_data;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!client)
+ return -ENODEV;
+
+ if (cmd == NVMAP_IOC_CREATE) {
+ ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
+ if (!IS_ERR(ref))
+ ref->handle->orig_size = op.size;
+ } else if (cmd == NVMAP_IOC_FROM_ID) {
+ ref = nvmap_duplicate_handle_id(client, op.id);
+ } else {
+ return -EINVAL;
+ }
+
+ if (IS_ERR(ref))
+ return PTR_ERR(ref);
+
+ op.handle = nvmap_ref_to_id(ref);
+ if (copy_to_user(arg, &op, sizeof(op))) {
+ err = -EFAULT;
+ nvmap_free_handle_id(client, op.handle);
+ }
+
+ return err;
+}
+
+int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_map_caller op;
+ struct nvmap_vma_priv *vpriv;
+ struct vm_area_struct *vma;
+ struct nvmap_handle *h = NULL;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle)
+ return -EINVAL;
+
+ h = nvmap_get_handle_id(client, op.handle);
+
+ if (!h)
+ return -EPERM;
+
+ down_read(&current->mm->mmap_sem);
+
+ vma = find_vma(current->mm, op.addr);
+ if (!vma || !vma->vm_private_data) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (op.offset & ~PAGE_MASK) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ if ((op.offset + op.length) > h->size) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ vpriv = vma->vm_private_data;
+ BUG_ON(!vpriv);
+
+	/* the VMA must exactly match the requested mapping operation, and the
+	 * VMA that is targeted must have been created by this driver
+	 */
+	if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
+	    (vma->vm_end - vma->vm_start != op.length)) {
+ err = -EPERM;
+ goto out;
+ }
+
+ /* verify that each mmap() system call creates a unique VMA */
+
+ if (vpriv->handle && (h == vpriv->handle)) {
+ goto out;
+ } else if (vpriv->handle) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ vpriv->handle = h;
+ vpriv->offs = op.offset;
+
+ vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
+
+out:
+ up_read(&current->mm->mmap_sem);
+ if (err)
+ nvmap_handle_put(h);
+ return err;
+}
+
+int nvmap_ioctl_get_param(struct file *filp, void __user *arg)
+{
+ struct nvmap_handle_param op;
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_handle *h;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ h = nvmap_get_handle_id(client, op.handle);
+ if (!h)
+ return -EINVAL;
+
+ switch (op.param) {
+ case NVMAP_HANDLE_PARAM_SIZE:
+ op.result = h->orig_size;
+ break;
+ case NVMAP_HANDLE_PARAM_ALIGNMENT:
+ if (!h->alloc)
+ op.result = 0;
+ else if (h->heap_pgalloc)
+ op.result = PAGE_SIZE;
+ else if (h->carveout->base)
+ op.result = (h->carveout->base & -h->carveout->base);
+ else
+ op.result = SZ_4M;
+ break;
+ case NVMAP_HANDLE_PARAM_BASE:
+ if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
+ op.result = -1ul;
+ else if (!h->heap_pgalloc)
+ op.result = h->carveout->base;
+ else if (h->pgalloc.contig)
+ op.result = page_to_phys(h->pgalloc.pages[0]);
+ else if (h->pgalloc.area)
+ op.result = h->pgalloc.area->iovm_start;
+ else
+ op.result = -1ul;
+ break;
+ case NVMAP_HANDLE_PARAM_HEAP:
+ if (!h->alloc)
+ op.result = 0;
+ else if (!h->heap_pgalloc)
+ op.result = nvmap_carveout_usage(client, h->carveout);
+ else if (h->pgalloc.contig)
+ op.result = NVMAP_HEAP_SYSMEM;
+ else
+ op.result = NVMAP_HEAP_IOVMM;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (!err && copy_to_user(arg, &op, sizeof(op)))
+ err = -EFAULT;
+
+ nvmap_handle_put(h);
+ return err;
+}
+
+int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_rw_handle __user *uarg = arg;
+ struct nvmap_rw_handle op;
+ struct nvmap_handle *h;
+ ssize_t copied;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle || !op.addr || !op.count || !op.elem_size)
+ return -EINVAL;
+
+ h = nvmap_get_handle_id(client, op.handle);
+ if (!h)
+ return -EPERM;
+
+ copied = rw_handle(client, h, is_read, op.offset,
+ (unsigned long)op.addr, op.hmem_stride,
+ op.user_stride, op.elem_size, op.count);
+
+ if (copied < 0) {
+ err = copied;
+ copied = 0;
+ } else if (copied < (op.count * op.elem_size))
+ err = -EINTR;
+
+ __put_user(copied, &uarg->count);
+
+ nvmap_handle_put(h);
+
+ return err;
+}
+
+int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
+{
+ struct nvmap_client *client = filp->private_data;
+ struct nvmap_cache_op op;
+ struct vm_area_struct *vma;
+ struct nvmap_vma_priv *vpriv;
+ unsigned long start;
+ unsigned long end;
+ int err = 0;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
+ op.op > NVMAP_CACHE_OP_WB_INV)
+ return -EINVAL;
+
+ down_read(&current->mm->mmap_sem);
+
+	vma = find_vma(current->mm, (unsigned long)op.addr);
+ if (!vma || !is_nvmap_vma(vma) ||
+ (unsigned long)op.addr + op.len > vma->vm_end) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;
+
+ if ((unsigned long)vpriv->handle != op.handle) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ start = (unsigned long)op.addr - vma->vm_start;
+ end = start + op.len;
+
+ err = cache_maint(client, vpriv->handle, start, end, op.op);
+out:
+ up_read(&current->mm->mmap_sem);
+ return err;
+}
+
+int nvmap_ioctl_free(struct file *filp, unsigned long arg)
+{
+ struct nvmap_client *client = filp->private_data;
+
+ if (!arg)
+ return 0;
+
+ nvmap_free_handle_id(client, arg);
+ return 0;
+}
+
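+/* Performs CPU cache maintenance on [start, end) within the handle. For
+ * page-allocated handles the per-page DMA helpers are used directly; for
+ * carveout handles, each page is temporarily mapped through a scratch
+ * kernel PTE for the inner-cache operation, and the outer cache is then
+ * maintained by physical address. */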
+static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
+ unsigned long start, unsigned long end, unsigned int op)
+{
+ enum dma_data_direction dir;
+ pgprot_t prot;
+ pte_t **pte = NULL;
+ unsigned long kaddr;
+ unsigned long loop;
+ int err = 0;
+
+ h = nvmap_handle_get(h);
+ if (!h)
+ return -EFAULT;
+
+ if (!h->alloc) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
+ h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
+ start == end)
+ goto out;
+
+	if (op == NVMAP_CACHE_OP_WB_INV)
+		dir = DMA_BIDIRECTIONAL;
+ else if (op == NVMAP_CACHE_OP_WB)
+ dir = DMA_TO_DEVICE;
+ else
+ dir = DMA_FROM_DEVICE;
+
+ if (h->heap_pgalloc) {
+ while (start < end) {
+ unsigned long next = (start + PAGE_SIZE) & PAGE_MASK;
+ struct page *page;
+
+ page = h->pgalloc.pages[start >> PAGE_SHIFT];
+ next = min(next, end);
+ __dma_page_cpu_to_dev(page, start & ~PAGE_MASK,
+ next - start, dir);
+ start = next;
+ }
+ goto out;
+ }
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+ pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
+ if (IS_ERR(pte)) {
+ err = PTR_ERR(pte);
+ pte = NULL;
+ goto out;
+ }
+
+	if (start > h->size || end > h->size) {
+		nvmap_warn(client, "cache maintenance outside handle\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+ start += h->carveout->base;
+ end += h->carveout->base;
+
+ loop = start;
+
+ while (loop < end) {
+ unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK;
+ void *base = (void *)kaddr + (loop & ~PAGE_MASK);
+ next = min(next, end);
+
+ set_pte_at(&init_mm, kaddr, *pte,
+ pfn_pte(__phys_to_pfn(loop), prot));
+ flush_tlb_kernel_page(kaddr);
+
+ dmac_map_area(base, next - loop, dir);
+ loop = next;
+ }
+
+ if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
+ if (dir != DMA_FROM_DEVICE)
+ outer_clean_range(start, end);
+ else
+ outer_inv_range(start, end);
+ }
+
+out:
+ if (pte)
+ nvmap_free_pte(client->dev, pte);
+ nvmap_handle_put(h);
+ wmb();
+ return err;
+}
+
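+/* Copies one element between user space and the handle, a page at a time:
+ * each page of the handle is mapped at 'kaddr' through the supplied PTE,
+ * copied with copy_to_user()/copy_from_user(), and then replaced by the
+ * next page. */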
+static int rw_handle_page(struct nvmap_handle *h, int is_read,
+ unsigned long start, unsigned long rw_addr,
+ unsigned long bytes, unsigned long kaddr, pte_t *pte)
+{
+ pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
+ unsigned long end = start + bytes;
+ int err = 0;
+
+ while (!err && start < end) {
+ struct page *page = NULL;
+ unsigned long phys;
+ size_t count;
+ void *src;
+
+ if (!h->heap_pgalloc) {
+ phys = h->carveout->base + start;
+ } else {
+ page = h->pgalloc.pages[start >> PAGE_SHIFT];
+ BUG_ON(!page);
+ get_page(page);
+ phys = page_to_phys(page) + (start & ~PAGE_MASK);
+ }
+
+ set_pte_at(&init_mm, kaddr, pte,
+ pfn_pte(__phys_to_pfn(phys), prot));
+ flush_tlb_kernel_page(kaddr);
+
+ src = (void *)kaddr + (phys & ~PAGE_MASK);
+ phys = PAGE_SIZE - (phys & ~PAGE_MASK);
+ count = min_t(size_t, end - start, phys);
+
+		if (is_read)
+			err = copy_to_user((void __user *)rw_addr, src, count);
+		else
+			err = copy_from_user(src, (void __user *)rw_addr, count);
+
+ if (err)
+ err = -EFAULT;
+
+ rw_addr += count;
+ start += count;
+
+ if (page)
+ put_page(page);
+ }
+
+ return err;
+}
+
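+/* Strided copy between user memory and a handle. When both strides equal
+ * the element size, the transfer is contiguous and the elements are
+ * coalesced into a single larger copy before the loop runs. */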
+static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
+ int is_read, unsigned long h_offs,
+ unsigned long sys_addr, unsigned long h_stride,
+ unsigned long sys_stride, unsigned long elem_size,
+ unsigned long count)
+{
+ ssize_t copied = 0;
+ pte_t **pte;
+ void *addr;
+ int ret = 0;
+
+ if (!elem_size)
+ return -EINVAL;
+
+ if (!h->alloc)
+ return -EFAULT;
+
+ if (elem_size == h_stride && elem_size == sys_stride) {
+ elem_size *= count;
+ h_stride = elem_size;
+ sys_stride = elem_size;
+ count = 1;
+ }
+
+ pte = nvmap_alloc_pte(client->dev, &addr);
+ if (IS_ERR(pte))
+ return PTR_ERR(pte);
+
+ while (count--) {
+ if (h_offs + elem_size > h->size) {
+ nvmap_warn(client, "read/write outside of handle\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = rw_handle_page(h, is_read, h_offs, sys_addr,
+ elem_size, (unsigned long)addr, *pte);
+
+ if (ret)
+ break;
+
+ copied += elem_size;
+ sys_addr += sys_stride;
+ h_offs += h_stride;
+ }
+
+ nvmap_free_pte(client->dev, pte);
+ return ret ?: copied;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.h b/drivers/video/tegra/nvmap/nvmap_ioctl.h
new file mode 100644
index 000000000000..c802cd4dd7ae
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.h
@@ -0,0 +1,159 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_ioctl.h
+ *
+ * ioctl declarations for nvmap
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_IOCTL_H
+#define __VIDEO_TEGRA_NVMAP_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/file.h>
+
+#include <mach/nvmap.h>
+
+enum {
+ NVMAP_HANDLE_PARAM_SIZE = 1,
+ NVMAP_HANDLE_PARAM_ALIGNMENT,
+ NVMAP_HANDLE_PARAM_BASE,
+ NVMAP_HANDLE_PARAM_HEAP,
+};
+
+enum {
+ NVMAP_CACHE_OP_WB = 0,
+ NVMAP_CACHE_OP_INV,
+ NVMAP_CACHE_OP_WB_INV,
+};
+
+struct nvmap_create_handle {
+ union {
+ __u32 key; /* ClaimPreservedHandle */
+ __u32 id; /* FromId */
+ __u32 size; /* CreateHandle */
+ };
+ __u32 handle;
+};
+
+struct nvmap_alloc_handle {
+ __u32 handle;
+ __u32 heap_mask;
+ __u32 flags;
+ __u32 align;
+};
+
+struct nvmap_map_caller {
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem; should be page-aligned */
+ __u32 length; /* number of bytes to map */
+ __u32 flags;
+ unsigned long addr; /* user pointer */
+};
+
+struct nvmap_rw_handle {
+ unsigned long addr; /* user pointer */
+ __u32 handle; /* hmem */
+ __u32 offset; /* offset into hmem */
+ __u32 elem_size; /* individual atom size */
+ __u32 hmem_stride; /* delta in bytes between atoms in hmem */
+ __u32 user_stride; /* delta in bytes between atoms in user */
+ __u32 count; /* number of atoms to copy */
+};
+
+struct nvmap_pin_handle {
+ unsigned long handles; /* array of handles to pin/unpin */
+ unsigned long addr; /* array of addresses to return */
+ __u32 count; /* number of entries in handles */
+};
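+/* When count == 1, 'handles' holds the handle value itself rather than a
+ * user pointer, and the pinned address is written back through 'addr' in
+ * the ioctl argument; for count > 1 both fields are user pointers to
+ * arrays. */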
+
+struct nvmap_handle_param {
+ __u32 handle;
+ __u32 param;
+ unsigned long result;
+};
+
+struct nvmap_cache_op {
+ unsigned long addr;
+ __u32 handle;
+ __u32 len;
+ __s32 op;
+};
+
+#define NVMAP_IOC_MAGIC 'N'
+
+/* Creates a new memory handle. On input, the argument is the size of the new
+ * handle; on return, the argument is the name of the new handle
+ */
+#define NVMAP_IOC_CREATE _IOWR(NVMAP_IOC_MAGIC, 0, struct nvmap_create_handle)
+#define NVMAP_IOC_CLAIM _IOWR(NVMAP_IOC_MAGIC, 1, struct nvmap_create_handle)
+#define NVMAP_IOC_FROM_ID _IOWR(NVMAP_IOC_MAGIC, 2, struct nvmap_create_handle)
+
+/* Actually allocates memory for the specified handle */
+#define NVMAP_IOC_ALLOC _IOW(NVMAP_IOC_MAGIC, 3, struct nvmap_alloc_handle)
+
+/* Frees a memory handle, unpinning any pinned pages and unmapping any mappings
+ */
+#define NVMAP_IOC_FREE _IO(NVMAP_IOC_MAGIC, 4)
+
+/* Maps the region of the specified handle into a user-provided virtual address
+ * that was previously created via an mmap syscall on this fd */
+#define NVMAP_IOC_MMAP _IOWR(NVMAP_IOC_MAGIC, 5, struct nvmap_map_caller)
+
+/* Reads/writes data (possibly strided) from a user-provided buffer into the
+ * hmem at the specified offset */
+#define NVMAP_IOC_WRITE _IOW(NVMAP_IOC_MAGIC, 6, struct nvmap_rw_handle)
+#define NVMAP_IOC_READ _IOW(NVMAP_IOC_MAGIC, 7, struct nvmap_rw_handle)
+
+#define NVMAP_IOC_PARAM _IOWR(NVMAP_IOC_MAGIC, 8, struct nvmap_handle_param)
+
+/* Pins a list of memory handles into IO-addressable memory (either IOVMM
+ * space or physical memory, depending on the allocation), and returns the
+ * address. Handles may be pinned recursively. */
+#define NVMAP_IOC_PIN_MULT _IOWR(NVMAP_IOC_MAGIC, 10, struct nvmap_pin_handle)
+#define NVMAP_IOC_UNPIN_MULT _IOW(NVMAP_IOC_MAGIC, 11, struct nvmap_pin_handle)
+
+#define NVMAP_IOC_CACHE _IOW(NVMAP_IOC_MAGIC, 12, struct nvmap_cache_op)
+
+/* Returns a global ID usable to allow a remote process to create a handle
+ * reference to the same handle */
+#define NVMAP_IOC_GET_ID _IOWR(NVMAP_IOC_MAGIC, 13, struct nvmap_create_handle)
+
+#define NVMAP_IOC_MAXNR (_IOC_NR(NVMAP_IOC_GET_ID))
+
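+/* Illustrative user-space call sequence (a sketch only, not part of the
+ * ABI; values such as the heap mask below are placeholders):
+ *
+ *	struct nvmap_create_handle create = { .size = len };
+ *	ioctl(fd, NVMAP_IOC_CREATE, &create);	// sets create.handle
+ *
+ *	struct nvmap_alloc_handle alloc = {
+ *		.handle = create.handle, .heap_mask = ~0, .align = 0,
+ *	};
+ *	ioctl(fd, NVMAP_IOC_ALLOC, &alloc);	// backs the handle with memory
+ *
+ *	struct nvmap_map_caller map = {
+ *		.handle = create.handle, .length = len,
+ *		.addr = (unsigned long)mmap(NULL, len, PROT_READ | PROT_WRITE,
+ *					    MAP_SHARED, fd, 0),
+ *	};
+ *	ioctl(fd, NVMAP_IOC_MMAP, &map);	// binds the handle to the VMA
+ */
+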
+int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg);
+
+int nvmap_ioctl_get_param(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_getid(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_alloc(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_free(struct file *filp, unsigned long arg);
+
+int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg);
+
+int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg);
+
+int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg);
+
+#endif
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.c b/drivers/video/tegra/nvmap/nvmap_mru.c
new file mode 100644
index 000000000000..252665427568
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_mru.c
@@ -0,0 +1,194 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_mru.c
+ *
+ * IOVMM virtualization support for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+
+#include <mach/iovmm.h>
+
+#include "nvmap.h"
+#include "nvmap_mru.h"
+
+/* if IOVMM reclamation is enabled (CONFIG_NVMAP_RECLAIM_UNPINNED_VM),
+ * unpinned handles are placed onto a most-recently-used eviction list;
+ * multiple lists are maintained, segmented by size (sizes were chosen to
+ * roughly correspond with common sizes for graphics surfaces).
+ *
+ * if a handle is located on the MRU list, then the code below may
+ * steal its IOVMM area at any time to satisfy a pin operation if no
+ * free IOVMM space is available
+ */
+
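+/* 256 KiB, 384 KiB, 768 KiB, 1 MiB and 1.5 MiB; handles larger than the
+ * last cutoff fall onto a final catch-all list. */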
+static const size_t mru_cutoff[] = {
+ 262144, 393216, 786432, 1048576, 1572864
+};
+
+static inline struct list_head *mru_list(struct nvmap_share *share, size_t size)
+{
+ unsigned int i;
+
+ BUG_ON(!share->mru_lists);
+ for (i = 0; i < ARRAY_SIZE(mru_cutoff); i++)
+ if (size <= mru_cutoff[i])
+ break;
+
+ return &share->mru_lists[i];
+}
+
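+/* Limit nvmap to three-quarters of the total IOVMM aperture. */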
+size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm)
+{
+ size_t vm_size = tegra_iovmm_get_vm_size(iovmm);
+ return (vm_size >> 2) * 3;
+}
+
+/* nvmap_mru_vma_lock should be acquired by the caller before calling this */
+void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h)
+{
+ size_t len = h->pgalloc.area->iovm_length;
+ list_add(&h->pgalloc.mru_list, mru_list(share, len));
+}
+
+void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h)
+{
+ nvmap_mru_lock(s);
+ if (!list_empty(&h->pgalloc.mru_list))
+ list_del(&h->pgalloc.mru_list);
+ nvmap_mru_unlock(s);
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+}
+
+/* returns a tegra_iovmm_area for a handle. if the handle already has
+ * an iovmm_area allocated, the handle is simply removed from its MRU list
+ * and the existing iovmm_area is returned.
+ *
+ * if no existing allocation exists, try to allocate a new IOVMM area.
+ *
+ * if a new area can not be allocated, try to re-use the most-recently-unpinned
+ * handle's allocation.
+ *
+ * and if that fails, iteratively evict handles from the MRU lists and free
+ * their allocations, until the new allocation succeeds.
+ */
+struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+ struct nvmap_handle *h)
+{
+ struct list_head *mru;
+ struct nvmap_handle *evict = NULL;
+ struct tegra_iovmm_area *vm = NULL;
+ unsigned int i, idx;
+ pgprot_t prot;
+
+ BUG_ON(!h || !c || !c->share);
+
+ prot = nvmap_pgprot(h, pgprot_kernel);
+
+ if (h->pgalloc.area) {
+ /* since this is only called inside the pin lock, and the
+ * handle is gotten before it is pinned, there are no races
+ * where h->pgalloc.area is changed after the comparison */
+ nvmap_mru_lock(c->share);
+ BUG_ON(list_empty(&h->pgalloc.mru_list));
+ list_del(&h->pgalloc.mru_list);
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ nvmap_mru_unlock(c->share);
+ return h->pgalloc.area;
+ }
+
+ vm = tegra_iovmm_create_vm(c->share->iovmm, NULL, h->size, prot);
+
+ if (vm) {
+ INIT_LIST_HEAD(&h->pgalloc.mru_list);
+ return vm;
+ }
+ /* attempt to re-use the most recently unpinned IOVMM area in the
+ * same size bin as the current handle. If that fails, iteratively
+ * evict handles (starting from the current bin) until an allocation
+ * succeeds or no more areas can be evicted */
+
+ nvmap_mru_lock(c->share);
+ mru = mru_list(c->share, h->size);
+ if (!list_empty(mru))
+ evict = list_first_entry(mru, struct nvmap_handle,
+ pgalloc.mru_list);
+
+ if (evict && evict->pgalloc.area->iovm_length >= h->size) {
+ list_del(&evict->pgalloc.mru_list);
+ vm = evict->pgalloc.area;
+ evict->pgalloc.area = NULL;
+ INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+ nvmap_mru_unlock(c->share);
+ return vm;
+ }
+
+ idx = mru - c->share->mru_lists;
+
+ for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
+ if (idx >= c->share->nr_mru)
+ idx = 0;
+ mru = &c->share->mru_lists[idx];
+ while (!list_empty(mru) && !vm) {
+ evict = list_first_entry(mru, struct nvmap_handle,
+ pgalloc.mru_list);
+
+ BUG_ON(atomic_read(&evict->pin) != 0);
+ BUG_ON(!evict->pgalloc.area);
+ list_del(&evict->pgalloc.mru_list);
+ INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+ nvmap_mru_unlock(c->share);
+ tegra_iovmm_free_vm(evict->pgalloc.area);
+ evict->pgalloc.area = NULL;
+ vm = tegra_iovmm_create_vm(c->share->iovmm,
+ NULL, h->size, prot);
+ nvmap_mru_lock(c->share);
+ }
+ }
+ nvmap_mru_unlock(c->share);
+ return vm;
+}
+
+int nvmap_mru_init(struct nvmap_share *share)
+{
+ int i;
+ spin_lock_init(&share->mru_lock);
+ share->nr_mru = ARRAY_SIZE(mru_cutoff) + 1;
+
+ share->mru_lists = kzalloc(sizeof(struct list_head) * share->nr_mru,
+ GFP_KERNEL);
+
+ if (!share->mru_lists)
+ return -ENOMEM;
+
+	for (i = 0; i < share->nr_mru; i++)
+ INIT_LIST_HEAD(&share->mru_lists[i]);
+
+ return 0;
+}
+
+void nvmap_mru_destroy(struct nvmap_share *share)
+{
+	kfree(share->mru_lists);
+	share->mru_lists = NULL;
+}
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.h b/drivers/video/tegra/nvmap/nvmap_mru.h
new file mode 100644
index 000000000000..bfc7fceae856
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_mru.h
@@ -0,0 +1,84 @@
+/*
+ * drivers/video/tegra/nvmap/nvmap_mru.h
+ *
+ * IOVMM virtualization support for nvmap
+ *
+ * Copyright (c) 2009-2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __VIDEO_TEGRA_NVMAP_MRU_H
+#define __VIDEO_TEGRA_NVMAP_MRU_H
+
+#include <linux/spinlock.h>
+
+#include "nvmap.h"
+
+struct tegra_iovmm_area;
+struct tegra_iovmm_client;
+
+#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
+
+static inline void nvmap_mru_lock(struct nvmap_share *share)
+{
+ spin_lock(&share->mru_lock);
+}
+
+static inline void nvmap_mru_unlock(struct nvmap_share *share)
+{
+ spin_unlock(&share->mru_lock);
+}
+
+int nvmap_mru_init(struct nvmap_share *share);
+
+void nvmap_mru_destroy(struct nvmap_share *share);
+
+size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm);
+
+void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h);
+
+void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h);
+
+struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+ struct nvmap_handle *h);
+
+#else
+
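+/* Without CONFIG_NVMAP_RECLAIM_UNPINNED_VM, a handle keeps its IOVMM
+ * area for as long as it is allocated, so the MRU hooks reduce to no-ops. */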
+#define nvmap_mru_lock(_s) do { } while (0)
+#define nvmap_mru_unlock(_s) do { } while (0)
+#define nvmap_mru_init(_s) 0
+#define nvmap_mru_destroy(_s) do { } while (0)
+#define nvmap_mru_vm_size(_a) tegra_iovmm_get_vm_size(_a)
+
+static inline void nvmap_mru_insert_locked(struct nvmap_share *share,
+ struct nvmap_handle *h)
+{ }
+
+static inline void nvmap_mru_remove(struct nvmap_share *s,
+ struct nvmap_handle *h)
+{ }
+
+static inline struct tegra_iovmm_area *nvmap_handle_iovmm(struct nvmap_client *c,
+ struct nvmap_handle *h)
+{
+ BUG_ON(!h->pgalloc.area);
+ return h->pgalloc.area;
+}
+
+#endif
+
+#endif
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 80b3b123dd7f..8d5d6381abd7 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -58,6 +58,13 @@ config W1_MASTER_GPIO
This support is also available as a module. If so, the module
will be called w1-gpio.
+config W1_MASTER_TEGRA
+	tristate "NVIDIA Tegra SoC 1-wire busmaster"
+ depends on ARCH_TEGRA
+ help
+	  Say Y here if you want to communicate with your 1-wire devices
+	  using the NVIDIA Tegra SoC one-wire interfaces.
+
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
depends on ARCH_OMAP2430 || ARCH_OMAP3
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index c5a3e96fcbab..41c5d28e60e7 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o
obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
+obj-$(CONFIG_W1_MASTER_TEGRA) += tegra_w1.o
diff --git a/drivers/w1/masters/tegra_w1.c b/drivers/w1/masters/tegra_w1.c
new file mode 100644
index 000000000000..9443c4b1dbc6
--- /dev/null
+++ b/drivers/w1/masters/tegra_w1.c
@@ -0,0 +1,491 @@
+/*
+ * drivers/w1/masters/tegra_w1.c
+ *
+ * W1 master driver for internal OWR controllers in NVIDIA Tegra SoCs.
+ *
+ * Copyright (C) 2010 Motorola, Inc
+ * Author: Andrei Warkentin <andreiw@motorola.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <mach/w1.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_log.h"
+
+#define DRIVER_NAME "tegra_w1"
+
+/* OWR_CONTROL_0 is the main control register; it should be written last,
+   after all other settings have been configured. */
+#define OWR_CONTROL (0x0)
+#define OC_RD_BIT (1 << 31)
+#define OC_WR0_BIT (1 << 30)
+#define OC_RD_SCLK_SHIFT (23)
+#define OC_RD_SCLK_MASK (0xF)
+#define OC_P_SCLK_SHIFT (15)
+#define OC_P_SCLK_MASK (0xFF)
+#define OC_BIT_XMODE (1 << 2)
+#define OC_GO (1 << 0)
+
+/* OWR_WR_RD_TCTL_0 controls read/write timings. */
+#define OWR_WR_RD_TCTL (0xc)
+#define ORWT_TSU_SHIFT (28)
+#define ORWT_TSU_MASK (0x3)
+#define ORWT_TRELEASE_SHIFT (22)
+#define ORWT_TRELEASE_MASK (0x3F)
+#define ORWT_TRDV_SHIFT (18)
+#define ORWT_TRDV_MASK (0xF)
+#define ORWT_TLOW0_SHIFT (11)
+#define ORWT_TLOW0_MASK (0x7F)
+#define ORWT_TLOW1_SHIFT (7)
+#define ORWT_TLOW1_MASK (0xF)
+#define ORWT_TSLOT_SHIFT (0)
+#define ORWT_TSLOT_MASK (0x7F)
+
+/* OWR_RST_PRES_TCTL_0 controls reset presence timings. */
+#define OWR_RST_PRES_TCTL (0x10)
+#define ORPT_TPDL_SHIFT (24)
+#define ORPT_TPDL_MASK (0xFF)
+#define ORPT_TPDH_SHIFT (18)
+#define ORPT_TPDH_MASK (0x3F)
+#define ORPT_TRSTL_SHIFT (9)
+#define ORPT_TRSTL_MASK (0x1FF)
+#define ORPT_TRSTH_SHIFT (0)
+#define ORPT_TRSTH_MASK (0x1FF)
+
+/* OWR_INTR_MASK_0 stores the masks for the interrupts. */
+#define OWR_INTR_MASK (0x24)
+#define OI_BIT_XFER_DONE (1 << 13)
+#define OI_PRESENCE_DONE (1 << 5)
+#define OI_PRESENCE_ERR (1 << 0)
+
+/* OWR_INTR_STATUS_0 is the interrupt status register. */
+#define OWR_INTR_STATUS (0x28)
+
+/* OWR_STATUS_0 is the status register. */
+#define OWR_STATUS (0x34)
+#define OS_READ_BIT_SHIFT (23)
+#define OS_RDY (1 << 0)
+
+/* Bit-transfer completion wait time. */
+#define BIT_XFER_COMPLETION_TIMEOUT_MSEC (5000)
+
+/* Errors in the interrupt status register for bit
+ transfers. */
+#define BIT_XFER_ERRORS (OI_PRESENCE_ERR)
+
+/* The OWR controller requires a 1 MHz clock. This value is in Hertz. */
+#define OWR_CLOCK (1000000)
+
+#define W1_ERR(format, ...) \
+ printk(KERN_ERR "(%s: line %d) " format, \
+ __func__, __LINE__, ## __VA_ARGS__)
+
+struct tegra_device {
+ bool ready;
+ struct w1_bus_master bus_master;
+ struct clk *clk;
+ void __iomem *ioaddr;
+ struct mutex mutex;
+ spinlock_t spinlock;
+ struct completion *transfer_completion;
+ unsigned long intr_status;
+ struct tegra_w1_timings *timings;
+};
+
+/* If debug_print & DEBUG_PRESENCE, log whether slaves were detected
+   in reset_bus. */
+#define DEBUG_PRESENCE (0x1)
+
+/* If debug_print & DEBUG_TIMEOUT, log timeouts encountered while waiting
+   for device interrupts. */
+#define DEBUG_TIMEOUT (0x2)
+
+static uint32_t debug_print;
+module_param_named(debug, debug_print, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debugging output commands:\n"
+ "\tbit 0 - log reset_bus presence detects\n"
+ "\tbit 1 - log interrupt timeouts\n");
+
+/* Reads the OWR register specified by base offset in 'reg'. */
+static inline unsigned long w1_readl(struct tegra_device *dev,
+ unsigned long reg)
+{
+ return readl(dev->ioaddr + reg);
+}
+
+/* Writes 'val' into the OWR register specified by base offset in 'reg'. */
+static inline void w1_writel(struct tegra_device *dev, unsigned long val,
+ unsigned long reg)
+{
+ writel(val, dev->ioaddr + reg);
+}
+
+/* Sets the interrupt mask of the device. */
+static inline void w1_imask(struct tegra_device *dev, unsigned long mask)
+{
+ w1_writel(dev, mask, OWR_INTR_MASK);
+}
+
+/* Waits for completion of a bit transfer, checks intr_status against
+ BIT_XFER_ERRORS and an additional provided bit mask. */
+static inline int w1_wait(struct tegra_device *dev, unsigned long mask)
+{
+ int ret;
+ unsigned long irq_flags;
+ unsigned long intr_status;
+
+ ret = wait_for_completion_timeout(dev->transfer_completion,
+ msecs_to_jiffies(BIT_XFER_COMPLETION_TIMEOUT_MSEC));
+
+ if (unlikely(!ret)) {
+ if (debug_print & DEBUG_TIMEOUT)
+ W1_ERR("timeout\n");
+ return -ETIME;
+ }
+
+ spin_lock_irqsave(&dev->spinlock, irq_flags);
+ intr_status = dev->intr_status;
+ dev->intr_status = 0;
+ spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+
+ if (unlikely(intr_status & BIT_XFER_ERRORS ||
+ !(intr_status & mask)))
+ return -EIO;
+ return 0;
+}
+
+/* Programs timing registers, and puts the device into a known state.
+ Interrupts are safe to enable past this point. */
+static int w1_setup(struct tegra_device *dev)
+{
+ unsigned long value;
+ clk_enable(dev->clk);
+
+ value =
+ ((dev->timings->tslot & ORWT_TSLOT_MASK) << ORWT_TSLOT_SHIFT) |
+ ((dev->timings->tlow1 & ORWT_TLOW1_MASK) << ORWT_TLOW1_SHIFT) |
+ ((dev->timings->tlow0 & ORWT_TLOW0_MASK) << ORWT_TLOW0_SHIFT) |
+ ((dev->timings->trdv & ORWT_TRDV_MASK) << ORWT_TRDV_SHIFT) |
+ ((dev->timings->trelease & ORWT_TRELEASE_MASK) <<
+ ORWT_TRELEASE_SHIFT) |
+ ((dev->timings->tsu & ORWT_TSU_MASK) << ORWT_TSU_SHIFT);
+ w1_writel(dev, value, OWR_WR_RD_TCTL);
+
+ value =
+ ((dev->timings->trsth & ORPT_TRSTH_MASK) << ORPT_TRSTH_SHIFT) |
+ ((dev->timings->trstl & ORPT_TRSTL_MASK) << ORPT_TRSTL_SHIFT) |
+ ((dev->timings->tpdh & ORPT_TPDH_MASK) << ORPT_TPDH_SHIFT) |
+ ((dev->timings->tpdl & ORPT_TPDL_MASK) << ORPT_TPDL_SHIFT);
+ w1_writel(dev, value, OWR_RST_PRES_TCTL);
+
+	/* Clear the interrupt status/mask registers in case
+	   anything was set in them. */
+ w1_imask(dev, 0);
+ w1_writel(dev, 0xFFFFFFFF, OWR_INTR_STATUS);
+ clk_disable(dev->clk);
+ return 0;
+}
+
+/* Interrupt handler for OWR communication. */
+static irqreturn_t tegra_w1_irq(int irq, void *cookie)
+{
+ unsigned long irq_flags;
+ unsigned long status;
+ struct tegra_device *dev = cookie;
+
+ status = w1_readl(dev, OWR_INTR_STATUS);
+	if (unlikely(!status)) {
+		/* Not for me if no status bits are set. */
+		return IRQ_NONE;
+	}
+
+ spin_lock_irqsave(&dev->spinlock, irq_flags);
+
+ if (likely(dev->transfer_completion)) {
+ dev->intr_status = status;
+ w1_writel(dev, status, OWR_INTR_STATUS);
+ complete(dev->transfer_completion);
+ } else {
+ W1_ERR("spurious interrupt, status = 0x%lx\n", status);
+ }
+
+ spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ return IRQ_HANDLED;
+}
+
+/* Perform a write-0 cycle if bit == 0, otherwise
+ perform a read cycle. */
+static u8 tegra_w1_touch_bit(void *data, u8 bit)
+{
+ int rc;
+ u8 return_bit;
+ unsigned long control;
+ DECLARE_COMPLETION_ONSTACK(touch_done);
+ struct tegra_device *dev = (struct tegra_device *) data;
+
+ return_bit = 0;
+ mutex_lock(&dev->mutex);
+ if (!dev->ready)
+ goto done;
+
+ clk_enable(dev->clk);
+ w1_imask(dev, OI_BIT_XFER_DONE);
+ dev->transfer_completion = &touch_done;
+ control =
+ ((dev->timings->rdsclk & OC_RD_SCLK_MASK) << OC_RD_SCLK_SHIFT) |
+ ((dev->timings->psclk & OC_P_SCLK_MASK) << OC_P_SCLK_SHIFT) |
+ OC_BIT_XMODE;
+
+ /* Read bit (well, writes a 1 to the bus as well). */
+ if (bit) {
+ w1_writel(dev, control | OC_RD_BIT, OWR_CONTROL);
+ rc = w1_wait(dev, OI_BIT_XFER_DONE);
+
+ if (rc) {
+ W1_ERR("write-1/read failed\n");
+ goto done;
+ }
+
+ return_bit =
+ (w1_readl(dev, OWR_STATUS) >> OS_READ_BIT_SHIFT) & 1;
+	} else {
+		/* Write 0. */
+		w1_writel(dev, control | OC_WR0_BIT, OWR_CONTROL);
+ rc = w1_wait(dev, OI_BIT_XFER_DONE);
+ if (rc) {
+ W1_ERR("write-0 failed\n");
+ goto done;
+ }
+ }
+
+done:
+
+ w1_imask(dev, 0);
+ dev->transfer_completion = NULL;
+ clk_disable(dev->clk);
+ mutex_unlock(&dev->mutex);
+ return return_bit;
+}
+
+/* Performs a bus reset cycle; returns 0 if slaves are present. */
+static u8 tegra_w1_reset_bus(void *data)
+{
+ int rc;
+ int presence;
+ unsigned long value;
+ DECLARE_COMPLETION_ONSTACK(reset_done);
+ struct tegra_device *dev = (struct tegra_device *) data;
+
+ presence = 1;
+ mutex_lock(&dev->mutex);
+ if (!dev->ready)
+ goto done;
+
+ clk_enable(dev->clk);
+ w1_imask(dev, OI_PRESENCE_DONE);
+ dev->transfer_completion = &reset_done;
+ value =
+ ((dev->timings->rdsclk & OC_RD_SCLK_MASK) << OC_RD_SCLK_SHIFT) |
+ ((dev->timings->psclk & OC_P_SCLK_MASK) << OC_P_SCLK_SHIFT) |
+ OC_BIT_XMODE | OC_GO;
+ w1_writel(dev, value, OWR_CONTROL);
+
+ rc = w1_wait(dev, OI_PRESENCE_DONE);
+ if (rc)
+ goto done;
+
+ presence = 0;
+done:
+
+ if (debug_print & DEBUG_PRESENCE) {
+ if (presence)
+ W1_ERR("no slaves present\n");
+ else
+ W1_ERR("slaves present\n");
+ }
+
+ w1_imask(dev, 0);
+ dev->transfer_completion = NULL;
+ clk_disable(dev->clk);
+ mutex_unlock(&dev->mutex);
+ return presence;
+}
+
+static int tegra_w1_probe(struct platform_device *pdev)
+{
+ int rc;
+ int irq;
+ struct resource *res;
+ struct tegra_device *dev;
+ struct tegra_w1_platform_data *plat = pdev->dev.platform_data;
+
+ printk(KERN_INFO "Driver for Tegra SoC 1-wire controller\n");
+
+ if (plat == NULL || plat->timings == NULL)
+ return -ENXIO;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL)
+ return -ENODEV;
+
+ irq = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -ENODEV;
+
+ dev = kzalloc(sizeof(struct tegra_device), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dev);
+ dev->clk = clk_get(&pdev->dev, plat->clk_id);
+ if (IS_ERR(dev->clk)) {
+ rc = PTR_ERR(dev->clk);
+ goto cleanup_alloc;
+ }
+
+ /* OWR requires 1MHz clock. */
+ rc = clk_set_rate(dev->clk, OWR_CLOCK);
+ if (rc)
+ goto cleanup_clock;
+
+	if (!request_mem_region(res->start, resource_size(res),
+				dev_name(&pdev->dev))) {
+ rc = -EBUSY;
+ goto cleanup_clock;
+ }
+
+	dev->ioaddr = ioremap(res->start, resource_size(res));
+ if (!dev->ioaddr) {
+ rc = -ENOMEM;
+ goto cleanup_reqmem;
+ }
+
+ dev->timings = plat->timings;
+ dev->bus_master.data = dev;
+ dev->bus_master.touch_bit = tegra_w1_touch_bit;
+ dev->bus_master.reset_bus = tegra_w1_reset_bus;
+
+ spin_lock_init(&dev->spinlock);
+ mutex_init(&dev->mutex);
+
+ /* Program device into known state. */
+ w1_setup(dev);
+
+ rc = request_irq(irq, tegra_w1_irq, IRQF_SHARED, DRIVER_NAME, dev);
+ if (rc)
+ goto cleanup_ioremap;
+
+ rc = w1_add_master_device(&dev->bus_master);
+ if (rc)
+ goto cleanup_irq;
+
+ dev->ready = true;
+ return 0;
+
+cleanup_irq:
+ free_irq(irq, dev);
+cleanup_ioremap:
+ iounmap(dev->ioaddr);
+cleanup_reqmem:
+	release_mem_region(res->start, resource_size(res));
+cleanup_clock:
+ clk_put(dev->clk);
+cleanup_alloc:
+ platform_set_drvdata(pdev, NULL);
+ kfree(dev);
+ return rc;
+}
+
+static int tegra_w1_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct tegra_device *dev = platform_get_drvdata(pdev);
+
+ mutex_lock(&dev->mutex);
+ dev->ready = false;
+ mutex_unlock(&dev->mutex);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ free_irq(res->start, dev);
+ iounmap(dev->ioaddr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+ clk_put(dev->clk);
+ platform_set_drvdata(pdev, NULL);
+ kfree(dev);
+ return 0;
+}
+
+static int tegra_w1_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int tegra_w1_resume(struct platform_device *pdev)
+{
+ struct tegra_device *dev = platform_get_drvdata(pdev);
+
+	/* Re-program timings and known state; register contents may not be
+	   preserved across suspend. */
+ w1_setup(dev);
+ return 0;
+}
+
+static struct platform_driver tegra_w1_driver = {
+ .probe = tegra_w1_probe,
+ .remove = tegra_w1_remove,
+ .suspend = tegra_w1_suspend,
+ .resume = tegra_w1_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_w1_init(void)
+{
+ return platform_driver_register(&tegra_w1_driver);
+}
+
+static void __exit tegra_w1_exit(void)
+{
+ platform_driver_unregister(&tegra_w1_driver);
+}
+
+module_init(tegra_w1_init);
+module_exit(tegra_w1_exit);
+
+MODULE_DESCRIPTION("Tegra W1 master driver");
+MODULE_AUTHOR("Andrei Warkentin <andreiw@motorola.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 24efd8ea41bb..1addd785739a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -195,6 +195,17 @@ config MPCORE_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called mpcore_wdt.
+config TEGRA_WATCHDOG
+ tristate "Tegra watchdog"
+ depends on ARCH_TEGRA
+ help
+ Say Y here to include support for the watchdog timer
+ embedded in NVIDIA Tegra SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tegra_wdt.
+
+
config EP93XX_WATCHDOG
tristate "EP93xx Watchdog"
depends on ARCH_EP93XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 8374503fcc6a..62d90bee8dbe 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o
obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
obj-$(CONFIG_MPCORE_WATCHDOG) += mpcore_wdt.o
+obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
new file mode 100644
index 000000000000..d7a9d7609a1f
--- /dev/null
+++ b/drivers/watchdog/tegra_wdt.c
@@ -0,0 +1,381 @@
+/*
+ * drivers/watchdog/tegra_wdt.c
+ *
+ * watchdog driver for NVIDIA tegra internal watchdog
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * based on drivers/watchdog/softdog.c and drivers/watchdog/omap_wdt.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+/* minimum and maximum watchdog trigger periods, in seconds */
+#define MIN_WDT_PERIOD 5
+#define MAX_WDT_PERIOD 1000
+
+#define TIMER_PTV 0x0
+ #define TIMER_EN (1 << 31)
+ #define TIMER_PERIODIC (1 << 30)
+
+#define TIMER_PCR 0x4
+ #define TIMER_PCR_INTR (1 << 30)
+
+#define WDT_EN (1 << 5)
+#define WDT_SEL_TMR1 (0 << 4)
+#define WDT_SYS_RST (1 << 2)
+
+static int heartbeat = 60;
+
+struct tegra_wdt {
+ struct miscdevice miscdev;
+ struct notifier_block notifier;
+ struct resource *res_src;
+ struct resource *res_wdt;
+ unsigned long users;
+ void __iomem *wdt_source;
+ void __iomem *wdt_timer;
+ int irq;
+ int timeout;
+ bool enabled;
+};
+
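+/* Only one watchdog instance is supported; the probe rejects a second
+ * device. */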
+static struct tegra_wdt *tegra_wdt_dev;
+
+static void tegra_wdt_set_timeout(struct tegra_wdt *wdt, int sec)
+{
+ u32 ptv, src;
+
+ ptv = readl(wdt->wdt_timer + TIMER_PTV);
+ src = readl(wdt->wdt_source);
+
+ writel(0, wdt->wdt_source);
+ wdt->timeout = clamp(sec, MIN_WDT_PERIOD, MAX_WDT_PERIOD);
+ if (ptv & TIMER_EN) {
+ /* since the watchdog reset occurs when a second interrupt
+ * is asserted before the first is processed, program the
+ * timer period to one-half of the watchdog period */
+ ptv = wdt->timeout * 1000000ul / 2;
+ ptv |= (TIMER_EN | TIMER_PERIODIC);
+ writel(ptv, wdt->wdt_timer + TIMER_PTV);
+ }
+ writel(src, wdt->wdt_source);
+}
+
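+/* Programs timer 1 at one-half the watchdog period and enables the
+ * watchdog with timer 1 as its trigger and system reset as its action. */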
+static void tegra_wdt_enable(struct tegra_wdt *wdt)
+{
+ u32 val;
+
+ val = wdt->timeout * 1000000ul / 2;
+ val |= (TIMER_EN | TIMER_PERIODIC);
+ writel(val, wdt->wdt_timer + TIMER_PTV);
+
+ val = WDT_EN | WDT_SEL_TMR1 | WDT_SYS_RST;
+ writel(val, wdt->wdt_source);
+}
+
+static void tegra_wdt_disable(struct tegra_wdt *wdt)
+{
+ writel(0, wdt->wdt_source);
+ writel(0, wdt->wdt_timer + TIMER_PTV);
+}
+
+static irqreturn_t tegra_wdt_interrupt(int irq, void *dev_id)
+{
+ struct tegra_wdt *wdt = dev_id;
+
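+	/* Acknowledge the timer interrupt: servicing each periodic interrupt
+	 * is what keeps the watchdog from resetting the system (see
+	 * tegra_wdt_set_timeout()). */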
+ writel(TIMER_PCR_INTR, wdt->wdt_timer + TIMER_PCR);
+ return IRQ_HANDLED;
+}
+
+static int tegra_wdt_notify(struct notifier_block *this,
+ unsigned long code, void *dev)
+{
+ struct tegra_wdt *wdt = container_of(this, struct tegra_wdt, notifier);
+
+ if (code == SYS_DOWN || code == SYS_HALT)
+ tegra_wdt_disable(wdt);
+ return NOTIFY_DONE;
+}
+
+static int tegra_wdt_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct tegra_wdt *wdt = dev_get_drvdata(miscdev->parent);
+
+ if (test_and_set_bit(1, &wdt->users))
+ return -EBUSY;
+
+ wdt->enabled = true;
+ tegra_wdt_set_timeout(wdt, heartbeat);
+ tegra_wdt_enable(wdt);
+ file->private_data = wdt;
+ return nonseekable_open(inode, file);
+}
+
+static int tegra_wdt_release(struct inode *inode, struct file *file)
+{
+ struct tegra_wdt *wdt = file->private_data;
+
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ tegra_wdt_disable(wdt);
+ wdt->enabled = false;
+#endif
+ wdt->users = 0;
+ return 0;
+}
+
+static long tegra_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct tegra_wdt *wdt = file->private_data;
+ static DEFINE_SPINLOCK(lock);
+ int new_timeout;
+ static const struct watchdog_info ident = {
+ .identity = "Tegra Watchdog",
+ .options = WDIOF_SETTIMEOUT,
+ .firmware_version = 0,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+		return copy_to_user((struct watchdog_info __user *)arg, &ident,
+				    sizeof(ident)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, (int __user *)arg);
+
+ case WDIOC_KEEPALIVE:
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_timeout, (int __user *)arg))
+ return -EFAULT;
+ spin_lock(&lock);
+ tegra_wdt_disable(wdt);
+ tegra_wdt_set_timeout(wdt, new_timeout);
+ tegra_wdt_enable(wdt);
+ spin_unlock(&lock);
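+		/* fall through: return the newly programmed timeout */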
+ case WDIOC_GETTIMEOUT:
+ return put_user(wdt->timeout, (int __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static ssize_t tegra_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ return len;
+}
+
+static const struct file_operations tegra_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = tegra_wdt_write,
+ .unlocked_ioctl = tegra_wdt_ioctl,
+ .open = tegra_wdt_open,
+ .release = tegra_wdt_release,
+};
+
+static int tegra_wdt_probe(struct platform_device *pdev)
+{
+ struct resource *res_src, *res_wdt, *res_irq;
+ struct tegra_wdt *wdt;
+ int ret = 0;
+
+ if (pdev->id != -1) {
+ dev_err(&pdev->dev, "only id -1 supported\n");
+ return -ENODEV;
+ }
+
+ if (tegra_wdt_dev != NULL) {
+ dev_err(&pdev->dev, "watchdog already registered\n");
+ return -EIO;
+ }
+
+ res_src = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res_wdt = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!res_src || !res_wdt || !res_irq) {
+ dev_err(&pdev->dev, "incorrect resources\n");
+ return -ENOENT;
+ }
+
+ wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);
+ if (!wdt) {
+ dev_err(&pdev->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ wdt->irq = -1;
+ wdt->miscdev.parent = &pdev->dev;
+ wdt->miscdev.minor = WATCHDOG_MINOR;
+ wdt->miscdev.name = "watchdog";
+ wdt->miscdev.fops = &tegra_wdt_fops;
+
+ wdt->notifier.notifier_call = tegra_wdt_notify;
+
+ res_src = request_mem_region(res_src->start, resource_size(res_src),
+ pdev->name);
+ res_wdt = request_mem_region(res_wdt->start, resource_size(res_wdt),
+ pdev->name);
+
+ if (!res_src || !res_wdt) {
+ dev_err(&pdev->dev, "unable to request memory resources\n");
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ wdt->wdt_source = ioremap(res_src->start, resource_size(res_src));
+ wdt->wdt_timer = ioremap(res_wdt->start, resource_size(res_wdt));
+ if (!wdt->wdt_source || !wdt->wdt_timer) {
+ dev_err(&pdev->dev, "unable to map registers\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ tegra_wdt_disable(wdt);
+
+ ret = request_irq(res_irq->start, tegra_wdt_interrupt, IRQF_DISABLED,
+ dev_name(&pdev->dev), wdt);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to configure IRQ\n");
+ goto fail;
+ }
+
+ wdt->irq = res_irq->start;
+ wdt->res_src = res_src;
+ wdt->res_wdt = res_wdt;
+
+ wdt->timeout = heartbeat;
+
+ ret = register_reboot_notifier(&wdt->notifier);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register reboot notifier\n");
+ goto fail;
+ }
+
+ ret = misc_register(&wdt->miscdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register misc device\n");
+ unregister_reboot_notifier(&wdt->notifier);
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, wdt);
+ tegra_wdt_dev = wdt;
+ return 0;
+fail:
+ if (wdt->irq != -1)
+ free_irq(wdt->irq, wdt);
+ if (wdt->wdt_source)
+ iounmap(wdt->wdt_source);
+ if (wdt->wdt_timer)
+ iounmap(wdt->wdt_timer);
+ if (res_src)
+ release_mem_region(res_src->start, resource_size(res_src));
+ if (res_wdt)
+ release_mem_region(res_wdt->start, resource_size(res_wdt));
+ kfree(wdt);
+ return ret;
+}
+
+static int tegra_wdt_remove(struct platform_device *pdev)
+{
+ struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+ tegra_wdt_disable(wdt);
+
+ unregister_reboot_notifier(&wdt->notifier);
+ misc_deregister(&wdt->miscdev);
+ free_irq(wdt->irq, wdt);
+ iounmap(wdt->wdt_source);
+ iounmap(wdt->wdt_timer);
+ release_mem_region(wdt->res_src->start, resource_size(wdt->res_src));
+ release_mem_region(wdt->res_wdt->start, resource_size(wdt->res_wdt));
+ kfree(wdt);
+ tegra_wdt_dev = NULL;
+ return 0;
+}
+
+static int tegra_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+ tegra_wdt_disable(wdt);
+ return 0;
+}
+
+static int tegra_wdt_resume(struct platform_device *pdev)
+{
+ struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+ if (wdt->enabled)
+ tegra_wdt_enable(wdt);
+
+ return 0;
+}
+
+static struct platform_driver tegra_wdt_driver = {
+ .probe = tegra_wdt_probe,
+	.remove = tegra_wdt_remove,
+ .suspend = tegra_wdt_suspend,
+ .resume = tegra_wdt_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tegra_wdt",
+ },
+};
+
+static int __init tegra_wdt_init(void)
+{
+ return platform_driver_register(&tegra_wdt_driver);
+}
+
+static void __exit tegra_wdt_exit(void)
+{
+ platform_driver_unregister(&tegra_wdt_driver);
+}
+
+module_init(tegra_wdt_init);
+module_exit(tegra_wdt_exit);
+
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_DESCRIPTION("Tegra Watchdog Driver");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+	"Watchdog heartbeat period in seconds (default 60, clamped to 5-1000)");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:tegra_wdt");
+