author    Varun Wadekar <vwadekar@nvidia.com>    2011-07-07 16:50:04 +0530
committer Niket Sirsi <nsirsi@nvidia.com>        2011-07-21 18:02:03 -0700
commit    5437e3230679fbc6e342f7e6787e3cc1692e5df6 (patch)
tree      c72900a437f7ef1d74c4e711aacfeec51e8e9ad7
parent    9ce7a159660b9f9087a4d655b4155d199a6e4eff (diff)
crypto: tegra-aes: store keys in individual requests
store the keys in the individual request context structure, since there
is no other way to manage keys across different crypto operations. also
retry requests thrice in case of errors

Bug 833165
Bug 818126

Change-Id: I7d3381477b5bc134da5727bb39ea89abe05dc17c
Signed-off-by: Varun Wadekar <vwadekar@nvidia.com>
Reviewed-on: http://git-master/r/40008
Reviewed-by: WooJung Min <wmin@nvidia.com>
Tested-by: WooJung Min <wmin@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
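The heart of the patch is moving key material out of the shared engine/device structs and into the per-tfm context, so concurrent transforms can no longer clobber one another's keys between setkey() and the point where the engine actually programs a hardware key slot. Below is a minimal, self-contained sketch of that pattern; the struct and function names are illustrative stand-ins, not the driver's real symbols:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define AES_MAX_KEY_SIZE 32	/* bytes, as in <crypto/aes.h> */
#define AES_KEYSIZE_128  16

/* Per-tfm context: key material now lives here rather than in the
 * shared engine struct, so transforms cannot clobber each other. */
struct aes_ctx_sketch {
	uint8_t key[AES_MAX_KEY_SIZE];
	int keylen;
	bool use_ssk;	/* fall back to the hardware secure storage key */
};

/* setkey() only caches the key in the context; it is programmed into
 * a hardware key slot later, when the engine dequeues the owning
 * request (aes_set_key() in the patch below). */
static int sketch_setkey(struct aes_ctx_sketch *ctx, const uint8_t *key,
			 int keylen)
{
	if (!key) {			/* no key supplied: use the SSK slot */
		ctx->use_ssk = true;
		ctx->keylen = AES_KEYSIZE_128;
		return 0;
	}
	memset(ctx->key, 0, sizeof(ctx->key));
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	ctx->use_ssk = false;
	return 0;
}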
-rw-r--r--    drivers/crypto/tegra-aes.c    | 171
1 file changed, 114 insertions(+), 57 deletions(-)
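The other half of the patch is the retry path: each IRQ handler latches error bits into a new per-engine status field, and aes_start_crypt() re-issues its command queue up to three times while that status is non-zero. A compressed sketch of that control flow, with eng_sketch, irq_sketch, and submit_cmd_queue as illustrative stand-ins for the driver's real register plumbing:

#include <errno.h>
#include <stdint.h>

#define MIN_RETRIES 3			/* matches the patch */
#define INT_ERROR_MASK_SKETCH 0xFFF000	/* illustrative error bits */

/* Stand-in for the engine state; only ->status mirrors the patch. */
struct eng_sketch {
	volatile uint32_t status;	/* error bits latched by the IRQ */
};

/* The real handlers clear and latch INT_ERROR_MASK bits from
 * INTR_STATUS; this stand-in shows just that bookkeeping. */
static void irq_sketch(struct eng_sketch *eng, uint32_t intr_status)
{
	eng->status = intr_status & INT_ERROR_MASK_SKETCH;
}

static void submit_cmd_queue(struct eng_sketch *eng)
{
	/* stand-in for building and writing the ICMDQUE_WR sequence */
	(void)eng;
}

/* Core retry control flow added by the patch: if the IRQ handler
 * latched an error, re-issue the whole command queue, at most
 * MIN_RETRIES more times. */
static int start_crypt_sketch(struct eng_sketch *eng)
{
	int retries = MIN_RETRIES;

start:
	submit_cmd_queue(eng);
	if (eng->status != 0 && retries-- > 0)
		goto start;

	return eng->status ? -EIO : 0;
}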
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index c4990dd504cb..946d51035262 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -142,6 +142,7 @@ struct tegra_aes_reqctx {
struct tegra_aes_engine {
struct tegra_aes_dev *dd;
+ struct tegra_aes_ctx *ctx;
struct clk *iclk;
struct clk *pclk;
struct ablkcipher_request *req;
@@ -163,12 +164,10 @@ struct tegra_aes_engine {
u32 *buf_in;
u32 *buf_out;
int res_id;
- int slot_num;
- int keylen;
unsigned long busy;
u8 irq;
bool new_key;
- bool use_ssk;
+ u32 status;
};
struct tegra_aes_dev {
@@ -178,7 +177,6 @@ struct tegra_aes_dev {
struct tegra_aes_engine bsea;
struct nvmap_client *client;
struct nvmap_handle_ref *h_ref;
- struct tegra_aes_ctx *ctx;
struct crypto_queue queue;
spinlock_t lock;
u64 ctr;
@@ -190,7 +188,12 @@ static struct tegra_aes_dev *aes_dev;
struct tegra_aes_ctx {
struct tegra_aes_dev *dd;
+ struct tegra_aes_engine *eng;
struct tegra_aes_slot *slot;
+ u8 key[AES_MAX_KEY_SIZE];
+ int keylen;
+ bool use_ssk;
+ u8 dt[DEFAULT_RNG_BLK_SZ];
};
static struct tegra_aes_ctx rng_ctx;
@@ -225,13 +228,13 @@ static inline void aes_writel(struct tegra_aes_engine *engine,
static int alloc_iram(struct tegra_aes_dev *dd)
{
- size_t size, align ;
+ size_t size;
int err;
dd->h_ref = NULL;
/* [key+iv+u-iv=64B] * 8 = 512Bytes */
- size = align = (AES_HW_KEY_TABLE_LENGTH_BYTES * AES_NR_KEYSLOTS);
+ size = AES_MAX_KEY_SIZE;
dd->client = nvmap_create_client(nvmap_dev, "aes_bsea");
if (IS_ERR(dd->client)) {
dev_err(dd->dev, "nvmap_create_client failed\n");
@@ -246,7 +249,7 @@ static int alloc_iram(struct tegra_aes_dev *dd)
/* Allocate memory in the iram */
err = nvmap_alloc_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref),
- NVMAP_HEAP_CARVEOUT_IRAM, align, 0);
+ NVMAP_HEAP_CARVEOUT_IRAM, size, 0);
if (err) {
dev_err(dd->dev, "nvmap_alloc_handle_id failed\n");
nvmap_free_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref));
@@ -258,11 +261,11 @@ static int alloc_iram(struct tegra_aes_dev *dd)
dd->bsea.iram_virt = nvmap_mmap(dd->h_ref); /* get virtual address */
if (!dd->bsea.iram_virt) {
dev_err(dd->dev, "%s: no mem, BSEA IRAM alloc failure\n",
- __func__);
+ __func__);
goto out;
}
- memset(dd->bsea.iram_virt, 0, dd->h_ref->handle->size);
+ memset(dd->bsea.iram_virt, 0, size);
return 0;
out:
@@ -325,12 +328,21 @@ static void aes_hw_deinit(struct tegra_aes_engine *engine)
clk_disable(engine->iclk);
}
+#define MIN_RETRIES 3
static int aes_start_crypt(struct tegra_aes_engine *eng, u32 in_addr,
u32 out_addr, int nblocks, int mode, bool upd_iv)
{
u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
int qlen = 0, i, eng_busy, icq_empty, ret;
u32 value;
+ int retries = MIN_RETRIES;
+
+start:
+ do {
+ value = aes_readl(eng, INTR_STATUS);
+ eng_busy = value & BIT(0);
+ icq_empty = value & BIT(3);
+ } while (eng_busy || (!icq_empty));
aes_writel(eng, 0xFFFFFFFF, INTR_STATUS);
@@ -354,7 +366,7 @@ static int aes_start_crypt(struct tegra_aes_engine *eng, u32 in_addr,
value = 0;
if (mode & FLAGS_CBC) {
value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
- ((eng->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
(((mode & FLAGS_ENCRYPT) ? 2 : 3)
<< SECURE_XOR_POS_SHIFT) |
@@ -367,7 +379,7 @@ static int aes_start_crypt(struct tegra_aes_engine *eng, u32 in_addr,
(0 << SECURE_HASH_ENB_SHIFT));
} else if (mode & FLAGS_OFB) {
value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
- ((eng->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
((u32)0 << SECURE_IV_SELECT_SHIFT) |
(SECURE_XOR_POS_FIELD) |
@@ -378,7 +390,7 @@ static int aes_start_crypt(struct tegra_aes_engine *eng, u32 in_addr,
(0 << SECURE_HASH_ENB_SHIFT));
} else if (mode & FLAGS_RNG){
value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
- ((eng->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
(0 << SECURE_XOR_POS_SHIFT) |
(0 << SECURE_INPUT_SEL_SHIFT) |
@@ -388,7 +400,7 @@ static int aes_start_crypt(struct tegra_aes_engine *eng, u32 in_addr,
(0 << SECURE_HASH_ENB_SHIFT));
} else {
value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) |
- ((eng->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
+ ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) |
((u32)upd_iv << SECURE_IV_SELECT_SHIFT) |
(0 << SECURE_XOR_POS_SHIFT) |
(0 << SECURE_INPUT_SEL_SHIFT) |
@@ -422,6 +434,12 @@ static int aes_start_crypt(struct tegra_aes_engine *eng, u32 in_addr,
disable_irq(eng->irq);
aes_writel(eng, cmdq[qlen - 1], ICMDQUE_WR);
+
+ if ((eng->status != 0) && (retries-- > 0)) {
+ qlen = 0;
+ goto start;
+ }
+
return 0;
}
@@ -453,7 +471,7 @@ static struct tegra_aes_slot *aes_find_key_slot(struct tegra_aes_dev *dd)
return found ? slot : NULL;
}
-static int aes_set_key(struct tegra_aes_engine *eng)
+static int aes_set_key(struct tegra_aes_engine *eng, int slot_num)
{
struct tegra_aes_dev *dd = aes_dev;
u32 value, cmdq[2];
@@ -472,13 +490,16 @@ static int aes_set_key(struct tegra_aes_engine *eng)
/* select the key slot */
value = aes_readl(eng, SECURE_CONFIG);
value &= ~SECURE_KEY_INDEX_FIELD;
- value |= (eng->slot_num << SECURE_KEY_INDEX_SHIFT);
+ value |= (slot_num << SECURE_KEY_INDEX_SHIFT);
aes_writel(eng, value, SECURE_CONFIG);
- if (eng->use_ssk)
+ if (slot_num == SSK_SLOT_NUM)
goto out;
if (eng->res_id == TEGRA_ARB_BSEV) {
+ memset(dd->bsev.ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+ memcpy(dd->bsev.ivkey_base, eng->ctx->key, eng->ctx->keylen);
+
/* copy the key table from sdram to vram */
cmdq[0] = 0;
cmdq[0] = UCQOPCODE_MEMDMAVD << ICQBITSHIFT_OPCODE |
@@ -500,7 +521,7 @@ static int aes_set_key(struct tegra_aes_engine *eng)
value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
UCQCMD_VRAM_SEL << ICQBITSHIFT_VRAMSEL |
- (UCQCMD_KEYTABLESEL | eng->slot_num)
+ (UCQCMD_KEYTABLESEL | slot_num)
<< ICQBITSHIFT_KEYTABLEID;
aes_writel(eng, value, ICMDQUE_WR);
do {
@@ -509,11 +530,20 @@ static int aes_set_key(struct tegra_aes_engine *eng)
icq_empty = value & BIT(3);
} while (eng_busy & (!icq_empty));
} else {
+ memset(dd->bsea.iram_virt, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
+ memcpy(dd->bsea.iram_virt, eng->ctx->key, eng->ctx->keylen);
+
+ /* set iram access cfg bit 0 if address >128K */
+ if (dd->bsea.iram_phys > 0x00020000)
+ aes_writel(eng, BIT(0), IRAM_ACCESS_CFG);
+ else
+ aes_writel(eng, 0, IRAM_ACCESS_CFG);
+
/* settable command to get key into internal registers */
value = 0;
value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE |
UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL |
- (UCQCMD_KEYTABLESEL | eng->slot_num)
+ (UCQCMD_KEYTABLESEL | slot_num)
<< ICQBITSHIFT_KEYTABLEID |
dd->bsea.iram_phys >> 2;
aes_writel(eng, value, ICMDQUE_WR);
@@ -523,6 +553,7 @@ static int aes_set_key(struct tegra_aes_engine *eng)
icq_empty = value & BIT(3);
} while (eng_busy & (!icq_empty));
}
+
out:
return 0;
}
@@ -556,6 +587,7 @@ static int tegra_aes_handle_req(struct tegra_aes_engine *eng)
req = ablkcipher_request_cast(async_req);
dev_dbg(dd->dev, "%s: get new req (engine #%d)\n", __func__,
eng->res_id);
+
if (!req->src || !req->dst)
return -EINVAL;
@@ -582,9 +614,14 @@ static int tegra_aes_handle_req(struct tegra_aes_engine *eng)
ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+ eng->ctx = ctx;
if (eng->new_key) {
- aes_set_key(eng);
+ if (ctx->use_ssk)
+ aes_set_key(eng, SSK_SLOT_NUM);
+ else
+ aes_set_key(eng, ctx->slot->slot_num);
+
eng->new_key = false;
}
@@ -592,7 +629,7 @@ static int tegra_aes_handle_req(struct tegra_aes_engine *eng)
/* set iv to the aes hw slot
* Hw generates updated iv only after iv is set in slot.
* So key and iv is passed asynchronously.
- */
+ */
memcpy(eng->buf_in, (u8 *)req->info, AES_BLOCK_SIZE);
ret = aes_start_crypt(eng, (u32)eng->dma_buf_in,
@@ -687,21 +724,16 @@ static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
ctx->slot = key_slot;
}
- /* copy the key */
- memset(dd->bsev.ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
- memset(dd->bsea.iram_virt, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
- memcpy(dd->bsev.ivkey_base, key, keylen);
- memcpy(dd->bsea.iram_virt, key, keylen);
+ /* copy the key to the proper slot */
+ memset(ctx->key, 0, AES_MAX_KEY_SIZE);
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ ctx->use_ssk = false;
} else {
- dd->bsev.slot_num = SSK_SLOT_NUM;
- dd->bsea.slot_num = SSK_SLOT_NUM;
- dd->bsev.use_ssk = true;
- dd->bsea.use_ssk = true;
- keylen = AES_KEYSIZE_128;
+ ctx->use_ssk = true;
+ ctx->keylen = AES_KEYSIZE_128;
}
- dd->bsev.keylen = keylen;
- dd->bsea.keylen = keylen;
dd->bsev.new_key = true;
dd->bsea.new_key = true;
dev_dbg(dd->dev, "done\n");
@@ -747,8 +779,11 @@ static irqreturn_t aes_bsev_irq(int irq, void *dev_id)
u32 value = aes_readl(&dd->bsev, INTR_STATUS);
dev_dbg(dd->dev, "bsev irq_stat: 0x%x", value);
- if (value & INT_ERROR_MASK)
+ dd->bsev.status = 0;
+ if (value & INT_ERROR_MASK) {
aes_writel(&dd->bsev, INT_ERROR_MASK, INTR_STATUS);
+ dd->bsev.status = value & INT_ERROR_MASK;
+ }
value = aes_readl(&dd->bsev, INTR_STATUS);
if (!(value & ENGINE_BUSY_FIELD))
@@ -763,8 +798,11 @@ static irqreturn_t aes_bsea_irq(int irq, void *dev_id)
u32 value = aes_readl(&dd->bsea, INTR_STATUS);
dev_dbg(dd->dev, "bsea irq_stat: 0x%x", value);
- if (value & INT_ERROR_MASK)
+ dd->bsea.status = 0;
+ if (value & INT_ERROR_MASK) {
aes_writel(&dd->bsea, INT_ERROR_MASK, INTR_STATUS);
+ dd->bsea.status = value & INT_ERROR_MASK;
+ }
value = aes_readl(&dd->bsea, INTR_STATUS);
if (!(value & ENGINE_BUSY_FIELD))
@@ -782,9 +820,10 @@ static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
int bsev_busy;
int bsea_busy;
- dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+ dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n", req->nbytes,
!!(mode & FLAGS_ENCRYPT),
- !!(mode & FLAGS_CBC));
+ !!(mode & FLAGS_CBC),
+ !!(mode & FLAGS_OFB));
rctx->mode = mode;
@@ -830,13 +869,15 @@ static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req)
{
return tegra_aes_crypt(req, FLAGS_OFB);
}
+
static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
unsigned int dlen)
{
struct tegra_aes_dev *dd = aes_dev;
- struct tegra_aes_engine *eng = &dd->bsea;
+ struct tegra_aes_engine *eng = rng_ctx.eng;
+ unsigned long flags;
int ret, i;
- u8 *dest = rdata, *dt = dd->dt;
+ u8 *dest = rdata, *dt = rng_ctx.dt;
/* take mutex to access the aes hw */
mutex_lock(&aes_lock);
@@ -878,6 +919,11 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
out:
aes_hw_deinit(eng);
+ spin_lock_irqsave(&dd->lock, flags);
+ clear_bit(FLAGS_BUSY, &eng->busy);
+ rng_ctx.eng = NULL;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
fail:
/* release the hardware semaphore */
tegra_arb_mutex_unlock(eng->res_id);
@@ -891,23 +937,36 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
{
struct tegra_aes_dev *dd = aes_dev;
struct tegra_aes_ctx *ctx = &rng_ctx;
- struct tegra_aes_engine *eng = &dd->bsea;
+ struct tegra_aes_engine *eng = NULL;
struct tegra_aes_slot *key_slot;
+ int bsev_busy = false;
+ int bsea_busy = false;
+ unsigned long flags;
struct timespec ts;
- int ret = 0;
u64 nsec, tmp[2];
+ int ret = 0;
u8 *dt;
- if (!eng || !dd) {
- dev_err(dd->dev, "eng=0x%x, dd=0x%x\n",
- (unsigned int)eng, (unsigned int)dd);
- return -EINVAL;
- }
-
if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
return -ENOMEM;
}
+ spin_lock_irqsave(&dd->lock, flags);
+ bsev_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsev.busy);
+ if (bsev_busy)
+ bsea_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsea.busy);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!bsev_busy) {
+ eng = &dd->bsev;
+ } else if (!bsea_busy) {
+ eng = &dd->bsea;
+ } else {
+ dev_err(dd->dev, "%s: hardware engine is busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ctx->eng = eng;
dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
/* take mutex to access the aes hw */
@@ -923,10 +982,6 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
ctx->slot = key_slot;
}
- /* copy the key to the key slot */
- memset(dd->bsea.iram_virt, 0, AES_HW_KEY_TABLE_LENGTH_BYTES);
- memcpy(dd->bsea.iram_virt, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
-
/* take the hardware semaphore */
if (tegra_arb_mutex_lock_timeout(eng->res_id, ARB_SEMA_TIMEOUT) < 0) {
dev_err(dd->dev, "aes hardware (%d) not available\n",
@@ -941,10 +996,11 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
goto fail;
}
- dd->ctx = ctx;
- eng->slot_num = ctx->slot->slot_num;
- eng->keylen = AES_KEYSIZE_128;
- aes_set_key(eng);
+ memcpy(ctx->key, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
+
+ eng->ctx = ctx;
+ eng->ctx->keylen = AES_KEYSIZE_128;
+ aes_set_key(eng, ctx->slot->slot_num);
/* set seed to the aes hw slot */
memset(eng->buf_in, 0, AES_BLOCK_SIZE);
@@ -968,7 +1024,7 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
tmp[1] = tegra_chip_uid();
dt = (u8 *)tmp;
}
- memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
+ memcpy(ctx->dt, dt, DEFAULT_RNG_BLK_SZ);
out:
aes_hw_deinit(eng);
@@ -1175,14 +1231,15 @@ static int tegra_aes_probe(struct platform_device *pdev)
* - hardware key table
* - key schedule
*/
- dd->bsev.ivkey_base = dma_alloc_coherent(dev, SZ_512,
- &dd->bsev.ivkey_phys_base, GFP_KERNEL);
dd->bsea.ivkey_base = NULL;
+ dd->bsev.ivkey_base = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
+ &dd->bsev.ivkey_phys_base, GFP_KERNEL);
if (!dd->bsev.ivkey_base) {
dev_err(dev, "can not allocate iv/key buffer for BSEV\n");
err = -ENOMEM;
goto out;
}
+ memset(dd->bsev.ivkey_base, 0, AES_MAX_KEY_SIZE);
dd->bsev.buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
&dd->bsev.dma_buf_in, GFP_KERNEL);