Diffstat (limited to 'drivers/crypto/mxs-dcp.c')
-rw-r--r--  drivers/crypto/mxs-dcp.c  79
1 file changed, 78 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 78212ba16eeb..3e1df1594b4e 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
+#include <linux/clk.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
@@ -83,6 +84,10 @@ struct dcp {
spinlock_t lock[DCP_MAX_CHANS];
struct task_struct *thread[DCP_MAX_CHANS];
struct crypto_queue queue[DCP_MAX_CHANS];
+#ifdef CONFIG_ARM
+ struct clk *dcp_clk;
+#endif
+ int enable_sha_workaround;
};
enum dcp_chan {
@@ -116,6 +121,11 @@ struct dcp_sha_req_ctx {
unsigned int fini:1;
};
+struct dcp_export_state {
+ struct dcp_sha_req_ctx req_ctx;
+ struct dcp_async_ctx async_ctx;
+};
+
/*
* There can even be only one instance of the MXS DCP due to the
* design of Linux Crypto API.
@@ -583,7 +593,8 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
/*
* Align driver with hw behavior when generating null hashes
*/
- if (rctx->init && rctx->fini && desc->size == 0) {
+ if (rctx->init && rctx->fini && desc->size == 0 &&
+ sdcp->enable_sha_workaround) {
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
const uint8_t *sha_buf =
(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
@@ -809,6 +820,35 @@ static int dcp_sha_finup(struct ahash_request *req)
return dcp_sha_update_fx(req, 1);
}
+static int dcp_sha_export(struct ahash_request *req, void *out)
+{
+ struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
+ struct dcp_export_state *export = out;
+
+ memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
+ memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
+
+ return 0;
+}
+
+static int dcp_sha_import(struct ahash_request *req, const void *in)
+{
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+ const struct dcp_export_state *export = in;
+
+ memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
+ memset(actx, 0, sizeof(struct dcp_async_ctx));
+
+ memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
+ memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
+
+ return 0;
+}
+
static int dcp_sha_digest(struct ahash_request *req)
{
int ret;
@@ -890,8 +930,11 @@ static struct ahash_alg dcp_sha1_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
+ .export = dcp_sha_export,
+ .import = dcp_sha_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct dcp_export_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-dcp",
@@ -914,8 +957,11 @@ static struct ahash_alg dcp_sha256_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
+ .export = dcp_sha_export,
+ .import = dcp_sha_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct dcp_export_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-dcp",
@@ -985,6 +1031,26 @@ static int mxs_dcp_probe(struct platform_device *pdev)
if (IS_ERR(sdcp->base))
return PTR_ERR(sdcp->base);
+#ifdef CONFIG_ARM
+ sdcp->dcp_clk = devm_clk_get(dev, "dcp");
+
+ if (IS_ERR(sdcp->dcp_clk)) {
+ ret = PTR_ERR(sdcp->dcp_clk);
+ dev_err(dev, "can't get DCP clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare(sdcp->dcp_clk);
+ if (ret < 0) {
+ dev_err(dev, "can't prepare DCP clock: %d\n", ret);
+ return ret;
+ }
+ ret = clk_enable(sdcp->dcp_clk);
+ if (ret < 0) {
+ dev_err(dev, "can't enable DCP clock: %d\n", ret);
+ return ret;
+ }
+#endif
ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
"dcp-vmi-irq", sdcp);
@@ -1045,6 +1111,11 @@ static int mxs_dcp_probe(struct platform_device *pdev)
crypto_init_queue(&sdcp->queue[i], 50);
}
+ /*
+ * Enable the null-hash workaround checked in mxs_dcp_run_sha()
+ */
+ sdcp->enable_sha_workaround = 1;
+
/* Create the SHA and AES handler threads. */
sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
NULL, "mxs_dcp_chan/sha");
@@ -1126,6 +1197,11 @@ static int mxs_dcp_remove(struct platform_device *pdev)
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
+#ifdef CONFIG_ARM
+ /* shut the DCP clock off before finalizing shutdown */
+ clk_disable_unprepare(sdcp->dcp_clk);
+#endif
+
platform_set_drvdata(pdev, NULL);
global_sdcp = NULL;
@@ -1136,6 +1212,7 @@ static int mxs_dcp_remove(struct platform_device *pdev)
static const struct of_device_id mxs_dcp_dt_ids[] = {
{ .compatible = "fsl,imx23-dcp", .data = NULL, },
{ .compatible = "fsl,imx28-dcp", .data = NULL, },
+ { .compatible = "fsl,imx6sl-dcp", .data = NULL, },
{ /* sentinel */ }
};
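
Usage note: the new .export/.import hooks are what let a caller snapshot an in-progress hash and resume it later, and .statesize (sizeof(struct dcp_export_state)) tells the caller how large that snapshot buffer must be. The sketch below shows how such a round trip is typically driven through the generic ahash API. It is illustrative only and not part of the patch: the demo function name is made up, while "sha256-dcp" is just the cra_driver_name registered above.

/*
 * Minimal, illustrative sketch (not part of the patch): exercise the new
 * export/import hooks through the generic ahash API. Error handling is
 * trimmed; the function name is hypothetical.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int dcp_sha_export_import_demo(void)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	void *state;
	int ret;

	tfm = crypto_alloc_ahash("sha256-dcp", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	/* .statesize guarantees this buffer fits struct dcp_export_state */
	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_ahash_init(req), &wait);
	/* ... crypto_ahash_update() calls would go here ... */
	if (!ret)
		ret = crypto_ahash_export(req, state);	/* dcp_sha_export() */
	if (!ret)
		ret = crypto_ahash_import(req, state);	/* dcp_sha_import() */

	kfree(state);
	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}

In-tree users such as algif_hash rely on exactly this export/import path when they duplicate a hash state, which is why the hooks and .statesize are needed for the driver to be usable there.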