Diffstat (limited to 'drivers/dma/imx-sdma.c')
-rw-r--r-- drivers/dma/imx-sdma.c | 1160
1 file changed, 909 insertions(+), 251 deletions(-)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 558d509b7d85..cf90b7f36209 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -7,7 +7,8 @@
*
* Based on code from Freescale:
*
- * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2004-2016 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2018 NXP.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -30,7 +31,9 @@
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
+#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -48,6 +51,7 @@
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include "dmaengine.h"
+#include "virt-dma.h"
/* SDMA registers */
#define SDMA_H_C0PTR 0x000
@@ -79,6 +83,9 @@
#define SDMA_CHNENBL0_IMX35 0x200
#define SDMA_CHNENBL0_IMX31 0x080
#define SDMA_CHNPRI_0 0x100
+#define SDMA_DONE0_CONFIG 0x1000
+#define SDMA_DONE0_CONFIG_DONE_SEL 0x7
+#define SDMA_DONE0_CONFIG_DONE_DIS 0x6
/*
* Buffer descriptor status values.
@@ -173,11 +180,26 @@
#define SDMA_WATERMARK_LEVEL_SPDIF BIT(10)
#define SDMA_WATERMARK_LEVEL_SP BIT(11)
#define SDMA_WATERMARK_LEVEL_DP BIT(12)
+#define SDMA_WATERMARK_LEVEL_SD BIT(13)
+#define SDMA_WATERMARK_LEVEL_DD BIT(14)
#define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT BIT(31)
+#define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+#define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
+ BIT(DMA_MEM_TO_DEV) | \
+ BIT(DMA_DEV_TO_DEV))
+
+#define SDMA_WATERMARK_LEVEL_FIFOS_OFF 8
+#define SDMA_WATERMARK_LEVEL_SW_DONE BIT(23)
+#define SDMA_WATERMARK_LEVEL_SW_DONE_SEL_OFF 24
+
/*
* Mode/Count of data node descriptors - IPCv2
*/
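These two masks describe what the driver now advertises to dmaengine clients: 1-, 2-, 3- and 4-byte slave accesses and, new here, DMA_DEV_TO_DEV alongside the usual two directions. A minimal client-side sketch of querying them, assuming a channel already obtained elsewhere (e.g. via dma_request_chan()):

/*
 * Sketch only: check the advertised capabilities from a client driver.
 * "chan" is assumed to come from dma_request_chan() or similar.
 */
#include <linux/dmaengine.h>

static int sdma_check_caps(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	/* SDMA_DMA_BUSWIDTHS covers 1-, 2-, 3- and 4-byte accesses */
	if (!(caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		return -EINVAL;

	/* SDMA_DMA_DIRECTIONS adds DMA_DEV_TO_DEV for p2p scripts */
	if (!(caps.directions & BIT(DMA_DEV_TO_DEV)))
		return -EINVAL;

	return 0;
}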
@@ -284,9 +306,22 @@ struct sdma_context_data {
} __attribute__ ((packed));
#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
+#define SDMA_BD_MAX_CNT	0xfffc /* aligned to 4 bytes */
struct sdma_engine;
+struct sdma_desc {
+ struct virt_dma_desc vd;
+ struct list_head node;
+ unsigned int num_bd;
+ dma_addr_t bd_phys;
+ bool bd_iram;
+ unsigned int buf_tail;
+ unsigned int buf_ptail;
+ struct sdma_channel *sdmac;
+ struct sdma_buffer_descriptor *bd;
+};
+
/**
* struct sdma_channel - housekeeping for a SDMA channel
*
@@ -300,36 +335,40 @@ struct sdma_engine;
* @buf_tail ID of the buffer that was processed
* @buf_ptail ID of the previous buffer that was processed
* @num_bd max NUM_BD. number of descriptors currently handling
+ * @bd_iram flag indicating the memory location of buffer descriptor
*/
struct sdma_channel {
+ struct virt_dma_chan vc;
+ struct list_head pending;
struct sdma_engine *sdma;
+ struct sdma_desc *desc;
unsigned int channel;
enum dma_transfer_direction direction;
enum sdma_peripheral_type peripheral_type;
unsigned int event_id0;
unsigned int event_id1;
enum dma_slave_buswidth word_size;
- unsigned int buf_tail;
- unsigned int buf_ptail;
- unsigned int num_bd;
unsigned int period_len;
- struct sdma_buffer_descriptor *bd;
- dma_addr_t bd_phys;
unsigned int pc_from_device, pc_to_device;
unsigned int device_to_device;
+ unsigned int pc_to_pc;
unsigned long flags;
dma_addr_t per_address, per_address2;
unsigned long event_mask[2];
unsigned long watermark_level;
u32 shp_addr, per_addr;
- struct dma_chan chan;
- spinlock_t lock;
- struct dma_async_tx_descriptor desc;
enum dma_status status;
+ struct imx_dma_data data;
unsigned int chn_count;
unsigned int chn_real_count;
- struct tasklet_struct tasklet;
- struct imx_dma_data data;
+ bool context_loaded;
+ u32 bd_size_sum;
+ bool src_dualfifo;
+ bool dst_dualfifo;
+ unsigned int fifo_num;
+ bool sw_done;
+ u32 sw_done_sel;
+ struct dma_pool *bd_pool;
};
#define IMX_DMA_SG_LOOP BIT(0)
@@ -338,6 +377,14 @@ struct sdma_channel {
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7
+/*
+ * 0x78(SDMA_XTRIG_CONF2+4)~0x100(SDMA_CHNPRI_0) registers are reserved and
+ * can't be accessed. Skip touching these registers in suspend/resume. Also,
+ * the two macros below are only used on i.MX6SX.
+ */
+#define MXC_SDMA_RESERVED_REG (SDMA_CHNPRI_0 - SDMA_XTRIG_CONF2 - 4)
+#define MXC_SDMA_SAVED_REG_NUM (((SDMA_CHNENBL0_IMX35 + 4 * 48) - \
+ MXC_SDMA_RESERVED_REG) / 4)
#define SDMA_FIRMWARE_MAGIC 0x414d4453
@@ -376,6 +423,8 @@ struct sdma_engine {
struct device_dma_parameters dma_parms;
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
+ u32 save_regs[MXC_SDMA_SAVED_REG_NUM];
+ const char *fw_name;
void __iomem *regs;
struct sdma_context_data *context;
dma_addr_t context_phys;
@@ -389,6 +438,15 @@ struct sdma_engine {
u32 spba_start_addr;
u32 spba_end_addr;
unsigned int irq;
+ struct gen_pool *iram_pool;
+ /* channel0 bd */
+ dma_addr_t bd0_phys;
+ bool bd0_iram;
+ struct sdma_buffer_descriptor *bd0;
+ bool suspend_off;
+ int idx;
+ /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
+ bool clk_ratio;
};
static struct sdma_driver_data sdma_imx31 = {
@@ -466,7 +524,6 @@ static struct sdma_script_start_addrs sdma_script_imx6q = {
.ap_2_ap_addr = 642,
.uart_2_mcu_addr = 817,
.mcu_2_app_addr = 747,
- .per_2_per_addr = 6331,
.uartsh_2_mcu_addr = 1032,
.mcu_2_shp_addr = 960,
.app_2_mcu_addr = 683,
@@ -481,6 +538,30 @@ static struct sdma_driver_data sdma_imx6q = {
.script_addrs = &sdma_script_imx6q,
};
+static struct sdma_script_start_addrs sdma_script_imx6sx = {
+ .ap_2_ap_addr = 642,
+ .uart_2_mcu_addr = 817,
+ .mcu_2_app_addr = 747,
+ .uartsh_2_mcu_addr = 1032,
+ .mcu_2_shp_addr = 960,
+ .app_2_mcu_addr = 683,
+ .shp_2_mcu_addr = 891,
+ .spdif_2_mcu_addr = 1100,
+ .mcu_2_spdif_addr = 1134,
+};
+
+static struct sdma_driver_data sdma_imx6sx = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx6sx,
+};
+
+static struct sdma_driver_data sdma_imx6ul = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx6sx,
+};
+
static struct sdma_script_start_addrs sdma_script_imx7d = {
.ap_2_ap_addr = 644,
.uart_2_mcu_addr = 819,
@@ -499,6 +580,12 @@ static struct sdma_driver_data sdma_imx7d = {
.script_addrs = &sdma_script_imx7d,
};
+static struct sdma_driver_data sdma_imx8m = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx7d,
+};
+
static const struct platform_device_id sdma_devtypes[] = {
{
.name = "imx25-sdma",
@@ -519,15 +606,23 @@ static const struct platform_device_id sdma_devtypes[] = {
.name = "imx6q-sdma",
.driver_data = (unsigned long)&sdma_imx6q,
}, {
+ .name = "imx6sx-sdma",
+ .driver_data = (unsigned long)&sdma_imx6sx,
+ }, {
.name = "imx7d-sdma",
.driver_data = (unsigned long)&sdma_imx7d,
}, {
+ .name = "imx8mq-sdma",
+ .driver_data = (unsigned long)&sdma_imx8m,
+ }, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);
static const struct of_device_id sdma_dt_ids[] = {
+ { .compatible = "fsl,imx6ul-sdma", .data = &sdma_imx6ul, },
+ { .compatible = "fsl,imx6sx-sdma", .data = &sdma_imx6sx, },
{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
@@ -535,15 +630,20 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
+ { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8m, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
+static int sdma_dev_idx;
+
#define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/
+static void sdma_start_desc(struct sdma_channel *sdmac);
+
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
u32 chnenbl0 = sdma->drvdata->chnenbl0;
@@ -616,17 +716,19 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
u32 address)
{
- struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ struct sdma_buffer_descriptor *bd0 = sdma->bd0;
void *buf_virt;
dma_addr_t buf_phys;
int ret;
unsigned long flags;
+ bool use_iram = true;
- buf_virt = dma_alloc_coherent(NULL,
- size,
- &buf_phys, GFP_KERNEL);
+ buf_virt = gen_pool_dma_alloc(sdma->iram_pool, size, &buf_phys);
if (!buf_virt) {
- return -ENOMEM;
+ use_iram = false;
+ buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
+ if (!buf_virt)
+ return -ENOMEM;
}
spin_lock_irqsave(&sdma->channel_0_lock, flags);
@@ -643,7 +745,10 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
- dma_free_coherent(NULL, size, buf_virt, buf_phys);
+ if (use_iram)
+ gen_pool_free(sdma->iram_pool, (unsigned long)buf_virt, size);
+ else
+ dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
return ret;
}
@@ -658,6 +763,21 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
val = readl_relaxed(sdma->regs + chnenbl);
__set_bit(channel, &val);
writel_relaxed(val, sdma->regs + chnenbl);
+
+ /* Set SDMA_DONEx_CONFIG if sw_done is enabled */
+ if (sdmac->sw_done) {
+ u32 offset = SDMA_DONE0_CONFIG + sdmac->sw_done_sel / 4;
+ u32 done_sel = SDMA_DONE0_CONFIG_DONE_SEL +
+ ((sdmac->sw_done_sel % 4) << 3);
+ u32 sw_done_dis = SDMA_DONE0_CONFIG_DONE_DIS +
+ ((sdmac->sw_done_sel % 4) << 3);
+
+ val = readl_relaxed(sdma->regs + offset);
+ __set_bit(done_sel, &val);
+ __clear_bit(sw_done_dis, &val);
+ writel_relaxed(val, sdma->regs + offset);
+ }
+
}
static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
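The SDMA_DONEx_CONFIG registers pack four 8-bit selector lanes into each 32-bit word, which is what the offset and bit arithmetic above computes. A hypothetical helper (not part of this patch) extracting the same arithmetic for clarity; with sw_done_sel = 5 it yields offset 0x1001, a DONE_SEL bit of 15 and a DONE_DIS bit of 14:

/* Sketch only: the register/bit math used by sdma_event_enable() above. */
static void sdma_done_cfg_fields(unsigned int sw_done_sel, u32 *offset,
				 u32 *done_sel_bit, u32 *done_dis_bit)
{
	/* four selectors per 32-bit register, 8 bits per lane */
	*offset       = SDMA_DONE0_CONFIG + sw_done_sel / 4;
	*done_sel_bit = SDMA_DONE0_CONFIG_DONE_SEL + ((sw_done_sel % 4) << 3);
	*done_dis_bit = SDMA_DONE0_CONFIG_DONE_DIS + ((sw_done_sel % 4) << 3);
}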
@@ -682,8 +802,10 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
* loop mode. Iterate over descriptors, re-setup them and
* call callback function.
*/
- while (1) {
- bd = &sdmac->bd[sdmac->buf_tail];
+ while (sdmac->desc) {
+ struct sdma_desc *desc = sdmac->desc;
+
+ bd = &desc->bd[desc->buf_tail];
if (bd->mode.status & BD_DONE)
break;
@@ -702,36 +824,35 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
sdmac->chn_real_count = bd->mode.count;
bd->mode.status |= BD_DONE;
bd->mode.count = sdmac->period_len;
- sdmac->buf_ptail = sdmac->buf_tail;
- sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
-
- /*
- * The callback is called from the interrupt context in order
- * to reduce latency and to avoid the risk of altering the
- * SDMA transaction status by the time the client tasklet is
- * executed.
- */
-
- dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
+ desc->buf_ptail = desc->buf_tail;
+ desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
if (error)
sdmac->status = old_status;
+ /*
+ * The callback is called from the interrupt context in order
+ * to reduce latency and to avoid the risk of altering the
+ * SDMA transaction status by the time the client tasklet is
+ * executed.
+ */
+ spin_unlock(&sdmac->vc.lock);
+ dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+ spin_lock(&sdmac->vc.lock);
}
}
-static void mxc_sdma_handle_channel_normal(unsigned long data)
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
{
struct sdma_channel *sdmac = (struct sdma_channel *) data;
struct sdma_buffer_descriptor *bd;
int i, error = 0;
- sdmac->chn_real_count = 0;
/*
* non loop mode. Iterate over all descriptors, collect
* errors and call callback function
*/
- for (i = 0; i < sdmac->num_bd; i++) {
- bd = &sdmac->bd[i];
+ for (i = 0; i < sdmac->desc->num_bd; i++) {
+ bd = &sdmac->desc->bd[i];
if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
@@ -742,10 +863,6 @@ static void mxc_sdma_handle_channel_normal(unsigned long data)
sdmac->status = DMA_ERROR;
else
sdmac->status = DMA_COMPLETE;
-
- dma_cookie_complete(&sdmac->desc);
-
- dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
}
static irqreturn_t sdma_int_handler(int irq, void *dev_id)
@@ -761,13 +878,26 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
while (stat) {
int channel = fls(stat) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
-
- if (sdmac->flags & IMX_DMA_SG_LOOP)
- sdma_update_channel_loop(sdmac);
- else
- tasklet_schedule(&sdmac->tasklet);
-
+ struct sdma_desc *desc;
+
+ spin_lock(&sdmac->vc.lock);
+ desc = sdmac->desc;
+ if (desc) {
+ if (sdmac->flags & IMX_DMA_SG_LOOP) {
+ if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
+ sdma_update_channel_loop(sdmac);
+ else
+ vchan_cyclic_callback(&desc->vd);
+ } else {
+ mxc_sdma_handle_channel_normal(sdmac);
+ vchan_cookie_complete(&desc->vd);
+ if (!list_empty(&sdmac->pending))
+ list_del(&desc->node);
+ sdma_start_desc(sdmac);
+ }
+ }
__clear_bit(channel, &stat);
+ spin_unlock(&sdmac->vc.lock);
}
return IRQ_HANDLED;
@@ -785,14 +915,16 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
* These are needed once we start to support transfers between
* two peripherals or memory-to-memory transfers
*/
- int per_2_per = 0;
+ int per_2_per = 0, emi_2_emi = 0;
sdmac->pc_from_device = 0;
sdmac->pc_to_device = 0;
sdmac->device_to_device = 0;
+ sdmac->pc_to_pc = 0;
switch (peripheral_type) {
case IMX_DMATYPE_MEMORY:
+ emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
break;
case IMX_DMATYPE_DSP:
emi_2_per = sdma->script_addrs->bp_2_ap_addr;
@@ -815,6 +947,9 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
break;
case IMX_DMATYPE_CSPI:
+ per_2_emi = sdma->script_addrs->app_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_ecspi_addr;
+ break;
case IMX_DMATYPE_EXT:
case IMX_DMATYPE_SSI:
case IMX_DMATYPE_SAI:
@@ -858,6 +993,12 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
case IMX_DMATYPE_IPU_MEMORY:
emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
break;
+ case IMX_DMATYPE_HDMI:
+ emi_2_per = sdma->script_addrs->hdmi_dma_addr;
+ break;
+ case IMX_DMATYPE_MULTI_SAI:
+ per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
default:
break;
}
@@ -865,6 +1006,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
sdmac->pc_from_device = per_2_emi;
sdmac->pc_to_device = emi_2_per;
sdmac->device_to_device = per_2_per;
+ sdmac->pc_to_pc = emi_2_emi;
}
static int sdma_load_context(struct sdma_channel *sdmac)
@@ -873,14 +1015,19 @@ static int sdma_load_context(struct sdma_channel *sdmac)
int channel = sdmac->channel;
int load_address;
struct sdma_context_data *context = sdma->context;
- struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ struct sdma_buffer_descriptor *bd0 = sdma->bd0;
int ret;
unsigned long flags;
+ if (sdmac->context_loaded)
+ return 0;
+
if (sdmac->direction == DMA_DEV_TO_MEM)
load_address = sdmac->pc_from_device;
else if (sdmac->direction == DMA_DEV_TO_DEV)
load_address = sdmac->device_to_device;
+ else if (sdmac->direction == DMA_MEM_TO_MEM)
+ load_address = sdmac->pc_to_pc;
else
load_address = sdmac->pc_to_device;
@@ -902,11 +1049,16 @@ static int sdma_load_context(struct sdma_channel *sdmac)
/* Send by context the event mask,base address for peripheral
* and watermark level
*/
- context->gReg[0] = sdmac->event_mask[1];
- context->gReg[1] = sdmac->event_mask[0];
- context->gReg[2] = sdmac->per_addr;
- context->gReg[6] = sdmac->shp_addr;
- context->gReg[7] = sdmac->watermark_level;
+ if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
+ context->gReg[4] = sdmac->per_addr;
+ context->gReg[6] = sdmac->shp_addr;
+ } else {
+ context->gReg[0] = sdmac->event_mask[1];
+ context->gReg[1] = sdmac->event_mask[0];
+ context->gReg[2] = sdmac->per_addr;
+ context->gReg[6] = sdmac->shp_addr;
+ context->gReg[7] = sdmac->watermark_level;
+ }
bd0->mode.command = C0_SETDM;
bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
@@ -917,12 +1069,39 @@ static int sdma_load_context(struct sdma_channel *sdmac)
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
+ sdmac->context_loaded = true;
+
+ return ret;
+}
+
+static int sdma_save_restore_context(struct sdma_engine *sdma, bool save)
+{
+ struct sdma_context_data *context = sdma->context;
+ struct sdma_buffer_descriptor *bd0 = sdma->bd0;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
+ if (save)
+ bd0->mode.command = C0_GETDM;
+ else
+ bd0->mode.command = C0_SETDM;
+
+ bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
+ bd0->mode.count = MAX_DMA_CHANNELS * sizeof(*context) / 4;
+ bd0->buffer_addr = sdma->context_phys;
+ bd0->ext_buffer_addr = 2048;
+ ret = sdma_run_channel0(sdma);
+
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
+
return ret;
}
static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
- return container_of(chan, struct sdma_channel, chan);
+ return container_of(chan, struct sdma_channel, vc.chan);
}
static int sdma_disable_channel(struct dma_chan *chan)
@@ -937,21 +1116,6 @@ static int sdma_disable_channel(struct dma_chan *chan)
return 0;
}
-static int sdma_disable_channel_with_delay(struct dma_chan *chan)
-{
- sdma_disable_channel(chan);
-
- /*
- * According to NXP R&D team a delay of one BD SDMA cost time
- * (maximum is 1ms) should be added after disable of the channel
- * bit, to ensure SDMA core has really been stopped after SDMA
- * clients call .device_terminate_all.
- */
- mdelay(1);
-
- return 0;
-}
-
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
struct sdma_engine *sdma = sdmac->sdma;
@@ -990,6 +1154,31 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
+
+ if (sdmac->src_dualfifo)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SD;
+ if (sdmac->dst_dualfifo)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DD;
+}
+
+static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
+{
+ sdmac->watermark_level &= ~(0xFFF << SDMA_WATERMARK_LEVEL_FIFOS_OFF |
+ SDMA_WATERMARK_LEVEL_SW_DONE |
+ 0xf << SDMA_WATERMARK_LEVEL_SW_DONE_SEL_OFF);
+
+ if (sdmac->sw_done)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE |
+ sdmac->sw_done_sel <<
+ SDMA_WATERMARK_LEVEL_SW_DONE_SEL_OFF;
+
+ /* For fifo_num:
+ * bits 0-7 are the fifo number,
+ * bits 8-11 are the fifo offset,
+ * so fifo_num only needs to be shifted left by 8 bits into watermark_level
+ */
+ sdmac->watermark_level |= sdmac->fifo_num<<
+ SDMA_WATERMARK_LEVEL_FIFOS_OFF;
}
static int sdma_config_channel(struct dma_chan *chan)
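Putting sdma_set_watermarklevel_for_sais() together: the multi-SAI watermark word carries the plain watermark in the low bits, the fifo number starting at bit 8 and, when software done is used, the SW_DONE flag plus its selector starting at bit 24. A standalone sketch under those assumptions (not part of the patch):

/* Sketch only: assemble a multi-SAI watermark word as done above. */
static u32 sdma_sai_watermark(u32 watermark, u32 fifo_num,
			      bool sw_done, u32 sw_done_sel)
{
	u32 wml = watermark;

	wml |= fifo_num << SDMA_WATERMARK_LEVEL_FIFOS_OFF;
	if (sw_done)
		wml |= SDMA_WATERMARK_LEVEL_SW_DONE |
		       sw_done_sel << SDMA_WATERMARK_LEVEL_SW_DONE_SEL_OFF;

	return wml;
}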
@@ -1004,11 +1193,9 @@ static int sdma_config_channel(struct dma_chan *chan)
sdmac->shp_addr = 0;
sdmac->per_addr = 0;
- if (sdmac->event_id0) {
- if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
- return -EINVAL;
- sdma_event_enable(sdmac, sdmac->event_id0);
- }
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id0);
if (sdmac->event_id1) {
if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
@@ -1037,8 +1224,18 @@ static int sdma_config_channel(struct dma_chan *chan)
if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
sdmac->peripheral_type == IMX_DMATYPE_ASRC)
sdma_set_watermarklevel_for_p2p(sdmac);
- } else
+ } else {
+ /* ERR008517 fixed on i.mx6ul, no workaround needed */
+ if (sdmac->peripheral_type == IMX_DMATYPE_CSPI &&
+ sdmac->direction == DMA_MEM_TO_DEV &&
+ sdmac->sdma->drvdata == &sdma_imx6ul)
+ __set_bit(31, &sdmac->watermark_level);
+ else if (sdmac->peripheral_type ==
+ IMX_DMATYPE_MULTI_SAI)
+ sdma_set_watermarklevel_for_sais(sdmac);
+
__set_bit(sdmac->event_id0, sdmac->event_mask);
+ }
/* Address */
sdmac->shp_addr = sdmac->per_address;
@@ -1047,6 +1244,8 @@ static int sdma_config_channel(struct dma_chan *chan)
sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
}
+ sdmac->context_loaded = false;
+
ret = sdma_load_context(sdmac);
return ret;
@@ -1068,52 +1267,191 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
return 0;
}
-static int sdma_request_channel(struct sdma_channel *sdmac)
+static int sdma_alloc_bd(struct sdma_desc *desc)
{
- struct sdma_engine *sdma = sdmac->sdma;
- int channel = sdmac->channel;
- int ret = -EBUSY;
+ u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+ int ret = -ENOMEM;
+ unsigned long flags;
- sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
- GFP_KERNEL);
- if (!sdmac->bd) {
- ret = -ENOMEM;
- goto out;
+ desc->bd_iram = true;
+ desc->bd = gen_pool_dma_alloc(desc->sdmac->sdma->iram_pool, bd_size,
+ &desc->bd_phys);
+ if (!desc->bd) {
+ desc->bd_iram = false;
+ desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_ATOMIC,
+ &desc->bd_phys);
+ if (!desc->bd)
+ return ret;
+ }
+ spin_lock_irqsave(&desc->sdmac->vc.lock, flags);
+ desc->sdmac->bd_size_sum += bd_size;
+ spin_unlock_irqrestore(&desc->sdmac->vc.lock, flags);
+
+ memset(desc->bd, 0, bd_size);
+
+ return 0;
+}
+
+static void sdma_free_bd(struct sdma_desc *desc)
+{
+ u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+ unsigned long flags;
+
+ if (desc->bd) {
+ if (desc->bd_iram)
+ gen_pool_free(desc->sdmac->sdma->iram_pool,
+ (unsigned long)desc->bd, bd_size);
+ else
+ dma_pool_free(desc->sdmac->bd_pool, desc->bd,
+ desc->bd_phys);
+ spin_lock_irqsave(&desc->sdmac->vc.lock, flags);
+ desc->sdmac->bd_size_sum -= bd_size;
+ spin_unlock_irqrestore(&desc->sdmac->vc.lock, flags);
+ }
+}
+
+static int sdma_request_channel0(struct sdma_engine *sdma)
+{
+ int ret = 0;
+
+ sdma->bd0_iram = true;
+ sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool, PAGE_SIZE, &sdma->bd0_phys);
+ if (!sdma->bd0) {
+ sdma->bd0_iram = false;
+ sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE,
+ &sdma->bd0_phys, GFP_KERNEL);
+ if (!sdma->bd0) {
+ ret = -ENOMEM;
+ goto out;
+ }
}
- sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+ memset(sdma->bd0, 0, PAGE_SIZE);
+
+ sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
+ sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
- sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+ sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
return 0;
out:
return ret;
}
-static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct sdma_desc, vd.tx);
+}
+
+static void sdma_desc_free(struct virt_dma_desc *vd)
{
+ struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
+ if (desc) {
+ sdma_free_bd(desc);
+ kfree(desc);
+ }
+}
+
+static int sdma_channel_pause(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
unsigned long flags;
- struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
- dma_cookie_t cookie;
- spin_lock_irqsave(&sdmac->lock, flags);
+ if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+ return -EINVAL;
+
+ sdma_disable_channel(chan);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ sdmac->status = DMA_PAUSED;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
- cookie = dma_cookie_assign(tx);
+ return 0;
+}
- spin_unlock_irqrestore(&sdmac->lock, flags);
+static int sdma_channel_resume(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+ unsigned long flags;
+
+ if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+ return -EINVAL;
- return cookie;
+ /*
+ * Restore the context, since it may be lost if mega/fast mix was off.
+ */
+ if (sdma->suspend_off) {
+ if (sdma_load_context(sdmac)) {
+ dev_err(sdmac->sdma->dev, "context load failed.\n");
+ return -EINVAL;
+ }
+ }
+
+ sdma_enable_channel(sdmac->sdma, sdmac->channel);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ sdmac->status = DMA_IN_PROGRESS;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+
+ return 0;
+}
+
+static int sdma_terminate_all(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_get_all_descriptors(&sdmac->vc, &head);
+ while (!list_empty(&sdmac->pending)) {
+ struct sdma_desc *desc = list_first_entry(&sdmac->pending,
+ struct sdma_desc, node);
+
+ list_del(&desc->node);
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ sdmac->vc.desc_free(&desc->vd);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ }
+ if (sdmac->desc)
+ sdmac->desc = NULL;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_dma_desc_free_list(&sdmac->vc, &head);
+ sdma_disable_channel(chan);
+ sdmac->context_loaded = false;
+
+ return 0;
}
static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct imx_dma_data *data = chan->private;
+ struct imx_dma_data default_data;
int prio, ret;
- if (!data)
- return -EINVAL;
+ ret = clk_enable(sdmac->sdma->clk_ipg);
+ if (ret)
+ return ret;
+ ret = clk_enable(sdmac->sdma->clk_ahb);
+ if (ret)
+ goto disable_clk_ipg;
+
+ /*
+ * dmatest (memcpy) never calls slave_config before prep, so we need to
+ * do that setup work here in this case.
+ */
+ if (!data) {
+ sdmac->word_size = sdmac->sdma->dma_device.copy_align;
+ default_data.priority = 2;
+ default_data.peripheral_type = IMX_DMATYPE_MEMORY;
+ default_data.dma_request = 0;
+ default_data.dma_request2 = 0;
+ data = &default_data;
+
+ sdma_config_ownership(sdmac, false, true, false);
+ sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
+ sdma_load_context(sdmac);
+ }
switch (data->priority) {
case DMA_PRIO_HIGH:
@@ -1131,26 +1469,23 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
sdmac->peripheral_type = data->peripheral_type;
sdmac->event_id0 = data->dma_request;
sdmac->event_id1 = data->dma_request2;
-
- ret = clk_enable(sdmac->sdma->clk_ipg);
- if (ret)
- return ret;
- ret = clk_enable(sdmac->sdma->clk_ahb);
- if (ret)
- goto disable_clk_ipg;
-
- ret = sdma_request_channel(sdmac);
- if (ret)
- goto disable_clk_ahb;
+ sdmac->src_dualfifo = data->src_dualfifo;
+ sdmac->dst_dualfifo = data->dst_dualfifo;
+ /* Get software done selector if sw_done enabled */
+ if (data->done_sel & BIT(31)) {
+ sdmac->sw_done = true;
+ sdmac->sw_done_sel = (data->done_sel >> 8) & 0xff;
+ }
ret = sdma_set_channel_priority(sdmac, prio);
if (ret)
goto disable_clk_ahb;
- dma_async_tx_descriptor_init(&sdmac->desc, chan);
- sdmac->desc.tx_submit = sdma_tx_submit;
- /* txd.flags will be overwritten in prep funcs */
- sdmac->desc.flags = DMA_CTRL_ACK;
+ sdmac->bd_size_sum = 0;
+
+ sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
+ sizeof(struct sdma_buffer_descriptor),
+ 32, 0);
return 0;
@@ -1166,10 +1501,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- sdma_disable_channel(chan);
+ sdma_terminate_all(chan);
- if (sdmac->event_id0)
- sdma_event_disable(sdmac, sdmac->event_id0);
+ sdma_event_disable(sdmac, sdmac->event_id0);
if (sdmac->event_id1)
sdma_event_disable(sdmac, sdmac->event_id1);
@@ -1178,115 +1512,242 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
sdma_set_channel_priority(sdmac, 0);
- dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
-
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
+
+ dma_pool_destroy(sdmac->bd_pool);
+ sdmac->bd_pool = NULL;
}
-static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+ enum dma_transfer_direction direction, u32 bds)
+{
+ struct sdma_desc *desc;
+ /* Now allocate and setup the descriptor. */
+ desc = kzalloc((sizeof(*desc)), GFP_ATOMIC);
+ if (!desc)
+ goto err_out;
+
+ sdmac->status = DMA_IN_PROGRESS;
+ sdmac->direction = direction;
+ sdmac->flags = 0;
+ sdmac->chn_count = 0;
+ sdmac->chn_real_count = 0;
+
+ desc->sdmac = sdmac;
+ desc->num_bd = bds;
+ INIT_LIST_HEAD(&desc->node);
+
+ if (sdma_alloc_bd(desc))
+ goto err_desc_out;
+
+ if (sdma_load_context(sdmac))
+ goto err_desc_out;
+
+ return desc;
+
+err_desc_out:
+ kfree(desc);
+err_out:
+ return NULL;
+}
+
+static int check_bd_buswidth(struct sdma_buffer_descriptor *bd,
+ struct sdma_channel *sdmac, int count,
+ dma_addr_t dma_dst, dma_addr_t dma_src)
+{
+ int ret = 0;
+
+ switch (sdmac->word_size) {
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ bd->mode.command = 0;
+ if ((count | dma_dst | dma_src) & 3)
+ ret = -EINVAL;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ bd->mode.command = 2;
+ if ((count | dma_dst | dma_src) & 1)
+ ret = -EINVAL;
+ break;
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ bd->mode.command = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- int ret, i, count;
int channel = sdmac->channel;
- struct scatterlist *sg;
+ size_t count;
+ int i = 0, param;
+ struct sdma_buffer_descriptor *bd;
+ struct sdma_desc *desc;
- if (sdmac->status == DMA_IN_PROGRESS)
+ if (!chan || !len)
return NULL;
- sdmac->status = DMA_IN_PROGRESS;
- sdmac->flags = 0;
+ dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
+ &dma_src, &dma_dst, len, channel);
- sdmac->buf_tail = 0;
- sdmac->buf_ptail = 0;
- sdmac->chn_real_count = 0;
+ desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM, len / SDMA_BD_MAX_CNT + 1);
+ if (!desc)
+ goto err_out;
- dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
- sg_len, channel);
+ do {
+ count = min_t(size_t, len, SDMA_BD_MAX_CNT);
+ bd = &desc->bd[i];
+ bd->buffer_addr = dma_src;
+ bd->ext_buffer_addr = dma_dst;
+ bd->mode.count = count;
+ sdmac->chn_count += count;
- sdmac->direction = direction;
- ret = sdma_load_context(sdmac);
- if (ret)
- goto err_out;
+ if (check_bd_buswidth(bd, sdmac, count, dma_dst, dma_src))
+ goto err_bd_out;
- if (sg_len > NUM_BD) {
- dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
- channel, sg_len, NUM_BD);
- ret = -EINVAL;
+ dma_src += count;
+ dma_dst += count;
+ len -= count;
+ i++;
+
+ param = BD_DONE | BD_EXTD | BD_CONT;
+ /* last bd */
+ if (!len) {
+ param |= BD_INTR;
+ param |= BD_LAST;
+ param &= ~BD_CONT;
+ }
+
+ dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
+ i, count, bd->buffer_addr,
+ param & BD_WRAP ? "wrap" : "",
+ param & BD_INTR ? " intr" : "");
+
+ bd->mode.status = param;
+ } while (len);
+
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+ sdma_free_bd(desc);
+ kfree(desc);
+err_out:
+ return NULL;
+}
+
+/*
+ * Please ensure dst_nents is no smaller than src_nents, and that every
+ * dst_sg node's sg_len is no smaller than that of its matching src_sg node.
+ * To simplify things, use the same layout for dst_sg as for src_sg.
+ */
+static struct dma_async_tx_descriptor *sdma_prep_sg(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+ int ret, i, count;
+ int channel = sdmac->channel;
+ struct scatterlist *sg_src = src_sg, *sg_dst = dst_sg;
+ struct sdma_desc *desc;
+
+ if (!chan)
+ return NULL;
+
+ dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
+ src_nents, channel);
+
+ desc = sdma_transfer_init(sdmac, direction, src_nents);
+ if (!desc)
goto err_out;
- }
- sdmac->chn_count = 0;
- for_each_sg(sgl, sg, sg_len, i) {
- struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ for_each_sg(src_sg, sg_src, src_nents, i) {
+ struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
- bd->buffer_addr = sg->dma_address;
+ bd->buffer_addr = sg_src->dma_address;
- count = sg_dma_len(sg);
+ if (direction == DMA_MEM_TO_MEM) {
+ BUG_ON(!sg_dst);
+ bd->ext_buffer_addr = sg_dst->dma_address;
+ }
- if (count > 0xffff) {
+ count = sg_dma_len(sg_src);
+
+ if (count > SDMA_BD_MAX_CNT) {
dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
- channel, count, 0xffff);
+ channel, count, SDMA_BD_MAX_CNT);
ret = -EINVAL;
- goto err_out;
+ goto err_bd_out;
}
bd->mode.count = count;
sdmac->chn_count += count;
- if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
- ret = -EINVAL;
- goto err_out;
- }
-
- switch (sdmac->word_size) {
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- bd->mode.command = 0;
- if (count & 3 || sg->dma_address & 3)
- return NULL;
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- bd->mode.command = 2;
- if (count & 1 || sg->dma_address & 1)
- return NULL;
- break;
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- bd->mode.command = 1;
- break;
- default:
- return NULL;
- }
+ if (direction == DMA_MEM_TO_MEM)
+ ret = check_bd_buswidth(bd, sdmac, count,
+ sg_dst->dma_address,
+ sg_src->dma_address);
+ else
+ ret = check_bd_buswidth(bd, sdmac, count, 0,
+ sg_src->dma_address);
+ if (ret)
+ goto err_bd_out;
param = BD_DONE | BD_EXTD | BD_CONT;
- if (i + 1 == sg_len) {
+ if (i + 1 == src_nents) {
param |= BD_INTR;
param |= BD_LAST;
param &= ~BD_CONT;
}
- dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
- i, count, (u64)sg->dma_address,
+ dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%pad %s%s\n",
+ i, count, &sg_src->dma_address,
param & BD_WRAP ? "wrap" : "",
param & BD_INTR ? " intr" : "");
bd->mode.status = param;
+ if (direction == DMA_MEM_TO_MEM)
+ sg_dst = sg_next(sg_dst);
}
- sdmac->num_bd = sg_len;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
- return &sdmac->desc;
+err_bd_out:
+ sdma_free_bd(desc);
+ kfree(desc);
err_out:
- sdmac->status = DMA_ERROR;
+ dev_dbg(sdma->dev, "Can't get desc.\n");
return NULL;
}
+static struct dma_async_tx_descriptor *sdma_prep_memcpy_sg(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ return sdma_prep_sg(chan, dst_sg, dst_nents, src_sg, src_nents,
+ DMA_MEM_TO_MEM, flags);
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ return sdma_prep_sg(chan, NULL, 0, sgl, sg_len, direction, flags);
+}
+
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
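With DMA_MEMCPY now in the capability mask, any generic dmaengine client can reach sdma_prep_memcpy(); transfers longer than SDMA_BD_MAX_CNT are simply split across multiple buffer descriptors. A minimal, hedged usage sketch, assuming dst/src are already DMA-mapped addresses:

/* Sketch only: drive the new memcpy path through the generic API. */
#include <linux/dmaengine.h>

static int sdma_memcpy_demo(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	dma_sync_wait(chan, cookie);		/* poll until complete */
	dma_release_channel(chan);

	return 0;
}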
@@ -1294,42 +1755,42 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- int num_periods = buf_len / period_len;
int channel = sdmac->channel;
- int ret, i = 0, buf = 0;
+ int i = 0, buf = 0;
+ int num_periods = 0;
+ struct sdma_desc *desc;
dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
- if (sdmac->status == DMA_IN_PROGRESS)
- return NULL;
-
- sdmac->status = DMA_IN_PROGRESS;
+ if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
+ num_periods = buf_len / period_len;
+ /* Now allocate and setup the descriptor. */
+ desc = sdma_transfer_init(sdmac, direction, num_periods);
+ if (!desc)
+ goto err_out;
- sdmac->buf_tail = 0;
- sdmac->buf_ptail = 0;
- sdmac->chn_real_count = 0;
sdmac->period_len = period_len;
-
sdmac->flags |= IMX_DMA_SG_LOOP;
- sdmac->direction = direction;
- ret = sdma_load_context(sdmac);
- if (ret)
- goto err_out;
- if (num_periods > NUM_BD) {
- dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
- channel, num_periods, NUM_BD);
- goto err_out;
- }
+ /* for hdmi-audio without BDs */
+ if (sdmac->peripheral_type == IMX_DMATYPE_HDMI)
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
- if (period_len > 0xffff) {
- dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
- channel, period_len, 0xffff);
- goto err_out;
+ desc->buf_tail = 0;
+ desc->buf_ptail = 0;
+ sdmac->chn_real_count = 0;
+
+ if (period_len > SDMA_BD_MAX_CNT) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
+ channel, period_len, SDMA_BD_MAX_CNT);
+ goto err_bd_out;
}
+ if (sdmac->peripheral_type == IMX_DMATYPE_UART)
+ sdmac->chn_count = period_len;
+
while (buf < buf_len) {
- struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
bd->buffer_addr = dma_addr;
@@ -1337,7 +1798,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
bd->mode.count = period_len;
if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
- goto err_out;
+ goto err_bd_out;
if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
bd->mode.command = 0;
else
@@ -1347,8 +1808,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
if (i + 1 == num_periods)
param |= BD_WRAP;
- dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
- i, period_len, (u64)dma_addr,
+ dev_dbg(sdma->dev, "entry %d: count: %zd dma: %pad %s%s\n",
+ i, period_len, &dma_addr,
param & BD_WRAP ? "wrap" : "",
param & BD_INTR ? " intr" : "");
@@ -1359,13 +1820,12 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
i++;
}
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
- sdmac->num_bd = num_periods;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
- return &sdmac->desc;
+err_bd_out:
+ sdma_free_bd(desc);
+ kfree(desc);
err_out:
- sdmac->status = DMA_ERROR;
return NULL;
}
@@ -1373,12 +1833,14 @@ static int sdma_config(struct dma_chan *chan,
struct dma_slave_config *dmaengine_cfg)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
-
+ /* clear watermark_level before setting */
+ sdmac->watermark_level = 0;
if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
sdmac->per_address = dmaengine_cfg->src_addr;
sdmac->watermark_level = dmaengine_cfg->src_maxburst *
dmaengine_cfg->src_addr_width;
sdmac->word_size = dmaengine_cfg->src_addr_width;
+ sdmac->fifo_num = dmaengine_cfg->src_fifo_num;
} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
sdmac->per_address2 = dmaengine_cfg->src_addr;
sdmac->per_address = dmaengine_cfg->dst_addr;
@@ -1387,48 +1849,115 @@ static int sdma_config(struct dma_chan *chan,
sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
SDMA_WATERMARK_LEVEL_HWML;
sdmac->word_size = dmaengine_cfg->dst_addr_width;
+ } else if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
+ sdmac->per_address = dmaengine_cfg->dst_addr;
+ sdmac->per_address2 = dmaengine_cfg->src_addr;
+ sdmac->watermark_level = 0;
+ } else if (dmaengine_cfg->direction == DMA_MEM_TO_MEM) {
+ sdmac->word_size = dmaengine_cfg->dst_addr_width;
} else {
sdmac->per_address = dmaengine_cfg->dst_addr;
sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
dmaengine_cfg->dst_addr_width;
sdmac->word_size = dmaengine_cfg->dst_addr_width;
+ sdmac->fifo_num = dmaengine_cfg->dst_fifo_num;
}
sdmac->direction = dmaengine_cfg->direction;
return sdma_config_channel(chan);
}
+static void sdma_wait_tasklet(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+ tasklet_kill(&sdmac->vc.task);
+}
+
static enum dma_status sdma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
u32 residue;
+ struct virt_dma_desc *vd;
+ struct sdma_desc *desc;
+ enum dma_status ret;
+ unsigned long flags;
- if (sdmac->flags & IMX_DMA_SG_LOOP)
- residue = (sdmac->num_bd - sdmac->buf_ptail) *
- sdmac->period_len - sdmac->chn_real_count;
- else
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (!txstate) {
+ return ret;
+ } else if (ret == DMA_COMPLETE) {
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ txstate->residue = sdmac->chn_count - sdmac->chn_real_count;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ return ret;
+ }
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vd = vchan_find_desc(&sdmac->vc, cookie);
+ desc = to_sdma_desc(&vd->tx);
+ if (vd) {
+ if ((sdmac->flags & IMX_DMA_SG_LOOP)) {
+ if (sdmac->peripheral_type != IMX_DMATYPE_UART)
+ residue = (desc->num_bd - desc->buf_ptail) *
+ sdmac->period_len - sdmac->chn_real_count;
+ else
+ residue = sdmac->chn_count - sdmac->chn_real_count;
+ } else
+ residue = sdmac->chn_count;
+ } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
residue = sdmac->chn_count - sdmac->chn_real_count;
+ else
+ residue = 0;
+
+ txstate->residue = residue;
+ ret = sdmac->status;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+
+ return ret;
+}
- dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
- residue);
+static void sdma_start_desc(struct sdma_channel *sdmac)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
+ struct sdma_desc *desc;
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
- return sdmac->status;
+ if (!vd) {
+ sdmac->desc = NULL;
+ return;
+ }
+ sdmac->desc = desc = to_sdma_desc(&vd->tx);
+ /*
+ * Do not delete the node in desc_issued list in cyclic mode, otherwise
+ * the allocated desc will never be freed in vchan_dma_desc_free_list
+ */
+ if (!(sdmac->flags & IMX_DMA_SG_LOOP)) {
+ list_add_tail(&sdmac->desc->node, &sdmac->pending);
+ list_del(&vd->node);
+ }
+ sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+ sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+ sdma_enable_channel(sdma, sdmac->channel);
}
static void sdma_issue_pending(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- struct sdma_engine *sdma = sdmac->sdma;
+ unsigned long flags;
- if (sdmac->status == DMA_IN_PROGRESS)
- sdma_enable_channel(sdma, sdmac->channel);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
+ sdma_start_desc(sdmac);
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
}
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 44
static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr)
@@ -1596,7 +2125,7 @@ static int sdma_get_firmware(struct sdma_engine *sdma,
static int sdma_init(struct sdma_engine *sdma)
{
- int i, ret;
+ int i, ret, ccbsize;
dma_addr_t ccb_phys;
ret = clk_enable(sdma->clk_ipg);
@@ -1609,14 +2138,17 @@ static int sdma_init(struct sdma_engine *sdma)
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
- sdma->channel_control = dma_alloc_coherent(NULL,
- MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
- sizeof(struct sdma_context_data),
- &ccb_phys, GFP_KERNEL);
+ ccbsize = MAX_DMA_CHANNELS * (sizeof(struct sdma_channel_control)
+ + sizeof(struct sdma_context_data));
+ sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys);
if (!sdma->channel_control) {
- ret = -ENOMEM;
- goto err_dma_alloc;
+ sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize,
+ &ccb_phys, GFP_KERNEL);
+ if (!sdma->channel_control) {
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
}
sdma->context = (void *)sdma->channel_control +
@@ -1636,7 +2168,7 @@ static int sdma_init(struct sdma_engine *sdma)
for (i = 0; i < MAX_DMA_CHANNELS; i++)
writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
- ret = sdma_request_channel(&sdma->channel[0]);
+ ret = sdma_request_channel0(sdma);
if (ret)
goto err_dma_alloc;
@@ -1647,7 +2179,10 @@ static int sdma_init(struct sdma_engine *sdma)
/* Set bits of CONFIG register but with static context switching */
/* FIXME: Check whether to set ACR bit depending on clock ratios */
- writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
+ if (sdma->clk_ratio)
+ writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
+ else
+ writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
@@ -1674,6 +2209,10 @@ static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
if (!imx_dma_is_general_purpose(chan))
return false;
+ /* return false if it's not the right device */
+ if ((sdmac->sdma->drvdata == &sdma_imx8m)
+ && (sdmac->sdma->idx != data->idx))
+ return false;
sdmac->data = *data;
chan->private = &sdmac->data;
@@ -1691,17 +2230,15 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
if (dma_spec->args_count != 3)
return NULL;
+ memset(&data, 0, sizeof(data));
+
data.dma_request = dma_spec->args[0];
data.peripheral_type = dma_spec->args[1];
- data.priority = dma_spec->args[2];
- /*
- * init dma_request2 to zero, which is not used by the dts.
- * For P2P, dma_request2 is init from dma_request_channel(),
- * chan->private will point to the imx_dma_data, and in
- * device_alloc_chan_resources(), imx_dma_data.dma_request2 will
- * be set to sdmac->event_id1.
- */
- data.dma_request2 = 0;
+ /* Get sw_done setting if sw_done enabled */
+ if (dma_spec->args[2] & BIT(31))
+ data.done_sel = dma_spec->args[2];
+ data.priority = dma_spec->args[2] & 0xff;
+ data.idx = sdma->idx;
return dma_request_channel(mask, sdma_filter_fn, &data);
}
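The third cell of the dma phandle is now overloaded: the low byte still carries the priority, while bit 31 flags software done with the selector in bits 8-15. A decoding sketch mirroring sdma_xlate() above (it assumes the vendor imx_dma_data gained the done_sel field used elsewhere in this patch):

/* Sketch only: how the third #dma-cells value is interpreted. */
static void sdma_decode_cell(u32 cell, struct imx_dma_data *data)
{
	if (cell & BIT(31))		/* sw_done requested */
		data->done_sel = cell;	/* keeps BIT(31) | (sel << 8) */

	data->priority = cell & 0xff;	/* low byte: channel priority */
}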
@@ -1741,6 +2278,8 @@ static int sdma_probe(struct platform_device *pdev)
if (!sdma)
return -ENOMEM;
+ sdma->clk_ratio = of_property_read_bool(np, "fsl,ratio-1-1");
+
spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
@@ -1791,6 +2330,7 @@ static int sdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+ dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
INIT_LIST_HEAD(&sdma->dma_device.channels);
/* Initialize channel parameters */
@@ -1798,24 +2338,26 @@ static int sdma_probe(struct platform_device *pdev)
struct sdma_channel *sdmac = &sdma->channel[i];
sdmac->sdma = sdma;
- spin_lock_init(&sdmac->lock);
-
- sdmac->chan.device = &sdma->dma_device;
- dma_cookie_init(&sdmac->chan);
+ sdmac->context_loaded = false;
sdmac->channel = i;
+ sdmac->status = DMA_IN_PROGRESS;
+ sdmac->vc.desc_free = sdma_desc_free;
+ INIT_LIST_HEAD(&sdmac->pending);
- tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
- (unsigned long) sdmac);
/*
* Add the channel to the DMAC list. Do not add channel 0 though
* because we need it internally in the SDMA driver. This also means
* that channel 0 in dmaengine counting matches sdma channel 1.
*/
if (i)
- list_add_tail(&sdmac->chan.device_node,
- &sdma->dma_device.channels);
+ vchan_init(&sdmac->vc, &sdma->dma_device);
}
+ if (np)
+ sdma->iram_pool = of_gen_pool_get(np, "iram", 0);
+ if (!sdma->iram_pool)
+ dev_warn(&pdev->dev, "no iram assigned, using external mem\n");
+
ret = sdma_init(sdma);
if (ret)
goto err_init;
@@ -1834,17 +2376,23 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
sdma->dma_device.device_tx_status = sdma_tx_status;
+ sdma->dma_device.device_synchronize = sdma_wait_tasklet;
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
- sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
- sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ sdma->dma_device.device_terminate_all = sdma_terminate_all;
+ sdma->dma_device.device_pause = sdma_channel_pause;
+ sdma->dma_device.device_resume = sdma_channel_resume;
+ sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
+ sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
+ sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
+ sdma->dma_device.device_prep_dma_sg = sdma_prep_memcpy_sg;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
- dma_set_max_seg_size(sdma->dma_device.dev, 65535);
+ sdma->dma_device.copy_align = 2;
+ dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
platform_set_drvdata(pdev, sdma);
@@ -1869,6 +2417,8 @@ static int sdma_probe(struct platform_device *pdev)
}
of_node_put(spba_bus);
}
+ /* There may be multiple SDMA devices, such as on i.mx8mscale */
+ sdma->idx = sdma_dev_idx++;
/*
* Kick off firmware loading as the very last step:
@@ -1896,6 +2446,7 @@ static int sdma_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
}
}
+ sdma->fw_name = fw_name;
return 0;
@@ -1924,17 +2475,124 @@ static int sdma_remove(struct platform_device *pdev)
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];
- tasklet_kill(&sdmac->tasklet);
+ tasklet_kill(&sdmac->vc.task);
+ sdma_free_chan_resources(&sdmac->vc.chan);
}
platform_set_drvdata(pdev, NULL);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int sdma_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdma_engine *sdma = platform_get_drvdata(pdev);
+ int i, ret = 0;
+
+ sdma->suspend_off = false;
+
+ /* Do nothing if not i.MX6SX, i.MX6UL or i.MX7D */
+ if (sdma->drvdata != &sdma_imx6sx && sdma->drvdata != &sdma_imx7d
+ && sdma->drvdata != &sdma_imx6ul)
+ return 0;
+
+ clk_enable(sdma->clk_ipg);
+ clk_enable(sdma->clk_ahb);
+
+ ret = sdma_save_restore_context(sdma, true);
+ if (ret) {
+ dev_err(sdma->dev, "save context error!\n");
+ return ret;
+ }
+ /* save regs */
+ for (i = 0; i < MXC_SDMA_SAVED_REG_NUM; i++) {
+ /*
+ * 0x78(SDMA_XTRIG_CONF2+4)~0x100(SDMA_CHNPRI_0) registers are
+ * reserved and can't be touched. Skip these regs.
+ */
+ if (i > SDMA_XTRIG_CONF2 / 4)
+ sdma->save_regs[i] = readl_relaxed(sdma->regs +
+ MXC_SDMA_RESERVED_REG
+ + 4 * i);
+ else
+ sdma->save_regs[i] = readl_relaxed(sdma->regs + 4 * i);
+ }
+
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
+
+ return 0;
+}
+
+static int sdma_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdma_engine *sdma = platform_get_drvdata(pdev);
+ int i, ret;
+
+ /* Do nothing if not i.MX6SX, i.MX6UL or i.MX7D */
+ if (sdma->drvdata != &sdma_imx6sx && sdma->drvdata != &sdma_imx7d
+ && sdma->drvdata != &sdma_imx6ul)
+ return 0;
+
+ clk_enable(sdma->clk_ipg);
+ clk_enable(sdma->clk_ahb);
+ /* Do nothing if mega/fast mix not turned off */
+ if (readl_relaxed(sdma->regs + SDMA_H_C0PTR)) {
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
+ return 0;
+ }
+
+ sdma->suspend_off = true;
+
+ /* restore regs and load firmware */
+ for (i = 0; i < MXC_SDMA_SAVED_REG_NUM; i++) {
+ /*
+ * 0x78(SDMA_XTRIG_CONF2+4)~0x100(SDMA_CHNPRI_0) registers are
+ * reserved and can't be touched. Skip these regs.
+ */
+ if (i > SDMA_XTRIG_CONF2 / 4)
+ writel_relaxed(sdma->save_regs[i], sdma->regs +
+ MXC_SDMA_RESERVED_REG + 4 * i);
+ else
+ writel_relaxed(sdma->save_regs[i], sdma->regs + 4 * i);
+ }
+
+ /* prepare priority for channel0 to start */
+ sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
+
+ ret = sdma_get_firmware(sdma, sdma->fw_name);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get firware\n");
+ goto out;
+ }
+
+ ret = sdma_save_restore_context(sdma, false);
+ if (ret) {
+ dev_err(sdma->dev, "restore context error!\n");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops sdma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(sdma_suspend, sdma_resume)
+};
+
static struct platform_driver sdma_driver = {
.driver = {
.name = "imx-sdma",
.of_match_table = sdma_dt_ids,
+ .pm = &sdma_pm_ops,
},
.id_table = sdma_devtypes,
.remove = sdma_remove,