Diffstat (limited to 'drivers/spi')
-rw-r--r--	drivers/spi/Kconfig          |   27
-rw-r--r--	drivers/spi/Makefile         |    5
-rw-r--r--	drivers/spi/at25.c           |    2
-rw-r--r--	drivers/spi/atmel_spi.c      |    2
-rw-r--r--	drivers/spi/au1550_spi.c     |  974
-rw-r--r--	drivers/spi/spi.c            |   43
-rw-r--r--	drivers/spi/spi_bfin5xx.c    | 1313
-rw-r--r--	drivers/spi/spi_bitbang.c    |   26
-rw-r--r--	drivers/spi/spi_butterfly.c  |   83
-rw-r--r--	drivers/spi/spi_s3c24xx.c    |    8
-rw-r--r--	drivers/spi/spidev.c         |  584
11 files changed, 2948 insertions, 119 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 7e54e48efd5c..07c587ec71be 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -58,6 +58,23 @@ config SPI_ATMEL
 	  This selects a driver for the Atmel SPI Controller, present on
 	  many AT32 (AVR32) and AT91 (ARM) chips.
 
+config SPI_BFIN
+	tristate "SPI controller driver for ADI Blackfin5xx"
+	depends on SPI_MASTER && BFIN
+	help
+	  This is the SPI controller master driver for Blackfin 5xx processor.
+
+config SPI_AU1550
+	tristate "Au1550/Au12x0 SPI Controller"
+	depends on SPI_MASTER && (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
+	select SPI_BITBANG
+	help
+	  If you say yes to this option, support will be included for the
+	  Au1550 SPI controller (may also work with Au1200,Au1210,Au1250).
+
+	  This driver can also be built as a module. If so, the module
+	  will be called au1550_spi.
+
 config SPI_BITBANG
 	tristate "Bitbanging SPI master"
 	depends on SPI_MASTER && EXPERIMENTAL
@@ -153,11 +170,19 @@ config SPI_AT25
 	  This driver can also be built as a module. If so, the module
 	  will be called at25.
 
+config SPI_SPIDEV
+	tristate "User mode SPI device driver support"
+	depends on SPI_MASTER && EXPERIMENTAL
+	help
+	  This supports user mode SPI protocol drivers.
+
+	  Note that this application programming interface is EXPERIMENTAL
+	  and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
+
 #
 # Add new SPI protocol masters in alphabetical order above this line
 #
-
 # (slave support would go here)
 
 endmenu # "SPI support"
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 3c280ad89202..624b6363f490 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -11,8 +11,10 @@ endif
 obj-$(CONFIG_SPI_MASTER) += spi.o
 
 # SPI master controller drivers (bus)
-obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
 obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
+obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
+obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
+obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
 obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
 obj-$(CONFIG_SPI_IMX) += spi_imx.o
 obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
@@ -24,6 +26,7 @@ obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
 
 # SPI protocol drivers (device/link on bus)
 obj-$(CONFIG_SPI_AT25) += at25.o
+obj-$(CONFIG_SPI_SPIDEV) += spidev.o
 # ... add above this line ...
 
 # SPI slave controller drivers (upstream link)
diff --git a/drivers/spi/at25.c b/drivers/spi/at25.c
index 48e4f48e779f..8efa07e8b8c2 100644
--- a/drivers/spi/at25.c
+++ b/drivers/spi/at25.c
@@ -291,7 +291,7 @@ static int at25_probe(struct spi_device *spi)
 	 */
 	sr = spi_w8r8(spi, AT25_RDSR);
 	if (sr < 0 || sr & AT25_SR_nRDY) {
-		dev_dbg(&at25->spi->dev, "rdsr --> %d (%02x)\n", sr, sr);
+		dev_dbg(&spi->dev, "rdsr --> %d (%02x)\n", sr, sr);
 		err = -ENXIO;
 		goto fail;
 	}
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 6fa260d1a9be..66e7bc985797 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -425,7 +425,7 @@ static int atmel_spi_setup(struct spi_device *spi)
 		if (ret)
 			return ret;
 		spi->controller_state = (void *)npcs_pin;
-		gpio_direction_output(npcs_pin);
+		gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
 	}
 
 	dev_dbg(&spi->dev,
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
new file mode 100644
index 000000000000..ae2b1af0dba4
--- /dev/null
+++ b/drivers/spi/au1550_spi.c
@@ -0,0 +1,974 @@
+/*
+ * au1550_spi.c - au1550 psc spi controller driver
+ * may work also with au1200, au1210, au1250
+ * will not work on au1000, au1100 and au1500 (no full spi controller there)
+ *
+ * Copyright (c) 2006 ATRON electronic GmbH
+ * Author: Jan Nikitenko <jan.nikitenko@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi_bitbang.h> +#include <linux/dma-mapping.h> +#include <linux/completion.h> +#include <asm/mach-au1x00/au1000.h> +#include <asm/mach-au1x00/au1xxx_psc.h> +#include <asm/mach-au1x00/au1xxx_dbdma.h> + +#include <asm/mach-au1x00/au1550_spi.h> + +static unsigned usedma = 1; +module_param(usedma, uint, 0644); + +/* +#define AU1550_SPI_DEBUG_LOOPBACK +*/ + + +#define AU1550_SPI_DBDMA_DESCRIPTORS 1 +#define AU1550_SPI_DMA_RXTMP_MINSIZE 2048U + +struct au1550_spi { + struct spi_bitbang bitbang; + + volatile psc_spi_t __iomem *regs; + int irq; + unsigned freq_max; + unsigned freq_min; + + unsigned len; + unsigned tx_count; + unsigned rx_count; + const u8 *tx; + u8 *rx; + + void (*rx_word)(struct au1550_spi *hw); + void (*tx_word)(struct au1550_spi *hw); + int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t); + irqreturn_t (*irq_callback)(struct au1550_spi *hw); + + struct completion master_done; + + unsigned usedma; + u32 dma_tx_id; + u32 dma_rx_id; + u32 dma_tx_ch; + u32 dma_rx_ch; + + u8 *dma_rx_tmpbuf; + unsigned dma_rx_tmpbuf_size; + u32 dma_rx_tmpbuf_addr; + + struct spi_master *master; + struct device *dev; + struct au1550_spi_info *pdata; +}; + + +/* we use an 8-bit memory device for dma transfers to/from spi fifo */ +static dbdev_tab_t au1550_spi_mem_dbdev = +{ + .dev_id = DBDMA_MEM_CHAN, + .dev_flags = DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC, + .dev_tsize = 0, + .dev_devwidth = 8, + .dev_physaddr = 0x00000000, + .dev_intlevel = 0, + .dev_intpolarity = 0 +}; + +static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw); + + +/** + * compute BRG and DIV bits to setup spi clock based on main input clock rate + * that was specified in platform data structure + * according to au1550 datasheet: + * psc_tempclk = psc_mainclk / (2 << DIV) + * spiclk = psc_tempclk / (2 * (BRG + 1)) + * BRG valid range is 4..63 + * DIV valid range is 0..3 + */ +static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned speed_hz) +{ + u32 mainclk_hz = hw->pdata->mainclk_hz; + u32 div, brg; + + for (div = 0; div < 4; div++) { + brg = mainclk_hz / speed_hz / (4 << div); + /* now we have BRG+1 in brg, so count with that */ + if (brg < (4 + 1)) { + brg = (4 + 1); /* speed_hz too big */ + break; /* set lowest brg (div is == 0) */ + } + if (brg <= (63 + 1)) + break; /* we have valid brg and div */ + } + if (div == 4) { + div = 3; /* speed_hz too small */ + brg = (63 + 1); /* set highest brg and div */ + } + brg--; + return PSC_SPICFG_SET_BAUD(brg) | PSC_SPICFG_SET_DIV(div); +} + +static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw) +{ + hw->regs->psc_spimsk = + PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO + | PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO + | PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD; + au_sync(); + + hw->regs->psc_spievent = + PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO + | PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO + | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD; + au_sync(); +} + +static void au1550_spi_reset_fifos(struct au1550_spi *hw) +{ + u32 pcr; + + hw->regs->psc_spipcr = PSC_SPIPCR_RC | PSC_SPIPCR_TC; + au_sync(); + do { + pcr = 
hw->regs->psc_spipcr; + au_sync(); + } while (pcr != 0); +} + +/* + * dma transfers are used for the most common spi word size of 8-bits + * we cannot easily change already set up dma channels' width, so if we wanted + * dma support for more than 8-bit words (up to 24 bits), we would need to + * setup dma channels from scratch on each spi transfer, based on bits_per_word + * instead we have pre set up 8 bit dma channels supporting spi 4 to 8 bits + * transfers, and 9 to 24 bits spi transfers will be done in pio irq based mode + * callbacks to handle dma or pio are set up in au1550_spi_bits_handlers_set() + */ +static void au1550_spi_chipsel(struct spi_device *spi, int value) +{ + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + unsigned cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; + u32 cfg, stat; + + switch (value) { + case BITBANG_CS_INACTIVE: + if (hw->pdata->deactivate_cs) + hw->pdata->deactivate_cs(hw->pdata, spi->chip_select, + cspol); + break; + + case BITBANG_CS_ACTIVE: + au1550_spi_bits_handlers_set(hw, spi->bits_per_word); + + cfg = hw->regs->psc_spicfg; + au_sync(); + hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE; + au_sync(); + + if (spi->mode & SPI_CPOL) + cfg |= PSC_SPICFG_BI; + else + cfg &= ~PSC_SPICFG_BI; + if (spi->mode & SPI_CPHA) + cfg &= ~PSC_SPICFG_CDE; + else + cfg |= PSC_SPICFG_CDE; + + if (spi->mode & SPI_LSB_FIRST) + cfg |= PSC_SPICFG_MLF; + else + cfg &= ~PSC_SPICFG_MLF; + + if (hw->usedma && spi->bits_per_word <= 8) + cfg &= ~PSC_SPICFG_DD_DISABLE; + else + cfg |= PSC_SPICFG_DD_DISABLE; + cfg = PSC_SPICFG_CLR_LEN(cfg); + cfg |= PSC_SPICFG_SET_LEN(spi->bits_per_word); + + cfg = PSC_SPICFG_CLR_BAUD(cfg); + cfg &= ~PSC_SPICFG_SET_DIV(3); + cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz); + + hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE; + au_sync(); + do { + stat = hw->regs->psc_spistat; + au_sync(); + } while ((stat & PSC_SPISTAT_DR) == 0); + + if (hw->pdata->activate_cs) + hw->pdata->activate_cs(hw->pdata, spi->chip_select, + cspol); + break; + } +} + +static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) +{ + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + unsigned bpw, hz; + u32 cfg, stat; + + bpw = t ? t->bits_per_word : spi->bits_per_word; + hz = t ? 
t->speed_hz : spi->max_speed_hz; + + if (bpw < 4 || bpw > 24) { + dev_err(&spi->dev, "setupxfer: invalid bits_per_word=%d\n", + bpw); + return -EINVAL; + } + if (hz > spi->max_speed_hz || hz > hw->freq_max || hz < hw->freq_min) { + dev_err(&spi->dev, "setupxfer: clock rate=%d out of range\n", + hz); + return -EINVAL; + } + + au1550_spi_bits_handlers_set(hw, spi->bits_per_word); + + cfg = hw->regs->psc_spicfg; + au_sync(); + hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE; + au_sync(); + + if (hw->usedma && bpw <= 8) + cfg &= ~PSC_SPICFG_DD_DISABLE; + else + cfg |= PSC_SPICFG_DD_DISABLE; + cfg = PSC_SPICFG_CLR_LEN(cfg); + cfg |= PSC_SPICFG_SET_LEN(bpw); + + cfg = PSC_SPICFG_CLR_BAUD(cfg); + cfg &= ~PSC_SPICFG_SET_DIV(3); + cfg |= au1550_spi_baudcfg(hw, hz); + + hw->regs->psc_spicfg = cfg; + au_sync(); + + if (cfg & PSC_SPICFG_DE_ENABLE) { + do { + stat = hw->regs->psc_spistat; + au_sync(); + } while ((stat & PSC_SPISTAT_DR) == 0); + } + + au1550_spi_reset_fifos(hw); + au1550_spi_mask_ack_all(hw); + return 0; +} + +static int au1550_spi_setup(struct spi_device *spi) +{ + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + + if (spi->bits_per_word == 0) + spi->bits_per_word = 8; + if (spi->bits_per_word < 4 || spi->bits_per_word > 24) { + dev_err(&spi->dev, "setup: invalid bits_per_word=%d\n", + spi->bits_per_word); + return -EINVAL; + } + + if (spi->max_speed_hz == 0) + spi->max_speed_hz = hw->freq_max; + if (spi->max_speed_hz > hw->freq_max + || spi->max_speed_hz < hw->freq_min) + return -EINVAL; + /* + * NOTE: cannot change speed and other hw settings immediately, + * otherwise sharing of spi bus is not possible, + * so do not call setupxfer(spi, NULL) here + */ + return 0; +} + +/* + * for dma spi transfers, we have to setup rx channel, otherwise there is + * no reliable way how to recognize that spi transfer is done + * dma complete callbacks are called before real spi transfer is finished + * and if only tx dma channel is set up (and rx fifo overflow event masked) + * spi master done event irq is not generated unless rx fifo is empty (emptied) + * so we need rx tmp buffer to use for rx dma if user does not provide one + */ +static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size) +{ + hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL); + if (!hw->dma_rx_tmpbuf) + return -ENOMEM; + hw->dma_rx_tmpbuf_size = size; + hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf, + size, DMA_FROM_DEVICE); + if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) { + kfree(hw->dma_rx_tmpbuf); + hw->dma_rx_tmpbuf = 0; + hw->dma_rx_tmpbuf_size = 0; + return -EFAULT; + } + return 0; +} + +static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw) +{ + dma_unmap_single(hw->dev, hw->dma_rx_tmpbuf_addr, + hw->dma_rx_tmpbuf_size, DMA_FROM_DEVICE); + kfree(hw->dma_rx_tmpbuf); + hw->dma_rx_tmpbuf = 0; + hw->dma_rx_tmpbuf_size = 0; +} + +static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) +{ + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + dma_addr_t dma_tx_addr; + dma_addr_t dma_rx_addr; + u32 res; + + hw->len = t->len; + hw->tx_count = 0; + hw->rx_count = 0; + + hw->tx = t->tx_buf; + hw->rx = t->rx_buf; + dma_tx_addr = t->tx_dma; + dma_rx_addr = t->rx_dma; + + /* + * check if buffers are already dma mapped, map them otherwise + * use rx buffer in place of tx if tx buffer was not provided + * use temp rx buffer (preallocated or realloc to fit) for rx dma + */ + if (t->rx_buf) { + if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it 
*/ + dma_rx_addr = dma_map_single(hw->dev, + (void *)t->rx_buf, + t->len, DMA_FROM_DEVICE); + if (dma_mapping_error(dma_rx_addr)) + dev_err(hw->dev, "rx dma map error\n"); + } + } else { + if (t->len > hw->dma_rx_tmpbuf_size) { + int ret; + + au1550_spi_dma_rxtmp_free(hw); + ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len, + AU1550_SPI_DMA_RXTMP_MINSIZE)); + if (ret < 0) + return ret; + } + hw->rx = hw->dma_rx_tmpbuf; + dma_rx_addr = hw->dma_rx_tmpbuf_addr; + dma_sync_single_for_device(hw->dev, dma_rx_addr, + t->len, DMA_FROM_DEVICE); + } + if (t->tx_buf) { + if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ + dma_tx_addr = dma_map_single(hw->dev, + (void *)t->tx_buf, + t->len, DMA_TO_DEVICE); + if (dma_mapping_error(dma_tx_addr)) + dev_err(hw->dev, "tx dma map error\n"); + } + } else { + dma_sync_single_for_device(hw->dev, dma_rx_addr, + t->len, DMA_BIDIRECTIONAL); + hw->tx = hw->rx; + } + + /* put buffers on the ring */ + res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, hw->rx, t->len); + if (!res) + dev_err(hw->dev, "rx dma put dest error\n"); + + res = au1xxx_dbdma_put_source(hw->dma_tx_ch, (void *)hw->tx, t->len); + if (!res) + dev_err(hw->dev, "tx dma put source error\n"); + + au1xxx_dbdma_start(hw->dma_rx_ch); + au1xxx_dbdma_start(hw->dma_tx_ch); + + /* by default enable nearly all events interrupt */ + hw->regs->psc_spimsk = PSC_SPIMSK_SD; + au_sync(); + + /* start the transfer */ + hw->regs->psc_spipcr = PSC_SPIPCR_MS; + au_sync(); + + wait_for_completion(&hw->master_done); + + au1xxx_dbdma_stop(hw->dma_tx_ch); + au1xxx_dbdma_stop(hw->dma_rx_ch); + + if (!t->rx_buf) { + /* using the temporal preallocated and premapped buffer */ + dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len, + DMA_FROM_DEVICE); + } + /* unmap buffers if mapped above */ + if (t->rx_buf && t->rx_dma == 0 ) + dma_unmap_single(hw->dev, dma_rx_addr, t->len, + DMA_FROM_DEVICE); + if (t->tx_buf && t->tx_dma == 0 ) + dma_unmap_single(hw->dev, dma_tx_addr, t->len, + DMA_TO_DEVICE); + + return hw->rx_count < hw->tx_count ? 
hw->rx_count : hw->tx_count; +} + +static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw) +{ + u32 stat, evnt; + + stat = hw->regs->psc_spistat; + evnt = hw->regs->psc_spievent; + au_sync(); + if ((stat & PSC_SPISTAT_DI) == 0) { + dev_err(hw->dev, "Unexpected IRQ!\n"); + return IRQ_NONE; + } + + if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO + | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO + | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD)) + != 0) { + /* + * due to an spi error we consider transfer as done, + * so mask all events until before next transfer start + * and stop the possibly running dma immediatelly + */ + au1550_spi_mask_ack_all(hw); + au1xxx_dbdma_stop(hw->dma_rx_ch); + au1xxx_dbdma_stop(hw->dma_tx_ch); + + /* get number of transfered bytes */ + hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch); + hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch); + + au1xxx_dbdma_reset(hw->dma_rx_ch); + au1xxx_dbdma_reset(hw->dma_tx_ch); + au1550_spi_reset_fifos(hw); + + dev_err(hw->dev, + "Unexpected SPI error: event=0x%x stat=0x%x!\n", + evnt, stat); + + complete(&hw->master_done); + return IRQ_HANDLED; + } + + if ((evnt & PSC_SPIEVNT_MD) != 0) { + /* transfer completed successfully */ + au1550_spi_mask_ack_all(hw); + hw->rx_count = hw->len; + hw->tx_count = hw->len; + complete(&hw->master_done); + } + return IRQ_HANDLED; +} + + +/* routines to handle different word sizes in pio mode */ +#define AU1550_SPI_RX_WORD(size, mask) \ +static void au1550_spi_rx_word_##size(struct au1550_spi *hw) \ +{ \ + u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask); \ + au_sync(); \ + if (hw->rx) { \ + *(u##size *)hw->rx = (u##size)fifoword; \ + hw->rx += (size) / 8; \ + } \ + hw->rx_count += (size) / 8; \ +} + +#define AU1550_SPI_TX_WORD(size, mask) \ +static void au1550_spi_tx_word_##size(struct au1550_spi *hw) \ +{ \ + u32 fifoword = 0; \ + if (hw->tx) { \ + fifoword = *(u##size *)hw->tx & (u32)(mask); \ + hw->tx += (size) / 8; \ + } \ + hw->tx_count += (size) / 8; \ + if (hw->tx_count >= hw->len) \ + fifoword |= PSC_SPITXRX_LC; \ + hw->regs->psc_spitxrx = fifoword; \ + au_sync(); \ +} + +AU1550_SPI_RX_WORD(8,0xff) +AU1550_SPI_RX_WORD(16,0xffff) +AU1550_SPI_RX_WORD(32,0xffffff) +AU1550_SPI_TX_WORD(8,0xff) +AU1550_SPI_TX_WORD(16,0xffff) +AU1550_SPI_TX_WORD(32,0xffffff) + +static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t) +{ + u32 stat, mask; + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + + hw->tx = t->tx_buf; + hw->rx = t->rx_buf; + hw->len = t->len; + hw->tx_count = 0; + hw->rx_count = 0; + + /* by default enable nearly all events after filling tx fifo */ + mask = PSC_SPIMSK_SD; + + /* fill the transmit FIFO */ + while (hw->tx_count < hw->len) { + + hw->tx_word(hw); + + if (hw->tx_count >= hw->len) { + /* mask tx fifo request interrupt as we are done */ + mask |= PSC_SPIMSK_TR; + } + + stat = hw->regs->psc_spistat; + au_sync(); + if (stat & PSC_SPISTAT_TF) + break; + } + + /* enable event interrupts */ + hw->regs->psc_spimsk = mask; + au_sync(); + + /* start the transfer */ + hw->regs->psc_spipcr = PSC_SPIPCR_MS; + au_sync(); + + wait_for_completion(&hw->master_done); + + return hw->rx_count < hw->tx_count ? 
hw->rx_count : hw->tx_count; +} + +static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw) +{ + int busy; + u32 stat, evnt; + + stat = hw->regs->psc_spistat; + evnt = hw->regs->psc_spievent; + au_sync(); + if ((stat & PSC_SPISTAT_DI) == 0) { + dev_err(hw->dev, "Unexpected IRQ!\n"); + return IRQ_NONE; + } + + if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO + | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO + | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD)) + != 0) { + dev_err(hw->dev, + "Unexpected SPI error: event=0x%x stat=0x%x!\n", + evnt, stat); + /* + * due to an error we consider transfer as done, + * so mask all events until before next transfer start + */ + au1550_spi_mask_ack_all(hw); + au1550_spi_reset_fifos(hw); + complete(&hw->master_done); + return IRQ_HANDLED; + } + + /* + * while there is something to read from rx fifo + * or there is a space to write to tx fifo: + */ + do { + busy = 0; + stat = hw->regs->psc_spistat; + au_sync(); + + if ((stat & PSC_SPISTAT_RE) == 0 && hw->rx_count < hw->len) { + hw->rx_word(hw); + /* ack the receive request event */ + hw->regs->psc_spievent = PSC_SPIEVNT_RR; + au_sync(); + busy = 1; + } + + if ((stat & PSC_SPISTAT_TF) == 0 && hw->tx_count < hw->len) { + hw->tx_word(hw); + /* ack the transmit request event */ + hw->regs->psc_spievent = PSC_SPIEVNT_TR; + au_sync(); + busy = 1; + } + } while (busy); + + evnt = hw->regs->psc_spievent; + au_sync(); + + if (hw->rx_count >= hw->len || (evnt & PSC_SPIEVNT_MD) != 0) { + /* transfer completed successfully */ + au1550_spi_mask_ack_all(hw); + complete(&hw->master_done); + } + return IRQ_HANDLED; +} + +static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + return hw->txrx_bufs(spi, t); +} + +static irqreturn_t au1550_spi_irq(int irq, void *dev, struct pt_regs *regs) +{ + struct au1550_spi *hw = dev; + return hw->irq_callback(hw); +} + +static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw) +{ + if (bpw <= 8) { + if (hw->usedma) { + hw->txrx_bufs = &au1550_spi_dma_txrxb; + hw->irq_callback = &au1550_spi_dma_irq_callback; + } else { + hw->rx_word = &au1550_spi_rx_word_8; + hw->tx_word = &au1550_spi_tx_word_8; + hw->txrx_bufs = &au1550_spi_pio_txrxb; + hw->irq_callback = &au1550_spi_pio_irq_callback; + } + } else if (bpw <= 16) { + hw->rx_word = &au1550_spi_rx_word_16; + hw->tx_word = &au1550_spi_tx_word_16; + hw->txrx_bufs = &au1550_spi_pio_txrxb; + hw->irq_callback = &au1550_spi_pio_irq_callback; + } else { + hw->rx_word = &au1550_spi_rx_word_32; + hw->tx_word = &au1550_spi_tx_word_32; + hw->txrx_bufs = &au1550_spi_pio_txrxb; + hw->irq_callback = &au1550_spi_pio_irq_callback; + } +} + +static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw) +{ + u32 stat, cfg; + + /* set up the PSC for SPI mode */ + hw->regs->psc_ctrl = PSC_CTRL_DISABLE; + au_sync(); + hw->regs->psc_sel = PSC_SEL_PS_SPIMODE; + au_sync(); + + hw->regs->psc_spicfg = 0; + au_sync(); + + hw->regs->psc_ctrl = PSC_CTRL_ENABLE; + au_sync(); + + do { + stat = hw->regs->psc_spistat; + au_sync(); + } while ((stat & PSC_SPISTAT_SR) == 0); + + + cfg = hw->usedma ? 
0 : PSC_SPICFG_DD_DISABLE; + cfg |= PSC_SPICFG_SET_LEN(8); + cfg |= PSC_SPICFG_RT_FIFO8 | PSC_SPICFG_TT_FIFO8; + /* use minimal allowed brg and div values as initial setting: */ + cfg |= PSC_SPICFG_SET_BAUD(4) | PSC_SPICFG_SET_DIV(0); + +#ifdef AU1550_SPI_DEBUG_LOOPBACK + cfg |= PSC_SPICFG_LB; +#endif + + hw->regs->psc_spicfg = cfg; + au_sync(); + + au1550_spi_mask_ack_all(hw); + + hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE; + au_sync(); + + do { + stat = hw->regs->psc_spistat; + au_sync(); + } while ((stat & PSC_SPISTAT_DR) == 0); +} + + +static int __init au1550_spi_probe(struct platform_device *pdev) +{ + struct au1550_spi *hw; + struct spi_master *master; + int err = 0; + + master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi)); + if (master == NULL) { + dev_err(&pdev->dev, "No memory for spi_master\n"); + err = -ENOMEM; + goto err_nomem; + } + + hw = spi_master_get_devdata(master); + + hw->master = spi_master_get(master); + hw->pdata = pdev->dev.platform_data; + hw->dev = &pdev->dev; + + if (hw->pdata == NULL) { + dev_err(&pdev->dev, "No platform data supplied\n"); + err = -ENOENT; + goto err_no_pdata; + } + + platform_set_drvdata(pdev, hw); + + init_completion(&hw->master_done); + + hw->bitbang.master = hw->master; + hw->bitbang.setup_transfer = au1550_spi_setupxfer; + hw->bitbang.chipselect = au1550_spi_chipsel; + hw->bitbang.master->setup = au1550_spi_setup; + hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs; + + switch (hw->pdata->bus_num) { + case 0: + hw->irq = AU1550_PSC0_INT; + hw->regs = (volatile psc_spi_t *)PSC0_BASE_ADDR; + hw->dma_rx_id = DSCR_CMD0_PSC0_RX; + hw->dma_tx_id = DSCR_CMD0_PSC0_TX; + break; + case 1: + hw->irq = AU1550_PSC1_INT; + hw->regs = (volatile psc_spi_t *)PSC1_BASE_ADDR; + hw->dma_rx_id = DSCR_CMD0_PSC1_RX; + hw->dma_tx_id = DSCR_CMD0_PSC1_TX; + break; + case 2: + hw->irq = AU1550_PSC2_INT; + hw->regs = (volatile psc_spi_t *)PSC2_BASE_ADDR; + hw->dma_rx_id = DSCR_CMD0_PSC2_RX; + hw->dma_tx_id = DSCR_CMD0_PSC2_TX; + break; + case 3: + hw->irq = AU1550_PSC3_INT; + hw->regs = (volatile psc_spi_t *)PSC3_BASE_ADDR; + hw->dma_rx_id = DSCR_CMD0_PSC3_RX; + hw->dma_tx_id = DSCR_CMD0_PSC3_TX; + break; + default: + dev_err(&pdev->dev, "Wrong bus_num of SPI\n"); + err = -ENOENT; + goto err_no_pdata; + } + + if (request_mem_region((unsigned long)hw->regs, sizeof(psc_spi_t), + pdev->name) == NULL) { + dev_err(&pdev->dev, "Cannot reserve iomem region\n"); + err = -ENXIO; + goto err_no_iores; + } + + + if (usedma) { + if (pdev->dev.dma_mask == NULL) + dev_warn(&pdev->dev, "no dma mask\n"); + else + hw->usedma = 1; + } + + if (hw->usedma) { + /* + * create memory device with 8 bits dev_devwidth + * needed for proper byte ordering to spi fifo + */ + int memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev); + if (!memid) { + dev_err(&pdev->dev, + "Cannot create dma 8 bit mem device\n"); + err = -ENXIO; + goto err_dma_add_dev; + } + + hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(memid, + hw->dma_tx_id, NULL, (void *)hw); + if (hw->dma_tx_ch == 0) { + dev_err(&pdev->dev, + "Cannot allocate tx dma channel\n"); + err = -ENXIO; + goto err_no_txdma; + } + au1xxx_dbdma_set_devwidth(hw->dma_tx_ch, 8); + if (au1xxx_dbdma_ring_alloc(hw->dma_tx_ch, + AU1550_SPI_DBDMA_DESCRIPTORS) == 0) { + dev_err(&pdev->dev, + "Cannot allocate tx dma descriptors\n"); + err = -ENXIO; + goto err_no_txdma_descr; + } + + + hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id, + memid, NULL, (void *)hw); + if (hw->dma_rx_ch == 0) { + dev_err(&pdev->dev, + "Cannot allocate rx dma channel\n"); + err 
= -ENXIO; + goto err_no_rxdma; + } + au1xxx_dbdma_set_devwidth(hw->dma_rx_ch, 8); + if (au1xxx_dbdma_ring_alloc(hw->dma_rx_ch, + AU1550_SPI_DBDMA_DESCRIPTORS) == 0) { + dev_err(&pdev->dev, + "Cannot allocate rx dma descriptors\n"); + err = -ENXIO; + goto err_no_rxdma_descr; + } + + err = au1550_spi_dma_rxtmp_alloc(hw, + AU1550_SPI_DMA_RXTMP_MINSIZE); + if (err < 0) { + dev_err(&pdev->dev, + "Cannot allocate initial rx dma tmp buffer\n"); + goto err_dma_rxtmp_alloc; + } + } + + au1550_spi_bits_handlers_set(hw, 8); + + err = request_irq(hw->irq, au1550_spi_irq, 0, pdev->name, hw); + if (err) { + dev_err(&pdev->dev, "Cannot claim IRQ\n"); + goto err_no_irq; + } + + master->bus_num = hw->pdata->bus_num; + master->num_chipselect = hw->pdata->num_chipselect; + + /* + * precompute valid range for spi freq - from au1550 datasheet: + * psc_tempclk = psc_mainclk / (2 << DIV) + * spiclk = psc_tempclk / (2 * (BRG + 1)) + * BRG valid range is 4..63 + * DIV valid range is 0..3 + * round the min and max frequencies to values that would still + * produce valid brg and div + */ + { + int min_div = (2 << 0) * (2 * (4 + 1)); + int max_div = (2 << 3) * (2 * (63 + 1)); + hw->freq_max = hw->pdata->mainclk_hz / min_div; + hw->freq_min = hw->pdata->mainclk_hz / (max_div + 1) + 1; + } + + au1550_spi_setup_psc_as_spi(hw); + + err = spi_bitbang_start(&hw->bitbang); + if (err) { + dev_err(&pdev->dev, "Failed to register SPI master\n"); + goto err_register; + } + + dev_info(&pdev->dev, + "spi master registered: bus_num=%d num_chipselect=%d\n", + master->bus_num, master->num_chipselect); + + return 0; + +err_register: + free_irq(hw->irq, hw); + +err_no_irq: + au1550_spi_dma_rxtmp_free(hw); + +err_dma_rxtmp_alloc: +err_no_rxdma_descr: + if (hw->usedma) + au1xxx_dbdma_chan_free(hw->dma_rx_ch); + +err_no_rxdma: +err_no_txdma_descr: + if (hw->usedma) + au1xxx_dbdma_chan_free(hw->dma_tx_ch); + +err_no_txdma: +err_dma_add_dev: + release_mem_region((unsigned long)hw->regs, sizeof(psc_spi_t)); + +err_no_iores: +err_no_pdata: + spi_master_put(hw->master); + +err_nomem: + return err; +} + +static int __exit au1550_spi_remove(struct platform_device *pdev) +{ + struct au1550_spi *hw = platform_get_drvdata(pdev); + + dev_info(&pdev->dev, "spi master remove: bus_num=%d\n", + hw->master->bus_num); + + spi_bitbang_stop(&hw->bitbang); + free_irq(hw->irq, hw); + release_mem_region((unsigned long)hw->regs, sizeof(psc_spi_t)); + + if (hw->usedma) { + au1550_spi_dma_rxtmp_free(hw); + au1xxx_dbdma_chan_free(hw->dma_rx_ch); + au1xxx_dbdma_chan_free(hw->dma_tx_ch); + } + + platform_set_drvdata(pdev, NULL); + + spi_master_put(hw->master); + return 0; +} + +static struct platform_driver au1550_spi_drv = { + .remove = __exit_p(au1550_spi_remove), + .driver = { + .name = "au1550-spi", + .owner = THIS_MODULE, + }, +}; + +static int __init au1550_spi_init(void) +{ + return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe); +} +module_init(au1550_spi_init); + +static void __exit au1550_spi_exit(void) +{ + platform_driver_unregister(&au1550_spi_drv); +} +module_exit(au1550_spi_exit); + +MODULE_DESCRIPTION("Au1550 PSC SPI Driver"); +MODULE_AUTHOR("Jan Nikitenko <jan.nikitenko@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 2328128728be..c3219b29b5ac 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -152,6 +152,11 @@ static void spi_drv_shutdown(struct device *dev) sdrv->shutdown(to_spi_device(dev)); } +/** + * spi_register_driver - register a SPI driver + * @sdrv: the driver to register + * 
Context: can sleep + */ int spi_register_driver(struct spi_driver *sdrv) { sdrv->driver.bus = &spi_bus_type; @@ -183,14 +188,20 @@ static LIST_HEAD(board_list); static DECLARE_MUTEX(board_lock); -/* On typical mainboards, this is purely internal; and it's not needed +/** + * spi_new_device - instantiate one new SPI device + * @master: Controller to which device is connected + * @chip: Describes the SPI device + * Context: can sleep + * + * On typical mainboards, this is purely internal; and it's not needed * after board init creates the hard-wired devices. Some development * platforms may not be able to use spi_register_board_info though, and * this is exported so that for example a USB or parport based adapter * driver could add devices (which it would learn about out-of-band). */ -struct spi_device *__init_or_module -spi_new_device(struct spi_master *master, struct spi_board_info *chip) +struct spi_device *spi_new_device(struct spi_master *master, + struct spi_board_info *chip) { struct spi_device *proxy; struct device *dev = master->cdev.dev; @@ -251,7 +262,12 @@ fail: } EXPORT_SYMBOL_GPL(spi_new_device); -/* +/** + * spi_register_board_info - register SPI devices for a given board + * @info: array of chip descriptors + * @n: how many descriptors are provided + * Context: can sleep + * * Board-specific early init code calls this (probably during arch_initcall) * with segments of the SPI device table. Any device nodes are created later, * after the relevant parent SPI controller (bus_num) is defined. We keep @@ -337,9 +353,10 @@ static struct class spi_master_class = { /** * spi_alloc_master - allocate SPI master controller * @dev: the controller, possibly using the platform_bus - * @size: how much driver-private data to preallocate; the pointer to this + * @size: how much zeroed driver-private data to allocate; the pointer to this * memory is in the class_data field of the returned class_device, * accessible with spi_master_get_devdata(). + * Context: can sleep * * This call is used only by SPI master controller drivers, which are the * only ones directly touching chip registers. It's how they allocate @@ -352,8 +369,7 @@ static struct class spi_master_class = { * the master's methods before calling spi_register_master(); and (after errors * adding the device) calling spi_master_put() to prevent a memory leak. */ -struct spi_master * __init_or_module -spi_alloc_master(struct device *dev, unsigned size) +struct spi_master *spi_alloc_master(struct device *dev, unsigned size) { struct spi_master *master; @@ -376,6 +392,7 @@ EXPORT_SYMBOL_GPL(spi_alloc_master); /** * spi_register_master - register SPI master controller * @master: initialized master, originally from spi_alloc_master() + * Context: can sleep * * SPI master controllers connect to their drivers using some non-SPI bus, * such as the platform bus. The final stage of probe() in that code @@ -392,8 +409,7 @@ EXPORT_SYMBOL_GPL(spi_alloc_master); * After a successful return, the caller is responsible for calling * spi_unregister_master(). 
*/ -int __init_or_module -spi_register_master(struct spi_master *master) +int spi_register_master(struct spi_master *master) { static atomic_t dyn_bus_id = ATOMIC_INIT((1<<16) - 1); struct device *dev = master->cdev.dev; @@ -439,6 +455,7 @@ static int __unregister(struct device *dev, void *unused) /** * spi_unregister_master - unregister SPI master controller * @master: the master being unregistered + * Context: can sleep * * This call is used only by SPI master controller drivers, which are the * only ones directly touching chip registers. @@ -457,6 +474,7 @@ EXPORT_SYMBOL_GPL(spi_unregister_master); /** * spi_busnum_to_master - look up master associated with bus_num * @bus_num: the master's bus number + * Context: can sleep * * This call may be used with devices that are registered after * arch init time. It returns a refcounted pointer to the relevant @@ -494,6 +512,7 @@ static void spi_complete(void *arg) * spi_sync - blocking/synchronous SPI data transfers * @spi: device with which data will be exchanged * @message: describes the data transfers + * Context: can sleep * * This call may only be used from a context that may sleep. The sleep * is non-interruptible, and has no timeout. Low-overhead controller @@ -510,7 +529,7 @@ static void spi_complete(void *arg) * * The return value is a negative error code if the message could not be * submitted, else zero. When the value is zero, then message->status is - * also defined: it's the completion code for the transfer, either zero + * also defined; it's the completion code for the transfer, either zero * or a negative error code from the controller driver. */ int spi_sync(struct spi_device *spi, struct spi_message *message) @@ -540,6 +559,7 @@ static u8 *buf; * @n_tx: size of txbuf, in bytes * @rxbuf: buffer into which data will be read * @n_rx: size of rxbuf, in bytes (need not be dma-safe) + * Context: can sleep * * This performs a half duplex MicroWire style transaction with the * device, sending txbuf and then reading rxbuf. The return value @@ -547,7 +567,8 @@ static u8 *buf; * This call may only be used from a context that may sleep. * * Parameters to this routine are always copied using a small buffer; - * performance-sensitive or bulk transfer code should instead use + * portable code should never use this for more than 32 bytes. + * Performance-sensitive or bulk transfer code should instead use * spi_{async,sync}() calls with dma-safe buffers. */ int spi_write_then_read(struct spi_device *spi, diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c new file mode 100644 index 000000000000..ce3c0ce2316e --- /dev/null +++ b/drivers/spi/spi_bfin5xx.c @@ -0,0 +1,1313 @@ +/* + * File: drivers/spi/bfin5xx_spi.c + * Based on: N/A + * Author: Luke Yang (Analog Devices Inc.) + * + * Created: March. 10th 2006 + * Description: SPI controller driver for Blackfin 5xx + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * Modified: + * March 10, 2006 bfin5xx_spi.c Created. (Luke Yang) + * August 7, 2006 added full duplex mode (Axel Weiss & Luke Yang) + * + * Copyright 2004-2006 Analog Devices Inc. + * + * This program is free software ; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation ; either version 2, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY ; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program ; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/ioport.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/spi/spi.h> +#include <linux/workqueue.h> +#include <linux/errno.h> +#include <linux/delay.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/delay.h> +#include <asm/dma.h> + +#include <asm/bfin5xx_spi.h> + +MODULE_AUTHOR("Luke Yang"); +MODULE_DESCRIPTION("Blackfin 5xx SPI Contoller"); +MODULE_LICENSE("GPL"); + +#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0) + +#define DEFINE_SPI_REG(reg, off) \ +static inline u16 read_##reg(void) \ + { return *(volatile unsigned short*)(SPI0_REGBASE + off); } \ +static inline void write_##reg(u16 v) \ + {*(volatile unsigned short*)(SPI0_REGBASE + off) = v;\ + SSYNC();} + +DEFINE_SPI_REG(CTRL, 0x00) +DEFINE_SPI_REG(FLAG, 0x04) +DEFINE_SPI_REG(STAT, 0x08) +DEFINE_SPI_REG(TDBR, 0x0C) +DEFINE_SPI_REG(RDBR, 0x10) +DEFINE_SPI_REG(BAUD, 0x14) +DEFINE_SPI_REG(SHAW, 0x18) +#define START_STATE ((void*)0) +#define RUNNING_STATE ((void*)1) +#define DONE_STATE ((void*)2) +#define ERROR_STATE ((void*)-1) +#define QUEUE_RUNNING 0 +#define QUEUE_STOPPED 1 +int dma_requested; + +struct driver_data { + /* Driver model hookup */ + struct platform_device *pdev; + + /* SPI framework hookup */ + struct spi_master *master; + + /* BFIN hookup */ + struct bfin5xx_spi_master *master_info; + + /* Driver message queue */ + struct workqueue_struct *workqueue; + struct work_struct pump_messages; + spinlock_t lock; + struct list_head queue; + int busy; + int run; + + /* Message Transfer pump */ + struct tasklet_struct pump_transfers; + + /* Current message transfer state info */ + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct chip_data *cur_chip; + size_t len_in_bytes; + size_t len; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + int dma_mapped; + dma_addr_t rx_dma; + dma_addr_t tx_dma; + size_t rx_map_len; + size_t tx_map_len; + u8 n_bytes; + void (*write) (struct driver_data *); + void (*read) (struct driver_data *); + void (*duplex) (struct driver_data *); +}; + +struct chip_data { + u16 ctl_reg; + u16 baud; + u16 flag; + + u8 chip_select_num; + u8 n_bytes; + u32 width; /* 0 or 1 */ + u8 enable_dma; + u8 bits_per_word; /* 8 or 16 */ + u8 cs_change_per_word; + u8 cs_chg_udelay; + void (*write) (struct driver_data *); + void (*read) (struct driver_data *); + void (*duplex) (struct driver_data *); +}; + +void bfin_spi_enable(struct driver_data *drv_data) +{ + u16 cr; + + cr = read_CTRL(); + write_CTRL(cr | BIT_CTL_ENABLE); + SSYNC(); +} + +void bfin_spi_disable(struct driver_data *drv_data) +{ + u16 cr; + + cr = read_CTRL(); + write_CTRL(cr & (~BIT_CTL_ENABLE)); + SSYNC(); +} + +/* Caculate the SPI_BAUD register value based on input HZ */ +static u16 hz_to_spi_baud(u32 speed_hz) +{ + u_long sclk = get_sclk(); + u16 spi_baud = (sclk / (2 * speed_hz)); + + if ((sclk % (2 * speed_hz)) > 0) + spi_baud++; + + 
pr_debug("sclk = %ld, speed_hz = %d, spi_baud = %d\n", sclk, speed_hz, + spi_baud); + + return spi_baud; +} + +static int flush(struct driver_data *drv_data) +{ + unsigned long limit = loops_per_jiffy << 1; + + /* wait for stop and clear stat */ + while (!(read_STAT() & BIT_STAT_SPIF) && limit--) + continue; + + write_STAT(BIT_STAT_CLR); + + return limit; +} + +/* stop controller and re-config current chip*/ +static void restore_state(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + /* Clear status and disable clock */ + write_STAT(BIT_STAT_CLR); + bfin_spi_disable(drv_data); + pr_debug("restoring spi ctl state\n"); + +#if defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537) + pr_debug("chip select number is %d\n", chip->chip_select_num); + + switch (chip->chip_select_num) { + case 1: + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3c00); + SSYNC(); + break; + + case 2: + case 3: + bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJSE_SPI); + SSYNC(); + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800); + SSYNC(); + break; + + case 4: + bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS4E_SPI); + SSYNC(); + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3840); + SSYNC(); + break; + + case 5: + bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS5E_SPI); + SSYNC(); + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3820); + SSYNC(); + break; + + case 6: + bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS6E_SPI); + SSYNC(); + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3810); + SSYNC(); + break; + + case 7: + bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJCE_SPI); + SSYNC(); + bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800); + SSYNC(); + break; + } +#endif + + /* Load the registers */ + write_CTRL(chip->ctl_reg); + write_BAUD(chip->baud); + write_FLAG(chip->flag); +} + +/* used to kick off transfer in rx mode */ +static unsigned short dummy_read(void) +{ + unsigned short tmp; + tmp = read_RDBR(); + return tmp; +} + +static void null_writer(struct driver_data *drv_data) +{ + u8 n_bytes = drv_data->n_bytes; + + while (drv_data->tx < drv_data->tx_end) { + write_TDBR(0); + while ((read_STAT() & BIT_STAT_TXS)) + continue; + drv_data->tx += n_bytes; + } +} + +static void null_reader(struct driver_data *drv_data) +{ + u8 n_bytes = drv_data->n_bytes; + dummy_read(); + + while (drv_data->rx < drv_data->rx_end) { + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + dummy_read(); + drv_data->rx += n_bytes; + } +} + +static void u8_writer(struct driver_data *drv_data) +{ + pr_debug("cr8-s is 0x%x\n", read_STAT()); + while (drv_data->tx < drv_data->tx_end) { + write_TDBR(*(u8 *) (drv_data->tx)); + while (read_STAT() & BIT_STAT_TXS) + continue; + ++drv_data->tx; + } + + /* poll for SPI completion before returning */ + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; +} + +static void u8_cs_chg_writer(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->tx < drv_data->tx_end) { + write_FLAG(chip->flag); + SSYNC(); + + write_TDBR(*(u8 *) (drv_data->tx)); + while (read_STAT() & BIT_STAT_TXS) + continue; + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + ++drv_data->tx; + } + write_FLAG(0xFF00); + SSYNC(); +} + +static void u8_reader(struct driver_data *drv_data) +{ + pr_debug("cr-8 is 0x%x\n", read_STAT()); + + /* clear TDBR buffer before read(else it will be shifted out) */ + write_TDBR(0xFFFF); + + 
dummy_read(); + + while (drv_data->rx < drv_data->rx_end - 1) { + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u8 *) (drv_data->rx) = read_RDBR(); + ++drv_data->rx; + } + + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u8 *) (drv_data->rx) = read_SHAW(); + ++drv_data->rx; +} + +static void u8_cs_chg_reader(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->rx < drv_data->rx_end) { + write_FLAG(chip->flag); + SSYNC(); + + read_RDBR(); /* kick off */ + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + *(u8 *) (drv_data->rx) = read_SHAW(); + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + ++drv_data->rx; + } + write_FLAG(0xFF00); + SSYNC(); +} + +static void u8_duplex(struct driver_data *drv_data) +{ + /* in duplex mode, clk is triggered by writing of TDBR */ + while (drv_data->rx < drv_data->rx_end) { + write_TDBR(*(u8 *) (drv_data->tx)); + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u8 *) (drv_data->rx) = read_RDBR(); + ++drv_data->rx; + ++drv_data->tx; + } +} + +static void u8_cs_chg_duplex(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->rx < drv_data->rx_end) { + write_FLAG(chip->flag); + SSYNC(); + + write_TDBR(*(u8 *) (drv_data->tx)); + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u8 *) (drv_data->rx) = read_RDBR(); + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + ++drv_data->rx; + ++drv_data->tx; + } + write_FLAG(0xFF00); + SSYNC(); +} + +static void u16_writer(struct driver_data *drv_data) +{ + pr_debug("cr16 is 0x%x\n", read_STAT()); + while (drv_data->tx < drv_data->tx_end) { + write_TDBR(*(u16 *) (drv_data->tx)); + while ((read_STAT() & BIT_STAT_TXS)) + continue; + drv_data->tx += 2; + } + + /* poll for SPI completion before returning */ + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; +} + +static void u16_cs_chg_writer(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->tx < drv_data->tx_end) { + write_FLAG(chip->flag); + SSYNC(); + + write_TDBR(*(u16 *) (drv_data->tx)); + while ((read_STAT() & BIT_STAT_TXS)) + continue; + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + drv_data->tx += 2; + } + write_FLAG(0xFF00); + SSYNC(); +} + +static void u16_reader(struct driver_data *drv_data) +{ + pr_debug("cr-16 is 0x%x\n", read_STAT()); + dummy_read(); + + while (drv_data->rx < (drv_data->rx_end - 2)) { + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u16 *) (drv_data->rx) = read_RDBR(); + drv_data->rx += 2; + } + + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u16 *) (drv_data->rx) = read_SHAW(); + drv_data->rx += 2; +} + +static void u16_cs_chg_reader(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->rx < drv_data->rx_end) { + write_FLAG(chip->flag); + SSYNC(); + + read_RDBR(); /* kick off */ + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + *(u16 *) (drv_data->rx) = read_SHAW(); + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + drv_data->rx += 
2; + } + write_FLAG(0xFF00); + SSYNC(); +} + +static void u16_duplex(struct driver_data *drv_data) +{ + /* in duplex mode, clk is triggered by writing of TDBR */ + while (drv_data->tx < drv_data->tx_end) { + write_TDBR(*(u16 *) (drv_data->tx)); + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u16 *) (drv_data->rx) = read_RDBR(); + drv_data->rx += 2; + drv_data->tx += 2; + } +} + +static void u16_cs_chg_duplex(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + while (drv_data->tx < drv_data->tx_end) { + write_FLAG(chip->flag); + SSYNC(); + + write_TDBR(*(u16 *) (drv_data->tx)); + while (!(read_STAT() & BIT_STAT_SPIF)) + continue; + while (!(read_STAT() & BIT_STAT_RXS)) + continue; + *(u16 *) (drv_data->rx) = read_RDBR(); + write_FLAG(0xFF00 | chip->flag); + SSYNC(); + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); + drv_data->rx += 2; + drv_data->tx += 2; + } + write_FLAG(0xFF00); + SSYNC(); +} + +/* test if ther is more transfer to be done */ +static void *next_transfer(struct driver_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + struct spi_transfer *trans = drv_data->cur_transfer; + + /* Move to next transfer */ + if (trans->transfer_list.next != &msg->transfers) { + drv_data->cur_transfer = + list_entry(trans->transfer_list.next, + struct spi_transfer, transfer_list); + return RUNNING_STATE; + } else + return DONE_STATE; +} + +/* + * caller already set message->status; + * dma and pio irqs are blocked give finished message back + */ +static void giveback(struct driver_data *drv_data) +{ + struct spi_transfer *last_transfer; + unsigned long flags; + struct spi_message *msg; + + spin_lock_irqsave(&drv_data->lock, flags); + msg = drv_data->cur_msg; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + queue_work(drv_data->workqueue, &drv_data->pump_messages); + spin_unlock_irqrestore(&drv_data->lock, flags); + + last_transfer = list_entry(msg->transfers.prev, + struct spi_transfer, transfer_list); + + msg->state = NULL; + + /* disable chip select signal. And not stop spi in autobuffer mode */ + if (drv_data->tx_dma != 0xFFFF) { + write_FLAG(0xFF00); + bfin_spi_disable(drv_data); + } + + if (msg->complete) + msg->complete(msg->context); +} + +static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs) +{ + struct driver_data *drv_data = (struct driver_data *)dev_id; + struct spi_message *msg = drv_data->cur_msg; + + pr_debug("in dma_irq_handler\n"); + clear_dma_irqstat(CH_SPI); + + /* + * wait for the last transaction shifted out. yes, these two + * while loops are supposed to be the same (see the HRM). 
+ */ + if (drv_data->tx != NULL) { + while (bfin_read_SPI_STAT() & TXS) + continue; + while (bfin_read_SPI_STAT() & TXS) + continue; + } + + while (!(bfin_read_SPI_STAT() & SPIF)) + continue; + + bfin_spi_disable(drv_data); + + msg->actual_length += drv_data->len_in_bytes; + + /* Move to next transfer */ + msg->state = next_transfer(drv_data); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + + /* free the irq handler before next transfer */ + pr_debug("disable dma channel irq%d\n", CH_SPI); + dma_disable_irq(CH_SPI); + + return IRQ_HANDLED; +} + +static void pump_transfers(unsigned long data) +{ + struct driver_data *drv_data = (struct driver_data *)data; + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + struct chip_data *chip = NULL; + u16 cr, width, dma_width, dma_config; + u32 tranf_success = 1; + + /* Get current state information */ + message = drv_data->cur_msg; + transfer = drv_data->cur_transfer; + chip = drv_data->cur_chip; + + /* + * if msg is error or done, report it back using complete() callback + */ + + /* Handle for abort */ + if (message->state == ERROR_STATE) { + message->status = -EIO; + giveback(drv_data); + return; + } + + /* Handle end of message */ + if (message->state == DONE_STATE) { + message->status = 0; + giveback(drv_data); + return; + } + + /* Delay if requested at end of transfer */ + if (message->state == RUNNING_STATE) { + previous = list_entry(transfer->transfer_list.prev, + struct spi_transfer, transfer_list); + if (previous->delay_usecs) + udelay(previous->delay_usecs); + } + + /* Setup the transfer state based on the type of transfer */ + if (flush(drv_data) == 0) { + dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); + message->status = -EIO; + giveback(drv_data); + return; + } + + if (transfer->tx_buf != NULL) { + drv_data->tx = (void *)transfer->tx_buf; + drv_data->tx_end = drv_data->tx + transfer->len; + pr_debug("tx_buf is %p, tx_end is %p\n", transfer->tx_buf, + drv_data->tx_end); + } else { + drv_data->tx = NULL; + } + + if (transfer->rx_buf != NULL) { + drv_data->rx = transfer->rx_buf; + drv_data->rx_end = drv_data->rx + transfer->len; + pr_debug("rx_buf is %p, rx_end is %p\n", transfer->rx_buf, + drv_data->rx_end); + } else { + drv_data->rx = NULL; + } + + drv_data->rx_dma = transfer->rx_dma; + drv_data->tx_dma = transfer->tx_dma; + drv_data->len_in_bytes = transfer->len; + + width = chip->width; + if (width == CFG_SPI_WORDSIZE16) { + drv_data->len = (transfer->len) >> 1; + } else { + drv_data->len = transfer->len; + } + drv_data->write = drv_data->tx ? chip->write : null_writer; + drv_data->read = drv_data->rx ? chip->read : null_reader; + drv_data->duplex = chip->duplex ? 
chip->duplex : null_writer; + pr_debug + ("transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n", + drv_data->write, chip->write, null_writer); + + /* speed and width has been set on per message */ + message->state = RUNNING_STATE; + dma_config = 0; + + /* restore spi status for each spi transfer */ + if (transfer->speed_hz) { + write_BAUD(hz_to_spi_baud(transfer->speed_hz)); + } else { + write_BAUD(chip->baud); + } + write_FLAG(chip->flag); + + pr_debug("now pumping a transfer: width is %d, len is %d\n", width, + transfer->len); + + /* + * Try to map dma buffer and do a dma transfer if + * successful use different way to r/w according to + * drv_data->cur_chip->enable_dma + */ + if (drv_data->cur_chip->enable_dma && drv_data->len > 6) { + + write_STAT(BIT_STAT_CLR); + disable_dma(CH_SPI); + clear_dma_irqstat(CH_SPI); + bfin_spi_disable(drv_data); + + /* config dma channel */ + pr_debug("doing dma transfer\n"); + if (width == CFG_SPI_WORDSIZE16) { + set_dma_x_count(CH_SPI, drv_data->len); + set_dma_x_modify(CH_SPI, 2); + dma_width = WDSIZE_16; + } else { + set_dma_x_count(CH_SPI, drv_data->len); + set_dma_x_modify(CH_SPI, 1); + dma_width = WDSIZE_8; + } + + /* set transfer width,direction. And enable spi */ + cr = (read_CTRL() & (~BIT_CTL_TIMOD)); + + /* dirty hack for autobuffer DMA mode */ + if (drv_data->tx_dma == 0xFFFF) { + pr_debug("doing autobuffer DMA out.\n"); + + /* no irq in autobuffer mode */ + dma_config = + (DMAFLOW_AUTO | RESTART | dma_width | DI_EN); + set_dma_config(CH_SPI, dma_config); + set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx); + enable_dma(CH_SPI); + write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) | + (CFG_SPI_ENABLE << 14)); + + /* just return here, there can only be one transfer in this mode */ + message->status = 0; + giveback(drv_data); + return; + } + + /* In dma mode, rx or tx must be NULL in one transfer */ + if (drv_data->rx != NULL) { + /* set transfer mode, and enable SPI */ + pr_debug("doing DMA in.\n"); + + /* disable SPI before write to TDBR */ + write_CTRL(cr & ~BIT_CTL_ENABLE); + + /* clear tx reg soformer data is not shifted out */ + write_TDBR(0xFF); + + set_dma_x_count(CH_SPI, drv_data->len); + + /* start dma */ + dma_enable_irq(CH_SPI); + dma_config = (WNR | RESTART | dma_width | DI_EN); + set_dma_config(CH_SPI, dma_config); + set_dma_start_addr(CH_SPI, (unsigned long)drv_data->rx); + enable_dma(CH_SPI); + + cr |= + CFG_SPI_DMAREAD | (width << 8) | (CFG_SPI_ENABLE << + 14); + /* set transfer mode, and enable SPI */ + write_CTRL(cr); + } else if (drv_data->tx != NULL) { + pr_debug("doing DMA out.\n"); + + /* start dma */ + dma_enable_irq(CH_SPI); + dma_config = (RESTART | dma_width | DI_EN); + set_dma_config(CH_SPI, dma_config); + set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx); + enable_dma(CH_SPI); + + write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) | + (CFG_SPI_ENABLE << 14)); + + } + } else { + /* IO mode write then read */ + pr_debug("doing IO transfer\n"); + + write_STAT(BIT_STAT_CLR); + + if (drv_data->tx != NULL && drv_data->rx != NULL) { + /* full duplex mode */ + BUG_ON((drv_data->tx_end - drv_data->tx) != + (drv_data->rx_end - drv_data->rx)); + cr = (read_CTRL() & (~BIT_CTL_TIMOD)); /* clear the TIMOD bits */ + cr |= + CFG_SPI_WRITE | (width << 8) | (CFG_SPI_ENABLE << + 14); + pr_debug("IO duplex: cr is 0x%x\n", cr); + + write_CTRL(cr); + SSYNC(); + + drv_data->duplex(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->tx != NULL) { + /* write only half duplex 
*/ + cr = (read_CTRL() & (~BIT_CTL_TIMOD)); /* clear the TIMOD bits */ + cr |= + CFG_SPI_WRITE | (width << 8) | (CFG_SPI_ENABLE << + 14); + pr_debug("IO write: cr is 0x%x\n", cr); + + write_CTRL(cr); + SSYNC(); + + drv_data->write(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->rx != NULL) { + /* read only half duplex */ + cr = (read_CTRL() & (~BIT_CTL_TIMOD)); /* cleare the TIMOD bits */ + cr |= + CFG_SPI_READ | (width << 8) | (CFG_SPI_ENABLE << + 14); + pr_debug("IO read: cr is 0x%x\n", cr); + + write_CTRL(cr); + SSYNC(); + + drv_data->read(drv_data); + if (drv_data->rx != drv_data->rx_end) + tranf_success = 0; + } + + if (!tranf_success) { + pr_debug("IO write error!\n"); + message->state = ERROR_STATE; + } else { + /* Update total byte transfered */ + message->actual_length += drv_data->len; + + /* Move to next transfer of this msg */ + message->state = next_transfer(drv_data); + } + + /* Schedule next transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + + } +} + +/* pop a msg from queue and kick off real transfer */ +static void pump_messages(struct work_struct *work) +{ + struct driver_data *drv_data = container_of(work, struct driver_data, pump_messages); + unsigned long flags; + + /* Lock queue and check for queue work */ + spin_lock_irqsave(&drv_data->lock, flags); + if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { + /* pumper kicked off but no work to do */ + drv_data->busy = 0; + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + } + + /* Make sure we are not already running a message */ + if (drv_data->cur_msg) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + } + + /* Extract head of queue */ + drv_data->cur_msg = list_entry(drv_data->queue.next, + struct spi_message, queue); + list_del_init(&drv_data->cur_msg->queue); + + /* Initial message state */ + drv_data->cur_msg->state = START_STATE; + drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, + struct spi_transfer, transfer_list); + + /* Setup the SSP using the per chip configuration */ + drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); + restore_state(drv_data); + pr_debug + ("got a message to pump, state is set to: baud %d, flag 0x%x, ctl 0x%x\n", + drv_data->cur_chip->baud, drv_data->cur_chip->flag, + drv_data->cur_chip->ctl_reg); + pr_debug("the first transfer len is %d\n", drv_data->cur_transfer->len); + + /* Mark as busy and launch transfers */ + tasklet_schedule(&drv_data->pump_transfers); + + drv_data->busy = 1; + spin_unlock_irqrestore(&drv_data->lock, flags); +} + +/* + * got a msg to transfer, queue it in drv_data->queue. 
+ * And kick off message pumper + */ +static int transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct driver_data *drv_data = spi_master_get_devdata(spi->master); + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (drv_data->run == QUEUE_STOPPED) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -ESHUTDOWN; + } + + msg->actual_length = 0; + msg->status = -EINPROGRESS; + msg->state = START_STATE; + + pr_debug("adding an msg in transfer() \n"); + list_add_tail(&msg->queue, &drv_data->queue); + + if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return 0; +} + +/* first setup for new devices */ +static int setup(struct spi_device *spi) +{ + struct bfin5xx_spi_chip *chip_info = NULL; + struct chip_data *chip; + struct driver_data *drv_data = spi_master_get_devdata(spi->master); + u8 spi_flg; + + /* Abort device setup if requested features are not supported */ + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { + dev_err(&spi->dev, "requested mode not fully supported\n"); + return -EINVAL; + } + + /* Zero (the default) here means 8 bits */ + if (!spi->bits_per_word) + spi->bits_per_word = 8; + + if (spi->bits_per_word != 8 && spi->bits_per_word != 16) + return -EINVAL; + + /* Only alloc (or use chip_info) on first setup */ + chip = spi_get_ctldata(spi); + if (chip == NULL) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->enable_dma = 0; + chip_info = spi->controller_data; + } + + /* chip_info isn't always needed */ + if (chip_info) { + chip->enable_dma = chip_info->enable_dma != 0 + && drv_data->master_info->enable_dma; + chip->ctl_reg = chip_info->ctl_reg; + chip->bits_per_word = chip_info->bits_per_word; + chip->cs_change_per_word = chip_info->cs_change_per_word; + chip->cs_chg_udelay = chip_info->cs_chg_udelay; + } + + /* translate common spi framework into our register */ + if (spi->mode & SPI_CPOL) + chip->ctl_reg |= CPOL; + if (spi->mode & SPI_CPHA) + chip->ctl_reg |= CPHA; + if (spi->mode & SPI_LSB_FIRST) + chip->ctl_reg |= LSBF; + /* we dont support running in slave mode (yet?) */ + chip->ctl_reg |= MSTR; + + /* + * if any one SPI chip is registered and wants DMA, request the + * DMA channel for it + */ + if (chip->enable_dma && !dma_requested) { + /* register dma irq handler */ + if (request_dma(CH_SPI, "BF53x_SPI_DMA") < 0) { + pr_debug + ("Unable to request BlackFin SPI DMA channel\n"); + return -ENODEV; + } + if (set_dma_callback(CH_SPI, (void *)dma_irq_handler, drv_data) + < 0) { + pr_debug("Unable to set dma callback\n"); + return -EPERM; + } + dma_disable_irq(CH_SPI); + dma_requested = 1; + } + + /* + * Notice: for blackfin, the speed_hz is the value of register + * SPI_BAUD, not the real baudrate + */ + chip->baud = hz_to_spi_baud(spi->max_speed_hz); + spi_flg = ~(1 << (spi->chip_select)); + chip->flag = ((u16) spi_flg << 8) | (1 << (spi->chip_select)); + chip->chip_select_num = spi->chip_select; + + switch (chip->bits_per_word) { + case 8: + chip->n_bytes = 1; + chip->width = CFG_SPI_WORDSIZE8; + chip->read = chip->cs_change_per_word ? + u8_cs_chg_reader : u8_reader; + chip->write = chip->cs_change_per_word ? + u8_cs_chg_writer : u8_writer; + chip->duplex = chip->cs_change_per_word ? + u8_cs_chg_duplex : u8_duplex; + break; + + case 16: + chip->n_bytes = 2; + chip->width = CFG_SPI_WORDSIZE16; + chip->read = chip->cs_change_per_word ? 
+ u16_cs_chg_reader : u16_reader; + chip->write = chip->cs_change_per_word ? + u16_cs_chg_writer : u16_writer; + chip->duplex = chip->cs_change_per_word ? + u16_cs_chg_duplex : u16_duplex; + break; + + default: + dev_err(&spi->dev, "%d bits_per_word is not supported\n", + chip->bits_per_word); + kfree(chip); + return -ENODEV; + } + + pr_debug("setup spi chip %s, width is %d, dma is %d,", + spi->modalias, chip->width, chip->enable_dma); + pr_debug("ctl_reg is 0x%x, flag_reg is 0x%x\n", + chip->ctl_reg, chip->flag); + + spi_set_ctldata(spi, chip); + + return 0; +} + +/* + * callback for spi framework. + * clean driver specific data + */ +static void cleanup(const struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); + + kfree(chip); +} + +static inline int init_queue(struct driver_data *drv_data) +{ + INIT_LIST_HEAD(&drv_data->queue); + spin_lock_init(&drv_data->lock); + + drv_data->run = QUEUE_STOPPED; + drv_data->busy = 0; + + /* init transfer tasklet */ + tasklet_init(&drv_data->pump_transfers, + pump_transfers, (unsigned long)drv_data); + + /* init messages workqueue */ + INIT_WORK(&drv_data->pump_messages, pump_messages); + drv_data->workqueue = + create_singlethread_workqueue(drv_data->master->cdev.dev->bus_id); + if (drv_data->workqueue == NULL) + return -EBUSY; + + return 0; +} + +static inline int start_queue(struct driver_data *drv_data) +{ + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -EBUSY; + } + + drv_data->run = QUEUE_RUNNING; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + spin_unlock_irqrestore(&drv_data->lock, flags); + + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + return 0; +} + +static inline int stop_queue(struct driver_data *drv_data) +{ + unsigned long flags; + unsigned limit = 500; + int status = 0; + + spin_lock_irqsave(&drv_data->lock, flags); + + /* + * This is a bit lame, but is optimized for the common execution path. + * A wait_queue on the drv_data->busy could be used, but then the common + * execution path (pump_messages) would be required to call wake_up or + * friends on every SPI message. 
Do this instead + */ + drv_data->run = QUEUE_STOPPED; + while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { + spin_unlock_irqrestore(&drv_data->lock, flags); + msleep(10); + spin_lock_irqsave(&drv_data->lock, flags); + } + + if (!list_empty(&drv_data->queue) || drv_data->busy) + status = -EBUSY; + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return status; +} + +static inline int destroy_queue(struct driver_data *drv_data) +{ + int status; + + status = stop_queue(drv_data); + if (status != 0) + return status; + + destroy_workqueue(drv_data->workqueue); + + return 0; +} + +static int __init bfin5xx_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct bfin5xx_spi_master *platform_info; + struct spi_master *master; + struct driver_data *drv_data = 0; + int status = 0; + + platform_info = dev->platform_data; + + /* Allocate master with space for drv_data */ + master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); + if (!master) { + dev_err(&pdev->dev, "can not alloc spi_master\n"); + return -ENOMEM; + } + drv_data = spi_master_get_devdata(master); + drv_data->master = master; + drv_data->master_info = platform_info; + drv_data->pdev = pdev; + + master->bus_num = pdev->id; + master->num_chipselect = platform_info->num_chipselect; + master->cleanup = cleanup; + master->setup = setup; + master->transfer = transfer; + + /* Initial and start queue */ + status = init_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "problem initializing queue\n"); + goto out_error_queue_alloc; + } + status = start_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "problem starting queue\n"); + goto out_error_queue_alloc; + } + + /* Register with the SPI framework */ + platform_set_drvdata(pdev, drv_data); + status = spi_register_master(master); + if (status != 0) { + dev_err(&pdev->dev, "problem registering spi master\n"); + goto out_error_queue_alloc; + } + pr_debug("controller probe successfully\n"); + return status; + + out_error_queue_alloc: + destroy_queue(drv_data); + spi_master_put(master); + return status; +} + +/* stop hardware and remove the driver */ +static int __devexit bfin5xx_spi_remove(struct platform_device *pdev) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + if (!drv_data) + return 0; + + /* Remove the queue */ + status = destroy_queue(drv_data); + if (status != 0) + return status; + + /* Disable the SSP at the peripheral and SOC level */ + bfin_spi_disable(drv_data); + + /* Release DMA */ + if (drv_data->master_info->enable_dma) { + if (dma_channel_active(CH_SPI)) + free_dma(CH_SPI); + } + + /* Disconnect from the SPI framework */ + spi_unregister_master(drv_data->master); + + /* Prevent double remove */ + platform_set_drvdata(pdev, NULL); + + return 0; +} + +#ifdef CONFIG_PM +static int bfin5xx_spi_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + status = stop_queue(drv_data); + if (status != 0) + return status; + + /* stop hardware */ + bfin_spi_disable(drv_data); + + return 0; +} + +static int bfin5xx_spi_resume(struct platform_device *pdev) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + /* Enable the SPI interface */ + bfin_spi_enable(drv_data); + + /* Start the queue running */ + status = start_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "problem starting queue (%d)\n", status); + return status; + } + + return 0; +} 
+#else +#define bfin5xx_spi_suspend NULL +#define bfin5xx_spi_resume NULL +#endif /* CONFIG_PM */ + +static struct platform_driver bfin5xx_spi_driver = { + .driver = { + .name = "bfin-spi-master", + .bus = &platform_bus_type, + .owner = THIS_MODULE, + }, + .probe = bfin5xx_spi_probe, + .remove = __devexit_p(bfin5xx_spi_remove), + .suspend = bfin5xx_spi_suspend, + .resume = bfin5xx_spi_resume, +}; + +static int __init bfin5xx_spi_init(void) +{ + return platform_driver_register(&bfin5xx_spi_driver); +} + +module_init(bfin5xx_spi_init); + +static void __exit bfin5xx_spi_exit(void) +{ + platform_driver_unregister(&bfin5xx_spi_driver); +} + +module_exit(bfin5xx_spi_exit); diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index 24a330d82395..88425e1af4d3 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c @@ -302,10 +302,6 @@ static void bitbang_work(struct work_struct *work) setup_transfer = NULL; list_for_each_entry (t, &m->transfers, transfer_list) { - if (bitbang->shutdown) { - status = -ESHUTDOWN; - break; - } /* override or restore speed and wordsize */ if (t->speed_hz || t->bits_per_word) { @@ -410,8 +406,6 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m) m->status = -EINPROGRESS; bitbang = spi_master_get_devdata(spi->master); - if (bitbang->shutdown) - return -ESHUTDOWN; spin_lock_irqsave(&bitbang->lock, flags); if (!spi->max_speed_hz) @@ -507,28 +501,12 @@ EXPORT_SYMBOL_GPL(spi_bitbang_start); */ int spi_bitbang_stop(struct spi_bitbang *bitbang) { - unsigned limit = 500; - - spin_lock_irq(&bitbang->lock); - bitbang->shutdown = 0; - while (!list_empty(&bitbang->queue) && limit--) { - spin_unlock_irq(&bitbang->lock); + spi_unregister_master(bitbang->master); - dev_dbg(bitbang->master->cdev.dev, "wait for queue\n"); - msleep(10); - - spin_lock_irq(&bitbang->lock); - } - spin_unlock_irq(&bitbang->lock); - if (!list_empty(&bitbang->queue)) { - dev_err(bitbang->master->cdev.dev, "queue didn't empty\n"); - return -EBUSY; - } + WARN_ON(!list_empty(&bitbang->queue)); destroy_workqueue(bitbang->workqueue); - spi_unregister_master(bitbang->master); - return 0; } EXPORT_SYMBOL_GPL(spi_bitbang_stop); diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c index 312987a03210..0ee2b2090252 100644 --- a/drivers/spi/spi_butterfly.c +++ b/drivers/spi/spi_butterfly.c @@ -20,7 +20,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> -#include <linux/platform_device.h> +#include <linux/device.h> #include <linux/parport.h> #include <linux/sched.h> @@ -40,8 +40,6 @@ * and use this custom parallel port cable. */ -#undef HAVE_USI /* nyet */ - /* DATA output bits (pins 2..9 == D0..D7) */ #define butterfly_nreset (1 << 1) /* pin 3 */ @@ -49,19 +47,13 @@ #define spi_sck_bit (1 << 0) /* pin 2 */ #define spi_mosi_bit (1 << 7) /* pin 9 */ -#define usi_sck_bit (1 << 3) /* pin 5 */ -#define usi_mosi_bit (1 << 4) /* pin 6 */ - #define vcc_bits ((1 << 6) | (1 << 5)) /* pins 7, 8 */ /* STATUS input bits */ #define spi_miso_bit PARPORT_STATUS_BUSY /* pin 11 */ -#define usi_miso_bit PARPORT_STATUS_PAPEROUT /* pin 12 */ - /* CONTROL output bits */ #define spi_cs_bit PARPORT_CONTROL_SELECT /* pin 17 */ -/* USI uses no chipselect */ @@ -70,15 +62,6 @@ static inline struct butterfly *spidev_to_pp(struct spi_device *spi) return spi->controller_data; } -static inline int is_usidev(struct spi_device *spi) -{ -#ifdef HAVE_USI - return spi->chip_select != 1; -#else - return 0; -#endif -} - struct butterfly { /* REVISIT ... 
for now, this must be first */ @@ -97,23 +80,13 @@ struct butterfly { /*----------------------------------------------------------------------*/ -/* - * these routines may be slower than necessary because they're hiding - * the fact that there are two different SPI busses on this cable: one - * to the DataFlash chip (or AVR SPI controller), the other to the - * AVR USI controller. - */ - static inline void setsck(struct spi_device *spi, int is_on) { struct butterfly *pp = spidev_to_pp(spi); u8 bit, byte = pp->lastbyte; - if (is_usidev(spi)) - bit = usi_sck_bit; - else - bit = spi_sck_bit; + bit = spi_sck_bit; if (is_on) byte |= bit; @@ -129,10 +102,7 @@ setmosi(struct spi_device *spi, int is_on) struct butterfly *pp = spidev_to_pp(spi); u8 bit, byte = pp->lastbyte; - if (is_usidev(spi)) - bit = usi_mosi_bit; - else - bit = spi_mosi_bit; + bit = spi_mosi_bit; if (is_on) byte |= bit; @@ -148,10 +118,7 @@ static inline int getmiso(struct spi_device *spi) int value; u8 bit; - if (is_usidev(spi)) - bit = usi_miso_bit; - else - bit = spi_miso_bit; + bit = spi_miso_bit; /* only STATUS_BUSY is NOT negated */ value = !(parport_read_status(pp->port) & bit); @@ -166,10 +133,6 @@ static void butterfly_chipselect(struct spi_device *spi, int value) if (value != BITBANG_CS_INACTIVE) setsck(spi, spi->mode & SPI_CPOL); - /* no chipselect on this USI link config */ - if (is_usidev(spi)) - return; - /* here, value == "activate or not"; * most PARPORT_CONTROL_* bits are negated, so we must * morph it to value == "bit value to write in control register" @@ -237,24 +200,16 @@ static void butterfly_attach(struct parport *p) int status; struct butterfly *pp; struct spi_master *master; - struct platform_device *pdev; + struct device *dev = p->physport->dev; - if (butterfly) + if (butterfly || !dev) return; /* REVISIT: this just _assumes_ a butterfly is there ... no probe, * and no way to be selective about what it binds to. */ - /* FIXME where should master->cdev.dev come from? - * e.g. /sys/bus/pnp0/00:0b, some PCI thing, etc - * setting up a platform device like this is an ugly kluge... - */ - pdev = platform_device_register_simple("butterfly", -1, NULL, 0); - if (IS_ERR(pdev)) - return; - - master = spi_alloc_master(&pdev->dev, sizeof *pp); + master = spi_alloc_master(dev, sizeof *pp); if (!master) { status = -ENOMEM; goto done; @@ -300,7 +255,7 @@ static void butterfly_attach(struct parport *p) parport_frob_control(pp->port, spi_cs_bit, 0); /* stabilize power with chip in reset (nRESET), and - * both spi_sck_bit and usi_sck_bit clear (CPOL=0) + * spi_sck_bit clear (CPOL=0) */ pp->lastbyte |= vcc_bits; parport_write_data(pp->port, pp->lastbyte); @@ -334,23 +289,6 @@ static void butterfly_attach(struct parport *p) pr_debug("%s: dataflash at %s\n", p->name, pp->dataflash->dev.bus_id); -#ifdef HAVE_USI - /* Bus 2 is only for talking to the AVR, and it can work no - * matter who masters bus 1; needs appropriate AVR firmware. - */ - pp->info[1].max_speed_hz = 10 /* ?? */ * 1000 * 1000; - strcpy(pp->info[1].modalias, "butterfly"); - // pp->info[1].platform_data = ... TBD ... ; - pp->info[1].chip_select = 2, - pp->info[1].controller_data = pp; - pp->butterfly = spi_new_device(pp->bitbang.master, &pp->info[1]); - if (pp->butterfly) - pr_debug("%s: butterfly at %s\n", p->name, - pp->butterfly->dev.bus_id); - - /* FIXME setup ACK for the IRQ line ... */ -#endif - // dev_info(_what?_, ...) 
pr_info("%s: AVR Butterfly\n", p->name); butterfly = pp; @@ -366,14 +304,12 @@ clean1: clean0: (void) spi_master_put(pp->bitbang.master); done: - platform_device_unregister(pdev); pr_debug("%s: butterfly probe, fail %d\n", p->name, status); } static void butterfly_detach(struct parport *p) { struct butterfly *pp; - struct platform_device *pdev; int status; /* FIXME this global is ugly ... but, how to quickly get from @@ -386,7 +322,6 @@ static void butterfly_detach(struct parport *p) butterfly = NULL; /* stop() unregisters child devices too */ - pdev = to_platform_device(pp->bitbang.master->cdev.dev); status = spi_bitbang_stop(&pp->bitbang); /* turn off VCC */ @@ -397,8 +332,6 @@ static void butterfly_detach(struct parport *p) parport_unregister_device(pp->pd); (void) spi_master_put(pp->bitbang.master); - - platform_device_unregister(pdev); } static struct parport_driver butterfly_driver = { diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index 651379c51ae6..d5a710f6e445 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c @@ -41,7 +41,7 @@ struct s3c24xx_spi { int len; int count; - int (*set_cs)(struct s3c2410_spi_info *spi, + void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); /* data buffers */ @@ -77,7 +77,7 @@ static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) switch (value) { case BITBANG_CS_INACTIVE: - hw->pdata->set_cs(hw->pdata, spi->chip_select, cspol^1); + hw->set_cs(hw->pdata, spi->chip_select, cspol^1); break; case BITBANG_CS_ACTIVE: @@ -98,7 +98,7 @@ static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) /* write new configration */ writeb(spcon, hw->regs + S3C2410_SPCON); - hw->pdata->set_cs(hw->pdata, spi->chip_select, cspol); + hw->set_cs(hw->pdata, spi->chip_select, cspol); break; } @@ -342,8 +342,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev) goto err_register; } - dev_dbg(hw->dev, "shutdown=%d\n", hw->bitbang.shutdown); - /* register all the devices associated */ bi = &hw->pdata->board_info[0]; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c new file mode 100644 index 000000000000..c0a6dce800a3 --- /dev/null +++ b/drivers/spi/spidev.c @@ -0,0 +1,584 @@ +/* + * spidev.c -- simple synchronous userspace interface to SPI devices + * + * Copyright (C) 2006 SWAPP + * Andrea Paterniani <a.paterniani@swapp-eng.it> + * Copyright (C) 2007 David Brownell (simplification, cleanup) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/ioctl.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/errno.h> +#include <linux/mutex.h> +#include <linux/slab.h> + +#include <linux/spi/spi.h> +#include <linux/spi/spidev.h> + +#include <asm/uaccess.h> + + +/* + * This supports acccess to SPI devices using normal userspace I/O calls. 
+ * Note that while traditional UNIX/POSIX I/O semantics are half duplex, + * and often mask message boundaries, full SPI support requires full duplex + * transfers. There are several kinds of of internal message boundaries to + * handle chipselect management and other protocol options. + * + * SPI has a character major number assigned. We allocate minor numbers + * dynamically using a bitmask. You must use hotplug tools, such as udev + * (or mdev with busybox) to create and destroy the /dev/spidevB.C device + * nodes, since there is no fixed association of minor numbers with any + * particular SPI bus or device. + */ +#define SPIDEV_MAJOR 153 /* assigned */ +#define N_SPI_MINORS 32 /* ... up to 256 */ + +static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG]; + + +/* Bit masks for spi_device.mode management */ +#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL) + + +struct spidev_data { + struct device dev; + struct spi_device *spi; + struct list_head device_entry; + + struct mutex buf_lock; + unsigned users; + u8 *buffer; +}; + +static LIST_HEAD(device_list); +static DEFINE_MUTEX(device_list_lock); + +static unsigned bufsiz = 4096; +module_param(bufsiz, uint, S_IRUGO); +MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message"); + +/*-------------------------------------------------------------------------*/ + +/* Read-only message with current device setup */ +static ssize_t +spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) +{ + struct spidev_data *spidev; + struct spi_device *spi; + ssize_t status = 0; + + /* chipselect only toggles at start or end of operation */ + if (count > bufsiz) + return -EMSGSIZE; + + spidev = filp->private_data; + spi = spidev->spi; + + mutex_lock(&spidev->buf_lock); + status = spi_read(spi, spidev->buffer, count); + if (status == 0) { + unsigned long missing; + + missing = copy_to_user(buf, spidev->buffer, count); + if (count && missing == count) + status = -EFAULT; + else + status = count - missing; + } + mutex_unlock(&spidev->buf_lock); + + return status; +} + +/* Write-only message with current device setup */ +static ssize_t +spidev_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct spidev_data *spidev; + struct spi_device *spi; + ssize_t status = 0; + unsigned long missing; + + /* chipselect only toggles at start or end of operation */ + if (count > bufsiz) + return -EMSGSIZE; + + spidev = filp->private_data; + spi = spidev->spi; + + mutex_lock(&spidev->buf_lock); + missing = copy_from_user(spidev->buffer, buf, count); + if (missing == 0) { + status = spi_write(spi, spidev->buffer, count); + if (status == 0) + status = count; + } else + status = -EFAULT; + mutex_unlock(&spidev->buf_lock); + + return status; +} + +static int spidev_message(struct spidev_data *spidev, + struct spi_ioc_transfer *u_xfers, unsigned n_xfers) +{ + struct spi_message msg; + struct spi_transfer *k_xfers; + struct spi_transfer *k_tmp; + struct spi_ioc_transfer *u_tmp; + struct spi_device *spi = spidev->spi; + unsigned n, total; + u8 *buf; + int status = -EFAULT; + + spi_message_init(&msg); + k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL); + if (k_xfers == NULL) + return -ENOMEM; + + /* Construct spi_message, copying any tx data to bounce buffer. + * We walk the array of user-provided transfers, using each one + * to initialize a kernel version of the same transfer. 
+ */ + mutex_lock(&spidev->buf_lock); + buf = spidev->buffer; + total = 0; + for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; + n; + n--, k_tmp++, u_tmp++) { + k_tmp->len = u_tmp->len; + + if (u_tmp->rx_buf) { + k_tmp->rx_buf = buf; + if (!access_ok(VERIFY_WRITE, u_tmp->rx_buf, u_tmp->len)) + goto done; + } + if (u_tmp->tx_buf) { + k_tmp->tx_buf = buf; + if (copy_from_user(buf, (const u8 __user *)u_tmp->tx_buf, + u_tmp->len)) + goto done; + } + + total += k_tmp->len; + if (total > bufsiz) { + status = -EMSGSIZE; + goto done; + } + buf += k_tmp->len; + + k_tmp->cs_change = !!u_tmp->cs_change; + k_tmp->bits_per_word = u_tmp->bits_per_word; + k_tmp->delay_usecs = u_tmp->delay_usecs; + k_tmp->speed_hz = u_tmp->speed_hz; +#ifdef VERBOSE + dev_dbg(&spi->dev, + " xfer len %zd %s%s%s%dbits %u usec %uHz\n", + u_tmp->len, + u_tmp->rx_buf ? "rx " : "", + u_tmp->tx_buf ? "tx " : "", + u_tmp->cs_change ? "cs " : "", + u_tmp->bits_per_word ? : spi->bits_per_word, + u_tmp->delay_usecs, + u_tmp->speed_hz ? : spi->max_speed_hz); +#endif + spi_message_add_tail(k_tmp, &msg); + } + + status = spi_sync(spi, &msg); + if (status < 0) + goto done; + + /* copy any rx data out of bounce buffer */ + buf = spidev->buffer; + for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { + if (u_tmp->rx_buf) { + if (__copy_to_user((u8 __user *)u_tmp->rx_buf, buf, + u_tmp->len)) { + status = -EFAULT; + goto done; + } + } + buf += u_tmp->len; + } + status = total; + +done: + mutex_unlock(&spidev->buf_lock); + kfree(k_xfers); + return status; +} + +static int +spidev_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + int retval = 0; + struct spidev_data *spidev; + struct spi_device *spi; + u32 tmp; + unsigned n_ioc; + struct spi_ioc_transfer *ioc; + + /* Check type and command number */ + if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC) + return -ENOTTY; + + /* Check access direction once here; don't repeat below. + * IOC_DIR is from the user perspective, while access_ok is + * from the kernel perspective; so they look reversed. + */ + if (_IOC_DIR(cmd) & _IOC_READ) + err = !access_ok(VERIFY_WRITE, + (void __user *)arg, _IOC_SIZE(cmd)); + if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE) + err = !access_ok(VERIFY_READ, + (void __user *)arg, _IOC_SIZE(cmd)); + if (err) + return -EFAULT; + + spidev = filp->private_data; + spi = spidev->spi; + + switch (cmd) { + /* read requests */ + case SPI_IOC_RD_MODE: + retval = __put_user(spi->mode & SPI_MODE_MASK, + (__u8 __user *)arg); + break; + case SPI_IOC_RD_LSB_FIRST: + retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 
1 : 0, + (__u8 __user *)arg); + break; + case SPI_IOC_RD_BITS_PER_WORD: + retval = __put_user(spi->bits_per_word, (__u8 __user *)arg); + break; + case SPI_IOC_RD_MAX_SPEED_HZ: + retval = __put_user(spi->max_speed_hz, (__u32 __user *)arg); + break; + + /* write requests */ + case SPI_IOC_WR_MODE: + retval = __get_user(tmp, (u8 __user *)arg); + if (retval == 0) { + u8 save = spi->mode; + + if (tmp & ~SPI_MODE_MASK) { + retval = -EINVAL; + break; + } + + tmp |= spi->mode & ~SPI_MODE_MASK; + spi->mode = (u8)tmp; + retval = spi_setup(spi); + if (retval < 0) + spi->mode = save; + else + dev_dbg(&spi->dev, "spi mode %02x\n", tmp); + } + break; + case SPI_IOC_WR_LSB_FIRST: + retval = __get_user(tmp, (__u8 __user *)arg); + if (retval == 0) { + u8 save = spi->mode; + + if (tmp) + spi->mode |= SPI_LSB_FIRST; + else + spi->mode &= ~SPI_LSB_FIRST; + retval = spi_setup(spi); + if (retval < 0) + spi->mode = save; + else + dev_dbg(&spi->dev, "%csb first\n", + tmp ? 'l' : 'm'); + } + break; + case SPI_IOC_WR_BITS_PER_WORD: + retval = __get_user(tmp, (__u8 __user *)arg); + if (retval == 0) { + u8 save = spi->bits_per_word; + + spi->bits_per_word = tmp; + retval = spi_setup(spi); + if (retval < 0) + spi->bits_per_word = save; + else + dev_dbg(&spi->dev, "%d bits per word\n", tmp); + } + break; + case SPI_IOC_WR_MAX_SPEED_HZ: + retval = __get_user(tmp, (__u32 __user *)arg); + if (retval == 0) { + u32 save = spi->max_speed_hz; + + spi->max_speed_hz = tmp; + retval = spi_setup(spi); + if (retval < 0) + spi->max_speed_hz = save; + else + dev_dbg(&spi->dev, "%d Hz (max)\n", tmp); + } + break; + + default: + /* segmented and/or full-duplex I/O request */ + if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) + || _IOC_DIR(cmd) != _IOC_WRITE) + return -ENOTTY; + + tmp = _IOC_SIZE(cmd); + if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) { + retval = -EINVAL; + break; + } + n_ioc = tmp / sizeof(struct spi_ioc_transfer); + if (n_ioc == 0) + break; + + /* copy into scratch area */ + ioc = kmalloc(tmp, GFP_KERNEL); + if (!ioc) { + retval = -ENOMEM; + break; + } + if (__copy_from_user(ioc, (void __user *)arg, tmp)) { + retval = -EFAULT; + break; + } + + /* translate to spi_message, execute */ + retval = spidev_message(spidev, ioc, n_ioc); + kfree(ioc); + break; + } + return retval; +} + +static int spidev_open(struct inode *inode, struct file *filp) +{ + struct spidev_data *spidev; + int status = -ENXIO; + + mutex_lock(&device_list_lock); + + list_for_each_entry(spidev, &device_list, device_entry) { + if (spidev->dev.devt == inode->i_rdev) { + status = 0; + break; + } + } + if (status == 0) { + if (!spidev->buffer) { + spidev->buffer = kmalloc(bufsiz, GFP_KERNEL); + if (!spidev->buffer) { + dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); + status = -ENOMEM; + } + } + if (status == 0) { + spidev->users++; + filp->private_data = spidev; + nonseekable_open(inode, filp); + } + } else + pr_debug("spidev: nothing for minor %d\n", iminor(inode)); + + mutex_unlock(&device_list_lock); + return status; +} + +static int spidev_release(struct inode *inode, struct file *filp) +{ + struct spidev_data *spidev; + int status = 0; + + mutex_lock(&device_list_lock); + spidev = filp->private_data; + filp->private_data = NULL; + spidev->users--; + if (!spidev->users) { + kfree(spidev->buffer); + spidev->buffer = NULL; + } + mutex_unlock(&device_list_lock); + + return status; +} + +static struct file_operations spidev_fops = { + .owner = THIS_MODULE, + /* REVISIT switch to aio primitives, so that userspace + * gets more complete API coverage. 
It'll simplify things + * too, except for the locking. + */ + .write = spidev_write, + .read = spidev_read, + .ioctl = spidev_ioctl, + .open = spidev_open, + .release = spidev_release, +}; + +/*-------------------------------------------------------------------------*/ + +/* The main reason to have this class is to make mdev/udev create the + * /dev/spidevB.C character device nodes exposing our userspace API. + * It also simplifies memory management. + */ + +static void spidev_classdev_release(struct device *dev) +{ + struct spidev_data *spidev; + + spidev = container_of(dev, struct spidev_data, dev); + kfree(spidev); +} + +static struct class spidev_class = { + .name = "spidev", + .owner = THIS_MODULE, + .dev_release = spidev_classdev_release, +}; + +/*-------------------------------------------------------------------------*/ + +static int spidev_probe(struct spi_device *spi) +{ + struct spidev_data *spidev; + int status; + unsigned long minor; + + /* Allocate driver data */ + spidev = kzalloc(sizeof(*spidev), GFP_KERNEL); + if (!spidev) + return -ENOMEM; + + /* Initialize the driver data */ + spidev->spi = spi; + mutex_init(&spidev->buf_lock); + + INIT_LIST_HEAD(&spidev->device_entry); + + /* If we can allocate a minor number, hook up this device. + * Reusing minors is fine so long as udev or mdev is working. + */ + mutex_lock(&device_list_lock); + minor = find_first_zero_bit(minors, ARRAY_SIZE(minors)); + if (minor < N_SPI_MINORS) { + spidev->dev.parent = &spi->dev; + spidev->dev.class = &spidev_class; + spidev->dev.devt = MKDEV(SPIDEV_MAJOR, minor); + snprintf(spidev->dev.bus_id, sizeof spidev->dev.bus_id, + "spidev%d.%d", + spi->master->bus_num, spi->chip_select); + status = device_register(&spidev->dev); + } else { + dev_dbg(&spi->dev, "no minor number available!\n"); + status = -ENODEV; + } + if (status == 0) { + set_bit(minor, minors); + dev_set_drvdata(&spi->dev, spidev); + list_add(&spidev->device_entry, &device_list); + } + mutex_unlock(&device_list_lock); + + if (status != 0) + kfree(spidev); + + return status; +} + +static int spidev_remove(struct spi_device *spi) +{ + struct spidev_data *spidev = dev_get_drvdata(&spi->dev); + + mutex_lock(&device_list_lock); + + list_del(&spidev->device_entry); + dev_set_drvdata(&spi->dev, NULL); + clear_bit(MINOR(spidev->dev.devt), minors); + device_unregister(&spidev->dev); + + mutex_unlock(&device_list_lock); + + return 0; +} + +static struct spi_driver spidev_spi = { + .driver = { + .name = "spidev", + .owner = THIS_MODULE, + }, + .probe = spidev_probe, + .remove = __devexit_p(spidev_remove), + + /* NOTE: suspend/resume methods are not necessary here. + * We don't do anything except pass the requests to/from + * the underlying controller. The refrigerator handles + * most issues; the controller driver handles the rest. + */ +}; + +/*-------------------------------------------------------------------------*/ + +static int __init spidev_init(void) +{ + int status; + + /* Claim our 256 reserved device numbers. Then register a class + * that will key udev/mdev to add/remove /dev nodes. Last, register + * the driver which manages those device numbers. 
+ */ + BUILD_BUG_ON(N_SPI_MINORS > 256); + status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops); + if (status < 0) + return status; + + status = class_register(&spidev_class); + if (status < 0) { + unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); + return status; + } + + status = spi_register_driver(&spidev_spi); + if (status < 0) { + class_unregister(&spidev_class); + unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); + } + return status; +} +module_init(spidev_init); + +static void __exit spidev_exit(void) +{ + spi_unregister_driver(&spidev_spi); + class_unregister(&spidev_class); + unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); +} +module_exit(spidev_exit); + +MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>"); +MODULE_DESCRIPTION("User mode SPI device interface"); +MODULE_LICENSE("GPL");
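
The spidev driver added by this patch is operated entirely from userspace through the character device nodes and ioctls shown above. The following is a rough, hedged sketch (not part of the patch) of how a userspace program might drive it: the node name /dev/spidev0.0, the use of SPI_MODE_0, and the integer casts for tx_buf/rx_buf are assumptions that depend on how udev names the node and on the exact <linux/spi/spidev.h> header shipped with the kernel.

    /*
     * Minimal illustrative userspace client for the spidev interface.
     * Assumptions: a /dev/spidev0.0 node exists (created by udev/mdev),
     * and struct spi_ioc_transfer stores buffer addresses as integers
     * (hence the unsigned long casts), as in mainline spidev headers.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/spi/spidev.h>

    int main(void)
    {
    	int fd = open("/dev/spidev0.0", O_RDWR);	/* hypothetical bus 0, chipselect 0 */
    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}

    	/* Configure clock polarity/phase, word size and max clock via the WR ioctls */
    	uint8_t mode = SPI_MODE_0;
    	uint8_t bits = 8;
    	uint32_t speed = 500000;
    	if (ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0 ||
    	    ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &bits) < 0 ||
    	    ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed) < 0) {
    		perror("ioctl setup");
    		close(fd);
    		return 1;
    	}

    	/* One full-duplex transfer: tx and rx buffers share the same length */
    	uint8_t tx[4] = { 0x9f, 0x00, 0x00, 0x00 };
    	uint8_t rx[4] = { 0 };
    	struct spi_ioc_transfer xfer;
    	memset(&xfer, 0, sizeof(xfer));
    	xfer.tx_buf = (unsigned long)tx;
    	xfer.rx_buf = (unsigned long)rx;
    	xfer.len = sizeof(tx);

    	if (ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 0)
    		perror("SPI_IOC_MESSAGE");
    	else
    		printf("rx: %02x %02x %02x %02x\n", rx[0], rx[1], rx[2], rx[3]);

    	close(fd);
    	return 0;
    }

Read() and write() on the node map to half-duplex spi_read()/spi_write() calls with the current device setup, while SPI_IOC_MESSAGE(n) passes an array of n spi_ioc_transfer records through spidev_message() for segmented and full-duplex I/O, as implemented in the ioctl handler above.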