Diffstat (limited to 'drivers/mtd')
34 files changed, 1146 insertions, 400 deletions
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 6fde0a2e3567..bc33200535fc 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -120,6 +120,13 @@ config MTD_PHRAM doesn't have access to, memory beyond the mem=xxx limit, nvram, memory on the video card, etc... +config MTD_PS3VRAM + tristate "PS3 video RAM" + depends on FB_PS3 + help + This driver allows you to use excess PS3 video RAM as volatile + storage or system swap. + config MTD_LART tristate "28F160xx flash driver for LART" depends on SA1100_LART diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index 0993d5cf3923..e51521df4e40 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile @@ -16,3 +16,4 @@ obj-$(CONFIG_MTD_LART) += lart.o obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o obj-$(CONFIG_MTD_M25P80) += m25p80.o +obj-$(CONFIG_MTD_PS3VRAM) += ps3vram.o diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 6659b2275c0c..5733f0643843 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -170,7 +170,7 @@ static int wait_till_ready(struct m25p *flash) static int erase_chip(struct m25p *flash) { DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n", - flash->spi->dev.bus_id, __func__, + dev_name(&flash->spi->dev), __func__, flash->mtd.size / 1024); /* Wait until finished previous write command. */ @@ -197,7 +197,7 @@ static int erase_chip(struct m25p *flash) static int erase_sector(struct m25p *flash, u32 offset) { DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n", - flash->spi->dev.bus_id, __func__, + dev_name(&flash->spi->dev), __func__, flash->mtd.erasesize / 1024, offset); /* Wait until finished previous write command. */ @@ -234,7 +234,7 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr) u32 addr,len; DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n", - flash->spi->dev.bus_id, __func__, "at", + dev_name(&flash->spi->dev), __func__, "at", (u32)instr->addr, instr->len); /* sanity checks */ @@ -295,7 +295,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len, struct spi_message m; DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", - flash->spi->dev.bus_id, __func__, "from", + dev_name(&flash->spi->dev), __func__, "from", (u32)from, len); /* sanity checks */ @@ -367,7 +367,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len, struct spi_message m; DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", - flash->spi->dev.bus_id, __func__, "to", + dev_name(&flash->spi->dev), __func__, "to", (u32)to, len); if (retlen) @@ -563,7 +563,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi) tmp = spi_write_then_read(spi, &code, 1, id, 5); if (tmp < 0) { DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", - spi->dev.bus_id, tmp); + dev_name(&spi->dev), tmp); return NULL; } jedec = id[0]; @@ -617,7 +617,7 @@ static int __devinit m25p_probe(struct spi_device *spi) /* unrecognized chip? */ if (i == ARRAY_SIZE(m25p_data)) { DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n", - spi->dev.bus_id, data->type); + dev_name(&spi->dev), data->type); info = NULL; /* recognized; is that chip really what's there? 
*/ @@ -658,7 +658,7 @@ static int __devinit m25p_probe(struct spi_device *spi) if (data && data->name) flash->mtd.name = data->name; else - flash->mtd.name = spi->dev.bus_id; + flash->mtd.name = dev_name(&spi->dev); flash->mtd.type = MTD_NORFLASH; flash->mtd.writesize = 1; diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 6dd9aff8bb2d..65126cd668ff 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -128,7 +128,7 @@ static int dataflash_waitready(struct spi_device *spi) status = dataflash_status(spi); if (status < 0) { DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n", - spi->dev.bus_id, status); + dev_name(&spi->dev), status); status = 0; } @@ -154,7 +154,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) uint8_t *command; DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n", - spi->dev.bus_id, + dev_name(&spi->dev), instr->addr, instr->len); /* Sanity checks */ @@ -197,7 +197,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) if (status < 0) { printk(KERN_ERR "%s: erase %x, err %d\n", - spi->dev.bus_id, pageaddr, status); + dev_name(&spi->dev), pageaddr, status); /* REVISIT: can retry instr->retries times; or * giveup and instr->fail_addr = instr->addr; */ @@ -239,7 +239,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, int status; DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n", - priv->spi->dev.bus_id, (unsigned)from, (unsigned)(from + len)); + dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len)); *retlen = 0; @@ -288,7 +288,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, status = 0; } else DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n", - priv->spi->dev.bus_id, + dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len), status); return status; @@ -315,7 +315,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, uint8_t *command; DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n", - spi->dev.bus_id, (unsigned)to, (unsigned)(to + len)); + dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); *retlen = 0; @@ -374,7 +374,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, status = spi_sync(spi, &msg); if (status < 0) DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n", - spi->dev.bus_id, addr, status); + dev_name(&spi->dev), addr, status); (void) dataflash_waitready(priv->spi); } @@ -396,7 +396,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, spi_transfer_del(x + 1); if (status < 0) DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n", - spi->dev.bus_id, addr, writelen, status); + dev_name(&spi->dev), addr, writelen, status); (void) dataflash_waitready(priv->spi); @@ -416,14 +416,14 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, status = spi_sync(spi, &msg); if (status < 0) DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n", - spi->dev.bus_id, addr, status); + dev_name(&spi->dev), addr, status); status = dataflash_waitready(priv->spi); /* Check result of the compare operation */ if (status & (1 << 6)) { printk(KERN_ERR "%s: compare page %u, err %d\n", - spi->dev.bus_id, pageaddr, status); + dev_name(&spi->dev), pageaddr, status); remaining = 0; status = -EIO; break; @@ -779,7 +779,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi) tmp = spi_write_then_read(spi, &code, 1, id, 3); if (tmp < 0) { DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC 
ID\n", - spi->dev.bus_id, tmp); + dev_name(&spi->dev), tmp); return ERR_PTR(tmp); } if (id[0] != 0x1f) @@ -869,7 +869,7 @@ static int __devinit dataflash_probe(struct spi_device *spi) status = dataflash_status(spi); if (status <= 0 || status == 0xff) { DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n", - spi->dev.bus_id, status); + dev_name(&spi->dev), status); if (status == 0 || status == 0xff) status = -ENODEV; return status; @@ -905,13 +905,13 @@ static int __devinit dataflash_probe(struct spi_device *spi) /* obsolete AT45DB1282 not (yet?) supported */ default: DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n", - spi->dev.bus_id, status & 0x3c); + dev_name(&spi->dev), status & 0x3c); status = -ENODEV; } if (status < 0) DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n", - spi->dev.bus_id, status); + dev_name(&spi->dev), status); return status; } @@ -921,7 +921,7 @@ static int __devexit dataflash_remove(struct spi_device *spi) struct dataflash *flash = dev_get_drvdata(&spi->dev); int status; - DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", spi->dev.bus_id); + DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev)); if (mtd_has_partitions() && flash->partitioned) status = del_mtd_partitions(&flash->mtd); diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c index d38bca64bb15..d2fd550f7e09 100644 --- a/drivers/mtd/devices/pmc551.c +++ b/drivers/mtd/devices/pmc551.c @@ -34,7 +34,7 @@ * aperture size, not the dram size, and the V370PDC supplies no * other method for memory size discovery. This problem is * mostly only relevant when compiled as a module, as the - * unloading of the module with an aperture size smaller then + * unloading of the module with an aperture size smaller than * the ram will cause the driver to detect the onboard memory * size to be equal to the aperture size when the module is * reloaded. Soooo, to help, the module supports an msize diff --git a/drivers/mtd/devices/ps3vram.c b/drivers/mtd/devices/ps3vram.c new file mode 100644 index 000000000000..d21e9beb7ed2 --- /dev/null +++ b/drivers/mtd/devices/ps3vram.c @@ -0,0 +1,768 @@ +/** + * ps3vram - Use extra PS3 video ram as MTD block device. 
+ * + * Copyright (c) 2007-2008 Jim Paris <jim@jtan.com> + * RSX DMA support added by Vivien Chappelier <vivien.chappelier@free.fr> + */ + +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/version.h> +#include <linux/gfp.h> +#include <linux/delay.h> +#include <linux/mtd/mtd.h> + +#include <asm/lv1call.h> +#include <asm/ps3.h> + +#define DEVICE_NAME "ps3vram" + +#define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */ +#define XDR_IOIF 0x0c000000 + +#define FIFO_BASE XDR_IOIF +#define FIFO_SIZE (64 * 1024) + +#define DMA_PAGE_SIZE (4 * 1024) + +#define CACHE_PAGE_SIZE (256 * 1024) +#define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE) + +#define CACHE_OFFSET CACHE_PAGE_SIZE +#define FIFO_OFFSET 0 + +#define CTRL_PUT 0x10 +#define CTRL_GET 0x11 +#define CTRL_TOP 0x15 + +#define UPLOAD_SUBCH 1 +#define DOWNLOAD_SUBCH 2 + +#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 + +#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601 + +struct mtd_info ps3vram_mtd; + +#define CACHE_PAGE_PRESENT 1 +#define CACHE_PAGE_DIRTY 2 + +struct ps3vram_tag { + unsigned int address; + unsigned int flags; +}; + +struct ps3vram_cache { + unsigned int page_count; + unsigned int page_size; + struct ps3vram_tag *tags; +}; + +struct ps3vram_priv { + u64 memory_handle; + u64 context_handle; + u32 *ctrl; + u32 *reports; + u8 __iomem *ddr_base; + u8 *xdr_buf; + + u32 *fifo_base; + u32 *fifo_ptr; + + struct device *dev; + struct ps3vram_cache cache; + + /* Used to serialize cache/DMA operations */ + struct mutex lock; +}; + +#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */ +#define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */ +#define DMA_NOTIFIER_SIZE 0x40 +#define NOTIFIER 7 /* notifier used for completion report */ + +/* A trailing '-' means to subtract off ps3fb_videomemory.size */ +char *size = "256M-"; +module_param(size, charp, 0); +MODULE_PARM_DESC(size, "memory size"); + +static u32 *ps3vram_get_notifier(u32 *reports, int notifier) +{ + return (void *) reports + + DMA_NOTIFIER_OFFSET_BASE + + DMA_NOTIFIER_SIZE * notifier; +} + +static void ps3vram_notifier_reset(struct mtd_info *mtd) +{ + int i; + + struct ps3vram_priv *priv = mtd->priv; + u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); + for (i = 0; i < 4; i++) + notify[i] = 0xffffffff; +} + +static int ps3vram_notifier_wait(struct mtd_info *mtd, unsigned int timeout_ms) +{ + struct ps3vram_priv *priv = mtd->priv; + u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + + do { + if (!notify[3]) + return 0; + msleep(1); + } while (time_before(jiffies, timeout)); + + return -ETIMEDOUT; +} + +static void ps3vram_init_ring(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + + priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; + priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET; +} + +static int ps3vram_wait_ring(struct mtd_info *mtd, unsigned int timeout_ms) +{ + struct ps3vram_priv *priv = mtd->priv; + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + + do { + if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET]) + return 0; + msleep(1); + } while (time_before(jiffies, timeout)); + + dev_dbg(priv->dev, "%s:%d: FIFO timeout (%08x/%08x/%08x)\n", __func__,
+ __LINE__, priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET], + priv->ctrl[CTRL_TOP]); + + return -ETIMEDOUT; +} + +static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data) +{ + *(priv->fifo_ptr)++ = data; +} + +static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan, + u32 tag, u32 size) +{ + ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag); +} + +static void ps3vram_rewind_ring(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + u64 status; + + ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET)); + + priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; + + /* asking the HV for a blit will kick the fifo */ + status = lv1_gpu_context_attribute(priv->context_handle, + L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, + 0, 0, 0, 0); + if (status) + dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n", + __func__, __LINE__); + + priv->fifo_ptr = priv->fifo_base; +} + +static void ps3vram_fire_ring(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + u64 status; + + mutex_lock(&ps3_gpu_mutex); + + priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET + + (priv->fifo_ptr - priv->fifo_base) * sizeof(u32); + + /* asking the HV for a blit will kick the fifo */ + status = lv1_gpu_context_attribute(priv->context_handle, + L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, + 0, 0, 0, 0); + if (status) + dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n", + __func__, __LINE__); + + if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) > + FIFO_SIZE - 1024) { + dev_dbg(priv->dev, "%s:%d: fifo full, rewinding\n", __func__, + __LINE__); + ps3vram_wait_ring(mtd, 200); + ps3vram_rewind_ring(mtd); + } + + mutex_unlock(&ps3_gpu_mutex); +} + +static void ps3vram_bind(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1); + ps3vram_out_ring(priv, 0x31337303); + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3); + ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); + ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ + ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ + + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1); + ps3vram_out_ring(priv, 0x3137c0de); + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3); + ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); + ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ + ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ + + ps3vram_fire_ring(mtd); +} + +static int ps3vram_upload(struct mtd_info *mtd, unsigned int src_offset, + unsigned int dst_offset, int len, int count) +{ + struct ps3vram_priv *priv = mtd->priv; + + ps3vram_begin_ring(priv, UPLOAD_SUBCH, + NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); + ps3vram_out_ring(priv, XDR_IOIF + src_offset); + ps3vram_out_ring(priv, dst_offset); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, count); + ps3vram_out_ring(priv, (1 << 8) | 1); + ps3vram_out_ring(priv, 0); + + ps3vram_notifier_reset(mtd); + ps3vram_begin_ring(priv, UPLOAD_SUBCH, + NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); + ps3vram_out_ring(priv, 0); + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1); + ps3vram_out_ring(priv, 0); + ps3vram_fire_ring(mtd); + if (ps3vram_notifier_wait(mtd, 200) < 0) { + dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__, + __LINE__); + return -1; + } + + return 0; +} + +static int ps3vram_download(struct mtd_info *mtd, unsigned int src_offset, + unsigned int dst_offset, int len, int 
count) +{ + struct ps3vram_priv *priv = mtd->priv; + + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, + NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); + ps3vram_out_ring(priv, src_offset); + ps3vram_out_ring(priv, XDR_IOIF + dst_offset); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, count); + ps3vram_out_ring(priv, (1 << 8) | 1); + ps3vram_out_ring(priv, 0); + + ps3vram_notifier_reset(mtd); + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, + NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); + ps3vram_out_ring(priv, 0); + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1); + ps3vram_out_ring(priv, 0); + ps3vram_fire_ring(mtd); + if (ps3vram_notifier_wait(mtd, 200) < 0) { + dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__, + __LINE__); + return -1; + } + + return 0; +} + +static void ps3vram_cache_evict(struct mtd_info *mtd, int entry) +{ + struct ps3vram_priv *priv = mtd->priv; + struct ps3vram_cache *cache = &priv->cache; + + if (cache->tags[entry].flags & CACHE_PAGE_DIRTY) { + dev_dbg(priv->dev, "%s:%d: flushing %d : 0x%08x\n", __func__, + __LINE__, entry, cache->tags[entry].address); + if (ps3vram_upload(mtd, + CACHE_OFFSET + entry * cache->page_size, + cache->tags[entry].address, + DMA_PAGE_SIZE, + cache->page_size / DMA_PAGE_SIZE) < 0) { + dev_dbg(priv->dev, "%s:%d: failed to upload from " + "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, + entry * cache->page_size, + cache->tags[entry].address, cache->page_size); + } + cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY; + } +} + +static void ps3vram_cache_load(struct mtd_info *mtd, int entry, + unsigned int address) +{ + struct ps3vram_priv *priv = mtd->priv; + struct ps3vram_cache *cache = &priv->cache; + + dev_dbg(priv->dev, "%s:%d: fetching %d : 0x%08x\n", __func__, __LINE__, + entry, address); + if (ps3vram_download(mtd, + address, + CACHE_OFFSET + entry * cache->page_size, + DMA_PAGE_SIZE, + cache->page_size / DMA_PAGE_SIZE) < 0) { + dev_err(priv->dev, "%s:%d: failed to download from " + "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, address, + entry * cache->page_size, cache->page_size); + } + + cache->tags[entry].address = address; + cache->tags[entry].flags |= CACHE_PAGE_PRESENT; +} + + +static void ps3vram_cache_flush(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + struct ps3vram_cache *cache = &priv->cache; + int i; + + dev_dbg(priv->dev, "%s:%d: FLUSH\n", __func__, __LINE__); + for (i = 0; i < cache->page_count; i++) { + ps3vram_cache_evict(mtd, i); + cache->tags[i].flags = 0; + } +} + +static unsigned int ps3vram_cache_match(struct mtd_info *mtd, loff_t address) +{ + struct ps3vram_priv *priv = mtd->priv; + struct ps3vram_cache *cache = &priv->cache; + unsigned int base; + unsigned int offset; + int i; + static int counter; + + offset = (unsigned int) (address & (cache->page_size - 1)); + base = (unsigned int) (address - offset); + + /* fully associative check */ + for (i = 0; i < cache->page_count; i++) { + if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) && + cache->tags[i].address == base) { + dev_dbg(priv->dev, "%s:%d: found entry %d : 0x%08x\n", + __func__, __LINE__, i, cache->tags[i].address); + return i; + } + } + + /* choose a random entry */ + i = (jiffies + (counter++)) % cache->page_count; + dev_dbg(priv->dev, "%s:%d: using entry %d\n", __func__, __LINE__, i); + + ps3vram_cache_evict(mtd, i); + ps3vram_cache_load(mtd, i, base); + + return i; +} + +static int ps3vram_cache_init(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + + 
priv->cache.page_count = CACHE_PAGE_COUNT; + priv->cache.page_size = CACHE_PAGE_SIZE; + priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) * + CACHE_PAGE_COUNT, GFP_KERNEL); + if (priv->cache.tags == NULL) { + dev_err(priv->dev, "%s:%d: could not allocate cache tags\n", + __func__, __LINE__); + return -ENOMEM; + } + + dev_info(priv->dev, "created ram cache: %d entries, %d KiB each\n", + CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024); + + return 0; +} + +static void ps3vram_cache_cleanup(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + + ps3vram_cache_flush(mtd); + kfree(priv->cache.tags); +} + +static int ps3vram_erase(struct mtd_info *mtd, struct erase_info *instr) +{ + struct ps3vram_priv *priv = mtd->priv; + + if (instr->addr + instr->len > mtd->size) + return -EINVAL; + + mutex_lock(&priv->lock); + + ps3vram_cache_flush(mtd); + + /* Set bytes to 0xFF */ + memset_io(priv->ddr_base + instr->addr, 0xFF, instr->len); + + mutex_unlock(&priv->lock); + + instr->state = MTD_ERASE_DONE; + mtd_erase_callback(instr); + + return 0; +} + +static int ps3vram_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct ps3vram_priv *priv = mtd->priv; + unsigned int cached, count; + + dev_dbg(priv->dev, "%s:%d: from=0x%08x len=0x%zx\n", __func__, __LINE__, + (unsigned int)from, len); + + if (from >= mtd->size) + return -EINVAL; + + if (len > mtd->size - from) + len = mtd->size - from; + + /* Copy from vram to buf */ + count = len; + while (count) { + unsigned int offset, avail; + unsigned int entry; + + offset = (unsigned int) (from & (priv->cache.page_size - 1)); + avail = priv->cache.page_size - offset; + + mutex_lock(&priv->lock); + + entry = ps3vram_cache_match(mtd, from); + cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; + + dev_dbg(priv->dev, "%s:%d: from=%08x cached=%08x offset=%08x " + "avail=%08x count=%08x\n", __func__, __LINE__, + (unsigned int)from, cached, offset, avail, count); + + if (avail > count) + avail = count; + memcpy(buf, priv->xdr_buf + cached, avail); + + mutex_unlock(&priv->lock); + + buf += avail; + count -= avail; + from += avail; + } + + *retlen = len; + return 0; +} + +static int ps3vram_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct ps3vram_priv *priv = mtd->priv; + unsigned int cached, count; + + if (to >= mtd->size) + return -EINVAL; + + if (len > mtd->size - to) + len = mtd->size - to; + + /* Copy from buf to vram */ + count = len; + while (count) { + unsigned int offset, avail; + unsigned int entry; + + offset = (unsigned int) (to & (priv->cache.page_size - 1)); + avail = priv->cache.page_size - offset; + + mutex_lock(&priv->lock); + + entry = ps3vram_cache_match(mtd, to); + cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; + + dev_dbg(priv->dev, "%s:%d: to=%08x cached=%08x offset=%08x " + "avail=%08x count=%08x\n", __func__, __LINE__, + (unsigned int)to, cached, offset, avail, count); + + if (avail > count) + avail = count; + memcpy(priv->xdr_buf + cached, buf, avail); + + priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY; + + mutex_unlock(&priv->lock); + + buf += avail; + count -= avail; + to += avail; + } + + *retlen = len; + return 0; +} + +static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) +{ + struct ps3vram_priv *priv; + int status; + u64 ddr_lpar; + u64 ctrl_lpar; + u64 info_lpar; + u64 reports_lpar; + u64 ddr_size; + u64 reports_size; + int ret = -ENOMEM; + char *rest; + + ret = -EIO; + ps3vram_mtd.priv = 
kzalloc(sizeof(struct ps3vram_priv), GFP_KERNEL); + if (!ps3vram_mtd.priv) + goto out; + priv = ps3vram_mtd.priv; + + mutex_init(&priv->lock); + priv->dev = &dev->core; + + /* Allocate XDR buffer (1MiB aligned) */ + priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL, + get_order(XDR_BUF_SIZE)); + if (priv->xdr_buf == NULL) { + dev_dbg(&dev->core, "%s:%d: could not allocate XDR buffer\n", + __func__, __LINE__); + ret = -ENOMEM; + goto out_free_priv; + } + + /* Put FIFO at beginning of XDR buffer */ + priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET); + priv->fifo_ptr = priv->fifo_base; + + /* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */ + if (ps3_open_hv_device(dev)) { + dev_err(&dev->core, "%s:%d: ps3_open_hv_device failed\n", + __func__, __LINE__); + ret = -EAGAIN; + goto out_free_xdr_buf; + } + + /* Request memory */ + status = -1; + ddr_size = memparse(size, &rest); + if (*rest == '-') + ddr_size -= ps3fb_videomemory.size; + ddr_size = ALIGN(ddr_size, 1024*1024); + if (ddr_size <= 0) { + dev_err(&dev->core, "%s:%d: specified size is too small\n", + __func__, __LINE__); + ret = -EINVAL; + goto out_close_gpu; + } + + while (ddr_size > 0) { + status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0, + &priv->memory_handle, + &ddr_lpar); + if (!status) + break; + ddr_size -= 1024*1024; + } + if (status || ddr_size <= 0) { + dev_err(&dev->core, "%s:%d: lv1_gpu_memory_allocate failed\n", + __func__, __LINE__); + ret = -ENOMEM; + goto out_close_gpu; + } + + /* Request context */ + status = lv1_gpu_context_allocate(priv->memory_handle, + 0, + &priv->context_handle, + &ctrl_lpar, + &info_lpar, + &reports_lpar, + &reports_size); + if (status) { + dev_err(&dev->core, "%s:%d: lv1_gpu_context_allocate failed\n", + __func__, __LINE__); + ret = -ENOMEM; + goto out_free_memory; + } + + /* Map XDR buffer to RSX */ + status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, + ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)), + XDR_BUF_SIZE, 0); + if (status) { + dev_err(&dev->core, "%s:%d: lv1_gpu_context_iomap failed\n", + __func__, __LINE__); + ret = -ENOMEM; + goto out_free_context; + } + + priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE); + + if (!priv->ddr_base) { + dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, + __LINE__); + ret = -ENOMEM; + goto out_free_context; + } + + priv->ctrl = ioremap(ctrl_lpar, 64 * 1024); + if (!priv->ctrl) { + dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, + __LINE__); + ret = -ENOMEM; + goto out_unmap_vram; + } + + priv->reports = ioremap(reports_lpar, reports_size); + if (!priv->reports) { + dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, + __LINE__); + ret = -ENOMEM; + goto out_unmap_ctrl; + } + + mutex_lock(&ps3_gpu_mutex); + ps3vram_init_ring(&ps3vram_mtd); + mutex_unlock(&ps3_gpu_mutex); + + ps3vram_mtd.name = "ps3vram"; + ps3vram_mtd.size = ddr_size; + ps3vram_mtd.flags = MTD_CAP_RAM; + ps3vram_mtd.erase = ps3vram_erase; + ps3vram_mtd.point = NULL; + ps3vram_mtd.unpoint = NULL; + ps3vram_mtd.read = ps3vram_read; + ps3vram_mtd.write = ps3vram_write; + ps3vram_mtd.owner = THIS_MODULE; + ps3vram_mtd.type = MTD_RAM; + ps3vram_mtd.erasesize = CACHE_PAGE_SIZE; + ps3vram_mtd.writesize = 1; + + ps3vram_bind(&ps3vram_mtd); + + mutex_lock(&ps3_gpu_mutex); + ret = ps3vram_wait_ring(&ps3vram_mtd, 100); + mutex_unlock(&ps3_gpu_mutex); + if (ret < 0) { + dev_err(&dev->core, "%s:%d: failed to initialize channels\n", + __func__, __LINE__); + ret = -ETIMEDOUT; + goto out_unmap_reports; + } + +
ps3vram_cache_init(&ps3vram_mtd); + + if (add_mtd_device(&ps3vram_mtd)) { + dev_err(&dev->core, "%s:%d: add_mtd_device failed\n", + __func__, __LINE__); + ret = -EAGAIN; + goto out_cache_cleanup; + } + + dev_info(&dev->core, "reserved %u MiB of gpu memory\n", + (unsigned int)(ddr_size / 1024 / 1024)); + + return 0; + +out_cache_cleanup: + ps3vram_cache_cleanup(&ps3vram_mtd); +out_unmap_reports: + iounmap(priv->reports); +out_unmap_ctrl: + iounmap(priv->ctrl); +out_unmap_vram: + iounmap(priv->ddr_base); +out_free_context: + lv1_gpu_context_free(priv->context_handle); +out_free_memory: + lv1_gpu_memory_free(priv->memory_handle); +out_close_gpu: + ps3_close_hv_device(dev); +out_free_xdr_buf: + free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); +out_free_priv: + kfree(ps3vram_mtd.priv); + ps3vram_mtd.priv = NULL; +out: + return ret; +} + +static int ps3vram_shutdown(struct ps3_system_bus_device *dev) +{ + struct ps3vram_priv *priv; + + priv = ps3vram_mtd.priv; + + del_mtd_device(&ps3vram_mtd); + ps3vram_cache_cleanup(&ps3vram_mtd); + iounmap(priv->reports); + iounmap(priv->ctrl); + iounmap(priv->ddr_base); + lv1_gpu_context_free(priv->context_handle); + lv1_gpu_memory_free(priv->memory_handle); + ps3_close_hv_device(dev); + free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); + kfree(priv); + return 0; +} + +static struct ps3_system_bus_driver ps3vram_driver = { + .match_id = PS3_MATCH_ID_GPU, + .match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK, + .core.name = DEVICE_NAME, + .core.owner = THIS_MODULE, + .probe = ps3vram_probe, + .remove = ps3vram_shutdown, + .shutdown = ps3vram_shutdown, +}; + +static int __init ps3vram_init(void) +{ + return ps3_system_bus_driver_register(&ps3vram_driver); +} + +static void __exit ps3vram_exit(void) +{ + ps3_system_bus_driver_unregister(&ps3vram_driver); +} + +module_init(ps3vram_init); +module_exit(ps3vram_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jim Paris <jim@jtan.com>"); +MODULE_DESCRIPTION("MTD driver for PS3 video RAM"); +MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK); diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c index 3aa018c092f8..42969fe051b2 100644 --- a/drivers/mtd/maps/dc21285.c +++ b/drivers/mtd/maps/dc21285.c @@ -32,16 +32,15 @@ static struct mtd_info *dc21285_mtd; */ static void nw_en_write(void) { - extern spinlock_t gpio_lock; unsigned long flags; /* * we want to write a bit pattern XXX1 to Xilinx to enable * the write gate, which will be open for about the next 2ms. */ - spin_lock_irqsave(&gpio_lock, flags); - cpld_modify(1, 1); - spin_unlock_irqrestore(&gpio_lock, flags); + spin_lock_irqsave(&nw_gpio_lock, flags); + nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE); + spin_unlock_irqrestore(&nw_gpio_lock, flags); /* * let the ISA bus to catch on... 
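The ps3vram driver above caches VRAM pages in the XDR buffer through a small fully associative, write-back cache: ps3vram_cache_match() scans every tag for the 256 KiB page covering the requested offset and, on a miss, evicts a pseudo-randomly chosen entry (flushing it first if dirty) before fetching the new page, so ps3vram_read() and ps3vram_write() only ever touch priv->xdr_buf. A minimal userspace sketch of that policy follows; the names, sizes, and printf stand-ins for the DMA helpers are illustrative, not part of the driver:

#include <stdio.h>

#define PAGE_SZ		(256 * 1024)	/* mirrors CACHE_PAGE_SIZE */
#define PAGE_CNT	7		/* mirrors CACHE_PAGE_COUNT */

#define PAGE_PRESENT	1
#define PAGE_DIRTY	2

struct tag {
	unsigned int address;	/* base VRAM address of the cached page */
	unsigned int flags;
};

static struct tag tags[PAGE_CNT];
static unsigned int counter;	/* crude eviction randomizer */

/* stand-ins for the DMA done by ps3vram_upload()/ps3vram_download() */
static void writeback(int entry)
{
	printf("flush entry %d (0x%08x)\n", entry, tags[entry].address);
}

static void fill(int entry, unsigned int base)
{
	printf("fetch entry %d (0x%08x)\n", entry, base);
}

/* return the cache entry holding the page that covers @address */
static int cache_match(unsigned int address)
{
	unsigned int offset = address & (PAGE_SZ - 1);
	unsigned int base = address - offset;
	int i;

	/* fully associative check, as in ps3vram_cache_match() */
	for (i = 0; i < PAGE_CNT; i++)
		if ((tags[i].flags & PAGE_PRESENT) && tags[i].address == base)
			return i;

	/* miss: pick a pseudo-random victim, write it back if dirty */
	i = counter++ % PAGE_CNT;
	if (tags[i].flags & PAGE_DIRTY)
		writeback(i);
	fill(i, base);
	tags[i].address = base;
	tags[i].flags = PAGE_PRESENT;
	return i;
}

int main(void)
{
	cache_match(0x40000);	/* miss: fetches the 256 KiB page at 0x40000 */
	cache_match(0x40010);	/* hit: same page */
	cache_match(0x2000000);	/* miss: may evict an entry */
	return 0;
}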
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c index 7100ee3c7b01..d2ec262666c7 100644 --- a/drivers/mtd/maps/integrator-flash.c +++ b/drivers/mtd/maps/integrator-flash.c @@ -105,7 +105,7 @@ static int armflash_probe(struct platform_device *dev) info->map.bankwidth = plat->width; info->map.phys = res->start; info->map.virt = base; - info->map.name = dev->dev.bus_id; + info->map.name = dev_name(&dev->dev); info->map.set_vpp = armflash_set_vpp; simple_map_init(&info->map); diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c index dcdb1f17577d..d4fb9a3ab4df 100644 --- a/drivers/mtd/maps/ixp2000.c +++ b/drivers/mtd/maps/ixp2000.c @@ -170,7 +170,7 @@ static int ixp2000_flash_probe(struct platform_device *dev) err = -ENOMEM; goto Error; } - memzero(info, sizeof(struct ixp2000_flash_info)); + memset(info, 0, sizeof(struct ixp2000_flash_info)); platform_set_drvdata(dev, info); @@ -188,7 +188,7 @@ static int ixp2000_flash_probe(struct platform_device *dev) */ info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup; - info->map.name = dev->dev.bus_id; + info->map.name = dev_name(&dev->dev); info->map.read = ixp2000_flash_read8; info->map.write = ixp2000_flash_write8; info->map.copy_from = ixp2000_flash_copy_from; @@ -196,7 +196,7 @@ static int ixp2000_flash_probe(struct platform_device *dev) info->res = request_mem_region(dev->resource->start, dev->resource->end - dev->resource->start + 1, - dev->dev.bus_id); + dev_name(&dev->dev)); if (!info->res) { dev_err(&dev->dev, "Could not reserve memory region\n"); err = -ENOMEM; diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c index 9c7a5fbd4e51..7214b876feba 100644 --- a/drivers/mtd/maps/ixp4xx.c +++ b/drivers/mtd/maps/ixp4xx.c @@ -201,7 +201,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev) err = -ENOMEM; goto Error; } - memzero(info, sizeof(struct ixp4xx_flash_info)); + memset(info, 0, sizeof(struct ixp4xx_flash_info)); platform_set_drvdata(dev, info); @@ -218,7 +218,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev) * handle that. 
*/ info->map.bankwidth = 2; - info->map.name = dev->dev.bus_id; + info->map.name = dev_name(&dev->dev); info->map.read = ixp4xx_read16, info->map.write = ixp4xx_probe_write16, info->map.copy_from = ixp4xx_copy_from, diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c index 05f276af15da..7e50e9b1b781 100644 --- a/drivers/mtd/maps/omap_nor.c +++ b/drivers/mtd/maps/omap_nor.c @@ -101,7 +101,7 @@ static int __init omapflash_probe(struct platform_device *pdev) err = -ENOMEM; goto out_release_mem_region; } - info->map.name = pdev->dev.bus_id; + info->map.name = dev_name(&pdev->dev); info->map.phys = res->start; info->map.size = size; info->map.bankwidth = pdata->width; diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index dfbf3f270cea..1db16e549e38 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c @@ -108,13 +108,13 @@ static int physmap_flash_probe(struct platform_device *dev) if (!devm_request_mem_region(&dev->dev, dev->resource[i].start, dev->resource[i].end - dev->resource[i].start + 1, - dev->dev.bus_id)) { + dev_name(&dev->dev))) { dev_err(&dev->dev, "Could not reserve memory region\n"); err = -ENOMEM; goto err_out; } - info->map[i].name = dev->dev.bus_id; + info->map[i].name = dev_name(&dev->dev); info->map[i].phys = dev->resource[i].start; info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1; info->map[i].bankwidth = physmap_data->width; @@ -150,7 +150,7 @@ static int physmap_flash_probe(struct platform_device *dev) * We detected multiple devices. Concatenate them together. */ #ifdef CONFIG_MTD_CONCAT - info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id); + info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev)); if (info->cmtd == NULL) err = -ENXIO; #else diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 5fcfec034a94..fbf0ca939d72 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -183,7 +183,7 @@ static int __devinit of_flash_probe(struct of_device *dev, err = -EBUSY; info->res = request_mem_region(res.start, res.end - res.start + 1, - dev->dev.bus_id); + dev_name(&dev->dev)); if (!info->res) goto err_out; @@ -194,7 +194,7 @@ static int __devinit of_flash_probe(struct of_device *dev, goto err_out; } - info->map.name = dev->dev.bus_id; + info->map.name = dev_name(&dev->dev); info->map.phys = res.start; info->map.size = res.end - res.start + 1; info->map.bankwidth = *width; diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 789842d0e6f2..1a05cf37851e 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -691,7 +691,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs) */ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */ int num_devs, /* number of subdevices */ - char *name) + const char *name) { /* name for the new device */ int i; size_t size; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 1c2e9450d663..f8ae0400c49c 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -408,7 +408,7 @@ config MTD_NAND_FSL_UPM config MTD_NAND_MXC tristate "MXC NAND support" - depends on ARCH_MX2 + depends on ARCH_MX2 || ARCH_MX3 help This enables the driver for the NAND flash controller on the MXC processors. 
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index a83192f80eba..7815a404a632 100644 --- a/drivers/mtd/nand/fsl_upm.c +++ b/drivers/mtd/nand/fsl_upm.c @@ -222,7 +222,7 @@ static int __devinit fun_probe(struct of_device *ofdev, fun->rnb_gpio = of_get_gpio(ofdev->node, 0); if (fun->rnb_gpio >= 0) { - ret = gpio_request(fun->rnb_gpio, ofdev->dev.bus_id); + ret = gpio_request(fun->rnb_gpio, dev_name(&ofdev->dev)); if (ret) { dev_err(&ofdev->dev, "can't request RNB gpio\n"); goto err2; diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index f674c5427b17..75f9f4874ecf 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c @@ -54,7 +54,7 @@ static int __init plat_nand_probe(struct platform_device *pdev) data->chip.priv = &data; data->mtd.priv = &data->chip; data->mtd.owner = THIS_MODULE; - data->mtd.name = pdev->dev.bus_id; + data->mtd.name = dev_name(&pdev->dev); data->chip.IO_ADDR_R = data->io_base; data->chip.IO_ADDR_W = data->io_base; diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 15f0a26730ae..fc4144495610 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -20,8 +20,8 @@ #include <linux/mtd/partitions.h> #include <linux/io.h> #include <linux/irq.h> -#include <asm/dma.h> +#include <mach/dma.h> #include <mach/pxa-regs.h> #include <mach/pxa3xx_nand.h> @@ -1080,7 +1080,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) this = &info->nand_chip; mtd->priv = info; - info->clk = clk_get(&pdev->dev, "NANDCLK"); + info->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(info->clk)) { dev_err(&pdev->dev, "failed to get nand clock\n"); ret = PTR_ERR(info->clk); diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 556139ed1fdf..8e375d5fe231 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -45,8 +45,8 @@ #include <asm/io.h> -#include <asm/plat-s3c/regs-nand.h> -#include <asm/plat-s3c/nand.h> +#include <plat/regs-nand.h> +#include <plat/nand.h> #ifdef CONFIG_MTD_NAND_S3C2410_HWECC static int hardware_ecc = 1; @@ -818,7 +818,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev, goto exit_error; } - memzero(info, sizeof(*info)); + memset(info, 0, sizeof(*info)); platform_set_drvdata(pdev, info); spin_lock_init(&info->controller.lock); @@ -883,7 +883,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev, goto exit_error; } - memzero(info->mtds, size); + memset(info->mtds, 0, size); /* initialise all possible chips */ diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index edb1e322113d..daa6a4c3b8ce 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c @@ -433,7 +433,7 @@ static int tmio_probe(struct platform_device *dev) nand_chip->chip_delay = 15; retval = request_irq(irq, &tmio_irq, - IRQF_DISABLED, dev->dev.bus_id, tmio); + IRQF_DISABLED, dev_name(&dev->dev), tmio); if (retval) { dev_err(&dev->dev, "request_irq error %d\n", retval); goto err_irq; diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c index ad81ab8e95e2..5b69e7773c6c 100644 --- a/drivers/mtd/onenand/generic.c +++ b/drivers/mtd/onenand/generic.c @@ -63,7 +63,7 @@ static int __devinit generic_onenand_probe(struct device *dev) info->onenand.mmcontrol = pdata->mmcontrol; info->onenand.irq = platform_get_irq(pdev, 0); - info->mtd.name = pdev->dev.bus_id; + info->mtd.name = dev_name(&pdev->dev); info->mtd.priv = &info->onenand; info->mtd.owner = THIS_MODULE; diff --git 
a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index a7e4d985f5ef..96ecc1766fa8 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -149,7 +149,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) INIT_COMPLETION(c->irq_done); if (c->gpio_irq) { - result = omap_get_gpio_datain(c->gpio_irq); + result = gpio_get_value(c->gpio_irq); if (result == -1) { ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); intr = read_reg(c, ONENAND_REG_INTERRUPT); @@ -634,9 +634,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) "OneNAND\n", c->gpio_irq); goto err_iounmap; } - omap_set_gpio_direction(c->gpio_irq, 1); + gpio_direction_input(c->gpio_irq); - if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq), + if ((r = request_irq(gpio_to_irq(c->gpio_irq), omap2_onenand_interrupt, IRQF_TRIGGER_RISING, pdev->dev.driver->name, c)) < 0) goto err_release_gpio; @@ -668,7 +668,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) c->onenand.base); c->pdev = pdev; - c->mtd.name = pdev->dev.bus_id; + c->mtd.name = dev_name(&pdev->dev); c->mtd.priv = &c->onenand; c->mtd.owner = THIS_MODULE; @@ -723,7 +723,7 @@ err_release_dma: if (c->dma_channel != -1) omap_free_dma(c->dma_channel); if (c->gpio_irq) - free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); + free_irq(gpio_to_irq(c->gpio_irq), c); err_release_gpio: if (c->gpio_irq) omap_free_gpio(c->gpio_irq); @@ -760,7 +760,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev) omap2_onenand_shutdown(pdev); platform_set_drvdata(pdev, NULL); if (c->gpio_irq) { - free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); + free_irq(gpio_to_irq(c->gpio_irq), c); omap_free_gpio(c->gpio_irq); } iounmap(c->onenand.base); diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index c7630a228310..7caf22cd5ad0 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -280,7 +280,7 @@ static int ubi_sysfs_init(struct ubi_device *ubi) ubi->dev.release = dev_release; ubi->dev.devt = ubi->cdev.dev; ubi->dev.class = ubi_class; - sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); + dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num); err = device_register(&ubi->dev); if (err) return err; @@ -815,19 +815,20 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) if (err) goto out_free; + err = -ENOMEM; ubi->peb_buf1 = vmalloc(ubi->peb_size); if (!ubi->peb_buf1) goto out_free; ubi->peb_buf2 = vmalloc(ubi->peb_size); if (!ubi->peb_buf2) - goto out_free; + goto out_free; #ifdef CONFIG_MTD_UBI_DEBUG mutex_init(&ubi->dbg_buf_mutex); ubi->dbg_peb_buf = vmalloc(ubi->peb_size); if (!ubi->dbg_peb_buf) - goto out_free; + goto out_free; #endif err = attach_by_scanning(ubi); diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index b30a0b83d7f1..98cf31ed0814 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -721,7 +721,8 @@ static int rename_volumes(struct ubi_device *ubi, * It seems we need to remove volume with name @re->new_name, * if it exists. */ - desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE); + desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, + UBI_EXCLUSIVE); if (IS_ERR(desc)) { err = PTR_ERR(desc); if (err == -ENODEV) diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 78e914d23ece..13777e5beac9 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h @@ -27,11 +27,11 @@ #define dbg_err(fmt, ...) 
ubi_err(fmt, ##__VA_ARGS__) #define ubi_assert(expr) do { \ - if (unlikely(!(expr))) { \ - printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ - __func__, __LINE__, current->pid); \ - ubi_dbg_dump_stack(); \ - } \ + if (unlikely(!(expr))) { \ + printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ + __func__, __LINE__, current->pid); \ + ubi_dbg_dump_stack(); \ + } \ } while (0) #define dbg_msg(fmt, ...) \ diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index d8966bae0e0b..25def348e5ba 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -504,12 +504,9 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, if (!vid_hdr) return -ENOMEM; - mutex_lock(&ubi->buf_mutex); - retry: new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN); if (new_pnum < 0) { - mutex_unlock(&ubi->buf_mutex); ubi_free_vid_hdr(ubi, vid_hdr); return new_pnum; } @@ -529,20 +526,23 @@ retry: goto write_error; data_size = offset + len; + mutex_lock(&ubi->buf_mutex); memset(ubi->peb_buf1 + offset, 0xFF, len); /* Read everything before the area where the write failure happened */ if (offset > 0) { err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset); if (err && err != UBI_IO_BITFLIPS) - goto out_put; + goto out_unlock; } memcpy(ubi->peb_buf1 + offset, buf, len); err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size); - if (err) + if (err) { + mutex_unlock(&ubi->buf_mutex); goto write_error; + } mutex_unlock(&ubi->buf_mutex); ubi_free_vid_hdr(ubi, vid_hdr); @@ -553,8 +553,9 @@ retry: ubi_msg("data was successfully recovered"); return 0; -out_put: +out_unlock: mutex_unlock(&ubi->buf_mutex); +out_put: ubi_wl_put_peb(ubi, new_pnum, 1); ubi_free_vid_hdr(ubi, vid_hdr); return err; @@ -567,7 +568,6 @@ write_error: ubi_warn("failed to write to PEB %d", new_pnum); ubi_wl_put_peb(ubi, new_pnum, 1); if (++tries > UBI_IO_RETRIES) { - mutex_unlock(&ubi->buf_mutex); ubi_free_vid_hdr(ubi, vid_hdr); return err; } @@ -717,7 +717,7 @@ write_error: * to the real data size, although the @buf buffer has to contain the * alignment. In all other cases, @len has to be aligned. * - * It is prohibited to write more then once to logical eraseblocks of static + * It is prohibited to write more than once to logical eraseblocks of static * volumes. This function returns zero in case of success and a negative error * code in case of failure. */ @@ -949,10 +949,14 @@ write_error: * This function copies logical eraseblock from physical eraseblock @from to * physical eraseblock @to. The @vid_hdr buffer may be changed by this * function. Returns: - * o %0 in case of success; - * o %1 if the operation was canceled and should be tried later (e.g., - * because a bit-flip was detected at the target PEB); - * o %2 if the volume is being deleted and this LEB should not be moved. + * o %0 in case of success; + * o %1 if the operation was canceled because the volume is being deleted + * or because the PEB was put meanwhile; + * o %2 if the operation was canceled because there was a write error to the + * target PEB; + * o %-EAGAIN if the operation was canceled because a bit-flip was detected + * in the target PEB; + * o a negative error code in case of failure. */ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, struct ubi_vid_hdr *vid_hdr) @@ -978,7 +982,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, /* * Note, we may race with volume deletion, which means that the volume * this logical eraseblock belongs to might be being deleted. 
Since the - * volume deletion unmaps all the volume's logical eraseblocks, it will + * volume deletion un-maps all the volume's logical eraseblocks, it will * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. */ vol = ubi->volumes[idx]; @@ -986,7 +990,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, /* No need to do further work, cancel */ dbg_eba("volume %d is being removed, cancel", vol_id); spin_unlock(&ubi->volumes_lock); - return 2; + return 1; } spin_unlock(&ubi->volumes_lock); @@ -1023,7 +1027,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, /* * OK, now the LEB is locked and we can safely start moving it. Since - * this function utilizes thie @ubi->peb1_buf buffer which is shared + * this function utilizes the @ubi->peb1_buf buffer which is shared * with some other functions, so lock the buffer by taking the * @ubi->buf_mutex. */ @@ -1068,8 +1072,11 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); - if (err) + if (err) { + if (err == -EIO) + err = 2; goto out_unlock_buf; + } cond_resched(); @@ -1079,14 +1086,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, if (err != UBI_IO_BITFLIPS) ubi_warn("cannot read VID header back from PEB %d", to); else - err = 1; + err = -EAGAIN; goto out_unlock_buf; } if (data_size > 0) { err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); - if (err) + if (err) { + if (err == -EIO) + err = 2; goto out_unlock_buf; + } cond_resched(); @@ -1101,15 +1111,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, ubi_warn("cannot read data back from PEB %d", to); else - err = 1; + err = -EAGAIN; goto out_unlock_buf; } cond_resched(); if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { - ubi_warn("read data back from PEB %d - it is different", - to); + ubi_warn("read data back from PEB %d and it is " + "different", to); + err = -EINVAL; goto out_unlock_buf; } } diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 2fb64be44f1b..fe81039f2a7c 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -465,7 +465,7 @@ out: * This function synchronously erases physical eraseblock @pnum. If @torture * flag is not zero, the physical eraseblock is checked by means of writing * different patterns to it and reading them back. If the torturing is enabled, - * the physical eraseblock is erased more then once. + * the physical eraseblock is erased more than once. 
* * This function returns the number of erasures made in case of success, %-EIO * if the erasure failed or the torturing test failed, and other negative error @@ -637,8 +637,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, dbg_io("read EC header from PEB %d", pnum); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); - if (UBI_IO_DEBUG) - verbose = 1; err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); if (err) { @@ -685,6 +683,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, if (verbose) ubi_warn("no EC header found at PEB %d, " "only 0xFF bytes", pnum); + else if (UBI_IO_DEBUG) + dbg_msg("no EC header found at PEB %d, " + "only 0xFF bytes", pnum); return UBI_IO_PEB_EMPTY; } @@ -696,7 +697,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad magic number at PEB %d: %08x instead of " "%08x", pnum, magic, UBI_EC_HDR_MAGIC); ubi_dbg_dump_ec_hdr(ec_hdr); - } + } else if (UBI_IO_DEBUG) + dbg_msg("bad magic number at PEB %d: %08x instead of " + "%08x", pnum, magic, UBI_EC_HDR_MAGIC); return UBI_IO_BAD_EC_HDR; } @@ -708,7 +711,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad EC header CRC at PEB %d, calculated " "%#08x, read %#08x", pnum, crc, hdr_crc); ubi_dbg_dump_ec_hdr(ec_hdr); - } + } else if (UBI_IO_DEBUG) + dbg_msg("bad EC header CRC at PEB %d, calculated " + "%#08x, read %#08x", pnum, crc, hdr_crc); return UBI_IO_BAD_EC_HDR; } @@ -912,8 +917,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, dbg_io("read VID header from PEB %d", pnum); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); - if (UBI_IO_DEBUG) - verbose = 1; p = (char *)vid_hdr - ubi->vid_hdr_shift; err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, @@ -960,6 +963,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, if (verbose) ubi_warn("no VID header found at PEB %d, " "only 0xFF bytes", pnum); + else if (UBI_IO_DEBUG) + dbg_msg("no VID header found at PEB %d, " + "only 0xFF bytes", pnum); return UBI_IO_PEB_FREE; } @@ -971,7 +977,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad magic number at PEB %d: %08x instead of " "%08x", pnum, magic, UBI_VID_HDR_MAGIC); ubi_dbg_dump_vid_hdr(vid_hdr); - } + } else if (UBI_IO_DEBUG) + dbg_msg("bad magic number at PEB %d: %08x instead of " + "%08x", pnum, magic, UBI_VID_HDR_MAGIC); return UBI_IO_BAD_VID_HDR; } @@ -983,7 +991,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad CRC at PEB %d, calculated %#08x, " "read %#08x", pnum, crc, hdr_crc); ubi_dbg_dump_vid_hdr(vid_hdr); - } + } else if (UBI_IO_DEBUG) + dbg_msg("bad CRC at PEB %d, calculated %#08x, " + "read %#08x", pnum, crc, hdr_crc); return UBI_IO_BAD_VID_HDR; } @@ -1024,7 +1034,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, err = paranoid_check_peb_ec_hdr(ubi, pnum); if (err) - return err > 0 ? -EINVAL: err; + return err > 0 ? -EINVAL : err; vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC); vid_hdr->version = UBI_VERSION; diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 5d9bcf109c13..4abbe573fa40 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -564,7 +564,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap); * @dtype: expected data type * * This function maps an un-mapped logical eraseblock @lnum to a physical - * eraseblock. This means, that after a successfull invocation of this + * eraseblock. 
This means, that after a successful invocation of this * function the logical eraseblock @lnum will be empty (contain only %0xFF * bytes) and be mapped to a physical eraseblock, even if an unclean reboot * happens. diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 41d47e1cf15c..ecde202a5a12 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -478,7 +478,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, return 0; } else { /* - * This logical eraseblock is older then the one found + * This logical eraseblock is older than the one found * previously. */ if (cmp_res & 4) diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h index 2ad940409053..8419fdccc79c 100644 --- a/drivers/mtd/ubi/ubi-media.h +++ b/drivers/mtd/ubi/ubi-media.h @@ -135,7 +135,7 @@ enum { * The erase counter header takes 64 bytes and has a plenty of unused space for * future usage. The unused fields are zeroed. The @version field is used to * indicate the version of UBI implementation which is supposed to be able to - * work with this UBI image. If @version is greater then the current UBI + * work with this UBI image. If @version is greater than the current UBI * version, the image is rejected. This may be useful in future if something * is changed radically. This field is duplicated in the volume identifier * header. @@ -187,7 +187,7 @@ struct ubi_ec_hdr { * (sequence number) is used to distinguish between older and newer versions of * logical eraseblocks. * - * There are 2 situations when there may be more then one physical eraseblock + * There are 2 situations when there may be more than one physical eraseblock * corresponding to the same logical eraseblock, i.e., having the same @vol_id * and @lnum values in the volume identifier header. Suppose we have a logical * eraseblock L and it is mapped to the physical eraseblock P. diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 1c3fa18c26a7..4a8ec485c91d 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -74,6 +74,13 @@ #define UBI_IO_RETRIES 3 /* + * Length of the protection queue. The length is effectively equivalent to the + * number of (global) erase cycles PEBs are protected from the wear-leveling + * worker. + */ +#define UBI_PROT_QUEUE_LEN 10 + +/* * Error codes returned by the I/O sub-system. * * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only @@ -95,7 +102,8 @@ enum { /** * struct ubi_wl_entry - wear-leveling entry. - * @rb: link in the corresponding RB-tree + * @u.rb: link in the corresponding (free/used) RB-tree + * @u.list: link in the protection queue * @ec: erase counter * @pnum: physical eraseblock number * @@ -104,7 +112,10 @@ enum { * RB-trees. See WL sub-system for details. 
*/ struct ubi_wl_entry { - struct rb_node rb; + union { + struct rb_node rb; + struct list_head list; + } u; int ec; int pnum; }; @@ -288,7 +299,7 @@ struct ubi_wl_entry; * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling * * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end - * of UBI ititializetion + * of UBI initialization * @vtbl_slots: how many slots are available in the volume table * @vtbl_size: size of the volume table in bytes * @vtbl: in-RAM volume table copy @@ -306,18 +317,17 @@ struct ubi_wl_entry; * @used: RB-tree of used physical eraseblocks * @free: RB-tree of free physical eraseblocks * @scrub: RB-tree of physical eraseblocks which need scrubbing - * @prot: protection trees - * @prot.pnum: protection tree indexed by physical eraseblock numbers - * @prot.aec: protection tree indexed by absolute erase counter value - * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, - * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works - * fields + * @pq: protection queue (contain physical eraseblocks which are temporarily + * protected from the wear-leveling worker) + * @pq_head: protection queue head + * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, + * @move_to, @move_to_put @erase_pending, @wl_scheduled and @works + * fields * @move_mutex: serializes eraseblock moves - * @work_sem: sycnhronizes the WL worker with use tasks + * @work_sem: synchronizes the WL worker with use tasks * @wl_scheduled: non-zero if the wear-leveling was scheduled * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any * physical eraseblock - * @abs_ec: absolute erase counter * @move_from: physical eraseblock from where the data is being moved * @move_to: physical eraseblock where the data is being moved to * @move_to_put: if the "to" PEB was put @@ -351,11 +361,11 @@ struct ubi_wl_entry; * * @peb_buf1: a buffer of PEB size used for different purposes * @peb_buf2: another buffer of PEB size used for different purposes - * @buf_mutex: proptects @peb_buf1 and @peb_buf2 + * @buf_mutex: protects @peb_buf1 and @peb_buf2 * @ckvol_mutex: serializes static volume checking when opening - * @mult_mutex: serializes operations on multiple volumes, like re-nameing + * @mult_mutex: serializes operations on multiple volumes, like re-naming * @dbg_peb_buf: buffer of PEB size used for debugging - * @dbg_buf_mutex: proptects @dbg_peb_buf + * @dbg_buf_mutex: protects @dbg_peb_buf */ struct ubi_device { struct cdev cdev; @@ -392,16 +402,13 @@ struct ubi_device { struct rb_root used; struct rb_root free; struct rb_root scrub; - struct { - struct rb_root pnum; - struct rb_root aec; - } prot; + struct list_head pq[UBI_PROT_QUEUE_LEN]; + int pq_head; spinlock_t wl_lock; struct mutex move_mutex; struct rw_semaphore work_sem; int wl_scheduled; struct ubi_wl_entry **lookuptbl; - unsigned long long abs_ec; struct ubi_wl_entry *move_from; struct ubi_wl_entry *move_to; int move_to_put; diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 3531ca9a1e24..22e1d7398fce 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -329,7 +329,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) vol->dev.devt = dev; vol->dev.class = ubi_class; - sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); + dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); err = device_register(&vol->dev); if (err) { ubi_err("cannot register device"); @@ -678,7 +678,7 @@ 
int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) vol->dev.parent = &ubi->dev; vol->dev.devt = dev; vol->dev.class = ubi_class; - sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); + dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); err = device_register(&vol->dev); if (err) goto out_gluebi; diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 333c8941552f..1afc61e7455d 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -577,7 +577,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { /* Auto re-size flag may be set only for one volume */ if (ubi->autoresize_vol_id != -1) { - ubi_err("more then one auto-resize volume (%d " + ubi_err("more than one auto-resize volume (%d " "and %d)", ubi->autoresize_vol_id, i); kfree(vol); return -EINVAL; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index dcb6dac1dc54..891534f8210d 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -22,7 +22,7 @@ * UBI wear-leveling sub-system. * * This sub-system is responsible for wear-leveling. It works in terms of - * physical* eraseblocks and erase counters and knows nothing about logical + * physical eraseblocks and erase counters and knows nothing about logical * eraseblocks, volumes, etc. From this sub-system's perspective all physical * eraseblocks are of two types - used and free. Used physical eraseblocks are * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical @@ -55,8 +55,39 @@ * * As it was said, for the UBI sub-system all physical eraseblocks are either * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while - * used eraseblocks are kept in a set of different RB-trees: @wl->used, - * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. + * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or + * (temporarily) in the @wl->pq queue. + * + * When the WL sub-system returns a physical eraseblock, the physical + * eraseblock is protected from being moved for some "time". For this reason, + * the physical eraseblock is not directly moved from the @wl->free tree to the + * @wl->used tree. There is a protection queue in between where this + * physical eraseblock is temporarily stored (@wl->pq). + * + * All this protection stuff is needed because: + * o we don't want to move physical eraseblocks just after we have given them + * to the user; instead, we first want to let users fill them up with data; + * + * o there is a chance that the user will put the physical eraseblock very + * soon, so it makes sense not to move it for some time, but wait; this is + * especially important in case of "short term" physical eraseblocks. + * + * Physical eraseblocks stay protected only for limited time. But the "time" is + * measured in erase cycles in this case. This is implemented with help of the + * protection queue. Eraseblocks are put to the tail of this queue when they + * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the + * head of the queue on each erase operation (for any eraseblock). So the + * length of the queue defines how many (global) erase cycles PEBs are protected. + * + * To put it differently, each physical eraseblock has 2 main states: free and + * used. The former state corresponds to the @wl->free tree. 
The latter state + * is split up on several sub-states: + * o the WL movement is allowed (@wl->used tree); + * o the WL movement is temporarily prohibited (@wl->pq queue); + * o scrubbing is needed (@wl->scrub tree). + * + * Depending on the sub-state, wear-leveling entries of the used physical + * eraseblocks may be kept in one of those structures. * * Note, in this implementation, we keep a small in-RAM object for each physical * eraseblock. This is surely not a scalable solution. But it appears to be good @@ -70,9 +101,6 @@ * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we * pick target PEB with an average EC if our PEB is not very "old". This is a * room for future re-works of the WL sub-system. - * - * Note: the stuff with protection trees looks too complex and is difficult to - * understand. Should be fixed. */ #include <linux/slab.h> @@ -85,14 +113,6 @@ #define WL_RESERVED_PEBS 1 /* - * How many erase cycles are short term, unknown, and long term physical - * eraseblocks protected. - */ -#define ST_PROTECTION 16 -#define U_PROTECTION 10 -#define LT_PROTECTION 4 - -/* * Maximum difference between two erase counters. If this threshold is * exceeded, the WL sub-system starts moving data from used physical * eraseblocks with low erase counter to free physical eraseblocks with high @@ -108,7 +128,7 @@ * situation when the picked physical eraseblock is constantly erased after the * data is written to it. So, we have a constant which limits the highest erase * counter of the free physical eraseblock to pick. Namely, the WL sub-system - * does not pick eraseblocks with erase counter greater then the lowest erase + * does not pick eraseblocks with erase counter greater than the lowest erase * counter plus %WL_FREE_MAX_DIFF. */ #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) @@ -120,64 +140,9 @@ #define WL_MAX_FAILURES 32 /** - * struct ubi_wl_prot_entry - PEB protection entry. - * @rb_pnum: link in the @wl->prot.pnum RB-tree - * @rb_aec: link in the @wl->prot.aec RB-tree - * @abs_ec: the absolute erase counter value when the protection ends - * @e: the wear-leveling entry of the physical eraseblock under protection - * - * When the WL sub-system returns a physical eraseblock, the physical - * eraseblock is protected from being moved for some "time". For this reason, - * the physical eraseblock is not directly moved from the @wl->free tree to the - * @wl->used tree. There is one more tree in between where this physical - * eraseblock is temporarily stored (@wl->prot). - * - * All this protection stuff is needed because: - * o we don't want to move physical eraseblocks just after we have given them - * to the user; instead, we first want to let users fill them up with data; - * - * o there is a chance that the user will put the physical eraseblock very - * soon, so it makes sense not to move it for some time, but wait; this is - * especially important in case of "short term" physical eraseblocks. - * - * Physical eraseblocks stay protected only for limited time. But the "time" is - * measured in erase cycles in this case. This is implemented with help of the - * absolute erase counter (@wl->abs_ec). When it reaches certain value, the - * physical eraseblocks are moved from the protection trees (@wl->prot.*) to - * the @wl->used tree. - * - * Protected physical eraseblocks are searched by physical eraseblock number - * (when they are put) and by the absolute erase counter (to check if it is - * time to move them to the @wl->used tree). 
So there are actually 2 RB-trees - * storing the protected physical eraseblocks: @wl->prot.pnum and - * @wl->prot.aec. They are referred to as the "protection" trees. The - * first one is indexed by the physical eraseblock number. The second one is - * indexed by the absolute erase counter. Both trees store - * &struct ubi_wl_prot_entry objects. - * - * Each physical eraseblock has 2 main states: free and used. The former state - * corresponds to the @wl->free tree. The latter state is split up on several - * sub-states: - * o the WL movement is allowed (@wl->used tree); - * o the WL movement is temporarily prohibited (@wl->prot.pnum and - * @wl->prot.aec trees); - * o scrubbing is needed (@wl->scrub tree). - * - * Depending on the sub-state, wear-leveling entries of the used physical - * eraseblocks may be kept in one of those trees. - */ -struct ubi_wl_prot_entry { - struct rb_node rb_pnum; - struct rb_node rb_aec; - unsigned long long abs_ec; - struct ubi_wl_entry *e; -}; - -/** * struct ubi_work - UBI work description data structure. * @list: a link in the list of pending works * @func: worker function - * @priv: private data of the worker function * @e: physical eraseblock to erase * @torture: if the physical eraseblock has to be tortured * @@ -198,9 +163,11 @@ struct ubi_work { static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec); static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root); +static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e); #else #define paranoid_check_ec(ubi, pnum, ec) 0 #define paranoid_check_in_wl_tree(e, root) +#define paranoid_check_in_pq(ubi, e) 0 #endif /** @@ -220,7 +187,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) struct ubi_wl_entry *e1; parent = *p; - e1 = rb_entry(parent, struct ubi_wl_entry, rb); + e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); if (e->ec < e1->ec) p = &(*p)->rb_left; @@ -235,8 +202,8 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) } } - rb_link_node(&e->rb, parent, p); - rb_insert_color(&e->rb, root); + rb_link_node(&e->u.rb, parent, p); + rb_insert_color(&e->u.rb, root); } /** @@ -331,7 +298,7 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) while (p) { struct ubi_wl_entry *e1; - e1 = rb_entry(p, struct ubi_wl_entry, rb); + e1 = rb_entry(p, struct ubi_wl_entry, u.rb); if (e->pnum == e1->pnum) { ubi_assert(e == e1); @@ -355,50 +322,24 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) } /** - * prot_tree_add - add physical eraseblock to protection trees. + * prot_queue_add - add physical eraseblock to the protection queue. * @ubi: UBI device description object * @e: the physical eraseblock to add - * @pe: protection entry object to use - * @abs_ec: absolute erase counter value when this physical eraseblock has - * to be removed from the protection trees. * - * @wl->lock has to be locked. + * This function adds @e to the tail of the protection queue @ubi->pq, where + * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be + * temporarily protected from the wear-leveling worker. Note, @wl->lock has to + * be locked. 
*/ -static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e, - struct ubi_wl_prot_entry *pe, int abs_ec) +static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) { - struct rb_node **p, *parent = NULL; - struct ubi_wl_prot_entry *pe1; - - pe->e = e; - pe->abs_ec = ubi->abs_ec + abs_ec; - - p = &ubi->prot.pnum.rb_node; - while (*p) { - parent = *p; - pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum); - - if (e->pnum < pe1->e->pnum) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&pe->rb_pnum, parent, p); - rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum); - - p = &ubi->prot.aec.rb_node; - parent = NULL; - while (*p) { - parent = *p; - pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec); + int pq_tail = ubi->pq_head - 1; - if (pe->abs_ec < pe1->abs_ec) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&pe->rb_aec, parent, p); - rb_insert_color(&pe->rb_aec, &ubi->prot.aec); + if (pq_tail < 0) + pq_tail = UBI_PROT_QUEUE_LEN - 1; + ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN); + list_add_tail(&e->u.list, &ubi->pq[pq_tail]); + dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec); } /** @@ -414,14 +355,14 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max) struct rb_node *p; struct ubi_wl_entry *e; - e = rb_entry(rb_first(root), struct ubi_wl_entry, rb); + e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); max += e->ec; p = root->rb_node; while (p) { struct ubi_wl_entry *e1; - e1 = rb_entry(p, struct ubi_wl_entry, rb); + e1 = rb_entry(p, struct ubi_wl_entry, u.rb); if (e1->ec >= max) p = p->rb_left; else { @@ -443,17 +384,12 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max) */ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) { - int err, protect, medium_ec; + int err, medium_ec; struct ubi_wl_entry *e, *first, *last; - struct ubi_wl_prot_entry *pe; ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || dtype == UBI_UNKNOWN); - pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); - if (!pe) - return -ENOMEM; - retry: spin_lock(&ubi->wl_lock); if (!ubi->free.rb_node) { @@ -461,16 +397,13 @@ retry: ubi_assert(list_empty(&ubi->works)); ubi_err("no free eraseblocks"); spin_unlock(&ubi->wl_lock); - kfree(pe); return -ENOSPC; } spin_unlock(&ubi->wl_lock); err = produce_free_peb(ubi); - if (err < 0) { - kfree(pe); + if (err < 0) return err; - } goto retry; } @@ -483,7 +416,6 @@ retry: * %WL_FREE_MAX_DIFF. */ e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); - protect = LT_PROTECTION; break; case UBI_UNKNOWN: /* @@ -492,81 +424,63 @@ retry: * eraseblock with erase counter greater or equivalent than the * lowest erase counter plus %WL_FREE_MAX_DIFF. */ - first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); - last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb); + first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, + u.rb); + last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb); if (last->ec - first->ec < WL_FREE_MAX_DIFF) e = rb_entry(ubi->free.rb_node, - struct ubi_wl_entry, rb); + struct ubi_wl_entry, u.rb); else { medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; e = find_wl_entry(&ubi->free, medium_ec); } - protect = U_PROTECTION; break; case UBI_SHORTTERM: /* * For short term data we pick a physical eraseblock with the * lowest erase counter as we expect it will be erased soon. 
*/ - e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); - protect = ST_PROTECTION; + e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); break; default: - protect = 0; - e = NULL; BUG(); } + paranoid_check_in_wl_tree(e, &ubi->free); + /* - * Move the physical eraseblock to the protection trees where it will + * Move the physical eraseblock to the protection queue where it will * be protected from being moved for some time. */ - paranoid_check_in_wl_tree(e, &ubi->free); - rb_erase(&e->rb, &ubi->free); - prot_tree_add(ubi, e, pe, protect); - - dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect); + rb_erase(&e->u.rb, &ubi->free); + dbg_wl("PEB %d EC %d", e->pnum, e->ec); + prot_queue_add(ubi, e); spin_unlock(&ubi->wl_lock); - return e->pnum; } /** - * prot_tree_del - remove a physical eraseblock from the protection trees + * prot_queue_del - remove a physical eraseblock from the protection queue. * @ubi: UBI device description object * @pnum: the physical eraseblock to remove * - * This function returns PEB @pnum from the protection trees and returns zero - * in case of success and %-ENODEV if the PEB was not found in the protection - * trees. + * This function deletes PEB @pnum from the protection queue and returns zero + * in case of success and %-ENODEV if the PEB was not found. */ -static int prot_tree_del(struct ubi_device *ubi, int pnum) +static int prot_queue_del(struct ubi_device *ubi, int pnum) { - struct rb_node *p; - struct ubi_wl_prot_entry *pe = NULL; - - p = ubi->prot.pnum.rb_node; - while (p) { - - pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); - - if (pnum == pe->e->pnum) - goto found; + struct ubi_wl_entry *e; - if (pnum < pe->e->pnum) - p = p->rb_left; - else - p = p->rb_right; - } + e = ubi->lookuptbl[pnum]; + if (!e) + return -ENODEV; - return -ENODEV; + if (paranoid_check_in_pq(ubi, e)) + return -ENODEV; -found: - ubi_assert(pe->e->pnum == pnum); - rb_erase(&pe->rb_aec, &ubi->prot.aec); - rb_erase(&pe->rb_pnum, &ubi->prot.pnum); - kfree(pe); + list_del(&e->u.list); + dbg_wl("deleted PEB %d from the protection queue", e->pnum); return 0; } @@ -632,47 +546,47 @@ out_free: } /** - * check_protection_over - check if it is time to stop protecting some PEBs. + * serve_prot_queue - check if it is time to stop protecting PEBs. * @ubi: UBI device description object * - * This function is called after each erase operation, when the absolute erase - * counter is incremented, to check if some physical eraseblock have not to be - * protected any longer. These physical eraseblocks are moved from the - * protection trees to the used tree. + * This function is called after each erase operation and removes PEBs from the + * tail of the protection queue. These PEBs have been protected for long enough + * and should be moved to the used tree. */ -static void check_protection_over(struct ubi_device *ubi) +static void serve_prot_queue(struct ubi_device *ubi) { - struct ubi_wl_prot_entry *pe; + struct ubi_wl_entry *e, *tmp; + int count; /* * There may be several protected physical eraseblock to remove, * process them all. 
*/ - while (1) { - spin_lock(&ubi->wl_lock); - if (!ubi->prot.aec.rb_node) { - spin_unlock(&ubi->wl_lock); - break; - } - - pe = rb_entry(rb_first(&ubi->prot.aec), - struct ubi_wl_prot_entry, rb_aec); +repeat: + count = 0; + spin_lock(&ubi->wl_lock); + list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { + dbg_wl("PEB %d EC %d protection over, move to used tree", + e->pnum, e->ec); - if (pe->abs_ec > ubi->abs_ec) { + list_del(&e->u.list); + wl_tree_add(e, &ubi->used); + if (count++ > 32) { + /* + * Let's be nice and avoid holding the spinlock for + * too long. + */ spin_unlock(&ubi->wl_lock); - break; + cond_resched(); + goto repeat; } - - dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu", - pe->e->pnum, ubi->abs_ec, pe->abs_ec); - rb_erase(&pe->rb_aec, &ubi->prot.aec); - rb_erase(&pe->rb_pnum, &ubi->prot.pnum); - wl_tree_add(pe->e, &ubi->used); - spin_unlock(&ubi->wl_lock); - - kfree(pe); - cond_resched(); } + + ubi->pq_head += 1; + if (ubi->pq_head == UBI_PROT_QUEUE_LEN) + ubi->pq_head = 0; + ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); + spin_unlock(&ubi->wl_lock); } /** @@ -680,8 +594,8 @@ static void check_protection_over(struct ubi_device *ubi) * @ubi: UBI device description object * @wrk: the work to schedule * - * This function enqueues a work defined by @wrk to the tail of the pending - * works list. + * This function adds a work defined by @wrk to the tail of the pending works + * list. */ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) { @@ -739,13 +653,11 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int cancel) { - int err, put = 0, scrubbing = 0, protect = 0; - struct ubi_wl_prot_entry *uninitialized_var(pe); + int err, scrubbing = 0, torture = 0; struct ubi_wl_entry *e1, *e2; struct ubi_vid_hdr *vid_hdr; kfree(wrk); - if (cancel) return 0; @@ -781,7 +693,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, * highly worn-out free physical eraseblock. If the erase * counters differ much enough, start wear-leveling. 
*/ - e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); + e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { @@ -790,21 +702,21 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, goto out_cancel; } paranoid_check_in_wl_tree(e1, &ubi->used); - rb_erase(&e1->rb, &ubi->used); + rb_erase(&e1->u.rb, &ubi->used); dbg_wl("move PEB %d EC %d to PEB %d EC %d", e1->pnum, e1->ec, e2->pnum, e2->ec); } else { /* Perform scrubbing */ scrubbing = 1; - e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); + e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); paranoid_check_in_wl_tree(e1, &ubi->scrub); - rb_erase(&e1->rb, &ubi->scrub); + rb_erase(&e1->u.rb, &ubi->scrub); dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); } paranoid_check_in_wl_tree(e2, &ubi->free); - rb_erase(&e2->rb, &ubi->free); + rb_erase(&e2->u.rb, &ubi->free); ubi->move_from = e1; ubi->move_to = e2; spin_unlock(&ubi->wl_lock); @@ -844,46 +756,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); if (err) { - + if (err == -EAGAIN) + goto out_not_moved; if (err < 0) goto out_error; - if (err == 1) + if (err == 2) { + /* Target PEB write error, torture it */ + torture = 1; goto out_not_moved; + } /* - * For some reason the LEB was not moved - it might be because - * the volume is being deleted. We should prevent this PEB from - * being selected for wear-levelling movement for some "time", - * so put it to the protection tree. + * The LEB has not been moved because the volume is being + * deleted or the PEB has been put meanwhile. We should prevent + * this PEB from being selected for wear-leveling movement + * again, so put it to the protection queue. */ - dbg_wl("cancelled moving PEB %d", e1->pnum); - pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); - if (!pe) { - err = -ENOMEM; - goto out_error; - } + dbg_wl("canceled moving PEB %d", e1->pnum); + ubi_assert(err == 1); + + ubi_free_vid_hdr(ubi, vid_hdr); + vid_hdr = NULL; + + spin_lock(&ubi->wl_lock); + prot_queue_add(ubi, e1); + ubi_assert(!ubi->move_to_put); + ubi->move_from = ubi->move_to = NULL; + ubi->wl_scheduled = 0; + spin_unlock(&ubi->wl_lock); - protect = 1; + e1 = NULL; + err = schedule_erase(ubi, e2, 0); + if (err) + goto out_error; + mutex_unlock(&ubi->move_mutex); + return 0; } + /* The PEB has been successfully moved */ ubi_free_vid_hdr(ubi, vid_hdr); - if (scrubbing && !protect) + vid_hdr = NULL; + if (scrubbing) ubi_msg("scrubbed PEB %d, data moved to PEB %d", e1->pnum, e2->pnum); spin_lock(&ubi->wl_lock); - if (protect) - prot_tree_add(ubi, e1, pe, protect); - if (!ubi->move_to_put) + if (!ubi->move_to_put) { wl_tree_add(e2, &ubi->used); - else - put = 1; + e2 = NULL; + } ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); - if (put) { + err = schedule_erase(ubi, e1, 0); + if (err) { + e1 = NULL; + goto out_error; + } + + if (e2) { /* * Well, the target PEB was put meanwhile, schedule it for * erasure. 
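The prot_queue_add() and serve_prot_queue() pair introduced above turns PEB protection into a circular buffer of list heads: a freshly returned PEB is linked into the slot just behind @pq_head, and each erase operation empties the slot at @pq_head and then advances it, so a PEB ages out after exactly UBI_PROT_QUEUE_LEN global erase cycles. The stand-alone user-space model below sketches that arithmetic; it is an illustration only, and N stands in for UBI_PROT_QUEUE_LEN, whose definition lies outside the hunks shown here (10 is assumed purely for demonstration).

#include <stdio.h>

#define N 10 /* stands in for UBI_PROT_QUEUE_LEN; assumed value, defined elsewhere */

static int pq[N];   /* pq[i] = number of PEBs parked in age slot i */
static int pq_head; /* oldest slot; emptied on the next erase */

/* Mirrors prot_queue_add(): park a PEB in the slot just behind the head. */
static void model_prot_queue_add(void)
{
	int pq_tail = pq_head - 1;

	if (pq_tail < 0)
		pq_tail = N - 1;
	pq[pq_tail]++;
}

/* Mirrors serve_prot_queue(): one erase empties the head slot, then advances. */
static int model_serve_prot_queue(void)
{
	int released = pq[pq_head];

	pq[pq_head] = 0; /* in UBI these PEBs move to the @used tree */
	pq_head = (pq_head + 1) % N;
	return released;
}

int main(void)
{
	int erases = 0;

	model_prot_queue_add(); /* one PEB handed out, as by ubi_wl_get_peb() */
	while (!model_serve_prot_queue())
		erases++;
	printf("protected for %d erase cycles\n", erases + 1);
	return 0;
}

Compiled and run, this prints "protected for 10 erase cycles", matching the claim in the reworked wl.c comment that the queue length defines how many global erase cycles a PEB stays protected.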
@@ -894,13 +827,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, goto out_error; } - if (!protect) { - err = schedule_erase(ubi, e1, 0); - if (err) - goto out_error; - } - - dbg_wl("done"); mutex_unlock(&ubi->move_mutex); return 0; @@ -908,20 +834,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, /* * For some reasons the LEB was not moved, might be an error, might be * something else. @e1 was not changed, so return it back. @e2 might - * be changed, schedule it for erasure. + * have been changed, schedule it for erasure. */ out_not_moved: + dbg_wl("canceled moving PEB %d", e1->pnum); ubi_free_vid_hdr(ubi, vid_hdr); + vid_hdr = NULL; spin_lock(&ubi->wl_lock); if (scrubbing) wl_tree_add(e1, &ubi->scrub); else wl_tree_add(e1, &ubi->used); + ubi_assert(!ubi->move_to_put); ubi->move_from = ubi->move_to = NULL; - ubi->move_to_put = ubi->wl_scheduled = 0; + ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); - err = schedule_erase(ubi, e2, 0); + e1 = NULL; + err = schedule_erase(ubi, e2, torture); if (err) goto out_error; @@ -938,8 +868,10 @@ out_error: ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); - kmem_cache_free(ubi_wl_entry_slab, e1); - kmem_cache_free(ubi_wl_entry_slab, e2); + if (e1) + kmem_cache_free(ubi_wl_entry_slab, e1); + if (e2) + kmem_cache_free(ubi_wl_entry_slab, e2); ubi_ro_mode(ubi); mutex_unlock(&ubi->move_mutex); @@ -985,10 +917,10 @@ static int ensure_wear_leveling(struct ubi_device *ubi) /* * We schedule wear-leveling only if the difference between the * lowest erase counter of used physical eraseblocks and a high - * erase counter of free physical eraseblocks is greater then + * erase counter of free physical eraseblocks is greater than * %UBI_WL_THRESHOLD. */ - e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); + e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) @@ -1050,7 +982,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, kfree(wl_wrk); spin_lock(&ubi->wl_lock); - ubi->abs_ec += 1; wl_tree_add(e, &ubi->free); spin_unlock(&ubi->wl_lock); @@ -1058,7 +989,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, * One more erase operation has happened, take care about * protected physical eraseblocks. 
*/ - check_protection_over(ubi); + serve_prot_queue(ubi); /* And take care about wear-leveling */ err = ensure_wear_leveling(ubi); @@ -1190,12 +1121,12 @@ retry: } else { if (in_wl_tree(e, &ubi->used)) { paranoid_check_in_wl_tree(e, &ubi->used); - rb_erase(&e->rb, &ubi->used); + rb_erase(&e->u.rb, &ubi->used); } else if (in_wl_tree(e, &ubi->scrub)) { paranoid_check_in_wl_tree(e, &ubi->scrub); - rb_erase(&e->rb, &ubi->scrub); + rb_erase(&e->u.rb, &ubi->scrub); } else { - err = prot_tree_del(ubi, e->pnum); + err = prot_queue_del(ubi, e->pnum); if (err) { ubi_err("PEB %d not found", pnum); ubi_ro_mode(ubi); @@ -1255,11 +1186,11 @@ retry: if (in_wl_tree(e, &ubi->used)) { paranoid_check_in_wl_tree(e, &ubi->used); - rb_erase(&e->rb, &ubi->used); + rb_erase(&e->u.rb, &ubi->used); } else { int err; - err = prot_tree_del(ubi, e->pnum); + err = prot_queue_del(ubi, e->pnum); if (err) { ubi_err("PEB %d not found", pnum); ubi_ro_mode(ubi); @@ -1290,7 +1221,7 @@ int ubi_wl_flush(struct ubi_device *ubi) int err; /* - * Erase while the pending works queue is not empty, but not more then + * Erase while the pending works queue is not empty, but not more than * the number of currently pending works. */ dbg_wl("flush (%d pending works)", ubi->works_count); @@ -1308,7 +1239,7 @@ int ubi_wl_flush(struct ubi_device *ubi) up_write(&ubi->work_sem); /* - * And in case last was the WL worker and it cancelled the LEB + * And in case last was the WL worker and it canceled the LEB * movement, flush again. */ while (ubi->works_count) { @@ -1337,11 +1268,11 @@ static void tree_destroy(struct rb_root *root) else if (rb->rb_right) rb = rb->rb_right; else { - e = rb_entry(rb, struct ubi_wl_entry, rb); + e = rb_entry(rb, struct ubi_wl_entry, u.rb); rb = rb_parent(rb); if (rb) { - if (rb->rb_left == &e->rb) + if (rb->rb_left == &e->u.rb) rb->rb_left = NULL; else rb->rb_right = NULL; @@ -1436,15 +1367,13 @@ static void cancel_pending(struct ubi_device *ubi) */ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) { - int err; + int err, i; struct rb_node *rb1, *rb2; struct ubi_scan_volume *sv; struct ubi_scan_leb *seb, *tmp; struct ubi_wl_entry *e; - ubi->used = ubi->free = ubi->scrub = RB_ROOT; - ubi->prot.pnum = ubi->prot.aec = RB_ROOT; spin_lock_init(&ubi->wl_lock); mutex_init(&ubi->move_mutex); init_rwsem(&ubi->work_sem); @@ -1458,6 +1387,10 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) if (!ubi->lookuptbl) return err; + for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) + INIT_LIST_HEAD(&ubi->pq[i]); + ubi->pq_head = 0; + list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { cond_resched(); @@ -1552,33 +1485,18 @@ out_free: } /** - * protection_trees_destroy - destroy the protection RB-trees. + * protection_queue_destroy - destroy the protection queue. 
* @ubi: UBI device description object */ -static void protection_trees_destroy(struct ubi_device *ubi) +static void protection_queue_destroy(struct ubi_device *ubi) { - struct rb_node *rb; - struct ubi_wl_prot_entry *pe; - - rb = ubi->prot.aec.rb_node; - while (rb) { - if (rb->rb_left) - rb = rb->rb_left; - else if (rb->rb_right) - rb = rb->rb_right; - else { - pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec); - - rb = rb_parent(rb); - if (rb) { - if (rb->rb_left == &pe->rb_aec) - rb->rb_left = NULL; - else - rb->rb_right = NULL; - } + int i; + struct ubi_wl_entry *e, *tmp; - kmem_cache_free(ubi_wl_entry_slab, pe->e); - kfree(pe); + for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { + list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { + list_del(&e->u.list); + kmem_cache_free(ubi_wl_entry_slab, e); } } } @@ -1591,7 +1509,7 @@ void ubi_wl_close(struct ubi_device *ubi) { dbg_wl("close the WL sub-system"); cancel_pending(ubi); - protection_trees_destroy(ubi); + protection_queue_destroy(ubi); tree_destroy(&ubi->used); tree_destroy(&ubi->free); tree_destroy(&ubi->scrub); @@ -1661,4 +1579,27 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, return 1; } +/** + * paranoid_check_in_pq - check if wear-leveling entry is in the protection + * queue. + * @ubi: UBI device description object + * @e: the wear-leveling entry to check + * + * This function returns zero if @e is in @ubi->pq and %1 if it is not. + */ +static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e) +{ + struct ubi_wl_entry *p; + int i; + + for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) + list_for_each_entry(p, &ubi->pq[i], u.list) + if (p == e) + return 0; + + ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue", + e->pnum, e->ec); + ubi_dbg_dump_stack(); + return 1; +} #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
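A closing note on the data-structure trade made by this patch: the removed prot_tree_del() had to search an RB-tree keyed by PEB number, whereas the new prot_queue_del() is constant time, because every ubi_wl_entry is already reachable through @lookuptbl and embeds its own list node, so unlinking never searches. The user-space sketch below illustrates that pattern under stated assumptions: the list helpers are stripped-down stand-ins for the kernel's <linux/list.h>, and wl_entry, MAX_PEBS, and prot_queue_del_model() are hypothetical names introduced only for this example.

#include <stdio.h>
#include <stdlib.h>

/* Minimal doubly-linked list, standing in for the kernel's <linux/list.h>. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

static void list_del(struct list_head *item)
{
	item->prev->next = item->next;
	item->next->prev = item->prev;
}

/* Cut-down analogue of struct ubi_wl_entry: the list node is embedded. */
struct wl_entry {
	struct list_head list;
	int pnum;
};

#define MAX_PEBS 16 /* arbitrary bound for this sketch */
static struct wl_entry *lookuptbl[MAX_PEBS];
static struct list_head pq_slot; /* one protection-queue slot */

/* Analogue of prot_queue_del(): O(1), no tree walk and no list scan. */
static int prot_queue_del_model(int pnum)
{
	struct wl_entry *e = lookuptbl[pnum];

	if (!e)
		return -1; /* plays the role of -ENODEV */
	list_del(&e->list);
	return 0;
}

int main(void)
{
	struct wl_entry *e = malloc(sizeof(*e));

	INIT_LIST_HEAD(&pq_slot);
	e->pnum = 7;
	lookuptbl[7] = e;
	list_add_tail(&e->list, &pq_slot);

	printf("delete PEB 7: %d\n", prot_queue_del_model(7));  /* 0: found, unlinked */
	printf("delete PEB 8: %d\n", prot_queue_del_model(8));  /* -1: not present */
	free(e);
	return 0;
}

The same property explains why the union added to struct ubi_wl_entry at the top of this diff is safe: an entry sits either in one of the RB-trees (@used, @free, @scrub) or in a protection-queue list, never both at once, so the rb_node and list_head can share storage.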