Merge git://git.infradead.org/mtd-2.6

* git://git.infradead.org/mtd-2.6: (120 commits)
  [MTD] Fix mtdoops.c compilation
  [MTD] [NOR] fix startup lock when using multiple nor flash chips
  [MTD] [DOC200x] eccbuf is statically defined and always evaluate to true
  [MTD] Fix maps/physmap.c compilation with CONFIG_PM
  [MTD] onenand: Add panic_write function to the onenand driver
  [MTD] mtdoops: Use the panic_write function when present
  [MTD] Add mtd panic_write function pointer
  [MTD] [NAND] Freescale enhanced Local Bus Controller FCM NAND support.
  [MTD] physmap.c: Add support for multiple resources
  [MTD] [NAND] Fix misparenthesization introduced by commit 78b65179...
  [MTD] [NAND] Fix Blackfin NFC ECC calculating bug with page size 512 bytes
  [MTD] [NAND] Remove wrong operation in PM function of the BF54x NFC driver
  [MTD] [NAND] Remove unused variable in plat_nand_remove
  [MTD] Unlocking all Intel flash that is locked on power up.
  [MTD] [NAND] at91_nand: Make mtdparts option can override board info
  [MTD] mtdoops: Various minor cleanups
  [MTD] mtdoops: Ensure sequential write to the buffer
  [MTD] mtdoops: Perform write operations in a workqueue
  [MTD] mtdoops: Add further error return code checking
  [MTD] [NOR] Test devtype, not definition in flash_probe(), drivers/mtd/devices/lart.c
  ...
commit a8e98d6d51
Linus Torvalds, 2008-02-07 10:20:31 -08:00
66 changed files with 4871 additions and 2108 deletions


@@ -150,6 +150,14 @@ config MTD_AFS_PARTS
for your particular device. It won't happen automatically. The
'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
config MTD_OF_PARTS
tristate "Flash partition map based on OF description"
depends on PPC_OF && MTD_PARTITIONS
help
This provides a partition parsing function which derives
the partition map from the children of the flash node,
as described in Documentation/powerpc/booting-without-of.txt.
comment "User Modules And Translation Layers"
config MTD_CHAR
@@ -286,6 +294,9 @@ config MTD_OOPS
buffer in a flash partition where it can be read back at some
later point.
To use, add console=ttyMTDx to the kernel command line,
where x is the MTD device number to use.
source "drivers/mtd/chips/Kconfig"
source "drivers/mtd/maps/Kconfig"


@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o


@@ -50,6 +50,7 @@
#define I82802AC 0x00ac
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F
#define AT49BV640D 0x02de
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -157,6 +158,47 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
}
#endif
/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
struct cfi_pri_atmel atmel_pri;
uint32_t features = 0;
/* Reverse byteswapping */
extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
memcpy(&atmel_pri, extp, sizeof(atmel_pri));
memset((char *)extp + 5, 0, sizeof(*extp) - 5);
printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
if (atmel_pri.Features & 0x01) /* chip erase supported */
features |= (1<<0);
if (atmel_pri.Features & 0x02) /* erase suspend supported */
features |= (1<<1);
if (atmel_pri.Features & 0x04) /* program suspend supported */
features |= (1<<2);
if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
features |= (1<<9);
if (atmel_pri.Features & 0x20) /* page mode read supported */
features |= (1<<7);
if (atmel_pri.Features & 0x40) /* queued erase supported */
features |= (1<<4);
if (atmel_pri.Features & 0x80) /* Protection bits supported */
features |= (1<<6);
extp->FeatureSupport = features;
/* burst write mode not supported */
cfi->cfiq->BufWriteTimeoutTyp = 0;
cfi->cfiq->BufWriteTimeoutMax = 0;
}
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@@ -227,13 +269,20 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
/*
* Some chips power-up with all sectors locked by default.
*/
static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
mtd->flags |= MTD_STUPID_LOCK;
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
if (cfip->FeatureSupport&32) {
printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
mtd->flags |= MTD_POWERUP_LOCK;
}
}
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
@@ -245,7 +294,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
#endif
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
{ MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
{ 0, 0, NULL, NULL }
};
@@ -277,7 +326,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
return NULL;
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
(extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
"version %c.%c.\n", extp->MajorVersion,
extp->MinorVersion);
@@ -752,6 +801,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
int ret;
DECLARE_WAITQUEUE(wait, current);
retry:
if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
@@ -808,6 +858,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
spin_unlock(contender->mutex);
}
/* Check if we already have suspended erase
* on this chip. Sleep. */
if (mode == FL_ERASING && shared->erasing
&& shared->erasing->oldstate == FL_ERASING) {
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
goto retry;
}
/* We now own it */
shared->writing = chip;
if (mode == FL_ERASING)
@@ -2294,7 +2358,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
struct flchip *chip;
int ret = 0;
if ((mtd->flags & MTD_STUPID_LOCK)
if ((mtd->flags & MTD_POWERUP_LOCK)
&& extp && (extp->FeatureSupport & (1 << 5)))
cfi_intelext_save_locks(mtd);
@@ -2405,7 +2469,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
spin_unlock(chip->mutex);
}
if ((mtd->flags & MTD_STUPID_LOCK)
if ((mtd->flags & MTD_POWERUP_LOCK)
&& extp && (extp->FeatureSupport & (1 << 5)))
cfi_intelext_restore_locks(mtd);
}


@@ -185,6 +185,10 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
extp->TopBottom = 2;
else
extp->TopBottom = 3;
/* burst write mode not supported */
cfi->cfiq->BufWriteTimeoutTyp = 0;
cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
@@ -213,10 +217,11 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
mtd->lock = cfi_atmel_lock;
mtd->unlock = cfi_atmel_unlock;
mtd->flags |= MTD_STUPID_LOCK;
mtd->flags |= MTD_POWERUP_LOCK;
}
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
@@ -229,7 +234,6 @@ static struct cfi_fixup cfi_fixup_table[] = {
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
@@ -338,10 +342,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
/* Modify the unlock address if we are in compatibility mode */
if ( /* x16 in x8 mode */
((cfi->device_type == CFI_DEVICETYPE_X8) &&
(cfi->cfiq->InterfaceDesc == 2)) ||
(cfi->cfiq->InterfaceDesc ==
CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
/* x32 in x16 mode */
((cfi->device_type == CFI_DEVICETYPE_X16) &&
(cfi->cfiq->InterfaceDesc == 4)))
(cfi->cfiq->InterfaceDesc ==
CFI_INTERFACE_X16_BY_X32_ASYNC)))
{
cfi->addr_unlock1 = 0xaaa;
cfi->addr_unlock2 = 0x555;


@@ -370,27 +370,27 @@ static void print_cfi_ident(struct cfi_ident *cfip)
printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
switch(cfip->InterfaceDesc) {
case 0:
case CFI_INTERFACE_X8_ASYNC:
printk(" - x8-only asynchronous interface\n");
break;
case 1:
case CFI_INTERFACE_X16_ASYNC:
printk(" - x16-only asynchronous interface\n");
break;
case 2:
case CFI_INTERFACE_X8_BY_X16_ASYNC:
printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
break;
case 3:
case CFI_INTERFACE_X32_ASYNC:
printk(" - x32-only asynchronous interface\n");
break;
case 4:
case CFI_INTERFACE_X16_BY_X32_ASYNC:
printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
break;
case 65535:
case CFI_INTERFACE_NOT_ALLOWED:
printk(" - Not Allowed / Reserved\n");
break;


@@ -112,7 +112,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
max_chips = 1;
}
mapsize = (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG;
mapsize = sizeof(long) * ( (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG );
chip_map = kzalloc(mapsize, GFP_KERNEL);
if (!chip_map) {
printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
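(Note on the fix above: the old expression computed the chip map's size in longs but handed that count to kzalloc(), which takes bytes. With BITS_PER_LONG = 32 and up to 32 chips it allocated a single byte, while the bitmap helpers that mark detected chips write whole longs; scaling by sizeof(long) makes the allocation match what the bitmap code actually touches.)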

File diff suppressed because it is too large


@@ -9,7 +9,7 @@
*
* mtdparts=<mtddef>[;<mtddef]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
* <partdef> := <size>[@offset][<name>][ro]
* <partdef> := <size>[@offset][<name>][ro][lk]
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
* <name> := '(' NAME ')'
@@ -143,6 +143,13 @@ static struct mtd_partition * newpart(char *s,
s += 2;
}
/* if lk is found do NOT unlock the MTD partition*/
if (strncmp(s, "lk", 2) == 0)
{
mask_flags |= MTD_POWERUP_LOCK;
s += 2;
}
/* test if more partitions are following */
if (*s == ',')
{
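(A hypothetical command line exercising the new flag; the device name and sizes are invented for illustration:

    mtdparts=physmap-flash.0:256k(boot)ro,lk,-(rootfs)

Here the boot partition is read-only and, because of lk, is also left locked rather than being unlocked automatically when partitions are set up.)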


@@ -632,7 +632,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
len = ((from | 0x1ff) + 1) - from;
/* The ECC will not be calculated correctly if less than 512 is read */
if (len != 0x200 && eccbuf)
if (len != 0x200)
printk(KERN_WARNING
"ECC needs a full sector read (adr: %lx size %lx)\n",
(long) from, (long) len);
@@ -896,7 +896,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Let the caller know we completed it */
*retlen += len;
if (eccbuf) {
{
unsigned char x[8];
size_t dummy;
int ret;


@@ -748,7 +748,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
/* On interleaved devices the flags for 2nd half 512 are before data */
if (eccbuf && before)
if (before)
fto -= 2;
/* issue the Serial Data In command to initial the Page Program process */


@@ -323,7 +323,7 @@ static int flash_probe (void)
/* put the flash back into command mode */
write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000);
return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || FLASH_DEVICE_16mbit_BOTTOM));
return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM));
}
/*


@@ -420,7 +420,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
status = dataflash_waitready(priv->spi);
/* Check result of the compare operation */
if ((status & (1 << 6)) == 1) {
if (status & (1 << 6)) {
printk(KERN_ERR "%s: compare page %u, err %d\n",
spi->dev.bus_id, pageaddr, status);
remaining = 0;
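(Reasoning behind the one-liner above: (status & (1 << 6)) evaluates to 0 or 0x40, never 1, so the equality test could not fire and a failed compare-after-write was silently ignored. Testing the masked bit for non-zero restores the intended error path.)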


@@ -110,13 +110,6 @@ config MTD_SUN_UFLASH
Sun Microsystems boardsets. This driver will require CFI support
in the kernel, so if you did not enable CFI previously, do that now.
config MTD_PNC2000
tristate "CFI Flash device mapped on Photron PNC-2000"
depends on X86 && MTD_CFI && MTD_PARTITIONS
help
PNC-2000 is the name of Network Camera product from PHOTRON
Ltd. in Japan. It uses CFI-compliant flash.
config MTD_SC520CDP
tristate "CFI Flash device mapped on AMD SC520 CDP"
depends on X86 && MTD_CFI && MTD_CONCAT
@@ -576,7 +569,7 @@ config MTD_BAST_MAXSIZE
default "4"
config MTD_SHARP_SL
bool "ROM mapped on Sharp SL Series"
tristate "ROM mapped on Sharp SL Series"
depends on ARCH_PXA
help
This enables access to the flash chip on the Sharp SL Series of PDAs.


@@ -28,7 +28,6 @@ obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o


@@ -20,11 +20,15 @@
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/concat.h>
#include <asm/io.h>
#define MAX_RESOURCES 4
struct physmap_flash_info {
struct mtd_info *mtd;
struct map_info map;
struct mtd_info *mtd[MAX_RESOURCES];
struct mtd_info *cmtd;
struct map_info map[MAX_RESOURCES];
struct resource *res;
#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
@@ -32,11 +36,11 @@ struct physmap_flash_info {
#endif
};
static int physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
int i;
info = platform_get_drvdata(dev);
if (info == NULL)
@@ -45,24 +49,33 @@ static int physmap_flash_remove(struct platform_device *dev)
physmap_data = dev->dev.platform_data;
if (info->mtd != NULL) {
#ifdef CONFIG_MTD_PARTITIONS
if (info->nr_parts) {
del_mtd_partitions(info->mtd);
kfree(info->parts);
} else if (physmap_data->nr_parts) {
del_mtd_partitions(info->mtd);
} else {
del_mtd_device(info->mtd);
}
#else
del_mtd_device(info->mtd);
#endif
map_destroy(info->mtd);
#ifdef CONFIG_MTD_CONCAT
if (info->cmtd != info->mtd[0]) {
del_mtd_device(info->cmtd);
mtd_concat_destroy(info->cmtd);
}
#endif
if (info->map.virt != NULL)
iounmap(info->map.virt);
for (i = 0; i < MAX_RESOURCES; i++) {
if (info->mtd[i] != NULL) {
#ifdef CONFIG_MTD_PARTITIONS
if (info->nr_parts) {
del_mtd_partitions(info->mtd[i]);
kfree(info->parts);
} else if (physmap_data->nr_parts) {
del_mtd_partitions(info->mtd[i]);
} else {
del_mtd_device(info->mtd[i]);
}
#else
del_mtd_device(info->mtd[i]);
#endif
map_destroy(info->mtd[i]);
}
if (info->map[i].virt != NULL)
iounmap(info->map[i].virt);
}
if (info->res != NULL) {
release_resource(info->res);
@@ -82,16 +95,14 @@ static int physmap_flash_probe(struct platform_device *dev)
struct physmap_flash_data *physmap_data;
struct physmap_flash_info *info;
const char **probe_type;
int err;
int err = 0;
int i;
int devices_found = 0;
physmap_data = dev->dev.platform_data;
if (physmap_data == NULL)
return -ENODEV;
printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
(unsigned long long)(dev->resource->end - dev->resource->start + 1),
(unsigned long long)dev->resource->start);
info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
@@ -100,56 +111,83 @@ static int physmap_flash_probe(struct platform_device *dev)
platform_set_drvdata(dev, info);
info->res = request_mem_region(dev->resource->start,
dev->resource->end - dev->resource->start + 1,
dev->dev.bus_id);
if (info->res == NULL) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
for (i = 0; i < dev->num_resources; i++) {
printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
(unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
(unsigned long long)dev->resource[i].start);
info->res = request_mem_region(dev->resource[i].start,
dev->resource[i].end - dev->resource[i].start + 1,
dev->dev.bus_id);
if (info->res == NULL) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
}
info->map[i].name = dev->dev.bus_id;
info->map[i].phys = dev->resource[i].start;
info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
info->map[i].bankwidth = physmap_data->width;
info->map[i].set_vpp = physmap_data->set_vpp;
info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size);
if (info->map[i].virt == NULL) {
dev_err(&dev->dev, "Failed to ioremap flash region\n");
err = EIO;
goto err_out;
}
simple_map_init(&info->map[i]);
probe_type = rom_probe_types;
for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++)
info->mtd[i] = do_map_probe(*probe_type, &info->map[i]);
if (info->mtd[i] == NULL) {
dev_err(&dev->dev, "map_probe failed\n");
err = -ENXIO;
goto err_out;
} else {
devices_found++;
}
info->mtd[i]->owner = THIS_MODULE;
}
info->map.name = dev->dev.bus_id;
info->map.phys = dev->resource->start;
info->map.size = dev->resource->end - dev->resource->start + 1;
info->map.bankwidth = physmap_data->width;
info->map.set_vpp = physmap_data->set_vpp;
info->map.virt = ioremap(info->map.phys, info->map.size);
if (info->map.virt == NULL) {
dev_err(&dev->dev, "Failed to ioremap flash region\n");
err = EIO;
goto err_out;
}
simple_map_init(&info->map);
probe_type = rom_probe_types;
for (; info->mtd == NULL && *probe_type != NULL; probe_type++)
info->mtd = do_map_probe(*probe_type, &info->map);
if (info->mtd == NULL) {
dev_err(&dev->dev, "map_probe failed\n");
if (devices_found == 1) {
info->cmtd = info->mtd[0];
} else if (devices_found > 1) {
/*
* We detected multiple devices. Concatenate them together.
*/
#ifdef CONFIG_MTD_CONCAT
info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id);
if (info->cmtd == NULL)
err = -ENXIO;
#else
printk(KERN_ERR "physmap-flash: multiple devices "
"found but MTD concat support disabled.\n");
err = -ENXIO;
goto err_out;
#endif
}
info->mtd->owner = THIS_MODULE;
if (err)
goto err_out;
#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(info->mtd, part_probe_types, &info->parts, 0);
err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0);
if (err > 0) {
add_mtd_partitions(info->mtd, info->parts, err);
add_mtd_partitions(info->cmtd, info->parts, err);
return 0;
}
if (physmap_data->nr_parts) {
printk(KERN_NOTICE "Using physmap partition information\n");
add_mtd_partitions(info->mtd, physmap_data->parts,
physmap_data->nr_parts);
add_mtd_partitions(info->cmtd, physmap_data->parts,
physmap_data->nr_parts);
return 0;
}
#endif
add_mtd_device(info->mtd);
add_mtd_device(info->cmtd);
return 0;
err_out:
@@ -162,9 +200,11 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
int ret = 0;
int i;
if (info)
ret = info->mtd->suspend(info->mtd);
for (i = 0; i < MAX_RESOURCES; i++)
ret |= info->mtd[i]->suspend(info->mtd[i]);
return ret;
}
@@ -172,27 +212,35 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state
static int physmap_flash_resume(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
int i;
if (info)
info->mtd->resume(info->mtd);
for (i = 0; i < MAX_RESOURCES; i++)
info->mtd[i]->resume(info->mtd[i]);
return 0;
}
static void physmap_flash_shutdown(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
if (info && info->mtd->suspend(info->mtd) == 0)
info->mtd->resume(info->mtd);
int i;
for (i = 0; i < MAX_RESOURCES; i++)
if (info && info->mtd[i]->suspend(info->mtd[i]) == 0)
info->mtd[i]->resume(info->mtd[i]);
}
#else
#define physmap_flash_suspend NULL
#define physmap_flash_resume NULL
#define physmap_flash_shutdown NULL
#endif
static struct platform_driver physmap_flash_driver = {
.probe = physmap_flash_probe,
.remove = physmap_flash_remove,
#ifdef CONFIG_PM
.suspend = physmap_flash_suspend,
.resume = physmap_flash_resume,
.shutdown = physmap_flash_shutdown,
#endif
.driver = {
.name = "physmap-flash",
},


@@ -80,64 +80,6 @@ static int parse_obsolete_partitions(struct of_device *dev,
return nr_parts;
}
static int __devinit parse_partitions(struct of_flash *info,
struct of_device *dev)
{
const char *partname;
static const char *part_probe_types[]
= { "cmdlinepart", "RedBoot", NULL };
struct device_node *dp = dev->node, *pp;
int nr_parts, i;
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
nr_parts = parse_mtd_partitions(info->mtd, part_probe_types,
&info->parts, 0);
if (nr_parts > 0) {
add_mtd_partitions(info->mtd, info->parts, nr_parts);
return 0;
}
/* First count the subnodes */
nr_parts = 0;
for (pp = dp->child; pp; pp = pp->sibling)
nr_parts++;
if (nr_parts == 0)
return parse_obsolete_partitions(dev, info, dp);
info->parts = kzalloc(nr_parts * sizeof(*info->parts),
GFP_KERNEL);
if (!info->parts)
return -ENOMEM;
for (pp = dp->child, i = 0; pp; pp = pp->sibling, i++) {
const u32 *reg;
int len;
reg = of_get_property(pp, "reg", &len);
if (!reg || (len != 2*sizeof(u32))) {
dev_err(&dev->dev, "Invalid 'reg' on %s\n",
dp->full_name);
kfree(info->parts);
info->parts = NULL;
return -EINVAL;
}
info->parts[i].offset = reg[0];
info->parts[i].size = reg[1];
partname = of_get_property(pp, "label", &len);
if (!partname)
partname = of_get_property(pp, "name", &len);
info->parts[i].name = (char *)partname;
if (of_get_property(pp, "read-only", &len))
info->parts[i].mask_flags = MTD_WRITEABLE;
}
return nr_parts;
}
#else /* MTD_PARTITIONS */
#define OF_FLASH_PARTS(info) (0)
#define parse_partitions(info, dev) (0)
@@ -212,6 +154,10 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
static int __devinit of_flash_probe(struct of_device *dev,
const struct of_device_id *match)
{
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[]
= { "cmdlinepart", "RedBoot", NULL };
#endif
struct device_node *dp = dev->node;
struct resource res;
struct of_flash *info;
@@ -274,13 +220,33 @@ static int __devinit of_flash_probe(struct of_device *dev,
}
info->mtd->owner = THIS_MODULE;
err = parse_partitions(info, dev);
#ifdef CONFIG_MTD_PARTITIONS
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
err = parse_mtd_partitions(info->mtd, part_probe_types,
&info->parts, 0);
if (err < 0)
goto err_out;
return err;
#ifdef CONFIG_MTD_OF_PARTS
if (err == 0) {
err = of_mtd_parse_partitions(&dev->dev, info->mtd,
dp, &info->parts);
if (err < 0)
return err;
}
#endif
if (err == 0) {
err = parse_obsolete_partitions(dev, info, dp);
if (err < 0)
return err;
}
if (err > 0)
add_mtd_partitions(info->mtd, OF_FLASH_PARTS(info), err);
add_mtd_partitions(info->mtd, info->parts, err);
else
#endif
add_mtd_device(info->mtd);
return 0;


@@ -1,93 +0,0 @@
/*
* pnc2000.c - mapper for Photron PNC-2000 board.
*
* Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
*
* This code is GPL
*
* $Id: pnc2000.c,v 1.18 2005/11/07 11:14:28 gleixner Exp $
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#define WINDOW_ADDR 0xbf000000
#define WINDOW_SIZE 0x00400000
/*
* MAP DRIVER STUFF
*/
static struct map_info pnc_map = {
.name = "PNC-2000",
.size = WINDOW_SIZE,
.bankwidth = 4,
.phys = 0xFFFFFFFF,
.virt = (void __iomem *)WINDOW_ADDR,
};
/*
* MTD 'PARTITIONING' STUFF
*/
static struct mtd_partition pnc_partitions[3] = {
{
.name = "PNC-2000 boot firmware",
.size = 0x20000,
.offset = 0
},
{
.name = "PNC-2000 kernel",
.size = 0x1a0000,
.offset = 0x20000
},
{
.name = "PNC-2000 filesystem",
.size = 0x240000,
.offset = 0x1c0000
}
};
/*
* This is the master MTD device for which all the others are just
* auto-relocating aliases.
*/
static struct mtd_info *mymtd;
static int __init init_pnc2000(void)
{
printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
simple_map_init(&pnc_map);
mymtd = do_map_probe("cfi_probe", &pnc_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
return add_mtd_partitions(mymtd, pnc_partitions, 3);
}
return -ENXIO;
}
static void __exit cleanup_pnc2000(void)
{
if (mymtd) {
del_mtd_partitions(mymtd);
map_destroy(mymtd);
}
}
module_init(init_pnc2000);
module_exit(cleanup_pnc2000);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp>");
MODULE_DESCRIPTION("MTD map driver for Photron PNC-2000 board");


@@ -79,7 +79,7 @@ scb2_fixup_mtd(struct mtd_info *mtd)
struct cfi_private *cfi = map->fldrv_priv;
/* barf if this doesn't look right */
if (cfi->cfiq->InterfaceDesc != 1) {
if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) {
printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
cfi->cfiq->InterfaceDesc);
return -1;


@@ -248,9 +248,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
return -EBUSY;
}
mutex_init(&new->lock);
list_add_tail(&new->list, &tr->devs);
added:
mutex_init(&new->lock);
if (!tr->writesect)
new->readonly = 1;


@@ -481,6 +481,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
struct mtd_oob_buf buf;
struct mtd_oob_ops ops;
uint32_t retlen;
if(!(file->f_mode & 2))
return -EPERM;
@@ -520,8 +521,11 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
buf.start &= ~(mtd->oobsize - 1);
ret = mtd->write_oob(mtd, buf.start, &ops);
if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen,
sizeof(uint32_t)))
if (ops.oobretlen > 0xFFFFFFFFU)
ret = -EOVERFLOW;
retlen = ops.oobretlen;
if (copy_to_user(&((struct mtd_oob_buf *)argp)->length,
&retlen, sizeof(buf.length)))
ret = -EFAULT;
kfree(ops.oobbuf);


@@ -61,7 +61,7 @@ int add_mtd_device(struct mtd_info *mtd)
/* Some chips always power up locked. Unlock them now */
if ((mtd->flags & MTD_WRITEABLE)
&& (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) {
&& (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
if (mtd->unlock(mtd, 0, mtd->size))
printk(KERN_WARNING
"%s: unlock failed, "


@@ -28,19 +28,26 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#define OOPS_PAGE_SIZE 4096
static struct mtdoops_context {
struct mtdoops_context {
int mtd_index;
struct work_struct work;
struct work_struct work_erase;
struct work_struct work_write;
struct mtd_info *mtd;
int oops_pages;
int nextpage;
int nextcount;
void *oops_buf;
/* writecount and disabling ready are spin lock protected */
spinlock_t writecount_lock;
int ready;
int writecount;
} oops_cxt;
@@ -62,10 +69,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
erase.mtd = mtd;
erase.callback = mtdoops_erase_callback;
erase.addr = offset;
if (mtd->erasesize < OOPS_PAGE_SIZE)
erase.len = OOPS_PAGE_SIZE;
else
erase.len = mtd->erasesize;
erase.len = mtd->erasesize;
erase.priv = (u_long)&wait_q;
set_current_state(TASK_INTERRUPTIBLE);
@@ -87,7 +91,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
return 0;
}
static int mtdoops_inc_counter(struct mtdoops_context *cxt)
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
struct mtd_info *mtd = cxt->mtd;
size_t retlen;
@@ -103,25 +107,30 @@ static int mtdoops_inc_counter(struct mtdoops_context *cxt)
ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
&retlen, (u_char *) &count);
if ((retlen != 4) || (ret < 0)) {
if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
retlen, ret);
return 1;
schedule_work(&cxt->work_erase);
return;
}
/* See if we need to erase the next block */
if (count != 0xffffffff)
return 1;
if (count != 0xffffffff) {
schedule_work(&cxt->work_erase);
return;
}
printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
cxt->nextpage, cxt->nextcount);
cxt->ready = 1;
return 0;
}
static void mtdoops_prepare(struct mtdoops_context *cxt)
/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
struct mtdoops_context *cxt =
container_of(work, struct mtdoops_context, work_erase);
struct mtd_info *mtd = cxt->mtd;
int i = 0, j, ret, mod;
@@ -136,8 +145,14 @@ static void mtdoops_prepare(struct mtdoops_context *cxt)
cxt->nextpage = 0;
}
while (mtd->block_isbad &&
mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) {
while (mtd->block_isbad) {
ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
if (!ret)
break;
if (ret < 0) {
printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
return;
}
badblock:
printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
cxt->nextpage * OOPS_PAGE_SIZE);
@@ -154,34 +169,72 @@ badblock:
for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
if (ret < 0) {
if (mtd->block_markbad)
mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
goto badblock;
if (ret >= 0) {
printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
cxt->ready = 1;
return;
}
printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
cxt->ready = 1;
if (mtd->block_markbad && (ret == -EIO)) {
ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
if (ret < 0) {
printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
return;
}
}
goto badblock;
}
static void mtdoops_workfunc(struct work_struct *work)
{
struct mtdoops_context *cxt =
container_of(work, struct mtdoops_context, work);
mtdoops_prepare(cxt);
}
static int find_next_position(struct mtdoops_context *cxt)
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
struct mtd_info *mtd = cxt->mtd;
int page, maxpos = 0;
size_t retlen;
int ret;
if (cxt->writecount < OOPS_PAGE_SIZE)
memset(cxt->oops_buf + cxt->writecount, 0xff,
OOPS_PAGE_SIZE - cxt->writecount);
if (panic)
ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
else
ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
cxt->writecount = 0;
if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
mtdoops_inc_counter(cxt);
}
static void mtdoops_workfunc_write(struct work_struct *work)
{
struct mtdoops_context *cxt =
container_of(work, struct mtdoops_context, work_write);
mtdoops_write(cxt, 0);
}
static void find_next_position(struct mtdoops_context *cxt)
{
struct mtd_info *mtd = cxt->mtd;
int ret, page, maxpos = 0;
u32 count, maxcount = 0xffffffff;
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
continue;
}
if (count == 0xffffffff)
continue;
if (maxcount == 0xffffffff) {
@@ -205,20 +258,19 @@ static int find_next_position(struct mtdoops_context *cxt)
cxt->ready = 1;
printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
cxt->nextpage, cxt->nextcount);
return 0;
return;
}
cxt->nextpage = maxpos;
cxt->nextcount = maxcount;
return mtdoops_inc_counter(cxt);
mtdoops_inc_counter(cxt);
}
static void mtdoops_notify_add(struct mtd_info *mtd)
{
struct mtdoops_context *cxt = &oops_cxt;
int ret;
if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
return;
@@ -229,14 +281,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
return;
}
if (mtd->erasesize < OOPS_PAGE_SIZE) {
printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
mtd->index);
return;
}
cxt->mtd = mtd;
cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
ret = find_next_position(cxt);
if (ret == 1)
mtdoops_prepare(cxt);
find_next_position(cxt);
printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index);
printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}
static void mtdoops_notify_remove(struct mtd_info *mtd)
@@ -254,31 +310,28 @@ static void mtdoops_console_sync(void)
{
struct mtdoops_context *cxt = &oops_cxt;
struct mtd_info *mtd = cxt->mtd;
size_t retlen;
int ret;
unsigned long flags;
if (!cxt->ready || !mtd)
if (!cxt->ready || !mtd || cxt->writecount == 0)
return;
if (cxt->writecount == 0)
/*
* Once ready is 0 and we've held the lock no further writes to the
* buffer will happen
*/
spin_lock_irqsave(&cxt->writecount_lock, flags);
if (!cxt->ready) {
spin_unlock_irqrestore(&cxt->writecount_lock, flags);
return;
if (cxt->writecount < OOPS_PAGE_SIZE)
memset(cxt->oops_buf + cxt->writecount, 0xff,
OOPS_PAGE_SIZE - cxt->writecount);
ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
}
cxt->ready = 0;
cxt->writecount = 0;
spin_unlock_irqrestore(&cxt->writecount_lock, flags);
if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
ret = mtdoops_inc_counter(cxt);
if (ret == 1)
schedule_work(&cxt->work);
if (mtd->panic_write && in_interrupt())
/* Interrupt context, we're going to panic so try and log */
mtdoops_write(cxt, 1);
else
schedule_work(&cxt->work_write);
}
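(Net effect of the locking scheme: ready is cleared while writecount_lock is held, so once a sync claims the buffer no further console output is appended until the write finishes and mtdoops_inc_counter() re-arms ready; during a panic in interrupt context the write is issued synchronously via panic_write, since the workqueue would never get to run.)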
static void
@@ -286,7 +339,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
{
struct mtdoops_context *cxt = co->data;
struct mtd_info *mtd = cxt->mtd;
int i;
unsigned long flags;
if (!oops_in_progress) {
mtdoops_console_sync();
@@ -296,6 +349,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
if (!cxt->ready || !mtd)
return;
/* Locking on writecount ensures sequential writes to the buffer */
spin_lock_irqsave(&cxt->writecount_lock, flags);
/* Check ready status didn't change whilst waiting for the lock */
if (!cxt->ready)
return;
if (cxt->writecount == 0) {
u32 *stamp = cxt->oops_buf;
*stamp = cxt->nextcount;
@@ -305,10 +365,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
count = OOPS_PAGE_SIZE - cxt->writecount;
for (i = 0; i < count; i++, s++)
*((char *)(cxt->oops_buf) + cxt->writecount + i) = *s;
memcpy(cxt->oops_buf + cxt->writecount, s, count);
cxt->writecount += count;
cxt->writecount = cxt->writecount + count;
spin_unlock_irqrestore(&cxt->writecount_lock, flags);
if (cxt->writecount == OOPS_PAGE_SIZE)
mtdoops_console_sync();
}
static int __init mtdoops_console_setup(struct console *co, char *options)
@@ -334,7 +397,6 @@ static struct console mtdoops_console = {
.write = mtdoops_console_write,
.setup = mtdoops_console_setup,
.unblank = mtdoops_console_sync,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &oops_cxt,
};
@@ -347,11 +409,12 @@ static int __init mtdoops_console_init(void)
cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
if (!cxt->oops_buf) {
printk(KERN_ERR "Failed to allocate oops buffer workspace\n");
printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
return -ENOMEM;
}
INIT_WORK(&cxt->work, mtdoops_workfunc);
INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
register_console(&mtdoops_console);
register_mtd_user(&mtdoops_notifier);


@@ -151,6 +151,20 @@ static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
len, retlen, buf);
}
static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (to >= mtd->size)
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
return part->master->panic_write (part->master, to + part->offset,
len, retlen, buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
@@ -352,6 +366,9 @@ int add_mtd_partitions(struct mtd_info *master,
slave->mtd.read = part_read;
slave->mtd.write = part_write;
if (master->panic_write)
slave->mtd.panic_write = part_panic_write;
if(master->point && master->unpoint){
slave->mtd.point = part_point;
slave->mtd.unpoint = part_unpoint;
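A minimal sketch of a client of the new hook, mirroring what mtdoops does above; the helper name and error handling are illustrative, not part of this commit:

    /* Sketch: prefer panic_write when it exists and sleeping is impossible */
    static void emergency_log(struct mtd_info *mtd, loff_t ofs,
                              size_t len, const u_char *buf)
    {
            size_t retlen;
            int ret;

            if (mtd->panic_write)
                    ret = mtd->panic_write(mtd, ofs, len, &retlen, buf);
            else
                    ret = mtd->write(mtd, ofs, len, &retlen, buf);

            if (ret < 0 || retlen != len)
                    printk(KERN_ERR "emergency log: write failed, err %d\n", ret);
    }

Like part_panic_write() above, any such caller must still respect MTD_WRITEABLE and the device bounds.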


@@ -93,7 +93,7 @@ config MTD_NAND_AU1550
config MTD_NAND_BF5XX
tristate "Blackfin on-chip NAND Flash Controller driver"
depends on BF54x && MTD_NAND
depends on (BF54x || BF52x) && MTD_NAND
help
This enables the Blackfin on-chip NAND flash controller
@@ -283,6 +283,12 @@ config MTD_NAND_CM_X270
tristate "Support for NAND Flash on CM-X270 modules"
depends on MTD_NAND && MACH_ARMCORE
config MTD_NAND_PASEMI
tristate "NAND support for PA Semi PWRficient"
depends on MTD_NAND && PPC_PASEMI
help
Enables support for NAND Flash interface on PA Semi PWRficient
based boards
config MTD_NAND_NANDSIM
tristate "Support for NAND Flash Simulator"
@@ -306,4 +312,22 @@ config MTD_ALAUDA
These two (and possibly other) Alauda-based cardreaders for
SmartMedia and xD allow raw flash access.
config MTD_NAND_ORION
tristate "NAND Flash support for Marvell Orion SoC"
depends on ARCH_ORION && MTD_NAND
help
This enables the NAND flash controller on Orion machines.
No board specific support is done by this driver, each board
must advertise a platform_device for the driver to attach.
config MTD_NAND_FSL_ELBC
tristate "NAND support for Freescale eLBC controllers"
depends on MTD_NAND && PPC_OF
help
Various Freescale chips, including the 8313, include a NAND Flash
Controller Module with built-in hardware ECC capabilities.
Enabling this option will enable you to use this to control
external NAND devices.
endif # MTD_NAND


@@ -29,5 +29,8 @@ obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_ALAUDA) += alauda.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
nand-objs := nand_base.o nand_bbt.o


@@ -156,14 +156,14 @@ static int __init at91_nand_probe(struct platform_device *pdev)
}
#ifdef CONFIG_MTD_PARTITIONS
if (host->board->partition_info)
partitions = host->board->partition_info(mtd->size, &num_partitions);
#ifdef CONFIG_MTD_CMDLINE_PARTS
else {
mtd->name = "at91_nand";
num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
}
mtd->name = "at91_nand";
num_partitions = parse_mtd_partitions(mtd, part_probes,
&partitions, 0);
#endif
if (num_partitions <= 0 && host->board->partition_info)
partitions = host->board->partition_info(mtd->size,
&num_partitions);
if ((!partitions) || (num_partitions == 0)) {
printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");


@@ -74,7 +74,22 @@ static int hardware_ecc = 1;
static int hardware_ecc;
#endif
static unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, 0};
static unsigned short bfin_nfc_pin_req[] =
{P_NAND_CE,
P_NAND_RB,
P_NAND_D0,
P_NAND_D1,
P_NAND_D2,
P_NAND_D3,
P_NAND_D4,
P_NAND_D5,
P_NAND_D6,
P_NAND_D7,
P_NAND_WE,
P_NAND_RE,
P_NAND_CLE,
P_NAND_ALE,
0};
/*
* Data structures for bf5xx nand flash controller driver
@@ -278,7 +293,6 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
u16 ecc0, ecc1;
u32 code[2];
u8 *p;
int bytes = 3, i;
/* first 4 bytes ECC code for 256 page size */
ecc0 = bfin_read_NFC_ECC0();
@@ -288,19 +302,24 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
/* first 3 bytes in ecc_code for 256 page size */
p = (u8 *) code;
memcpy(ecc_code, p, 3);
/* second 4 bytes ECC code for 512 page size */
if (page_size == 512) {
ecc0 = bfin_read_NFC_ECC2();
ecc1 = bfin_read_NFC_ECC3();
code[1] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11);
bytes = 6;
/* second 3 bytes in ecc_code for second 256
* bytes of 512 page size
*/
p = (u8 *) (code + 1);
memcpy((ecc_code + 3), p, 3);
dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]);
}
p = (u8 *)code;
for (i = 0; i < bytes; i++)
ecc_code[i] = p[i];
return 0;
}
@@ -507,12 +526,13 @@ static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
init_completion(&info->dma_completion);
#ifdef CONFIG_BF54x
/* Setup DMAC1 channel mux for NFC which shared with SDH */
val = bfin_read_DMAC1_PERIMUX();
val &= 0xFFFE;
bfin_write_DMAC1_PERIMUX(val);
SSYNC();
#endif
/* Request NFC DMA channel */
ret = request_dma(CH_NFC, "BF5XX NFC driver");
if (ret < 0) {
@@ -744,9 +764,6 @@ static int bf5xx_nand_resume(struct platform_device *dev)
{
struct bf5xx_nand_info *info = platform_get_drvdata(dev);
if (info)
bf5xx_nand_hw_init(info);
return 0;
}


@@ -11,6 +11,7 @@
#undef DEBUG
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/rslib.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -52,6 +53,7 @@
struct cafe_priv {
struct nand_chip nand;
struct mtd_partition *parts;
struct pci_dev *pdev;
void __iomem *mmio;
struct rs_control *rs;
@@ -84,6 +86,10 @@ static unsigned int numtimings;
static int timing[3];
module_param_array(timing, int, &numtimings, 0644);
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "RedBoot", NULL };
#endif
/* Hrm. Why isn't this already conditional on something in the struct device? */
#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
@@ -620,7 +626,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
{
struct mtd_info *mtd;
struct cafe_priv *cafe;
struct mtd_partition *parts;
uint32_t ctrl;
int nr_parts;
int err = 0;
/* Very old versions shared the same PCI ident for all three
@@ -787,7 +795,18 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
goto out_irq;
pci_set_drvdata(pdev, mtd);
/* We register the whole device first, separate from the partitions */
add_mtd_device(mtd);
#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
if (nr_parts > 0) {
cafe->parts = parts;
dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts);
add_mtd_partitions(mtd, parts, nr_parts);
}
#endif
goto out;
out_irq:

File diff suppressed because it is too large


@@ -2469,8 +2469,12 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.write_oob = nand_write_oob_std;
case NAND_ECC_HW_SYNDROME:
if (!chip->ecc.calculate || !chip->ecc.correct ||
!chip->ecc.hwctl) {
if ((!chip->ecc.calculate || !chip->ecc.correct ||
!chip->ecc.hwctl) &&
(!chip->ecc.read_page ||
chip->ecc.read_page == nand_read_page_hwecc ||
!chip->ecc.write_page ||
chip->ecc.write_page == nand_write_page_hwecc)) {
printk(KERN_WARNING "No ECC functions supplied, "
"Hardware ECC not possible\n");
BUG();
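(Spelled out: the warning and BUG() now trigger only when a driver neither supplies all of calculate/correct/hwctl nor overrides both ecc.read_page and ecc.write_page with its own routines, so controllers that replace the page accessors wholesale, as the new Freescale eLBC driver does, need not provide the three low-level ECC callbacks.)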


@@ -0,0 +1,171 @@
/*
* drivers/mtd/nand/orion_nand.c
*
* NAND support for Marvell Orion SoC platforms
*
* Tzachi Perelstein <tzachi@marvell.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <asm/arch/platform.h>
#include <asm/arch/hardware.h>
#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
struct nand_chip *nc = mtd->priv;
struct orion_nand_data *board = nc->priv;
u32 offs;
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
offs = (1 << board->cle);
else if (ctrl & NAND_ALE)
offs = (1 << board->ale);
else
return;
if (nc->options & NAND_BUSWIDTH_16)
offs <<= 1;
writeb(cmd, nc->IO_ADDR_W + offs);
}
static int __init orion_nand_probe(struct platform_device *pdev)
{
struct mtd_info *mtd;
struct nand_chip *nc;
struct orion_nand_data *board;
void __iomem *io_base;
int ret = 0;
#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partitions = NULL;
int num_part = 0;
#endif
nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!nc) {
printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
ret = -ENOMEM;
goto no_res;
}
mtd = (struct mtd_info *)(nc + 1);
io_base = ioremap(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
if (!io_base) {
printk(KERN_ERR "orion_nand: ioremap failed\n");
ret = -EIO;
goto no_res;
}
board = pdev->dev.platform_data;
mtd->priv = nc;
mtd->owner = THIS_MODULE;
nc->priv = board;
nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
nc->cmd_ctrl = orion_nand_cmd_ctrl;
nc->ecc.mode = NAND_ECC_SOFT;
if (board->width == 16)
nc->options |= NAND_BUSWIDTH_16;
platform_set_drvdata(pdev, mtd);
if (nand_scan(mtd, 1)) {
ret = -ENXIO;
goto no_dev;
}
#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "orion_nand";
num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
#endif
/* If cmdline partitions have been passed, let them be used */
if (num_part <= 0) {
num_part = board->nr_parts;
partitions = board->parts;
}
if (partitions && num_part > 0)
ret = add_mtd_partitions(mtd, partitions, num_part);
else
ret = add_mtd_device(mtd);
#else
ret = add_mtd_device(mtd);
#endif
if (ret) {
nand_release(mtd);
goto no_dev;
}
return 0;
no_dev:
platform_set_drvdata(pdev, NULL);
iounmap(io_base);
no_res:
kfree(nc);
return ret;
}
static int __devexit orion_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nc = mtd->priv;
nand_release(mtd);
iounmap(nc->IO_ADDR_W);
kfree(nc);
return 0;
}
static struct platform_driver orion_nand_driver = {
.probe = orion_nand_probe,
.remove = orion_nand_remove,
.driver = {
.name = "orion_nand",
.owner = THIS_MODULE,
},
};
static int __init orion_nand_init(void)
{
return platform_driver_register(&orion_nand_driver);
}
static void __exit orion_nand_exit(void)
{
platform_driver_unregister(&orion_nand_driver);
}
module_init(orion_nand_init);
module_exit(orion_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tzachi Perelstein");
MODULE_DESCRIPTION("NAND glue for Orion platforms");


@@ -0,0 +1,243 @@
/*
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Author: Egor Martovetsky <egor@pasemi.com>
* Maintained by: Olof Johansson <olof@lixom.net>
*
* Driver for the PWRficient onchip NAND flash interface
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef DEBUG
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <asm/io.h>
#define LBICTRL_LPCCTL_NR 0x00004000
#define CLE_PIN_CTL 15
#define ALE_PIN_CTL 14
static unsigned int lpcctl;
static struct mtd_info *pasemi_nand_mtd;
static const char driver_name[] = "pasemi-nand";
static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *chip = mtd->priv;
while (len > 0x800) {
memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
buf += 0x800;
len -= 0x800;
}
memcpy_fromio(buf, chip->IO_ADDR_R, len);
}
static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct nand_chip *chip = mtd->priv;
while (len > 0x800) {
memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
buf += 0x800;
len -= 0x800;
}
memcpy_toio(chip->IO_ADDR_R, buf, len);
}
static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
unsigned int ctrl)
{
struct nand_chip *chip = mtd->priv;
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
else
out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
/* Push out posted writes */
eieio();
inl(lpcctl);
}
int pasemi_device_ready(struct mtd_info *mtd)
{
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
static int __devinit pasemi_nand_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct pci_dev *pdev;
struct device_node *np = ofdev->node;
struct resource res;
struct nand_chip *chip;
int err = 0;
err = of_address_to_resource(np, 0, &res);
if (err)
return -EINVAL;
/* We only support one device at the moment */
if (pasemi_nand_mtd)
return -ENODEV;
pr_debug("pasemi_nand at %lx-%lx\n", res.start, res.end);
/* Allocate memory for MTD device structure and private data */
pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
sizeof(struct nand_chip), GFP_KERNEL);
if (!pasemi_nand_mtd) {
printk(KERN_WARNING
"Unable to allocate PASEMI NAND MTD device structure\n");
err = -ENOMEM;
goto out;
}
/* Get pointer to private data */
chip = (struct nand_chip *)&pasemi_nand_mtd[1];
/* Link the private data with the MTD structure */
pasemi_nand_mtd->priv = chip;
pasemi_nand_mtd->owner = THIS_MODULE;
chip->IO_ADDR_R = of_iomap(np, 0);
chip->IO_ADDR_W = chip->IO_ADDR_R;
if (!chip->IO_ADDR_R) {
err = -EIO;
goto out_mtd;
}
pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
if (!pdev) {
err = -ENODEV;
goto out_ior;
}
lpcctl = pci_resource_start(pdev, 0);
if (!request_region(lpcctl, 4, driver_name)) {
err = -EBUSY;
goto out_ior;
}
chip->cmd_ctrl = pasemi_hwcontrol;
chip->dev_ready = pasemi_device_ready;
chip->read_buf = pasemi_read_buf;
chip->write_buf = pasemi_write_buf;
chip->chip_delay = 0;
chip->ecc.mode = NAND_ECC_SOFT;
/* Enable the following for a flash based bad block table */
chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
/* Scan to find existance of the device */
if (nand_scan(pasemi_nand_mtd, 1)) {
err = -ENXIO;
goto out_lpc;
}
if (add_mtd_device(pasemi_nand_mtd)) {
printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
err = -ENODEV;
goto out_lpc;
}
printk(KERN_INFO "PA Semi NAND flash at %08lx, control at I/O %x\n",
res.start, lpcctl);
return 0;
out_lpc:
release_region(lpcctl, 4);
out_ior:
iounmap(chip->IO_ADDR_R);
out_mtd:
kfree(pasemi_nand_mtd);
out:
return err;
}
static int __devexit pasemi_nand_remove(struct of_device *ofdev)
{
struct nand_chip *chip;
if (!pasemi_nand_mtd)
return 0;
chip = pasemi_nand_mtd->priv;
/* Release resources, unregister device */
nand_release(pasemi_nand_mtd);
release_region(lpcctl, 4);
iounmap(chip->IO_ADDR_R);
/* Free the MTD device structure */
kfree(pasemi_nand_mtd);
pasemi_nand_mtd = NULL;
return 0;
}
static struct of_device_id pasemi_nand_match[] =
{
{
.compatible = "pasemi,localbus-nand",
},
{},
};
MODULE_DEVICE_TABLE(of, pasemi_nand_match);
static struct of_platform_driver pasemi_nand_driver =
{
.name = (char*)driver_name,
.match_table = pasemi_nand_match,
.probe = pasemi_nand_probe,
.remove = pasemi_nand_remove,
};
static int __init pasemi_nand_init(void)
{
return of_register_platform_driver(&pasemi_nand_driver);
}
module_init(pasemi_nand_init);
static void __exit pasemi_nand_exit(void)
{
of_unregister_platform_driver(&pasemi_nand_driver);
}
module_exit(pasemi_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");


@@ -110,7 +110,9 @@ out:
static int __devexit plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
#ifdef CONFIG_MTD_PARTITIONS
struct platform_nand_data *pdata = pdev->dev.platform_data;
#endif
nand_release(&data->mtd);
#ifdef CONFIG_MTD_PARTITIONS


@@ -120,6 +120,8 @@ struct s3c2410_nand_info {
int sel_bit;
int mtd_count;
unsigned long save_nfconf;
enum s3c_cpu_type cpu_type;
};
@@ -364,23 +366,21 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
/* calculate the bit position of the error */
bit = (diff2 >> 2) & 1;
bit |= (diff2 >> 3) & 2;
bit |= (diff2 >> 4) & 4;
bit = ((diff2 >> 3) & 1) |
((diff2 >> 4) & 2) |
((diff2 >> 5) & 4);
/* calculate the byte position of the error */
byte = (diff1 << 1) & 0x80;
byte |= (diff1 << 2) & 0x40;
byte |= (diff1 << 3) & 0x20;
byte |= (diff1 << 4) & 0x10;
byte |= (diff0 >> 3) & 0x08;
byte |= (diff0 >> 2) & 0x04;
byte |= (diff0 >> 1) & 0x02;
byte |= (diff0 >> 0) & 0x01;
byte |= (diff2 << 8) & 0x100;
byte = ((diff2 << 7) & 0x100) |
((diff1 << 0) & 0x80) |
((diff1 << 1) & 0x40) |
((diff1 << 2) & 0x20) |
((diff1 << 3) & 0x10) |
((diff0 >> 4) & 0x08) |
((diff0 >> 3) & 0x04) |
((diff0 >> 2) & 0x02) |
((diff0 >> 1) & 0x01);
dev_dbg(info->device, "correcting error bit %d, byte %d\n",
bit, byte);
@@ -399,7 +399,7 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
if ((diff0 & ~(1<<fls(diff0))) == 0)
return 1;
return 0;
return -1;
}
/* ECC functions
@@ -810,6 +810,16 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
struct s3c2410_nand_info *info = platform_get_drvdata(dev);
if (info) {
info->save_nfconf = readl(info->regs + S3C2410_NFCONF);
/* For the moment, we must ensure nFCE is high during
* the time we are suspended. This really should be
* handled by suspending the MTDs we are using, but
* that is currently not the case. */
writel(info->save_nfconf | info->sel_bit,
info->regs + S3C2410_NFCONF);
if (!allow_clk_stop(info))
clk_disable(info->clk);
}
@@ -820,11 +830,19 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
static int s3c24xx_nand_resume(struct platform_device *dev)
{
struct s3c2410_nand_info *info = platform_get_drvdata(dev);
unsigned long nfconf;
if (info) {
clk_enable(info->clk);
s3c2410_nand_inithw(info, dev);
/* Restore the state of the nFCE line. */
nfconf = readl(info->regs + S3C2410_NFCONF);
nfconf &= ~info->sel_bit;
nfconf |= info->save_nfconf & info->sel_bit;
writel(nfconf, info->regs + S3C2410_NFCONF);
if (allow_clk_stop(info))
clk_disable(info->clk);
}

drivers/mtd/ofpart.c (new file, 74 lines)

@@ -0,0 +1,74 @@
/*
* Flash partitions described by the OF (or flattened) device tree
*
* Copyright (C) 2006 MontaVista Software Inc.
* Author: Vitaly Wool <vwool@ru.mvista.com>
*
* Revised to handle newer style flash binding by:
* Copyright (C) 2007 David Gibson, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
int __devinit of_mtd_parse_partitions(struct device *dev,
struct mtd_info *mtd,
struct device_node *node,
struct mtd_partition **pparts)
{
const char *partname;
struct device_node *pp;
int nr_parts, i;
/* First count the subnodes */
pp = NULL;
nr_parts = 0;
while ((pp = of_get_next_child(node, pp)))
nr_parts++;
if (nr_parts == 0)
return 0;
*pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
if (!*pparts)
return -ENOMEM;
pp = NULL;
i = 0;
while ((pp = of_get_next_child(node, pp))) {
const u32 *reg;
int len;
reg = of_get_property(pp, "reg", &len);
if (!reg || (len != 2 * sizeof(u32))) {
of_node_put(pp);
dev_err(dev, "Invalid 'reg' on %s\n", node->full_name);
kfree(*pparts);
*pparts = NULL;
return -EINVAL;
}
(*pparts)[i].offset = reg[0];
(*pparts)[i].size = reg[1];
partname = of_get_property(pp, "label", &len);
if (!partname)
partname = of_get_property(pp, "name", &len);
(*pparts)[i].name = (char *)partname;
if (of_get_property(pp, "read-only", &len))
(*pparts)[i].mask_flags = MTD_WRITEABLE;
i++;
}
return nr_parts;
}
EXPORT_SYMBOL(of_mtd_parse_partitions);
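A sketch of how a platform driver might consume this parser; the caller, flash node and error handling are illustrative only (each child node carries reg = <offset size> plus optional label and read-only properties, per booting-without-of.txt):

/* Hypothetical caller: derive the partition map from the flash node's
 * children and hand it to the MTD core. A return of 0 means there were
 * no partition subnodes, and the caller would register the whole
 * device instead. */
static int example_add_of_parts(struct device *dev, struct mtd_info *mtd,
                                struct device_node *flash_np)
{
        struct mtd_partition *parts;
        int nr_parts;

        nr_parts = of_mtd_parse_partitions(dev, mtd, flash_np, &parts);
        if (nr_parts <= 0)
                return nr_parts;        /* error, or no subnodes */
        return add_mtd_partitions(mtd, parts, nr_parts);
}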


@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mtd/mtd.h>
@ -169,6 +170,18 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
return ((bsa << ONENAND_BSA_SHIFT) | bsc);
}
/**
* onenand_get_density - [DEFAULT] Get OneNAND density
* @param dev_id OneNAND device ID
*
* Get OneNAND density from device ID
*/
static inline int onenand_get_density(int dev_id)
{
int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
return (density & ONENAND_DEVICE_DENSITY_MASK);
}
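For illustration, this density value feeds the chip-size computation in onenand_probe() further down, where this->chipsize = (16 << density) << 20; a hypothetical helper makes the relation explicit:

/* Hypothetical worked example: a density field of 2 read from the
 * device ID yields 16 << 2 = 64 MiB. */
static unsigned int onenand_size_mb(int dev_id)
{
        return 16 << onenand_get_density(dev_id);
}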
/**
* onenand_command - [DEFAULT] Send command to OneNAND device
* @param mtd MTD device structure
@ -182,8 +195,7 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len)
{
struct onenand_chip *this = mtd->priv;
int value, readcmd = 0, block_cmd = 0;
int block, page;
int value, block, page;
/* Address translation */
switch (cmd) {
@ -198,7 +210,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
case ONENAND_CMD_ERASE:
case ONENAND_CMD_BUFFERRAM:
case ONENAND_CMD_OTP_ACCESS:
block_cmd = 1;
block = (int) (addr >> this->erase_shift);
page = -1;
break;
@ -240,11 +251,9 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_block_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
if (block_cmd) {
/* Select DataRAM for DDP */
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
/* Select DataRAM for DDP */
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
if (page != -1) {
@ -256,7 +265,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
readcmd = 1;
break;
default:
@ -273,12 +281,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
/* Write 'BSA, BSC' of DataRAM */
value = onenand_buffer_address(dataram, sectors, count);
this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
if (readcmd) {
/* Select DataRAM for DDP */
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
}
/* Interrupt clear */
@ -855,6 +857,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
this->command(mtd, ONENAND_CMD_READ, from, writesize);
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
if (ret == -EBADMSG)
ret = 0;
}
}
@ -913,6 +917,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
/* Now wait for load */
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
if (ret == -EBADMSG)
ret = 0;
}
/*
@ -923,12 +929,12 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
ops->retlen = read;
ops->oobretlen = oobread;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
if (ret)
return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}
@ -944,6 +950,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
struct mtd_ecc_stats stats;
int read = 0, thislen, column, oobsize;
size_t len = ops->ooblen;
mtd_oob_mode_t mode = ops->mode;
@ -977,6 +984,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
stats = mtd->ecc_stats;
while (read < len) {
cond_resched();
@ -988,18 +997,16 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
onenand_update_bufferram(mtd, from, 0);
ret = this->wait(mtd, FL_READING);
/* First copy data and check return value for ECC handling */
if (ret && ret != -EBADMSG) {
printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
break;
}
if (mode == MTD_OOB_AUTO)
onenand_transfer_auto_oob(mtd, buf, column, thislen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
if (ret) {
printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
break;
}
read += thislen;
if (read == len)
@ -1016,7 +1023,14 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
}
ops->oobretlen = read;
return ret;
if (ret)
return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
return 0;
}
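The read paths above now share one pattern: snapshot mtd->ecc_stats before the loop, defer -EBADMSG from the wait routine, and derive the final return code from the stats delta once all copying is done. A hedged restatement of that derivation, with a hypothetical helper name:

/* Report a hard I/O error first, then any uncorrectable ECC failures
 * accumulated since the snapshot, and only then success. */
static int example_read_status(struct mtd_info *mtd,
                               const struct mtd_ecc_stats *before, int ret)
{
        if (ret)
                return ret;
        if (mtd->ecc_stats.failed - before->failed)
                return -EBADMSG;
        return 0;
}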
/**
@ -1106,12 +1120,10 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
/* Initial bad block case: 0x2400 or 0x0400 */
if (ctrl & ONENAND_CTRL_ERROR) {
printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl);
/* Initial bad block case */
if (ctrl & ONENAND_CTRL_LOAD)
return ONENAND_BBT_READ_ERROR;
return ONENAND_BBT_READ_FATAL_ERROR;
return ONENAND_BBT_READ_ERROR;
}
if (interrupt & ONENAND_INT_READ) {
@ -1206,7 +1218,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
{
struct onenand_chip *this = mtd->priv;
char oobbuf[64];
u_char *oob_buf = this->oob_buf;
int status, i;
this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize);
@ -1215,9 +1227,9 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
if (status)
return status;
this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
for (i = 0; i < mtd->oobsize; i++)
if (buf[i] != 0xFF && buf[i] != oobbuf[i])
if (buf[i] != 0xFF && buf[i] != oob_buf[i])
return -EBADMSG;
return 0;
@ -1273,6 +1285,112 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
#define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0)
static void onenand_panic_wait(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
unsigned int interrupt;
int i;
for (i = 0; i < 2000; i++) {
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
if (interrupt & ONENAND_INT_MASTER)
break;
udelay(10);
}
}
/**
* onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context
* @param mtd MTD device structure
* @param to offset to write to
* @param len number of bytes to write
* @param retlen pointer to variable to store the number of written bytes
* @param buf the data to write
*
* Write with ECC
*/
static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct onenand_chip *this = mtd->priv;
int column, subpage;
int written = 0;
int ret = 0;
if (this->state == FL_PM_SUSPENDED)
return -EBUSY;
/* Wait for any existing operation to clear */
onenand_panic_wait(mtd);
DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n",
(unsigned int) to, (int) len);
/* Initialize retlen, in case of early exit */
*retlen = 0;
/* Do not allow writes past end of device */
if (unlikely((to + len) > mtd->size)) {
printk(KERN_ERR "onenand_panic_write: Attempt write to past end of device\n");
return -EINVAL;
}
/* Reject writes which are not page aligned */
if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) {
printk(KERN_ERR "onenand_panic_write: Attempt to write non-page-aligned data\n");
return -EINVAL;
}
column = to & (mtd->writesize - 1);
/* Loop until all data is written */
while (written < len) {
int thislen = min_t(int, mtd->writesize - column, len - written);
u_char *wbuf = (u_char *) buf;
this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
/* Partial page write */
subpage = thislen < mtd->writesize;
if (subpage) {
memset(this->page_buf, 0xff, mtd->writesize);
memcpy(this->page_buf + column, buf, thislen);
wbuf = this->page_buf;
}
this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize);
this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
onenand_panic_wait(mtd);
/* In partial page write we don't update bufferram */
onenand_update_bufferram(mtd, to, !ret && !subpage);
if (ONENAND_IS_2PLANE(this)) {
ONENAND_SET_BUFFERRAM1(this);
onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage);
}
if (ret) {
printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret);
break;
}
written += thislen;
if (written == len)
break;
column = 0;
to += thislen;
buf += thislen;
}
*retlen = written;
return ret;
}
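mtdoops is the intended consumer of this hook; a sketch of how such a client might prefer panic_write when the driver provides one (the helper is hypothetical, error handling elided):

/* Prefer panic_write: a regular write may sleep or take locks that
 * are no longer safe once the kernel has panicked. */
static int example_dump_record(struct mtd_info *mtd, loff_t off,
                               size_t len, const u_char *buf)
{
        size_t retlen;

        if (mtd->panic_write)
                return mtd->panic_write(mtd, off, len, &retlen, buf);
        return mtd->write(mtd, off, len, &retlen, buf);
}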
/**
* onenand_fill_auto_oob - [Internal] oob auto-placement transfer
* @param mtd MTD device structure
@ -1419,7 +1537,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
}
/* Only check verify write turn on */
ret = onenand_verify(mtd, (u_char *) wbuf, to, thislen);
ret = onenand_verify(mtd, buf, to, thislen);
if (ret) {
printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
break;
@ -1435,9 +1553,6 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
buf += thislen;
}
/* Deselect and wake up anyone waiting on the device */
onenand_release_device(mtd);
ops->retlen = written;
return ret;
@ -2148,7 +2263,7 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
*retlen = 0;
density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
density = onenand_get_density(this->device_id);
if (density < ONENAND_DEVICE_DENSITY_512Mb)
otp_pages = 20;
else
@ -2299,7 +2414,8 @@ static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
unsigned char oob_buf[64];
struct onenand_chip *this = mtd->priv;
u_char *oob_buf = this->oob_buf;
size_t retlen;
int ret;
@ -2339,7 +2455,7 @@ static void onenand_check_features(struct mtd_info *mtd)
unsigned int density, process;
/* Lock scheme depends on density and process */
density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
density = onenand_get_density(this->device_id);
process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
/* Lock scheme */
@ -2388,7 +2504,7 @@ static void onenand_print_device_info(int device, int version)
vcc = device & ONENAND_DEVICE_VCC_MASK;
demuxed = device & ONENAND_DEVICE_IS_DEMUX;
ddp = device & ONENAND_DEVICE_IS_DDP;
density = device >> ONENAND_DEVICE_DENSITY_SHIFT;
density = onenand_get_density(device);
printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
demuxed ? "" : "Muxed ",
ddp ? "(DDP)" : "",
@ -2480,7 +2596,7 @@ static int onenand_probe(struct mtd_info *mtd)
this->device_id = dev_id;
this->version_id = ver_id;
density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
density = onenand_get_density(dev_id);
this->chipsize = (16 << density) << 20;
/* Set density mask. it is used for DDP */
if (ONENAND_IS_DDP(this))
@ -2664,6 +2780,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->write = onenand_write;
mtd->read_oob = onenand_read_oob;
mtd->write_oob = onenand_write_oob;
mtd->panic_write = onenand_panic_write;
#ifdef CONFIG_MTD_ONENAND_OTP
mtd->get_fact_prot_info = onenand_get_fact_prot_info;
mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;


@ -59,16 +59,31 @@ static int parse_redboot_partitions(struct mtd_info *master,
static char nullstring[] = "unallocated";
#endif
if ( directory < 0 ) {
offset = master->size + directory * master->erasesize;
while (master->block_isbad &&
master->block_isbad(master, offset)) {
if (!offset) {
nogood:
printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
return -EIO;
}
offset -= master->erasesize;
}
} else {
offset = directory * master->erasesize;
while (master->block_isbad &&
master->block_isbad(master, offset)) {
offset += master->erasesize;
if (offset == master->size)
goto nogood;
}
}
buf = vmalloc(master->erasesize);
if (!buf)
return -ENOMEM;
if ( directory < 0 )
offset = master->size + directory*master->erasesize;
else
offset = directory*master->erasesize;
printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
master->name, offset);
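The new loops simply step over bad blocks before trusting the FIS directory; a condensed sketch of the backward scan used for a negative directory index (hypothetical helper, mirroring the logic above):

/* Walk toward offset 0 until a good block is found, or give up. */
static long find_fis_block(struct mtd_info *master, int directory)
{
        unsigned long offset = master->size + directory * master->erasesize;

        while (master->block_isbad && master->block_isbad(master, offset)) {
                if (!offset)
                        return -EIO;    /* no good block left to try */
                offset -= master->erasesize;
        }
        return offset;
}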

File diff suppressed because it is too large


@ -28,6 +28,11 @@
*
* Major and minor numbers are assigned dynamically to both UBI and volume
* character devices.
*
* Well, there is a third kind of character device - the UBI control
* character device, which allows one to manipulate UBI devices - create and
* delete them. In other words, it is used for attaching and detaching MTD
* devices.
*/
#include <linux/module.h>
@ -39,34 +44,6 @@
#include <asm/div64.h>
#include "ubi.h"
/*
* Maximum sequence numbers of UBI and volume character device IOCTLs (direct
* logical eraseblock erase is a debug-only feature).
*/
#define UBI_CDEV_IOC_MAX_SEQ 2
#ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
#define VOL_CDEV_IOC_MAX_SEQ 1
#else
#define VOL_CDEV_IOC_MAX_SEQ 2
#endif
/**
* major_to_device - get UBI device object by character device major number.
* @major: major number
*
* This function returns a pointer to the UBI device object.
*/
static struct ubi_device *major_to_device(int major)
{
int i;
for (i = 0; i < ubi_devices_cnt; i++)
if (ubi_devices[i] && ubi_devices[i]->major == major)
return ubi_devices[i];
BUG();
return NULL;
}
/**
* get_exclusive - get exclusive access to an UBI volume.
* @desc: volume descriptor
@ -124,9 +101,11 @@ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
static int vol_cdev_open(struct inode *inode, struct file *file)
{
struct ubi_volume_desc *desc;
const struct ubi_device *ubi = major_to_device(imajor(inode));
int vol_id = iminor(inode) - 1;
int mode;
int vol_id = iminor(inode) - 1, mode, ubi_num;
ubi_num = ubi_major2num(imajor(inode));
if (ubi_num < 0)
return ubi_num;
if (file->f_mode & FMODE_WRITE)
mode = UBI_READWRITE;
@ -135,7 +114,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
dbg_msg("open volume %d, mode %d", vol_id, mode);
desc = ubi_open_volume(ubi->ubi_num, vol_id, mode);
desc = ubi_open_volume(ubi_num, vol_id, mode);
if (IS_ERR(desc))
return PTR_ERR(desc);
@ -153,8 +132,15 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
if (vol->updating) {
ubi_warn("update of volume %d not finished, volume is damaged",
vol->vol_id);
ubi_assert(!vol->changing_leb);
vol->updating = 0;
vfree(vol->upd_buf);
} else if (vol->changing_leb) {
dbg_msg("only %lld of %lld bytes received for atomic LEB change"
" for volume %d:%d, cancel", vol->upd_received,
vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
vol->changing_leb = 0;
vfree(vol->upd_buf);
}
ubi_close_volume(desc);
@ -205,13 +191,13 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size;
int err, lnum, off, len, tbuf_size;
size_t count_save = count;
void *tbuf;
uint64_t tmp;
dbg_msg("read %zd bytes from offset %lld of volume %d",
count, *offp, vol_id);
count, *offp, vol->vol_id);
if (vol->updating) {
dbg_err("updating");
@ -225,7 +211,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
return 0;
if (vol->corrupted)
dbg_msg("read from corrupted volume %d", vol_id);
dbg_msg("read from corrupted volume %d", vol->vol_id);
if (*offp + count > vol->used_bytes)
count_save = count = vol->used_bytes - *offp;
@ -249,7 +235,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
if (off + len >= vol->usable_leb_size)
len = vol->usable_leb_size - off;
err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0);
err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
if (err)
break;
@ -289,13 +275,13 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0;
int lnum, off, len, tbuf_size, err = 0;
size_t count_save = count;
char *tbuf;
uint64_t tmp;
dbg_msg("requested: write %zd bytes to offset %lld of volume %u",
count, *offp, desc->vol->vol_id);
count, *offp, vol->vol_id);
if (vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
@ -339,7 +325,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
break;
}
err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len,
err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
UBI_UNKNOWN);
if (err)
break;
@ -372,22 +358,32 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
if (!vol->updating)
if (!vol->updating && !vol->changing_leb)
return vol_cdev_direct_write(file, buf, count, offp);
err = ubi_more_update_data(ubi, vol->vol_id, buf, count);
if (vol->updating)
err = ubi_more_update_data(ubi, vol, buf, count);
else
err = ubi_more_leb_change_data(ubi, vol, buf, count);
if (err < 0) {
ubi_err("cannot write %zd bytes of update data", count);
ubi_err("cannot accept more %zd bytes of data, error %d",
count, err);
return err;
}
if (err) {
/*
* Update is finished, @err contains number of actually written
* bytes now.
* The operation is finished, @err contains number of actually
* written bytes.
*/
count = err;
if (vol->changing_leb) {
revoke_exclusive(desc, UBI_READWRITE);
return count;
}
err = ubi_check_volume(ubi, vol->vol_id);
if (err < 0)
return err;
@ -402,7 +398,6 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
revoke_exclusive(desc, UBI_READWRITE);
}
*offp += count;
return count;
}
@ -447,11 +442,46 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
if (err < 0)
break;
err = ubi_start_update(ubi, vol->vol_id, bytes);
err = ubi_start_update(ubi, vol, bytes);
if (bytes == 0)
revoke_exclusive(desc, UBI_READWRITE);
break;
}
file->f_pos = 0;
/* Atomic logical eraseblock change command */
case UBI_IOCEBCH:
{
struct ubi_leb_change_req req;
err = copy_from_user(&req, argp,
sizeof(struct ubi_leb_change_req));
if (err) {
err = -EFAULT;
break;
}
if (desc->mode == UBI_READONLY ||
vol->vol_type == UBI_STATIC_VOLUME) {
err = -EROFS;
break;
}
/* Validate the request */
err = -EINVAL;
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
req.bytes < 0 || req.bytes > vol->usable_leb_size)
break;
if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
req.dtype != UBI_UNKNOWN)
break;
err = get_exclusive(desc);
if (err < 0)
break;
err = ubi_start_leb_change(ubi, vol, &req);
if (req.bytes == 0)
revoke_exclusive(desc, UBI_READWRITE);
break;
}
@ -467,7 +497,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
break;
}
if (desc->mode == UBI_READONLY) {
if (desc->mode == UBI_READONLY ||
vol->vol_type == UBI_STATIC_VOLUME) {
err = -EROFS;
break;
}
@ -477,13 +508,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
break;
}
if (vol->vol_type != UBI_DYNAMIC_VOLUME) {
err = -EROFS;
break;
}
dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum);
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
break;
@ -580,9 +606,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
ubi = major_to_device(imajor(inode));
if (IS_ERR(ubi))
return PTR_ERR(ubi);
ubi = ubi_get_by_major(imajor(inode));
if (!ubi)
return -ENODEV;
switch (cmd) {
/* Create volume command */
@ -591,8 +617,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_mkvol_req req;
dbg_msg("create volume");
err = copy_from_user(&req, argp,
sizeof(struct ubi_mkvol_req));
err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
if (err) {
err = -EFAULT;
break;
@ -604,7 +629,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
req.name[req.name_len] = '\0';
mutex_lock(&ubi->volumes_mutex);
err = ubi_create_volume(ubi, &req);
mutex_unlock(&ubi->volumes_mutex);
if (err)
break;
@ -633,10 +660,16 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
break;
}
mutex_lock(&ubi->volumes_mutex);
err = ubi_remove_volume(desc);
if (err)
ubi_close_volume(desc);
mutex_unlock(&ubi->volumes_mutex);
/*
* The volume is deleted (unless an error occurred), and the
* 'struct ubi_volume' object will be freed when
* 'ubi_close_volume()' calls 'put_device()'.
*/
ubi_close_volume(desc);
break;
}
@ -648,8 +681,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_rsvol_req req;
dbg_msg("re-size volume");
err = copy_from_user(&req, argp,
sizeof(struct ubi_rsvol_req));
err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
if (err) {
err = -EFAULT;
break;
@ -669,7 +701,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
pebs = !!do_div(tmp, desc->vol->usable_leb_size);
pebs += tmp;
mutex_lock(&ubi->volumes_mutex);
err = ubi_resize_volume(desc, pebs);
mutex_unlock(&ubi->volumes_mutex);
ubi_close_volume(desc);
break;
}
@ -679,9 +713,93 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
break;
}
ubi_put_device(ubi);
return err;
}
static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
switch (cmd) {
/* Attach an MTD device command */
case UBI_IOCATT:
{
struct ubi_attach_req req;
struct mtd_info *mtd;
dbg_msg("attach MTD device");
err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
if (err) {
err = -EFAULT;
break;
}
if (req.mtd_num < 0 ||
(req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
err = -EINVAL;
break;
}
mtd = get_mtd_device(NULL, req.mtd_num);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
break;
}
/*
* Note, further request verification is done by
* 'ubi_attach_mtd_dev()'.
*/
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
else
/* @err contains UBI device number */
err = put_user(err, (__user int32_t *)argp);
break;
}
/* Detach an MTD device command */
case UBI_IOCDET:
{
int ubi_num;
dbg_msg("dettach MTD device");
err = get_user(ubi_num, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
mutex_lock(&ubi_devices_mutex);
err = ubi_detach_mtd_dev(ubi_num, 0);
mutex_unlock(&ubi_devices_mutex);
break;
}
default:
err = -ENOTTY;
break;
}
return err;
}
/* UBI control character device operations */
struct file_operations ubi_ctrl_cdev_operations = {
.ioctl = ctrl_cdev_ioctl,
.owner = THIS_MODULE,
};
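From userspace the new control node is driven with ioctl(); a hedged sketch of attaching mtd0 (the /dev/ubi_ctrl path is the udev-created name and is illustrative; struct ubi_attach_req fields per include/mtd/ubi-user.h; on success the handler above writes the assigned device number back through the same pointer, i.e. into the first field, req.ubi_num):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

/* Hypothetical userspace client: attach mtd0 and let UBI pick the
 * device number (UBI_DEV_NUM_AUTO). */
static int attach_mtd0(void)
{
        struct ubi_attach_req req = {
                .ubi_num = UBI_DEV_NUM_AUTO,
                .mtd_num = 0,
        };
        int fd, err;

        fd = open("/dev/ubi_ctrl", O_RDWR);
        if (fd < 0)
                return -1;
        err = ioctl(fd, UBI_IOCATT, &req);
        close(fd);
        return err ? -1 : req.ubi_num;  /* assigned UBI device number */
}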
/* UBI character device operations */
struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,


@ -39,8 +39,9 @@
#ifdef CONFIG_MTD_UBI_DEBUG_MSG
/* Generic debugging message */
#define dbg_msg(fmt, ...) \
printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__)
#define dbg_msg(fmt, ...) \
printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
current->pid, __FUNCTION__, ##__VA_ARGS__)
#define ubi_dbg_dump_stack() dump_stack()
@ -76,36 +77,28 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
/* Messages from the eraseblock association unit */
#define dbg_eba(fmt, ...) \
printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \
##__VA_ARGS__)
#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_eba(fmt, ...) ({})
#endif
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
/* Messages from the wear-leveling unit */
#define dbg_wl(fmt, ...) \
printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \
##__VA_ARGS__)
#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_wl(fmt, ...) ({})
#endif
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
/* Messages from the input/output unit */
#define dbg_io(fmt, ...) \
printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \
##__VA_ARGS__)
#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_io(fmt, ...) ({})
#endif
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
/* Initialization and build messages */
#define dbg_bld(fmt, ...) \
printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \
##__VA_ARGS__)
#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_bld(fmt, ...) ({})
#endif


@ -31,7 +31,7 @@
* logical eraseblock it is locked for reading or writing. The per-logical
* eraseblock locking is implemented by means of the lock tree. The lock tree
* is an RB-tree which refers all the currently locked logical eraseblocks. The
* lock tree elements are &struct ltree_entry objects. They are indexed by
* lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
* (@vol_id, @lnum) pairs.
*
* EBA also maintains the global sequence counter which is incremented each
@ -49,29 +49,6 @@
/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1
/**
* struct ltree_entry - an entry in the lock tree.
* @rb: links RB-tree nodes
* @vol_id: volume ID of the locked logical eraseblock
* @lnum: locked logical eraseblock number
* @users: how many tasks are using this logical eraseblock or wait for it
* @mutex: read/write mutex to implement read/write access serialization to
* the (@vol_id, @lnum) logical eraseblock
*
* When a logical eraseblock is being locked - corresponding &struct ltree_entry
* object is inserted to the lock tree (@ubi->ltree).
*/
struct ltree_entry {
struct rb_node rb;
int vol_id;
int lnum;
int users;
struct rw_semaphore mutex;
};
/* Slab cache for lock-tree entries */
static struct kmem_cache *ltree_slab;
/**
* next_sqnum - get next sequence number.
* @ubi: UBI device description object
@ -101,7 +78,7 @@ static unsigned long long next_sqnum(struct ubi_device *ubi)
*/
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
if (vol_id == UBI_LAYOUT_VOL_ID)
if (vol_id == UBI_LAYOUT_VOLUME_ID)
return UBI_LAYOUT_VOLUME_COMPAT;
return 0;
}
@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
* @vol_id: volume ID
* @lnum: logical eraseblock number
*
* This function returns a pointer to the corresponding &struct ltree_entry
* This function returns a pointer to the corresponding &struct ubi_ltree_entry
* object if the logical eraseblock is locked and %NULL if it is not.
* @ubi->ltree_lock has to be locked.
*/
static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
int lnum)
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
int lnum)
{
struct rb_node *p;
p = ubi->ltree.rb_node;
while (p) {
struct ltree_entry *le;
struct ubi_ltree_entry *le;
le = rb_entry(p, struct ltree_entry, rb);
le = rb_entry(p, struct ubi_ltree_entry, rb);
if (vol_id < le->vol_id)
p = p->rb_left;
@ -155,15 +132,17 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
* Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
* failed.
*/
static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
int lnum)
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
int vol_id, int lnum)
{
struct ltree_entry *le, *le1, *le_free;
struct ubi_ltree_entry *le, *le1, *le_free;
le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
if (!le)
return ERR_PTR(-ENOMEM);
le->users = 0;
init_rwsem(&le->mutex);
le->vol_id = vol_id;
le->lnum = lnum;
@ -189,7 +168,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
p = &ubi->ltree.rb_node;
while (*p) {
parent = *p;
le1 = rb_entry(parent, struct ltree_entry, rb);
le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
if (vol_id < le1->vol_id)
p = &(*p)->rb_left;
@ -211,7 +190,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
spin_unlock(&ubi->ltree_lock);
if (le_free)
kmem_cache_free(ltree_slab, le_free);
kfree(le_free);
return le;
}
@ -227,7 +206,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
*/
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
struct ltree_entry *le;
struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
@ -245,7 +224,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
int free = 0;
struct ltree_entry *le;
struct ubi_ltree_entry *le;
spin_lock(&ubi->ltree_lock);
le = ltree_lookup(ubi, vol_id, lnum);
@ -259,7 +238,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
up_read(&le->mutex);
if (free)
kmem_cache_free(ltree_slab, le);
kfree(le);
}
/**
@ -273,7 +252,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
*/
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
struct ltree_entry *le;
struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
@ -282,6 +261,44 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
return 0;
}
/**
* leb_write_trylock - try to lock logical eraseblock for writing.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
*
* This function locks a logical eraseblock for writing if there is no
* contention and does nothing if there is contention. Returns %0 in case of
* success, %1 in case of contention, and a negative error code in case of
* failure.
*/
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
int free;
struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
return PTR_ERR(le);
if (down_write_trylock(&le->mutex))
return 0;
/* Contention, cancel */
spin_lock(&ubi->ltree_lock);
le->users -= 1;
ubi_assert(le->users >= 0);
if (le->users == 0) {
rb_erase(&le->rb, &ubi->ltree);
free = 1;
} else
free = 0;
spin_unlock(&ubi->ltree_lock);
if (free)
kfree(le);
return 1;
}
/**
* leb_write_unlock - unlock logical eraseblock.
* @ubi: UBI device description object
@ -291,7 +308,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
int free;
struct ltree_entry *le;
struct ubi_ltree_entry *le;
spin_lock(&ubi->ltree_lock);
le = ltree_lookup(ubi, vol_id, lnum);
@ -306,23 +323,23 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
up_write(&le->mutex);
if (free)
kmem_cache_free(ltree_slab, le);
kfree(le);
}
/**
* ubi_eba_unmap_leb - un-map logical eraseblock.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
* @lnum: logical eraseblock number
*
* This function un-maps logical eraseblock @lnum and schedules corresponding
* physical eraseblock for erasure. Returns zero in case of success and a
* negative error code in case of failure.
*/
int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum)
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum)
{
int idx = vol_id2idx(ubi, vol_id), err, pnum;
struct ubi_volume *vol = ubi->volumes[idx];
int err, pnum, vol_id = vol->vol_id;
if (ubi->ro_mode)
return -EROFS;
@ -349,7 +366,7 @@ out_unlock:
/**
* ubi_eba_read_leb - read data.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: buffer to store the read data
* @offset: offset from where to read
@ -365,12 +382,11 @@ out_unlock:
* returned for any volume type if an ECC error was detected by the MTD device
* driver. Other negative error codes may be returned in case of other errors.
*/
int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
int offset, int len, int check)
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check)
{
int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
int err, pnum, scrub = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
struct ubi_volume *vol = ubi->volumes[idx];
uint32_t uninitialized_var(crc);
err = leb_read_lock(ubi, vol_id, lnum);
@ -578,7 +594,7 @@ write_error:
/**
* ubi_eba_write_leb - write data to dynamic volume.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: the data to write
* @offset: offset within the logical eraseblock where to write
@ -586,15 +602,14 @@ write_error:
* @dtype: data type
*
* This function writes data to logical eraseblock @lnum of a dynamic volume
* @vol_id. Returns zero in case of success and a negative error code in case
* @vol. Returns zero in case of success and a negative error code in case
* of failure. In case of error, it is possible that something was still
* written to the flash media, but may be some garbage.
*/
int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
const void *buf, int offset, int len, int dtype)
{
int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0;
struct ubi_volume *vol = ubi->volumes[idx];
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
if (ubi->ro_mode)
@ -613,7 +628,8 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
if (err) {
ubi_warn("failed to write data to PEB %d", pnum);
if (err == -EIO && ubi->bad_allowed)
err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len);
err = recover_peb(ubi, pnum, vol_id, lnum, buf,
offset, len);
if (err)
ubi_ro_mode(ubi);
}
@ -656,11 +672,14 @@ retry:
goto write_error;
}
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, "
"PEB %d", len, offset, vol_id, lnum, pnum);
goto write_error;
if (len) {
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
ubi_warn("failed to write %d bytes at offset %d of "
"LEB %d:%d, PEB %d", len, offset, vol_id,
lnum, pnum);
goto write_error;
}
}
vol->eba_tbl[lnum] = pnum;
@ -698,7 +717,7 @@ write_error:
/**
* ubi_eba_write_leb_st - write data to static volume.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
@ -706,7 +725,7 @@ write_error:
* @used_ebs: how many logical eraseblocks will this volume contain
*
* This function writes data to logical eraseblock @lnum of static volume
* @vol_id. The @used_ebs argument should contain total number of logical
* @vol. The @used_ebs argument should contain total number of logical
* eraseblock in this static volume.
*
* When writing to the last logical eraseblock, the @len argument doesn't have
@ -718,12 +737,11 @@ write_error:
* volumes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
const void *buf, int len, int dtype, int used_ebs)
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len, int dtype,
int used_ebs)
{
int err, pnum, tries = 0, data_size = len;
int idx = vol_id2idx(ubi, vol_id);
struct ubi_volume *vol = ubi->volumes[idx];
int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
@ -819,7 +837,7 @@ write_error:
/*
* ubi_eba_atomic_leb_change - change logical eraseblock atomically.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
@ -834,17 +852,27 @@ write_error:
* UBI reserves one LEB for the "atomic LEB change" operation, so only one
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
const void *buf, int len, int dtype)
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len, int dtype)
{
int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id);
struct ubi_volume *vol = ubi->volumes[idx];
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
if (ubi->ro_mode)
return -EROFS;
if (len == 0) {
/*
* Special case when data length is zero. In this case the LEB
* has to be unmapped and mapped somewhere else.
*/
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
}
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
@ -927,20 +955,6 @@ write_error:
goto retry;
}
/**
* ltree_entry_ctor - lock tree entries slab cache constructor.
* @obj: the lock-tree entry to construct
* @cache: the lock tree entry slab cache
* @flags: constructor flags
*/
static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
{
struct ltree_entry *le = obj;
le->users = 0;
init_rwsem(&le->mutex);
}
/**
* ubi_eba_copy_leb - copy logical eraseblock.
* @ubi: UBI device description object
@ -950,14 +964,16 @@ static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
*
* This function copies logical eraseblock from physical eraseblock @from to
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
* function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation
* was canceled because bit-flips were detected at the target PEB, and a
* negative error code in case of failure.
* function. Returns:
* o %0 in case of success;
* o %1 if the operation was canceled and should be tried later (e.g.,
* because a bit-flip was detected at the target PEB);
* o %2 if the volume is being deleted and this LEB should not be moved.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr)
{
int err, vol_id, lnum, data_size, aldata_size, pnum, idx;
int err, vol_id, lnum, data_size, aldata_size, idx;
struct ubi_volume *vol;
uint32_t crc;
@ -973,51 +989,67 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
data_size = aldata_size =
ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
/*
* We do not want anybody to write to this logical eraseblock while we
* are moving it, so we lock it.
*/
err = leb_write_lock(ubi, vol_id, lnum);
if (err)
return err;
mutex_lock(&ubi->buf_mutex);
/*
* But the logical eraseblock might have been put by this time.
* Cancel if it is true.
*/
idx = vol_id2idx(ubi, vol_id);
/*
* We may race with volume deletion/re-size, so we have to hold
* @ubi->volumes_lock.
*/
spin_lock(&ubi->volumes_lock);
/*
* Note, we may race with volume deletion, which means that the volume
* this logical eraseblock belongs to might be being deleted. Since the
* volume deletion un-maps all the volume's logical eraseblocks, the
* deleting task will block in 'ubi_wl_put_peb()' and wait for the WL
* worker to finish.
*/
vol = ubi->volumes[idx];
if (!vol) {
dbg_eba("volume %d was removed meanwhile", vol_id);
/* No need to do further work, cancel */
dbg_eba("volume %d is being removed, cancel", vol_id);
spin_unlock(&ubi->volumes_lock);
goto out_unlock;
}
pnum = vol->eba_tbl[lnum];
if (pnum != from) {
dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
"PEB %d, cancel", vol_id, lnum, from, pnum);
spin_unlock(&ubi->volumes_lock);
goto out_unlock;
return 2;
}
spin_unlock(&ubi->volumes_lock);
/* OK, now the LEB is locked and we can safely start moving it */
/*
* We do not want anybody to write to this logical eraseblock while we
* are moving it, so lock it.
*
* Note, we are using non-waiting locking here, because we cannot sleep
* on the LEB, since it may cause deadlocks. Indeed, imagine a task is
* unmapping the LEB which is mapped to the PEB we are going to move
* (@from). This task locks the LEB and goes to sleep in the
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go to sleep on the LEB lock. So, if the
* LEB is already locked, we just do not move it and return %1.
*/
err = leb_write_trylock(ubi, vol_id, lnum);
if (err) {
dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
return err;
}
/*
* The LEB might have been put meanwhile, and the task which put it is
* probably waiting on @ubi->move_mutex. No need to continue the work,
* cancel it.
*/
if (vol->eba_tbl[lnum] != from) {
dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
"PEB %d, cancel", vol_id, lnum, from,
vol->eba_tbl[lnum]);
err = 1;
goto out_unlock_leb;
}
/*
* OK, now the LEB is locked and we can safely start moving it. Since
* this function utilizes the @ubi->peb_buf1 buffer, which is shared
* with some other functions, lock the buffer by taking the
* @ubi->buf_mutex.
*/
mutex_lock(&ubi->buf_mutex);
dbg_eba("read %d bytes of data", aldata_size);
err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
ubi_warn("error %d while reading data from PEB %d",
err, from);
goto out_unlock;
goto out_unlock_buf;
}
/*
@ -1053,7 +1085,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err)
goto out_unlock;
goto out_unlock_buf;
cond_resched();
@ -1062,13 +1094,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
if (err) {
if (err != UBI_IO_BITFLIPS)
ubi_warn("cannot read VID header back from PEB %d", to);
goto out_unlock;
else
err = 1;
goto out_unlock_buf;
}
if (data_size > 0) {
err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
if (err)
goto out_unlock;
goto out_unlock_buf;
cond_resched();
@ -1082,7 +1116,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
if (err != UBI_IO_BITFLIPS)
ubi_warn("cannot read data back from PEB %d",
to);
goto out_unlock;
else
err = 1;
goto out_unlock_buf;
}
cond_resched();
@ -1090,15 +1126,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
ubi_warn("read data back from PEB %d - it is different",
to);
goto out_unlock;
goto out_unlock_buf;
}
}
ubi_assert(vol->eba_tbl[lnum] == from);
vol->eba_tbl[lnum] = to;
out_unlock:
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
leb_write_unlock(ubi, vol_id, lnum);
return err;
}
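A sketch of how a caller is expected to act on the new tri-state return convention (the real handler lives in the wear-leveling unit and is not part of this hunk; the helper name is hypothetical):

/* Map ubi_eba_copy_leb()'s return convention onto caller actions. */
static int example_handle_move(struct ubi_device *ubi, int from, int to,
                               struct ubi_vid_hdr *vid_hdr)
{
        int err = ubi_eba_copy_leb(ubi, from, to, vid_hdr);

        switch (err) {
        case 0:
                return 0;               /* LEB moved and re-mapped */
        case 1:
                return -EAGAIN;         /* contention/bit-flips: retry later */
        case 2:
                return 0;               /* volume being deleted: drop move */
        default:
                return err;             /* real I/O or memory failure */
        }
}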
@ -1125,14 +1162,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
if (ubi_devices_cnt == 0) {
ltree_slab = kmem_cache_create("ubi_ltree_slab",
sizeof(struct ltree_entry), 0,
0, &ltree_entry_ctor);
if (!ltree_slab)
return -ENOMEM;
}
ubi->global_sqnum = si->max_sqnum + 1;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
@ -1168,6 +1197,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
}
}
if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, EBA_RESERVED_PEBS);
err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= EBA_RESERVED_PEBS;
ubi->rsvd_pebs += EBA_RESERVED_PEBS;
if (ubi->bad_allowed) {
ubi_calculate_reserved(ubi);
@ -1184,15 +1222,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
}
if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, EBA_RESERVED_PEBS);
err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= EBA_RESERVED_PEBS;
ubi->rsvd_pebs += EBA_RESERVED_PEBS;
dbg_eba("EBA unit is initialized");
return 0;
@ -1202,8 +1231,6 @@ out_free:
continue;
kfree(ubi->volumes[i]->eba_tbl);
}
if (ubi_devices_cnt == 0)
kmem_cache_destroy(ltree_slab);
return err;
}
@ -1222,6 +1249,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
continue;
kfree(ubi->volumes[i]->eba_tbl);
}
if (ubi_devices_cnt == 1)
kmem_cache_destroy(ltree_slab);
}


@ -129,8 +129,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
if (to_read > total_read)
to_read = total_read;
err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs,
to_read, 0);
err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0);
if (err)
break;
@ -187,8 +186,8 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (to_write > total_written)
to_write = total_written;
err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs,
to_write, UBI_UNKNOWN);
err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write,
UBI_UNKNOWN);
if (err)
break;
@ -237,7 +236,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
return -EROFS;
for (i = 0; i < count; i++) {
err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i);
err = ubi_eba_unmap_leb(ubi, vol, lnum + i);
if (err)
goto out_err;
}


@ -173,6 +173,16 @@ retry:
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
ubi_dbg_dump_stack();
/*
* The driver should never return -EBADMSG if it failed to read
* all the requested data. But some buggy drivers might do
* this, so we change it to -EIO.
*/
if (read != len && err == -EBADMSG) {
ubi_assert(0);
err = -EIO;
}
} else {
ubi_assert(len == read);


@ -30,23 +30,27 @@
* @ubi_num: UBI device number
* @di: the information is stored here
*
* This function returns %0 in case of success and a %-ENODEV if there is no
* such UBI device.
* This function returns %0 in case of success, %-EINVAL if the UBI device
* number is invalid, and %-ENODEV if there is no such UBI device.
*/
int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
{
const struct ubi_device *ubi;
struct ubi_device *ubi;
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES ||
!ubi_devices[ubi_num])
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return -EINVAL;
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
ubi = ubi_devices[ubi_num];
di->ubi_num = ubi->ubi_num;
di->leb_size = ubi->leb_size;
di->min_io_size = ubi->min_io_size;
di->ro_mode = ubi->ro_mode;
di->cdev = MKDEV(ubi->major, 0);
di->cdev = ubi->cdev.dev;
ubi_put_device(ubi);
return 0;
}
EXPORT_SYMBOL_GPL(ubi_get_device_info);
@ -73,7 +77,7 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
vi->usable_leb_size = vol->usable_leb_size;
vi->name_len = vol->name_len;
vi->name = vol->name;
vi->cdev = MKDEV(ubi->major, vi->vol_id + 1);
vi->cdev = vol->cdev.dev;
}
EXPORT_SYMBOL_GPL(ubi_get_volume_info);
@ -104,37 +108,39 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
err = -ENODEV;
if (ubi_num < 0)
return ERR_PTR(err);
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return ERR_PTR(-EINVAL);
ubi = ubi_devices[ubi_num];
if (!try_module_get(THIS_MODULE))
return ERR_PTR(err);
if (ubi_num >= UBI_MAX_DEVICES || !ubi)
goto out_put;
err = -EINVAL;
if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
goto out_put;
if (mode != UBI_READONLY && mode != UBI_READWRITE &&
mode != UBI_EXCLUSIVE)
goto out_put;
return ERR_PTR(-EINVAL);
/*
* First of all, we have to get the UBI device to prevent its removal.
*/
ubi = ubi_get_device(ubi_num);
if (!ubi)
return ERR_PTR(-ENODEV);
if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
err = -EINVAL;
goto out_put_ubi;
}
desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
if (!desc) {
err = -ENOMEM;
goto out_put;
goto out_put_ubi;
}
err = -ENODEV;
if (!try_module_get(THIS_MODULE))
goto out_free;
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
if (!vol) {
err = -ENODEV;
if (!vol)
goto out_unlock;
}
err = -EBUSY;
switch (mode) {
@ -156,21 +162,19 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
vol->exclusive = 1;
break;
}
get_device(&vol->dev);
vol->ref_count += 1;
spin_unlock(&ubi->volumes_lock);
desc->vol = vol;
desc->mode = mode;
/*
* To prevent simultaneous checks of the same volume we use @vtbl_mutex,
* although it is not the purpose it was introduced for.
*/
mutex_lock(&ubi->vtbl_mutex);
mutex_lock(&ubi->ckvol_mutex);
if (!vol->checked) {
/* This is the first open - check the volume */
err = ubi_check_volume(ubi, vol_id);
if (err < 0) {
mutex_unlock(&ubi->vtbl_mutex);
mutex_unlock(&ubi->ckvol_mutex);
ubi_close_volume(desc);
return ERR_PTR(err);
}
@ -181,14 +185,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
}
vol->checked = 1;
}
mutex_unlock(&ubi->vtbl_mutex);
mutex_unlock(&ubi->ckvol_mutex);
return desc;
out_unlock:
spin_unlock(&ubi->volumes_lock);
kfree(desc);
out_put:
module_put(THIS_MODULE);
out_free:
kfree(desc);
out_put_ubi:
ubi_put_device(ubi);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ubi_open_volume);
@ -205,8 +212,8 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
int mode)
{
int i, vol_id = -1, len;
struct ubi_volume_desc *ret;
struct ubi_device *ubi;
struct ubi_volume_desc *ret;
dbg_msg("open volume %s, mode %d", name, mode);
@ -217,14 +224,12 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
if (len > UBI_VOL_NAME_MAX)
return ERR_PTR(-EINVAL);
ret = ERR_PTR(-ENODEV);
if (!try_module_get(THIS_MODULE))
return ret;
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return ERR_PTR(-EINVAL);
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num])
goto out_put;
ubi = ubi_devices[ubi_num];
ubi = ubi_get_device(ubi_num);
if (!ubi)
return ERR_PTR(-ENODEV);
spin_lock(&ubi->volumes_lock);
/* Walk all volumes of this UBI device */
@ -238,13 +243,16 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
}
spin_unlock(&ubi->volumes_lock);
if (vol_id < 0)
goto out_put;
if (vol_id >= 0)
ret = ubi_open_volume(ubi_num, vol_id, mode);
else
ret = ERR_PTR(-ENODEV);
ret = ubi_open_volume(ubi_num, vol_id, mode);
out_put:
module_put(THIS_MODULE);
/*
* We should put the UBI device even in case of success, because
* 'ubi_open_volume()' took a reference as well.
*/
ubi_put_device(ubi);
return ret;
}
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
@ -256,10 +264,11 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
void ubi_close_volume(struct ubi_volume_desc *desc)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
spin_lock(&vol->ubi->volumes_lock);
spin_lock(&ubi->volumes_lock);
switch (desc->mode) {
case UBI_READONLY:
vol->readers -= 1;
@ -270,9 +279,12 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
case UBI_EXCLUSIVE:
vol->exclusive = 0;
}
spin_unlock(&vol->ubi->volumes_lock);
vol->ref_count -= 1;
spin_unlock(&ubi->volumes_lock);
kfree(desc);
put_device(&vol->dev);
ubi_put_device(ubi);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(ubi_close_volume);
@ -332,7 +344,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
if (len == 0)
return 0;
err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check);
err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
ubi_warn("mark volume %d as corrupted", vol_id);
vol->corrupted = 1;
@ -399,7 +411,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
if (len == 0)
return 0;
return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype);
return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
}
EXPORT_SYMBOL_GPL(ubi_leb_write);
@ -448,7 +460,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
if (len == 0)
return 0;
return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype);
return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
}
EXPORT_SYMBOL_GPL(ubi_leb_change);
@ -468,9 +480,9 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int err, vol_id = vol->vol_id;
int err;
dbg_msg("erase LEB %d:%d", vol_id, lnum);
dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
@ -481,7 +493,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
if (vol->upd_marker)
return -EBADF;
err = ubi_eba_unmap_leb(ubi, vol_id, lnum);
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
@ -529,9 +541,8 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int vol_id = vol->vol_id;
dbg_msg("unmap LEB %d:%d", vol_id, lnum);
dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
@ -542,10 +553,54 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
if (vol->upd_marker)
return -EBADF;
return ubi_eba_unmap_leb(ubi, vol_id, lnum);
return ubi_eba_unmap_leb(ubi, vol, lnum);
}
EXPORT_SYMBOL_GPL(ubi_leb_unmap);
/**
* ubi_leb_map - map logical eraseblock to a physical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
* @dtype: expected data type
*
* This function maps an un-mapped logical eraseblock @lnum to a physical
* eraseblock. This means that after a successful invocation of this
* function the logical eraseblock @lnum will be empty (contain only %0xFF
* bytes) and be mapped to a physical eraseblock, even if an unclean reboot
* happens.
*
* This function returns zero in case of success, %-EBADF if the volume is
* damaged because of an interrupted update, %-EBADMSG if the logical
* eraseblock is already mapped, and other negative error codes in case of
* other failures.
*/
int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (lnum < 0 || lnum >= vol->reserved_pebs)
return -EINVAL;
if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
dtype != UBI_UNKNOWN)
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
if (vol->eba_tbl[lnum] >= 0)
return -EBADMSG;
return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
}
EXPORT_SYMBOL_GPL(ubi_leb_map);
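A sketch of in-kernel use of the new API (hypothetical client; device, volume and LEB numbers are illustrative, and the -EBADMSG handling is one possible policy):

/* Pre-map LEB 0 of volume 0 on UBI device 0 so it stays mapped and
 * reads back as all-0xFF even across an unclean reboot. Needs
 * linux/err.h and linux/mtd/ubi.h. */
static int example_premap_leb(void)
{
        struct ubi_volume_desc *desc;
        int err;

        desc = ubi_open_volume(0, 0, UBI_READWRITE);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        err = ubi_leb_map(desc, 0, UBI_UNKNOWN);
        if (err == -EBADMSG)
                err = 0;        /* already mapped: treat as success here */

        ubi_close_volume(desc);
        return err;
}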
/**
* ubi_is_mapped - check if logical eraseblock is mapped.
* @desc: volume descriptor


@ -79,7 +79,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
else
size = vol->usable_leb_size;
err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1);
err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
if (err) {
if (err == -EBADMSG)
err = 1;


@ -286,9 +286,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
* FIXME: but this is anyway obsolete and will be removed at
* some point.
*/
dbg_bld("using old crappy leb_ver stuff");
if (v1 == v2) {
ubi_err("PEB %d and PEB %d have the same version %lld",
seb->pnum, pnum, v1);
return -EINVAL;
}
abs = v1 - v2;
if (abs < 0)
abs = -abs;
@ -390,7 +395,6 @@ out_free_buf:
vfree(buf);
out_free_vidh:
ubi_free_vid_hdr(ubi, vh);
ubi_assert(err < 0);
return err;
}
@ -769,7 +773,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
*/
static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
{
long long ec;
long long uninitialized_var(ec);
int err, bitflips = 0, vol_id, ec_corr = 0;
dbg_bld("scan PEB %d", pnum);
@ -854,7 +858,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
}
vol_id = be32_to_cpu(vidh->vol_id);
if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) {
if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */


@ -94,8 +94,43 @@ enum {
UBI_IO_BITFLIPS
};
extern int ubi_devices_cnt;
extern struct ubi_device *ubi_devices[];
/**
* struct ubi_wl_entry - wear-leveling entry.
* @rb: link in the corresponding RB-tree
* @ec: erase counter
* @pnum: physical eraseblock number
*
* This data structure is used in the WL unit. Each physical eraseblock has a
* corresponding &struct ubi_wl_entry object which may be kept in different
* RB-trees. See WL unit for details.
*/
struct ubi_wl_entry {
struct rb_node rb;
int ec;
int pnum;
};
/**
* struct ubi_ltree_entry - an entry in the lock tree.
* @rb: links RB-tree nodes
* @vol_id: volume ID of the locked logical eraseblock
* @lnum: locked logical eraseblock number
* @users: how many tasks are using this logical eraseblock or waiting for it
* @mutex: read/write mutex to implement read/write access serialization to
* the (@vol_id, @lnum) logical eraseblock
*
* This data structure is used in the EBA unit to implement per-LEB locking.
* When a logical eraseblock is locked, a corresponding
* &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree).
* See EBA unit for details.
*/
struct ubi_ltree_entry {
struct rb_node rb;
int vol_id;
int lnum;
int users;
struct rw_semaphore mutex;
};
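The read/write semaphore gives the usual serialization idiom, sketched below with hypothetical helper names - ltree_get()/ltree_put() stand in for the eba.c internals (which this hunk does not show) that look up or create the entry for (@vol_id, @lnum) and maintain @users:

/*
 * Sketch of the per-LEB serialization the lock tree provides: readers
 * of a LEB take @mutex in read mode and may run in parallel, writers
 * take it in write mode and are exclusive.
 */
static int leb_read_locked(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le = ltree_get(ubi, vol_id, lnum);

	if (IS_ERR(le))
		return PTR_ERR(le);

	down_read(&le->mutex);	/* parallel readers are fine */
	/* ... read the (@vol_id, @lnum) LEB ... */
	up_read(&le->mutex);	/* writers use down_write() instead */

	ltree_put(ubi, le);	/* drops @users, may free the entry */
	return 0;
}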
struct ubi_volume_desc;
@ -105,11 +140,10 @@ struct ubi_volume_desc;
* @cdev: character device object to create character device
* @ubi: reference to the UBI device description object
* @vol_id: volume ID
* @ref_count: volume reference count
* @readers: number of users holding this volume in read-only mode
* @writers: number of users holding this volume in read-write mode
* @exclusive: whether somebody holds this volume in exclusive mode
* @removed: if the volume was removed
* @checked: if this static volume was checked
*
* @reserved_pebs: how many physical eraseblocks are reserved for this volume
* @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
@ -117,21 +151,30 @@ struct ubi_volume_desc;
* @used_ebs: how many logical eraseblocks in this volume contain data
* @last_eb_bytes: how many bytes are stored in the last logical eraseblock
* @used_bytes: how many bytes of data this volume contains
* @upd_marker: non-zero if the update marker is set for this volume
* @corrupted: non-zero if the volume is corrupted (static volumes only)
* @alignment: volume alignment
* @data_pad: how many bytes are not used at the end of physical eraseblocks to
* satisfy the requested alignment
* satisfy the requested alignment
* @name_len: volume name length
* @name: volume name
*
* @updating: whether the volume is being updated
* @upd_ebs: how many eraseblocks are expected to be updated
* @upd_bytes: how many bytes are expected to be received
* @upd_received: how many update bytes were already received
* @upd_buf: update buffer which is used to collect update data
* @ch_lnum: LEB number which is being changed by the atomic LEB change
* operation
* @ch_dtype: data persistency type which is being changed by the atomic LEB
* change operation
* @upd_bytes: how many bytes are expected to be received for volume update or
* atomic LEB change
* @upd_received: how many bytes were already received for volume update or
* atomic LEB change
* @upd_buf: update buffer which is used to collect update data or data for
* atomic LEB change
*
* @eba_tbl: EBA table of this volume (LEB->PEB mapping)
* @checked: %1 if this static volume was checked
* @corrupted: %1 if the volume is corrupted (static volumes only)
* @upd_marker: %1 if the update marker is set for this volume
* @updating: %1 if the volume is being updated
* @changing_leb: %1 if the atomic LEB change ioctl command is in progress
*
* @gluebi_desc: gluebi UBI volume descriptor
* @gluebi_refcount: reference count of the gluebi MTD device
@ -150,11 +193,10 @@ struct ubi_volume {
struct cdev cdev;
struct ubi_device *ubi;
int vol_id;
int ref_count;
int readers;
int writers;
int exclusive;
int removed;
int checked;
int reserved_pebs;
int vol_type;
@ -162,23 +204,31 @@ struct ubi_volume {
int used_ebs;
int last_eb_bytes;
long long used_bytes;
int upd_marker;
int corrupted;
int alignment;
int data_pad;
int name_len;
char name[UBI_VOL_NAME_MAX+1];
int updating;
int upd_ebs;
int ch_lnum;
int ch_dtype;
long long upd_bytes;
long long upd_received;
void *upd_buf;
int *eba_tbl;
int checked:1;
int corrupted:1;
int upd_marker:1;
int updating:1;
int changing_leb:1;
#ifdef CONFIG_MTD_UBI_GLUEBI
/* Gluebi-related stuff may be compiled out */
/*
* Gluebi-related stuff may be compiled out.
* TODO: this should not be built into UBI but should be a separate
* ubimtd driver which works on top of UBI and emulates MTD devices.
*/
struct ubi_volume_desc *gluebi_desc;
int gluebi_refcount;
struct mtd_info gluebi_mtd;
@ -200,28 +250,31 @@ struct ubi_wl_entry;
/**
* struct ubi_device - UBI device description structure
* @dev: class device object to use the Linux device model
* @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
* @ubi_num: UBI device number
* @ubi_name: UBI device name
* @major: character device major number
* @vol_count: number of volumes in this UBI device
* @volumes: volumes of this UBI device
* @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, @beb_rsvd_pebs,
* @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers,
* @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and
* @vol->eba_tbl.
* @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
* @vol->readers, @vol->writers, @vol->exclusive,
* @vol->ref_count, @vol->mapping and @vol->eba_tbl.
* @ref_count: count of references on the UBI device
*
* @rsvd_pebs: count of reserved physical eraseblocks
* @avail_pebs: count of available physical eraseblocks
* @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
* handling
* handling
* @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
*
* @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
* of UBI initialization
* @vtbl_slots: how many slots are available in the volume table
* @vtbl_size: size of the volume table in bytes
* @vtbl: in-RAM volume table copy
* @vtbl_mutex: protects on-flash volume table
* @volumes_mutex: protects on-flash volume table and serializes volume
* changes, like creation, deletion, update, resize
*
* @max_ec: current highest erase counter value
* @mean_ec: current mean erase counter value
@ -238,15 +291,15 @@ struct ubi_wl_entry;
* @prot.pnum: protection tree indexed by physical eraseblock numbers
* @prot.aec: protection tree indexed by absolute erase counter value
* @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
* @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
* fields
* @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
* fields
* @move_mutex: serializes eraseblock moves
* @wl_scheduled: non-zero if the wear-leveling was scheduled
* @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
* physical eraseblock
* physical eraseblock
* @abs_ec: absolute erase counter
* @move_from: physical eraseblock from where the data is being moved
* @move_to: physical eraseblock where the data is being moved to
* @move_from_put: if the "from" PEB was put
* @move_to_put: if the "to" PEB was put
* @works: list of pending works
* @works_count: count of pending works
@ -273,13 +326,13 @@ struct ubi_wl_entry;
* @hdrs_min_io_size
* @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
* @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
* not
* not
* @mtd: MTD device descriptor
*
* @peb_buf1: a buffer of PEB size used for different purposes
* @peb_buf2: another buffer of PEB size used for different purposes
* @buf_mutex: protects @peb_buf1 and @peb_buf2
* @dbg_peb_buf: buffer of PEB size used for debugging
* @dbg_peb_buf: buffer of PEB size used for debugging
* @dbg_buf_mutex: protects @dbg_peb_buf
*/
struct ubi_device {
@ -287,22 +340,24 @@ struct ubi_device {
struct device dev;
int ubi_num;
char ubi_name[sizeof(UBI_NAME_STR)+5];
int major;
int vol_count;
struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
spinlock_t volumes_lock;
int ref_count;
int rsvd_pebs;
int avail_pebs;
int beb_rsvd_pebs;
int beb_rsvd_level;
int autoresize_vol_id;
int vtbl_slots;
int vtbl_size;
struct ubi_vtbl_record *vtbl;
struct mutex vtbl_mutex;
struct mutex volumes_mutex;
int max_ec;
/* TODO: mean_ec is not updated at run-time, fix */
int mean_ec;
/* EBA unit's stuff */
@ -320,12 +375,13 @@ struct ubi_device {
struct rb_root aec;
} prot;
spinlock_t wl_lock;
struct mutex move_mutex;
struct rw_semaphore work_sem;
int wl_scheduled;
struct ubi_wl_entry **lookuptbl;
unsigned long long abs_ec;
struct ubi_wl_entry *move_from;
struct ubi_wl_entry *move_to;
int move_from_put;
int move_to_put;
struct list_head works;
int works_count;
@ -355,15 +411,19 @@ struct ubi_device {
void *peb_buf1;
void *peb_buf2;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
#ifdef CONFIG_MTD_UBI_DEBUG
void *dbg_peb_buf;
struct mutex dbg_buf_mutex;
#endif
};
extern struct kmem_cache *ubi_wl_entry_slab;
extern struct file_operations ubi_ctrl_cdev_operations;
extern struct file_operations ubi_cdev_operations;
extern struct file_operations ubi_vol_cdev_operations;
extern struct class *ubi_class;
extern struct mutex ubi_devices_mutex;
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@ -374,13 +434,18 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
int ubi_remove_volume(struct ubi_volume_desc *desc);
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
int ubi_add_volume(struct ubi_device *ubi, int vol_id);
void ubi_free_volume(struct ubi_device *ubi, int vol_id);
int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
/* upd.c */
int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes);
int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
long long bytes);
int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count);
int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
const struct ubi_leb_change_req *req);
int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count);
/* misc.c */
int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length);
@ -399,16 +464,17 @@ void ubi_gluebi_updated(struct ubi_volume *vol);
#endif
/* eba.c */
int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum);
int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
int offset, int len, int check);
int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum);
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check);
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
const void *buf, int offset, int len, int dtype);
int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
const void *buf, int len, int dtype,
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len, int dtype,
int used_ebs);
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
const void *buf, int len, int dtype);
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len, int dtype);
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
@ -421,6 +487,7 @@ int ubi_wl_flush(struct ubi_device *ubi);
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
void ubi_wl_close(struct ubi_device *ubi);
int ubi_thread(void *u);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@ -439,6 +506,14 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
/* build.c */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
struct ubi_device *ubi_get_by_major(int major);
int ubi_major2num(int major);
/*
* ubi_rb_for_each_entry - walk an RB-tree.
* @rb: a pointer to type 'struct rb_node' to use as a loop counter
@ -523,8 +598,10 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
*/
static inline void ubi_ro_mode(struct ubi_device *ubi)
{
ubi->ro_mode = 1;
ubi_warn("switch to read-only mode");
if (!ubi->ro_mode) {
ubi->ro_mode = 1;
ubi_warn("switch to read-only mode");
}
}
/**

View file

@ -22,7 +22,8 @@
*/
/*
* This file contains implementation of the volume update functionality.
* This file contains implementation of the volume update and atomic LEB change
* functionality.
*
* The update operation is based on the per-volume update marker which is
* stored in the volume table. The update marker is set before the update
@ -45,29 +46,31 @@
/**
* set_update_marker - set update marker.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
*
* This function sets the update marker flag for volume @vol_id. Returns zero
* This function sets the update marker flag for volume @vol. Returns zero
* in case of success and a negative error code in case of failure.
*/
static int set_update_marker(struct ubi_device *ubi, int vol_id)
static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err;
struct ubi_vtbl_record vtbl_rec;
struct ubi_volume *vol = ubi->volumes[vol_id];
dbg_msg("set update marker for volume %d", vol_id);
dbg_msg("set update marker for volume %d", vol->vol_id);
if (vol->upd_marker) {
ubi_assert(ubi->vtbl[vol_id].upd_marker);
ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
dbg_msg("already set");
return 0;
}
memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
sizeof(struct ubi_vtbl_record));
vtbl_rec.upd_marker = 1;
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
mutex_lock(&ubi->volumes_mutex);
err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 1;
return err;
}
@ -75,23 +78,24 @@ static int set_update_marker(struct ubi_device *ubi, int vol_id)
/**
* clear_update_marker - clear update marker.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
* @bytes: new data size in bytes
*
* This function clears the update marker for volume @vol_id, sets new volume
* This function clears the update marker for volume @vol, sets new volume
* data size and clears the "corrupted" flag (static volumes only). Returns
* zero in case of success and a negative error code in case of failure.
*/
static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes)
static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
long long bytes)
{
int err;
uint64_t tmp;
struct ubi_vtbl_record vtbl_rec;
struct ubi_volume *vol = ubi->volumes[vol_id];
dbg_msg("clear update marker for volume %d", vol_id);
dbg_msg("clear update marker for volume %d", vol->vol_id);
memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
sizeof(struct ubi_vtbl_record));
ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
vtbl_rec.upd_marker = 0;
@ -106,7 +110,9 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
vol->last_eb_bytes = vol->usable_leb_size;
}
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
mutex_lock(&ubi->volumes_mutex);
err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 0;
return err;
}
@ -114,35 +120,36 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
/**
* ubi_start_update - start volume update.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
* @bytes: update bytes
*
* This function starts volume update operation. If @bytes is zero, the volume
* is just wiped out. Returns zero in case of success and a negative error code
* in case of failure.
*/
int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
long long bytes)
{
int i, err;
uint64_t tmp;
struct ubi_volume *vol = ubi->volumes[vol_id];
dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes);
dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes);
ubi_assert(!vol->updating && !vol->changing_leb);
vol->updating = 1;
err = set_update_marker(ubi, vol_id);
err = set_update_marker(ubi, vol);
if (err)
return err;
/* Before updating - wipe out the volume */
for (i = 0; i < vol->reserved_pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol_id, i);
err = ubi_eba_unmap_leb(ubi, vol, i);
if (err)
return err;
}
if (bytes == 0) {
err = clear_update_marker(ubi, vol_id, 0);
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
err = ubi_wl_flush(ubi);
@ -162,10 +169,43 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
return 0;
}
/**
* ubi_start_leb_change - start atomic LEB change.
* @ubi: UBI device description object
* @vol: volume description object
* @req: operation request
*
* This function starts atomic LEB change operation. Returns zero in case of
* success and a negative error code in case of failure.
*/
int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
const struct ubi_leb_change_req *req)
{
ubi_assert(!vol->updating && !vol->changing_leb);
dbg_msg("start changing LEB %d:%d, %u bytes",
vol->vol_id, req->lnum, req->bytes);
if (req->bytes == 0)
return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
req->dtype);
vol->upd_bytes = req->bytes;
vol->upd_received = 0;
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
vol->ch_dtype = req->dtype;
vol->upd_buf = vmalloc(req->bytes);
if (!vol->upd_buf)
return -ENOMEM;
return 0;
}
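From user space the operation is driven through the volume character device; the sketch below assumes the UBI_IOCEBCH ioctl and struct ubi_leb_change_req from <mtd/ubi-user.h> added by this series, with error handling trimmed:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

/* Atomically replace the contents of LEB @lnum on volume node @vol_node */
static int change_leb(const char *vol_node, int lnum,
		      const void *data, int len)
{
	struct ubi_leb_change_req req = {
		.lnum  = lnum,
		.bytes = len,
		.dtype = UBI_UNKNOWN,
	};
	int fd, err;

	fd = open(vol_node, O_RDWR);
	if (fd < 0)
		return -1;

	/* Starts the operation: UBI allocates @upd_buf of @bytes bytes */
	err = ioctl(fd, UBI_IOCEBCH, &req);
	if (!err)
		/* Data written afterwards is collected until @bytes arrive */
		err = write(fd, data, len) == len ? 0 : -1;

	close(fd);
	return err;
}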
/**
* write_leb - write update data.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: data size
@ -191,26 +231,22 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
int len, int used_ebs)
static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int len, int used_ebs)
{
int err, l;
struct ubi_volume *vol = ubi->volumes[vol_id];
int err;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
l = ALIGN(len, ubi->min_io_size);
memset(buf + len, 0xFF, l - len);
int l = ALIGN(len, ubi->min_io_size);
memset(buf + len, 0xFF, l - len);
l = ubi_calc_data_len(ubi, buf, l);
if (l == 0) {
len = ubi_calc_data_len(ubi, buf, l);
if (len == 0) {
dbg_msg("all %d bytes contain 0xFF - skip", len);
return 0;
}
if (len != l)
dbg_msg("skip last %d bytes (0xFF)", len - l);
err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l,
UBI_UNKNOWN);
err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
} else {
/*
* When writing static volume, and this is the last logical
@ -222,7 +258,7 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
* contain zeros, not random trash.
*/
memset(buf + len, 0, vol->usable_leb_size - len);
err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len,
err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
UBI_UNKNOWN, used_ebs);
}
@ -236,16 +272,15 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
* @count: how many bytes to write
*
* This function writes more data to the volume which is being updated. It may
* be called arbitrary number of times until all of the update data arrive.
* This function returns %0 in case of success, number of bytes written during
* the last call if the whole volume update was successfully finished, and a
* be called an arbitrary number of times until all the update data arrives. This
* function returns %0 in case of success, number of bytes written during the
* last call if the whole volume update has been successfully finished, and a
* negative error code in case of failure.
*/
int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count)
{
uint64_t tmp;
struct ubi_volume *vol = ubi->volumes[vol_id];
int lnum, offs, err = 0, len, to_write = count;
dbg_msg("write %d of %lld bytes, %lld already passed",
@ -290,8 +325,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
* is the last chunk, it's time to flush the buffer.
*/
ubi_assert(flush_len <= vol->usable_leb_size);
err = write_leb(ubi, vol_id, lnum, vol->upd_buf,
flush_len, vol->upd_ebs);
err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
vol->upd_ebs);
if (err)
return err;
}
@ -318,8 +353,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
if (len == vol->usable_leb_size ||
vol->upd_received + len == vol->upd_bytes) {
err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len,
vol->upd_ebs);
err = write_leb(ubi, vol, lnum, vol->upd_buf,
len, vol->upd_ebs);
if (err)
break;
}
@ -333,16 +368,70 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
ubi_assert(vol->upd_received <= vol->upd_bytes);
if (vol->upd_received == vol->upd_bytes) {
/* The update is finished, clear the update marker */
err = clear_update_marker(ubi, vol_id, vol->upd_bytes);
err = clear_update_marker(ubi, vol, vol->upd_bytes);
if (err)
return err;
err = ubi_wl_flush(ubi);
if (err == 0) {
vol->updating = 0;
err = to_write;
vfree(vol->upd_buf);
vol->updating = 0;
}
}
return err;
}
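For contrast, a sketch of the user-space side of the volume update protocol this function implements: user space issues UBI_IOCVOLUP with the total byte count, then feeds the image through write() until all bytes have arrived (assumes <mtd/ubi-user.h>; short writes and cleanup on the error paths are not handled):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

static int update_volume(const char *vol_node, const char *img, int64_t bytes)
{
	char buf[4096];
	int fd = open(vol_node, O_RDWR);
	int in = open(img, O_RDONLY);
	ssize_t rd;

	if (fd < 0 || in < 0)
		return -1;

	if (ioctl(fd, UBI_IOCVOLUP, &bytes))	/* sets the update marker */
		return -1;

	while ((rd = read(in, buf, sizeof(buf))) > 0)
		if (write(fd, buf, rd) != rd)	/* kernel buffers per-LEB */
			return -1;

	close(in);
	close(fd);
	return 0;
}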
/**
* ubi_more_leb_change_data - accept more data for atomic LEB change.
* @vol: volume description object
* @buf: write data (user-space memory buffer)
* @count: how many bytes to write
*
* This function accepts more data for the volume which is under the
* "atomic LEB change" operation. It may be called an arbitrary number of times
* until all data arrives. This function returns %0 in case of success, number
* of bytes written during the last call if the whole "atomic LEB change"
* operation has been successfully finished, and a negative error code in case
* of failure.
*/
int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count)
{
int err;
dbg_msg("write %d of %lld bytes, %lld already passed",
count, vol->upd_bytes, vol->upd_received);
if (ubi->ro_mode)
return -EROFS;
if (vol->upd_received + count > vol->upd_bytes)
count = vol->upd_bytes - vol->upd_received;
err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
if (err)
return -EFAULT;
vol->upd_received += count;
if (vol->upd_received == vol->upd_bytes) {
int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes);
len = ubi_calc_data_len(ubi, vol->upd_buf, len);
err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
vol->upd_buf, len, UBI_UNKNOWN);
if (err)
return err;
}
ubi_assert(vol->upd_received <= vol->upd_bytes);
if (vol->upd_received == vol->upd_bytes) {
vol->changing_leb = 0;
err = count;
vfree(vol->upd_buf);
}
return err;
}
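The final flush above leans on a small piece of arithmetic worth spelling out: the buffer is padded up to the minimal I/O unit with 0xFF, and ubi_calc_data_len() then trims trailing 0xFF bytes back down to an aligned length. A standalone sketch of that trim, assuming the same semantics as the helper in misc.c:

/* Kernel's ALIGN(): round @x up to a multiple of power-of-two @a */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/*
 * Drop trailing 0xFF bytes but keep the result aligned to the minimal
 * I/O unit, so the subsequent flash write stays legal.
 */
static int calc_data_len(int min_io_size, const unsigned char *buf, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--)
		if (buf[i] != 0xFF)
			break;

	return ALIGN(i + 1, min_io_size);
}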

View file

@ -63,21 +63,30 @@ static struct device_attribute attr_vol_upd_marker =
* B. process 2 removes volume Y;
* C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
*
* What we want to do in a situation like that is to return error when the file
* is read. This is done by means of the 'removed' flag and the 'vol_lock' of
* the UBI volume description object.
* In this situation, this function will return %-ENODEV because it will find
* out that the volume was removed from the @ubi->volumes array.
*/
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
struct ubi_device *ubi;
spin_lock(&vol->ubi->volumes_lock);
if (vol->removed) {
spin_unlock(&vol->ubi->volumes_lock);
ubi = ubi_get_device(vol->ubi->ubi_num);
if (!ubi)
return -ENODEV;
spin_lock(&ubi->volumes_lock);
if (!ubi->volumes[vol->vol_id]) {
spin_unlock(&ubi->volumes_lock);
ubi_put_device(ubi);
return -ENODEV;
}
/* Take a reference to prevent volume removal */
vol->ref_count += 1;
spin_unlock(&ubi->volumes_lock);
if (attr == &attr_vol_reserved_ebs)
ret = sprintf(buf, "%d\n", vol->reserved_pebs);
else if (attr == &attr_vol_type) {
@ -94,15 +103,22 @@ static ssize_t vol_attribute_show(struct device *dev,
ret = sprintf(buf, "%d\n", vol->corrupted);
else if (attr == &attr_vol_alignment)
ret = sprintf(buf, "%d\n", vol->alignment);
else if (attr == &attr_vol_usable_eb_size) {
else if (attr == &attr_vol_usable_eb_size)
ret = sprintf(buf, "%d\n", vol->usable_leb_size);
} else if (attr == &attr_vol_data_bytes)
else if (attr == &attr_vol_data_bytes)
ret = sprintf(buf, "%lld\n", vol->used_bytes);
else if (attr == &attr_vol_upd_marker)
ret = sprintf(buf, "%d\n", vol->upd_marker);
else
BUG();
spin_unlock(&vol->ubi->volumes_lock);
/* This must be a bug */
ret = -EINVAL;
/* We've done the operation, drop volume and UBI device references */
spin_lock(&ubi->volumes_lock);
vol->ref_count -= 1;
ubi_assert(vol->ref_count >= 0);
spin_unlock(&ubi->volumes_lock);
ubi_put_device(ubi);
return ret;
}
@ -110,7 +126,7 @@ static ssize_t vol_attribute_show(struct device *dev,
static void vol_release(struct device *dev)
{
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
ubi_assert(vol->removed);
kfree(vol);
}
@ -152,9 +168,7 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
if (err)
return err;
err = device_create_file(&vol->dev, &attr_vol_upd_marker);
if (err)
return err;
return 0;
return err;
}
/**
@ -180,16 +194,18 @@ static void volume_sysfs_close(struct ubi_volume *vol)
* @req: volume creation request
*
* This function creates volume described by @req. If @req->vol_id id
* %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume
* %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
* and saves it in @req->vol_id. Returns zero in case of success and a negative
* error code in case of failure.
* error code in case of failure. Note, the caller has to have the
* @ubi->volumes_mutex locked.
*/
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
{
int i, err, vol_id = req->vol_id;
int i, err, vol_id = req->vol_id, dont_free = 0;
struct ubi_volume *vol;
struct ubi_vtbl_record vtbl_rec;
uint64_t bytes;
dev_t dev;
if (ubi->ro_mode)
return -EROFS;
@ -199,7 +215,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
return -ENOMEM;
spin_lock(&ubi->volumes_lock);
if (vol_id == UBI_VOL_NUM_AUTO) {
/* Find unused volume ID */
dbg_msg("search for vacant volume ID");
@ -252,6 +267,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
}
ubi->avail_pebs -= vol->reserved_pebs;
ubi->rsvd_pebs += vol->reserved_pebs;
spin_unlock(&ubi->volumes_lock);
vol->vol_id = vol_id;
vol->alignment = req->alignment;
@ -259,10 +275,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->vol_type = req->vol_type;
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len + 1);
vol->exclusive = 1;
vol->ubi = ubi;
ubi->volumes[vol_id] = vol;
spin_unlock(&ubi->volumes_lock);
/*
* Finish all pending erases because there may be some LEBs belonging
@ -299,9 +312,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1);
dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
err = cdev_add(&vol->cdev, dev, 1);
if (err) {
ubi_err("cannot add character device for volume %d", vol_id);
ubi_err("cannot add character device");
goto out_mapping;
}
@ -311,12 +325,15 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
vol->dev.devt = dev;
vol->dev.class = ubi_class;
sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err)
if (err) {
ubi_err("cannot register device");
goto out_gluebi;
}
err = volume_sysfs_init(ubi, vol);
if (err)
@ -339,15 +356,27 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
goto out_sysfs;
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
ubi->vol_count += 1;
vol->exclusive = 0;
spin_unlock(&ubi->volumes_lock);
paranoid_check_volumes(ubi);
return 0;
out_sysfs:
/*
* We have registered our device, we should not free the volume
* description object in this function in case of an error - it is
* freed by the release function.
*
* Get device reference to prevent the release function from being
* called just after sysfs has been closed.
*/
dont_free = 1;
get_device(&vol->dev);
volume_sysfs_close(vol);
out_gluebi:
err = ubi_destroy_gluebi(vol);
ubi_destroy_gluebi(vol);
out_cdev:
cdev_del(&vol->cdev);
out_mapping:
@ -356,26 +385,13 @@ out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
ubi->volumes[vol_id] = NULL;
out_unlock:
spin_unlock(&ubi->volumes_lock);
kfree(vol);
return err;
/*
* We are registered, so @vol is destroyed in the release function and
* we have to de-initialize differently.
*/
out_sysfs:
err = ubi_destroy_gluebi(vol);
cdev_del(&vol->cdev);
kfree(vol->eba_tbl);
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
volume_sysfs_close(vol);
if (dont_free)
put_device(&vol->dev);
else
kfree(vol);
ubi_err("cannot create volume %d, error %d", vol_id, err);
return err;
}
@ -385,7 +401,8 @@ out_sysfs:
*
* This function removes volume described by @desc. The volume has to be opened
* in "exclusive" mode. Returns zero in case of success and a negative error
* code in case of failure.
* code in case of failure. The caller has to have the @ubi->volumes_mutex
* locked.
*/
int ubi_remove_volume(struct ubi_volume_desc *desc)
{
@ -400,30 +417,36 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
if (ubi->ro_mode)
return -EROFS;
spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {
/*
* The volume is busy, probably someone is reading one of its
* sysfs files.
*/
err = -EBUSY;
goto out_unlock;
}
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
err = ubi_destroy_gluebi(vol);
if (err)
return err;
goto out_err;
err = ubi_change_vtbl_record(ubi, vol_id, NULL);
if (err)
return err;
goto out_err;
for (i = 0; i < vol->reserved_pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol_id, i);
err = ubi_eba_unmap_leb(ubi, vol, i);
if (err)
return err;
goto out_err;
}
spin_lock(&ubi->volumes_lock);
vol->removed = 1;
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
kfree(vol->eba_tbl);
vol->eba_tbl = NULL;
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
kfree(desc);
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
@ -441,8 +464,15 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
spin_unlock(&ubi->volumes_lock);
paranoid_check_volumes(ubi);
module_put(THIS_MODULE);
return 0;
out_err:
ubi_err("cannot remove volume %d, error %d", vol_id, err);
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
out_unlock:
spin_unlock(&ubi->volumes_lock);
return err;
}
/**
@ -450,8 +480,9 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
* @desc: volume descriptor
* @reserved_pebs: new size in physical eraseblocks
*
* This function returns zero in case of success, and a negative error code in
* case of failure.
* This function re-sizes the volume and returns zero in case of success, and a
* negative error code in case of failure. The caller has to have the
* @ubi->volumes_mutex locked.
*/
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
{
@ -466,8 +497,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
dbg_msg("re-size volume %d to from %d to %d PEBs",
vol_id, vol->reserved_pebs, reserved_pebs);
ubi_assert(desc->mode == UBI_EXCLUSIVE);
ubi_assert(vol == ubi->volumes[vol_id]);
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
@ -487,6 +516,14 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = UBI_LEB_UNMAPPED;
spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {
spin_unlock(&ubi->volumes_lock);
err = -EBUSY;
goto out_free;
}
spin_unlock(&ubi->volumes_lock);
/* Reserve physical eraseblocks */
pebs = reserved_pebs - vol->reserved_pebs;
if (pebs > 0) {
@ -516,7 +553,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (pebs < 0) {
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i);
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
if (err)
goto out_acc;
}
@ -565,27 +602,28 @@ out_free:
/**
* ubi_add_volume - add volume.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
*
* This function adds an existin volume and initializes all its data
* structures. Returnes zero in case of success and a negative error code in
* This function adds an existing volume and initializes all its data
* structures. Returns zero in case of success and a negative error code in
* case of failure.
*/
int ubi_add_volume(struct ubi_device *ubi, int vol_id)
int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err;
struct ubi_volume *vol = ubi->volumes[vol_id];
int err, vol_id = vol->vol_id;
dev_t dev;
dbg_msg("add volume %d", vol_id);
ubi_dbg_dump_vol_info(vol);
ubi_assert(vol);
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1);
dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
err = cdev_add(&vol->cdev, dev, 1);
if (err) {
ubi_err("cannot add character device for volume %d", vol_id);
ubi_err("cannot add character device for volume %d, error %d",
vol_id, err);
return err;
}
@ -595,7 +633,7 @@ int ubi_add_volume(struct ubi_device *ubi, int vol_id)
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
vol->dev.devt = dev;
vol->dev.class = ubi_class;
sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
@ -623,22 +661,19 @@ out_cdev:
/**
* ubi_free_volume - free volume.
* @ubi: UBI device description object
* @vol_id: volume ID
* @vol: volume description object
*
* This function frees all resources for volume @vol_id but does not remove it.
* This function frees all resources for volume @vol but does not remove it.
* Used only when the UBI device is detached.
*/
void ubi_free_volume(struct ubi_device *ubi, int vol_id)
void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err;
struct ubi_volume *vol = ubi->volumes[vol_id];
dbg_msg("free volume %d", vol_id);
ubi_assert(vol);
dbg_msg("free volume %d", vol->vol_id);
vol->removed = 1;
ubi->volumes[vol->vol_id] = NULL;
err = ubi_destroy_gluebi(vol);
ubi->volumes[vol_id] = NULL;
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
}
@ -708,11 +743,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
goto fail;
}
if (vol->upd_marker != 0 && vol->upd_marker != 1) {
ubi_err("bad upd_marker");
goto fail;
}
if (vol->upd_marker && vol->corrupted) {
dbg_err("update marker and corrupted simultaneously");
goto fail;
@ -747,7 +777,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
n = (long long)vol->used_ebs * vol->usable_leb_size;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
if (vol->corrupted != 0) {
if (vol->corrupted) {
ubi_err("corrupted dynamic volume");
goto fail;
}
@ -764,10 +794,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
goto fail;
}
} else {
if (vol->corrupted != 0 && vol->corrupted != 1) {
ubi_err("bad corrupted");
goto fail;
}
if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
ubi_err("bad used_ebs");
goto fail;
@ -820,9 +846,7 @@ static void paranoid_check_volumes(struct ubi_device *ubi)
{
int i;
mutex_lock(&ubi->vtbl_mutex);
for (i = 0; i < ubi->vtbl_slots; i++)
paranoid_check_volume(ubi, i);
mutex_unlock(&ubi->vtbl_mutex);
}
#endif

View file

@ -86,8 +86,10 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
{
int i, err;
uint32_t crc;
struct ubi_volume *layout_vol;
ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
if (!vtbl_rec)
vtbl_rec = &empty_vtbl_record;
@ -96,31 +98,25 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
vtbl_rec->crc = cpu_to_be32(crc);
}
mutex_lock(&ubi->vtbl_mutex);
memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i);
if (err) {
mutex_unlock(&ubi->vtbl_mutex);
err = ubi_eba_unmap_leb(ubi, layout_vol, i);
if (err)
return err;
}
err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0,
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
ubi->vtbl_size, UBI_LONGTERM);
if (err) {
mutex_unlock(&ubi->vtbl_mutex);
if (err)
return err;
}
}
paranoid_vtbl_check(ubi);
mutex_unlock(&ubi->vtbl_mutex);
return ubi_wl_flush(ubi);
return 0;
}
/**
* vol_til_check - check if volume table is not corrupted and contains sensible
* data.
*
* vtbl_check - check if volume table is not corrupted and contains sensible
* data.
* @ubi: UBI device description object
* @vtbl: volume table
*
@ -273,7 +269,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
* this volume table copy was found during scanning. It has to be wiped
* out.
*/
sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
if (sv)
old_seb = ubi_scan_find_seb(sv, copy);
@ -285,7 +281,7 @@ retry:
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID);
vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
vid_hdr->data_size = vid_hdr->used_ebs =
vid_hdr->data_pad = cpu_to_be32(0);
@ -518,6 +514,17 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->name[vol->name_len] = '\0';
vol->vol_id = i;
if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
/* Auto re-size flag may be set only for one volume */
if (ubi->autoresize_vol_id != -1) {
ubi_err("more then one auto-resize volume (%d "
"and %d)", ubi->autoresize_vol_id, i);
return -EINVAL;
}
ubi->autoresize_vol_id = i;
}
ubi_assert(!ubi->volumes[i]);
ubi->volumes[i] = vol;
ubi->vol_count += 1;
@ -568,6 +575,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->last_eb_bytes = sv->last_data_size;
}
/* And add the layout volume */
vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
if (!vol)
return -ENOMEM;
@ -582,7 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->last_eb_bytes = vol->reserved_pebs;
vol->used_bytes =
(long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
vol->vol_id = UBI_LAYOUT_VOL_ID;
vol->vol_id = UBI_LAYOUT_VOLUME_ID;
vol->ref_count = 1;
ubi_assert(!ubi->volumes[i]);
ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
@ -734,7 +743,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
if (!sv) {
/*
* No logical eraseblocks belonging to the layout volume were

View file

@ -116,21 +116,6 @@
*/
#define WL_MAX_FAILURES 32
/**
* struct ubi_wl_entry - wear-leveling entry.
* @rb: link in the corresponding RB-tree
* @ec: erase counter
* @pnum: physical eraseblock number
*
* Each physical eraseblock has a corresponding &struct wl_entry object which
* may be kept in different RB-trees.
*/
struct ubi_wl_entry {
struct rb_node rb;
int ec;
int pnum;
};
/**
* struct ubi_wl_prot_entry - PEB protection entry.
* @rb_pnum: link in the @wl->prot.pnum RB-tree
@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
#define paranoid_check_in_wl_tree(e, root)
#endif
/* Slab cache for wear-leveling entries */
static struct kmem_cache *wl_entries_slab;
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
* @e: the wear-leveling entry to add
@ -267,15 +249,26 @@ static int do_work(struct ubi_device *ubi)
int err;
struct ubi_work *wrk;
spin_lock(&ubi->wl_lock);
cond_resched();
/*
* @ubi->work_sem is used to synchronize with the workers. Workers take
* it in read mode, so many of them may be doing work at a time. But
* the queue flush code has to be sure the whole queue of works is
* done, and it takes @work_sem in write mode.
*/
down_read(&ubi->work_sem);
spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works)) {
spin_unlock(&ubi->wl_lock);
up_read(&ubi->work_sem);
return 0;
}
wrk = list_entry(ubi->works.next, struct ubi_work, list);
list_del(&wrk->list);
ubi->works_count -= 1;
ubi_assert(ubi->works_count >= 0);
spin_unlock(&ubi->wl_lock);
/*
@ -286,11 +279,8 @@ static int do_work(struct ubi_device *ubi)
err = wrk->func(ubi, wrk, 0);
if (err)
ubi_err("work failed with error code %d", err);
up_read(&ubi->work_sem);
spin_lock(&ubi->wl_lock);
ubi->works_count -= 1;
ubi_assert(ubi->works_count >= 0);
spin_unlock(&ubi->wl_lock);
return err;
}
@ -549,8 +539,12 @@ retry:
* prot_tree_del - remove a physical eraseblock from the protection trees
* @ubi: UBI device description object
* @pnum: the physical eraseblock to remove
*
* This function removes PEB @pnum from the protection trees and returns zero
* in case of success, or %-ENODEV if the PEB was not found in the protection
* trees.
*/
static void prot_tree_del(struct ubi_device *ubi, int pnum)
static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
struct rb_node *p;
struct ubi_wl_prot_entry *pe = NULL;
@ -561,7 +555,7 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
if (pnum == pe->e->pnum)
break;
goto found;
if (pnum < pe->e->pnum)
p = p->rb_left;
@ -569,10 +563,14 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
p = p->rb_right;
}
return -ENODEV;
found:
ubi_assert(pe->e->pnum == pnum);
rb_erase(&pe->rb_aec, &ubi->prot.aec);
rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
kfree(pe);
return 0;
}
/**
@ -744,7 +742,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int cancel)
{
int err, put = 0;
int err, put = 0, scrubbing = 0, protect = 0;
struct ubi_wl_prot_entry *uninitialized_var(pe);
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
@ -757,21 +756,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (!vid_hdr)
return -ENOMEM;
mutex_lock(&ubi->move_mutex);
spin_lock(&ubi->wl_lock);
ubi_assert(!ubi->move_from && !ubi->move_to);
ubi_assert(!ubi->move_to_put);
/*
* Only one WL worker at a time is supported in this implementation, so
* make sure a PEB is not being moved already.
*/
if (ubi->move_to || !ubi->free.rb_node ||
if (!ubi->free.rb_node ||
(!ubi->used.rb_node && !ubi->scrub.rb_node)) {
/*
* Only one WL worker at a time is supported at this
* implementation, so if a LEB is already being moved, cancel.
*
* No free physical eraseblocks? Well, we cancel wear-leveling
* then. It will be triggered again when a free physical
* eraseblock appears.
* No free physical eraseblocks? Well, they must be waiting in
* the queue to be erased. Cancel movement - it will be
* triggered again when a free physical eraseblock appears.
*
* No used physical eraseblocks? They must be temporarily
* protected from being moved. They will be moved to the
@ -780,10 +775,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("cancel WL, a list is empty: free %d, used %d",
!ubi->free.rb_node, !ubi->used.rb_node);
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
return 0;
goto out_cancel;
}
if (!ubi->scrub.rb_node) {
@ -798,27 +790,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
return 0;
goto out_cancel;
}
paranoid_check_in_wl_tree(e1, &ubi->used);
rb_erase(&e1->rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
} else {
/* Perform scrubbing */
scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
paranoid_check_in_wl_tree(e1, &ubi->scrub);
rb_erase(&e1->rb, &ubi->scrub);
rb_erase(&e1->rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
paranoid_check_in_wl_tree(e2, &ubi->free);
rb_erase(&e2->rb, &ubi->free);
ubi_assert(!ubi->move_from && !ubi->move_to);
ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
@ -828,6 +817,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* We so far do not know which logical eraseblock our physical
* eraseblock (@e1) belongs to. We have to read the volume identifier
* header first.
*
* Note, we are protected from this PEB being unmapped and erased. The
* 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
* which is being moved was unmapped.
*/
err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
@ -842,32 +835,51 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* likely have the VID header in place.
*/
dbg_wl("PEB %d has no VID header", e1->pnum);
err = 0;
} else {
ubi_err("error %d while reading VID header from PEB %d",
err, e1->pnum);
if (err > 0)
err = -EIO;
goto out_not_moved;
}
goto error;
ubi_err("error %d while reading VID header from PEB %d",
err, e1->pnum);
if (err > 0)
err = -EIO;
goto out_error;
}
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
if (err) {
if (err == UBI_IO_BITFLIPS)
err = 0;
goto error;
if (err < 0)
goto out_error;
if (err == 1)
goto out_not_moved;
/*
* For some reason the LEB was not moved - it might be because
* the volume is being deleted. We should prevent this PEB from
* being selected for wear-leveling movement for some "time", so
* put it into the protection tree.
*/
dbg_wl("cancelled moving PEB %d", e1->pnum);
pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
if (!pe) {
err = -ENOMEM;
goto out_error;
}
protect = 1;
}
ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
if (protect)
prot_tree_add(ubi, e1, pe, protect);
if (!ubi->move_to_put)
wl_tree_add(e2, &ubi->used);
else
put = 1;
ubi->move_from = ubi->move_to = NULL;
ubi->move_from_put = ubi->move_to_put = 0;
ubi->wl_scheduled = 0;
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
if (put) {
@ -877,62 +889,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
err = schedule_erase(ubi, e2, 0);
if (err) {
kmem_cache_free(wl_entries_slab, e2);
ubi_ro_mode(ubi);
}
if (err)
goto out_error;
}
err = schedule_erase(ubi, e1, 0);
if (err) {
kmem_cache_free(wl_entries_slab, e1);
ubi_ro_mode(ubi);
if (!protect) {
err = schedule_erase(ubi, e1, 0);
if (err)
goto out_error;
}
dbg_wl("done");
return err;
mutex_unlock(&ubi->move_mutex);
return 0;
/*
* Some error occurred. @e1 was not changed, so return it back. @e2
* might be changed, schedule it for erasure.
* For some reason the LEB was not moved, might be an error, might be
* something else. @e1 was not changed, so return it back. @e2 might
* be changed, schedule it for erasure.
*/
error:
if (err)
dbg_wl("error %d occurred, cancel operation", err);
ubi_assert(err <= 0);
out_not_moved:
ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
ubi->wl_scheduled = 0;
if (ubi->move_from_put)
put = 1;
if (scrubbing)
wl_tree_add(e1, &ubi->scrub);
else
wl_tree_add(e1, &ubi->used);
ubi->move_from = ubi->move_to = NULL;
ubi->move_from_put = ubi->move_to_put = 0;
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
if (put) {
/*
* Well, the target PEB was put meanwhile, schedule it for
* erasure.
*/
dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
err = schedule_erase(ubi, e1, 0);
if (err) {
kmem_cache_free(wl_entries_slab, e1);
ubi_ro_mode(ubi);
}
}
err = schedule_erase(ubi, e2, 0);
if (err) {
kmem_cache_free(wl_entries_slab, e2);
ubi_ro_mode(ubi);
}
if (err)
goto out_error;
yield();
mutex_unlock(&ubi->move_mutex);
return 0;
out_error:
ubi_err("error %d while moving PEB %d to PEB %d",
err, e1->pnum, e2->pnum);
ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
kmem_cache_free(ubi_wl_entry_slab, e1);
kmem_cache_free(ubi_wl_entry_slab, e2);
ubi_ro_mode(ubi);
mutex_unlock(&ubi->move_mutex);
return err;
out_cancel:
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
mutex_unlock(&ubi->move_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
return 0;
}
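Stripped of locking, protection handling and error paths, the decision this worker makes reduces to the sketch below (find_wl_entry(), WL_FREE_MAX_DIFF and UBI_WL_THRESHOLD are the wl.c internals used above):

/*
 * Condensed policy of wear_leveling_worker(): move the used PEB with
 * the lowest erase counter to a free PEB with a high erase counter,
 * but only if the counters differ by at least UBI_WL_THRESHOLD.
 */
static int wl_move_needed(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e1, *e2;

	if (!ubi->free.rb_node || !ubi->used.rb_node)
		return 0;	/* nothing to move, or nowhere to move it */

	e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
	e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

	return e2->ec - e1->ec >= UBI_WL_THRESHOLD;
}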
/**
@ -1020,7 +1037,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
kfree(wl_wrk);
kmem_cache_free(wl_entries_slab, e);
kmem_cache_free(ubi_wl_entry_slab, e);
return 0;
}
@ -1049,7 +1066,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
kmem_cache_free(wl_entries_slab, e);
kmem_cache_free(ubi_wl_entry_slab, e);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
@ -1119,8 +1136,7 @@ out_ro:
}
/**
* ubi_wl_put_peb - return a physical eraseblock to the wear-leveling
* unit.
* ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
* @ubi: UBI device description object
* @pnum: physical eraseblock to return
* @torture: if this physical eraseblock has to be tortured
@ -1128,7 +1144,7 @@ out_ro:
* This function is called to return physical eraseblock @pnum to the pool of
* free physical eraseblocks. The @torture flag has to be set if an I/O error
* occurred to this @pnum and it has to be tested. This function returns zero
* in case of success and a negative error code in case of failure.
* in case of success, and a negative error code in case of failure.
*/
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
@ -1139,8 +1155,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
ubi_assert(pnum >= 0);
ubi_assert(pnum < ubi->peb_count);
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
if (e == ubi->move_from) {
/*
@ -1148,17 +1164,22 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
* be moved. It will be scheduled for erasure in the
* wear-leveling worker.
*/
dbg_wl("PEB %d is being moved", pnum);
ubi_assert(!ubi->move_from_put);
ubi->move_from_put = 1;
dbg_wl("PEB %d is being moved, wait", pnum);
spin_unlock(&ubi->wl_lock);
return 0;
/* Wait for the WL worker by taking the @ubi->move_mutex */
mutex_lock(&ubi->move_mutex);
mutex_unlock(&ubi->move_mutex);
goto retry;
} else if (e == ubi->move_to) {
/*
* User is putting the physical eraseblock which was selected
* as the target the data is moved to. It may happen if the EBA
* unit already re-mapped the LEB but the WL unit did has not
* put the PEB to the "used" tree.
* unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
* the WL unit has not put the PEB to the "used" tree yet, but
* it is about to do this. So we just set a flag which will
* tell the WL worker that the PEB is not needed anymore and
* should be scheduled for erasure.
*/
dbg_wl("PEB %d is the target of data moving", pnum);
ubi_assert(!ubi->move_to_put);
@ -1172,8 +1193,15 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
} else if (in_wl_tree(e, &ubi->scrub)) {
paranoid_check_in_wl_tree(e, &ubi->scrub);
rb_erase(&e->rb, &ubi->scrub);
} else
prot_tree_del(ubi, e->pnum);
} else {
err = prot_tree_del(ubi, e->pnum);
if (err) {
ubi_err("PEB %d not found", pnum);
ubi_ro_mode(ubi);
spin_unlock(&ubi->wl_lock);
return err;
}
}
}
spin_unlock(&ubi->wl_lock);
@ -1227,8 +1255,17 @@ retry:
if (in_wl_tree(e, &ubi->used)) {
paranoid_check_in_wl_tree(e, &ubi->used);
rb_erase(&e->rb, &ubi->used);
} else
prot_tree_del(ubi, pnum);
} else {
int err;
err = prot_tree_del(ubi, e->pnum);
if (err) {
ubi_err("PEB %d not found", pnum);
ubi_ro_mode(ubi);
spin_unlock(&ubi->wl_lock);
return err;
}
}
wl_tree_add(e, &ubi->scrub);
spin_unlock(&ubi->wl_lock);
@ -1249,17 +1286,32 @@ retry:
*/
int ubi_wl_flush(struct ubi_device *ubi)
{
int err, pending_count;
pending_count = ubi->works_count;
dbg_wl("flush (%d pending works)", pending_count);
int err;
/*
* Erase while the pending works queue is not empty, but not more than
* the number of currently pending works.
*/
while (pending_count-- > 0) {
dbg_wl("flush (%d pending works)", ubi->works_count);
while (ubi->works_count) {
err = do_work(ubi);
if (err)
return err;
}
/*
* Make sure all the works which have been done in parallel are
* finished.
*/
down_write(&ubi->work_sem);
up_write(&ubi->work_sem);
/*
* And in case the last one was the WL worker and it cancelled the LEB
* movement, flush again.
*/
while (ubi->works_count) {
dbg_wl("flush more (%d pending works)", ubi->works_count);
err = do_work(ubi);
if (err)
return err;
@ -1294,7 +1346,7 @@ static void tree_destroy(struct rb_root *root)
rb->rb_right = NULL;
}
kmem_cache_free(wl_entries_slab, e);
kmem_cache_free(ubi_wl_entry_slab, e);
}
}
}
@ -1303,7 +1355,7 @@ static void tree_destroy(struct rb_root *root)
* ubi_thread - UBI background thread.
* @u: the UBI device description object pointer
*/
static int ubi_thread(void *u)
int ubi_thread(void *u)
{
int failures = 0;
struct ubi_device *ubi = u;
@ -1394,36 +1446,22 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->used = ubi->free = ubi->scrub = RB_ROOT;
ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
mutex_init(&ubi->move_mutex);
init_rwsem(&ubi->work_sem);
ubi->max_ec = si->max_ec;
INIT_LIST_HEAD(&ubi->works);
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread)) {
err = PTR_ERR(ubi->bgt_thread);
ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
err);
return err;
}
if (ubi_devices_cnt == 0) {
wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
sizeof(struct ubi_wl_entry),
0, 0, NULL);
if (!wl_entries_slab)
return -ENOMEM;
}
err = -ENOMEM;
ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
if (!ubi->lookuptbl)
goto out_free;
return err;
list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
cond_resched();
e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@ -1431,7 +1469,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->ec = seb->ec;
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, 0)) {
kmem_cache_free(wl_entries_slab, e);
kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
}
@ -1439,7 +1477,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
list_for_each_entry(seb, &si->free, u.list) {
cond_resched();
e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@ -1453,7 +1491,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
list_for_each_entry(seb, &si->corr, u.list) {
cond_resched();
e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@ -1461,7 +1499,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->ec = seb->ec;
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, 0)) {
kmem_cache_free(wl_entries_slab, e);
kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
}
@ -1470,7 +1508,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
cond_resched();
e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@ -1510,8 +1548,6 @@ out_free:
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);
if (ubi_devices_cnt == 0)
kmem_cache_destroy(wl_entries_slab);
return err;
}
@ -1541,7 +1577,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
rb->rb_right = NULL;
}
kmem_cache_free(wl_entries_slab, pe->e);
kmem_cache_free(ubi_wl_entry_slab, pe->e);
kfree(pe);
}
}
@ -1553,10 +1589,6 @@ static void protection_trees_destroy(struct ubi_device *ubi)
*/
void ubi_wl_close(struct ubi_device *ubi)
{
dbg_wl("disable \"%s\"", ubi->bgt_name);
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
dbg_wl("close the UBI wear-leveling unit");
cancel_pending(ubi);
@ -1565,8 +1597,6 @@ void ubi_wl_close(struct ubi_device *ubi)
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);
if (ubi_devices_cnt == 1)
kmem_cache_destroy(wl_entries_slab);
}
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
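
Aside: the reworked ubi_wl_flush() above is a drain-barrier-drain pattern — drain the queue, take ubi->work_sem for writing as a barrier so workers running in parallel finish, then drain once more in case a finishing worker queued new work. Below is a minimal user-space sketch of the same idiom, with pthreads standing in for the kernel's rw_semaphore and work list; all names are illustrative, not UBI code.

/*
 * User-space sketch of the drain-barrier-drain idiom used by the new
 * ubi_wl_flush(). work_sem plays the role of ubi->work_sem and
 * works_count the role of ubi->works_count.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;
static int works_count = 2;		/* pretend two works are queued */

static int do_work(void)
{
	/* A worker holds the semaphore for reading while it runs. */
	pthread_rwlock_rdlock(&work_sem);
	if (works_count > 0)
		works_count--;
	pthread_rwlock_unlock(&work_sem);
	return 0;
}

static int flush(void)
{
	int err;

	/* Pass 1: drain whatever is currently queued. */
	while (works_count) {
		err = do_work();
		if (err)
			return err;
	}

	/*
	 * Barrier: the write lock is only granted once every in-flight
	 * reader (i.e. every worker started elsewhere) has finished.
	 */
	pthread_rwlock_wrlock(&work_sem);
	pthread_rwlock_unlock(&work_sem);

	/* Pass 2: a finishing worker may have re-queued work. */
	while (works_count) {
		err = do_work();
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	int err = flush();

	printf("flush: %d, pending: %d\n", err, works_count);
	return err;
}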

View file

@ -176,7 +176,7 @@ static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct
spin_unlock(&inode->i_lock);
}
struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
static struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct posix_acl *acl;
@ -345,8 +345,10 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
if (!clone)
return -ENOMEM;
rc = posix_acl_create_masq(clone, (mode_t *)i_mode);
if (rc < 0)
if (rc < 0) {
posix_acl_release(clone);
return rc;
}
if (rc > 0)
jffs2_iset_acl(inode, &f->i_acl_access, clone);

View file

@ -28,7 +28,6 @@ struct jffs2_acl_header {
#define JFFS2_ACL_NOT_CACHED ((void *)-1)
extern struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
extern int jffs2_permission(struct inode *, int, struct nameidata *);
extern int jffs2_acl_chmod(struct inode *);
extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
@ -40,7 +39,6 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler;
#else
#define jffs2_get_acl(inode, type) (NULL)
#define jffs2_permission (NULL)
#define jffs2_acl_chmod(inode) (0)
#define jffs2_init_acl_pre(dir_i,inode,mode) (0)

View file

@ -97,11 +97,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);
if (ivalid & ATTR_MODE)
if (iattr->ia_mode & S_ISGID &&
!in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
else
ri->mode = cpu_to_jemode(iattr->ia_mode);
ri->mode = cpu_to_jemode(iattr->ia_mode);
else
ri->mode = cpu_to_jemode(inode->i_mode);

View file

@ -32,15 +32,18 @@ void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new
if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
/* Duplicate. Free one */
if (new->version < (*prev)->version) {
dbg_dentlist("Eep! Marking new dirent node is obsolete, old is \"%s\", ino #%u\n",
dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n",
(*prev)->name, (*prev)->ino);
jffs2_mark_node_obsolete(c, new->raw);
jffs2_free_full_dirent(new);
} else {
dbg_dentlist("marking old dirent \"%s\", ino #%u bsolete\n",
dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n",
(*prev)->name, (*prev)->ino);
new->next = (*prev)->next;
jffs2_mark_node_obsolete(c, ((*prev)->raw));
/* It may have been a 'placeholder' deletion dirent,
if jffs2_can_mark_obsolete() is true (see jffs2_do_unlink()) */
if ((*prev)->raw)
jffs2_mark_node_obsolete(c, ((*prev)->raw));
jffs2_free_full_dirent(*prev);
*prev = new;
}

View file

@ -37,24 +37,25 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
BUG_ON(tn->csize == 0);
if (!jffs2_is_writebuffered(c))
goto adj_acc;
/* Calculate how many bytes were already checked */
ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
len = ofs % c->wbuf_pagesize;
if (likely(len))
len = c->wbuf_pagesize - len;
len = tn->csize;
if (len >= tn->csize) {
dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
ref_offset(ref), tn->csize, ofs);
goto adj_acc;
if (jffs2_is_writebuffered(c)) {
int adj = ofs % c->wbuf_pagesize;
if (likely(adj))
adj = c->wbuf_pagesize - adj;
if (adj >= tn->csize) {
dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
ref_offset(ref), tn->csize, ofs);
goto adj_acc;
}
ofs += adj;
len -= adj;
}
ofs += len;
len = tn->csize - len;
dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);
@ -63,7 +64,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
* adding and jffs2_flash_read_end() interface. */
if (c->mtd->point) {
err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
if (!err && retlen < tn->csize) {
if (!err && retlen < len) {
JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
c->mtd->unpoint(c->mtd, buffer, ofs, retlen);
} else if (err)
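
Aside: the "already checked" adjustment in check_node_data() above is easier to follow with concrete numbers. A standalone sketch, assuming a 512-byte write-buffer page and made-up offsets — data that shares the write-buffer page with the already-CRC-checked header needs no re-check:

#include <stdio.h>

int main(void)
{
	unsigned int pagesize = 512;	/* mimics c->wbuf_pagesize */
	unsigned int ofs = 0x10244;	/* where the node's data starts */
	unsigned int len = 2000;	/* tn->csize: bytes of node data */
	unsigned int adj = ofs % pagesize;

	if (adj)
		adj = pagesize - adj;	/* bytes up to the next page edge */

	if (adj >= len) {
		printf("whole node already checked\n");
		return 0;
	}
	/* prints: check 1556 bytes starting at 0x10400 */
	printf("check %u bytes starting at %#x\n", len - adj, ofs + adj);
	return 0;
}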

View file

@ -582,7 +582,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
up(&dir_f->sem);
} else {
struct jffs2_full_dirent **prev = &dir_f->dents;
struct jffs2_full_dirent *fd = dir_f->dents;
uint32_t nhash = full_name_hash(name, namelen);
/* We don't actually want to reserve any space, but we do
@ -590,21 +590,22 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
down(&c->alloc_sem);
down(&dir_f->sem);
while ((*prev) && (*prev)->nhash <= nhash) {
if ((*prev)->nhash == nhash &&
!memcmp((*prev)->name, name, namelen) &&
!(*prev)->name[namelen]) {
struct jffs2_full_dirent *this = *prev;
for (fd = dir_f->dents; fd; fd = fd->next) {
if (fd->nhash == nhash &&
!memcmp(fd->name, name, namelen) &&
!fd->name[namelen]) {
D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
this->ino, ref_offset(this->raw)));
*prev = this->next;
jffs2_mark_node_obsolete(c, (this->raw));
jffs2_free_full_dirent(this);
fd->ino, ref_offset(fd->raw)));
jffs2_mark_node_obsolete(c, fd->raw);
/* We don't want to remove it from the list immediately,
because that screws up getdents()/seek() semantics even
more than they're screwed already. Turn it into a
node-less deletion dirent instead -- a placeholder */
fd->raw = NULL;
fd->ino = 0;
break;
}
prev = &((*prev)->next);
}
up(&dir_f->sem);
}
@ -630,7 +631,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n",
fd->name, dead_f->inocache->ino));
}
jffs2_mark_node_obsolete(c, fd->raw);
if (fd->raw)
jffs2_mark_node_obsolete(c, fd->raw);
jffs2_free_full_dirent(fd);
}
}
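
Aside: the fd->raw = NULL / fd->ino = 0 placeholder above is a classic tombstone: deletion only marks the entry, keeping the list shape so getdents()-style offsets stay valid, and a later pass really reclaims it. A generic, self-contained sketch of the pattern (illustrative names, not JFFS2 code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dirent_node {
	struct dirent_node *next;
	unsigned int ino;		/* 0 means "deleted placeholder" */
	char name[16];
};

static void unlink_name(struct dirent_node *head, const char *name)
{
	struct dirent_node *d;

	for (d = head; d; d = d->next)
		if (d->ino && !strcmp(d->name, name)) {
			d->ino = 0;	/* tombstone; keep the list shape */
			return;
		}
}

static void sweep(struct dirent_node **prev)
{
	while (*prev) {
		if (!(*prev)->ino) {
			struct dirent_node *dead = *prev;

			*prev = dead->next;	/* really unlink and free */
			free(dead);
		} else
			prev = &(*prev)->next;
	}
}

int main(void)
{
	struct dirent_node *head = NULL, *d;
	const char *names[] = { "c", "b", "a" };
	size_t i;

	for (i = 0; i < 3; i++) {	/* build the list a -> b -> c */
		d = calloc(1, sizeof(*d));
		if (!d)
			return 1;
		d->ino = (unsigned int)(3 - i);
		strcpy(d->name, names[i]);
		d->next = head;
		head = d;
	}
	unlink_name(head, "b");		/* "b" becomes a placeholder */
	sweep(&head);			/* now it is really gone */
	for (d = head; d; d = d->next)
		printf("%s (ino %u)\n", d->name, d->ino);
	return 0;
}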

View file

@ -98,6 +98,18 @@ static inline int cfi_interleave_supported(int i)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)
/* Device Interface Code Assignments from the "Common Flash Memory Interface
* Publication 100" dated December 1, 2001.
*/
#define CFI_INTERFACE_X8_ASYNC 0x0000
#define CFI_INTERFACE_X16_ASYNC 0x0001
#define CFI_INTERFACE_X8_BY_X16_ASYNC 0x0002
#define CFI_INTERFACE_X32_ASYNC 0x0003
#define CFI_INTERFACE_X16_BY_X32_ASYNC 0x0005
#define CFI_INTERFACE_NOT_ALLOWED 0xffff
/* NB: We keep these structures in memory in HOST byteorder, except
* where individually noted.
*/

View file

@ -152,6 +152,15 @@ struct mtd_info {
int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
/* In black-box flight-recorder-like scenarios we want to make successful
writes in interrupt context. panic_write() is only intended to be
called when it's known the kernel is about to panic and we need the
write to succeed. Since the kernel is not going to be running for much
longer, this function can break locks and delay (but must not sleep)
to ensure the write succeeds. */
int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
int (*read_oob) (struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops);
int (*write_oob) (struct mtd_info *mtd, loff_t to,
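
Aside: a panic-time logger such as mtdoops is expected to prefer ->panic_write and refuse the normal path when it is absent, since a regular ->write may sleep or block on locks held by the panicking CPU. A compilable sketch of that dispatch, where struct flash_ops is a cut-down stand-in for the real struct mtd_info:

#include <stdio.h>
#include <stddef.h>

struct flash_ops {
	int (*write)(struct flash_ops *f, long to, size_t len,
		     size_t *retlen, const unsigned char *buf);
	int (*panic_write)(struct flash_ops *f, long to, size_t len,
			   size_t *retlen, const unsigned char *buf);
};

static int demo_panic_write(struct flash_ops *f, long to, size_t len,
			    size_t *retlen, const unsigned char *buf)
{
	*retlen = len;		/* stub driver: pretend it all got written */
	return 0;
}

/* At panic time, only ->panic_write is safe to call. */
static int panic_log_write(struct flash_ops *f, long to, size_t len,
			   size_t *retlen, const unsigned char *buf)
{
	if (f->panic_write)
		return f->panic_write(f, to, len, retlen, buf);
	return -1;		/* no safe path; drop the record */
}

int main(void)
{
	struct flash_ops f = { .panic_write = demo_panic_write };
	unsigned char msg[] = "oops";
	size_t retlen = 0;

	printf("ret %d, wrote %zu\n",
	       panic_log_write(&f, 0, sizeof(msg), &retlen, msg), retlen);
	return 0;
}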

View file

@ -0,0 +1,8 @@
#ifndef __MTD_MTDRAM_H__
#define __MTD_MTDRAM_H__
#include <linux/mtd/mtd.h>
int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
unsigned long size, char *name);
#endif /* __MTD_MTDRAM_H__ */

View file

@ -67,6 +67,7 @@
/*
* Device ID Register F001h (R)
*/
#define ONENAND_DEVICE_DENSITY_MASK (0xf)
#define ONENAND_DEVICE_DENSITY_SHIFT (4)
#define ONENAND_DEVICE_IS_DDP (1 << 3)
#define ONENAND_DEVICE_IS_DEMUX (1 << 2)
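
Aside: a plausible decode of the device ID register using the new density mask — a sketch only, since the driver's exact use may differ; the driver derives the chip size from the density code.

#include <stdio.h>

#define ONENAND_DEVICE_DENSITY_MASK	(0xf)
#define ONENAND_DEVICE_DENSITY_SHIFT	(4)
#define ONENAND_DEVICE_IS_DDP		(1 << 3)

int main(void)
{
	unsigned int dev_id = 0x35;	/* made-up ID register value */
	unsigned int density = (dev_id >> ONENAND_DEVICE_DENSITY_SHIFT) &
			       ONENAND_DEVICE_DENSITY_MASK;

	printf("density code %u%s\n", density,
	       (dev_id & ONENAND_DEVICE_IS_DDP) ? " (dual-die package)" : "");
	return 0;
}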

View file

@ -71,5 +71,12 @@ extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
#define put_partition_parser(p) do { module_put((p)->owner); } while(0)
#endif
struct device;
struct device_node;
int __devinit of_mtd_parse_partitions(struct device *dev,
struct mtd_info *mtd,
struct device_node *node,
struct mtd_partition **pparts);
#endif

View file

@ -25,23 +25,6 @@
#include <linux/types.h>
#include <mtd/ubi-user.h>
/*
* UBI data type hint constants.
*
* UBI_LONGTERM: long-term data
* UBI_SHORTTERM: short-term data
* UBI_UNKNOWN: data persistence is unknown
*
* These constants are used when data is written to UBI volumes in order to
* help the UBI wear-leveling unit to find more appropriate physical
* eraseblocks.
*/
enum {
UBI_LONGTERM = 1,
UBI_SHORTTERM,
UBI_UNKNOWN
};
/*
* enum ubi_open_mode - UBI volume open mode constants.
*
@ -167,6 +150,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
int len, int dtype);
int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum);
int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype);
int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
/*

View file

@ -29,7 +29,7 @@ struct mtd_oob_buf {
#define MTD_WRITEABLE 0x400 /* Device is writeable */
#define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */
#define MTD_NO_ERASE 0x1000 /* No erase necessary */
#define MTD_STUPID_LOCK 0x2000 /* Always locked after reset */
#define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */
// Some common devices / combinations of capabilities
#define MTD_CAP_ROM 0

View file

@ -57,6 +57,43 @@ enum {
UBI_VID_STATIC = 2
};
/*
* Volume flags used in the volume table record.
*
* @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
*
* %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume
* table. UBI automatically re-sizes the volume which has this flag and makes
* the volume as large as possible. This means that if, after initialization,
* UBI finds out that there are available physical eraseblocks
* present on the device, it automatically appends all of them to the volume
* (the physical eraseblocks reserved for bad eraseblocks handling and other
* reserved physical eraseblocks are not taken). So, if there is a volume with
* the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical
* eraseblocks will be zero after UBI is loaded, because all of them will be
* reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared
* after the volume has been initialized.
*
* The auto-resize feature is useful for device production purposes. For
* example, different NAND flash chips may have different amounts of initial
* bad eraseblocks, depending on the particular chip instance. Manufacturers of
* NAND chips usually guarantee that the amount of initial bad eraseblocks does
* not exceed a certain percentage, e.g. 2%. When one creates an UBI image to be
* flashed to the end devices in production, one does not know the exact amount
* of good physical eraseblocks the NAND chip on the device will have, but this
* number is required to calculate the volume sizes and put them into the volume
* table of the UBI image. In this case, one of the volumes (e.g., the one
* which will store the root file system) is marked as "auto-resizable", and
* UBI will adjust its size on the first boot if needed.
*
* Note, first UBI reserves some amount of physical eraseblocks for bad
* eraseblock handling, and then re-sizes the volume, not vice-versa. This
* means that the pool of reserved physical eraseblocks will always be present.
*/
enum {
UBI_VTBL_AUTORESIZE_FLG = 0x01,
};
/*
* Compatibility constants used by internal volumes.
*
@ -262,7 +299,9 @@ struct ubi_vid_hdr {
/* The layout volume contains the volume table */
#define UBI_LAYOUT_VOL_ID UBI_INTERNAL_VOL_START
#define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START
#define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC
#define UBI_LAYOUT_VOLUME_ALIGN 1
#define UBI_LAYOUT_VOLUME_EBS 2
#define UBI_LAYOUT_VOLUME_NAME "layout volume"
#define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT
@ -289,7 +328,8 @@ struct ubi_vid_hdr {
* @upd_marker: if volume update was started but not finished
* @name_len: volume name length
* @name: the volume name
* @padding2: reserved, zeroes
* @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG)
* @padding: reserved, zeroes
* @crc: a CRC32 checksum of the record
*
* The volume table records are stored in the volume table, which is stored in
@ -324,7 +364,8 @@ struct ubi_vtbl_record {
__u8 upd_marker;
__be16 name_len;
__u8 name[UBI_VOL_NAME_MAX+1];
__u8 padding2[24];
__u8 flags;
__u8 padding[23];
__be32 crc;
} __attribute__ ((packed));

View file

@ -22,6 +22,21 @@
#define __UBI_USER_H__
/*
* UBI device creation (the same as MTD device attachment)
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* MTD devices may be attached using %UBI_IOCATT ioctl command of the UBI
* control device. The caller has to properly fill and pass
* &struct ubi_attach_req object - UBI will attach the MTD device specified in
* the request and return the newly created UBI device number as the ioctl
* return value.
*
* UBI device deletion (the same as MTD device detachment)
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* An UBI device may be deleted with the %UBI_IOCDET ioctl command of the UBI
* control device.
*
* UBI volume creation
* ~~~~~~~~~~~~~~~~~~~
*
@ -48,7 +63,7 @@
*
* Volume update should be done via the %UBI_IOCVOLUP IOCTL command of the
* corresponding UBI volume character device. A pointer to a 64-bit update
* size should be passed to the IOCTL. After then, UBI expects user to write
* size should be passed to the IOCTL. After this, UBI expects user to write
* this number of bytes to the volume character device. The update is finished
* when the claimed number of bytes is passed. So, the volume update sequence
* is something like:
@ -57,14 +72,24 @@
* ioctl(fd, UBI_IOCVOLUP, &image_size);
* write(fd, buf, image_size);
* close(fd);
*
* Atomic eraseblock change
* ~~~~~~~~~~~~~~~~~~~~~~~~
*
* Atomic eraseblock change operation is done via the %UBI_IOCEBCH IOCTL
* command of the corresponding UBI volume character device. A pointer to
* &struct ubi_leb_change_req has to be passed to the IOCTL. Then the user is
* expected to write the requested number of bytes. This is similar to the
* "volume update" IOCTL.
*/
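
Aside: the atomic LEB change sequence in code, as a minimal user-space sketch. The volume node name /dev/ubi0_0 is illustrative; UBI_IOCEBCH and struct ubi_leb_change_req are the definitions from this header.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

static int change_leb(const char *node, int lnum, const void *buf, int len)
{
	struct ubi_leb_change_req req = {
		.lnum	= lnum,
		.bytes	= len,
		.dtype	= UBI_UNKNOWN,	/* no persistence hint */
	};	/* designated initializers zero the padding, as required */
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;
	/* Declare the change, then write the new contents. */
	if (ioctl(fd, UBI_IOCEBCH, &req) != 0 ||
	    write(fd, buf, len) != len) {
		perror("atomic LEB change");
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	static const char data[] = "new contents";

	return change_leb("/dev/ubi0_0", 0, data, sizeof(data)) ? 1 : 0;
}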
/*
* When a new volume is created, users may either specify the volume number they
* want to create or to let UBI automatically assign a volume number using this
* constant.
* When a new UBI volume or UBI device is created, users may either specify the
* volume/device number they want to create or let UBI automatically assign
* the number using these constants.
*/
#define UBI_VOL_NUM_AUTO (-1)
#define UBI_DEV_NUM_AUTO (-1)
/* Maximum volume name length */
#define UBI_MAX_VOLUME_NAME 127
@ -80,6 +105,15 @@
/* Re-size an UBI volume */
#define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req)
/* IOCTL commands of the UBI control character device */
#define UBI_CTRL_IOC_MAGIC 'o'
/* Attach an MTD device */
#define UBI_IOCATT _IOW(UBI_CTRL_IOC_MAGIC, 64, struct ubi_attach_req)
/* Detach an MTD device */
#define UBI_IOCDET _IOW(UBI_CTRL_IOC_MAGIC, 65, int32_t)
/* IOCTL commands of UBI volume character devices */
#define UBI_VOL_IOC_MAGIC 'O'
@ -88,6 +122,28 @@
#define UBI_IOCVOLUP _IOW(UBI_VOL_IOC_MAGIC, 0, int64_t)
/* An eraseblock erasure command, used for debugging, disabled by default */
#define UBI_IOCEBER _IOW(UBI_VOL_IOC_MAGIC, 1, int32_t)
/* An atomic eraseblock change command */
#define UBI_IOCEBCH _IOW(UBI_VOL_IOC_MAGIC, 2, int32_t)
/* Maximum MTD device name length supported by UBI */
#define MAX_UBI_MTD_NAME_LEN 127
/*
* UBI data type hint constants.
*
* UBI_LONGTERM: long-term data
* UBI_SHORTTERM: short-term data
* UBI_UNKNOWN: data persistence is unknown
*
* These constants are used when data is written to UBI volumes in order to
* help the UBI wear-leveling unit to find more appropriate physical
* eraseblocks.
*/
enum {
UBI_LONGTERM = 1,
UBI_SHORTTERM = 2,
UBI_UNKNOWN = 3,
};
/*
* UBI volume type constants.
@ -97,22 +153,58 @@
*/
enum {
UBI_DYNAMIC_VOLUME = 3,
UBI_STATIC_VOLUME = 4
UBI_STATIC_VOLUME = 4,
};
/**
* struct ubi_attach_req - attach MTD device request.
* @ubi_num: UBI device number to create
* @mtd_num: MTD device number to attach
* @vid_hdr_offset: VID header offset (use defaults if %0)
* @padding: reserved for future, not used, has to be zeroed
*
* This data structure is used to specify the MTD device UBI has to attach and the
* parameters it has to use. The number which should be assigned to the new UBI
* device is passed in @ubi_num. UBI may automatically assign the number if
* @UBI_DEV_NUM_AUTO is passed. In this case, the device number is returned in
* @ubi_num.
*
* Most applications should pass %0 in @vid_hdr_offset to make UBI use the
* default offset of the VID header within physical eraseblocks. The default
* offset is the next min. I/O unit after the EC header. For example, it will
* be offset 512 in case of a 512-byte page NAND flash with no sub-page
* support, or 512 in case of a 2KiB page NAND flash with 4 512-byte sub-pages.
*
* But in rare cases, if this optimizes things, the VID header may be placed at
* a different offset. For example, the boot-loader might do things faster if the
* VID header sits at the end of the first 2KiB NAND page with 4 sub-pages. As
* the boot-loader would not normally need to read EC headers (unless it needs
* UBI in RW mode), it might be faster to calculate ECC. This is a weird example,
* but a real-life one. So, in this example, @vid_hdr_offset would be
* 2KiB - 64 bytes = 1984. Note that this position is not even 512-byte
* aligned, which is OK, as UBI is clever enough to realize this is the 4th
* sub-page of the first page and add the needed padding.
*/
struct ubi_attach_req {
int32_t ubi_num;
int32_t mtd_num;
int32_t vid_hdr_offset;
uint8_t padding[12];
};
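
Aside: attaching an MTD device through the control node might look like this — a sketch with minimal error handling, where /dev/ubi_ctrl is the conventional node name (the actual name depends on how the system creates device nodes).

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

int main(void)
{
	struct ubi_attach_req req = {
		.ubi_num	= UBI_DEV_NUM_AUTO,	/* let UBI choose */
		.mtd_num	= 0,			/* attach mtd0 */
		.vid_hdr_offset	= 0,			/* default offset */
	};	/* designated initializers zero the padding, as required */
	int fd = open("/dev/ubi_ctrl", O_RDONLY);

	if (fd < 0 || ioctl(fd, UBI_IOCATT, &req) < 0) {
		perror("UBI attach");
		return 1;
	}
	/* On success the assigned number also comes back in req.ubi_num. */
	printf("attached as ubi%d\n", req.ubi_num);
	close(fd);
	return 0;
}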
/**
* struct ubi_mkvol_req - volume description data structure used in
* volume creation requests.
* volume creation requests.
* @vol_id: volume number
* @alignment: volume alignment
* @bytes: volume size in bytes
* @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
* @padding1: reserved for future, not used
* @padding1: reserved for future, not used, has to be zeroed
* @name_len: volume name length
* @padding2: reserved for future, not used
* @padding2: reserved for future, not used, has to be zeroed
* @name: volume name
*
* This structure is used by userspace programs when creating new volumes. The
* This structure is used by user-space programs when creating new volumes. The
* @used_bytes field is only necessary when creating static volumes.
*
* The @alignment field specifies the required alignment of the volume logical
@ -139,7 +231,7 @@ struct ubi_mkvol_req {
int8_t padding1;
int16_t name_len;
int8_t padding2[4];
char name[UBI_MAX_VOLUME_NAME+1];
char name[UBI_MAX_VOLUME_NAME + 1];
} __attribute__ ((packed));
/**
@ -158,4 +250,19 @@ struct ubi_rsvol_req {
int32_t vol_id;
} __attribute__ ((packed));
/**
* struct ubi_leb_change_req - a data structure used in atomic logical
* eraseblock change requests.
* @lnum: logical eraseblock number to change
* @bytes: how many bytes will be written to the logical eraseblock
* @dtype: data type (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN)
* @padding: reserved for future, not used, has to be zeroed
*/
struct ubi_leb_change_req {
int32_t lnum;
int32_t bytes;
uint8_t dtype;
uint8_t padding[7];
} __attribute__ ((packed));
#endif /* __UBI_USER_H__ */