Merge git://git.infradead.org/mtd-2.6

* git://git.infradead.org/mtd-2.6: (90 commits)
  jffs2: Fix long-standing bug with symlink garbage collection.
  mtd: OneNAND: Fix test of unsigned in onenand_otp_walk()
  mtd: cfi_cmdset_0002, fix lock imbalance
  Revert "mtd: move mxcnd_remove to .exit.text"
  mtd: m25p80: add support for Macronix MX25L4005A
  kmsg_dump: fix build for CONFIG_PRINTK=n
  mtd: nandsim: add support for 4KiB pages
  mtd: mtdoops: refactor as a kmsg_dumper
  mtd: mtdoops: make record size configurable
  mtd: mtdoops: limit the maximum mtd partition size
  mtd: mtdoops: keep track of used/unused pages in an array
  mtd: mtdoops: several minor cleanups
  core: Add kernel message dumper to call on oopses and panics
  mtd: add ARM pismo support
  mtd: pxa3xx_nand: Fix PIO data transfer
  mtd: nand: fix multi-chip suspend problem
  mtd: add support for switching old SST chips into QRY mode
  mtd: fix M29W800D dev_id and uaddr
  mtd: don't use PF_MEMALLOC
  mtd: Add bad block table overrides to Davinci NAND driver
  ...

Fixed up conflicts (mostly trivial) in
	drivers/mtd/devices/m25p80.c
	drivers/mtd/maps/pcmciamtd.c
	drivers/mtd/nand/pxa3xx_nand.c
	kernel/printk.c
Linus Torvalds
2009-12-16 10:23:43 -08:00
commit 60d9aa758c
61 changed files with 3628 additions and 1758 deletions

View File

@ -70,9 +70,19 @@ static struct ctl_table bcmring_sysctl_reboot[] = {
{}
};
static struct resource nand_resource[] = {
[0] = {
.start = MM_ADDR_IO_NAND,
.end = MM_ADDR_IO_NAND + 0x1000 - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device nand_device = {
.name = "bcm-nand",
.id = -1,
.resource = nand_resource,
.num_resources = ARRAY_SIZE(nand_resource),
};
static struct platform_device *devices[] __initdata = {
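The hunk is cut off here, but the usual board-file pattern (an assumption on my part, not shown in this excerpt) is that &nand_device gets appended to this devices[] array and everything is registered in one call during machine init:

static struct platform_device *devices[] __initdata = {
	/* ...other board devices..., plus the new NAND device: */
	&nand_device,
};

static void __init bcmring_init_machine(void)	/* init hook name assumed */
{
	platform_add_devices(devices, ARRAY_SIZE(devices));
}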

View File

@ -0,0 +1,66 @@
/*****************************************************************************
* Copyright 2001 - 2008 Broadcom Corporation. All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/
/*
*
*****************************************************************************
*
* REG_NAND.h
*
* PURPOSE:
*
* This file contains definitions for the nand registers:
*
* NOTES:
*
*****************************************************************************/
#if !defined(__ASM_ARCH_REG_NAND_H)
#define __ASM_ARCH_REG_NAND_H
/* ---- Include Files ---------------------------------------------------- */
#include <csp/reg.h>
#include <mach/reg_umi.h>
/* ---- Constants and Types ---------------------------------------------- */
#define HW_NAND_BASE MM_IO_BASE_NAND /* NAND Flash */
/* DMA accesses by the bootstrap need hard nonvirtual addresses */
#define REG_NAND_CMD __REG16(HW_NAND_BASE + 0)
#define REG_NAND_ADDR __REG16(HW_NAND_BASE + 4)
#define REG_NAND_PHYS_DATA16 (HW_NAND_BASE + 8)
#define REG_NAND_PHYS_DATA8 (HW_NAND_BASE + 8)
#define REG_NAND_DATA16 __REG16(REG_NAND_PHYS_DATA16)
#define REG_NAND_DATA8 __REG8(REG_NAND_PHYS_DATA8)
/* use appropriate offset to make sure it starts at the 1K boundary */
#define REG_NAND_PHYS_DATA_DMA (HW_NAND_BASE + 0x400)
#define REG_NAND_DATA_DMA __REG32(REG_NAND_PHYS_DATA_DMA)
/* Linux DMA requires physical address of the data register */
#define REG_NAND_DATA16_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA16)
#define REG_NAND_DATA8_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA8)
#define REG_NAND_DATA_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA_DMA)
#define NAND_BUS_16BIT() (0)
#define NAND_BUS_8BIT() (!NAND_BUS_16BIT())
/* Register offsets */
#define REG_NAND_CMD_OFFSET (0)
#define REG_NAND_ADDR_OFFSET (4)
#define REG_NAND_DATA8_OFFSET (8)
#endif
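As a rough illustration of how these accessors are meant to be used (not part of the patch; the header path and the 0x90 READ ID opcode are assumptions based on common NAND practice):

#include <mach/reg_nand.h>	/* path assumed for this mach header */

/* Issue a READ ID cycle and pull two ID bytes back over the 8-bit bus. */
static void nand_read_id_sketch(u8 id[2])
{
	REG_NAND_CMD  = 0x90;		/* command cycle: READ ID      */
	REG_NAND_ADDR = 0x00;		/* address cycle: address 0x00 */
	id[0] = REG_NAND_DATA8;		/* maker code                  */
	id[1] = REG_NAND_DATA8;		/* device code                 */
}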

View File

@ -0,0 +1,237 @@
/*****************************************************************************
* Copyright 2005 - 2008 Broadcom Corporation. All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/
/*
*
*****************************************************************************
*
* REG_UMI.h
*
* PURPOSE:
*
* This file contains definitions for the nand registers:
*
* NOTES:
*
*****************************************************************************/
#if !defined(__ASM_ARCH_REG_UMI_H)
#define __ASM_ARCH_REG_UMI_H
/* ---- Include Files ---------------------------------------------------- */
#include <csp/reg.h>
#include <mach/csp/mm_io.h>
/* ---- Constants and Types ---------------------------------------------- */
/* Unified Memory Interface Ctrl Register */
#define HW_UMI_BASE MM_IO_BASE_UMI
/* Flash bank 0 timing and control register */
#define REG_UMI_FLASH0_TCR __REG32(HW_UMI_BASE + 0x00)
/* Flash bank 1 timing and control register */
#define REG_UMI_FLASH1_TCR __REG32(HW_UMI_BASE + 0x04)
/* Flash bank 2 timing and control register */
#define REG_UMI_FLASH2_TCR __REG32(HW_UMI_BASE + 0x08)
/* MMD interface and control register */
#define REG_UMI_MMD_ICR __REG32(HW_UMI_BASE + 0x0c)
/* NAND timing and control register */
#define REG_UMI_NAND_TCR __REG32(HW_UMI_BASE + 0x18)
/* NAND ready/chip select register */
#define REG_UMI_NAND_RCSR __REG32(HW_UMI_BASE + 0x1c)
/* NAND ECC control & status register */
#define REG_UMI_NAND_ECC_CSR __REG32(HW_UMI_BASE + 0x20)
/* NAND ECC data register XXB2B1B0 */
#define REG_UMI_NAND_ECC_DATA __REG32(HW_UMI_BASE + 0x24)
/* BCH ECC Parameter N */
#define REG_UMI_BCH_N __REG32(HW_UMI_BASE + 0x40)
/* BCH ECC Parameter K */
#define REG_UMI_BCH_K __REG32(HW_UMI_BASE + 0x44)
/* BCH ECC Parameter T */
#define REG_UMI_BCH_T __REG32(HW_UMI_BASE + 0x48)
/* BCH ECC Control Status */
#define REG_UMI_BCH_CTRL_STATUS __REG32(HW_UMI_BASE + 0x4C)
/* BCH WR ECC 31:0 */
#define REG_UMI_BCH_WR_ECC_0 __REG32(HW_UMI_BASE + 0x50)
/* BCH WR ECC 63:32 */
#define REG_UMI_BCH_WR_ECC_1 __REG32(HW_UMI_BASE + 0x54)
/* BCH WR ECC 95:64 */
#define REG_UMI_BCH_WR_ECC_2 __REG32(HW_UMI_BASE + 0x58)
/* BCH WR ECC 127:96 */
#define REG_UMI_BCH_WR_ECC_3 __REG32(HW_UMI_BASE + 0x5c)
/* BCH WR ECC 155:128 */
#define REG_UMI_BCH_WR_ECC_4 __REG32(HW_UMI_BASE + 0x60)
/* BCH Read Error Location 1,0 */
#define REG_UMI_BCH_RD_ERR_LOC_1_0 __REG32(HW_UMI_BASE + 0x64)
/* BCH Read Error Location 3,2 */
#define REG_UMI_BCH_RD_ERR_LOC_3_2 __REG32(HW_UMI_BASE + 0x68)
/* BCH Read Error Location 5,4 */
#define REG_UMI_BCH_RD_ERR_LOC_5_4 __REG32(HW_UMI_BASE + 0x6c)
/* BCH Read Error Location 7,6 */
#define REG_UMI_BCH_RD_ERR_LOC_7_6 __REG32(HW_UMI_BASE + 0x70)
/* BCH Read Error Location 9,8 */
#define REG_UMI_BCH_RD_ERR_LOC_9_8 __REG32(HW_UMI_BASE + 0x74)
/* BCH Read Error Location 11,10 */
#define REG_UMI_BCH_RD_ERR_LOC_B_A __REG32(HW_UMI_BASE + 0x78)
/* REG_UMI_FLASH0/1/2_TCR, REG_UMI_SRAM0/1_TCR bits */
/* Enable wait pin during burst write or read */
#define REG_UMI_TCR_WAITEN 0x80000000
/* Enable mem ctrlr to work with ext mem of lower freq than AHB clk */
#define REG_UMI_TCR_LOWFREQ 0x40000000
/* 1=synch write, 0=async write */
#define REG_UMI_TCR_MEMTYPE_SYNCWRITE 0x20000000
/* 1=synch read, 0=async read */
#define REG_UMI_TCR_MEMTYPE_SYNCREAD 0x10000000
/* 1=page mode read, 0=normal mode read */
#define REG_UMI_TCR_MEMTYPE_PAGEREAD 0x08000000
/* page size/burst size (wrap only) */
#define REG_UMI_TCR_MEMTYPE_PGSZ_MASK 0x07000000
/* 4 word */
#define REG_UMI_TCR_MEMTYPE_PGSZ_4 0x00000000
/* 8 word */
#define REG_UMI_TCR_MEMTYPE_PGSZ_8 0x01000000
/* 16 word */
#define REG_UMI_TCR_MEMTYPE_PGSZ_16 0x02000000
/* 32 word */
#define REG_UMI_TCR_MEMTYPE_PGSZ_32 0x03000000
/* 64 word */
#define REG_UMI_TCR_MEMTYPE_PGSZ_64 0x04000000
/* 128 word */
#define REG_UMI_TCR_MEMTYPE_PGSZ_128 0x05000000
/* 256 word */
#define REG_UMI_TCR_MEMTYPE_PGSZ_256 0x06000000
/* 512 word */
#define REG_UMI_TCR_MEMTYPE_PGSZ_512 0x07000000
/* Page read access cycle / Burst write latency (n+2 / n+1) */
#define REG_UMI_TCR_TPRC_TWLC_MASK 0x00f80000
/* Bus turnaround cycle (n) */
#define REG_UMI_TCR_TBTA_MASK 0x00070000
/* Write pulse width cycle (n+1) */
#define REG_UMI_TCR_TWP_MASK 0x0000f800
/* Write recovery cycle (n+1) */
#define REG_UMI_TCR_TWR_MASK 0x00000600
/* Write address setup cycle (n+1) */
#define REG_UMI_TCR_TAS_MASK 0x00000180
/* Output enable delay cycle (n) */
#define REG_UMI_TCR_TOE_MASK 0x00000060
/* Read access cycle / Burst read latency (n+2 / n+1) */
#define REG_UMI_TCR_TRC_TLC_MASK 0x0000001f
/* REG_UMI_MMD_ICR bits */
/* Flash write protection pin control */
#define REG_UMI_MMD_ICR_FLASH_WP 0x8000
/* Extend hold time for sram0, sram1 csn (39 MHz operation) */
#define REG_UMI_MMD_ICR_XHCS 0x4000
/* Enable SDRAM 2 interface control */
#define REG_UMI_MMD_ICR_SDRAM2EN 0x2000
/* Enable merge of flash banks 0/1 to 512 MBit bank */
#define REG_UMI_MMD_ICR_INST512 0x1000
/* Enable merge of flash banks 1/2 to 512 MBit bank */
#define REG_UMI_MMD_ICR_DATA512 0x0800
/* Enable SDRAM interface control */
#define REG_UMI_MMD_ICR_SDRAMEN 0x0400
/* Polarity of busy state of Burst Wait Signal */
#define REG_UMI_MMD_ICR_WAITPOL 0x0200
/* Enable burst clock stopped when not accessing external burst flash/sram */
#define REG_UMI_MMD_ICR_BCLKSTOP 0x0100
/* Enable the peri1_csn to replace flash1_csn in 512 Mb flash mode */
#define REG_UMI_MMD_ICR_PERI1EN 0x0080
/* Enable the peri2_csn to replace sdram_csn */
#define REG_UMI_MMD_ICR_PERI2EN 0x0040
/* Enable the peri3_csn to replace sdram2_csn */
#define REG_UMI_MMD_ICR_PERI3EN 0x0020
/* Enable sram bank1 for H/W controlled MRS */
#define REG_UMI_MMD_ICR_MRSB1 0x0010
/* Enable sram bank0 for H/W controlled MRS */
#define REG_UMI_MMD_ICR_MRSB0 0x0008
/* Polarity for asserted state of H/W controlled MRS */
#define REG_UMI_MMD_ICR_MRSPOL 0x0004
/* 0: S/W controllable ZZ/MRS/CRE/P-Mode pin */
/* 1: H/W controlled ZZ/MRS/CRE/P-Mode, same timing as CS */
#define REG_UMI_MMD_ICR_MRSMODE 0x0002
/* MRS state for S/W controlled mode */
#define REG_UMI_MMD_ICR_MRSSTATE 0x0001
/* REG_UMI_NAND_TCR bits */
/* Enable software to control CS */
#define REG_UMI_NAND_TCR_CS_SWCTRL 0x80000000
/* 16-bit nand wordsize if set */
#define REG_UMI_NAND_TCR_WORD16 0x40000000
/* Bus turnaround cycle (n) */
#define REG_UMI_NAND_TCR_TBTA_MASK 0x00070000
/* Write pulse width cycle (n+1) */
#define REG_UMI_NAND_TCR_TWP_MASK 0x0000f800
/* Write recovery cycle (n+1) */
#define REG_UMI_NAND_TCR_TWR_MASK 0x00000600
/* Write address setup cycle (n+1) */
#define REG_UMI_NAND_TCR_TAS_MASK 0x00000180
/* Output enable delay cycle (n) */
#define REG_UMI_NAND_TCR_TOE_MASK 0x00000060
/* Read access cycle (n+2) */
#define REG_UMI_NAND_TCR_TRC_TLC_MASK 0x0000001f
/* REG_UMI_NAND_RCSR bits */
/* Status: Ready=1, Busy=0 */
#define REG_UMI_NAND_RCSR_RDY 0x02
/* Keep CS asserted during operation */
#define REG_UMI_NAND_RCSR_CS_ASSERTED 0x01
/* REG_UMI_NAND_ECC_CSR bits */
/* Interrupt status - read-only */
#define REG_UMI_NAND_ECC_CSR_NANDINT 0x80000000
/* Read: Status of ECC done, Write: clear ECC interrupt */
#define REG_UMI_NAND_ECC_CSR_ECCINT_RAW 0x00800000
/* Read: Status of R/B, Write: clear R/B interrupt */
#define REG_UMI_NAND_ECC_CSR_RBINT_RAW 0x00400000
/* 1 = Enable ECC Interrupt */
#define REG_UMI_NAND_ECC_CSR_ECCINT_ENABLE 0x00008000
/* 1 = Assert interrupt at rising edge of R/B_ */
#define REG_UMI_NAND_ECC_CSR_RBINT_ENABLE 0x00004000
/* Calculate ECC by 0=512 bytes, 1=256 bytes */
#define REG_UMI_NAND_ECC_CSR_256BYTE 0x00000080
/* Enable ECC in hardware */
#define REG_UMI_NAND_ECC_CSR_ECC_ENABLE 0x00000001
/* REG_UMI_BCH_CTRL_STATUS bits */
/* Shift to Indicate Number of correctable errors detected */
#define REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR_SHIFT 20
/* Indicate Number of correctable errors detected */
#define REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR 0x00F00000
/* Indicate Errors detected during read but uncorrectable */
#define REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR 0x00080000
/* Indicate Errors detected during read and are correctable */
#define REG_UMI_BCH_CTRL_STATUS_CORR_ERR 0x00040000
/* Flag indicates BCH's ECC status of read process is valid */
#define REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID 0x00020000
/* Flag indicates BCH's ECC status of write process is valid */
#define REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID 0x00010000
/* Pause ECC calculation */
#define REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC 0x00000010
/* Enable Interrupt */
#define REG_UMI_BCH_CTRL_STATUS_INT_EN 0x00000004
/* Enable ECC during read */
#define REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN 0x00000002
/* Enable ECC during write */
#define REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN 0x00000001
/* Mask for location */
#define REG_UMI_BCH_ERR_LOC_MASK 0x00001FFF
/* location within a byte */
#define REG_UMI_BCH_ERR_LOC_BYTE 0x00000007
/* location within a word */
#define REG_UMI_BCH_ERR_LOC_WORD 0x00000018
/* location within a page (512 byte) */
#define REG_UMI_BCH_ERR_LOC_PAGE 0x00001FE0
#define REG_UMI_BCH_ERR_LOC_ADDR(index) (__REG32(HW_UMI_BASE + 0x64 + (index / 2)*4) >> ((index % 2) * 16))
#endif
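To make the error-location fields concrete, here is a hedged sketch of decoding one entry; the bit assignments (bits 0-2 = bit within byte, 3-4 = byte within word, 5-12 = word within the 512-byte page) are my reading of the masks above, not something stated in the patch:

/* Decode BCH read error location 'index' (illustrative only). */
static void bch_report_error_sketch(int index)
{
	u32 loc  = REG_UMI_BCH_ERR_LOC_ADDR(index) & REG_UMI_BCH_ERR_LOC_MASK;
	u32 bit  = loc & REG_UMI_BCH_ERR_LOC_BYTE;		/* bit within byte  */
	u32 byte = (loc & REG_UMI_BCH_ERR_LOC_WORD) >> 3;	/* byte within word */
	u32 word = (loc & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;	/* word within page */

	pr_info("BCH error %d: word %u, byte %u, bit %u\n", index, word, byte, bit);
}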

View File

@ -79,6 +79,10 @@ struct davinci_nand_pdata { /* platform_data */
/* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */
unsigned options;
/* Main and mirror bbt descriptor overrides */
struct nand_bbt_descr *bbt_td;
struct nand_bbt_descr *bbt_md;
};
#endif /* __ARCH_ARM_DAVINCI_NAND_H */
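For context, the new bbt_td/bbt_md hooks let a board replace the default flash-based bad block table descriptors. A hypothetical example follows; the patterns, offsets, and flags are illustrative choices, not taken from the patch:

static uint8_t board_bbt_pattern[]    = { 'B', 'b', 't', '0' };
static uint8_t board_mirror_pattern[] = { '1', 't', 'b', 'B' };

static struct nand_bbt_descr board_bbt_main_descr = {
	.options   = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		     NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs      = 2,
	.len       = 4,
	.veroffs   = 16,
	.maxblocks = 4,
	.pattern   = board_bbt_pattern,
};

static struct nand_bbt_descr board_bbt_mirror_descr = {
	.options   = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		     NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs      = 2,
	.len       = 4,
	.veroffs   = 16,
	.maxblocks = 4,
	.pattern   = board_mirror_pattern,
};

static struct davinci_nand_pdata board_nand_pdata = {
	.options = NAND_USE_FLASH_BBT,
	.bbt_td  = &board_bbt_main_descr,
	.bbt_md  = &board_bbt_mirror_descr,
};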

View File

@ -18,6 +18,7 @@
#include <linux/gpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <asm/sizes.h>
@ -149,7 +150,7 @@ static struct mtd_partition nhk8815_onenand_partitions[] = {
}
};
static struct flash_platform_data nhk8815_onenand_data = {
static struct onenand_platform_data nhk8815_onenand_data = {
.parts = nhk8815_onenand_partitions,
.nr_parts = ARRAY_SIZE(nhk8815_onenand_partitions),
};
@ -163,7 +164,7 @@ static struct resource nhk8815_onenand_resource[] = {
};
static struct platform_device nhk8815_onenand_device = {
.name = "onenand",
.name = "onenand-flash",
.id = -1,
.dev = {
.platform_data = &nhk8815_onenand_data,
@ -174,10 +175,10 @@ static struct platform_device nhk8815_onenand_device = {
static void __init nhk8815_onenand_init(void)
{
#ifdef CONFIG_ONENAND
#ifdef CONFIG_MTD_ONENAND
/* Set up SMCS0 for OneNand */
writel(0x000030db, FSMC_BCR0);
writel(0x02100551, FSMC_BTR0);
writel(0x000030db, FSMC_BCR(0));
writel(0x02100551, FSMC_BTR(0));
#endif
}

View File

@ -22,6 +22,7 @@
struct mxc_nand_platform_data {
int width; /* data bus width in bytes */
int hw_ecc; /* 0 to suppress hardware ECC */
int hw_ecc:1; /* 0 to suppress hardware ECC */
int flash_bbt:1; /* set to 1 to use a flash based bbt */
};
#endif /* __ASM_ARCH_NAND_H */
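A board would now fill these bitfields along these lines (values are illustrative, not from the patch):

static struct mxc_nand_platform_data board_nand_pdata = {
	.width     = 1,	/* bus width in bytes: 1 = 8-bit NAND  */
	.hw_ecc    = 1,	/* let the controller compute ECC      */
	.flash_bbt = 1,	/* keep the bad block table in flash   */
};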

View File

@ -17,6 +17,7 @@
* Setting this flag will allow the kernel to
* look for it at boot time and also skip the NAND
* scan.
* @options: Default value to set into 'struct nand_chip' options.
* @nr_chips: Number of chips in this set
* @nr_partitions: Number of partitions pointed to by @partitions
* @name: Name of set (optional)
@ -31,6 +32,7 @@ struct s3c2410_nand_set {
unsigned int disable_ecc:1;
unsigned int flash_bbt:1;
unsigned int options;
int nr_chips;
int nr_partitions;
char *name;

View File

@ -43,15 +43,17 @@
// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0
#define MANUFACTURER_INTEL 0x0089
/* Intel chips */
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
#define MANUFACTURER_ST 0x0020
/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
#define M50FLW080B 0x0081
/* Atmel chips */
#define AT49BV640D 0x02de
#define AT49BV640DT 0x02db
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@ -199,6 +201,16 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
cfip->FeatureSupport |= (1 << 5);
mtd->flags |= MTD_POWERUP_LOCK;
}
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@ -283,6 +295,8 @@ static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
@ -294,16 +308,16 @@ static struct cfi_fixup cfi_fixup_table[] = {
#endif
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
{ MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
{ MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
{ MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
{ MANUFACTURER_ST, M50FLW080A, fixup_use_fwh_lock, NULL, },
{ MANUFACTURER_ST, M50FLW080B, fixup_use_fwh_lock, NULL, },
{ CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
{ CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
{ CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
{ CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock, NULL, },
{ CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock, NULL, },
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
@ -319,7 +333,7 @@ static struct cfi_fixup fixup_table[] = {
static void cfi_fixup_major_minor(struct cfi_private *cfi,
struct cfi_pri_intelext *extp)
{
if (cfi->mfr == MANUFACTURER_INTEL &&
if (cfi->mfr == CFI_MFR_INTEL &&
cfi->id == PF38F4476 && extp->MinorVersion == '3')
extp->MinorVersion = '1';
}
@ -2235,7 +2249,7 @@ static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
/* Some chips have OTP located in the _top_ partition only.
For example: Intel 28F256L18T (T means top-parameter device) */
if (cfi->mfr == MANUFACTURER_INTEL) {
if (cfi->mfr == CFI_MFR_INTEL) {
switch (cfi->id) {
case 0x880b:
case 0x880c:
@ -2564,6 +2578,7 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
if (!ret) {
map_write(map, CMD(0xff), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
spin_unlock(chip->mutex);
}

View File

@ -490,10 +490,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
}
#endif
/* FIXME: erase-suspend-program is broken. See
http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
__module_get(THIS_MODULE);
return mtd;
@ -573,7 +569,6 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
if (time_after(jiffies, timeo)) {
printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
spin_unlock(chip->mutex);
return -EIO;
}
spin_unlock(chip->mutex);
@ -589,15 +584,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
return 0;
case FL_ERASING:
if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
goto sleep;
if (!( mode == FL_READY
|| mode == FL_POINT
|| !cfip
|| (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
|| (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
)))
if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
!(mode == FL_READY || mode == FL_POINT ||
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;
/* We could check to see if we're trying to access the sector

View File

@ -69,6 +69,13 @@ int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
/* ST M29DW chips */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
if (cfi_qry_present(map, base, cfi))
return 1;
/* some old SST chips, e.g. 39VF160x/39VF320x */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
if (cfi_qry_present(map, base, cfi))
return 1;
/* QRY not found */

View File

@ -142,8 +142,8 @@
/* ST - www.st.com */
#define M29F800AB 0x0058
#define M29W800DT 0x00D7
#define M29W800DB 0x005B
#define M29W800DT 0x22D7
#define M29W800DB 0x225B
#define M29W400DT 0x00EE
#define M29W400DB 0x00EF
#define M29W160DT 0x22C4
@ -1575,7 +1575,7 @@ static const struct amd_flash_info jedec_table[] = {
.dev_id = M29W800DT,
.name = "ST M29W800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
@ -1590,7 +1590,7 @@ static const struct amd_flash_info jedec_table[] = {
.dev_id = M29W800DB,
.name = "ST M29W800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,

View File

@ -22,6 +22,7 @@
#include <linux/mutex.h>
#include <linux/math64.h>
#include <linux/sched.h>
#include <linux/mod_devicetable.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@ -29,9 +30,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#define FLASH_PAGESIZE 256
/* Flash opcodes. */
#define OPCODE_WREN 0x06 /* Write enable */
#define OPCODE_RDSR 0x05 /* Read status register */
@ -61,7 +59,7 @@
/* Define max times to check status register before we give up. */
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define CMD_SIZE 4
#define MAX_CMD_SIZE 4
#ifdef CONFIG_M25PXX_USE_FAST_READ
#define OPCODE_READ OPCODE_FAST_READ
@ -78,8 +76,10 @@ struct m25p {
struct mutex lock;
struct mtd_info mtd;
unsigned partitioned:1;
u16 page_size;
u16 addr_width;
u8 erase_opcode;
u8 command[CMD_SIZE + FAST_READ_DUMMY_BYTE];
u8 *command;
};
static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@ -198,6 +198,19 @@ static int erase_chip(struct m25p *flash)
return 0;
}
static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
{
/* opcode is in cmd[0] */
cmd[1] = addr >> (flash->addr_width * 8 - 8);
cmd[2] = addr >> (flash->addr_width * 8 - 16);
cmd[3] = addr >> (flash->addr_width * 8 - 24);
}
static int m25p_cmdsz(struct m25p *flash)
{
return 1 + flash->addr_width;
}
/*
* Erase one sector of flash memory at offset ``offset'', which is any
* address within the sector that should be erased.
@ -219,11 +232,9 @@ static int erase_sector(struct m25p *flash, u32 offset)
/* Set up command buffer. */
flash->command[0] = flash->erase_opcode;
flash->command[1] = offset >> 16;
flash->command[2] = offset >> 8;
flash->command[3] = offset;
m25p_addr2cmd(flash, offset, flash->command);
spi_write(flash->spi, flash->command, CMD_SIZE);
spi_write(flash->spi, flash->command, m25p_cmdsz(flash));
return 0;
}
@ -325,7 +336,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
* Should add 1 byte DUMMY_BYTE.
*/
t[0].tx_buf = flash->command;
t[0].len = CMD_SIZE + FAST_READ_DUMMY_BYTE;
t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE;
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
@ -352,13 +363,11 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
/* Set up the write data buffer. */
flash->command[0] = OPCODE_READ;
flash->command[1] = from >> 16;
flash->command[2] = from >> 8;
flash->command[3] = from;
m25p_addr2cmd(flash, from, flash->command);
spi_sync(flash->spi, &m);
*retlen = m.actual_length - CMD_SIZE - FAST_READ_DUMMY_BYTE;
*retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;
mutex_unlock(&flash->lock);
@ -396,7 +405,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
memset(t, 0, (sizeof t));
t[0].tx_buf = flash->command;
t[0].len = CMD_SIZE;
t[0].len = m25p_cmdsz(flash);
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
@ -414,41 +423,36 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Set up the opcode in the write buffer. */
flash->command[0] = OPCODE_PP;
flash->command[1] = to >> 16;
flash->command[2] = to >> 8;
flash->command[3] = to;
m25p_addr2cmd(flash, to, flash->command);
/* what page do we start with? */
page_offset = to % FLASH_PAGESIZE;
page_offset = to & (flash->page_size - 1);
/* do all the bytes fit onto one page? */
if (page_offset + len <= FLASH_PAGESIZE) {
if (page_offset + len <= flash->page_size) {
t[1].len = len;
spi_sync(flash->spi, &m);
*retlen = m.actual_length - CMD_SIZE;
*retlen = m.actual_length - m25p_cmdsz(flash);
} else {
u32 i;
/* the size of data remaining on the first page */
page_size = FLASH_PAGESIZE - page_offset;
page_size = flash->page_size - page_offset;
t[1].len = page_size;
spi_sync(flash->spi, &m);
*retlen = m.actual_length - CMD_SIZE;
*retlen = m.actual_length - m25p_cmdsz(flash);
/* write everything in PAGESIZE chunks */
/* write everything in flash->page_size chunks */
for (i = page_size; i < len; i += page_size) {
page_size = len - i;
if (page_size > FLASH_PAGESIZE)
page_size = FLASH_PAGESIZE;
if (page_size > flash->page_size)
page_size = flash->page_size;
/* write the next page to flash */
flash->command[1] = (to + i) >> 16;
flash->command[2] = (to + i) >> 8;
flash->command[3] = (to + i);
m25p_addr2cmd(flash, to + i, flash->command);
t[1].tx_buf = buf + i;
t[1].len = page_size;
@ -460,7 +464,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
spi_sync(flash->spi, &m);
if (retlen)
*retlen += m.actual_length - CMD_SIZE;
*retlen += m.actual_length - m25p_cmdsz(flash);
}
}
@ -492,7 +496,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
memset(t, 0, (sizeof t));
t[0].tx_buf = flash->command;
t[0].len = CMD_SIZE;
t[0].len = m25p_cmdsz(flash);
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
@ -511,9 +515,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Start write from odd address. */
if (actual) {
flash->command[0] = OPCODE_BP;
flash->command[1] = to >> 16;
flash->command[2] = to >> 8;
flash->command[3] = to;
m25p_addr2cmd(flash, to, flash->command);
/* write one byte. */
t[1].len = 1;
@ -521,17 +523,15 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
ret = wait_till_ready(flash);
if (ret)
goto time_out;
*retlen += m.actual_length - CMD_SIZE;
*retlen += m.actual_length - m25p_cmdsz(flash);
}
to += actual;
flash->command[0] = OPCODE_AAI_WP;
flash->command[1] = to >> 16;
flash->command[2] = to >> 8;
flash->command[3] = to;
m25p_addr2cmd(flash, to, flash->command);
/* Write out most of the data here. */
cmd_sz = CMD_SIZE;
cmd_sz = m25p_cmdsz(flash);
for (; actual < len - 1; actual += 2) {
t[0].len = cmd_sz;
/* write two bytes. */
@ -555,10 +555,8 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
if (actual != len) {
write_enable(flash);
flash->command[0] = OPCODE_BP;
flash->command[1] = to >> 16;
flash->command[2] = to >> 8;
flash->command[3] = to;
t[0].len = CMD_SIZE;
m25p_addr2cmd(flash, to, flash->command);
t[0].len = m25p_cmdsz(flash);
t[1].len = 1;
t[1].tx_buf = buf + actual;
@ -566,7 +564,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
ret = wait_till_ready(flash);
if (ret)
goto time_out;
*retlen += m.actual_length - CMD_SIZE;
*retlen += m.actual_length - m25p_cmdsz(flash);
write_disable(flash);
}
@ -582,8 +580,6 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
*/
struct flash_info {
char *name;
/* JEDEC id zero means "no ID" (most older chips); otherwise it has
* a high byte of zero plus three data bytes: the manufacturer id,
* then a two byte device id.
@ -597,87 +593,119 @@ struct flash_info {
unsigned sector_size;
u16 n_sectors;
u16 page_size;
u16 addr_width;
u16 flags;
#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
#define M25P_NO_ERASE 0x02 /* No erase command needed */
};
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
((kernel_ulong_t)&(struct flash_info) { \
.jedec_id = (_jedec_id), \
.ext_id = (_ext_id), \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
.addr_width = 3, \
.flags = (_flags), \
})
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
((kernel_ulong_t)&(struct flash_info) { \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.addr_width = (_addr_width), \
.flags = M25P_NO_ERASE, \
})
/* NOTE: double check command sets and memory organization when you add
* more flash chips. This current list focuses on newer chips, which
* have been converging on command sets that include JEDEC ID.
*/
static struct flash_info __devinitdata m25p_data [] = {
static const struct spi_device_id m25p_ids[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
{ "at25fs010", 0x1f6601, 0, 32 * 1024, 4, SECT_4K, },
{ "at25fs040", 0x1f6604, 0, 64 * 1024, 8, SECT_4K, },
{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
{ "at25df041a", 0x1f4401, 0, 64 * 1024, 8, SECT_4K, },
{ "at25df641", 0x1f4800, 0, 64 * 1024, 128, SECT_4K, },
{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
{ "at26f004", 0x1f0400, 0, 64 * 1024, 8, SECT_4K, },
{ "at26df081a", 0x1f4501, 0, 64 * 1024, 16, SECT_4K, },
{ "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, },
{ "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
{ "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
/* Macronix */
{ "mx25l3205d", 0xc22016, 0, 64 * 1024, 64, },
{ "mx25l6405d", 0xc22017, 0, 64 * 1024, 128, },
{ "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, },
{ "mx25l12855e", 0xc22618, 0, 64 * 1024, 256, },
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
{ "s25sl004a", 0x010212, 0, 64 * 1024, 8, },
{ "s25sl008a", 0x010213, 0, 64 * 1024, 16, },
{ "s25sl016a", 0x010214, 0, 64 * 1024, 32, },
{ "s25sl032a", 0x010215, 0, 64 * 1024, 64, },
{ "s25sl064a", 0x010216, 0, 64 * 1024, 128, },
{ "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, },
{ "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, },
{ "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, },
{ "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K, },
{ "sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K, },
{ "sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K, },
{ "sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K, },
{ "sst25wf512", 0xbf2501, 0, 64 * 1024, 1, SECT_4K, },
{ "sst25wf010", 0xbf2502, 0, 64 * 1024, 2, SECT_4K, },
{ "sst25wf020", 0xbf2503, 0, 64 * 1024, 4, SECT_4K, },
{ "sst25wf040", 0xbf2504, 0, 64 * 1024, 8, SECT_4K, },
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) },
{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) },
{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) },
{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) },
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) },
/* ST Microelectronics -- newer production may have feature updates */
{ "m25p05", 0x202010, 0, 32 * 1024, 2, },
{ "m25p10", 0x202011, 0, 32 * 1024, 4, },
{ "m25p20", 0x202012, 0, 64 * 1024, 4, },
{ "m25p40", 0x202013, 0, 64 * 1024, 8, },
{ "m25p80", 0, 0, 64 * 1024, 16, },
{ "m25p16", 0x202015, 0, 64 * 1024, 32, },
{ "m25p32", 0x202016, 0, 64 * 1024, 64, },
{ "m25p64", 0x202017, 0, 64 * 1024, 128, },
{ "m25p128", 0x202018, 0, 256 * 1024, 64, },
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
{ "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
{ "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
{ "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
{ "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
{ "m45pe10", 0x204011, 0, 64 * 1024, 2, },
{ "m45pe80", 0x204014, 0, 64 * 1024, 16, },
{ "m45pe16", 0x204015, 0, 64 * 1024, 32, },
{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
{ "m25pe80", 0x208014, 0, 64 * 1024, 16, },
{ "m25pe16", 0x208015, 0, 64 * 1024, 32, SECT_4K, },
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
{ "w25x10", 0xef3011, 0, 64 * 1024, 2, SECT_4K, },
{ "w25x20", 0xef3012, 0, 64 * 1024, 4, SECT_4K, },
{ "w25x40", 0xef3013, 0, 64 * 1024, 8, SECT_4K, },
{ "w25x80", 0xef3014, 0, 64 * 1024, 16, SECT_4K, },
{ "w25x16", 0xef3015, 0, 64 * 1024, 32, SECT_4K, },
{ "w25x32", 0xef3016, 0, 64 * 1024, 64, SECT_4K, },
{ "w25x64", 0xef3017, 0, 64 * 1024, 128, SECT_4K, },
};
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
{ "cat25c03", CAT25_INFO( 32, 8, 16, 2) },
{ "cat25c09", CAT25_INFO( 128, 8, 32, 2) },
{ "cat25c17", CAT25_INFO( 256, 8, 32, 2) },
{ "cat25128", CAT25_INFO(2048, 8, 64, 2) },
{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
{
int tmp;
u8 code = OPCODE_RDID;
@ -702,18 +730,24 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
jedec = jedec << 8;
jedec |= id[2];
/*
* Some chips (like Numonyx M25P80) have JEDEC and non-JEDEC variants,
* which depend on the technology process. Officially the RDID command doesn't
* exist for non-JEDEC chips, but for compatibility they return ID 0.
*/
if (jedec == 0)
return NULL;
ext_jedec = id[3] << 8 | id[4];
for (tmp = 0, info = m25p_data;
tmp < ARRAY_SIZE(m25p_data);
tmp++, info++) {
for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) {
info = (void *)m25p_ids[tmp].driver_data;
if (info->jedec_id == jedec) {
if (info->ext_id != 0 && info->ext_id != ext_jedec)
continue;
return info;
return &m25p_ids[tmp];
}
}
dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
return NULL;
}
@ -725,6 +759,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
*/
static int __devinit m25p_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct flash_platform_data *data;
struct m25p *flash;
struct flash_info *info;
@ -737,50 +772,65 @@ static int __devinit m25p_probe(struct spi_device *spi)
*/
data = spi->dev.platform_data;
if (data && data->type) {
for (i = 0, info = m25p_data;
i < ARRAY_SIZE(m25p_data);
i++, info++) {
if (strcmp(data->type, info->name) == 0)
break;
const struct spi_device_id *plat_id;
for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) {
plat_id = &m25p_ids[i];
if (strcmp(data->type, plat_id->name))
continue;
break;
}
/* unrecognized chip? */
if (i == ARRAY_SIZE(m25p_data)) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n",
dev_name(&spi->dev), data->type);
info = NULL;
if (plat_id)
id = plat_id;
else
dev_warn(&spi->dev, "unrecognized id %s\n", data->type);
}
/* recognized; is that chip really what's there? */
} else if (info->jedec_id) {
struct flash_info *chip = jedec_probe(spi);
info = (void *)id->driver_data;
if (!chip || chip != info) {
dev_warn(&spi->dev, "found %s, expected %s\n",
chip ? chip->name : "UNKNOWN",
info->name);
info = NULL;
}
if (info->jedec_id) {
const struct spi_device_id *jid;
jid = jedec_probe(spi);
if (!jid) {
dev_info(&spi->dev, "non-JEDEC variant of %s\n",
id->name);
} else if (jid != id) {
/*
* JEDEC knows better, so overwrite platform ID. We
* can't trust partitions any longer, but we'll let
* mtd apply them anyway, since some partitions may be
* marked read-only, and we don't want to lose that
* information, even if it's not 100% accurate.
*/
dev_warn(&spi->dev, "found %s, expected %s\n",
jid->name, id->name);
id = jid;
info = (void *)jid->driver_data;
}
} else
info = jedec_probe(spi);
if (!info)
return -ENODEV;
}
flash = kzalloc(sizeof *flash, GFP_KERNEL);
if (!flash)
return -ENOMEM;
flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
if (!flash->command) {
kfree(flash);
return -ENOMEM;
}
flash->spi = spi;
mutex_init(&flash->lock);
dev_set_drvdata(&spi->dev, flash);
/*
* Atmel serial flash tend to power up
* with the software protection bits set
* Atmel and SST serial flash tend to power
* up with the software protection bits set
*/
if (info->jedec_id >> 16 == 0x1f) {
if (info->jedec_id >> 16 == 0x1f ||
info->jedec_id >> 16 == 0xbf) {
write_enable(flash);
write_sr(flash, 0);
}
@ -812,9 +862,14 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.erasesize = info->sector_size;
}
flash->mtd.dev.parent = &spi->dev;
if (info->flags & M25P_NO_ERASE)
flash->mtd.flags |= MTD_NO_ERASE;
dev_info(&spi->dev, "%s (%lld Kbytes)\n", info->name,
flash->mtd.dev.parent = &spi->dev;
flash->page_size = info->page_size;
flash->addr_width = info->addr_width;
dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
(long long)flash->mtd.size >> 10);
DEBUG(MTD_DEBUG_LEVEL2,
@ -888,8 +943,10 @@ static int __devexit m25p_remove(struct spi_device *spi)
status = del_mtd_partitions(&flash->mtd);
else
status = del_mtd_device(&flash->mtd);
if (status == 0)
if (status == 0) {
kfree(flash->command);
kfree(flash);
}
return 0;
}
@ -900,6 +957,7 @@ static struct spi_driver m25p80_driver = {
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.id_table = m25p_ids,
.probe = m25p_probe,
.remove = __devexit_p(m25p_remove),

View File

@ -636,6 +636,7 @@ add_dataflash_otp(struct spi_device *spi, char *name,
struct mtd_info *device;
struct flash_platform_data *pdata = spi->dev.platform_data;
char *otp_tag = "";
int err = 0;
priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (!priv)
@ -693,13 +694,23 @@ add_dataflash_otp(struct spi_device *spi, char *name,
if (nr_parts > 0) {
priv->partitioned = 1;
return add_mtd_partitions(device, parts, nr_parts);
err = add_mtd_partitions(device, parts, nr_parts);
goto out;
}
} else if (pdata && pdata->nr_parts)
dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
pdata->nr_parts, device->name);
return add_mtd_device(device) == 1 ? -ENODEV : 0;
if (add_mtd_device(device) == 1)
err = -ENODEV;
out:
if (!err)
return 0;
dev_set_drvdata(&spi->dev, NULL);
kfree(priv);
return err;
}
static inline int __devinit
@ -932,8 +943,10 @@ static int __devexit dataflash_remove(struct spi_device *spi)
status = del_mtd_partitions(&flash->mtd);
else
status = del_mtd_device(&flash->mtd);
if (status == 0)
if (status == 0) {
dev_set_drvdata(&spi->dev, NULL);
kfree(flash);
}
return status;
}

View File

@ -359,12 +359,6 @@ config MTD_SA1100
the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
If you have such a board, say 'Y'.
config MTD_IPAQ
tristate "CFI Flash device mapped on Compaq/HP iPAQ"
depends on IPAQ_HANDHELD && MTD_CFI
help
This provides a driver for the on-board flash of the iPAQ.
config MTD_DC21285
tristate "CFI Flash device mapped on DC21285 Footbridge"
depends on MTD_CFI && ARCH_FOOTBRIDGE && MTD_COMPLEX_MAPPINGS

View File

@ -24,12 +24,12 @@ obj-$(CONFIG_MTD_CEIVA) += ceiva.o
obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
obj-$(CONFIG_MTD_IPAQ) += ipaq-flash.o
obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
obj-$(CONFIG_MTD_NETSC520) += netsc520.o

View File

@ -1,460 +0,0 @@
/*
* Flash memory access on iPAQ Handhelds (either SA1100 or PXA250 based)
*
* (C) 2000 Nicolas Pitre <nico@fluxnic.net>
* (C) 2002 Hewlett-Packard Company <jamey.hicks@hp.com>
* (C) 2003 Christian Pellegrin <chri@ascensit.com>, <chri@infis.univ.ts.it>: concatenation of multiple flashes
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/mach-types.h>
#include <asm/system.h>
#include <asm/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#ifdef CONFIG_MTD_CONCAT
#include <linux/mtd/concat.h>
#endif
#include <mach/hardware.h>
#include <mach/h3600.h>
#include <asm/io.h>
#ifndef CONFIG_IPAQ_HANDHELD
#error This is for iPAQ Handhelds only
#endif
#ifdef CONFIG_SA1100_JORNADA56X
static void jornada56x_set_vpp(struct map_info *map, int vpp)
{
if (vpp)
GPSR = GPIO_GPIO26;
else
GPCR = GPIO_GPIO26;
GPDR |= GPIO_GPIO26;
}
#endif
#ifdef CONFIG_SA1100_JORNADA720
static void jornada720_set_vpp(struct map_info *map, int vpp)
{
if (vpp)
PPSR |= 0x80;
else
PPSR &= ~0x80;
PPDR |= 0x80;
}
#endif
#define MAX_IPAQ_CS 2 /* Number of CS we are going to test */
#define IPAQ_MAP_INIT(X) \
{ \
name: "IPAQ flash " X, \
}
static struct map_info ipaq_map[MAX_IPAQ_CS] = {
IPAQ_MAP_INIT("bank 1"),
IPAQ_MAP_INIT("bank 2")
};
static struct mtd_info *my_sub_mtd[MAX_IPAQ_CS] = {
NULL,
NULL
};
/*
* Here are partition information for all known IPAQ-based devices.
* See include/linux/mtd/partitions.h for definition of the mtd_partition
* structure.
*
* The *_max_flash_size is the maximum possible mapped flash size which
* is not necessarily the actual flash size. It must be no more than
* the value specified in the "struct map_desc *_io_desc" mapping
* definition for the corresponding machine.
*
* Please keep these in alphabetical order, and formatted as per existing
* entries. Thanks.
*/
#ifdef CONFIG_IPAQ_HANDHELD
static unsigned long h3xxx_max_flash_size = 0x04000000;
static struct mtd_partition h3xxx_partitions[] = {
{
name: "H3XXX boot firmware",
#ifndef CONFIG_LAB
size: 0x00040000,
#else
size: 0x00080000,
#endif
offset: 0,
#ifndef CONFIG_LAB
mask_flags: MTD_WRITEABLE, /* force read-only */
#endif
},
{
name: "H3XXX root jffs2",
#ifndef CONFIG_LAB
size: 0x2000000 - 2*0x40000, /* Warning, this is fixed later */
offset: 0x00040000,
#else
size: 0x2000000 - 0x40000 - 0x80000, /* Warning, this is fixed later */
offset: 0x00080000,
#endif
},
{
name: "asset",
size: 0x40000,
offset: 0x2000000 - 0x40000, /* Warning, this is fixed later */
mask_flags: MTD_WRITEABLE, /* force read-only */
}
};
#ifndef CONFIG_MTD_CONCAT
static struct mtd_partition h3xxx_partitions_bank2[] = {
/* this is used only on 2 CS machines when concat is not present */
{
name: "second H3XXX root jffs2",
size: 0x1000000 - 0x40000, /* Warning, this is fixed later */
offset: 0x00000000,
},
{
name: "second asset",
size: 0x40000,
offset: 0x1000000 - 0x40000, /* Warning, this is fixed later */
mask_flags: MTD_WRITEABLE, /* force read-only */
}
};
#endif
static DEFINE_SPINLOCK(ipaq_vpp_lock);
static void h3xxx_set_vpp(struct map_info *map, int vpp)
{
static int nest = 0;
spin_lock(&ipaq_vpp_lock);
if (vpp)
nest++;
else
nest--;
if (nest)
assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 1);
else
assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 0);
spin_unlock(&ipaq_vpp_lock);
}
#endif
#if defined(CONFIG_SA1100_JORNADA56X) || defined(CONFIG_SA1100_JORNADA720)
static unsigned long jornada_max_flash_size = 0x02000000;
static struct mtd_partition jornada_partitions[] = {
{
name: "Jornada boot firmware",
size: 0x00040000,
offset: 0,
mask_flags: MTD_WRITEABLE, /* force read-only */
}, {
name: "Jornada root jffs2",
size: MTDPART_SIZ_FULL,
offset: 0x00040000,
}
};
#endif
static struct mtd_partition *parsed_parts;
static struct mtd_info *mymtd;
static unsigned long cs_phys[] = {
#ifdef CONFIG_ARCH_SA1100
SA1100_CS0_PHYS,
SA1100_CS1_PHYS,
SA1100_CS2_PHYS,
SA1100_CS3_PHYS,
SA1100_CS4_PHYS,
SA1100_CS5_PHYS,
#else
PXA_CS0_PHYS,
PXA_CS1_PHYS,
PXA_CS2_PHYS,
PXA_CS3_PHYS,
PXA_CS4_PHYS,
PXA_CS5_PHYS,
#endif
};
static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int __init h1900_special_case(void);
static int __init ipaq_mtd_init(void)
{
struct mtd_partition *parts = NULL;
int nb_parts = 0;
int parsed_nr_parts = 0;
const char *part_type;
int i; /* used when we have >1 flash chips */
unsigned long tot_flashsize = 0; /* used when we have >1 flash chips */
/* Default flash bankwidth */
// ipaq_map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
if (machine_is_h1900())
{
/* For our intents, the h1900 is not a real iPAQ, so we special-case it. */
return h1900_special_case();
}
if (machine_is_h3100() || machine_is_h1900())
for(i=0; i<MAX_IPAQ_CS; i++)
ipaq_map[i].bankwidth = 2;
else
for(i=0; i<MAX_IPAQ_CS; i++)
ipaq_map[i].bankwidth = 4;
/*
* Static partition definition selection
*/
part_type = "static";
simple_map_init(&ipaq_map[0]);
simple_map_init(&ipaq_map[1]);
#ifdef CONFIG_IPAQ_HANDHELD
if (machine_is_ipaq()) {
parts = h3xxx_partitions;
nb_parts = ARRAY_SIZE(h3xxx_partitions);
for(i=0; i<MAX_IPAQ_CS; i++) {
ipaq_map[i].size = h3xxx_max_flash_size;
ipaq_map[i].set_vpp = h3xxx_set_vpp;
ipaq_map[i].phys = cs_phys[i];
ipaq_map[i].virt = ioremap(cs_phys[i], 0x04000000);
if (machine_is_h3100 () || machine_is_h1900())
ipaq_map[i].bankwidth = 2;
}
if (machine_is_h3600()) {
/* No asset partition here */
h3xxx_partitions[1].size += 0x40000;
nb_parts--;
}
}
#endif
#ifdef CONFIG_ARCH_H5400
if (machine_is_h5400()) {
ipaq_map[0].size = 0x02000000;
ipaq_map[1].size = 0x02000000;
ipaq_map[1].phys = 0x02000000;
ipaq_map[1].virt = ipaq_map[0].virt + 0x02000000;
}
#endif
#ifdef CONFIG_ARCH_H1900
if (machine_is_h1900()) {
ipaq_map[0].size = 0x00400000;
ipaq_map[1].size = 0x02000000;
ipaq_map[1].phys = 0x00080000;
ipaq_map[1].virt = ipaq_map[0].virt + 0x00080000;
}
#endif
#ifdef CONFIG_SA1100_JORNADA56X
if (machine_is_jornada56x()) {
parts = jornada_partitions;
nb_parts = ARRAY_SIZE(jornada_partitions);
ipaq_map[0].size = jornada_max_flash_size;
ipaq_map[0].set_vpp = jornada56x_set_vpp;
ipaq_map[0].virt = (__u32)ioremap(0x0, 0x04000000);
}
#endif
#ifdef CONFIG_SA1100_JORNADA720
if (machine_is_jornada720()) {
parts = jornada_partitions;
nb_parts = ARRAY_SIZE(jornada_partitions);
ipaq_map[0].size = jornada_max_flash_size;
ipaq_map[0].set_vpp = jornada720_set_vpp;
}
#endif
if (machine_is_ipaq()) { /* for iPAQs only */
for(i=0; i<MAX_IPAQ_CS; i++) {
printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with CFI.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
my_sub_mtd[i] = do_map_probe("cfi_probe", &ipaq_map[i]);
if (!my_sub_mtd[i]) {
printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
my_sub_mtd[i] = do_map_probe("jedec_probe", &ipaq_map[i]);
}
if (!my_sub_mtd[i]) {
printk(KERN_NOTICE "iPAQ flash: failed to find flash.\n");
if (i)
break;
else
return -ENXIO;
} else
printk(KERN_NOTICE "iPAQ flash: found %d bytes\n", my_sub_mtd[i]->size);
/* do we really need this debugging? --joshua 20030703 */
// printk("my_sub_mtd[%d]=%p\n", i, my_sub_mtd[i]);
my_sub_mtd[i]->owner = THIS_MODULE;
tot_flashsize += my_sub_mtd[i]->size;
}
#ifdef CONFIG_MTD_CONCAT
/* fix the asset location */
# ifdef CONFIG_LAB
h3xxx_partitions[1].size = tot_flashsize - 0x40000 - 0x80000 /* extra big boot block */;
# else
h3xxx_partitions[1].size = tot_flashsize - 2 * 0x40000;
# endif
h3xxx_partitions[2].offset = tot_flashsize - 0x40000;
/* and concat the devices */
mymtd = mtd_concat_create(&my_sub_mtd[0], i,
"ipaq");
if (!mymtd) {
printk("Cannot create iPAQ concat device\n");
return -ENXIO;
}
#else
mymtd = my_sub_mtd[0];
/*
* In the very near future, command line partition parsing
* will use the device name as 'mtd-id' instead of a value
* passed to the parse_cmdline_partitions() routine. Since
* the bootldr says 'ipaq', make sure it continues to work.
*/
mymtd->name = "ipaq";
if ((machine_is_h3600())) {
# ifdef CONFIG_LAB
h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x80000;
# else
h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000;
# endif
nb_parts = 2;
} else {
# ifdef CONFIG_LAB
h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000 - 0x80000; /* extra big boot block */
# else
h3xxx_partitions[1].size = my_sub_mtd[0]->size - 2*0x40000;
# endif
h3xxx_partitions[2].offset = my_sub_mtd[0]->size - 0x40000;
}
if (my_sub_mtd[1]) {
# ifdef CONFIG_LAB
h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x80000;
# else
h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x40000;
# endif
h3xxx_partitions_bank2[1].offset = my_sub_mtd[1]->size - 0x40000;
}
#endif
}
else {
/*
* Now let's probe for the actual flash. Do it here since
* specific machine settings might have been set above.
*/
printk(KERN_NOTICE "IPAQ flash: probing %d-bit flash bus, window=%lx\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
mymtd = do_map_probe("cfi_probe", &ipaq_map[0]);
if (!mymtd)
return -ENXIO;
mymtd->owner = THIS_MODULE;
}
/*
* Dynamic partition selection stuff (might override the static ones)
*/
i = parse_mtd_partitions(mymtd, part_probes, &parsed_parts, 0);
if (i > 0) {
nb_parts = parsed_nr_parts = i;
parts = parsed_parts;
part_type = "dynamic";
}
if (!parts) {
printk(KERN_NOTICE "IPAQ flash: no partition info available, registering whole flash at once\n");
add_mtd_device(mymtd);
#ifndef CONFIG_MTD_CONCAT
if (my_sub_mtd[1])
add_mtd_device(my_sub_mtd[1]);
#endif
} else {
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
add_mtd_partitions(mymtd, parts, nb_parts);
#ifndef CONFIG_MTD_CONCAT
if (my_sub_mtd[1])
add_mtd_partitions(my_sub_mtd[1], h3xxx_partitions_bank2, ARRAY_SIZE(h3xxx_partitions_bank2));
#endif
}
return 0;
}
static void __exit ipaq_mtd_cleanup(void)
{
int i;
if (mymtd) {
del_mtd_partitions(mymtd);
#ifndef CONFIG_MTD_CONCAT
if (my_sub_mtd[1])
del_mtd_partitions(my_sub_mtd[1]);
#endif
map_destroy(mymtd);
#ifdef CONFIG_MTD_CONCAT
for(i=0; i<MAX_IPAQ_CS; i++)
#else
for(i=1; i<MAX_IPAQ_CS; i++)
#endif
{
if (my_sub_mtd[i])
map_destroy(my_sub_mtd[i]);
}
kfree(parsed_parts);
}
}
static int __init h1900_special_case(void)
{
/* The iPAQ h1900 is a special case - it has weird ROM. */
simple_map_init(&ipaq_map[0]);
ipaq_map[0].size = 0x80000;
ipaq_map[0].set_vpp = h3xxx_set_vpp;
ipaq_map[0].phys = 0x0;
ipaq_map[0].virt = ioremap(0x0, 0x04000000);
ipaq_map[0].bankwidth = 2;
printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
mymtd = do_map_probe("jedec_probe", &ipaq_map[0]);
if (!mymtd)
return -ENODEV;
add_mtd_device(mymtd);
printk(KERN_NOTICE "iPAQ flash: registered h1910 flash\n");
return 0;
}
module_init(ipaq_mtd_init);
module_exit(ipaq_mtd_cleanup);
MODULE_AUTHOR("Jamey Hicks");
MODULE_DESCRIPTION("IPAQ CFI map driver");
MODULE_LICENSE("MIT");

View File

@ -210,7 +210,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
* not attempt to do a direct access on us.
*/
info->map.phys = NO_XIP;
info->map.size = dev->resource->end - dev->resource->start + 1;
info->map.size = resource_size(dev->resource);
/*
* We only support 16-bit accesses for now. If and when
@ -224,7 +224,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
info->map.copy_from = ixp4xx_copy_from,
info->res = request_mem_region(dev->resource->start,
dev->resource->end - dev->resource->start + 1,
resource_size(dev->resource),
"IXP4XXFlash");
if (!info->res) {
printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
@ -233,7 +233,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
}
info->map.virt = ioremap(dev->resource->start,
dev->resource->end - dev->resource->start + 1);
resource_size(dev->resource));
if (!info->map.virt) {
printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
err = -EIO;

View File

@ -48,23 +48,22 @@ static int physmap_flash_remove(struct platform_device *dev)
if (info->cmtd) {
#ifdef CONFIG_MTD_PARTITIONS
if (info->nr_parts || physmap_data->nr_parts)
if (info->nr_parts || physmap_data->nr_parts) {
del_mtd_partitions(info->cmtd);
else
if (info->nr_parts)
kfree(info->parts);
} else {
del_mtd_device(info->cmtd);
}
#else
del_mtd_device(info->cmtd);
#endif
}
#ifdef CONFIG_MTD_PARTITIONS
if (info->nr_parts)
kfree(info->parts);
#endif
#ifdef CONFIG_MTD_CONCAT
if (info->cmtd != info->mtd[0])
mtd_concat_destroy(info->cmtd);
if (info->cmtd != info->mtd[0])
mtd_concat_destroy(info->cmtd);
#endif
}
for (i = 0; i < MAX_RESOURCES; i++) {
if (info->mtd[i] != NULL)
@ -130,7 +129,7 @@ static int physmap_flash_probe(struct platform_device *dev)
info->map[i].size);
if (info->map[i].virt == NULL) {
dev_err(&dev->dev, "Failed to ioremap flash region\n");
err = EIO;
err = -EIO;
goto err_out;
}

View File

@ -248,7 +248,7 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
plat->exit();
}
static struct sa_info *__init
static struct sa_info *__devinit
sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
{
struct sa_info *info;

View File

@ -612,16 +612,15 @@ static int __devinit vmu_connect(struct maple_device *mdev)
test_flash_data = be32_to_cpu(mdev->devinfo.function);
/* Need to count how many bits are set - to find out which
* function_data element has details of the memory card:
* using Brian Kernighan's/Peter Wegner's method */
for (c = 0; test_flash_data; c++)
test_flash_data &= test_flash_data - 1;
* function_data element has details of the memory card
*/
c = hweight_long(test_flash_data);
basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
if (!card) {
error = ENOMEM;
error = -ENOMEM;
goto fail_nomem;
}

View File

@ -84,9 +84,6 @@ static int mtd_blktrans_thread(void *arg)
struct request_queue *rq = tr->blkcore_priv->rq;
struct request *req = NULL;
/* we might get involved when memory gets low, so use PF_MEMALLOC */
current->flags |= PF_MEMALLOC;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
@ -381,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
"%sd", tr->name);
if (IS_ERR(tr->blkcore_priv->thread)) {
int ret = PTR_ERR(tr->blkcore_priv->thread);
ret = PTR_ERR(tr->blkcore_priv->thread);
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);

View File

@ -447,7 +447,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
for (i=0; i< MAX_MTD_DEVICES; i++)
if (mtd_table[i] == mtd)
ret = mtd_table[i];
} else if (num < MAX_MTD_DEVICES) {
} else if (num >= 0 && num < MAX_MTD_DEVICES) {
ret = mtd_table[num];
if (mtd && mtd != ret)
ret = NULL;

View File

@ -29,14 +29,34 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/kmsg_dump.h>
/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
#define OOPS_PAGE_SIZE 4096
#define MTDOOPS_HEADER_SIZE 8
static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
"record size for MTD OOPS pages in bytes (default 4096)");
static char mtddev[80];
module_param_string(mtddev, mtddev, 80, 0400);
MODULE_PARM_DESC(mtddev,
"name or index number of the MTD device to use");
static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
"set to 1 to dump oopses, 0 to only dump panics (default 1)");
static struct mtdoops_context {
struct kmsg_dumper dump;
int mtd_index;
struct work_struct work_erase;
struct work_struct work_write;
@ -44,28 +64,43 @@ static struct mtdoops_context {
int oops_pages;
int nextpage;
int nextcount;
char *name;
unsigned long *oops_page_used;
void *oops_buf;
/* writecount and disabling ready are spin lock protected */
spinlock_t writecount_lock;
int ready;
int writecount;
} oops_cxt;
static void mark_page_used(struct mtdoops_context *cxt, int page)
{
set_bit(page, cxt->oops_page_used);
}
static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
clear_bit(page, cxt->oops_page_used);
}
static int page_is_used(struct mtdoops_context *cxt, int page)
{
return test_bit(page, cxt->oops_page_used);
}
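
mark_page_used()/page_is_used() above keep one bit per mtdoops record in an array of longs. A userspace sketch of that bookkeeping (plain C; BITS_PER_LONG and DIV_ROUND_UP are redefined locally and the page count is illustrative):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG		(CHAR_BIT * sizeof(long))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long pages = 2048;	/* e.g. 8 MiB partition / 4 KiB records */
	size_t nlongs = DIV_ROUND_UP(pages, BITS_PER_LONG);
	unsigned long *used = calloc(nlongs, sizeof(*used));

	if (!used)
		return 1;
	/* mark_page_used(42) */
	used[42 / BITS_PER_LONG] |= 1UL << (42 % BITS_PER_LONG);
	/* page_is_used(42) */
	printf("page 42 used: %lu\n",
	       (used[42 / BITS_PER_LONG] >> (42 % BITS_PER_LONG)) & 1);
	free(used);
	return 0;
}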
static void mtdoops_erase_callback(struct erase_info *done)
{
wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
wake_up(wait_q);
}
static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
struct mtd_info *mtd = cxt->mtd;
u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
u32 start_page = start_page_offset / record_size;
u32 erase_pages = mtd->erasesize / record_size;
struct erase_info erase;
DECLARE_WAITQUEUE(wait, current);
wait_queue_head_t wait_q;
int ret;
int page;
init_waitqueue_head(&wait_q);
erase.mtd = mtd;
@ -81,25 +116,24 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] "
"on \"%s\" failed\n",
(unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name);
printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
(unsigned long long)erase.addr,
(unsigned long long)erase.len, mtddev);
return ret;
}
schedule(); /* Wait for erase to finish. */
remove_wait_queue(&wait_q, &wait);
/* Mark pages as unused */
for (page = start_page; page < start_page + erase_pages; page++)
mark_page_unused(cxt, page);
return 0;
}
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
struct mtd_info *mtd = cxt->mtd;
size_t retlen;
u32 count;
int ret;
cxt->nextpage++;
if (cxt->nextpage >= cxt->oops_pages)
cxt->nextpage = 0;
@ -107,25 +141,13 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt)
if (cxt->nextcount == 0xffffffff)
cxt->nextcount = 0;
ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
&retlen, (u_char *) &count);
if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
retlen, ret);
if (page_is_used(cxt, cxt->nextpage)) {
schedule_work(&cxt->work_erase);
return;
}
/* See if we need to erase the next block */
if (count != 0xffffffff) {
schedule_work(&cxt->work_erase);
return;
}
printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
cxt->nextpage, cxt->nextcount);
cxt->ready = 1;
printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
cxt->nextpage, cxt->nextcount);
}
/* Scheduled work - when we can't proceed without erasing a block */
@ -140,47 +162,47 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
if (!mtd)
return;
mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
mod = (cxt->nextpage * record_size) % mtd->erasesize;
if (mod != 0) {
cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
if (cxt->nextpage >= cxt->oops_pages)
cxt->nextpage = 0;
}
while (mtd->block_isbad) {
ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
if (!ret)
break;
if (ret < 0) {
printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
return;
}
badblock:
printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
cxt->nextpage * OOPS_PAGE_SIZE);
printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
cxt->nextpage * record_size);
i++;
cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
if (cxt->nextpage >= cxt->oops_pages)
cxt->nextpage = 0;
if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
printk(KERN_ERR "mtdoops: All blocks bad!\n");
if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
printk(KERN_ERR "mtdoops: all blocks bad!\n");
return;
}
}
for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
if (ret >= 0) {
printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
cxt->ready = 1;
printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
cxt->nextpage, cxt->nextcount);
return;
}
if (mtd->block_markbad && (ret == -EIO)) {
ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
if (mtd->block_markbad && ret == -EIO) {
ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
if (ret < 0) {
printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
return;
}
}
@ -191,36 +213,37 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
struct mtd_info *mtd = cxt->mtd;
size_t retlen;
u32 *hdr;
int ret;
if (cxt->writecount < OOPS_PAGE_SIZE)
memset(cxt->oops_buf + cxt->writecount, 0xff,
OOPS_PAGE_SIZE - cxt->writecount);
/* Add mtdoops header to the buffer */
hdr = cxt->oops_buf;
hdr[0] = cxt->nextcount;
hdr[1] = MTDOOPS_KERNMSG_MAGIC;
if (panic)
ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
record_size, &retlen, cxt->oops_buf);
else
ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
ret = mtd->write(mtd, cxt->nextpage * record_size,
record_size, &retlen, cxt->oops_buf);
cxt->writecount = 0;
if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
if (retlen != record_size || ret < 0)
printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
cxt->nextpage * record_size, retlen, record_size, ret);
mark_page_used(cxt, cxt->nextpage);
memset(cxt->oops_buf, 0xff, record_size);
mtdoops_inc_counter(cxt);
}
static void mtdoops_workfunc_write(struct work_struct *work)
{
struct mtdoops_context *cxt =
container_of(work, struct mtdoops_context, work_write);
mtdoops_write(cxt, 0);
}
}
static void find_next_position(struct mtdoops_context *cxt)
{
@ -230,28 +253,33 @@ static void find_next_position(struct mtdoops_context *cxt)
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) {
printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)"
", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
/* Assume the page is used */
mark_page_used(cxt, page);
ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
&retlen, (u_char *) &count[0]);
if (retlen != MTDOOPS_HEADER_SIZE ||
(ret < 0 && ret != -EUCLEAN)) {
printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
page * record_size, retlen,
MTDOOPS_HEADER_SIZE, ret);
continue;
}
if (count[1] != MTDOOPS_KERNMSG_MAGIC)
continue;
if (count[0] == 0xffffffff && count[1] == 0xffffffff)
mark_page_unused(cxt, page);
if (count[0] == 0xffffffff)
continue;
if (maxcount == 0xffffffff) {
maxcount = count[0];
maxpos = page;
} else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) {
} else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
maxcount = count[0];
maxpos = page;
} else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) {
} else if (count[0] > maxcount && count[0] < 0xc0000000) {
maxcount = count[0];
maxpos = page;
} else if ((count[0] > maxcount) && (count[0] > 0xc0000000)
&& (maxcount > 0x80000000)) {
} else if (count[0] > maxcount && count[0] > 0xc0000000
&& maxcount > 0x80000000) {
maxcount = count[0];
maxpos = page;
}
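
The chain of comparisons above picks the newest record even when the 32-bit counter has wrapped. A small userspace sketch of the same test with two made-up counter values:

#include <stdint.h>
#include <stdio.h>

static int newer(uint32_t count, uint32_t maxcount)
{
	if (maxcount == 0xffffffff)
		return 1;		/* first valid record seen */
	if (count < 0x40000000 && maxcount > 0xc0000000)
		return 1;		/* count wrapped past maxcount */
	if (count > maxcount && count < 0xc0000000)
		return 1;		/* plain ordering */
	if (count > maxcount && count > 0xc0000000 && maxcount > 0x80000000)
		return 1;		/* both in the top quarter of the range */
	return 0;
}

int main(void)
{
	printf("%d\n", newer(0x00000005, 0xfffffff0));	/* 1: wrapped, so newer */
	printf("%d\n", newer(0xfffffff0, 0x00000005));	/* 0: older */
	return 0;
}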
@ -269,37 +297,91 @@ static void find_next_position(struct mtdoops_context *cxt)
mtdoops_inc_counter(cxt);
}
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
const char *s2, unsigned long l2)
{
struct mtdoops_context *cxt = container_of(dumper,
struct mtdoops_context, dump);
unsigned long s1_start, s2_start;
unsigned long l1_cpy, l2_cpy;
char *dst;
/* Only dump oopses if dump_oops is set */
if (reason == KMSG_DUMP_OOPS && !dump_oops)
return;
dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
s2_start = l2 - l2_cpy;
s1_start = l1 - l1_cpy;
memcpy(dst, s1 + s1_start, l1_cpy);
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
/* Panics must be written immediately */
if (reason == KMSG_DUMP_PANIC) {
if (!cxt->mtd->panic_write)
printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
else
mtdoops_write(cxt, 1);
return;
}
/* For other cases, schedule work to write it "nicely" */
schedule_work(&cxt->work_write);
}
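
The min() arithmetic in mtdoops_do_dump() keeps the newest tail of the two kmsg buffers that still fits behind the 8-byte header. A worked userspace example with made-up lengths:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long record_size = 4096, header = 8;
	unsigned long l1 = 10000, l2 = 300;	/* hypothetical kmsg chunk lengths */
	unsigned long l2_cpy = MIN(l2, record_size - header);
	unsigned long l1_cpy = MIN(l1, record_size - header - l2_cpy);

	/* copies start at s1 + (l1 - l1_cpy) and s2 + (l2 - l2_cpy): the tails */
	printf("l1_cpy=%lu from offset %lu, l2_cpy=%lu from offset %lu\n",
	       l1_cpy, l1 - l1_cpy, l2_cpy, l2 - l2_cpy);
	return 0;
}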
static void mtdoops_notify_add(struct mtd_info *mtd)
{
struct mtdoops_context *cxt = &oops_cxt;
u64 mtdoops_pages = div_u64(mtd->size, record_size);
int err;
if (cxt->name && !strcmp(mtd->name, cxt->name))
if (!strcmp(mtd->name, mtddev))
cxt->mtd_index = mtd->index;
if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
return;
if (mtd->size < (mtd->erasesize * 2)) {
printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n",
mtd->index);
if (mtd->size < mtd->erasesize * 2) {
printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
mtd->index);
return;
}
if (mtd->erasesize < record_size) {
printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
mtd->index);
return;
}
if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
return;
}
if (mtd->erasesize < OOPS_PAGE_SIZE) {
printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
mtd->index);
/* oops_page_used is a bit field */
cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
BITS_PER_LONG) * sizeof(unsigned long));
if (!cxt->oops_page_used) {
printk(KERN_ERR "mtdoops: could not allocate page array\n");
return;
}
cxt->dump.dump = mtdoops_do_dump;
err = kmsg_dump_register(&cxt->dump);
if (err) {
printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
vfree(cxt->oops_page_used);
cxt->oops_page_used = NULL;
return;
}
cxt->mtd = mtd;
if (mtd->size > INT_MAX)
cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
else
cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
cxt->oops_pages = (int)mtd->size / record_size;
find_next_position(cxt);
printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}
@ -307,149 +389,78 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
{
struct mtdoops_context *cxt = &oops_cxt;
if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
return;
if (kmsg_dump_unregister(&cxt->dump) < 0)
printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
cxt->mtd = NULL;
flush_scheduled_work();
}
static void mtdoops_console_sync(void)
{
struct mtdoops_context *cxt = &oops_cxt;
struct mtd_info *mtd = cxt->mtd;
unsigned long flags;
if (!cxt->ready || !mtd || cxt->writecount == 0)
return;
/*
* Once ready is 0 and we've held the lock no further writes to the
* buffer will happen
*/
spin_lock_irqsave(&cxt->writecount_lock, flags);
if (!cxt->ready) {
spin_unlock_irqrestore(&cxt->writecount_lock, flags);
return;
}
cxt->ready = 0;
spin_unlock_irqrestore(&cxt->writecount_lock, flags);
if (mtd->panic_write && in_interrupt())
/* Interrupt context, we're going to panic so try and log */
mtdoops_write(cxt, 1);
else
schedule_work(&cxt->work_write);
}
static void
mtdoops_console_write(struct console *co, const char *s, unsigned int count)
{
struct mtdoops_context *cxt = co->data;
struct mtd_info *mtd = cxt->mtd;
unsigned long flags;
if (!oops_in_progress) {
mtdoops_console_sync();
return;
}
if (!cxt->ready || !mtd)
return;
/* Locking on writecount ensures sequential writes to the buffer */
spin_lock_irqsave(&cxt->writecount_lock, flags);
/* Check ready status didn't change whilst waiting for the lock */
if (!cxt->ready) {
spin_unlock_irqrestore(&cxt->writecount_lock, flags);
return;
}
if (cxt->writecount == 0) {
u32 *stamp = cxt->oops_buf;
*stamp++ = cxt->nextcount;
*stamp = MTDOOPS_KERNMSG_MAGIC;
cxt->writecount = 8;
}
if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
count = OOPS_PAGE_SIZE - cxt->writecount;
memcpy(cxt->oops_buf + cxt->writecount, s, count);
cxt->writecount += count;
spin_unlock_irqrestore(&cxt->writecount_lock, flags);
if (cxt->writecount == OOPS_PAGE_SIZE)
mtdoops_console_sync();
}
static int __init mtdoops_console_setup(struct console *co, char *options)
{
struct mtdoops_context *cxt = co->data;
if (cxt->mtd_index != -1 || cxt->name)
return -EBUSY;
if (options) {
cxt->name = kstrdup(options, GFP_KERNEL);
return 0;
}
if (co->index == -1)
return -EINVAL;
cxt->mtd_index = co->index;
return 0;
}
static struct mtd_notifier mtdoops_notifier = {
.add = mtdoops_notify_add,
.remove = mtdoops_notify_remove,
};
static struct console mtdoops_console = {
.name = "ttyMTD",
.write = mtdoops_console_write,
.setup = mtdoops_console_setup,
.unblank = mtdoops_console_sync,
.index = -1,
.data = &oops_cxt,
};
static int __init mtdoops_console_init(void)
static int __init mtdoops_init(void)
{
struct mtdoops_context *cxt = &oops_cxt;
int mtd_index;
char *endp;
if (strlen(mtddev) == 0) {
printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
return -EINVAL;
}
if ((record_size & 4095) != 0) {
printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
return -EINVAL;
}
if (record_size < 4096) {
printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n");
return -EINVAL;
}
/* Setup the MTD device to use */
cxt->mtd_index = -1;
cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
spin_lock_init(&cxt->writecount_lock);
mtd_index = simple_strtoul(mtddev, &endp, 0);
if (*endp == '\0')
cxt->mtd_index = mtd_index;
if (cxt->mtd_index > MAX_MTD_DEVICES) {
printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
mtd_index);
return -EINVAL;
}
cxt->oops_buf = vmalloc(record_size);
if (!cxt->oops_buf) {
printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
return -ENOMEM;
}
memset(cxt->oops_buf, 0xff, record_size);
INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
register_console(&mtdoops_console);
register_mtd_user(&mtdoops_notifier);
return 0;
}
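
mtdoops_init() above accepts either an index or a name in mtddev by checking where simple_strtoul() stopped. A userspace sketch of that parsing rule (strtol stands in for simple_strtoul; the sample strings are made up):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *args[] = { "2", "mtdoops-part" };	/* hypothetical mtddev values */
	int i;

	for (i = 0; i < 2; i++) {
		char *endp;
		long idx = strtol(args[i], &endp, 0);

		if (*endp == '\0')
			printf("\"%s\" -> select by index %ld\n", args[i], idx);
		else
			printf("\"%s\" -> select by device name\n", args[i]);
	}
	return 0;
}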
static void __exit mtdoops_console_exit(void)
static void __exit mtdoops_exit(void)
{
struct mtdoops_context *cxt = &oops_cxt;
unregister_mtd_user(&mtdoops_notifier);
unregister_console(&mtdoops_console);
kfree(cxt->name);
vfree(cxt->oops_buf);
vfree(cxt->oops_page_used);
}
subsys_initcall(mtdoops_console_init);
module_exit(mtdoops_console_exit);
module_init(mtdoops_init);
module_exit(mtdoops_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");

View File

@ -201,6 +201,22 @@ config MTD_NAND_S3C2410_CLKSTOP
when the NAND chip is selected or released, but will save
approximately 5mA of power when there is nothing happening.
config MTD_NAND_BCM_UMI
tristate "NAND Flash support for BCM Reference Boards"
depends on ARCH_BCMRING && MTD_NAND
help
This enables the NAND flash controller on the BCM UMI block.
No board specific support is done by this driver; each board
must advertise a platform_device for the driver to attach.
config MTD_NAND_BCM_UMI_HWCS
bool "BCM UMI NAND Hardware CS"
depends on MTD_NAND_BCM_UMI
help
Enable the use of the BCM UMI block's internal CS using NAND.
This should only be used if you know the external NAND CS can toggle.
config MTD_NAND_DISKONCHIP
tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
depends on EXPERIMENTAL

View File

@ -42,5 +42,6 @@ obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
nand-objs := nand_base.o nand_bbt.o

View File

@ -372,15 +372,6 @@ static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob)
return __alauda_read_page(mtd, from, ignore_buf, oob);
}
static int popcount8(u8 c)
{
int ret = 0;
for ( ; c; c>>=1)
ret += c & 1;
return ret;
}
static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
{
u8 oob[16];
@ -391,7 +382,7 @@ static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
return err;
/* A block is marked bad if two or more bits are zero */
return popcount8(oob[5]) >= 7 ? 0 : 1;
return hweight8(oob[5]) >= 7 ? 0 : 1;
}
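
The hunk above replaces popcount8() with hweight8(); the check still implements the comment's rule that two or more zero bits mark the block bad. A userspace illustration (GCC's __builtin_popcount stands in for hweight8, the marker bytes are made up):

#include <stdio.h>

int main(void)
{
	unsigned char good = 0xff, worn = 0xfe, bad = 0x3f;

	printf("%d %d %d\n",
	       __builtin_popcount(good) >= 7,	/* 1: factory-good marker */
	       __builtin_popcount(worn) >= 7,	/* 1: a single flipped bit is tolerated */
	       __builtin_popcount(bad)  >= 7);	/* 0: block treated as bad */
	return 0;
}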
static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,

View File

@ -192,7 +192,6 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
{
struct nand_chip *nand_chip = mtd->priv;
struct atmel_nand_host *host = nand_chip->priv;
uint32_t *eccpos = nand_chip->ecc.layout->eccpos;
unsigned int ecc_value;
/* get the first 2 ECC bytes */
@ -464,7 +463,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if (host->board->det_pin) {
if (gpio_get_value(host->board->det_pin)) {
printk(KERN_INFO "No SmartMedia card inserted.\n");
res = ENXIO;
res = -ENXIO;
goto err_no_card;
}
}
@ -535,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if ((!partitions) || (num_partitions == 0)) {
printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
res = ENXIO;
res = -ENXIO;
goto err_no_partitions;
}

View File

@ -0,0 +1,213 @@
/*****************************************************************************
* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/
/* ---- Include Files ---------------------------------------------------- */
#include "nand_bcm_umi.h"
/* ---- External Variable Declarations ----------------------------------- */
/* ---- External Function Prototypes ------------------------------------- */
/* ---- Public Variables ------------------------------------------------- */
/* ---- Private Constants and Types -------------------------------------- */
/* ---- Private Function Prototypes -------------------------------------- */
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t *buf, int page);
static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf);
/* ---- Private Variables ------------------------------------------------ */
/*
** nand_hw_eccoob
** New oob placement block for use with hardware ecc generation.
*/
static struct nand_ecclayout nand_hw_eccoob_512 = {
/* Reserve 5 for BI indicator */
.oobfree = {
#if (NAND_ECC_NUM_BYTES > 3)
{.offset = 0, .length = 2}
#else
{.offset = 0, .length = 5},
{.offset = 6, .length = 7}
#endif
}
};
/*
** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
** except the BI is at byte 0.
*/
static struct nand_ecclayout nand_hw_eccoob_2048 = {
/* Reserve 0 as BI indicator */
.oobfree = {
#if (NAND_ECC_NUM_BYTES > 10)
{.offset = 1, .length = 2},
#elif (NAND_ECC_NUM_BYTES > 7)
{.offset = 1, .length = 5},
{.offset = 16, .length = 6},
{.offset = 32, .length = 6},
{.offset = 48, .length = 6}
#else
{.offset = 1, .length = 8},
{.offset = 16, .length = 9},
{.offset = 32, .length = 9},
{.offset = 48, .length = 9}
#endif
}
};
/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
* except the BI is at byte 0. */
static struct nand_ecclayout nand_hw_eccoob_4096 = {
/* Reserve 0 as BI indicator */
.oobfree = {
#if (NAND_ECC_NUM_BYTES > 10)
{.offset = 1, .length = 2},
{.offset = 16, .length = 3},
{.offset = 32, .length = 3},
{.offset = 48, .length = 3},
{.offset = 64, .length = 3},
{.offset = 80, .length = 3},
{.offset = 96, .length = 3},
{.offset = 112, .length = 3}
#else
{.offset = 1, .length = 5},
{.offset = 16, .length = 6},
{.offset = 32, .length = 6},
{.offset = 48, .length = 6},
{.offset = 64, .length = 6},
{.offset = 80, .length = 6},
{.offset = 96, .length = 6},
{.offset = 112, .length = 6}
#endif
}
};
/* ---- Private Functions ------------------------------------------------ */
/* ==== Public Functions ================================================= */
/****************************************************************************
*
* bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
*
***************************************************************************/
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t * buf,
int page)
{
int sectorIdx = 0;
int eccsize = chip->ecc.size;
int eccsteps = chip->ecc.steps;
uint8_t *datap = buf;
uint8_t eccCalc[NAND_ECC_NUM_BYTES];
int sectorOobSize = mtd->oobsize / eccsteps;
int stat;
for (sectorIdx = 0; sectorIdx < eccsteps;
sectorIdx++, datap += eccsize) {
if (sectorIdx > 0) {
/* Seek to page location within sector */
chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
-1);
}
/* Enable hardware ECC before reading the buf */
nand_bcm_umi_bch_enable_read_hwecc();
/* Read in data */
bcm_umi_nand_read_buf(mtd, datap, eccsize);
/* Pause hardware ECC after reading the buf */
nand_bcm_umi_bch_pause_read_ecc_calc();
/* Read the OOB ECC */
chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
mtd->writesize + sectorIdx * sectorOobSize, -1);
nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
NAND_ECC_NUM_BYTES,
chip->oob_poi +
sectorIdx * sectorOobSize);
/* Correct any ECC detected errors */
stat =
nand_bcm_umi_bch_correct_page(datap, eccCalc,
NAND_ECC_NUM_BYTES);
/* Update Stats */
if (stat < 0) {
#if defined(NAND_BCM_UMI_DEBUG)
printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
__func__, sectorIdx);
printk(KERN_WARNING
"%s data %02x %02x %02x %02x "
"%02x %02x %02x %02x\n",
__func__, datap[0], datap[1], datap[2], datap[3],
datap[4], datap[5], datap[6], datap[7]);
printk(KERN_WARNING
"%s ecc %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x %02x "
"%02x %02x %02x\n",
__func__, eccCalc[0], eccCalc[1], eccCalc[2],
eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
eccCalc[11], eccCalc[12]);
BUG();
#endif
mtd->ecc_stats.failed++;
} else {
#if defined(NAND_BCM_UMI_DEBUG)
if (stat > 0) {
printk(KERN_INFO
"%s %d correctable_errors detected\n",
__func__, stat);
}
#endif
mtd->ecc_stats.corrected += stat;
}
}
return 0;
}
/****************************************************************************
*
* bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
*
***************************************************************************/
static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf)
{
int sectorIdx = 0;
int eccsize = chip->ecc.size;
int eccsteps = chip->ecc.steps;
const uint8_t *datap = buf;
uint8_t *oobp = chip->oob_poi;
int sectorOobSize = mtd->oobsize / eccsteps;
for (sectorIdx = 0; sectorIdx < eccsteps;
sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
/* Enable hardware ECC before writing the buf */
nand_bcm_umi_bch_enable_write_hwecc();
bcm_umi_nand_write_buf(mtd, datap, eccsize);
nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
NAND_ECC_NUM_BYTES);
}
bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
}

View File

@ -0,0 +1,581 @@
/*****************************************************************************
* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/
/* ---- Include Files ---------------------------------------------------- */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <asm/mach-types.h>
#include <asm/system.h>
#include <mach/reg_nand.h>
#include <mach/reg_umi.h>
#include "nand_bcm_umi.h"
#include <mach/memory_settings.h>
#define USE_DMA 1
#include <mach/dma.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
/* ---- External Variable Declarations ----------------------------------- */
/* ---- External Function Prototypes ------------------------------------- */
/* ---- Public Variables ------------------------------------------------- */
/* ---- Private Constants and Types -------------------------------------- */
static const __devinitconst char gBanner[] = KERN_INFO \
"BCM UMI MTD NAND Driver: 1.00\n";
#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
#endif
#if NAND_ECC_BCH
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr largepage_bbt = {
.options = 0,
.offs = 0,
.len = 1,
.pattern = scan_ff_pattern
};
#endif
/*
** Preallocate a buffer to avoid having to do this every dma operation.
** This is the size of the preallocated coherent DMA buffer.
*/
#if USE_DMA
#define DMA_MIN_BUFLEN 512
#define DMA_MAX_BUFLEN PAGE_SIZE
#define USE_DIRECT_IO(len) (((len) < DMA_MIN_BUFLEN) || \
((len) > DMA_MAX_BUFLEN))
/*
* The current NAND data space goes from 0x80001900 to 0x80001FFF,
* which is only 0x700 = 1792 bytes long. This is too small for 2K, 4K page
* size NAND flash. Need to break the DMA down to multiple 1Ks.
*
* Need to make sure REG_NAND_DATA_PADDR + DMA_MAX_LEN < 0x80002000
*/
#define DMA_MAX_LEN 1024
#else /* !USE_DMA */
#define DMA_MIN_BUFLEN 0
#define DMA_MAX_BUFLEN 0
#define USE_DIRECT_IO(len) 1
#endif
/* ---- Private Function Prototypes -------------------------------------- */
static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len);
static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
int len);
/* ---- Private Variables ------------------------------------------------ */
static struct mtd_info *board_mtd;
static void __iomem *bcm_umi_io_base;
static void *virtPtr;
static dma_addr_t physPtr;
static struct completion nand_comp;
/* ---- Private Functions ------------------------------------------------ */
#if NAND_ECC_BCH
#include "bcm_umi_bch.c"
#else
#include "bcm_umi_hamming.c"
#endif
#if USE_DMA
/* Handler called when the DMA finishes. */
static void nand_dma_handler(DMA_Device_t dev, int reason, void *userData)
{
complete(&nand_comp);
}
static int nand_dma_init(void)
{
int rc;
rc = dma_set_device_handler(DMA_DEVICE_NAND_MEM_TO_MEM,
nand_dma_handler, NULL);
if (rc != 0) {
printk(KERN_ERR "dma_set_device_handler failed: %d\n", rc);
return rc;
}
virtPtr =
dma_alloc_coherent(NULL, DMA_MAX_BUFLEN, &physPtr, GFP_KERNEL);
if (virtPtr == NULL) {
printk(KERN_ERR "NAND - Failed to allocate memory for DMA buffer\n");
return -ENOMEM;
}
return 0;
}
static void nand_dma_term(void)
{
if (virtPtr != NULL)
dma_free_coherent(NULL, DMA_MAX_BUFLEN, virtPtr, physPtr);
}
static void nand_dma_read(void *buf, int len)
{
int offset = 0;
int tmp_len = 0;
int len_left = len;
DMA_Handle_t hndl;
if (virtPtr == NULL)
panic("nand_dma_read: virtPtr == NULL\n");
if ((void *)physPtr == NULL)
panic("nand_dma_read: physPtr == NULL\n");
hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
if (hndl < 0) {
printk(KERN_ERR
"nand_dma_read: unable to allocate dma channel: %d\n",
(int)hndl);
panic("\n");
}
while (len_left > 0) {
if (len_left > DMA_MAX_LEN) {
tmp_len = DMA_MAX_LEN;
len_left -= DMA_MAX_LEN;
} else {
tmp_len = len_left;
len_left = 0;
}
init_completion(&nand_comp);
dma_transfer_mem_to_mem(hndl, REG_NAND_DATA_PADDR,
physPtr + offset, tmp_len);
wait_for_completion(&nand_comp);
offset += tmp_len;
}
dma_free_channel(hndl);
if (buf != NULL)
memcpy(buf, virtPtr, len);
}
static void nand_dma_write(const void *buf, int len)
{
int offset = 0;
int tmp_len = 0;
int len_left = len;
DMA_Handle_t hndl;
if (buf == NULL)
panic("nand_dma_write: buf == NULL\n");
if (virtPtr == NULL)
panic("nand_dma_write: virtPtr == NULL\n");
if ((void *)physPtr == NULL)
panic("nand_dma_write: physPtr == NULL\n");
memcpy(virtPtr, buf, len);
hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
if (hndl < 0) {
printk(KERN_ERR
"nand_dma_write: unable to allocate dma channel: %d\n",
(int)hndl);
panic("\n");
}
while (len_left > 0) {
if (len_left > DMA_MAX_LEN) {
tmp_len = DMA_MAX_LEN;
len_left -= DMA_MAX_LEN;
} else {
tmp_len = len_left;
len_left = 0;
}
init_completion(&nand_comp);
dma_transfer_mem_to_mem(hndl, physPtr + offset,
REG_NAND_DATA_PADDR, tmp_len);
wait_for_completion(&nand_comp);
offset += tmp_len;
}
dma_free_channel(hndl);
}
#endif
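
nand_dma_read()/nand_dma_write() above split every transfer into DMA_MAX_LEN pieces because the NAND data window is smaller than a 2K/4K page. A userspace sketch of just the chunking loop (the transfer length is made up):

#include <stdio.h>

#define DMA_MAX_LEN 1024

int main(void)
{
	int len = 4096 + 218;		/* hypothetical page + OOB transfer */
	int offset = 0, left = len;

	while (left > 0) {
		int chunk = left > DMA_MAX_LEN ? DMA_MAX_LEN : left;

		printf("dma %d bytes at offset %d\n", chunk, offset);
		offset += chunk;
		left -= chunk;
	}
	return 0;
}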
static int nand_dev_ready(struct mtd_info *mtd)
{
return nand_bcm_umi_dev_ready();
}
/****************************************************************************
*
* bcm_umi_nand_inithw
*
* This routine does the necessary hardware (board-specific)
* initializations. This includes setting up the timings, etc.
*
***************************************************************************/
int bcm_umi_nand_inithw(void)
{
/* Configure nand timing parameters */
REG_UMI_NAND_TCR &= ~0x7ffff;
REG_UMI_NAND_TCR |= HW_CFG_NAND_TCR;
#if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
/* enable software control of CS */
REG_UMI_NAND_TCR |= REG_UMI_NAND_TCR_CS_SWCTRL;
#endif
/* keep NAND chip select asserted */
REG_UMI_NAND_RCSR |= REG_UMI_NAND_RCSR_CS_ASSERTED;
REG_UMI_NAND_TCR &= ~REG_UMI_NAND_TCR_WORD16;
/* enable writes to flash */
REG_UMI_MMD_ICR |= REG_UMI_MMD_ICR_FLASH_WP;
writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
nand_bcm_umi_wait_till_ready();
#if NAND_ECC_BCH
nand_bcm_umi_bch_config_ecc(NAND_ECC_NUM_BYTES);
#endif
return 0;
}
/* Used to latch the proper register for access. */
static void bcm_umi_nand_hwcontrol(struct mtd_info *mtd, int cmd,
unsigned int ctrl)
{
/* send command to hardware */
struct nand_chip *chip = mtd->priv;
if (ctrl & NAND_CTRL_CHANGE) {
if (ctrl & NAND_CLE) {
chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_CMD_OFFSET;
goto CMD;
}
if (ctrl & NAND_ALE) {
chip->IO_ADDR_W =
bcm_umi_io_base + REG_NAND_ADDR_OFFSET;
goto CMD;
}
chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
}
CMD:
/* Send command to chip directly */
if (cmd != NAND_CMD_NONE)
writeb(cmd, chip->IO_ADDR_W);
}
static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
int len)
{
if (USE_DIRECT_IO(len)) {
/* Do it the old way if the buffer is small or too large.
* Probably quicker than starting and checking dma. */
int i;
struct nand_chip *this = mtd->priv;
for (i = 0; i < len; i++)
writeb(buf[i], this->IO_ADDR_W);
}
#if USE_DMA
else
nand_dma_write(buf, len);
#endif
}
static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len)
{
if (USE_DIRECT_IO(len)) {
int i;
struct nand_chip *this = mtd->priv;
for (i = 0; i < len; i++)
buf[i] = readb(this->IO_ADDR_R);
}
#if USE_DMA
else
nand_dma_read(buf, len);
#endif
}
static uint8_t readbackbuf[NAND_MAX_PAGESIZE];
static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
int len)
{
/*
* Try to readback page with ECC correction. This is necessary
* for MLC parts which may have permanently stuck bits.
*/
struct nand_chip *chip = mtd->priv;
int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
if (ret < 0)
return -EFAULT;
else {
if (memcmp(readbackbuf, buf, len) == 0)
return 0;
return -EFAULT;
}
return 0;
}
static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct resource *r;
int err = 0;
printk(gBanner);
/* Allocate memory for MTD device structure and private data */
board_mtd =
kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
GFP_KERNEL);
if (!board_mtd) {
printk(KERN_WARNING
"Unable to allocate NAND MTD device structure.\n");
return -ENOMEM;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
return -ENXIO;
/* map physical address */
bcm_umi_io_base = ioremap(r->start, r->end - r->start + 1);
if (!bcm_umi_io_base) {
printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
kfree(board_mtd);
return -EIO;
}
/* Get pointer to private data */
this = (struct nand_chip *)(&board_mtd[1]);
/* Initialize structures */
memset((char *)board_mtd, 0, sizeof(struct mtd_info));
memset((char *)this, 0, sizeof(struct nand_chip));
/* Link the private data with the MTD structure */
board_mtd->priv = this;
/* Initialize the NAND hardware. */
if (bcm_umi_nand_inithw() < 0) {
printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
iounmap(bcm_umi_io_base);
kfree(board_mtd);
return -EIO;
}
/* Set address of NAND IO lines */
this->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
this->IO_ADDR_R = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
/* Set command delay time, see datasheet for correct value */
this->chip_delay = 0;
/* Assign the device ready function, if available */
this->dev_ready = nand_dev_ready;
this->options = 0;
this->write_buf = bcm_umi_nand_write_buf;
this->read_buf = bcm_umi_nand_read_buf;
this->verify_buf = bcm_umi_nand_verify_buf;
this->cmd_ctrl = bcm_umi_nand_hwcontrol;
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 512;
this->ecc.bytes = NAND_ECC_NUM_BYTES;
#if NAND_ECC_BCH
this->ecc.read_page = bcm_umi_bch_read_page_hwecc;
this->ecc.write_page = bcm_umi_bch_write_page_hwecc;
#else
this->ecc.correct = nand_correct_data512;
this->ecc.calculate = bcm_umi_hamming_get_hw_ecc;
this->ecc.hwctl = bcm_umi_hamming_enable_hwecc;
#endif
#if USE_DMA
err = nand_dma_init();
if (err != 0)
return err;
#endif
/* Figure out the size of the device that we have.
* We need to do this to figure out which ECC
* layout we'll be using.
*/
err = nand_scan_ident(board_mtd, 1);
if (err) {
printk(KERN_ERR "nand_scan failed: %d\n", err);
iounmap(bcm_umi_io_base);
kfree(board_mtd);
return err;
}
/* Now that we know the nand size, we can setup the ECC layout */
switch (board_mtd->writesize) { /* writesize is the pagesize */
case 4096:
this->ecc.layout = &nand_hw_eccoob_4096;
break;
case 2048:
this->ecc.layout = &nand_hw_eccoob_2048;
break;
case 512:
this->ecc.layout = &nand_hw_eccoob_512;
break;
default:
{
printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
board_mtd->writesize);
return -EINVAL;
}
}
#if NAND_ECC_BCH
if (board_mtd->writesize > 512) {
if (this->options & NAND_USE_FLASH_BBT)
largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
this->badblock_pattern = &largepage_bbt;
}
#endif
/* Now finish off the scan, now that ecc.layout has been initialized. */
err = nand_scan_tail(board_mtd);
if (err) {
printk(KERN_ERR "nand_scan failed: %d\n", err);
iounmap(bcm_umi_io_base);
kfree(board_mtd);
return err;
}
/* Register the partitions */
{
int nr_partitions;
struct mtd_partition *partition_info;
board_mtd->name = "bcm_umi-nand";
nr_partitions =
parse_mtd_partitions(board_mtd, part_probes,
&partition_info, 0);
if (nr_partitions <= 0) {
printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n",
nr_partitions);
iounmap(bcm_umi_io_base);
kfree(board_mtd);
return -EIO;
}
add_mtd_partitions(board_mtd, partition_info, nr_partitions);
}
/* Return happy */
return 0;
}
static int bcm_umi_nand_remove(struct platform_device *pdev)
{
#if USE_DMA
nand_dma_term();
#endif
/* Release resources, unregister device */
nand_release(board_mtd);
/* unmap physical address */
iounmap(bcm_umi_io_base);
/* Free the MTD device structure */
kfree(board_mtd);
return 0;
}
#ifdef CONFIG_PM
static int bcm_umi_nand_suspend(struct platform_device *pdev,
pm_message_t state)
{
printk(KERN_ERR "MTD NAND suspend is being called\n");
return 0;
}
static int bcm_umi_nand_resume(struct platform_device *pdev)
{
printk(KERN_ERR "MTD NAND resume is being called\n");
return 0;
}
#else
#define bcm_umi_nand_suspend NULL
#define bcm_umi_nand_resume NULL
#endif
static struct platform_driver nand_driver = {
.driver = {
.name = "bcm-nand",
.owner = THIS_MODULE,
},
.probe = bcm_umi_nand_probe,
.remove = bcm_umi_nand_remove,
.suspend = bcm_umi_nand_suspend,
.resume = bcm_umi_nand_resume,
};
static int __init nand_init(void)
{
return platform_driver_register(&nand_driver);
}
static void __exit nand_exit(void)
{
platform_driver_unregister(&nand_driver);
}
module_init(nand_init);
module_exit(nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Broadcom");
MODULE_DESCRIPTION("BCM UMI MTD NAND driver");

View File

@ -591,6 +591,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
info->ioaddr = (uint32_t __force) vaddr;
@ -599,7 +601,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->mask_chipsel = pdata->mask_chipsel;
/* use nandboot-capable ALE/CLE masks by default */
info->mask_ale = pdata->mask_cle ? : MASK_ALE;
info->mask_ale = pdata->mask_ale ? : MASK_ALE;
info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Set address of hardware control function */

View File

@ -128,7 +128,7 @@ static int excite_nand_devready(struct mtd_info *mtd)
* The binding to the mtd and all allocated
* resources are released.
*/
static int __exit excite_nand_remove(struct platform_device *dev)
static int __devexit excite_nand_remove(struct platform_device *dev)
{
struct excite_nand_drvdata * const this = platform_get_drvdata(dev);

View File

@ -237,12 +237,15 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
ctrl->use_mdr = 0;
dev_vdbg(ctrl->dev,
"fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n",
ctrl->status, ctrl->mdr, in_be32(&lbc->fmr));
if (ctrl->status != LTESR_CC) {
dev_info(ctrl->dev,
"command failed: fir %x fcr %x status %x mdr %x\n",
in_be32(&lbc->fir), in_be32(&lbc->fcr),
ctrl->status, ctrl->mdr);
return -EIO;
}
/* returns 0 on success otherwise non-zero) */
return ctrl->status == LTESR_CC ? 0 : -EIO;
return 0;
}
static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
@ -253,17 +256,17 @@ static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
if (priv->page_size) {
out_be32(&lbc->fir,
(FIR_OP_CW0 << FIR_OP0_SHIFT) |
(FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_CA << FIR_OP1_SHIFT) |
(FIR_OP_PA << FIR_OP2_SHIFT) |
(FIR_OP_CW1 << FIR_OP3_SHIFT) |
(FIR_OP_CM1 << FIR_OP3_SHIFT) |
(FIR_OP_RBW << FIR_OP4_SHIFT));
out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
(NAND_CMD_READSTART << FCR_CMD1_SHIFT));
} else {
out_be32(&lbc->fir,
(FIR_OP_CW0 << FIR_OP0_SHIFT) |
(FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_CA << FIR_OP1_SHIFT) |
(FIR_OP_PA << FIR_OP2_SHIFT) |
(FIR_OP_RBW << FIR_OP3_SHIFT));
@ -332,7 +335,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_READID:
dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) |
out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_UA << FIR_OP1_SHIFT) |
(FIR_OP_RBW << FIR_OP2_SHIFT));
out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
@ -359,16 +362,20 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
out_be32(&lbc->fir,
(FIR_OP_CW0 << FIR_OP0_SHIFT) |
(FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_PA << FIR_OP1_SHIFT) |
(FIR_OP_CM1 << FIR_OP2_SHIFT));
(FIR_OP_CM2 << FIR_OP2_SHIFT) |
(FIR_OP_CW1 << FIR_OP3_SHIFT) |
(FIR_OP_RS << FIR_OP4_SHIFT));
out_be32(&lbc->fcr,
(NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
(NAND_CMD_ERASE2 << FCR_CMD1_SHIFT));
(NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
(NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
out_be32(&lbc->fbcr, 0);
ctrl->read_bytes = 0;
ctrl->use_mdr = 1;
fsl_elbc_run_command(mtd);
return;
@ -383,40 +390,41 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
ctrl->column = column;
ctrl->oob = 0;
ctrl->use_mdr = 1;
fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
(NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
(NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
if (priv->page_size) {
fcr = (NAND_CMD_SEQIN << FCR_CMD0_SHIFT) |
(NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT);
out_be32(&lbc->fir,
(FIR_OP_CW0 << FIR_OP0_SHIFT) |
(FIR_OP_CM2 << FIR_OP0_SHIFT) |
(FIR_OP_CA << FIR_OP1_SHIFT) |
(FIR_OP_PA << FIR_OP2_SHIFT) |
(FIR_OP_WB << FIR_OP3_SHIFT) |
(FIR_OP_CW1 << FIR_OP4_SHIFT));
(FIR_OP_CM3 << FIR_OP4_SHIFT) |
(FIR_OP_CW1 << FIR_OP5_SHIFT) |
(FIR_OP_RS << FIR_OP6_SHIFT));
} else {
fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
(NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
out_be32(&lbc->fir,
(FIR_OP_CW0 << FIR_OP0_SHIFT) |
(FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_CM2 << FIR_OP1_SHIFT) |
(FIR_OP_CA << FIR_OP2_SHIFT) |
(FIR_OP_PA << FIR_OP3_SHIFT) |
(FIR_OP_WB << FIR_OP4_SHIFT) |
(FIR_OP_CW1 << FIR_OP5_SHIFT));
(FIR_OP_CM3 << FIR_OP5_SHIFT) |
(FIR_OP_CW1 << FIR_OP6_SHIFT) |
(FIR_OP_RS << FIR_OP7_SHIFT));
if (column >= mtd->writesize) {
/* OOB area --> READOOB */
column -= mtd->writesize;
fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
ctrl->oob = 1;
} else if (column < 256) {
} else {
WARN_ON(column != 0);
/* First 256 bytes --> READ0 */
fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
} else {
/* Second 256 bytes --> READ1 */
fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT;
}
}
@ -628,22 +636,6 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
struct fsl_elbc_mtd *priv = chip->priv;
struct fsl_elbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
if (ctrl->status != LTESR_CC)
return NAND_STATUS_FAIL;
/* Use READ_STATUS command, but wait for the device to be ready */
ctrl->use_mdr = 0;
out_be32(&lbc->fir,
(FIR_OP_CW0 << FIR_OP0_SHIFT) |
(FIR_OP_RBW << FIR_OP1_SHIFT));
out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
out_be32(&lbc->fbcr, 1);
set_addr(mtd, 0, 0, 0);
ctrl->read_bytes = 1;
fsl_elbc_run_command(mtd);
if (ctrl->status != LTESR_CC)
return NAND_STATUS_FAIL;
@ -651,8 +643,7 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
/* The chip always seems to report that it is
* write-protected, even when it is not.
*/
setbits8(ctrl->addr, NAND_STATUS_WP);
return fsl_elbc_read_byte(mtd);
return (ctrl->mdr & 0xff) | NAND_STATUS_WP;
}
static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
@ -946,6 +937,13 @@ static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
{
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
/*
* NAND transactions can tie up the bus for a long time, so set the
* bus timeout to max by clearing LBCR[BMT] (highest base counter
* value) and setting LBCR[BMTPS] to the highest prescaler value.
*/
clrsetbits_be32(&lbc->lbcr, LBCR_BMT, 15);
/* clear event registers */
setbits32(&lbc->ltesr, LTESR_NAND_MASK);
out_be32(&lbc->lteatr, 0);

View File

@ -112,7 +112,7 @@ static void fun_select_chip(struct mtd_info *mtd, int mchip_nr)
if (mchip_nr == -1) {
chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
} else if (mchip_nr >= 0) {
} else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
fun->mchip_number = mchip_nr;
chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
chip->IO_ADDR_W = chip->IO_ADDR_R;

File diff suppressed because it is too large

View File

@ -428,6 +428,28 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
return nand_isbad_bbt(mtd, ofs, allowbbt);
}
/**
* panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
* @mtd: MTD device structure
* @timeo: Timeout
*
* Helper function for nand_wait_ready used when needing to wait in interrupt
* context.
*/
static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
{
struct nand_chip *chip = mtd->priv;
int i;
/* Wait for the device to get ready */
for (i = 0; i < timeo; i++) {
if (chip->dev_ready(mtd))
break;
touch_softlockup_watchdog();
mdelay(1);
}
}
/*
* Wait for the ready pin, after a command
* The timeout is caught later.
@ -437,6 +459,10 @@ void nand_wait_ready(struct mtd_info *mtd)
struct nand_chip *chip = mtd->priv;
unsigned long timeo = jiffies + 2;
/* 400ms timeout */
if (in_interrupt() || oops_in_progress)
return panic_nand_wait_ready(mtd, 400);
led_trigger_event(nand_led_trigger, LED_FULL);
/* wait until command is processed or timeout occurs */
do {
@ -671,6 +697,22 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
nand_wait_ready(mtd);
}
/**
* panic_nand_get_device - [GENERIC] Get chip for selected access
* @chip: the nand chip descriptor
* @mtd: MTD device structure
* @new_state: the state which is requested
*
* Used when in panic, no locks are taken.
*/
static void panic_nand_get_device(struct nand_chip *chip,
struct mtd_info *mtd, int new_state)
{
/* Hardware controller shared among independent devices */
chip->controller->active = chip;
chip->state = new_state;
}
/**
* nand_get_device - [GENERIC] Get chip for selected access
* @chip: the nand chip descriptor
@ -698,8 +740,14 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
return 0;
}
if (new_state == FL_PM_SUSPENDED) {
spin_unlock(lock);
return (chip->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN;
if (chip->controller->active->state == FL_PM_SUSPENDED) {
chip->state = FL_PM_SUSPENDED;
spin_unlock(lock);
return 0;
} else {
spin_unlock(lock);
return -EAGAIN;
}
}
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(wq, &wait);
@ -709,6 +757,32 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
goto retry;
}
/**
* panic_nand_wait - [GENERIC] wait until the command is done
* @mtd: MTD device structure
* @chip: NAND chip structure
* @timeo: Timeout
*
* Wait for command done. This is a helper function for nand_wait used when
* we are in interrupt context. May happen when in panic and trying to write
* an oops through mtdoops.
*/
static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
unsigned long timeo)
{
int i;
for (i = 0; i < timeo; i++) {
if (chip->dev_ready) {
if (chip->dev_ready(mtd))
break;
} else {
if (chip->read_byte(mtd) & NAND_STATUS_READY)
break;
}
mdelay(1);
}
}
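
panic_nand_wait_ready() and panic_nand_wait() above poll once per millisecond instead of sleeping, since they run with interrupts off. A userspace sketch of the bounded-poll pattern (usleep stands in for mdelay; the ready predicate is a stub):

#include <stdio.h>
#include <unistd.h>

static int wait_ready(int (*ready)(void), unsigned long timeo_ms)
{
	unsigned long i;

	for (i = 0; i < timeo_ms; i++) {
		if (ready())
			return 1;
		usleep(1000);		/* stands in for mdelay(1) */
	}
	return 0;			/* timeout handled by the caller */
}

static int always_ready(void) { return 1; }

int main(void)
{
	printf("%d\n", wait_ready(always_ready, 400));
	return 0;
}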
/**
* nand_wait - [DEFAULT] wait until the command is done
* @mtd: MTD device structure
@ -740,15 +814,19 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
else
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
while (time_before(jiffies, timeo)) {
if (chip->dev_ready) {
if (chip->dev_ready(mtd))
break;
} else {
if (chip->read_byte(mtd) & NAND_STATUS_READY)
break;
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
else {
while (time_before(jiffies, timeo)) {
if (chip->dev_ready) {
if (chip->dev_ready(mtd))
break;
} else {
if (chip->read_byte(mtd) & NAND_STATUS_READY)
break;
}
cond_resched();
}
cond_resched();
}
led_trigger_event(nand_led_trigger, LED_OFF);
@ -1948,6 +2026,45 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
return ret;
}
/**
* panic_nand_write - [MTD Interface] NAND write with ECC
* @mtd: MTD device structure
* @to: offset to write to
* @len: number of bytes to write
* @retlen: pointer to variable to store the number of written bytes
* @buf: the data to write
*
* NAND write with ECC. Used when performing writes in interrupt context, this
* may for example be called by mtdoops when writing an oops while in panic.
*/
static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd->priv;
int ret;
/* Do not allow reads past end of device */
if ((to + len) > mtd->size)
return -EINVAL;
if (!len)
return 0;
/* Wait for the device to get ready. */
panic_nand_wait(mtd, chip, 400);
/* Grab the device. */
panic_nand_get_device(chip, mtd, FL_WRITING);
chip->ops.len = len;
chip->ops.datbuf = (uint8_t *)buf;
chip->ops.oobbuf = NULL;
ret = nand_do_write_ops(mtd, to, &chip->ops);
*retlen = chip->ops.retlen;
return ret;
}
/**
* nand_write - [MTD Interface] NAND write with ECC
* @mtd: MTD device structure
@ -2645,7 +2762,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
if (IS_ERR(type)) {
printk(KERN_WARNING "No NAND device found!!!\n");
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
printk(KERN_WARNING "No NAND device found.\n");
chip->select_chip(mtd, -1);
return PTR_ERR(type);
}
@ -2877,6 +2995,7 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->unpoint = NULL;
mtd->read = nand_read;
mtd->write = nand_write;
mtd->panic_write = panic_nand_write;
mtd->read_oob = nand_read_oob;
mtd->write_oob = nand_write_oob;
mtd->sync = nand_sync;

View File

@ -0,0 +1,149 @@
/*****************************************************************************
* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/
/* ---- Include Files ---------------------------------------------------- */
#include <mach/reg_umi.h>
#include "nand_bcm_umi.h"
#ifdef BOOT0_BUILD
#include <uart.h>
#endif
/* ---- External Variable Declarations ----------------------------------- */
/* ---- External Function Prototypes ------------------------------------- */
/* ---- Public Variables ------------------------------------------------- */
/* ---- Private Constants and Types -------------------------------------- */
/* ---- Private Function Prototypes -------------------------------------- */
/* ---- Private Variables ------------------------------------------------ */
/* ---- Private Functions ------------------------------------------------ */
#if NAND_ECC_BCH
/****************************************************************************
* nand_bch_ecc_flip_bit - Routine to flip an errored bit
*
* PURPOSE:
* This is a helper routine that flips the bit (0 -> 1 or 1 -> 0) of the
* errored bit specified
*
* PARAMETERS:
* datap - Container that holds the 512 byte data
* errorLocation - Location of the bit that needs to be flipped
*
* RETURNS:
* None
****************************************************************************/
static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation)
{
int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0;
int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;
uint8_t errorByte = 0;
uint8_t byteMask = 1 << locWithinAByte;
/* BCH uses big endian, need to change the location
* bits to little endian */
locWithinAWord = 3 - locWithinAWord;
errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord];
#ifdef BOOT0_BUILD
puthexs("\nECC Correct Offset: ",
locWithinAPage * sizeof(uint32_t) + locWithinAWord);
puthexs(" errorByte:", errorByte);
puthex8(" Bit: ", locWithinAByte);
#endif
if (errorByte & byteMask) {
/* bit needs to be cleared */
errorByte &= ~byteMask;
} else {
/* bit needs to be set */
errorByte |= byteMask;
}
/* write back the value with the fixed bit */
datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte;
}
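
A userspace sketch of the bit flip above. The field widths are assumptions read off the shifts in the code (bits 2:0 bit-in-byte, bits 4:3 byte-in-word, the rest word-in-sector); the REG_UMI_BCH_ERR_LOC_* masks themselves are not shown in this diff, and XOR replaces the clear-or-set branches:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void flip_bit(uint8_t *datap, int loc)
{
	int bit  = loc & 0x7;		/* bit within a byte */
	int byte = (loc >> 3) & 0x3;	/* byte within a 32-bit word (big endian) */
	int word = loc >> 5;		/* word within the 512-byte sector */

	byte = 3 - byte;		/* convert to little-endian byte order */
	datap[word * 4 + byte] ^= 1u << bit;	/* XOR flips either way */
}

int main(void)
{
	uint8_t sector[512];

	memset(sector, 0xff, sizeof(sector));
	flip_bit(sector, (10 << 5) | (1 << 3) | 4);	/* word 10, byte 1, bit 4 */
	printf("%#x\n", (unsigned)sector[10 * 4 + 2]);	/* 0xef after the flip */
	return 0;
}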
/****************************************************************************
* nand_correct_page_bch - Routine to correct bit errors when reading NAND
*
* PURPOSE:
* This routine reads the BCH registers to determine if there are any bit
* errors during the read of the last 512 bytes of data + ECC bytes. If
* errors exists, the routine fixes it.
*
* PARAMETERS:
* datap - Container that holds the 512 byte data
*
* RETURNS:
* 0 or greater = Number of errors corrected
* (No errors are found or errors have been fixed)
* -1 = Error(s) cannot be fixed
****************************************************************************/
int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
int numEccBytes)
{
int numErrors;
int errorLocation;
int idx;
uint32_t regValue;
/* wait for read ECC to be valid */
regValue = nand_bcm_umi_bch_poll_read_ecc_calc();
/*
* read the control status register to determine if there
* are error'ed bits
* see if errors are correctable
*/
if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) {
int i;
for (i = 0; i < numEccBytes; i++) {
if (readEccData[i] != 0xff) {
/* errors cannot be fixed, return -1 */
return -1;
}
}
/* If ECC is unprogrammed then we can't correct,
* assume everything OK */
return 0;
}
if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) {
/* no errors */
return 0;
}
/*
* Fix errored bits by doing the following:
* 1. Read the number of errors in the control and status register
* 2. Read the error location registers that corresponds to the number
* of errors reported
* 3. Invert the bit in the data
*/
numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20;
for (idx = 0; idx < numErrors; idx++) {
errorLocation =
REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK;
/* Flip bit */
nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation);
}
/* Errors corrected */
return numErrors;
}
#endif

View File

@ -0,0 +1,358 @@
/*****************************************************************************
* Copyright 2003 - 2009 Broadcom Corporation. All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/
#ifndef NAND_BCM_UMI_H
#define NAND_BCM_UMI_H
/* ---- Include Files ---------------------------------------------------- */
#include <mach/reg_umi.h>
#include <mach/reg_nand.h>
#include <cfg_global.h>
/* ---- Constants and Types ---------------------------------------------- */
#if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
#define NAND_ECC_BCH (CFG_GLOBAL_CHIP_REV > 0xA0)
#else
#define NAND_ECC_BCH 0
#endif
#define CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES 13
#if NAND_ECC_BCH
#ifdef BOOT0_BUILD
#define NAND_ECC_NUM_BYTES 13
#else
#define NAND_ECC_NUM_BYTES CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES
#endif
#else
#define NAND_ECC_NUM_BYTES 3
#endif
#define NAND_DATA_ACCESS_SIZE 512
/* ---- Variable Externs ------------------------------------------ */
/* ---- Function Prototypes --------------------------------------- */
int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
int numEccBytes);
/* Check if device is ready */
static inline int nand_bcm_umi_dev_ready(void)
{
return REG_UMI_NAND_RCSR & REG_UMI_NAND_RCSR_RDY;
}
/* Wait until device is ready */
static inline void nand_bcm_umi_wait_till_ready(void)
{
while (nand_bcm_umi_dev_ready() == 0)
;
}
/* Enable Hamming ECC */
static inline void nand_bcm_umi_hamming_enable_hwecc(void)
{
/* disable and reset ECC, 512 byte page */
REG_UMI_NAND_ECC_CSR &= ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
REG_UMI_NAND_ECC_CSR_256BYTE);
/* enable ECC */
REG_UMI_NAND_ECC_CSR |= REG_UMI_NAND_ECC_CSR_ECC_ENABLE;
}
#if NAND_ECC_BCH
/* BCH ECC specifics */
#define ECC_BITS_PER_CORRECTABLE_BIT 13
/* Enable BCH Read ECC */
static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
{
/* disable and reset ECC */
REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
/* Turn on ECC */
REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
}
/* Enable BCH Write ECC */
static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
{
/* disable and reset ECC */
REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID;
/* Turn on ECC */
REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN;
}
/* Config number of BCH ECC bytes */
static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
{
uint32_t nValue;
uint32_t tValue;
uint32_t kValue;
uint32_t numBits = numEccBytes * 8;
/* disable and reset ECC */
REG_UMI_BCH_CTRL_STATUS =
REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
/* Every correctable bit requires 13 ECC bits */
tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);
/* Total data in number of bits for generating and computing BCH ECC */
nValue = (NAND_DATA_ACCESS_SIZE + numEccBytes) * 8;
/* K parameter is used internally. K = N - (T * 13) */
kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);
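/*
 * Example (follows directly from the formulas above): with
 * numEccBytes = 13, numBits = 104, so tValue = 104 / 13 = 8 correctable
 * bits, nValue = (512 + 13) * 8 = 4200 and kValue = 4200 - 8 * 13 = 4096.
 */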
/* Write the settings */
REG_UMI_BCH_N = nValue;
REG_UMI_BCH_T = tValue;
REG_UMI_BCH_K = kValue;
}
/* Pause during ECC read calculation to skip bytes in OOB */
static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
{
REG_UMI_BCH_CTRL_STATUS =
REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN |
REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC;
}
/* Resume during ECC read calculation after skipping bytes in OOB */
static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
{
REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
}
/* Poll read ECC calc to check when hardware completes */
static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
{
uint32_t regVal;
do {
/* wait for ECC to be valid */
regVal = REG_UMI_BCH_CTRL_STATUS;
} while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);
return regVal;
}
/* Poll write ECC calc to check when hardware completes */
static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
{
/* wait for ECC to be valid */
while ((REG_UMI_BCH_CTRL_STATUS & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
== 0)
;
}
/* Read the OOB and ECC, for kernel write OOB to a buffer */
#if defined(__KERNEL__) && !defined(STANDALONE)
static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
uint8_t *eccCalc, int numEccBytes, uint8_t *oobp)
#else
static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
uint8_t *eccCalc, int numEccBytes)
#endif
{
int eccPos = 0;
int numToRead = 16; /* There are 16 bytes per sector in the OOB */
/* ECC is already paused when this function is called */
if (pageSize == NAND_DATA_ACCESS_SIZE) {
while (numToRead > numEccBytes) {
/* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp++ = REG_NAND_DATA8;
#else
REG_NAND_DATA8;
#endif
numToRead--;
}
/* read ECC bytes before BI */
nand_bcm_umi_bch_resume_read_ecc_calc();
while (numToRead > 11) {
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp = REG_NAND_DATA8;
eccCalc[eccPos++] = *oobp;
oobp++;
#else
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
}
nand_bcm_umi_bch_pause_read_ecc_calc();
if (numToRead == 11) {
/* read BI */
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp++ = REG_NAND_DATA8;
#else
REG_NAND_DATA8;
#endif
numToRead--;
}
/* read ECC bytes */
nand_bcm_umi_bch_resume_read_ecc_calc();
while (numToRead) {
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp = REG_NAND_DATA8;
eccCalc[eccPos++] = *oobp;
oobp++;
#else
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
}
} else {
/* skip BI */
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp++ = REG_NAND_DATA8;
#else
REG_NAND_DATA8;
#endif
numToRead--;
while (numToRead > numEccBytes) {
/* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp++ = REG_NAND_DATA8;
#else
REG_NAND_DATA8;
#endif
numToRead--;
}
/* read ECC bytes */
nand_bcm_umi_bch_resume_read_ecc_calc();
while (numToRead) {
#if defined(__KERNEL__) && !defined(STANDALONE)
*oobp = REG_NAND_DATA8;
eccCalc[eccPos++] = *oobp;
oobp++;
#else
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
}
}
}
/* Helper function to write ECC */
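/*
 * eccBytePos counts down from 15 (the position a full 15-byte ECC would
 * start at) to 1; positions greater than numEccBytes are simply not
 * written, which is how the CM bytes ahead of the ECC are skipped when
 * the usual 13 ECC bytes are configured.
 */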
static inline void NAND_BCM_UMI_ECC_WRITE(int numEccBytes, int eccBytePos,
uint8_t *oobp, uint8_t eccVal)
{
if (eccBytePos <= numEccBytes)
*oobp = eccVal;
}
/* Write OOB with ECC */
static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
uint8_t *oobp, int numEccBytes)
{
uint32_t eccVal = 0xffffffff;
/* wait for write ECC to be valid */
nand_bcm_umi_bch_poll_write_ecc_calc();
/*
** Get the hardware ecc from the 32-bit result registers.
** Read after 512 byte accesses. Format B3B2B1B0
** where B3 = ecc3, etc.
*/
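/*
 * With the usual 13 BCH ECC bytes, the positions used below give the
 * following spare-area layout. 512-byte page: oob[0..1] = CM (left
 * untouched), oob[2..4] = ECC12..ECC10, oob[5] = BI, oob[6..15] =
 * ECC9..ECC0. Larger pages: oob[0] = BI, oob[1..2] = CM and
 * oob[3..15] = ECC12..ECC0.
 */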
if (pageSize == NAND_DATA_ACCESS_SIZE) {
/* Now fill in the ECC bytes */
if (numEccBytes >= 13)
eccVal = REG_UMI_BCH_WR_ECC_3;
/* Usually we skip CM in oob[0,1] */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
(eccVal >> 16) & 0xff);
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[1],
(eccVal >> 8) & 0xff);
/* Write ECC in oob[2,3,4] */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[2],
eccVal & 0xff); /* ECC 12 */
if (numEccBytes >= 9)
eccVal = REG_UMI_BCH_WR_ECC_2;
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
(eccVal >> 24) & 0xff); /* ECC11 */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[4],
(eccVal >> 16) & 0xff); /* ECC10 */
/* Always Skip BI in oob[5] */
} else {
/* Always Skip BI in oob[0] */
/* Now fill in the ECC bytes */
if (numEccBytes >= 13)
eccVal = REG_UMI_BCH_WR_ECC_3;
/* Usually skip CM in oob[1,2] */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
(eccVal >> 16) & 0xff);
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[2],
(eccVal >> 8) & 0xff);
/* Write ECC in oob[3-15] */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[3],
eccVal & 0xff); /* ECC12 */
if (numEccBytes >= 9)
eccVal = REG_UMI_BCH_WR_ECC_2;
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
(eccVal >> 24) & 0xff); /* ECC11 */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[5],
(eccVal >> 16) & 0xff); /* ECC10 */
}
/* Fill in the remainder of ECC locations */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 10, &oobp[6],
(eccVal >> 8) & 0xff); /* ECC9 */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 9, &oobp[7],
eccVal & 0xff); /* ECC8 */
if (numEccBytes >= 5)
eccVal = REG_UMI_BCH_WR_ECC_1;
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
(eccVal >> 24) & 0xff); /* ECC7 */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 7, &oobp[9],
(eccVal >> 16) & 0xff); /* ECC6 */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 6, &oobp[10],
(eccVal >> 8) & 0xff); /* ECC5 */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 5, &oobp[11],
eccVal & 0xff); /* ECC4 */
if (numEccBytes >= 1)
eccVal = REG_UMI_BCH_WR_ECC_0;
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
(eccVal >> 24) & 0xff); /* ECC3 */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 3, &oobp[13],
(eccVal >> 16) & 0xff); /* ECC2 */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 2, &oobp[14],
(eccVal >> 8) & 0xff); /* ECC1 */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 1, &oobp[15],
eccVal & 0xff); /* ECC0 */
}
#endif
#endif /* NAND_BCM_UMI_H */

View File

@ -150,20 +150,19 @@ static const char addressbits[256] = {
};
/**
* nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
* __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
* block
* @mtd: MTD block structure
* @buf: input buffer with raw data
* @eccsize: data bytes per ecc step (256 or 512)
* @code: output buffer with ECC
*/
int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
unsigned char *code)
{
int i;
const uint32_t *bp = (uint32_t *)buf;
/* 256 or 512 bytes/ecc */
const uint32_t eccsize_mult =
(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
const uint32_t eccsize_mult = eccsize >> 8;
uint32_t cur; /* current value in buffer */
/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
@ -412,6 +411,22 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
(invparity[par & 0x55] << 2) |
(invparity[rp17] << 1) |
(invparity[rp16] << 0);
}
EXPORT_SYMBOL(__nand_calculate_ecc);
/**
* nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
* block
* @mtd: MTD block structure
* @buf: input buffer with raw data
* @code: output buffer with ECC
*/
int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
unsigned char *code)
{
__nand_calculate_ecc(buf,
((struct nand_chip *)mtd->priv)->ecc.size, code);
return 0;
}
EXPORT_SYMBOL(nand_calculate_ecc);

View File

@ -161,7 +161,7 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
/* The largest possible page size */
#define NS_LARGEST_PAGE_SIZE 2048
#define NS_LARGEST_PAGE_SIZE 4096
/* The prefix for simulator output */
#define NS_OUTPUT_PREFIX "[nandsim]"
@ -259,7 +259,8 @@ MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of mem
#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_LARGEPAGE (OPT_PAGE2048) /* 2048-byte page chips */
#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
/* Remove action bits from state */
@ -588,6 +589,8 @@ static int init_nandsim(struct mtd_info *mtd)
ns->options |= OPT_PAGE512_8BIT;
} else if (ns->geom.pgsz == 2048) {
ns->options |= OPT_PAGE2048;
} else if (ns->geom.pgsz == 4096) {
ns->options |= OPT_PAGE4096;
} else {
NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
return -EIO;

View File

@ -34,7 +34,12 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
{
struct platform_nand_data *pdata = pdev->dev.platform_data;
struct plat_nand_data *data;
int res = 0;
struct resource *res;
int err = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
/* Allocate memory for the device structure (and zero it) */
data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
@ -43,12 +48,18 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
return -ENOMEM;
}
data->io_base = ioremap(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
if (!request_mem_region(res->start, resource_size(res),
dev_name(&pdev->dev))) {
dev_err(&pdev->dev, "request_mem_region failed\n");
err = -EBUSY;
goto out_free;
}
data->io_base = ioremap(res->start, resource_size(res));
if (data->io_base == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
kfree(data);
return -EIO;
err = -EIO;
goto out_release_io;
}
data->chip.priv = &data;
@ -74,24 +85,24 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
/* Handle any platform specific setup */
if (pdata->ctrl.probe) {
res = pdata->ctrl.probe(pdev);
if (res)
err = pdata->ctrl.probe(pdev);
if (err)
goto out;
}
/* Scan to find existence of the device */
if (nand_scan(&data->mtd, 1)) {
res = -ENXIO;
err = -ENXIO;
goto out;
}
#ifdef CONFIG_MTD_PARTITIONS
if (pdata->chip.part_probe_types) {
res = parse_mtd_partitions(&data->mtd,
err = parse_mtd_partitions(&data->mtd,
pdata->chip.part_probe_types,
&data->parts, 0);
if (res > 0) {
add_mtd_partitions(&data->mtd, data->parts, res);
if (err > 0) {
add_mtd_partitions(&data->mtd, data->parts, err);
return 0;
}
}
@ -99,14 +110,14 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
pdata->chip.set_parts(data->mtd.size, &pdata->chip);
if (pdata->chip.partitions) {
data->parts = pdata->chip.partitions;
res = add_mtd_partitions(&data->mtd, data->parts,
err = add_mtd_partitions(&data->mtd, data->parts,
pdata->chip.nr_partitions);
} else
#endif
res = add_mtd_device(&data->mtd);
err = add_mtd_device(&data->mtd);
if (!res)
return res;
if (!err)
return err;
nand_release(&data->mtd);
out:
@ -114,8 +125,11 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
pdata->ctrl.remove(pdev);
platform_set_drvdata(pdev, NULL);
iounmap(data->io_base);
out_release_io:
release_mem_region(res->start, resource_size(res));
out_free:
kfree(data);
return res;
return err;
}
/*
@ -125,6 +139,9 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = pdev->dev.platform_data;
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_release(&data->mtd);
#ifdef CONFIG_MTD_PARTITIONS
@ -134,6 +151,7 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
iounmap(data->io_base);
release_mem_region(res->start, resource_size(res));
kfree(data);
return 0;

View File

@ -774,7 +774,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
chip->select_chip = s3c2410_nand_select_chip;
chip->chip_delay = 50;
chip->priv = nmtd;
chip->options = 0;
chip->options = set->options;
chip->controller = &info->controller;
switch (info->cpu_type) {

View File

@ -429,11 +429,10 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
chip = mtd->priv;
txx9_priv = chip->priv;
nand_release(mtd);
#ifdef CONFIG_MTD_PARTITIONS
del_mtd_partitions(mtd);
kfree(drvdata->parts[i]);
#endif
del_mtd_device(mtd);
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}

View File

@ -112,10 +112,24 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
unsigned long timeout;
u32 syscfg;
if (state == FL_RESETING) {
int i;
if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
state == FL_VERIFYING_ERASE) {
int i = 21;
unsigned int intr_flags = ONENAND_INT_MASTER;
for (i = 0; i < 20; i++) {
switch (state) {
case FL_RESETING:
intr_flags |= ONENAND_INT_RESET;
break;
case FL_PREPARING_ERASE:
intr_flags |= ONENAND_INT_ERASE;
break;
case FL_VERIFYING_ERASE:
i = 101;
break;
}
while (--i) {
udelay(1);
intr = read_reg(c, ONENAND_REG_INTERRUPT);
if (intr & ONENAND_INT_MASTER)
@ -126,7 +140,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
wait_err("controller error", state, ctrl, intr);
return -EIO;
}
if (!(intr & ONENAND_INT_RESET)) {
if ((intr & intr_flags) != intr_flags) {
wait_err("timeout", state, ctrl, intr);
return -EIO;
}

File diff suppressed because it is too large

View File

@ -5,3 +5,4 @@ obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o

View File

@ -0,0 +1,87 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/mtd/nand_ecc.h>
#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
static void inject_single_bit_error(void *data, size_t size)
{
unsigned long offset = random32() % (size * BITS_PER_BYTE);
__change_bit(offset, data);
}
static unsigned char data[512];
static unsigned char error_data[512];
static int nand_ecc_test(const size_t size)
{
unsigned char code[3];
unsigned char error_code[3];
char testname[30];
BUG_ON(sizeof(data) < size);
sprintf(testname, "nand-ecc-%zu", size);
get_random_bytes(data, size);
memcpy(error_data, data, size);
inject_single_bit_error(error_data, size);
__nand_calculate_ecc(data, size, code);
__nand_calculate_ecc(error_data, size, error_code);
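/*
 * 'code' stands in for the ECC that would have been stored on flash
 * (computed from the good data), while 'error_code' is recomputed from
 * the corrupted buffer, so __nand_correct_data() below can locate and
 * flip the injected bad bit.
 */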
__nand_correct_data(error_data, code, error_code, size);
if (!memcmp(data, error_data, size)) {
printk(KERN_INFO "mtd_nandecctest: ok - %s\n", testname);
return 0;
}
printk(KERN_ERR "mtd_nandecctest: not ok - %s\n", testname);
printk(KERN_DEBUG "hexdump of data:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
data, size, false);
printk(KERN_DEBUG "hexdump of error data:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
error_data, size, false);
return -1;
}
#else
static int nand_ecc_test(const size_t size)
{
return 0;
}
#endif
static int __init ecc_test_init(void)
{
srandom32(jiffies);
nand_ecc_test(256);
nand_ecc_test(512);
return 0;
}
static void __exit ecc_test_exit(void)
{
}
module_init(ecc_test_init);
module_exit(ecc_test_exit);
MODULE_DESCRIPTION("NAND ECC function test module");
MODULE_AUTHOR("Akinobu Mita");
MODULE_LICENSE("GPL");

View File

@ -343,7 +343,6 @@ static int scan_for_bad_eraseblocks(void)
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
memset(bbt, 0 , ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
@ -392,7 +391,6 @@ static int __init mtd_oobtest_init(void)
mtd->writesize, ebcnt, pgcnt, mtd->oobsize);
err = -ENOMEM;
mtd->erasesize = mtd->erasesize;
readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!readbuf) {
printk(PRINT_PREF "error: cannot allocate memory\n");
@ -476,18 +474,10 @@ static int __init mtd_oobtest_init(void)
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 1;
simple_srand(5);
printk(PRINT_PREF "writing OOBs of whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = write_eraseblock(i);
if (err)
goto out;
if (i % 256 == 0)
printk(PRINT_PREF "written up to eraseblock %u\n", i);
cond_resched();
}
printk(PRINT_PREF "written %u eraseblocks\n", i);
err = write_whole_device();
if (err)
goto out;
/* Check all eraseblocks */
use_offset = 0;

View File

@ -523,6 +523,7 @@ static int __init mtd_pagetest_init(void)
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
pgsize = mtd->writesize;
printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "

View File

@ -700,7 +700,8 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
struct jffs2_raw_inode ri;
struct jffs2_node_frag *last_frag;
union jffs2_device_node dev;
char *mdata = NULL, mdatalen = 0;
char *mdata = NULL;
int mdatalen = 0;
uint32_t alloclen, ilen;
int ret;

View File

@ -1284,7 +1284,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
f->target = NULL;
mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -ret;
return ret;
}
f->target[je32_to_cpu(latest_node->csize)] = '\0';

View File

@ -23,7 +23,7 @@
int jffs2_sum_init(struct jffs2_sb_info *c)
{
uint32_t sum_size = max_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE);
uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE);
c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);

include/linux/kmsg_dump.h (new file, 60 lines)
View File

@ -0,0 +1,60 @@
/*
* linux/include/linux/kmsg_dump.h
*
* Copyright (C) 2009 Net Insight AB
*
* Author: Simon Kagstrom <simon.kagstrom@netinsight.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#ifndef _LINUX_KMSG_DUMP_H
#define _LINUX_KMSG_DUMP_H
#include <linux/list.h>
enum kmsg_dump_reason {
KMSG_DUMP_OOPS,
KMSG_DUMP_PANIC,
};
/**
* struct kmsg_dumper - kernel crash message dumper structure
* @dump: The callback which gets called on crashes. The buffer is passed
* as two sections, where s1 (length l1) contains the older
* messages and s2 (length l2) contains the newer.
* @list: Entry in the dumper list (private)
* @registered: Flag that specifies if this is already registered
*/
struct kmsg_dumper {
void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
const char *s1, unsigned long l1,
const char *s2, unsigned long l2);
struct list_head list;
int registered;
};
#ifdef CONFIG_PRINTK
void kmsg_dump(enum kmsg_dump_reason reason);
int kmsg_dump_register(struct kmsg_dumper *dumper);
int kmsg_dump_unregister(struct kmsg_dumper *dumper);
#else
static inline void kmsg_dump(enum kmsg_dump_reason reason)
{
}
static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
{
return -EINVAL;
}
static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
return -EINVAL;
}
#endif
#endif /* _LINUX_KMSG_DUMP_H */
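
For illustration, a minimal client of this interface might look roughly like the sketch below (hypothetical module and symbol names; only the dump() callback signature and the register/unregister calls come from the header above):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmsg_dump.h>

/* Called on oops/panic with the log buffer split into an older (s1/l1)
 * and a newer (s2/l2) chunk. */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	printk(KERN_INFO "example_dump: reason %d, %lu + %lu bytes\n",
	       reason, l1, l2);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");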

View File

@ -19,22 +19,21 @@
/**
* struct nand_bbt_descr - bad block table descriptor
* @options: options for this descriptor
* @pages: the page(s) where we find the bbt, used with
* option BBT_ABSPAGE when bbt is searched,
* then we store the found bbts pages here.
* Its an array and supports up to 8 chips now
* @offs: offset of the pattern in the oob area of the page
* @veroffs: offset of the bbt version counter in the oob area of the page
* @version: version read from the bbt page during scan
* @len: length of the pattern, if 0 no pattern check is performed
* @maxblocks: maximum number of blocks to search for a bbt. This
* number of blocks is reserved at the end of the device
* where the tables are written.
* @reserved_block_code: if non-0, this pattern denotes a reserved
* (rather than bad) block in the stored bbt
* @pattern: pattern to identify bad block table or factory marked
* good / bad blocks, can be NULL, if len = 0
* @options: options for this descriptor
* @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
* when bbt is searched, then we store the found bbts pages here.
* It's an array and supports up to 8 chips now
* @offs: offset of the pattern in the oob area of the page
* @veroffs: offset of the bbt version counter in the oob area of the page
* @version: version read from the bbt page during scan
* @len: length of the pattern, if 0 no pattern check is performed
* @maxblocks: maximum number of blocks to search for a bbt. This number of
* blocks is reserved at the end of the device where the tables are
* written.
* @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
* bad) block in the stored bbt
* @pattern: pattern to identify bad block table or factory marked good /
* bad blocks, can be NULL, if len = 0
*
* Descriptor for the bad block table marker and the descriptor for the
* pattern which identifies good and bad blocks. The assumption is made
@ -90,7 +89,9 @@ struct nand_bbt_descr {
/*
* Constants for oob configuration
*/
#define ONENAND_BADBLOCK_POS 0
#define NAND_SMALL_BADBLOCK_POS 5
#define NAND_LARGE_BADBLOCK_POS 0
#define ONENAND_BADBLOCK_POS 0
/*
* Bad block scanning errors

View File

@ -518,10 +518,11 @@ struct cfi_fixup {
#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY 0xffff
#define CFI_MFR_AMD 0x0001
#define CFI_MFR_ATMEL 0x001F
#define CFI_MFR_SAMSUNG 0x00EC
#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
#define CFI_MFR_AMD 0x0001
#define CFI_MFR_INTEL 0x0089
#define CFI_MFR_ATMEL 0x001F
#define CFI_MFR_SAMSUNG 0x00EC
#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);

View File

@ -38,6 +38,15 @@ typedef enum {
FL_XIP_WHILE_ERASING,
FL_XIP_WHILE_WRITING,
FL_SHUTDOWN,
/* These 2 come from nand_state_t, which has been unified here */
FL_READING,
FL_CACHEDPRG,
/* These 4 come from onenand_state_t, which has been unified here */
FL_RESETING,
FL_OTPING,
FL_PREPARING_ERASE,
FL_VERIFYING_ERASE,
FL_UNKNOWN
} flstate_t;

View File

@ -21,6 +21,8 @@
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
struct mtd_info;
/* Scan and identify a NAND device */
@ -168,7 +170,6 @@ typedef enum {
/* Chip does not allow subpage writes */
#define NAND_NO_SUBPAGE_WRITE 0x00000200
/* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS \
(NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
@ -194,6 +195,9 @@ typedef enum {
/* This option is defined if the board driver allocates its own buffers
(e.g. because it needs them DMA-coherent */
#define NAND_OWN_BUFFERS 0x00040000
/* Chip may not exist, so silence any errors in scan */
#define NAND_SCAN_SILENT_NODEV 0x00080000
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
#define NAND_CONTROLLER_ALLOC 0x80000000
@ -202,20 +206,6 @@ typedef enum {
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
/*
* nand_state_t - chip states
* Enumeration for NAND flash chip state
*/
typedef enum {
FL_READY,
FL_READING,
FL_WRITING,
FL_ERASING,
FL_SYNCING,
FL_CACHEDPRG,
FL_PM_SUSPENDED,
} nand_state_t;
/* Keep gcc happy */
struct nand_chip;
@ -402,7 +392,7 @@ struct nand_chip {
uint8_t cellinfo;
int badblockpos;
nand_state_t state;
flstate_t state;
uint8_t *oob_poi;
struct nand_hw_control *controller;
@ -470,75 +460,6 @@ struct nand_manufacturers {
extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
/**
* struct nand_bbt_descr - bad block table descriptor
* @options: options for this descriptor
* @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
* when bbt is searched, then we store the found bbts pages here.
* Its an array and supports up to 8 chips now
* @offs: offset of the pattern in the oob area of the page
* @veroffs: offset of the bbt version counter in the oob are of the page
* @version: version read from the bbt page during scan
* @len: length of the pattern, if 0 no pattern check is performed
* @maxblocks: maximum number of blocks to search for a bbt. This number of
* blocks is reserved at the end of the device where the tables are
* written.
* @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
* bad) block in the stored bbt
* @pattern: pattern to identify bad block table or factory marked good /
* bad blocks, can be NULL, if len = 0
*
* Descriptor for the bad block table marker and the descriptor for the
* pattern which identifies good and bad blocks. The assumption is made
* that the pattern and the version count are always located in the oob area
* of the first block.
*/
struct nand_bbt_descr {
int options;
int pages[NAND_MAX_CHIPS];
int offs;
int veroffs;
uint8_t version[NAND_MAX_CHIPS];
int len;
int maxblocks;
int reserved_block_code;
uint8_t *pattern;
};
/* Options for the bad block table descriptors */
/* The number of bits used per block in the bbt on the device */
#define NAND_BBT_NRBITS_MSK 0x0000000F
#define NAND_BBT_1BIT 0x00000001
#define NAND_BBT_2BIT 0x00000002
#define NAND_BBT_4BIT 0x00000004
#define NAND_BBT_8BIT 0x00000008
/* The bad block table is in the last good block of the device */
#define NAND_BBT_LASTBLOCK 0x00000010
/* The bbt is at the given page, else we must scan for the bbt */
#define NAND_BBT_ABSPAGE 0x00000020
/* The bbt is at the given page, else we must scan for the bbt */
#define NAND_BBT_SEARCH 0x00000040
/* bbt is stored per chip on multichip devices */
#define NAND_BBT_PERCHIP 0x00000080
/* bbt has a version counter at offset veroffs */
#define NAND_BBT_VERSION 0x00000100
/* Create a bbt if none axists */
#define NAND_BBT_CREATE 0x00000200
/* Search good / bad pattern through all pages of a block */
#define NAND_BBT_SCANALLPAGES 0x00000400
/* Scan block empty during good / bad block scan */
#define NAND_BBT_SCANEMPTY 0x00000800
/* Write bbt if neccecary */
#define NAND_BBT_WRITE 0x00001000
/* Read and write back block contents when writing bbt */
#define NAND_BBT_SAVECONTENT 0x00002000
/* Search good / bad pattern on the first and the second page */
#define NAND_BBT_SCAN2NDPAGE 0x00004000
/* The maximum number of blocks to scan for a bbt */
#define NAND_BBT_SCAN_MAXBLOCKS 4
extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_default_bbt(struct mtd_info *mtd);
@ -548,12 +469,6 @@ extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t * retlen, uint8_t * buf);
/*
* Constants for oob configuration
*/
#define NAND_SMALL_BADBLOCK_POS 5
#define NAND_LARGE_BADBLOCK_POS 0
/**
* struct platform_nand_chip - chip level device structure
* @nr_chips: max. number of chips to scan for

View File

@ -16,7 +16,13 @@
struct mtd_info;
/*
* Calculate 3 byte ECC code for 256 byte block
* Calculate 3 byte ECC code for eccsize byte block
*/
void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
u_char *ecc_code);
/*
* Calculate 3 byte ECC code for 256/512 byte block
*/
int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
@ -27,7 +33,7 @@ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
unsigned int eccsize);
/*
* Detect and correct a 1 bit error for 256 byte block
* Detect and correct a 1 bit error for 256/512 byte block
*/
int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);

View File

@ -1,7 +1,7 @@
/*
* linux/include/linux/mtd/onenand.h
*
* Copyright (C) 2005-2007 Samsung Electronics
* Copyright © 2005-2009 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/onenand_regs.h>
#include <linux/mtd/bbm.h>
@ -25,22 +26,6 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
/* Free resources held by the OneNAND device */
extern void onenand_release(struct mtd_info *mtd);
/*
* onenand_state_t - chip states
* Enumeration for OneNAND flash chip state
*/
typedef enum {
FL_READY,
FL_READING,
FL_WRITING,
FL_ERASING,
FL_SYNCING,
FL_LOCKING,
FL_RESETING,
FL_OTPING,
FL_PM_SUSPENDED,
} onenand_state_t;
/**
* struct onenand_bufferram - OneNAND BufferRAM Data
* @blockpage: block & page address in BufferRAM
@ -137,7 +122,7 @@ struct onenand_chip {
spinlock_t chip_lock;
wait_queue_head_t wq;
onenand_state_t state;
flstate_t state;
unsigned char *page_buf;
unsigned char *oob_buf;
@ -152,6 +137,8 @@ struct onenand_chip {
/*
* Helper macros
*/
#define ONENAND_PAGES_PER_BLOCK (1<<6)
#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index)
#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1)
#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1)

View File

@ -131,6 +131,8 @@
#define ONENAND_CMD_LOCK_TIGHT (0x2C)
#define ONENAND_CMD_UNLOCK_ALL (0x27)
#define ONENAND_CMD_ERASE (0x94)
#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95)
#define ONENAND_CMD_ERASE_VERIFY (0x71)
#define ONENAND_CMD_RESET (0xF0)
#define ONENAND_CMD_OTP_ACCESS (0x65)
#define ONENAND_CMD_READID (0x90)

View File

@ -10,6 +10,7 @@
*/
#include <linux/debug_locks.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/module.h>
@ -74,6 +75,7 @@ NORET_TYPE void panic(const char * fmt, ...)
dump_stack();
#endif
kmsg_dump(KMSG_DUMP_PANIC);
/*
* If we have crashed and we have a crash kernel loaded let it handle
* everything else.
@ -339,6 +341,7 @@ void oops_exit(void)
{
do_oops_enter_exit();
print_oops_end_marker();
kmsg_dump(KMSG_DUMP_OOPS);
}
#ifdef WANT_WARN_ON_SLOWPATH

View File

@ -34,6 +34,7 @@
#include <linux/syscalls.h>
#include <linux/kexec.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <asm/uaccess.h>
@ -1405,4 +1406,122 @@ bool printk_timed_ratelimit(unsigned long *caller_jiffies,
return false;
}
EXPORT_SYMBOL(printk_timed_ratelimit);
static DEFINE_SPINLOCK(dump_list_lock);
static LIST_HEAD(dump_list);
/**
* kmsg_dump_register - register a kernel log dumper.
* @dumper: pointer to the kmsg_dumper structure
*
* Adds a kernel log dumper to the system. The dump callback in the
* structure will be called when the kernel oopses or panics and must be
* set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
*/
int kmsg_dump_register(struct kmsg_dumper *dumper)
{
unsigned long flags;
int err = -EBUSY;
/* The dump callback needs to be set */
if (!dumper->dump)
return -EINVAL;
spin_lock_irqsave(&dump_list_lock, flags);
/* Don't allow registering multiple times */
if (!dumper->registered) {
dumper->registered = 1;
list_add_tail(&dumper->list, &dump_list);
err = 0;
}
spin_unlock_irqrestore(&dump_list_lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_register);
/**
* kmsg_dump_unregister - unregister a kmsg dumper.
* @dumper: pointer to the kmsg_dumper structure
*
* Removes a dump device from the system. Returns zero on success and
* %-EINVAL otherwise.
*/
int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
unsigned long flags;
int err = -EINVAL;
spin_lock_irqsave(&dump_list_lock, flags);
if (dumper->registered) {
dumper->registered = 0;
list_del(&dumper->list);
err = 0;
}
spin_unlock_irqrestore(&dump_list_lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
static const char * const kmsg_reasons[] = {
[KMSG_DUMP_OOPS] = "oops",
[KMSG_DUMP_PANIC] = "panic",
};
static const char *kmsg_to_str(enum kmsg_dump_reason reason)
{
if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0)
return "unknown";
return kmsg_reasons[reason];
}
/**
* kmsg_dump - dump kernel log to kernel message dumpers.
* @reason: the reason (oops, panic etc) for dumping
*
* Iterate through each of the dump devices and call the oops/panic
* callbacks with the log buffer.
*/
void kmsg_dump(enum kmsg_dump_reason reason)
{
unsigned long end;
unsigned chars;
struct kmsg_dumper *dumper;
const char *s1, *s2;
unsigned long l1, l2;
unsigned long flags;
/* Theoretically, the log could move on after we do this, but
there's not a lot we can do about that. The new messages
will overwrite the start of what we dump. */
spin_lock_irqsave(&logbuf_lock, flags);
end = log_end & LOG_BUF_MASK;
chars = logged_chars;
spin_unlock_irqrestore(&logbuf_lock, flags);
if (chars > end) {
s1 = log_buf + log_buf_len - chars + end;
l1 = chars - end;
s2 = log_buf;
l2 = end;
} else {
s1 = "";
l1 = 0;
s2 = log_buf + end - chars;
l2 = chars;
}
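/*
 * Worked example (illustrative numbers): with log_buf_len = 16,
 * chars = 10 and end = 4 the buffer has wrapped, so the older part is
 * s1 = log_buf + 10 with l1 = 6 bytes and the newer part is s2 = log_buf
 * with l2 = 4 bytes.
 */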
if (!spin_trylock_irqsave(&dump_list_lock, flags)) {
printk(KERN_ERR "kmsg_dump: dump list lock is held during %s, skipping dump\n",
kmsg_to_str(reason));
return;
}
list_for_each_entry(dumper, &dump_list, list)
dumper->dump(dumper, reason, s1, l1, s2, l2);
spin_unlock_irqrestore(&dump_list_lock, flags);
}
#endif