author     H. Nikolaus Schaller <hns@goldelico.com>   2012-03-26 20:55:28 +0200
committer  H. Nikolaus Schaller <hns@goldelico.com>   2012-03-26 20:55:28 +0200
commit     92988a21ad4c4c9504295ccb580c9f806134471b (patch)
tree       5effc9f14170112450de05c67dafbe8d5034d595 /u-boot/drivers/mtd
parent     ca2b506783b676c95762c54ea24dcfdaae1947c9 (diff)
added boot script files to repository
Diffstat (limited to 'u-boot/drivers/mtd')
-rw-r--r--  u-boot/drivers/mtd/Makefile | 55
-rw-r--r--  u-boot/drivers/mtd/at45.c | 559
-rw-r--r--  u-boot/drivers/mtd/cfi_flash.c | 2210
-rw-r--r--  u-boot/drivers/mtd/cfi_mtd.c | 276
-rw-r--r--  u-boot/drivers/mtd/dataflash.c | 463
-rw-r--r--  u-boot/drivers/mtd/jedec_flash.c | 438
-rw-r--r--  u-boot/drivers/mtd/mtdconcat.c | 807
-rw-r--r--  u-boot/drivers/mtd/mtdcore.c | 188
-rw-r--r--  u-boot/drivers/mtd/mtdpart.c | 476
-rw-r--r--  u-boot/drivers/mtd/mw_eeprom.c | 236
-rw-r--r--  u-boot/drivers/mtd/nand/Makefile | 71
-rw-r--r--  u-boot/drivers/mtd/nand/atmel_nand.c | 343
-rw-r--r--  u-boot/drivers/mtd/nand/atmel_nand_ecc.h | 36
-rw-r--r--  u-boot/drivers/mtd/nand/bfin_nand.c | 391
-rw-r--r--  u-boot/drivers/mtd/nand/davinci_nand.c | 648
-rw-r--r--  u-boot/drivers/mtd/nand/diskonchip.c | 1779
-rw-r--r--  u-boot/drivers/mtd/nand/fsl_elbc_nand.c | 823
-rw-r--r--  u-boot/drivers/mtd/nand/fsl_upm.c | 202
-rw-r--r--  u-boot/drivers/mtd/nand/kb9202_nand.c | 150
-rw-r--r--  u-boot/drivers/mtd/nand/kirkwood_nand.c | 82
-rw-r--r--  u-boot/drivers/mtd/nand/kmeter1_nand.c | 135
-rw-r--r--  u-boot/drivers/mtd/nand/mpc5121_nfc.c | 693
-rw-r--r--  u-boot/drivers/mtd/nand/mxc_nand.c | 1393
-rw-r--r--  u-boot/drivers/mtd/nand/nand.c | 98
-rw-r--r--  u-boot/drivers/mtd/nand/nand_base.c | 2896
-rw-r--r--  u-boot/drivers/mtd/nand/nand_bbt.c | 1220
-rw-r--r--  u-boot/drivers/mtd/nand/nand_ecc.c | 202
-rw-r--r--  u-boot/drivers/mtd/nand/nand_ids.c | 146
-rw-r--r--  u-boot/drivers/mtd/nand/nand_plat.c | 64
-rw-r--r--  u-boot/drivers/mtd/nand/nand_util.c | 655
-rw-r--r--  u-boot/drivers/mtd/nand/ndfc.c | 217
-rw-r--r--  u-boot/drivers/mtd/nand/nomadik.c | 221
-rw-r--r--  u-boot/drivers/mtd/nand/omap_gpmc.c | 344
-rw-r--r--  u-boot/drivers/mtd/nand/s3c2410_nand.c | 189
-rw-r--r--  u-boot/drivers/mtd/nand/s3c64xx.c | 319
-rw-r--r--  u-boot/drivers/mtd/nand/spr_nand.c | 124
-rw-r--r--  u-boot/drivers/mtd/onenand/Makefile | 46
-rw-r--r--  u-boot/drivers/mtd/onenand/onenand_base.c | 2755
-rw-r--r--  u-boot/drivers/mtd/onenand/onenand_bbt.c | 268
-rw-r--r--  u-boot/drivers/mtd/onenand/onenand_uboot.c | 56
-rw-r--r--  u-boot/drivers/mtd/onenand/samsung.c | 636
-rw-r--r--  u-boot/drivers/mtd/spi/Makefile | 55
-rw-r--r--  u-boot/drivers/mtd/spi/atmel.c | 552
-rw-r--r--  u-boot/drivers/mtd/spi/eeprom_m95xxx.c | 122
-rw-r--r--  u-boot/drivers/mtd/spi/eon.c | 275
-rw-r--r--  u-boot/drivers/mtd/spi/macronix.c | 338
-rw-r--r--  u-boot/drivers/mtd/spi/ramtron.c | 319
-rw-r--r--  u-boot/drivers/mtd/spi/spansion.c | 359
-rw-r--r--  u-boot/drivers/mtd/spi/spi_flash.c | 233
-rw-r--r--  u-boot/drivers/mtd/spi/spi_flash_internal.h | 54
-rw-r--r--  u-boot/drivers/mtd/spi/sst.c | 375
-rw-r--r--  u-boot/drivers/mtd/spi/stmicro.c | 373
-rw-r--r--  u-boot/drivers/mtd/spi/winbond.c | 361
-rw-r--r--  u-boot/drivers/mtd/spr_smi.c | 523
-rw-r--r--  u-boot/drivers/mtd/ubi/Makefile | 51
-rw-r--r--  u-boot/drivers/mtd/ubi/build.c | 1193
-rw-r--r--  u-boot/drivers/mtd/ubi/crc32.c | 518
-rw-r--r--  u-boot/drivers/mtd/ubi/crc32defs.h | 32
-rw-r--r--  u-boot/drivers/mtd/ubi/crc32table.h | 136
-rw-r--r--  u-boot/drivers/mtd/ubi/debug.c | 192
-rw-r--r--  u-boot/drivers/mtd/ubi/debug.h | 152
-rw-r--r--  u-boot/drivers/mtd/ubi/eba.c | 1256
-rw-r--r--  u-boot/drivers/mtd/ubi/io.c | 1274
-rw-r--r--  u-boot/drivers/mtd/ubi/kapi.c | 638
-rw-r--r--  u-boot/drivers/mtd/ubi/misc.c | 106
-rw-r--r--  u-boot/drivers/mtd/ubi/scan.c | 1360
-rw-r--r--  u-boot/drivers/mtd/ubi/scan.h | 165
-rw-r--r--  u-boot/drivers/mtd/ubi/ubi-media.h | 372
-rw-r--r--  u-boot/drivers/mtd/ubi/ubi.h | 641
-rw-r--r--  u-boot/drivers/mtd/ubi/upd.c | 441
-rw-r--r--  u-boot/drivers/mtd/ubi/vmt.c | 862
-rw-r--r--  u-boot/drivers/mtd/ubi/vtbl.c | 838
-rw-r--r--  u-boot/drivers/mtd/ubi/wl.c | 1675
73 files changed, 38751 insertions, 0 deletions
diff --git a/u-boot/drivers/mtd/Makefile b/u-boot/drivers/mtd/Makefile
new file mode 100644
index 0000000..999431c
--- /dev/null
+++ b/u-boot/drivers/mtd/Makefile
@@ -0,0 +1,55 @@
+#
+# (C) Copyright 2000-2007
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# See file CREDITS for list of people who contributed to this
+# project.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+include $(TOPDIR)/config.mk
+
+LIB := $(obj)libmtd.o
+
+COBJS-$(CONFIG_MTD_DEVICE) += mtdcore.o
+COBJS-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
+COBJS-$(CONFIG_MTD_CONCAT) += mtdconcat.o
+COBJS-$(CONFIG_HAS_DATAFLASH) += at45.o
+COBJS-$(CONFIG_FLASH_CFI_DRIVER) += cfi_flash.o
+COBJS-$(CONFIG_FLASH_CFI_MTD) += cfi_mtd.o
+COBJS-$(CONFIG_HAS_DATAFLASH) += dataflash.o
+COBJS-$(CONFIG_FLASH_CFI_LEGACY) += jedec_flash.o
+COBJS-$(CONFIG_MW_EEPROM) += mw_eeprom.o
+COBJS-$(CONFIG_SPEARSMI) += spr_smi.o
+
+COBJS := $(COBJS-y)
+SRCS := $(COBJS:.o=.c)
+OBJS := $(addprefix $(obj),$(COBJS))
+
+all: $(LIB)
+
+$(LIB): $(obj).depend $(OBJS)
+ $(call cmd_link_o_target, $(OBJS))
+
+#########################################################################
+
+# defines $(obj).depend target
+include $(SRCTREE)/rules.mk
+
+sinclude $(obj).depend
+
+#########################################################################
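Each object in this Makefile is built only when the corresponding CONFIG_* option is defined by the board configuration. As a rough illustration (option names are taken from the Makefile and from cfi_flash.c below; the concrete values are placeholders, not part of this commit), a board header selecting the CFI NOR driver with MTD support might contain:

/* Illustrative board-config excerpt -- values are assumptions, not from this commit */
#define CONFIG_FLASH_CFI_DRIVER                         /* build cfi_flash.o */
#define CONFIG_FLASH_CFI_MTD                            /* build cfi_mtd.o */
#define CONFIG_MTD_DEVICE                               /* build mtdcore.o */
#define CONFIG_MTD_PARTITIONS                           /* build mtdpart.o */
#define CONFIG_SYS_FLASH_CFI_WIDTH      FLASH_CFI_16BIT /* start CFI detection at 16 bit */
#define CONFIG_SYS_MAX_FLASH_BANKS      1               /* a single NOR bank */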
diff --git a/u-boot/drivers/mtd/at45.c b/u-boot/drivers/mtd/at45.c
new file mode 100644
index 0000000..d1a60aa
--- /dev/null
+++ b/u-boot/drivers/mtd/at45.c
@@ -0,0 +1,559 @@
+/* Driver for ATMEL DataFlash support
+ * Author : Hamid Ikdoumi (Atmel)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+#include <config.h>
+#include <common.h>
+#include <dataflash.h>
+
+/*
+ * spi.c API
+ */
+extern unsigned int AT91F_SpiWrite(AT91PS_DataflashDesc pDesc);
+extern void AT91F_SpiEnable(int cs);
+
+#define AT91C_TIMEOUT_WRDY 200000
+
+/*----------------------------------------------------------------------*/
+/* \fn AT91F_DataFlashSendCommand */
+/* \brief Generic function to send a command to the dataflash */
+/*----------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_DataFlashSendCommand(AT91PS_DataFlash pDataFlash,
+ unsigned char OpCode,
+ unsigned int CmdSize,
+ unsigned int DataflashAddress)
+{
+ unsigned int adr;
+
+ if ((pDataFlash->pDataFlashDesc->state) != IDLE)
+ return DATAFLASH_BUSY;
+
+ /* process the address to obtain page address and byte address */
+ adr = ((DataflashAddress / (pDataFlash->pDevice->pages_size)) <<
+ pDataFlash->pDevice->page_offset) +
+ (DataflashAddress % (pDataFlash->pDevice->pages_size));
+
+ /* fill the command buffer */
+ pDataFlash->pDataFlashDesc->command[0] = OpCode;
+ if (pDataFlash->pDevice->pages_number >= 16384) {
+ pDataFlash->pDataFlashDesc->command[1] =
+ (unsigned char)((adr & 0x0F000000) >> 24);
+ pDataFlash->pDataFlashDesc->command[2] =
+ (unsigned char)((adr & 0x00FF0000) >> 16);
+ pDataFlash->pDataFlashDesc->command[3] =
+ (unsigned char)((adr & 0x0000FF00) >> 8);
+ pDataFlash->pDataFlashDesc->command[4] =
+ (unsigned char)(adr & 0x000000FF);
+ } else {
+ pDataFlash->pDataFlashDesc->command[1] =
+ (unsigned char)((adr & 0x00FF0000) >> 16);
+ pDataFlash->pDataFlashDesc->command[2] =
+ (unsigned char)((adr & 0x0000FF00) >> 8);
+ pDataFlash->pDataFlashDesc->command[3] =
+ (unsigned char)(adr & 0x000000FF);
+ pDataFlash->pDataFlashDesc->command[4] = 0;
+ }
+ pDataFlash->pDataFlashDesc->command[5] = 0;
+ pDataFlash->pDataFlashDesc->command[6] = 0;
+ pDataFlash->pDataFlashDesc->command[7] = 0;
+
+ /* Initialize the SpiData structure for the spi write function */
+ pDataFlash->pDataFlashDesc->tx_cmd_pt =
+ pDataFlash->pDataFlashDesc->command;
+ pDataFlash->pDataFlashDesc->tx_cmd_size = CmdSize;
+ pDataFlash->pDataFlashDesc->rx_cmd_pt =
+ pDataFlash->pDataFlashDesc->command;
+ pDataFlash->pDataFlashDesc->rx_cmd_size = CmdSize;
+
+ /* send the command and read the data */
+ return AT91F_SpiWrite(pDataFlash->pDataFlashDesc);
+}
+
+/*----------------------------------------------------------------------*/
+/* \fn AT91F_DataFlashGetStatus */
+/* \brief Read the status register of the dataflash */
+/*----------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_DataFlashGetStatus(AT91PS_DataflashDesc pDesc)
+{
+ AT91S_DataFlashStatus status;
+
+ /* if a transfer is in progress ==> return 0 */
+ if ((pDesc->state) != IDLE)
+ return DATAFLASH_BUSY;
+
+ /* first send the read status command (D7H) */
+ pDesc->command[0] = DB_STATUS;
+ pDesc->command[1] = 0;
+
+ pDesc->DataFlash_state = GET_STATUS;
+ pDesc->tx_data_size = 0; /* Transmit the command */
+ /* and receive response */
+ pDesc->tx_cmd_pt = pDesc->command;
+ pDesc->rx_cmd_pt = pDesc->command;
+ pDesc->rx_cmd_size = 2;
+ pDesc->tx_cmd_size = 2;
+ status = AT91F_SpiWrite(pDesc);
+
+ pDesc->DataFlash_state = *((unsigned char *)(pDesc->rx_cmd_pt) + 1);
+
+ return status;
+}
+
+/*----------------------------------------------------------------------*/
+/* \fn AT91F_DataFlashWaitReady */
+/* \brief wait for dataflash ready (bit7 of the status register == 1) */
+/*----------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_DataFlashWaitReady(AT91PS_DataflashDesc
+ pDataFlashDesc,
+ unsigned int timeout)
+{
+ pDataFlashDesc->DataFlash_state = IDLE;
+
+ do {
+ AT91F_DataFlashGetStatus(pDataFlashDesc);
+ timeout--;
+ } while (((pDataFlashDesc->DataFlash_state & 0x80) != 0x80) &&
+ (timeout > 0));
+
+ if ((pDataFlashDesc->DataFlash_state & 0x80) != 0x80)
+ return DATAFLASH_ERROR;
+
+ return DATAFLASH_OK;
+}
+
+/*--------------------------------------------------------------------------*/
+/* Function Name : AT91F_DataFlashContinuousRead */
+/* Object : Continuous stream Read */
+/* Input Parameters : DataFlash Service */
+/* : <src> = dataflash address */
+/* : <*dataBuffer> = data buffer pointer */
+/* : <sizeToRead> = data buffer size */
+/* Return value : State of the dataflash */
+/*--------------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_DataFlashContinuousRead(
+ AT91PS_DataFlash pDataFlash,
+ int src,
+ unsigned char *dataBuffer,
+ int sizeToRead)
+{
+ AT91S_DataFlashStatus status;
+ /* Test the size to read in the device */
+ if ((src + sizeToRead) >
+ (pDataFlash->pDevice->pages_size *
+ (pDataFlash->pDevice->pages_number)))
+ return DATAFLASH_MEMORY_OVERFLOW;
+
+ pDataFlash->pDataFlashDesc->rx_data_pt = dataBuffer;
+ pDataFlash->pDataFlashDesc->rx_data_size = sizeToRead;
+ pDataFlash->pDataFlashDesc->tx_data_pt = dataBuffer;
+ pDataFlash->pDataFlashDesc->tx_data_size = sizeToRead;
+
+ status = AT91F_DataFlashSendCommand(
+ pDataFlash, DB_CONTINUOUS_ARRAY_READ, 8, src);
+ /* Send the command to the dataflash */
+ return (status);
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : AT91F_DataFlashPagePgmBuf */
+/* Object : Main memory page program thru buffer 1 or buffer 2 */
+/* Input Parameters : DataFlash Service */
+/* : <*src> = Source buffer */
+/* : <dest> = dataflash destination address */
+/* : <SizeToWrite> = data buffer size */
+/* Return value : State of the dataflash */
+/*---------------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_DataFlashPagePgmBuf(AT91PS_DataFlash pDataFlash,
+ unsigned char *src,
+ unsigned int dest,
+ unsigned int SizeToWrite)
+{
+ int cmdsize;
+ pDataFlash->pDataFlashDesc->tx_data_pt = src;
+ pDataFlash->pDataFlashDesc->tx_data_size = SizeToWrite;
+ pDataFlash->pDataFlashDesc->rx_data_pt = src;
+ pDataFlash->pDataFlashDesc->rx_data_size = SizeToWrite;
+
+ cmdsize = 4;
+ /* Send the command to the dataflash */
+ if (pDataFlash->pDevice->pages_number >= 16384)
+ cmdsize = 5;
+ return (AT91F_DataFlashSendCommand(
+ pDataFlash, DB_PAGE_PGM_BUF1, cmdsize, dest));
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : AT91F_MainMemoryToBufferTransfert */
+/* Object : Read a main memory page into SRAM Buffer 1 or 2 */
+/* Input Parameters : DataFlash Service */
+/* : Page concerned */
+/* : */
+/* Return value : State of the dataflash */
+/*---------------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_MainMemoryToBufferTransfert(
+ AT91PS_DataFlash
+ pDataFlash,
+ unsigned char
+ BufferCommand,
+ unsigned int page)
+{
+ int cmdsize;
+ /* Test if the buffer command is legal */
+ if ((BufferCommand != DB_PAGE_2_BUF1_TRF) &&
+ (BufferCommand != DB_PAGE_2_BUF2_TRF)) {
+ return DATAFLASH_BAD_COMMAND;
+ }
+
+ /* no data to transmit or receive */
+ pDataFlash->pDataFlashDesc->tx_data_size = 0;
+ cmdsize = 4;
+ if (pDataFlash->pDevice->pages_number >= 16384)
+ cmdsize = 5;
+ return (AT91F_DataFlashSendCommand(
+ pDataFlash, BufferCommand, cmdsize,
+ page * pDataFlash->pDevice->pages_size));
+}
+
+/*-------------------------------------------------------------------------- */
+/* Function Name : AT91F_DataFlashWriteBuffer */
+/* Object : Write data to the internal sram buffer 1 or 2 */
+/* Input Parameters : DataFlash Service */
+/* : <BufferCommand> = command to write buffer1 or 2 */
+/* : <*dataBuffer> = data buffer to write */
+/* : <bufferAddress> = address in the internal buffer */
+/* : <SizeToWrite> = data buffer size */
+/* Return value : State of the dataflash */
+/*---------------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_DataFlashWriteBuffer(
+ AT91PS_DataFlash pDataFlash,
+ unsigned char BufferCommand,
+ unsigned char *dataBuffer,
+ unsigned int bufferAddress,
+ int SizeToWrite)
+{
+ int cmdsize;
+ /* Test if the buffer command is legal */
+ if ((BufferCommand != DB_BUF1_WRITE) &&
+ (BufferCommand != DB_BUF2_WRITE)) {
+ return DATAFLASH_BAD_COMMAND;
+ }
+
+ /* buffer address must be lower than page size */
+ if (bufferAddress > pDataFlash->pDevice->pages_size)
+ return DATAFLASH_BAD_ADDRESS;
+
+ if ((pDataFlash->pDataFlashDesc->state) != IDLE)
+ return DATAFLASH_BUSY;
+
+ /* Send first Write Command */
+ pDataFlash->pDataFlashDesc->command[0] = BufferCommand;
+ pDataFlash->pDataFlashDesc->command[1] = 0;
+ if (pDataFlash->pDevice->pages_number >= 16384) {
+ pDataFlash->pDataFlashDesc->command[2] = 0;
+ pDataFlash->pDataFlashDesc->command[3] =
+ (unsigned char)(((unsigned int)(bufferAddress &
+ pDataFlash->pDevice->
+ byte_mask)) >> 8);
+ pDataFlash->pDataFlashDesc->command[4] =
+ (unsigned char)((unsigned int)bufferAddress & 0x00FF);
+ cmdsize = 5;
+ } else {
+ pDataFlash->pDataFlashDesc->command[2] =
+ (unsigned char)(((unsigned int)(bufferAddress &
+ pDataFlash->pDevice->
+ byte_mask)) >> 8);
+ pDataFlash->pDataFlashDesc->command[3] =
+ (unsigned char)((unsigned int)bufferAddress & 0x00FF);
+ pDataFlash->pDataFlashDesc->command[4] = 0;
+ cmdsize = 4;
+ }
+
+ pDataFlash->pDataFlashDesc->tx_cmd_pt =
+ pDataFlash->pDataFlashDesc->command;
+ pDataFlash->pDataFlashDesc->tx_cmd_size = cmdsize;
+ pDataFlash->pDataFlashDesc->rx_cmd_pt =
+ pDataFlash->pDataFlashDesc->command;
+ pDataFlash->pDataFlashDesc->rx_cmd_size = cmdsize;
+
+ pDataFlash->pDataFlashDesc->rx_data_pt = dataBuffer;
+ pDataFlash->pDataFlashDesc->tx_data_pt = dataBuffer;
+ pDataFlash->pDataFlashDesc->rx_data_size = SizeToWrite;
+ pDataFlash->pDataFlashDesc->tx_data_size = SizeToWrite;
+
+ return AT91F_SpiWrite(pDataFlash->pDataFlashDesc);
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : AT91F_PageErase */
+/* Object : Erase a page */
+/* Input Parameters : DataFlash Service */
+/* : Page concerned */
+/* : */
+/* Return value : State of the dataflash */
+/*---------------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_PageErase(
+ AT91PS_DataFlash pDataFlash,
+ unsigned int page)
+{
+ int cmdsize;
+ /* Test if the buffer command is legal */
+ /* no data to transmit or receive */
+ pDataFlash->pDataFlashDesc->tx_data_size = 0;
+
+ cmdsize = 4;
+ if (pDataFlash->pDevice->pages_number >= 16384)
+ cmdsize = 5;
+ return (AT91F_DataFlashSendCommand(pDataFlash,
+ DB_PAGE_ERASE, cmdsize,
+ page * pDataFlash->pDevice->pages_size));
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : AT91F_BlockErase */
+/* Object : Erase a Block */
+/* Input Parameters : DataFlash Service */
+/* : Page concerned */
+/* : */
+/* Return value : State of the dataflash */
+/*---------------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_BlockErase(
+ AT91PS_DataFlash pDataFlash,
+ unsigned int block)
+{
+ int cmdsize;
+ /* Test if the buffer command is legal */
+ /* no data to transmit or receive */
+ pDataFlash->pDataFlashDesc->tx_data_size = 0;
+ cmdsize = 4;
+ if (pDataFlash->pDevice->pages_number >= 16384)
+ cmdsize = 5;
+ return (AT91F_DataFlashSendCommand(pDataFlash, DB_BLOCK_ERASE, cmdsize,
+ block * 8 *
+ pDataFlash->pDevice->pages_size));
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : AT91F_WriteBufferToMain */
+/* Object : Write buffer to the main memory */
+/* Input Parameters : DataFlash Service */
+/* : <BufferCommand> = command to send to buffer1 or buffer2 */
+/* : <dest> = main memory address */
+/* Return value : State of the dataflash */
+/*---------------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_WriteBufferToMain(AT91PS_DataFlash pDataFlash,
+ unsigned char BufferCommand,
+ unsigned int dest)
+{
+ int cmdsize;
+ /* Test if the buffer command is correct */
+ if ((BufferCommand != DB_BUF1_PAGE_PGM) &&
+ (BufferCommand != DB_BUF1_PAGE_ERASE_PGM) &&
+ (BufferCommand != DB_BUF2_PAGE_PGM) &&
+ (BufferCommand != DB_BUF2_PAGE_ERASE_PGM))
+ return DATAFLASH_BAD_COMMAND;
+
+ /* no data to transmit or receive */
+ pDataFlash->pDataFlashDesc->tx_data_size = 0;
+
+ cmdsize = 4;
+ if (pDataFlash->pDevice->pages_number >= 16384)
+ cmdsize = 5;
+ /* Send the command to the dataflash */
+ return (AT91F_DataFlashSendCommand(pDataFlash, BufferCommand,
+ cmdsize, dest));
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : AT91F_PartialPageWrite */
+/* Object : Write part of a page */
+/* Input Parameters : <*src> = source buffer */
+/* : <dest> = dataflash destination address */
+/* : <size> = number of bytes to write */
+/*---------------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_PartialPageWrite(AT91PS_DataFlash pDataFlash,
+ unsigned char *src,
+ unsigned int dest,
+ unsigned int size)
+{
+ unsigned int page;
+ unsigned int AdrInPage;
+
+ page = dest / (pDataFlash->pDevice->pages_size);
+ AdrInPage = dest % (pDataFlash->pDevice->pages_size);
+
+ /* Read the contents of the page in the Sram Buffer */
+ AT91F_MainMemoryToBufferTransfert(pDataFlash, DB_PAGE_2_BUF1_TRF, page);
+ AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc,
+ AT91C_TIMEOUT_WRDY);
+ /*Update the SRAM buffer */
+ AT91F_DataFlashWriteBuffer(pDataFlash, DB_BUF1_WRITE, src,
+ AdrInPage, size);
+
+ AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc,
+ AT91C_TIMEOUT_WRDY);
+
+ /* Erase page if a 128 Mbits device */
+ if (pDataFlash->pDevice->pages_number >= 16384) {
+ AT91F_PageErase(pDataFlash, page);
+ /* Rewrite the modified Sram Buffer in the main memory */
+ AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc,
+ AT91C_TIMEOUT_WRDY);
+ }
+
+ /* Rewrite the modified Sram Buffer in the main memory */
+ return (AT91F_WriteBufferToMain(pDataFlash, DB_BUF1_PAGE_ERASE_PGM,
+ (page *
+ pDataFlash->pDevice->pages_size)));
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : AT91F_DataFlashWrite */
+/* Object : Write a data buffer to the dataflash */
+/* Input Parameters : <*src> = Source buffer */
+/* : <dest> = dataflash address */
+/* : <size> = data buffer size */
+/*---------------------------------------------------------------------------*/
+AT91S_DataFlashStatus AT91F_DataFlashWrite(AT91PS_DataFlash pDataFlash,
+ unsigned char *src,
+ int dest, int size)
+{
+ unsigned int length;
+ unsigned int page;
+ unsigned int status;
+
+ AT91F_SpiEnable(pDataFlash->pDevice->cs);
+
+ if ((dest + size) > (pDataFlash->pDevice->pages_size *
+ (pDataFlash->pDevice->pages_number)))
+ return DATAFLASH_MEMORY_OVERFLOW;
+
+ /* If destination is not aligned to a page start address */
+ if ((dest % ((unsigned int)(pDataFlash->pDevice->pages_size))) != 0) {
+ length =
+ pDataFlash->pDevice->pages_size -
+ (dest % ((unsigned int)(pDataFlash->pDevice->pages_size)));
+
+ if (size < length)
+ length = size;
+
+ if (!AT91F_PartialPageWrite(pDataFlash, src, dest, length))
+ return DATAFLASH_ERROR;
+
+ AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc,
+ AT91C_TIMEOUT_WRDY);
+
+ /* Update size, source and destination pointers */
+ size -= length;
+ dest += length;
+ src += length;
+ }
+
+ while ((size - pDataFlash->pDevice->pages_size) >= 0) {
+ /* program dataflash page */
+ page = (unsigned int)dest / (pDataFlash->pDevice->pages_size);
+
+ status = AT91F_DataFlashWriteBuffer(pDataFlash,
+ DB_BUF1_WRITE, src, 0,
+ pDataFlash->pDevice->
+ pages_size);
+ AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc,
+ AT91C_TIMEOUT_WRDY);
+
+ status = AT91F_PageErase(pDataFlash, page);
+ AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc,
+ AT91C_TIMEOUT_WRDY);
+ if (!status)
+ return DATAFLASH_ERROR;
+
+ status = AT91F_WriteBufferToMain(pDataFlash,
+ DB_BUF1_PAGE_PGM, dest);
+ if (!status)
+ return DATAFLASH_ERROR;
+
+ AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc,
+ AT91C_TIMEOUT_WRDY);
+
+ /* Update size, source and destination pointers */
+ size -= pDataFlash->pDevice->pages_size;
+ dest += pDataFlash->pDevice->pages_size;
+ src += pDataFlash->pDevice->pages_size;
+ }
+
+ /* If there are still some bytes to write */
+ if (size > 0) {
+ /* program dataflash page */
+ if (!AT91F_PartialPageWrite(pDataFlash, src, dest, size))
+ return DATAFLASH_ERROR;
+
+ AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc,
+ AT91C_TIMEOUT_WRDY);
+ }
+ return DATAFLASH_OK;
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : AT91F_DataFlashRead */
+/* Object : Read a block in dataflash */
+/* Input Parameters : <addr>, <size>, <*buffer> = destination buffer */
+/* Return value : DATAFLASH_OK on success, -1 on error */
+/*---------------------------------------------------------------------------*/
+int AT91F_DataFlashRead(AT91PS_DataFlash pDataFlash,
+ unsigned long addr, unsigned long size, char *buffer)
+{
+ unsigned long SizeToRead;
+
+ AT91F_SpiEnable(pDataFlash->pDevice->cs);
+
+ if (AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc,
+ AT91C_TIMEOUT_WRDY) != DATAFLASH_OK)
+ return -1;
+
+ while (size) {
+ SizeToRead = (size < 0x8000) ? size : 0x8000;
+
+ if (AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc,
+ AT91C_TIMEOUT_WRDY) !=
+ DATAFLASH_OK)
+ return -1;
+
+ if (AT91F_DataFlashContinuousRead(pDataFlash, addr,
+ (uchar *) buffer,
+ SizeToRead) != DATAFLASH_OK)
+ return -1;
+
+ size -= SizeToRead;
+ addr += SizeToRead;
+ buffer += SizeToRead;
+ }
+
+ return DATAFLASH_OK;
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : AT91F_DataflashProbe */
+/* Object : Probe the dataflash by reading its status register */
+/* Input Parameters : */
+/* Return value : Dataflash status register */
+/*---------------------------------------------------------------------------*/
+int AT91F_DataflashProbe(int cs, AT91PS_DataflashDesc pDesc)
+{
+ AT91F_SpiEnable(cs);
+ AT91F_DataFlashGetStatus(pDesc);
+ return ((pDesc->command[1] == 0xFF) ? 0 : pDesc->command[1] & 0x3C);
+}
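Putting the at45.c entry points together, a minimal usage sketch looks like the following. It assumes an AT91PS_DataFlash descriptor that has already been initialised by the dataflash setup code elsewhere in this commit; the helper name copy_block and the offsets are made up for illustration.

#include <dataflash.h>

/* hypothetical helper: read 256 bytes from offset 0 and rewrite them at 0x1000 */
static int copy_block(AT91PS_DataFlash df)
{
	char buf[256];

	if (AT91F_DataFlashRead(df, 0, sizeof(buf), buf) != DATAFLASH_OK)
		return -1;	/* read failed or the flash never became ready */

	if (AT91F_DataFlashWrite(df, (unsigned char *)buf, 0x1000,
				 sizeof(buf)) != DATAFLASH_OK)
		return -1;	/* write, erase or overflow error */

	return 0;
}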
diff --git a/u-boot/drivers/mtd/cfi_flash.c b/u-boot/drivers/mtd/cfi_flash.c
new file mode 100644
index 0000000..dd394a8
--- /dev/null
+++ b/u-boot/drivers/mtd/cfi_flash.c
@@ -0,0 +1,2210 @@
+/*
+ * (C) Copyright 2002-2004
+ * Brad Kemp, Seranoa Networks, Brad.Kemp@seranoa.com
+ *
+ * Copyright (C) 2003 Arabella Software Ltd.
+ * Yuli Barcohen <yuli@arabellasw.com>
+ *
+ * Copyright (C) 2004
+ * Ed Okerson
+ *
+ * Copyright (C) 2006
+ * Tolunay Orkun <listmember@orkun.us>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+/* The DEBUG define must be before common to enable debugging */
+/* #define DEBUG */
+
+#include <common.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <environment.h>
+#include <mtd/cfi_flash.h>
+
+/*
+ * This file implements a Common Flash Interface (CFI) driver for
+ * U-Boot.
+ *
+ * The width of the port and the width of the chips are determined at
+ * initialization. These widths are used to calculate the addresses for
+ * accessing the CFI data structures.
+ *
+ * References
+ * JEDEC Standard JESD68 - Common Flash Interface (CFI)
+ * JEDEC Standard JEP137-A Common Flash Interface (CFI) ID Codes
+ * Intel Application Note 646 Common Flash Interface (CFI) and Command Sets
+ * Intel 290667-008 3 Volt Intel StrataFlash Memory datasheet
+ * AMD CFI Specification, Release 2.0 December 1, 2001
+ * AMD/Spansion Application Note: Migration from Single-byte to Three-byte
+ * Device IDs, Publication Number 25538 Revision A, November 8, 2001
+ *
+ * Define CONFIG_SYS_WRITE_SWAPPED_DATA if you have to swap the bytes between
+ * reading and writing (yes, such hardware exists).
+ */
+
+static uint flash_offset_cfi[2] = { FLASH_OFFSET_CFI, FLASH_OFFSET_CFI_ALT };
+#ifdef CONFIG_FLASH_CFI_MTD
+static uint flash_verbose = 1;
+#else
+#define flash_verbose 1
+#endif
+
+flash_info_t flash_info[CFI_MAX_FLASH_BANKS]; /* FLASH chips info */
+
+/*
+ * Check if chip width is defined. If not, start detecting with 8bit.
+ */
+#ifndef CONFIG_SYS_FLASH_CFI_WIDTH
+#define CONFIG_SYS_FLASH_CFI_WIDTH FLASH_CFI_8BIT
+#endif
+
+/*
+ * 0xffff is an undefined value for the configuration register. When
+ * this value is returned, the configuration register shall not be
+ * written at all (default mode).
+ */
+static u16 cfi_flash_config_reg(int i)
+{
+#ifdef CONFIG_SYS_CFI_FLASH_CONFIG_REGS
+ return ((u16 [])CONFIG_SYS_CFI_FLASH_CONFIG_REGS)[i];
+#else
+ return 0xffff;
+#endif
+}
+
+#if defined(CONFIG_SYS_MAX_FLASH_BANKS_DETECT)
+int cfi_flash_num_flash_banks = CONFIG_SYS_MAX_FLASH_BANKS_DETECT;
+#endif
+
+static phys_addr_t __cfi_flash_bank_addr(int i)
+{
+ return ((phys_addr_t [])CONFIG_SYS_FLASH_BANKS_LIST)[i];
+}
+phys_addr_t cfi_flash_bank_addr(int i)
+ __attribute__((weak, alias("__cfi_flash_bank_addr")));
+
+static unsigned long __cfi_flash_bank_size(int i)
+{
+#ifdef CONFIG_SYS_FLASH_BANKS_SIZES
+ return ((unsigned long [])CONFIG_SYS_FLASH_BANKS_SIZES)[i];
+#else
+ return 0;
+#endif
+}
+unsigned long cfi_flash_bank_size(int i)
+ __attribute__((weak, alias("__cfi_flash_bank_size")));
+
+static void __flash_write8(u8 value, void *addr)
+{
+ __raw_writeb(value, addr);
+}
+
+static void __flash_write16(u16 value, void *addr)
+{
+ __raw_writew(value, addr);
+}
+
+static void __flash_write32(u32 value, void *addr)
+{
+ __raw_writel(value, addr);
+}
+
+static void __flash_write64(u64 value, void *addr)
+{
+ /* No architectures currently implement __raw_writeq() */
+ *(volatile u64 *)addr = value;
+}
+
+static u8 __flash_read8(void *addr)
+{
+ return __raw_readb(addr);
+}
+
+static u16 __flash_read16(void *addr)
+{
+ return __raw_readw(addr);
+}
+
+static u32 __flash_read32(void *addr)
+{
+ return __raw_readl(addr);
+}
+
+static u64 __flash_read64(void *addr)
+{
+ /* No architectures currently implement __raw_readq() */
+ return *(volatile u64 *)addr;
+}
+
+#ifdef CONFIG_CFI_FLASH_USE_WEAK_ACCESSORS
+void flash_write8(u8 value, void *addr)__attribute__((weak, alias("__flash_write8")));
+void flash_write16(u16 value, void *addr)__attribute__((weak, alias("__flash_write16")));
+void flash_write32(u32 value, void *addr)__attribute__((weak, alias("__flash_write32")));
+void flash_write64(u64 value, void *addr)__attribute__((weak, alias("__flash_write64")));
+u8 flash_read8(void *addr)__attribute__((weak, alias("__flash_read8")));
+u16 flash_read16(void *addr)__attribute__((weak, alias("__flash_read16")));
+u32 flash_read32(void *addr)__attribute__((weak, alias("__flash_read32")));
+u64 flash_read64(void *addr)__attribute__((weak, alias("__flash_read64")));
+#else
+#define flash_write8 __flash_write8
+#define flash_write16 __flash_write16
+#define flash_write32 __flash_write32
+#define flash_write64 __flash_write64
+#define flash_read8 __flash_read8
+#define flash_read16 __flash_read16
+#define flash_read32 __flash_read32
+#define flash_read64 __flash_read64
+#endif
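/*
 * Illustration (not part of this commit): with
 * CONFIG_CFI_FLASH_USE_WEAK_ACCESSORS defined, the accessors above become
 * weak aliases, so a board file can override them -- for example to
 * byte-swap a cross-wired 16-bit data bus. A minimal board-side sketch:
 */
void flash_write16(u16 value, void *addr)
{
	/* swap the two bytes before hitting the bus */
	__raw_writew((u16)((value >> 8) | (value << 8)), addr);
}

u16 flash_read16(void *addr)
{
	u16 v = __raw_readw(addr);

	/* swap back on read */
	return (u16)((v >> 8) | (v << 8));
}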
+
+/*-----------------------------------------------------------------------
+ */
+#if defined(CONFIG_ENV_IS_IN_FLASH) || defined(CONFIG_ENV_ADDR_REDUND) || (CONFIG_SYS_MONITOR_BASE >= CONFIG_SYS_FLASH_BASE)
+flash_info_t *flash_get_info(ulong base)
+{
+ int i;
+ flash_info_t *info = NULL;
+
+ for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
+ info = & flash_info[i];
+ if (info->size && info->start[0] <= base &&
+ base <= info->start[0] + info->size - 1)
+ break;
+ }
+
+ return info;
+}
+#endif
+
+unsigned long flash_sector_size(flash_info_t *info, flash_sect_t sect)
+{
+ if (sect != (info->sector_count - 1))
+ return info->start[sect + 1] - info->start[sect];
+ else
+ return info->start[0] + info->size - info->start[sect];
+}
+
+/*-----------------------------------------------------------------------
+ * create an address based on the offset and the port width
+ */
+static inline void *
+flash_map (flash_info_t * info, flash_sect_t sect, uint offset)
+{
+ unsigned int byte_offset = offset * info->portwidth;
+
+ return (void *)(info->start[sect] + byte_offset);
+}
+
+static inline void flash_unmap(flash_info_t *info, flash_sect_t sect,
+ unsigned int offset, void *addr)
+{
+}
+
+/*-----------------------------------------------------------------------
+ * make a proper sized command based on the port and chip widths
+ */
+static void flash_make_cmd(flash_info_t *info, u32 cmd, void *cmdbuf)
+{
+ int i;
+ int cword_offset;
+ int cp_offset;
+#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ u32 cmd_le = cpu_to_le32(cmd);
+#endif
+ uchar val;
+ uchar *cp = (uchar *) cmdbuf;
+
+ for (i = info->portwidth; i > 0; i--){
+ cword_offset = (info->portwidth-i)%info->chipwidth;
+#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ cp_offset = info->portwidth - i;
+ val = *((uchar*)&cmd_le + cword_offset);
+#else
+ cp_offset = i - 1;
+ val = *((uchar*)&cmd + sizeof(u32) - cword_offset - 1);
+#endif
+ cp[cp_offset] = (cword_offset >= sizeof(u32)) ? 0x00 : val;
+ }
+}
+
+#ifdef DEBUG
+/*-----------------------------------------------------------------------
+ * Debug support
+ */
+static void print_longlong (char *str, unsigned long long data)
+{
+ int i;
+ char *cp;
+
+ cp = (char *) &data;
+ for (i = 0; i < 8; i++)
+ sprintf (&str[i * 2], "%2.2x", *cp++);
+}
+
+static void flash_printqry (struct cfi_qry *qry)
+{
+ u8 *p = (u8 *)qry;
+ int x, y;
+
+ for (x = 0; x < sizeof(struct cfi_qry); x += 16) {
+ debug("%02x : ", x);
+ for (y = 0; y < 16; y++)
+ debug("%2.2x ", p[x + y]);
+ debug(" ");
+ for (y = 0; y < 16; y++) {
+ unsigned char c = p[x + y];
+ if (c >= 0x20 && c <= 0x7e)
+ debug("%c", c);
+ else
+ debug(".");
+ }
+ debug("\n");
+ }
+}
+#endif
+
+
+/*-----------------------------------------------------------------------
+ * read a character at a port width address
+ */
+static inline uchar flash_read_uchar (flash_info_t * info, uint offset)
+{
+ uchar *cp;
+ uchar retval;
+
+ cp = flash_map (info, 0, offset);
+#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ retval = flash_read8(cp);
+#else
+ retval = flash_read8(cp + info->portwidth - 1);
+#endif
+ flash_unmap (info, 0, offset, cp);
+ return retval;
+}
+
+/*-----------------------------------------------------------------------
+ * read a word at a port width address, assume 16bit bus
+ */
+static inline ushort flash_read_word (flash_info_t * info, uint offset)
+{
+ ushort *addr, retval;
+
+ addr = flash_map (info, 0, offset);
+ retval = flash_read16 (addr);
+ flash_unmap (info, 0, offset, addr);
+ return retval;
+}
+
+
+/*-----------------------------------------------------------------------
+ * read a long word by picking the least significant byte of each maximum
+ * port size word. Swap for ppc format.
+ */
+static ulong flash_read_long (flash_info_t * info, flash_sect_t sect,
+ uint offset)
+{
+ uchar *addr;
+ ulong retval;
+
+#ifdef DEBUG
+ int x;
+#endif
+ addr = flash_map (info, sect, offset);
+
+#ifdef DEBUG
+ debug ("long addr is at %p info->portwidth = %d\n", addr,
+ info->portwidth);
+ for (x = 0; x < 4 * info->portwidth; x++) {
+ debug ("addr[%x] = 0x%x\n", x, flash_read8(addr + x));
+ }
+#endif
+#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ retval = ((flash_read8(addr) << 16) |
+ (flash_read8(addr + info->portwidth) << 24) |
+ (flash_read8(addr + 2 * info->portwidth)) |
+ (flash_read8(addr + 3 * info->portwidth) << 8));
+#else
+ retval = ((flash_read8(addr + 2 * info->portwidth - 1) << 24) |
+ (flash_read8(addr + info->portwidth - 1) << 16) |
+ (flash_read8(addr + 4 * info->portwidth - 1) << 8) |
+ (flash_read8(addr + 3 * info->portwidth - 1)));
+#endif
+ flash_unmap(info, sect, offset, addr);
+
+ return retval;
+}
+
+/*
+ * Write a proper sized command to the correct address
+ */
+void flash_write_cmd (flash_info_t * info, flash_sect_t sect,
+ uint offset, u32 cmd)
+{
+
+ void *addr;
+ cfiword_t cword;
+
+ addr = flash_map (info, sect, offset);
+ flash_make_cmd (info, cmd, &cword);
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ debug ("fwc addr %p cmd %x %x 8bit x %d bit\n", addr, cmd,
+ cword.c, info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+ flash_write8(cword.c, addr);
+ break;
+ case FLASH_CFI_16BIT:
+ debug ("fwc addr %p cmd %x %4.4x 16bit x %d bit\n", addr,
+ cmd, cword.w,
+ info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+ flash_write16(cword.w, addr);
+ break;
+ case FLASH_CFI_32BIT:
+ debug ("fwc addr %p cmd %x %8.8lx 32bit x %d bit\n", addr,
+ cmd, cword.l,
+ info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+ flash_write32(cword.l, addr);
+ break;
+ case FLASH_CFI_64BIT:
+#ifdef DEBUG
+ {
+ char str[20];
+
+ print_longlong (str, cword.ll);
+
+ debug ("fwrite addr %p cmd %x %s 64 bit x %d bit\n",
+ addr, cmd, str,
+ info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+ }
+#endif
+ flash_write64(cword.ll, addr);
+ break;
+ }
+
+ /* Ensure all the instructions are fully finished */
+ sync();
+
+ flash_unmap(info, sect, offset, addr);
+}
+
+static void flash_unlock_seq (flash_info_t * info, flash_sect_t sect)
+{
+ flash_write_cmd (info, sect, info->addr_unlock1, AMD_CMD_UNLOCK_START);
+ flash_write_cmd (info, sect, info->addr_unlock2, AMD_CMD_UNLOCK_ACK);
+}
+
+/*-----------------------------------------------------------------------
+ */
+static int flash_isequal (flash_info_t * info, flash_sect_t sect,
+ uint offset, uchar cmd)
+{
+ void *addr;
+ cfiword_t cword;
+ int retval;
+
+ addr = flash_map (info, sect, offset);
+ flash_make_cmd (info, cmd, &cword);
+
+ debug ("is= cmd %x(%c) addr %p ", cmd, cmd, addr);
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ debug ("is= %x %x\n", flash_read8(addr), cword.c);
+ retval = (flash_read8(addr) == cword.c);
+ break;
+ case FLASH_CFI_16BIT:
+ debug ("is= %4.4x %4.4x\n", flash_read16(addr), cword.w);
+ retval = (flash_read16(addr) == cword.w);
+ break;
+ case FLASH_CFI_32BIT:
+ debug ("is= %8.8x %8.8lx\n", flash_read32(addr), cword.l);
+ retval = (flash_read32(addr) == cword.l);
+ break;
+ case FLASH_CFI_64BIT:
+#ifdef DEBUG
+ {
+ char str1[20];
+ char str2[20];
+
+ print_longlong (str1, flash_read64(addr));
+ print_longlong (str2, cword.ll);
+ debug ("is= %s %s\n", str1, str2);
+ }
+#endif
+ retval = (flash_read64(addr) == cword.ll);
+ break;
+ default:
+ retval = 0;
+ break;
+ }
+ flash_unmap(info, sect, offset, addr);
+
+ return retval;
+}
+
+/*-----------------------------------------------------------------------
+ */
+static int flash_isset (flash_info_t * info, flash_sect_t sect,
+ uint offset, uchar cmd)
+{
+ void *addr;
+ cfiword_t cword;
+ int retval;
+
+ addr = flash_map (info, sect, offset);
+ flash_make_cmd (info, cmd, &cword);
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ retval = ((flash_read8(addr) & cword.c) == cword.c);
+ break;
+ case FLASH_CFI_16BIT:
+ retval = ((flash_read16(addr) & cword.w) == cword.w);
+ break;
+ case FLASH_CFI_32BIT:
+ retval = ((flash_read32(addr) & cword.l) == cword.l);
+ break;
+ case FLASH_CFI_64BIT:
+ retval = ((flash_read64(addr) & cword.ll) == cword.ll);
+ break;
+ default:
+ retval = 0;
+ break;
+ }
+ flash_unmap(info, sect, offset, addr);
+
+ return retval;
+}
+
+/*-----------------------------------------------------------------------
+ */
+static int flash_toggle (flash_info_t * info, flash_sect_t sect,
+ uint offset, uchar cmd)
+{
+ void *addr;
+ cfiword_t cword;
+ int retval;
+
+ addr = flash_map (info, sect, offset);
+ flash_make_cmd (info, cmd, &cword);
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ retval = flash_read8(addr) != flash_read8(addr);
+ break;
+ case FLASH_CFI_16BIT:
+ retval = flash_read16(addr) != flash_read16(addr);
+ break;
+ case FLASH_CFI_32BIT:
+ retval = flash_read32(addr) != flash_read32(addr);
+ break;
+ case FLASH_CFI_64BIT:
+ retval = ( (flash_read32( addr ) != flash_read32( addr )) ||
+ (flash_read32(addr+4) != flash_read32(addr+4)) );
+ break;
+ default:
+ retval = 0;
+ break;
+ }
+ flash_unmap(info, sect, offset, addr);
+
+ return retval;
+}
+
+/*
+ * flash_is_busy - check to see if the flash is busy
+ *
+ * This routine checks the status of the chip and returns true if the
+ * chip is busy.
+ */
+static int flash_is_busy (flash_info_t * info, flash_sect_t sect)
+{
+ int retval;
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ retval = !flash_isset (info, sect, 0, FLASH_STATUS_DONE);
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+#ifdef CONFIG_FLASH_CFI_LEGACY
+ case CFI_CMDSET_AMD_LEGACY:
+#endif
+ retval = flash_toggle (info, sect, 0, AMD_STATUS_TOGGLE);
+ break;
+ default:
+ retval = 0;
+ }
+ debug ("flash_is_busy: %d\n", retval);
+ return retval;
+}
+
+/*-----------------------------------------------------------------------
+ * wait for XSR.7 to be set. Time out with an error if it does not.
+ * This routine does not set the flash to read-array mode.
+ */
+static int flash_status_check (flash_info_t * info, flash_sect_t sector,
+ ulong tout, char *prompt)
+{
+ ulong start;
+
+#if CONFIG_SYS_HZ != 1000
+ if ((ulong)CONFIG_SYS_HZ > 100000)
+ tout *= (ulong)CONFIG_SYS_HZ / 1000; /* for a big HZ, avoid overflow */
+ else
+ tout = DIV_ROUND_UP(tout * (ulong)CONFIG_SYS_HZ, 1000);
+#endif
+
+ /* Wait for command completion */
+ reset_timer();
+ start = get_timer (0);
+ while (flash_is_busy (info, sector)) {
+ if (get_timer (start) > tout) {
+ printf ("Flash %s timeout at address %lx data %lx\n",
+ prompt, info->start[sector],
+ flash_read_long (info, sector, 0));
+ flash_write_cmd (info, sector, 0, info->cmd_reset);
+ return ERR_TIMOUT;
+ }
+ udelay (1); /* also triggers watchdog */
+ }
+ return ERR_OK;
+}
+
+/*-----------------------------------------------------------------------
+ * Wait for XSR.7 to be set; if it times out, print an error, otherwise
+ * do a full status check.
+ *
+ * This routine sets the flash to read-array mode.
+ */
+static int flash_full_status_check (flash_info_t * info, flash_sect_t sector,
+ ulong tout, char *prompt)
+{
+ int retcode;
+
+ retcode = flash_status_check (info, sector, tout, prompt);
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ case CFI_CMDSET_INTEL_STANDARD:
+ if ((retcode != ERR_OK)
+ && !flash_isequal (info, sector, 0, FLASH_STATUS_DONE)) {
+ retcode = ERR_INVAL;
+ printf ("Flash %s error at address %lx\n", prompt,
+ info->start[sector]);
+ if (flash_isset (info, sector, 0, FLASH_STATUS_ECLBS |
+ FLASH_STATUS_PSLBS)) {
+ puts ("Command Sequence Error.\n");
+ } else if (flash_isset (info, sector, 0,
+ FLASH_STATUS_ECLBS)) {
+ puts ("Block Erase Error.\n");
+ retcode = ERR_NOT_ERASED;
+ } else if (flash_isset (info, sector, 0,
+ FLASH_STATUS_PSLBS)) {
+ puts ("Locking Error\n");
+ }
+ if (flash_isset (info, sector, 0, FLASH_STATUS_DPS)) {
+ puts ("Block locked.\n");
+ retcode = ERR_PROTECTED;
+ }
+ if (flash_isset (info, sector, 0, FLASH_STATUS_VPENS))
+ puts ("Vpp Low Error.\n");
+ }
+ flash_write_cmd (info, sector, 0, info->cmd_reset);
+ break;
+ default:
+ break;
+ }
+ return retcode;
+}
+
+static int use_flash_status_poll(flash_info_t *info)
+{
+#ifdef CONFIG_SYS_CFI_FLASH_STATUS_POLL
+ if (info->vendor == CFI_CMDSET_AMD_EXTENDED ||
+ info->vendor == CFI_CMDSET_AMD_STANDARD)
+ return 1;
+#endif
+ return 0;
+}
+
+static int flash_status_poll(flash_info_t *info, void *src, void *dst,
+ ulong tout, char *prompt)
+{
+#ifdef CONFIG_SYS_CFI_FLASH_STATUS_POLL
+ ulong start;
+ int ready;
+
+#if CONFIG_SYS_HZ != 1000
+ if ((ulong)CONFIG_SYS_HZ > 100000)
+ tout *= (ulong)CONFIG_SYS_HZ / 1000; /* for a big HZ, avoid overflow */
+ else
+ tout = DIV_ROUND_UP(tout * (ulong)CONFIG_SYS_HZ, 1000);
+#endif
+
+ /* Wait for command completion */
+ reset_timer();
+ start = get_timer(0);
+ while (1) {
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ ready = flash_read8(dst) == flash_read8(src);
+ break;
+ case FLASH_CFI_16BIT:
+ ready = flash_read16(dst) == flash_read16(src);
+ break;
+ case FLASH_CFI_32BIT:
+ ready = flash_read32(dst) == flash_read32(src);
+ break;
+ case FLASH_CFI_64BIT:
+ ready = flash_read64(dst) == flash_read64(src);
+ break;
+ default:
+ ready = 0;
+ break;
+ }
+ if (ready)
+ break;
+ if (get_timer(start) > tout) {
+ printf("Flash %s timeout at address %lx data %lx\n",
+ prompt, (ulong)dst, (ulong)flash_read8(dst));
+ return ERR_TIMOUT;
+ }
+ udelay(1); /* also triggers watchdog */
+ }
+#endif /* CONFIG_SYS_CFI_FLASH_STATUS_POLL */
+ return ERR_OK;
+}
+
+/*-----------------------------------------------------------------------
+ */
+static void flash_add_byte (flash_info_t * info, cfiword_t * cword, uchar c)
+{
+#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ unsigned short w;
+ unsigned int l;
+ unsigned long long ll;
+#endif
+
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ cword->c = c;
+ break;
+ case FLASH_CFI_16BIT:
+#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ w = c;
+ w <<= 8;
+ cword->w = (cword->w >> 8) | w;
+#else
+ cword->w = (cword->w << 8) | c;
+#endif
+ break;
+ case FLASH_CFI_32BIT:
+#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ l = c;
+ l <<= 24;
+ cword->l = (cword->l >> 8) | l;
+#else
+ cword->l = (cword->l << 8) | c;
+#endif
+ break;
+ case FLASH_CFI_64BIT:
+#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ ll = c;
+ ll <<= 56;
+ cword->ll = (cword->ll >> 8) | ll;
+#else
+ cword->ll = (cword->ll << 8) | c;
+#endif
+ break;
+ }
+}
+
+/*
+ * Loop through the sector table starting from the previously found sector.
+ * Searches forwards or backwards, depending on the passed address.
+ */
+static flash_sect_t find_sector (flash_info_t * info, ulong addr)
+{
+ static flash_sect_t saved_sector = 0; /* previously found sector */
+ flash_sect_t sector = saved_sector;
+
+ while ((info->start[sector] < addr)
+ && (sector < info->sector_count - 1))
+ sector++;
+ while ((info->start[sector] > addr) && (sector > 0))
+ /*
+ * also decrements the sector in case of an overshoot
+ * in the first loop
+ */
+ sector--;
+
+ saved_sector = sector;
+ return sector;
+}
+
+/*-----------------------------------------------------------------------
+ */
+static int flash_write_cfiword (flash_info_t * info, ulong dest,
+ cfiword_t cword)
+{
+ void *dstaddr = (void *)dest;
+ int flag;
+ flash_sect_t sect = 0;
+ char sect_found = 0;
+
+ /* Check if Flash is (sufficiently) erased */
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ flag = ((flash_read8(dstaddr) & cword.c) == cword.c);
+ break;
+ case FLASH_CFI_16BIT:
+ flag = ((flash_read16(dstaddr) & cword.w) == cword.w);
+ break;
+ case FLASH_CFI_32BIT:
+ flag = ((flash_read32(dstaddr) & cword.l) == cword.l);
+ break;
+ case FLASH_CFI_64BIT:
+ flag = ((flash_read64(dstaddr) & cword.ll) == cword.ll);
+ break;
+ default:
+ flag = 0;
+ break;
+ }
+ if (!flag)
+ return ERR_NOT_ERASED;
+
+ /* Disable interrupts which might cause a timeout here */
+ flag = disable_interrupts ();
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ case CFI_CMDSET_INTEL_STANDARD:
+ flash_write_cmd (info, 0, 0, FLASH_CMD_CLEAR_STATUS);
+ flash_write_cmd (info, 0, 0, FLASH_CMD_WRITE);
+ break;
+ case CFI_CMDSET_AMD_EXTENDED:
+ case CFI_CMDSET_AMD_STANDARD:
+ sect = find_sector(info, dest);
+ flash_unlock_seq (info, sect);
+ flash_write_cmd (info, sect, info->addr_unlock1, AMD_CMD_WRITE);
+ sect_found = 1;
+ break;
+#ifdef CONFIG_FLASH_CFI_LEGACY
+ case CFI_CMDSET_AMD_LEGACY:
+ sect = find_sector(info, dest);
+ flash_unlock_seq (info, 0);
+ flash_write_cmd (info, 0, info->addr_unlock1, AMD_CMD_WRITE);
+ sect_found = 1;
+ break;
+#endif
+ }
+
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ flash_write8(cword.c, dstaddr);
+ break;
+ case FLASH_CFI_16BIT:
+ flash_write16(cword.w, dstaddr);
+ break;
+ case FLASH_CFI_32BIT:
+ flash_write32(cword.l, dstaddr);
+ break;
+ case FLASH_CFI_64BIT:
+ flash_write64(cword.ll, dstaddr);
+ break;
+ }
+
+ /* re-enable interrupts if necessary */
+ if (flag)
+ enable_interrupts ();
+
+ if (!sect_found)
+ sect = find_sector (info, dest);
+
+ if (use_flash_status_poll(info))
+ return flash_status_poll(info, &cword, dstaddr,
+ info->write_tout, "write");
+ else
+ return flash_full_status_check(info, sect,
+ info->write_tout, "write");
+}
+
+#ifdef CONFIG_SYS_FLASH_USE_BUFFER_WRITE
+
+static int flash_write_cfibuffer (flash_info_t * info, ulong dest, uchar * cp,
+ int len)
+{
+ flash_sect_t sector;
+ int cnt;
+ int retcode;
+ void *src = cp;
+ void *dst = (void *)dest;
+ void *dst2 = dst;
+ int flag = 0;
+ uint offset = 0;
+ unsigned int shift;
+ uchar write_cmd;
+
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ shift = 0;
+ break;
+ case FLASH_CFI_16BIT:
+ shift = 1;
+ break;
+ case FLASH_CFI_32BIT:
+ shift = 2;
+ break;
+ case FLASH_CFI_64BIT:
+ shift = 3;
+ break;
+ default:
+ retcode = ERR_INVAL;
+ goto out_unmap;
+ }
+
+ cnt = len >> shift;
+
+ while ((cnt-- > 0) && (flag == 0)) {
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ flag = ((flash_read8(dst2) & flash_read8(src)) ==
+ flash_read8(src));
+ src += 1, dst2 += 1;
+ break;
+ case FLASH_CFI_16BIT:
+ flag = ((flash_read16(dst2) & flash_read16(src)) ==
+ flash_read16(src));
+ src += 2, dst2 += 2;
+ break;
+ case FLASH_CFI_32BIT:
+ flag = ((flash_read32(dst2) & flash_read32(src)) ==
+ flash_read32(src));
+ src += 4, dst2 += 4;
+ break;
+ case FLASH_CFI_64BIT:
+ flag = ((flash_read64(dst2) & flash_read64(src)) ==
+ flash_read64(src));
+ src += 8, dst2 += 8;
+ break;
+ }
+ }
+ if (!flag) {
+ retcode = ERR_NOT_ERASED;
+ goto out_unmap;
+ }
+
+ src = cp;
+ sector = find_sector (info, dest);
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ write_cmd = (info->vendor == CFI_CMDSET_INTEL_PROG_REGIONS) ?
+ FLASH_CMD_WRITE_BUFFER_PROG : FLASH_CMD_WRITE_TO_BUFFER;
+ flash_write_cmd (info, sector, 0, FLASH_CMD_CLEAR_STATUS);
+ flash_write_cmd (info, sector, 0, FLASH_CMD_READ_STATUS);
+ flash_write_cmd (info, sector, 0, write_cmd);
+ retcode = flash_status_check (info, sector,
+ info->buffer_write_tout,
+ "write to buffer");
+ if (retcode == ERR_OK) {
+ /* reduce the number of loops by the width of
+ * the port */
+ cnt = len >> shift;
+ flash_write_cmd (info, sector, 0, cnt - 1);
+ while (cnt-- > 0) {
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ flash_write8(flash_read8(src), dst);
+ src += 1, dst += 1;
+ break;
+ case FLASH_CFI_16BIT:
+ flash_write16(flash_read16(src), dst);
+ src += 2, dst += 2;
+ break;
+ case FLASH_CFI_32BIT:
+ flash_write32(flash_read32(src), dst);
+ src += 4, dst += 4;
+ break;
+ case FLASH_CFI_64BIT:
+ flash_write64(flash_read64(src), dst);
+ src += 8, dst += 8;
+ break;
+ default:
+ retcode = ERR_INVAL;
+ goto out_unmap;
+ }
+ }
+ flash_write_cmd (info, sector, 0,
+ FLASH_CMD_WRITE_BUFFER_CONFIRM);
+ retcode = flash_full_status_check (
+ info, sector, info->buffer_write_tout,
+ "buffer write");
+ }
+
+ break;
+
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+ flash_unlock_seq(info,0);
+
+#ifdef CONFIG_FLASH_SPANSION_S29WS_N
+ offset = ((unsigned long)dst - info->start[sector]) >> shift;
+#endif
+ flash_write_cmd(info, sector, offset, AMD_CMD_WRITE_TO_BUFFER);
+ cnt = len >> shift;
+ flash_write_cmd(info, sector, offset, cnt - 1);
+
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ while (cnt-- > 0) {
+ flash_write8(flash_read8(src), dst);
+ src += 1, dst += 1;
+ }
+ break;
+ case FLASH_CFI_16BIT:
+ while (cnt-- > 0) {
+ flash_write16(flash_read16(src), dst);
+ src += 2, dst += 2;
+ }
+ break;
+ case FLASH_CFI_32BIT:
+ while (cnt-- > 0) {
+ flash_write32(flash_read32(src), dst);
+ src += 4, dst += 4;
+ }
+ break;
+ case FLASH_CFI_64BIT:
+ while (cnt-- > 0) {
+ flash_write64(flash_read64(src), dst);
+ src += 8, dst += 8;
+ }
+ break;
+ default:
+ retcode = ERR_INVAL;
+ goto out_unmap;
+ }
+
+ flash_write_cmd (info, sector, 0, AMD_CMD_WRITE_BUFFER_CONFIRM);
+ if (use_flash_status_poll(info))
+ retcode = flash_status_poll(info, src - (1 << shift),
+ dst - (1 << shift),
+ info->buffer_write_tout,
+ "buffer write");
+ else
+ retcode = flash_full_status_check(info, sector,
+ info->buffer_write_tout,
+ "buffer write");
+ break;
+
+ default:
+ debug ("Unknown Command Set\n");
+ retcode = ERR_INVAL;
+ break;
+ }
+
+out_unmap:
+ return retcode;
+}
+#endif /* CONFIG_SYS_FLASH_USE_BUFFER_WRITE */
+
+
+/*-----------------------------------------------------------------------
+ */
+int flash_erase (flash_info_t * info, int s_first, int s_last)
+{
+ int rcode = 0;
+ int prot;
+ flash_sect_t sect;
+ int st;
+
+ if (info->flash_id != FLASH_MAN_CFI) {
+ puts ("Can't erase unknown flash type - aborted\n");
+ return 1;
+ }
+ if ((s_first < 0) || (s_first > s_last)) {
+ puts ("- no sectors to erase\n");
+ return 1;
+ }
+
+ prot = 0;
+ for (sect = s_first; sect <= s_last; ++sect) {
+ if (info->protect[sect]) {
+ prot++;
+ }
+ }
+ if (prot) {
+ printf ("- Warning: %d protected sectors will not be erased!\n",
+ prot);
+ } else if (flash_verbose) {
+ putc ('\n');
+ }
+
+
+ for (sect = s_first; sect <= s_last; sect++) {
+ if (info->protect[sect] == 0) { /* not protected */
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ flash_write_cmd (info, sect, 0,
+ FLASH_CMD_CLEAR_STATUS);
+ flash_write_cmd (info, sect, 0,
+ FLASH_CMD_BLOCK_ERASE);
+ flash_write_cmd (info, sect, 0,
+ FLASH_CMD_ERASE_CONFIRM);
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+ flash_unlock_seq (info, sect);
+ flash_write_cmd (info, sect,
+ info->addr_unlock1,
+ AMD_CMD_ERASE_START);
+ flash_unlock_seq (info, sect);
+ flash_write_cmd (info, sect, 0,
+ AMD_CMD_ERASE_SECTOR);
+ break;
+#ifdef CONFIG_FLASH_CFI_LEGACY
+ case CFI_CMDSET_AMD_LEGACY:
+ flash_unlock_seq (info, 0);
+ flash_write_cmd (info, 0, info->addr_unlock1,
+ AMD_CMD_ERASE_START);
+ flash_unlock_seq (info, 0);
+ flash_write_cmd (info, sect, 0,
+ AMD_CMD_ERASE_SECTOR);
+ break;
+#endif
+ default:
+ debug ("Unkown flash vendor %d\n",
+ info->vendor);
+ break;
+ }
+
+ if (use_flash_status_poll(info)) {
+ cfiword_t cword = (cfiword_t)0xffffffffffffffffULL;
+ void *dest;
+ dest = flash_map(info, sect, 0);
+ st = flash_status_poll(info, &cword, dest,
+ info->erase_blk_tout, "erase");
+ flash_unmap(info, sect, 0, dest);
+ } else
+ st = flash_full_status_check(info, sect,
+ info->erase_blk_tout,
+ "erase");
+ if (st)
+ rcode = 1;
+ else if (flash_verbose)
+ putc ('.');
+ }
+ }
+
+ if (flash_verbose)
+ puts (" done\n");
+
+ return rcode;
+}
+
+#ifdef CONFIG_SYS_FLASH_EMPTY_INFO
+static int sector_erased(flash_info_t *info, int i)
+{
+ int k;
+ int size;
+ u32 *flash;
+
+ /*
+ * Check if whole sector is erased
+ */
+ size = flash_sector_size(info, i);
+ flash = (u32 *)info->start[i];
+ /* divide by 4 for longword access */
+ size = size >> 2;
+
+ for (k = 0; k < size; k++) {
+ if (flash_read32(flash++) != 0xffffffff)
+ return 0; /* not erased */
+ }
+
+ return 1; /* erased */
+}
+#endif /* CONFIG_SYS_FLASH_EMPTY_INFO */
+
+void flash_print_info (flash_info_t * info)
+{
+ int i;
+
+ if (info->flash_id != FLASH_MAN_CFI) {
+ puts ("missing or unknown FLASH type\n");
+ return;
+ }
+
+ printf ("%s flash (%d x %d)",
+ info->name,
+ (info->portwidth << 3), (info->chipwidth << 3));
+ if (info->size < 1024*1024)
+ printf (" Size: %ld kB in %d Sectors\n",
+ info->size >> 10, info->sector_count);
+ else
+ printf (" Size: %ld MB in %d Sectors\n",
+ info->size >> 20, info->sector_count);
+ printf (" ");
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ printf ("Intel Prog Regions");
+ break;
+ case CFI_CMDSET_INTEL_STANDARD:
+ printf ("Intel Standard");
+ break;
+ case CFI_CMDSET_INTEL_EXTENDED:
+ printf ("Intel Extended");
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ printf ("AMD Standard");
+ break;
+ case CFI_CMDSET_AMD_EXTENDED:
+ printf ("AMD Extended");
+ break;
+#ifdef CONFIG_FLASH_CFI_LEGACY
+ case CFI_CMDSET_AMD_LEGACY:
+ printf ("AMD Legacy");
+ break;
+#endif
+ default:
+ printf ("Unknown (%d)", info->vendor);
+ break;
+ }
+ printf (" command set, Manufacturer ID: 0x%02X, Device ID: 0x",
+ info->manufacturer_id);
+ printf (info->chipwidth == FLASH_CFI_16BIT ? "%04X" : "%02X",
+ info->device_id);
+ if (info->device_id == 0x7E) {
+ printf("%04X", info->device_id2);
+ }
+ printf ("\n Erase timeout: %ld ms, write timeout: %ld ms\n",
+ info->erase_blk_tout,
+ info->write_tout);
+ if (info->buffer_size > 1) {
+ printf (" Buffer write timeout: %ld ms, "
+ "buffer size: %d bytes\n",
+ info->buffer_write_tout,
+ info->buffer_size);
+ }
+
+ puts ("\n Sector Start Addresses:");
+ for (i = 0; i < info->sector_count; ++i) {
+ if (ctrlc())
+ break;
+ if ((i % 5) == 0)
+ putc('\n');
+#ifdef CONFIG_SYS_FLASH_EMPTY_INFO
+ /* print empty and read-only info */
+ printf (" %08lX %c %s ",
+ info->start[i],
+ sector_erased(info, i) ? 'E' : ' ',
+ info->protect[i] ? "RO" : " ");
+#else /* ! CONFIG_SYS_FLASH_EMPTY_INFO */
+ printf (" %08lX %s ",
+ info->start[i],
+ info->protect[i] ? "RO" : " ");
+#endif
+ }
+ putc ('\n');
+ return;
+}
+
+/*-----------------------------------------------------------------------
+ * This is used in a few places in write_buf() to show programming
+ * progress. Making it a function is nasty because it needs to do side
+ * effect updates to digit and dots. Repeated code is nasty too, so
+ * we define it once here.
+ */
+#ifdef CONFIG_FLASH_SHOW_PROGRESS
+#define FLASH_SHOW_PROGRESS(scale, dots, digit, dots_sub) \
+ if (flash_verbose) { \
+ dots -= dots_sub; \
+ if ((scale > 0) && (dots <= 0)) { \
+ if ((digit % 5) == 0) \
+ printf ("%d", digit / 5); \
+ else \
+ putc ('.'); \
+ digit--; \
+ dots += scale; \
+ } \
+ }
+#else
+#define FLASH_SHOW_PROGRESS(scale, dots, digit, dots_sub)
+#endif
+
+/*-----------------------------------------------------------------------
+ * Copy memory to flash, returns:
+ * 0 - OK
+ * 1 - write timeout
+ * 2 - Flash not erased
+ */
+int write_buff (flash_info_t * info, uchar * src, ulong addr, ulong cnt)
+{
+ ulong wp;
+ uchar *p;
+ int aln;
+ cfiword_t cword;
+ int i, rc;
+#ifdef CONFIG_SYS_FLASH_USE_BUFFER_WRITE
+ int buffered_size;
+#endif
+#ifdef CONFIG_FLASH_SHOW_PROGRESS
+ int digit = CONFIG_FLASH_SHOW_PROGRESS;
+ int scale = 0;
+ int dots = 0;
+
+ /*
+ * Suppress if there are fewer than CONFIG_FLASH_SHOW_PROGRESS writes.
+ */
+ if (cnt >= CONFIG_FLASH_SHOW_PROGRESS) {
+ scale = (int)((cnt + CONFIG_FLASH_SHOW_PROGRESS - 1) /
+ CONFIG_FLASH_SHOW_PROGRESS);
+ }
+#endif
+
+ /* get lower aligned address */
+ wp = (addr & ~(info->portwidth - 1));
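+	/*
+	 * wp is addr rounded down to the port width, e.g. portwidth == 2
+	 * and addr == 0x1003 give wp == 0x1002, so one leading byte has to
+	 * be merged back from the current flash contents below.
+	 */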
+
+ /* handle unaligned start */
+ if ((aln = addr - wp) != 0) {
+ cword.l = 0;
+ p = (uchar *)wp;
+ for (i = 0; i < aln; ++i)
+ flash_add_byte (info, &cword, flash_read8(p + i));
+
+ for (; (i < info->portwidth) && (cnt > 0); i++) {
+ flash_add_byte (info, &cword, *src++);
+ cnt--;
+ }
+ for (; (cnt == 0) && (i < info->portwidth); ++i)
+ flash_add_byte (info, &cword, flash_read8(p + i));
+
+ rc = flash_write_cfiword (info, wp, cword);
+ if (rc != 0)
+ return rc;
+
+ wp += i;
+ FLASH_SHOW_PROGRESS(scale, dots, digit, i);
+ }
+
+ /* handle the aligned part */
+#ifdef CONFIG_SYS_FLASH_USE_BUFFER_WRITE
+ buffered_size = (info->portwidth / info->chipwidth);
+ buffered_size *= info->buffer_size;
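+	/*
+	 * buffered_size is the span of one buffer write in bytes: the
+	 * per-chip write buffer size scaled by the number of interleaved
+	 * chips (portwidth / chipwidth).
+	 */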
+ while (cnt >= info->portwidth) {
+ /* prohibit buffer write when buffer_size is 1 */
+ if (info->buffer_size == 1) {
+ cword.l = 0;
+ for (i = 0; i < info->portwidth; i++)
+ flash_add_byte (info, &cword, *src++);
+ if ((rc = flash_write_cfiword (info, wp, cword)) != 0)
+ return rc;
+ wp += info->portwidth;
+ cnt -= info->portwidth;
+ continue;
+ }
+
+ /* write buffer until next buffered_size aligned boundary */
+ i = buffered_size - (wp % buffered_size);
+ if (i > cnt)
+ i = cnt;
+ if ((rc = flash_write_cfibuffer (info, wp, src, i)) != ERR_OK)
+ return rc;
+ i -= i & (info->portwidth - 1);
+ wp += i;
+ src += i;
+ cnt -= i;
+ FLASH_SHOW_PROGRESS(scale, dots, digit, i);
+ }
+#else
+ while (cnt >= info->portwidth) {
+ cword.l = 0;
+ for (i = 0; i < info->portwidth; i++) {
+ flash_add_byte (info, &cword, *src++);
+ }
+ if ((rc = flash_write_cfiword (info, wp, cword)) != 0)
+ return rc;
+ wp += info->portwidth;
+ cnt -= info->portwidth;
+ FLASH_SHOW_PROGRESS(scale, dots, digit, info->portwidth);
+ }
+#endif /* CONFIG_SYS_FLASH_USE_BUFFER_WRITE */
+
+ if (cnt == 0) {
+ return (0);
+ }
+
+ /*
+ * handle unaligned tail bytes
+ */
+ cword.l = 0;
+ p = (uchar *)wp;
+ for (i = 0; (i < info->portwidth) && (cnt > 0); ++i) {
+ flash_add_byte (info, &cword, *src++);
+ --cnt;
+ }
+ for (; i < info->portwidth; ++i)
+ flash_add_byte (info, &cword, flash_read8(p + i));
+
+ return flash_write_cfiword (info, wp, cword);
+}
+
+/*-----------------------------------------------------------------------
+ */
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+
+int flash_real_protect (flash_info_t * info, long sector, int prot)
+{
+ int retcode = 0;
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ /*
+ * see errata called
+ * "Numonyx Axcell P33/P30 Specification Update" :)
+ */
+ flash_write_cmd (info, sector, 0, FLASH_CMD_READ_ID);
+ if (!flash_isequal (info, sector, FLASH_OFFSET_PROTECT,
+ prot)) {
+ /*
+			 * The set/clear command must follow FLASH_CMD_PROTECT
+			 * within 20us, so disable interrupts which might
+			 * cause a timeout here.
+ */
+ int flag = disable_interrupts ();
+ unsigned short cmd;
+
+ if (prot)
+ cmd = FLASH_CMD_PROTECT_SET;
+ else
+ cmd = FLASH_CMD_PROTECT_CLEAR;
+
+ flash_write_cmd (info, sector, 0,
+ FLASH_CMD_PROTECT);
+ flash_write_cmd (info, sector, 0, cmd);
+ /* re-enable interrupts if necessary */
+ if (flag)
+ enable_interrupts ();
+ }
+ break;
+ case CFI_CMDSET_AMD_EXTENDED:
+ case CFI_CMDSET_AMD_STANDARD:
+ /* U-Boot only checks the first byte */
+ if (info->manufacturer_id == (uchar)ATM_MANUFACT) {
+ if (prot) {
+ flash_unlock_seq (info, 0);
+ flash_write_cmd (info, 0,
+ info->addr_unlock1,
+ ATM_CMD_SOFTLOCK_START);
+ flash_unlock_seq (info, 0);
+ flash_write_cmd (info, sector, 0,
+ ATM_CMD_LOCK_SECT);
+ } else {
+ flash_write_cmd (info, 0,
+ info->addr_unlock1,
+ AMD_CMD_UNLOCK_START);
+ if (info->device_id == ATM_ID_BV6416)
+ flash_write_cmd (info, sector,
+ 0, ATM_CMD_UNLOCK_SECT);
+ }
+ }
+ break;
+#ifdef CONFIG_FLASH_CFI_LEGACY
+ case CFI_CMDSET_AMD_LEGACY:
+ flash_write_cmd (info, sector, 0, FLASH_CMD_CLEAR_STATUS);
+ flash_write_cmd (info, sector, 0, FLASH_CMD_PROTECT);
+ if (prot)
+ flash_write_cmd (info, sector, 0, FLASH_CMD_PROTECT_SET);
+ else
+ flash_write_cmd (info, sector, 0, FLASH_CMD_PROTECT_CLEAR);
+#endif
+	}
+
+ /*
+ * Flash needs to be in status register read mode for
+ * flash_full_status_check() to work correctly
+ */
+ flash_write_cmd(info, sector, 0, FLASH_CMD_READ_STATUS);
+ if ((retcode =
+ flash_full_status_check (info, sector, info->erase_blk_tout,
+ prot ? "protect" : "unprotect")) == 0) {
+
+ info->protect[sector] = prot;
+
+ /*
+ * On some of Intel's flash chips (marked via legacy_unlock)
+		 * unprotecting a single sector clears the locks of all sectors.
+ */
+ if ((prot == 0) && (info->legacy_unlock)) {
+ flash_sect_t i;
+
+ for (i = 0; i < info->sector_count; i++) {
+ if (info->protect[i])
+ flash_real_protect (info, i, 1);
+ }
+ }
+ }
+ return retcode;
+}
+
+/*-----------------------------------------------------------------------
+ * flash_read_user_serial - read the OneTimeProgramming cells
+ */
+void flash_read_user_serial (flash_info_t * info, void *buffer, int offset,
+ int len)
+{
+ uchar *src;
+ uchar *dst;
+
+ dst = buffer;
+ src = flash_map (info, 0, FLASH_OFFSET_USER_PROTECTION);
+ flash_write_cmd (info, 0, 0, FLASH_CMD_READ_ID);
+ memcpy (dst, src + offset, len);
+ flash_write_cmd (info, 0, 0, info->cmd_reset);
+ flash_unmap(info, 0, FLASH_OFFSET_USER_PROTECTION, src);
+}
+
+/*
+ * flash_read_factory_serial - read the device Id from the protection area
+ */
+void flash_read_factory_serial (flash_info_t * info, void *buffer, int offset,
+ int len)
+{
+ uchar *src;
+
+ src = flash_map (info, 0, FLASH_OFFSET_INTEL_PROTECTION);
+ flash_write_cmd (info, 0, 0, FLASH_CMD_READ_ID);
+ memcpy (buffer, src + offset, len);
+ flash_write_cmd (info, 0, 0, info->cmd_reset);
+ flash_unmap(info, 0, FLASH_OFFSET_INTEL_PROTECTION, src);
+}
+
+#endif /* CONFIG_SYS_FLASH_PROTECTION */
+
+/*-----------------------------------------------------------------------
+ * Reverse the order of the erase regions in the CFI QRY structure.
+ * This is needed for chips that are either a) correctly detected as
+ * top-boot, or b) buggy.
+ */
+static void cfi_reverse_geometry(struct cfi_qry *qry)
+{
+ unsigned int i, j;
+ u32 tmp;
+
+ for (i = 0, j = qry->num_erase_regions - 1; i < j; i++, j--) {
+ tmp = qry->erase_region_info[i];
+ qry->erase_region_info[i] = qry->erase_region_info[j];
+ qry->erase_region_info[j] = tmp;
+ }
+}
+
+/*-----------------------------------------------------------------------
+ * read jedec ids from device and set corresponding fields in info struct
+ *
+ * Note: assumes info->vendor, info->portwidth and info->chipwidth are correct
+ *
+ */
+static void cmdset_intel_read_jedec_ids(flash_info_t *info)
+{
+ flash_write_cmd(info, 0, 0, FLASH_CMD_RESET);
+ flash_write_cmd(info, 0, 0, FLASH_CMD_READ_ID);
+	udelay(1000); /* some flash chips are slow to respond */
+ info->manufacturer_id = flash_read_uchar (info,
+ FLASH_OFFSET_MANUFACTURER_ID);
+ info->device_id = (info->chipwidth == FLASH_CFI_16BIT) ?
+ flash_read_word (info, FLASH_OFFSET_DEVICE_ID) :
+ flash_read_uchar (info, FLASH_OFFSET_DEVICE_ID);
+ flash_write_cmd(info, 0, 0, FLASH_CMD_RESET);
+}
+
+static int cmdset_intel_init(flash_info_t *info, struct cfi_qry *qry)
+{
+ info->cmd_reset = FLASH_CMD_RESET;
+
+ cmdset_intel_read_jedec_ids(info);
+ flash_write_cmd(info, 0, info->cfi_offset, FLASH_CMD_CFI);
+
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+ /* read legacy lock/unlock bit from intel flash */
+ if (info->ext_addr) {
+ info->legacy_unlock = flash_read_uchar (info,
+ info->ext_addr + 5) & 0x08;
+ }
+#endif
+
+ return 0;
+}
+
+static void cmdset_amd_read_jedec_ids(flash_info_t *info)
+{
+ ushort bankId = 0;
+ uchar manuId;
+
+ flash_write_cmd(info, 0, 0, AMD_CMD_RESET);
+ flash_unlock_seq(info, 0);
+ flash_write_cmd(info, 0, info->addr_unlock1, FLASH_CMD_READ_ID);
+	udelay(1000); /* some flash chips are slow to respond */
+
+ manuId = flash_read_uchar (info, FLASH_OFFSET_MANUFACTURER_ID);
+ /* JEDEC JEP106Z specifies ID codes up to bank 7 */
+ while (manuId == FLASH_CONTINUATION_CODE && bankId < 0x800) {
+ bankId += 0x100;
+ manuId = flash_read_uchar (info,
+ bankId | FLASH_OFFSET_MANUFACTURER_ID);
+ }
+ info->manufacturer_id = manuId;
+
+ switch (info->chipwidth){
+ case FLASH_CFI_8BIT:
+ info->device_id = flash_read_uchar (info,
+ FLASH_OFFSET_DEVICE_ID);
+ if (info->device_id == 0x7E) {
+ /* AMD 3-byte (expanded) device ids */
+ info->device_id2 = flash_read_uchar (info,
+ FLASH_OFFSET_DEVICE_ID2);
+ info->device_id2 <<= 8;
+ info->device_id2 |= flash_read_uchar (info,
+ FLASH_OFFSET_DEVICE_ID3);
+ }
+ break;
+ case FLASH_CFI_16BIT:
+ info->device_id = flash_read_word (info,
+ FLASH_OFFSET_DEVICE_ID);
+ break;
+ default:
+ break;
+ }
+ flash_write_cmd(info, 0, 0, AMD_CMD_RESET);
+}
+
+static int cmdset_amd_init(flash_info_t *info, struct cfi_qry *qry)
+{
+ info->cmd_reset = AMD_CMD_RESET;
+
+ cmdset_amd_read_jedec_ids(info);
+ flash_write_cmd(info, 0, info->cfi_offset, FLASH_CMD_CFI);
+
+ return 0;
+}
+
+#ifdef CONFIG_FLASH_CFI_LEGACY
+static void flash_read_jedec_ids (flash_info_t * info)
+{
+ info->manufacturer_id = 0;
+ info->device_id = 0;
+ info->device_id2 = 0;
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ cmdset_intel_read_jedec_ids(info);
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+ cmdset_amd_read_jedec_ids(info);
+ break;
+ default:
+ break;
+ }
+}
+
+/*-----------------------------------------------------------------------
+ * Call board code to request info about non-CFI flash.
+ * board_flash_get_legacy needs to fill in at least:
+ * info->portwidth, info->chipwidth and info->interface for Jedec probing.
+ */
+static int flash_detect_legacy(phys_addr_t base, int banknum)
+{
+ flash_info_t *info = &flash_info[banknum];
+
+ if (board_flash_get_legacy(base, banknum, info)) {
+ /* board code may have filled info completely. If not, we
+ use JEDEC ID probing. */
+ if (!info->vendor) {
+ int modes[] = {
+ CFI_CMDSET_AMD_STANDARD,
+ CFI_CMDSET_INTEL_STANDARD
+ };
+ int i;
+
+ for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
+ info->vendor = modes[i];
+ info->start[0] =
+ (ulong)map_physmem(base,
+ info->portwidth,
+ MAP_NOCACHE);
+ if (info->portwidth == FLASH_CFI_8BIT
+ && info->interface == FLASH_CFI_X8X16) {
+ info->addr_unlock1 = 0x2AAA;
+ info->addr_unlock2 = 0x5555;
+ } else {
+ info->addr_unlock1 = 0x5555;
+ info->addr_unlock2 = 0x2AAA;
+ }
+ flash_read_jedec_ids(info);
+ debug("JEDEC PROBE: ID %x %x %x\n",
+ info->manufacturer_id,
+ info->device_id,
+ info->device_id2);
+ if (jedec_flash_match(info, info->start[0]))
+ break;
+ else
+ unmap_physmem((void *)info->start[0],
+ MAP_NOCACHE);
+ }
+ }
+
+ switch(info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ info->cmd_reset = FLASH_CMD_RESET;
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+ case CFI_CMDSET_AMD_LEGACY:
+ info->cmd_reset = AMD_CMD_RESET;
+ break;
+ }
+ info->flash_id = FLASH_MAN_CFI;
+ return 1;
+ }
+ return 0; /* use CFI */
+}
+#else
+static inline int flash_detect_legacy(phys_addr_t base, int banknum)
+{
+ return 0; /* use CFI */
+}
+#endif
+
+/*-----------------------------------------------------------------------
+ * detect if flash is compatible with the Common Flash Interface (CFI)
+ * http://www.jedec.org/download/search/jesd68.pdf
+ */
+static void flash_read_cfi (flash_info_t *info, void *buf,
+ unsigned int start, size_t len)
+{
+ u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ p[i] = flash_read_uchar(info, start + i);
+}
+
+void __flash_cmd_reset(flash_info_t *info)
+{
+ /*
+ * We do not yet know what kind of commandset to use, so we issue
+ * the reset command in both Intel and AMD variants, in the hope
+ * that AMD flash roms ignore the Intel command.
+ */
+ flash_write_cmd(info, 0, 0, AMD_CMD_RESET);
+ flash_write_cmd(info, 0, 0, FLASH_CMD_RESET);
+}
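+/*
+ * flash_cmd_reset() is a weak alias for the routine above, so board code
+ * can override it when this generic dual reset sequence is not suitable.
+ */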
+void flash_cmd_reset(flash_info_t *info)
+ __attribute__((weak,alias("__flash_cmd_reset")));
+
+static int __flash_detect_cfi (flash_info_t * info, struct cfi_qry *qry)
+{
+ int cfi_offset;
+
+ /* Issue FLASH reset command */
+ flash_cmd_reset(info);
+
+ for (cfi_offset=0;
+ cfi_offset < sizeof(flash_offset_cfi) / sizeof(uint);
+ cfi_offset++) {
+ flash_write_cmd (info, 0, flash_offset_cfi[cfi_offset],
+ FLASH_CMD_CFI);
+ if (flash_isequal (info, 0, FLASH_OFFSET_CFI_RESP, 'Q')
+ && flash_isequal (info, 0, FLASH_OFFSET_CFI_RESP + 1, 'R')
+ && flash_isequal (info, 0, FLASH_OFFSET_CFI_RESP + 2, 'Y')) {
+ flash_read_cfi(info, qry, FLASH_OFFSET_CFI_RESP,
+ sizeof(struct cfi_qry));
+ info->interface = le16_to_cpu(qry->interface_desc);
+
+ info->cfi_offset = flash_offset_cfi[cfi_offset];
+ debug ("device interface is %d\n",
+ info->interface);
+ debug ("found port %d chip %d ",
+ info->portwidth, info->chipwidth);
+ debug ("port %d bits chip %d bits\n",
+ info->portwidth << CFI_FLASH_SHIFT_WIDTH,
+ info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+
+ /* calculate command offsets as in the Linux driver */
+ info->addr_unlock1 = 0x555;
+ info->addr_unlock2 = 0x2aa;
+
+ /*
+ * modify the unlock address if we are
+ * in compatibility mode
+ */
+ if ( /* x8/x16 in x8 mode */
+ ((info->chipwidth == FLASH_CFI_BY8) &&
+ (info->interface == FLASH_CFI_X8X16)) ||
+ /* x16/x32 in x16 mode */
+ ((info->chipwidth == FLASH_CFI_BY16) &&
+ (info->interface == FLASH_CFI_X16X32)))
+ {
+ info->addr_unlock1 = 0xaaa;
+ info->addr_unlock2 = 0x555;
+ }
+
+ info->name = "CFI conformant";
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int flash_detect_cfi (flash_info_t * info, struct cfi_qry *qry)
+{
+ debug ("flash detect cfi\n");
+
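+	/*
+	 * Probe port widths from CONFIG_SYS_FLASH_CFI_WIDTH up to 64 bit,
+	 * and for each port width all chip widths from 8 bit up to the
+	 * port width, until a CFI "QRY" response is found.
+	 */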
+ for (info->portwidth = CONFIG_SYS_FLASH_CFI_WIDTH;
+ info->portwidth <= FLASH_CFI_64BIT; info->portwidth <<= 1) {
+ for (info->chipwidth = FLASH_CFI_BY8;
+ info->chipwidth <= info->portwidth;
+ info->chipwidth <<= 1)
+ if (__flash_detect_cfi(info, qry))
+ return 1;
+ }
+ debug ("not found\n");
+ return 0;
+}
+
+/*
+ * Manufacturer-specific quirks. Add workarounds for geometry
+ * reversal, etc. here.
+ */
+static void flash_fixup_amd(flash_info_t *info, struct cfi_qry *qry)
+{
+ /* check if flash geometry needs reversal */
+ if (qry->num_erase_regions > 1) {
+ /* reverse geometry if top boot part */
+ if (info->cfi_version < 0x3131) {
+ /* CFI < 1.1, try to guess from device id */
+ if ((info->device_id & 0x80) != 0)
+ cfi_reverse_geometry(qry);
+ } else if (flash_read_uchar(info, info->ext_addr + 0xf) == 3) {
+			/* CFI >= 1.1, deduce from the top/bottom boot flag */
+ /* note: ext_addr is valid since cfi_version > 0 */
+ cfi_reverse_geometry(qry);
+ }
+ }
+}
+
+static void flash_fixup_atmel(flash_info_t *info, struct cfi_qry *qry)
+{
+ int reverse_geometry = 0;
+
+ /* Check the "top boot" bit in the PRI */
+ if (info->ext_addr && !(flash_read_uchar(info, info->ext_addr + 6) & 1))
+ reverse_geometry = 1;
+
+	/* The AT49BV6416(T) lists its erase regions in the wrong order.
+	 * However, its device ID is identical to that of the non-broken
+	 * AT49BV642D; the two differ only in the high byte.
+ */
+ if (info->device_id == 0xd6 || info->device_id == 0xd2)
+ reverse_geometry = !reverse_geometry;
+
+ if (reverse_geometry)
+ cfi_reverse_geometry(qry);
+}
+
+static void flash_fixup_stm(flash_info_t *info, struct cfi_qry *qry)
+{
+ /* check if flash geometry needs reversal */
+ if (qry->num_erase_regions > 1) {
+ /* reverse geometry if top boot part */
+ if (info->cfi_version < 0x3131) {
+ /* CFI < 1.1, guess by device id (M29W320{DT,ET} only) */
+ if (info->device_id == 0x22CA ||
+ info->device_id == 0x2256) {
+ cfi_reverse_geometry(qry);
+ }
+ }
+ }
+}
+
+/*
+ * The following code cannot be run from FLASH!
+ *
+ */
+ulong flash_get_size (phys_addr_t base, int banknum)
+{
+ flash_info_t *info = &flash_info[banknum];
+ int i, j;
+ flash_sect_t sect_cnt;
+ phys_addr_t sector;
+ unsigned long tmp;
+ int size_ratio;
+ uchar num_erase_regions;
+ int erase_region_size;
+ int erase_region_count;
+ struct cfi_qry qry;
+ unsigned long max_size;
+
+ memset(&qry, 0, sizeof(qry));
+
+ info->ext_addr = 0;
+ info->cfi_version = 0;
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+ info->legacy_unlock = 0;
+#endif
+
+ info->start[0] = (ulong)map_physmem(base, info->portwidth, MAP_NOCACHE);
+
+ if (flash_detect_cfi (info, &qry)) {
+ info->vendor = le16_to_cpu(qry.p_id);
+ info->ext_addr = le16_to_cpu(qry.p_adr);
+ num_erase_regions = qry.num_erase_regions;
+
+ if (info->ext_addr) {
+ info->cfi_version = (ushort) flash_read_uchar (info,
+ info->ext_addr + 3) << 8;
+ info->cfi_version |= (ushort) flash_read_uchar (info,
+ info->ext_addr + 4);
+ }
+
+#ifdef DEBUG
+ flash_printqry (&qry);
+#endif
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ cmdset_intel_init(info, &qry);
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+ cmdset_amd_init(info, &qry);
+ break;
+ default:
+ printf("CFI: Unknown command set 0x%x\n",
+ info->vendor);
+ /*
+ * Unfortunately, this means we don't know how
+ * to get the chip back to Read mode. Might
+ * as well try an Intel-style reset...
+ */
+ flash_write_cmd(info, 0, 0, FLASH_CMD_RESET);
+ return 0;
+ }
+
+ /* Do manufacturer-specific fixups */
+ switch (info->manufacturer_id) {
+ case 0x0001:
+ flash_fixup_amd(info, &qry);
+ break;
+ case 0x001f:
+ flash_fixup_atmel(info, &qry);
+ break;
+ case 0x0020:
+ flash_fixup_stm(info, &qry);
+ break;
+ }
+
+ debug ("manufacturer is %d\n", info->vendor);
+ debug ("manufacturer id is 0x%x\n", info->manufacturer_id);
+ debug ("device id is 0x%x\n", info->device_id);
+ debug ("device id2 is 0x%x\n", info->device_id2);
+ debug ("cfi version is 0x%04x\n", info->cfi_version);
+
+ size_ratio = info->portwidth / info->chipwidth;
+ /* if the chip is x8/x16 reduce the ratio by half */
+ if ((info->interface == FLASH_CFI_X8X16)
+ && (info->chipwidth == FLASH_CFI_BY8)) {
+ size_ratio >>= 1;
+ }
+ debug ("size_ratio %d port %d bits chip %d bits\n",
+ size_ratio, info->portwidth << CFI_FLASH_SHIFT_WIDTH,
+ info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+ info->size = 1 << qry.dev_size;
+ /* multiply the size by the number of chips */
+ info->size *= size_ratio;
+ max_size = cfi_flash_bank_size(banknum);
+ if (max_size && (info->size > max_size)) {
+ debug("[truncated from %ldMiB]", info->size >> 20);
+ info->size = max_size;
+ }
+ debug ("found %d erase regions\n", num_erase_regions);
+ sect_cnt = 0;
+ sector = base;
+ for (i = 0; i < num_erase_regions; i++) {
+			if (i >= NUM_ERASE_REGIONS) {
+ printf ("%d erase regions found, only %d used\n",
+ num_erase_regions, NUM_ERASE_REGIONS);
+ break;
+ }
+
+ tmp = le32_to_cpu(qry.erase_region_info[i]);
+ debug("erase region %u: 0x%08lx\n", i, tmp);
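+			/*
+			 * CFI encodes each erase region as: bits 0-15 hold
+			 * the number of sectors minus one, bits 16-31 hold
+			 * the sector size in units of 256 bytes (a value of
+			 * 0 meaning 128 bytes), as decoded below.
+			 */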
+
+ erase_region_count = (tmp & 0xffff) + 1;
+ tmp >>= 16;
+ erase_region_size =
+ (tmp & 0xffff) ? ((tmp & 0xffff) * 256) : 128;
+ debug ("erase_region_count = %d erase_region_size = %d\n",
+ erase_region_count, erase_region_size);
+ for (j = 0; j < erase_region_count; j++) {
+ if (sector - base >= info->size)
+ break;
+ if (sect_cnt >= CONFIG_SYS_MAX_FLASH_SECT) {
+ printf("ERROR: too many flash sectors\n");
+ break;
+ }
+ info->start[sect_cnt] =
+ (ulong)map_physmem(sector,
+ info->portwidth,
+ MAP_NOCACHE);
+ sector += (erase_region_size * size_ratio);
+
+ /*
+ * Only read protection status from
+ * supported devices (intel...)
+ */
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ case CFI_CMDSET_INTEL_STANDARD:
+ /*
+ * Set flash to read-id mode. Otherwise
+ * reading protected status is not
+ * guaranteed.
+ */
+ flash_write_cmd(info, sect_cnt, 0,
+ FLASH_CMD_READ_ID);
+ info->protect[sect_cnt] =
+ flash_isset (info, sect_cnt,
+ FLASH_OFFSET_PROTECT,
+ FLASH_STATUS_PROTECT);
+ break;
+ default:
+ /* default: not protected */
+ info->protect[sect_cnt] = 0;
+ }
+
+ sect_cnt++;
+ }
+ }
+
+ info->sector_count = sect_cnt;
+ info->buffer_size = 1 << le16_to_cpu(qry.max_buf_write_size);
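+		/*
+		 * CFI reports timeouts as exponents: the typical time is
+		 * 2^typ (ms for block erase, us for writes) and the worst
+		 * case is the typical time multiplied by 2^max, hence the
+		 * products and the us-to-ms rounding below.
+		 */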
+ tmp = 1 << qry.block_erase_timeout_typ;
+ info->erase_blk_tout = tmp *
+ (1 << qry.block_erase_timeout_max);
+ tmp = (1 << qry.buf_write_timeout_typ) *
+ (1 << qry.buf_write_timeout_max);
+
+ /* round up when converting to ms */
+ info->buffer_write_tout = (tmp + 999) / 1000;
+ tmp = (1 << qry.word_write_timeout_typ) *
+ (1 << qry.word_write_timeout_max);
+ /* round up when converting to ms */
+ info->write_tout = (tmp + 999) / 1000;
+ info->flash_id = FLASH_MAN_CFI;
+ if ((info->interface == FLASH_CFI_X8X16) &&
+ (info->chipwidth == FLASH_CFI_BY8)) {
+ /* XXX - Need to test on x8/x16 in parallel. */
+ info->portwidth >>= 1;
+ }
+
+ flash_write_cmd (info, 0, 0, info->cmd_reset);
+ }
+
+ return (info->size);
+}
+
+#ifdef CONFIG_FLASH_CFI_MTD
+void flash_set_verbose(uint v)
+{
+ flash_verbose = v;
+}
+#endif
+
+static void cfi_flash_set_config_reg(u32 base, u16 val)
+{
+#ifdef CONFIG_SYS_CFI_FLASH_CONFIG_REGS
+ /*
+ * Only set this config register if really defined
+ * to a valid value (0xffff is invalid)
+ */
+ if (val == 0xffff)
+ return;
+
+ /*
+ * Set configuration register. Data is "encrypted" in the 16 lower
+ * address bits.
+ */
+ flash_write16(FLASH_CMD_SETUP, (void *)(base + (val << 1)));
+ flash_write16(FLASH_CMD_SET_CR_CONFIRM, (void *)(base + (val << 1)));
+
+ /*
+ * Finally issue reset-command to bring device back to
+ * read-array mode
+ */
+ flash_write16(FLASH_CMD_RESET, (void *)base);
+#endif
+}
+
+/*-----------------------------------------------------------------------
+ */
+unsigned long flash_init (void)
+{
+ unsigned long size = 0;
+ int i;
+#if defined(CONFIG_SYS_FLASH_AUTOPROTECT_LIST)
+ struct apl_s {
+ ulong start;
+ ulong size;
+ } apl[] = CONFIG_SYS_FLASH_AUTOPROTECT_LIST;
+#endif
+
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+ /* read environment from EEPROM */
+ char s[64];
+ getenv_f("unlock", s, sizeof(s));
+#endif
+
+ /* Init: no FLASHes known */
+ for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; ++i) {
+ flash_info[i].flash_id = FLASH_UNKNOWN;
+
+ /* Optionally write flash configuration register */
+ cfi_flash_set_config_reg(cfi_flash_bank_addr(i),
+ cfi_flash_config_reg(i));
+
+ if (!flash_detect_legacy(cfi_flash_bank_addr(i), i))
+ flash_get_size(cfi_flash_bank_addr(i), i);
+ size += flash_info[i].size;
+ if (flash_info[i].flash_id == FLASH_UNKNOWN) {
+#ifndef CONFIG_SYS_FLASH_QUIET_TEST
+ printf ("## Unknown flash on Bank %d "
+ "- Size = 0x%08lx = %ld MB\n",
+ i+1, flash_info[i].size,
+ flash_info[i].size >> 20);
+#endif /* CONFIG_SYS_FLASH_QUIET_TEST */
+ }
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+ else if ((s != NULL) && (strcmp(s, "yes") == 0)) {
+ /*
+			 * Only the U-Boot image and its environment
+			 * are protected; all other sectors are
+ * unprotected (unlocked) if flash hardware
+ * protection is used (CONFIG_SYS_FLASH_PROTECTION)
+ * and the environment variable "unlock" is
+ * set to "yes".
+ */
+ if (flash_info[i].legacy_unlock) {
+ int k;
+
+ /*
+ * Disable legacy_unlock temporarily,
+ * since flash_real_protect would
+ * relock all other sectors again
+ * otherwise.
+ */
+ flash_info[i].legacy_unlock = 0;
+
+ /*
+ * Legacy unlocking (e.g. Intel J3) ->
+ * unlock only one sector. This will
+ * unlock all sectors.
+ */
+ flash_real_protect (&flash_info[i], 0, 0);
+
+ flash_info[i].legacy_unlock = 1;
+
+ /*
+ * Manually mark other sectors as
+ * unlocked (unprotected)
+ */
+ for (k = 1; k < flash_info[i].sector_count; k++)
+ flash_info[i].protect[k] = 0;
+ } else {
+ /*
+				 * No legacy unlocking -> unlock all sectors
+ */
+ flash_protect (FLAG_PROTECT_CLEAR,
+ flash_info[i].start[0],
+ flash_info[i].start[0]
+ + flash_info[i].size - 1,
+ &flash_info[i]);
+ }
+ }
+#endif /* CONFIG_SYS_FLASH_PROTECTION */
+ }
+
+ /* Monitor protection ON by default */
+#if (CONFIG_SYS_MONITOR_BASE >= CONFIG_SYS_FLASH_BASE) && \
+ (!defined(CONFIG_MONITOR_IS_IN_RAM))
+ flash_protect (FLAG_PROTECT_SET,
+ CONFIG_SYS_MONITOR_BASE,
+ CONFIG_SYS_MONITOR_BASE + monitor_flash_len - 1,
+ flash_get_info(CONFIG_SYS_MONITOR_BASE));
+#endif
+
+ /* Environment protection ON by default */
+#ifdef CONFIG_ENV_IS_IN_FLASH
+ flash_protect (FLAG_PROTECT_SET,
+ CONFIG_ENV_ADDR,
+ CONFIG_ENV_ADDR + CONFIG_ENV_SECT_SIZE - 1,
+ flash_get_info(CONFIG_ENV_ADDR));
+#endif
+
+ /* Redundant environment protection ON by default */
+#ifdef CONFIG_ENV_ADDR_REDUND
+ flash_protect (FLAG_PROTECT_SET,
+ CONFIG_ENV_ADDR_REDUND,
+ CONFIG_ENV_ADDR_REDUND + CONFIG_ENV_SECT_SIZE - 1,
+ flash_get_info(CONFIG_ENV_ADDR_REDUND));
+#endif
+
+#if defined(CONFIG_SYS_FLASH_AUTOPROTECT_LIST)
+ for (i = 0; i < (sizeof(apl) / sizeof(struct apl_s)); i++) {
+ debug("autoprotecting from %08x to %08x\n",
+ apl[i].start, apl[i].start + apl[i].size - 1);
+ flash_protect (FLAG_PROTECT_SET,
+ apl[i].start,
+ apl[i].start + apl[i].size - 1,
+ flash_get_info(apl[i].start));
+ }
+#endif
+
+#ifdef CONFIG_FLASH_CFI_MTD
+ cfi_mtd_init();
+#endif
+
+ return (size);
+}
diff --git a/u-boot/drivers/mtd/cfi_mtd.c b/u-boot/drivers/mtd/cfi_mtd.c
new file mode 100644
index 0000000..cbcc165
--- /dev/null
+++ b/u-boot/drivers/mtd/cfi_mtd.c
@@ -0,0 +1,276 @@
+/*
+ * (C) Copyright 2008 Semihalf
+ *
+ * Written by: Piotr Ziecik <kosmo@semihalf.com>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+#include <common.h>
+#include <flash.h>
+#include <malloc.h>
+
+#include <asm/errno.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/concat.h>
+#include <mtd/cfi_flash.h>
+
+static struct mtd_info cfi_mtd_info[CFI_MAX_FLASH_BANKS];
+static char cfi_mtd_names[CFI_MAX_FLASH_BANKS][16];
+#ifdef CONFIG_MTD_CONCAT
+static char c_mtd_name[16];
+#endif
+
+static int cfi_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ flash_info_t *fi = mtd->priv;
+ size_t a_start = fi->start[0] + instr->addr;
+ size_t a_end = a_start + instr->len;
+ int s_first = -1;
+ int s_last = -1;
+ int error, sect;
+
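+	/*
+	 * Map the byte range [a_start, a_end) onto whole sectors; both ends
+	 * must line up with sector boundaries (or the end of the device),
+	 * otherwise the request is rejected with -EINVAL below.
+	 */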
+ for (sect = 0; sect < fi->sector_count; sect++) {
+ if (a_start == fi->start[sect])
+ s_first = sect;
+
+ if (sect < fi->sector_count - 1) {
+ if (a_end == fi->start[sect + 1]) {
+ s_last = sect;
+ break;
+ }
+ } else {
+ s_last = sect;
+ break;
+ }
+ }
+
+ if (s_first >= 0 && s_first <= s_last) {
+ instr->state = MTD_ERASING;
+
+ flash_set_verbose(0);
+ error = flash_erase(fi, s_first, s_last);
+ flash_set_verbose(1);
+
+ if (error) {
+ instr->state = MTD_ERASE_FAILED;
+ return -EIO;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int cfi_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ flash_info_t *fi = mtd->priv;
+ u_char *f = (u_char*)(fi->start[0]) + from;
+
+ memcpy(buf, f, len);
+ *retlen = len;
+
+ return 0;
+}
+
+static int cfi_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ flash_info_t *fi = mtd->priv;
+ u_long t = fi->start[0] + to;
+ int error;
+
+ flash_set_verbose(0);
+ error = write_buff(fi, (u_char*)buf, t, len);
+ flash_set_verbose(1);
+
+ if (!error) {
+ *retlen = len;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static void cfi_mtd_sync(struct mtd_info *mtd)
+{
+ /*
+ * This function should wait until all pending operations
+	 * finish. However, this driver is fully synchronous, so
+	 * this function returns immediately.
+ */
+}
+
+static int cfi_mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ flash_info_t *fi = mtd->priv;
+
+ flash_set_verbose(0);
+ flash_protect(FLAG_PROTECT_SET, fi->start[0] + ofs,
+ fi->start[0] + ofs + len - 1, fi);
+ flash_set_verbose(1);
+
+ return 0;
+}
+
+static int cfi_mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ flash_info_t *fi = mtd->priv;
+
+ flash_set_verbose(0);
+ flash_protect(FLAG_PROTECT_CLEAR, fi->start[0] + ofs,
+ fi->start[0] + ofs + len - 1, fi);
+ flash_set_verbose(1);
+
+ return 0;
+}
+
+static int cfi_mtd_set_erasesize(struct mtd_info *mtd, flash_info_t *fi)
+{
+ int sect_size = 0;
+ int sect_size_old = 0;
+ int sect;
+ int regions = 0;
+ int numblocks = 0;
+ ulong offset;
+ ulong base_addr;
+
+ /*
+ * First detect the number of eraseregions so that we can allocate
+ * the array of eraseregions correctly
+ */
+ for (sect = 0; sect < fi->sector_count; sect++) {
+ if (sect_size_old != flash_sector_size(fi, sect))
+ regions++;
+ sect_size_old = flash_sector_size(fi, sect);
+ }
+
+ switch (regions) {
+ case 0:
+ return 1;
+ case 1: /* flash has uniform erase size */
+ mtd->numeraseregions = 0;
+ mtd->erasesize = sect_size_old;
+ return 0;
+ }
+
+ mtd->numeraseregions = regions;
+ mtd->eraseregions = malloc(sizeof(struct mtd_erase_region_info) * regions);
+
+ /*
+ * Now detect the largest sector and fill the eraseregions
+ */
+ regions = 0;
+ base_addr = offset = fi->start[0];
+ sect_size_old = flash_sector_size(fi, 0);
+ for (sect = 0; sect < fi->sector_count; sect++) {
+ if (sect_size_old != flash_sector_size(fi, sect)) {
+ mtd->eraseregions[regions].offset = offset - base_addr;
+ mtd->eraseregions[regions].erasesize = sect_size_old;
+ mtd->eraseregions[regions].numblocks = numblocks;
+ /* Now start counting the next eraseregions */
+ numblocks = 0;
+ regions++;
+ offset = fi->start[sect];
+ }
+ numblocks++;
+
+ /*
+ * Select the largest sector size as erasesize (e.g. for UBI)
+ */
+ if (flash_sector_size(fi, sect) > sect_size)
+ sect_size = flash_sector_size(fi, sect);
+
+ sect_size_old = flash_sector_size(fi, sect);
+ }
+
+ /*
+ * Set the last region
+ */
+ mtd->eraseregions[regions].offset = offset - base_addr;
+ mtd->eraseregions[regions].erasesize = sect_size_old;
+ mtd->eraseregions[regions].numblocks = numblocks;
+
+ mtd->erasesize = sect_size;
+
+ return 0;
+}
+
+int cfi_mtd_init(void)
+{
+ struct mtd_info *mtd;
+ flash_info_t *fi;
+ int error, i;
+ int devices_found = 0;
+ struct mtd_info *mtd_list[CONFIG_SYS_MAX_FLASH_BANKS];
+
+ for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
+ fi = &flash_info[i];
+ mtd = &cfi_mtd_info[i];
+
+ memset(mtd, 0, sizeof(struct mtd_info));
+
+ error = cfi_mtd_set_erasesize(mtd, fi);
+ if (error)
+ continue;
+
+ sprintf(cfi_mtd_names[i], "nor%d", i);
+ mtd->name = cfi_mtd_names[i];
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = fi->size;
+ mtd->writesize = 1;
+
+ mtd->erase = cfi_mtd_erase;
+ mtd->read = cfi_mtd_read;
+ mtd->write = cfi_mtd_write;
+ mtd->sync = cfi_mtd_sync;
+ mtd->lock = cfi_mtd_lock;
+ mtd->unlock = cfi_mtd_unlock;
+ mtd->priv = fi;
+
+ if (add_mtd_device(mtd))
+ return -ENOMEM;
+
+ mtd_list[devices_found++] = mtd;
+ }
+
+#ifdef CONFIG_MTD_CONCAT
+ if (devices_found > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ sprintf(c_mtd_name, "nor%d", devices_found);
+ mtd = mtd_concat_create(mtd_list, devices_found, c_mtd_name);
+
+ if (mtd == NULL)
+ return -ENXIO;
+
+ if (add_mtd_device(mtd))
+ return -ENOMEM;
+ }
+#endif /* CONFIG_MTD_CONCAT */
+
+ return 0;
+}
diff --git a/u-boot/drivers/mtd/dataflash.c b/u-boot/drivers/mtd/dataflash.c
new file mode 100644
index 0000000..96cd395
--- /dev/null
+++ b/u-boot/drivers/mtd/dataflash.c
@@ -0,0 +1,463 @@
+/* LowLevel function for ATMEL DataFlash support
+ * Author : Hamid Ikdoumi (Atmel)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+#include <common.h>
+#include <config.h>
+#include <asm/hardware.h>
+#include <dataflash.h>
+
+static AT91S_DataFlash DataFlashInst;
+
+extern void AT91F_SpiInit (void);
+extern int AT91F_DataflashProbe (int i, AT91PS_DataflashDesc pDesc);
+extern int AT91F_DataFlashRead (AT91PS_DataFlash pDataFlash,
+ unsigned long addr,
+ unsigned long size, char *buffer);
+extern int AT91F_DataFlashWrite( AT91PS_DataFlash pDataFlash,
+ unsigned char *src,
+ int dest,
+ int size );
+
+int AT91F_DataflashInit (void)
+{
+ int i, j;
+ int dfcode;
+ int part;
+ int last_part;
+ int found[CONFIG_SYS_MAX_DATAFLASH_BANKS];
+ unsigned char protected;
+
+ AT91F_SpiInit ();
+
+ for (i = 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) {
+ found[i] = 0;
+ dataflash_info[i].Desc.state = IDLE;
+ dataflash_info[i].id = 0;
+ dataflash_info[i].Device.pages_number = 0;
+ dfcode = AT91F_DataflashProbe (cs[i].cs,
+ &dataflash_info[i].Desc);
+
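+		/*
+		 * Each supported AT45DBxxx type is described by its page
+		 * count and page size; page_offset and byte_mask are
+		 * presumably the page-address shift and in-page byte mask
+		 * used when forming DataFlash addresses.
+		 */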
+ switch (dfcode) {
+ case AT45DB021:
+ dataflash_info[i].Device.pages_number = 1024;
+ dataflash_info[i].Device.pages_size = 264;
+ dataflash_info[i].Device.page_offset = 9;
+ dataflash_info[i].Device.byte_mask = 0x300;
+ dataflash_info[i].Device.cs = cs[i].cs;
+ dataflash_info[i].Desc.DataFlash_state = IDLE;
+ dataflash_info[i].logical_address = cs[i].addr;
+ dataflash_info[i].id = dfcode;
+			found[i] += dfcode;
+ break;
+
+ case AT45DB081:
+ dataflash_info[i].Device.pages_number = 4096;
+ dataflash_info[i].Device.pages_size = 264;
+ dataflash_info[i].Device.page_offset = 9;
+ dataflash_info[i].Device.byte_mask = 0x300;
+ dataflash_info[i].Device.cs = cs[i].cs;
+ dataflash_info[i].Desc.DataFlash_state = IDLE;
+ dataflash_info[i].logical_address = cs[i].addr;
+ dataflash_info[i].id = dfcode;
+			found[i] += dfcode;
+ break;
+
+ case AT45DB161:
+ dataflash_info[i].Device.pages_number = 4096;
+ dataflash_info[i].Device.pages_size = 528;
+ dataflash_info[i].Device.page_offset = 10;
+ dataflash_info[i].Device.byte_mask = 0x300;
+ dataflash_info[i].Device.cs = cs[i].cs;
+ dataflash_info[i].Desc.DataFlash_state = IDLE;
+ dataflash_info[i].logical_address = cs[i].addr;
+ dataflash_info[i].id = dfcode;
+			found[i] += dfcode;
+ break;
+
+ case AT45DB321:
+ dataflash_info[i].Device.pages_number = 8192;
+ dataflash_info[i].Device.pages_size = 528;
+ dataflash_info[i].Device.page_offset = 10;
+ dataflash_info[i].Device.byte_mask = 0x300;
+ dataflash_info[i].Device.cs = cs[i].cs;
+ dataflash_info[i].Desc.DataFlash_state = IDLE;
+ dataflash_info[i].logical_address = cs[i].addr;
+ dataflash_info[i].id = dfcode;
+			found[i] += dfcode;
+ break;
+
+ case AT45DB642:
+ dataflash_info[i].Device.pages_number = 8192;
+ dataflash_info[i].Device.pages_size = 1056;
+ dataflash_info[i].Device.page_offset = 11;
+ dataflash_info[i].Device.byte_mask = 0x700;
+ dataflash_info[i].Device.cs = cs[i].cs;
+ dataflash_info[i].Desc.DataFlash_state = IDLE;
+ dataflash_info[i].logical_address = cs[i].addr;
+ dataflash_info[i].id = dfcode;
+			found[i] += dfcode;
+ break;
+
+ case AT45DB128:
+ dataflash_info[i].Device.pages_number = 16384;
+ dataflash_info[i].Device.pages_size = 1056;
+ dataflash_info[i].Device.page_offset = 11;
+ dataflash_info[i].Device.byte_mask = 0x700;
+ dataflash_info[i].Device.cs = cs[i].cs;
+ dataflash_info[i].Desc.DataFlash_state = IDLE;
+ dataflash_info[i].logical_address = cs[i].addr;
+ dataflash_info[i].id = dfcode;
+			found[i] += dfcode;
+ break;
+
+ default:
+ dfcode = 0;
+ break;
+ }
+ /* set the last area end to the dataflash size*/
+ dataflash_info[i].end_address =
+ (dataflash_info[i].Device.pages_number *
+ dataflash_info[i].Device.pages_size) - 1;
+
+ part = 0;
+ last_part = 0;
+ /* set the area addresses */
+ for(j = 0; j < NB_DATAFLASH_AREA; j++) {
+ if(found[i]!=0) {
+ dataflash_info[i].Device.area_list[j].start =
+ area_list[part].start +
+ dataflash_info[i].logical_address;
+ if(area_list[part].end == 0xffffffff) {
+ dataflash_info[i].Device.area_list[j].end =
+ dataflash_info[i].end_address +
+ dataflash_info[i].logical_address;
+ last_part = 1;
+ } else {
+ dataflash_info[i].Device.area_list[j].end =
+ area_list[part].end +
+ dataflash_info[i].logical_address;
+ }
+ protected = area_list[part].protected;
+ /* Set the environment according to the label...*/
+ if(protected == FLAG_PROTECT_INVALID) {
+ dataflash_info[i].Device.area_list[j].protected =
+ FLAG_PROTECT_INVALID;
+ } else {
+ dataflash_info[i].Device.area_list[j].protected =
+ protected;
+ }
+ strcpy((char*)(dataflash_info[i].Device.area_list[j].label),
+ (const char *)area_list[part].label);
+ }
+ part++;
+ }
+ }
+ return found[0];
+}
+
+void AT91F_DataflashSetEnv (void)
+{
+ int i, j;
+ int part;
+ unsigned char env;
+ unsigned char s[32]; /* Will fit a long int in hex */
+ unsigned long start;
+
+ for (i = 0, part= 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) {
+ for(j = 0; j < NB_DATAFLASH_AREA; j++) {
+ env = area_list[part].setenv;
+ /* Set the environment according to the label...*/
+ if((env & FLAG_SETENV) == FLAG_SETENV) {
+ start = dataflash_info[i].Device.area_list[j].start;
+ sprintf((char*) s,"%lX",start);
+ setenv((char*) area_list[part].label,(char*) s);
+ }
+ part++;
+ }
+ }
+}
+
+void dataflash_print_info (void)
+{
+ int i, j;
+
+ for (i = 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) {
+ if (dataflash_info[i].id != 0) {
+ printf("DataFlash:");
+ switch (dataflash_info[i].id) {
+ case AT45DB021:
+ printf("AT45DB021\n");
+ break;
+ case AT45DB161:
+ printf("AT45DB161\n");
+ break;
+
+ case AT45DB321:
+ printf("AT45DB321\n");
+ break;
+
+ case AT45DB642:
+ printf("AT45DB642\n");
+ break;
+ case AT45DB128:
+ printf("AT45DB128\n");
+ break;
+ }
+
+ printf("Nb pages: %6d\n"
+ "Page Size: %6d\n"
+ "Size=%8d bytes\n"
+ "Logical address: 0x%08X\n",
+ (unsigned int) dataflash_info[i].Device.pages_number,
+ (unsigned int) dataflash_info[i].Device.pages_size,
+ (unsigned int) dataflash_info[i].Device.pages_number *
+ dataflash_info[i].Device.pages_size,
+ (unsigned int) dataflash_info[i].logical_address);
+ for (j = 0; j < NB_DATAFLASH_AREA; j++) {
+ switch(dataflash_info[i].Device.area_list[j].protected) {
+ case FLAG_PROTECT_SET:
+ case FLAG_PROTECT_CLEAR:
+ printf("Area %i:\t%08lX to %08lX %s", j,
+ dataflash_info[i].Device.area_list[j].start,
+ dataflash_info[i].Device.area_list[j].end,
+ (dataflash_info[i].Device.area_list[j].protected==FLAG_PROTECT_SET) ? "(RO)" : " ");
+ printf(" %s\n", dataflash_info[i].Device.area_list[j].label);
+ break;
+ case FLAG_PROTECT_INVALID:
+ break;
+ }
+ }
+ }
+ }
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : AT91F_DataflashSelect */
+/* Object : Select the correct device */
+/*---------------------------------------------------------------------------*/
+AT91PS_DataFlash AT91F_DataflashSelect (AT91PS_DataFlash pFlash,
+ unsigned long *addr)
+{
+ char addr_valid = 0;
+ int i;
+
+ for (i = 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++)
+ if ( dataflash_info[i].id
+ && ((((int) *addr) & 0xFF000000) ==
+ dataflash_info[i].logical_address)) {
+ addr_valid = 1;
+ break;
+ }
+ if (!addr_valid) {
+ pFlash = (AT91PS_DataFlash) 0;
+ return pFlash;
+ }
+ pFlash->pDataFlashDesc = &(dataflash_info[i].Desc);
+ pFlash->pDevice = &(dataflash_info[i].Device);
+ *addr -= dataflash_info[i].logical_address;
+ return (pFlash);
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : addr_dataflash */
+/* Object : Test if address is valid */
+/*---------------------------------------------------------------------------*/
+int addr_dataflash (unsigned long addr)
+{
+ int addr_valid = 0;
+ int i;
+
+ for (i = 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) {
+ if ((((int) addr) & 0xFF000000) ==
+ dataflash_info[i].logical_address) {
+ addr_valid = 1;
+ break;
+ }
+ }
+
+ return addr_valid;
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : size_dataflash */
+/* Object : Test if address is valid regarding the size */
+/*---------------------------------------------------------------------------*/
+int size_dataflash (AT91PS_DataFlash pdataFlash, unsigned long addr,
+ unsigned long size)
+{
+ /* is outside the dataflash */
+ if (((int)addr & 0x0FFFFFFF) > (pdataFlash->pDevice->pages_size *
+ pdataFlash->pDevice->pages_number)) return 0;
+ /* is too large for the dataflash */
+ if (size > ((pdataFlash->pDevice->pages_size *
+ pdataFlash->pDevice->pages_number) -
+ ((int)addr & 0x0FFFFFFF))) return 0;
+
+ return 1;
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : prot_dataflash */
+/* Object : Test if destination area is protected */
+/*---------------------------------------------------------------------------*/
+int prot_dataflash (AT91PS_DataFlash pdataFlash, unsigned long addr)
+{
+ int area;
+
+ /* find area */
+ for (area = 0; area < NB_DATAFLASH_AREA; area++) {
+ if ((addr >= pdataFlash->pDevice->area_list[area].start) &&
+ (addr < pdataFlash->pDevice->area_list[area].end))
+ break;
+ }
+ if (area == NB_DATAFLASH_AREA)
+ return -1;
+
+ /*test protection value*/
+ if (pdataFlash->pDevice->area_list[area].protected == FLAG_PROTECT_SET)
+ return 0;
+ if (pdataFlash->pDevice->area_list[area].protected == FLAG_PROTECT_INVALID)
+ return 0;
+
+ return 1;
+}
+
+/*--------------------------------------------------------------------------*/
+/* Function Name : dataflash_real_protect */
+/* Object : protect/unprotect area */
+/*--------------------------------------------------------------------------*/
+int dataflash_real_protect (int flag, unsigned long start_addr,
+ unsigned long end_addr)
+{
+ int i,j, area1, area2, addr_valid = 0;
+
+ /* find dataflash */
+ for (i = 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) {
+ if ((((int) start_addr) & 0xF0000000) ==
+ dataflash_info[i].logical_address) {
+ addr_valid = 1;
+ break;
+ }
+ }
+ if (!addr_valid) {
+ return -1;
+ }
+ /* find start area */
+ for (area1 = 0; area1 < NB_DATAFLASH_AREA; area1++) {
+ if (start_addr == dataflash_info[i].Device.area_list[area1].start)
+ break;
+ }
+ if (area1 == NB_DATAFLASH_AREA) return -1;
+ /* find end area */
+ for (area2 = 0; area2 < NB_DATAFLASH_AREA; area2++) {
+ if (end_addr == dataflash_info[i].Device.area_list[area2].end)
+ break;
+ }
+ if (area2 == NB_DATAFLASH_AREA)
+ return -1;
+
+ /*set protection value*/
+ for(j = area1; j < area2 + 1 ; j++)
+ if(dataflash_info[i].Device.area_list[j].protected
+ != FLAG_PROTECT_INVALID) {
+ if (flag == 0) {
+ dataflash_info[i].Device.area_list[j].protected
+ = FLAG_PROTECT_CLEAR;
+ } else {
+ dataflash_info[i].Device.area_list[j].protected
+ = FLAG_PROTECT_SET;
+ }
+ }
+
+ return (area2 - area1 + 1);
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : read_dataflash */
+/* Object : dataflash memory read */
+/*---------------------------------------------------------------------------*/
+int read_dataflash (unsigned long addr, unsigned long size, char *result)
+{
+ unsigned long AddrToRead = addr;
+ AT91PS_DataFlash pFlash = &DataFlashInst;
+
+ pFlash = AT91F_DataflashSelect (pFlash, &AddrToRead);
+
+ if (pFlash == 0)
+ return ERR_UNKNOWN_FLASH_TYPE;
+
+ if (size_dataflash(pFlash,addr,size) == 0)
+ return ERR_INVAL;
+
+ return (AT91F_DataFlashRead (pFlash, AddrToRead, size, result));
+}
+
+/*---------------------------------------------------------------------------*/
+/* Function Name : write_dataflash */
+/* Object : write a block in dataflash */
+/*---------------------------------------------------------------------------*/
+int write_dataflash (unsigned long addr_dest, unsigned long addr_src,
+ unsigned long size)
+{
+ unsigned long AddrToWrite = addr_dest;
+ AT91PS_DataFlash pFlash = &DataFlashInst;
+
+ pFlash = AT91F_DataflashSelect (pFlash, &AddrToWrite);
+
+ if (pFlash == 0)
+ return ERR_UNKNOWN_FLASH_TYPE;
+
+ if (size_dataflash(pFlash,addr_dest,size) == 0)
+ return ERR_INVAL;
+
+ if (prot_dataflash(pFlash,addr_dest) == 0)
+ return ERR_PROTECTED;
+
+ if (AddrToWrite == -1)
+ return -1;
+
+ return AT91F_DataFlashWrite (pFlash, (uchar *)addr_src,
+ AddrToWrite, size);
+}
+
+void dataflash_perror (int err)
+{
+ switch (err) {
+ case ERR_OK:
+ break;
+ case ERR_TIMOUT:
+ printf("Timeout writing to DataFlash\n");
+ break;
+ case ERR_PROTECTED:
+ printf("Can't write to protected/invalid DataFlash sectors\n");
+ break;
+ case ERR_INVAL:
+ printf("Outside available DataFlash\n");
+ break;
+ case ERR_UNKNOWN_FLASH_TYPE:
+ printf("Unknown Type of DataFlash\n");
+ break;
+ case ERR_PROG_ERROR:
+ printf("General DataFlash Programming Error\n");
+ break;
+ default:
+ printf("%s[%d] FIXME: rc=%d\n", __FILE__, __LINE__, err);
+ break;
+ }
+}
diff --git a/u-boot/drivers/mtd/jedec_flash.c b/u-boot/drivers/mtd/jedec_flash.c
new file mode 100644
index 0000000..da8c9b1
--- /dev/null
+++ b/u-boot/drivers/mtd/jedec_flash.c
@@ -0,0 +1,438 @@
+/*
+ * (C) Copyright 2007
+ * Michael Schwingen, <michael@schwingen.org>
+ *
+ * based in great part on jedec_probe.c from linux kernel:
+ * (C) 2000 Red Hat. GPL'd.
+ * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+/* The DEBUG define must be before common to enable debugging */
+/*#define DEBUG*/
+
+#include <common.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <environment.h>
+
+#define P_ID_AMD_STD CFI_CMDSET_AMD_LEGACY
+
+/* AMD */
+#define AM29DL800BB 0x22CB
+#define AM29DL800BT 0x224A
+
+#define AM29F400BB 0x22AB
+#define AM29F800BB 0x2258
+#define AM29F800BT 0x22D6
+#define AM29LV400BB 0x22BA
+#define AM29LV400BT 0x22B9
+#define AM29LV800BB 0x225B
+#define AM29LV800BT 0x22DA
+#define AM29LV160DT 0x22C4
+#define AM29LV160DB 0x2249
+#define AM29F017D 0x003D
+#define AM29F016D 0x00AD
+#define AM29F080 0x00D5
+#define AM29F040 0x00A4
+#define AM29LV040B 0x004F
+#define AM29F032B 0x0041
+#define AM29F002T 0x00B0
+
+/* SST */
+#define SST39LF800 0x2781
+#define SST39LF160 0x2782
+#define SST39VF1601 0x234b
+#define SST39LF512 0x00D4
+#define SST39LF010 0x00D5
+#define SST39LF020 0x00D6
+#define SST39LF040 0x00D7
+#define SST39SF010A 0x00B5
+#define SST39SF020A 0x00B6
+
+/* MXIC */
+#define MX29LV040 0x004F
+
+/* WINBOND */
+#define W39L040A 0x00D6
+
+/* AMIC */
+#define A29L040 0x0092
+
+/* EON */
+#define EN29LV040A 0x004F
+
+/*
+ * Unlock address sets for AMD command sets.
+ * Intel command sets use the MTD_UADDR_UNNECESSARY.
+ * Each identifier, except MTD_UADDR_UNNECESSARY and
+ * MTD_UADDR_NOT_SUPPORTED, must be defined below in unlock_addrs[].
+ * MTD_UADDR_NOT_SUPPORTED must be 0 so that structure
+ * initialization need not require initializing all of the
+ * unlock addresses for all bit widths.
+ */
+enum uaddr {
+ MTD_UADDR_NOT_SUPPORTED = 0, /* data width not supported */
+ MTD_UADDR_0x0555_0x02AA,
+ MTD_UADDR_0x0555_0x0AAA,
+ MTD_UADDR_0x5555_0x2AAA,
+ MTD_UADDR_0x0AAA_0x0555,
+ MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
+ MTD_UADDR_UNNECESSARY, /* Does not require any address */
+};
+
+
+struct unlock_addr {
+ u32 addr1;
+ u32 addr2;
+};
+
+
+/*
+ * I don't like the fact that the first entry in unlock_addrs[]
+ * exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore,
+ * should not be used. The problem is that structures with
+ * initializers have extra fields initialized to 0. It is _very_
+ * desirable to have the unlock address entries for unsupported
+ * data widths automatically initialized - that means that
+ * MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here
+ * must go unused.
+ */
+static const struct unlock_addr unlock_addrs[] = {
+ [MTD_UADDR_NOT_SUPPORTED] = {
+ .addr1 = 0xffff,
+ .addr2 = 0xffff
+ },
+
+ [MTD_UADDR_0x0555_0x02AA] = {
+ .addr1 = 0x0555,
+ .addr2 = 0x02aa
+ },
+
+ [MTD_UADDR_0x0555_0x0AAA] = {
+ .addr1 = 0x0555,
+ .addr2 = 0x0aaa
+ },
+
+ [MTD_UADDR_0x5555_0x2AAA] = {
+ .addr1 = 0x5555,
+ .addr2 = 0x2aaa
+ },
+
+ [MTD_UADDR_0x0AAA_0x0555] = {
+ .addr1 = 0x0AAA,
+ .addr2 = 0x0555
+ },
+
+ [MTD_UADDR_DONT_CARE] = {
+ .addr1 = 0x0000, /* Doesn't matter which address */
+ .addr2 = 0x0000 /* is used - must be last entry */
+ },
+
+ [MTD_UADDR_UNNECESSARY] = {
+ .addr1 = 0x0000,
+ .addr2 = 0x0000
+ }
+};
+
+
+struct amd_flash_info {
+ const __u16 mfr_id;
+ const __u16 dev_id;
+ const char *name;
+ const int DevSize;
+ const int NumEraseRegions;
+ const int CmdSet;
+ const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */
+ const ulong regions[6];
+};
+
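+/*
+ * ERASEINFO() packs one erase region descriptor: the sector size is kept
+ * in the upper bits and (blocks - 1) in the low byte, matching the decode
+ * in fill_info(); e.g. ERASEINFO(0x10000, 8) means 8 sectors of 64 KiB.
+ */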
+#define ERASEINFO(size, blocks) (((size) << 8) | ((blocks) - 1))
+
+#define SIZE_64KiB 16
+#define SIZE_128KiB 17
+#define SIZE_256KiB 18
+#define SIZE_512KiB 19
+#define SIZE_1MiB 20
+#define SIZE_2MiB 21
+#define SIZE_4MiB 22
+#define SIZE_8MiB 23
+
+static const struct amd_flash_info jedec_table[] = {
+#ifdef CONFIG_SYS_FLASH_LEGACY_256Kx8
+ {
+ .mfr_id = (u16)SST_MANUFACT,
+ .dev_id = SST39LF020,
+ .name = "SST 39LF020",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ },
+#endif
+#ifdef CONFIG_SYS_FLASH_LEGACY_512Kx8
+ {
+ .mfr_id = (u16)AMD_MANUFACT,
+ .dev_id = AM29LV040B,
+ .name = "AMD AM29LV040B",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ },
+ {
+ .mfr_id = (u16)SST_MANUFACT,
+ .dev_id = SST39LF040,
+ .name = "SST 39LF040",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ },
+ {
+ .mfr_id = (u16)STM_MANUFACT,
+ .dev_id = STM_ID_M29W040B,
+ .name = "ST Micro M29W040B",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ },
+ {
+ .mfr_id = (u16)MX_MANUFACT,
+ .dev_id = MX29LV040,
+ .name = "MXIC MX29LV040",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000, 8),
+ }
+ },
+ {
+ .mfr_id = (u16)WINB_MANUFACT,
+ .dev_id = W39L040A,
+ .name = "WINBOND W39L040A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000, 8),
+ }
+ },
+ {
+ .mfr_id = (u16)AMIC_MANUFACT,
+ .dev_id = A29L040,
+ .name = "AMIC A29L040",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000, 8),
+ }
+ },
+ {
+ .mfr_id = (u16)EON_MANUFACT,
+ .dev_id = EN29LV040A,
+ .name = "EON EN29LV040A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000, 8),
+ }
+ },
+#endif
+#ifdef CONFIG_SYS_FLASH_LEGACY_512Kx16
+ {
+ .mfr_id = (u16)AMD_MANUFACT,
+ .dev_id = AM29F400BB,
+ .name = "AMD AM29F400BB",
+ .uaddr = {
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = CFI_CMDSET_AMD_LEGACY,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x10000, 7),
+ }
+ },
+ {
+ .mfr_id = (u16)AMD_MANUFACT,
+ .dev_id = AM29LV400BB,
+ .name = "AMD AM29LV400BB",
+ .uaddr = {
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = CFI_CMDSET_AMD_LEGACY,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7),
+ }
+ },
+ {
+ .mfr_id = (u16)AMD_MANUFACT,
+ .dev_id = AM29LV800BB,
+ .name = "AMD AM29LV800BB",
+ .uaddr = {
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = CFI_CMDSET_AMD_LEGACY,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x10000, 15),
+ }
+ },
+#endif
+};
+
+static inline void fill_info(flash_info_t *info, const struct amd_flash_info *jedec_entry, ulong base)
+{
+ int i,j;
+ int sect_cnt;
+ int size_ratio;
+ int total_size;
+ enum uaddr uaddr_idx;
+
+ size_ratio = info->portwidth / info->chipwidth;
+
+ debug("Found JEDEC Flash: %s\n", jedec_entry->name);
+ info->vendor = jedec_entry->CmdSet;
+ /* Todo: do we need device-specific timeouts? */
+ info->erase_blk_tout = 30000;
+ info->buffer_write_tout = 1000;
+ info->write_tout = 100;
+ info->name = jedec_entry->name;
+
+ /* copy unlock addresses from device table to CFI info struct. This
+ is just here because the addresses are in the table anyway - if
+ the flash is not detected due to wrong unlock addresses,
+ flash_detect_legacy would have to try all of them before we even
+ get here. */
+ switch(info->chipwidth) {
+ case FLASH_CFI_8BIT:
+ uaddr_idx = jedec_entry->uaddr[0];
+ break;
+ case FLASH_CFI_16BIT:
+ uaddr_idx = jedec_entry->uaddr[1];
+ break;
+ case FLASH_CFI_32BIT:
+ uaddr_idx = jedec_entry->uaddr[2];
+ break;
+ default:
+ uaddr_idx = MTD_UADDR_NOT_SUPPORTED;
+ break;
+ }
+
+ debug("unlock address index %d\n", uaddr_idx);
+ info->addr_unlock1 = unlock_addrs[uaddr_idx].addr1;
+ info->addr_unlock2 = unlock_addrs[uaddr_idx].addr2;
+ debug("unlock addresses are 0x%x/0x%x\n", info->addr_unlock1, info->addr_unlock2);
+
+ sect_cnt = 0;
+ total_size = 0;
+ for (i = 0; i < jedec_entry->NumEraseRegions; i++) {
+ ulong erase_region_size = jedec_entry->regions[i] >> 8;
+ ulong erase_region_count = (jedec_entry->regions[i] & 0xff) + 1;
+
+ total_size += erase_region_size * erase_region_count;
+		debug("erase_region_count = %lu erase_region_size = %lu\n",
+			erase_region_count, erase_region_size);
+ for (j = 0; j < erase_region_count; j++) {
+ if (sect_cnt >= CONFIG_SYS_MAX_FLASH_SECT) {
+ printf("ERROR: too many flash sectors\n");
+ break;
+ }
+ info->start[sect_cnt] = base;
+ base += (erase_region_size * size_ratio);
+ sect_cnt++;
+ }
+ }
+ info->sector_count = sect_cnt;
+ info->size = total_size * size_ratio;
+}
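+
+/*
+ * Worked example (derived from the decoding above): a table entry such as
+ * ERASEINFO(0x10000, 8) describes 8 erase blocks of 0x10000 (64 KiB) each,
+ * i.e. 512 KiB in total -- matching the SIZE_512KiB entries in jedec_table.
+ */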
+
+/*-----------------------------------------------------------------------
+ * match jedec ids against table. If a match is found, fill flash_info entry
+ */
+int jedec_flash_match(flash_info_t *info, ulong base)
+{
+ int ret = 0;
+ int i;
+ ulong mask = 0xFFFF;
+ if (info->chipwidth == 1)
+ mask = 0xFF;
+
+ for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
+ if ((jedec_table[i].mfr_id & mask) == (info->manufacturer_id & mask) &&
+ (jedec_table[i].dev_id & mask) == (info->device_id & mask)) {
+ fill_info(info, &jedec_table[i], base);
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
diff --git a/u-boot/drivers/mtd/mtdconcat.c b/u-boot/drivers/mtd/mtdconcat.c
new file mode 100644
index 0000000..fc22701
--- /dev/null
+++ b/u-boot/drivers/mtd/mtdconcat.c
@@ -0,0 +1,807 @@
+/*
+ * MTD device concatenation layer
+ *
+ * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
+ *
+ * NAND support by Christian Gan <cgan@iders.ca>
+ *
+ * This code is GPL
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/compat.h>
+#include <linux/mtd/concat.h>
+#include <ubi_uboot.h>
+
+/*
+ * Our storage structure:
+ * Subdev points to an array of pointers to struct mtd_info objects
+ * which is allocated along with this structure
+ *
+ */
+struct mtd_concat {
+ struct mtd_info mtd;
+ int num_subdev;
+ struct mtd_info **subdev;
+};
+
+/*
+ * how to calculate the size required for the above structure,
+ * including the pointer array subdev points to:
+ */
+#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
+ ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
+
+/*
+ * Given a pointer to the MTD object in the mtd_concat structure,
+ * we can retrieve the pointer to that structure with this macro.
+ */
+#define CONCAT(x) ((struct mtd_concat *)(x))
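+
+/*
+ * Illustrative sketch (assumes two subdevices): mtd_concat_create() below
+ * allocates the structure and the trailing pointer array in one go, and
+ * CONCAT() recovers the container from the embedded mtd_info:
+ *
+ *	struct mtd_concat *concat =
+ *		kzalloc(SIZEOF_STRUCT_MTD_CONCAT(2), GFP_KERNEL);
+ *	concat->subdev = (struct mtd_info **)(concat + 1);
+ *	...
+ *	struct mtd_info *combined = &concat->mtd;
+ *	... here CONCAT(combined) yields the original concat pointer ...
+ */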
+
+/*
+ * MTD methods which look up the relevant subdevice, translate the
+ * effective address and pass through to the subdevice.
+ */
+
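+/*
+ * Worked example (illustrative numbers only): with two 64 MiB subdevices,
+ * a 4 KiB read at offset 0x4100000 of the concatenated device is routed to
+ * subdev[1] at offset 0x100000; a request straddling the 64 MiB boundary is
+ * split and issued to both subdevices in turn, which is exactly what the
+ * loops in concat_read()/concat_write() below do.
+ */
+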
+static int
+concat_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int ret = 0, err;
+ int i;
+
+ *retlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (from >= subdev->size) {
+ /* Not destined for this subdev */
+ size = 0;
+ from -= subdev->size;
+ continue;
+ }
+ if (from + len > subdev->size)
+ /* First part goes into this subdev */
+ size = subdev->size - from;
+ else
+ /* Entire transaction goes into this subdev */
+ size = len;
+
+ err = subdev->read(subdev, from, size, &retsize, buf);
+
+ /* Save information about bitflips! */
+ if (unlikely(err)) {
+ if (err == -EBADMSG) {
+ mtd->ecc_stats.failed++;
+ ret = err;
+ } else if (err == -EUCLEAN) {
+ mtd->ecc_stats.corrected++;
+ /* Do not overwrite -EBADMSG !! */
+ if (!ret)
+ ret = err;
+ } else
+ return err;
+ }
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ return ret;
+
+ buf += size;
+ from = 0;
+ }
+ return -EINVAL;
+}
+
+static int
+concat_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ *retlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (to >= subdev->size) {
+ size = 0;
+ to -= subdev->size;
+ continue;
+ }
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ if (!(subdev->flags & MTD_WRITEABLE))
+ err = -EROFS;
+ else
+ err = subdev->write(subdev, to, size, &retsize, buf);
+
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ to = 0;
+ }
+ return err;
+}
+
+static int
+concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_oob_ops devops = *ops;
+ int i, err, ret = 0;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (from >= subdev->size) {
+ from -= subdev->size;
+ continue;
+ }
+
+ /* partial read ? */
+ if (from + devops.len > subdev->size)
+ devops.len = subdev->size - from;
+
+ err = subdev->read_oob(subdev, from, &devops);
+ ops->retlen += devops.retlen;
+ ops->oobretlen += devops.oobretlen;
+
+ /* Save information about bitflips! */
+ if (unlikely(err)) {
+ if (err == -EBADMSG) {
+ mtd->ecc_stats.failed++;
+ ret = err;
+ } else if (err == -EUCLEAN) {
+ mtd->ecc_stats.corrected++;
+ /* Do not overwrite -EBADMSG !! */
+ if (!ret)
+ ret = err;
+ } else
+ return err;
+ }
+
+ if (devops.datbuf) {
+ devops.len = ops->len - ops->retlen;
+ if (!devops.len)
+ return ret;
+ devops.datbuf += devops.retlen;
+ }
+ if (devops.oobbuf) {
+ devops.ooblen = ops->ooblen - ops->oobretlen;
+ if (!devops.ooblen)
+ return ret;
+ devops.oobbuf += ops->oobretlen;
+ }
+
+ from = 0;
+ }
+ return -EINVAL;
+}
+
+static int
+concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_oob_ops devops = *ops;
+ int i, err;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ ops->retlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (to >= subdev->size) {
+ to -= subdev->size;
+ continue;
+ }
+
+ /* partial write ? */
+ if (to + devops.len > subdev->size)
+ devops.len = subdev->size - to;
+
+ err = subdev->write_oob(subdev, to, &devops);
+ ops->retlen += devops.retlen;
+ if (err)
+ return err;
+
+ if (devops.datbuf) {
+ devops.len = ops->len - ops->retlen;
+ if (!devops.len)
+ return 0;
+ devops.datbuf += devops.retlen;
+ }
+ if (devops.oobbuf) {
+ devops.ooblen = ops->ooblen - ops->oobretlen;
+ if (!devops.ooblen)
+ return 0;
+ devops.oobbuf += devops.oobretlen;
+ }
+ to = 0;
+ }
+ return -EINVAL;
+}
+
+static void concat_erase_callback(struct erase_info *instr)
+{
+ /* Nothing to do here in U-Boot */
+}
+
+static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ int err;
+ wait_queue_head_t waitq;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * This code was stol^H^H^H^Hinspired by mtdchar.c
+ */
+ init_waitqueue_head(&waitq);
+
+ erase->mtd = mtd;
+ erase->callback = concat_erase_callback;
+ erase->priv = (unsigned long) &waitq;
+
+ /*
+ * FIXME: Allow INTERRUPTIBLE. Which means
+ * not having the wait_queue head on the stack.
+ */
+ err = mtd->erase(mtd, erase);
+ if (!err) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&waitq, &wait);
+ if (erase->state != MTD_ERASE_DONE
+ && erase->state != MTD_ERASE_FAILED)
+ schedule();
+ remove_wait_queue(&waitq, &wait);
+ set_current_state(TASK_RUNNING);
+
+ err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
+ }
+ return err;
+}
+
+static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_info *subdev;
+ int i, err;
+ uint64_t length, offset = 0;
+ struct erase_info *erase;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ if (instr->addr > concat->mtd.size)
+ return -EINVAL;
+
+ if (instr->len + instr->addr > concat->mtd.size)
+ return -EINVAL;
+
+ /*
+ * Check for proper erase block alignment of the to-be-erased area.
+ * It is easier to do this based on the super device's erase
+ * region info rather than looking at each particular sub-device
+ * in turn.
+ */
+ if (!concat->mtd.numeraseregions) {
+ /* the easy case: device has uniform erase block size */
+ if (instr->addr & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ if (instr->len & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ } else {
+ /* device has variable erase size */
+ struct mtd_erase_region_info *erase_regions =
+ concat->mtd.eraseregions;
+
+ /*
+ * Find the erase region where the to-be-erased area begins:
+ */
+ for (i = 0; i < concat->mtd.numeraseregions &&
+ instr->addr >= erase_regions[i].offset; i++) ;
+ --i;
+
+ /*
+ * Now erase_regions[i] is the region in which the
+ * to-be-erased area begins. Verify that the starting
+ * offset is aligned to this region's erase size:
+ */
+ if (instr->addr & (erase_regions[i].erasesize - 1))
+ return -EINVAL;
+
+ /*
+ * now find the erase region where the to-be-erased area ends:
+ */
+ for (; i < concat->mtd.numeraseregions &&
+ (instr->addr + instr->len) >= erase_regions[i].offset;
+ ++i) ;
+ --i;
+ /*
+ * check if the ending offset is aligned to this region's erase size
+ */
+ if ((instr->addr + instr->len) & (erase_regions[i].erasesize -
+ 1))
+ return -EINVAL;
+ }
+
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+
+ /* make a local copy of instr to avoid modifying the caller's struct */
+ erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
+
+ if (!erase)
+ return -ENOMEM;
+
+ *erase = *instr;
+ length = instr->len;
+
+ /*
+ * find the subdevice where the to-be-erased area begins, adjust
+ * starting offset to be relative to the subdevice start
+ */
+ for (i = 0; i < concat->num_subdev; i++) {
+ subdev = concat->subdev[i];
+ if (subdev->size <= erase->addr) {
+ erase->addr -= subdev->size;
+ offset += subdev->size;
+ } else {
+ break;
+ }
+ }
+
+ /* must never happen since size limit has been verified above */
+ BUG_ON(i >= concat->num_subdev);
+
+ /* now do the erase: */
+ err = 0;
+ for (; length > 0; i++) {
+ /* loop for all subdevices affected by this request */
+ subdev = concat->subdev[i]; /* get current subdevice */
+
+ /* limit length to subdevice's size: */
+ if (erase->addr + length > subdev->size)
+ erase->len = subdev->size - erase->addr;
+ else
+ erase->len = length;
+
+ if (!(subdev->flags & MTD_WRITEABLE)) {
+ err = -EROFS;
+ break;
+ }
+ length -= erase->len;
+ if ((err = concat_dev_erase(subdev, erase))) {
+ /* sanity check: should never happen since
+ * block alignment has been checked above */
+ BUG_ON(err == -EINVAL);
+ if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr = erase->fail_addr + offset;
+ break;
+ }
+ /*
+ * erase->addr specifies the offset of the area to be
+ * erased *within the current subdevice*. It can be
+ * non-zero only the first time through this loop, i.e.
+ * for the first subdevice where blocks need to be erased.
+ * All the following erases must begin at the start of the
+ * current subdevice, i.e. at offset zero.
+ */
+ erase->addr = 0;
+ offset += subdev->size;
+ }
+ instr->state = erase->state;
+ kfree(erase);
+ if (err)
+ return err;
+
+ if (instr->callback)
+ instr->callback(instr);
+ return 0;
+}
+
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ uint64_t size;
+
+ if (ofs >= subdev->size) {
+ size = 0;
+ ofs -= subdev->size;
+ continue;
+ }
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ err = subdev->lock(subdev, ofs, size);
+
+ if (err)
+ break;
+
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+
+ return err;
+}
+
+static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = 0;
+
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ uint64_t size;
+
+ if (ofs >= subdev->size) {
+ size = 0;
+ ofs -= subdev->size;
+ continue;
+ }
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ err = subdev->unlock(subdev, ofs, size);
+
+ if (err)
+ break;
+
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+
+ return err;
+}
+
+static void concat_sync(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ subdev->sync(subdev);
+ }
+}
+
+static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, res = 0;
+
+ if (!concat->subdev[0]->block_isbad)
+ return res;
+
+ if (ofs > mtd->size)
+ return -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ res = subdev->block_isbad(subdev, ofs);
+ break;
+ }
+
+ return res;
+}
+
+static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ if (!concat->subdev[0]->block_markbad)
+ return 0;
+
+ if (ofs > mtd->size)
+ return -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ err = subdev->block_markbad(subdev, ofs);
+ if (!err)
+ mtd->ecc_stats.badblocks++;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * This function constructs a virtual MTD device by concatenating
+ * num_devs MTD devices. A pointer to the new device object is
+ * stored to *new_dev upon success. This function does _not_
+ * register any devices: this is the caller's responsibility.
+ */
+struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
+ int num_devs, /* number of subdevices */
+ const char *name)
+{ /* name for the new device */
+ int i;
+ size_t size;
+ struct mtd_concat *concat;
+ uint32_t max_erasesize, curr_erasesize;
+ int num_erase_region;
+
+ debug("Concatenating MTD devices:\n");
+ for (i = 0; i < num_devs; i++)
+ debug("(%d): \"%s\"\n", i, subdev[i]->name);
+ debug("into device \"%s\"\n", name);
+
+ /* allocate the device structure */
+ size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
+ concat = kzalloc(size, GFP_KERNEL);
+ if (!concat) {
+ printk
+ ("memory allocation error while creating concatenated device \"%s\"\n",
+ name);
+ return NULL;
+ }
+ concat->subdev = (struct mtd_info **) (concat + 1);
+
+ /*
+ * Set up the new "super" device's MTD object structure, check for
+	 * incompatibilities between the subdevices.
+ */
+ concat->mtd.type = subdev[0]->type;
+ concat->mtd.flags = subdev[0]->flags;
+ concat->mtd.size = subdev[0]->size;
+ concat->mtd.erasesize = subdev[0]->erasesize;
+ concat->mtd.writesize = subdev[0]->writesize;
+ concat->mtd.subpage_sft = subdev[0]->subpage_sft;
+ concat->mtd.oobsize = subdev[0]->oobsize;
+ concat->mtd.oobavail = subdev[0]->oobavail;
+ if (subdev[0]->read_oob)
+ concat->mtd.read_oob = concat_read_oob;
+ if (subdev[0]->write_oob)
+ concat->mtd.write_oob = concat_write_oob;
+ if (subdev[0]->block_isbad)
+ concat->mtd.block_isbad = concat_block_isbad;
+ if (subdev[0]->block_markbad)
+ concat->mtd.block_markbad = concat_block_markbad;
+
+ concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
+
+ concat->subdev[0] = subdev[0];
+
+ for (i = 1; i < num_devs; i++) {
+ if (concat->mtd.type != subdev[i]->type) {
+ kfree(concat);
+ printk("Incompatible device type on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ }
+ if (concat->mtd.flags != subdev[i]->flags) {
+ /*
+ * Expect all flags except MTD_WRITEABLE to be
+ * equal on all subdevices.
+ */
+ if ((concat->mtd.flags ^ subdev[i]->
+ flags) & ~MTD_WRITEABLE) {
+ kfree(concat);
+ printk("Incompatible device flags on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ } else
+ /* if writeable attribute differs,
+ make super device writeable */
+ concat->mtd.flags |=
+ subdev[i]->flags & MTD_WRITEABLE;
+ }
+
+ concat->mtd.size += subdev[i]->size;
+ concat->mtd.ecc_stats.badblocks +=
+ subdev[i]->ecc_stats.badblocks;
+ if (concat->mtd.writesize != subdev[i]->writesize ||
+ concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
+ concat->mtd.oobsize != subdev[i]->oobsize ||
+ !concat->mtd.read_oob != !subdev[i]->read_oob ||
+ !concat->mtd.write_oob != !subdev[i]->write_oob) {
+ kfree(concat);
+ printk("Incompatible OOB or ECC data on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ }
+ concat->subdev[i] = subdev[i];
+
+ }
+
+ concat->mtd.ecclayout = subdev[0]->ecclayout;
+
+ concat->num_subdev = num_devs;
+ concat->mtd.name = name;
+
+ concat->mtd.erase = concat_erase;
+ concat->mtd.read = concat_read;
+ concat->mtd.write = concat_write;
+ concat->mtd.sync = concat_sync;
+ concat->mtd.lock = concat_lock;
+ concat->mtd.unlock = concat_unlock;
+
+ /*
+ * Combine the erase block size info of the subdevices:
+ *
+ * first, walk the map of the new device and see how
+ * many changes in erase size we have
+ */
+ max_erasesize = curr_erasesize = subdev[0]->erasesize;
+ num_erase_region = 1;
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /* if it differs from the last subdevice's erase size, count it */
+ ++num_erase_region;
+ curr_erasesize = subdev[i]->erasesize;
+ if (curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ } else {
+ /* current subdevice has variable erase size */
+ int j;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].erasesize !=
+ curr_erasesize) {
+ ++num_erase_region;
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ if (curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ }
+ }
+ }
+
+ if (num_erase_region == 1) {
+ /*
+ * All subdevices have the same uniform erase size.
+ * This is easy:
+ */
+ concat->mtd.erasesize = curr_erasesize;
+ concat->mtd.numeraseregions = 0;
+ } else {
+ uint64_t tmp64;
+
+ /*
+ * erase block size varies across the subdevices: allocate
+ * space to store the data describing the variable erase regions
+ */
+ struct mtd_erase_region_info *erase_region_p;
+ uint64_t begin, position;
+
+ concat->mtd.erasesize = max_erasesize;
+ concat->mtd.numeraseregions = num_erase_region;
+ concat->mtd.eraseregions = erase_region_p =
+ kmalloc(num_erase_region *
+ sizeof (struct mtd_erase_region_info), GFP_KERNEL);
+ if (!erase_region_p) {
+ kfree(concat);
+ printk
+ ("memory allocation error while creating erase region list"
+ " for device \"%s\"\n", name);
+ return NULL;
+ }
+
+ /*
+ * walk the map of the new device once more and fill in
+		 * erase region info:
+ */
+ curr_erasesize = subdev[0]->erasesize;
+ begin = position = 0;
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /*
+ * fill in an mtd_erase_region_info structure for the area
+ * we have walked so far:
+ */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ begin = position;
+
+ curr_erasesize = subdev[i]->erasesize;
+ ++erase_region_p;
+ }
+ position += subdev[i]->size;
+ } else {
+ /* current subdevice has variable erase size */
+ int j;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].
+ erasesize != curr_erasesize) {
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ begin = position;
+
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ ++erase_region_p;
+ }
+ position +=
+ subdev[i]->eraseregions[j].
+ numblocks * (uint64_t)curr_erasesize;
+ }
+ }
+ }
+ /* Now write the final entry */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize = curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ }
+
+ return &concat->mtd;
+}
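+
+#if 0
+/*
+ * Usage sketch only (illustrative, not built): concatenate two NAND chips
+ * and register the result.  The nand_info[] indices and the device name
+ * are assumptions for the example; as noted above, registering the new
+ * device is the caller's job.
+ */
+static void example_concat(void)
+{
+	static struct mtd_info *devs[2];
+	struct mtd_info *combined;
+
+	devs[0] = &nand_info[0];
+	devs[1] = &nand_info[1];
+
+	combined = mtd_concat_create(devs, 2, "nand-concat");
+	if (combined)
+		add_mtd_device(combined);
+}
+#endif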
diff --git a/u-boot/drivers/mtd/mtdcore.c b/u-boot/drivers/mtd/mtdcore.c
new file mode 100644
index 0000000..a195dda
--- /dev/null
+++ b/u-boot/drivers/mtd/mtdcore.c
@@ -0,0 +1,188 @@
+/*
+ * Core registration and callback routines for MTD
+ * drivers and users.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/compat.h>
+#include <ubi_uboot.h>
+
+struct mtd_info *mtd_table[MAX_MTD_DEVICES];
+
+int add_mtd_device(struct mtd_info *mtd)
+{
+ int i;
+
+ BUG_ON(mtd->writesize == 0);
+
+ for (i = 0; i < MAX_MTD_DEVICES; i++)
+ if (!mtd_table[i]) {
+ mtd_table[i] = mtd;
+ mtd->index = i;
+ mtd->usecount = 0;
+
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * del_mtd_device - unregister an MTD device
+ * @mtd: pointer to MTD device info structure
+ *
+ * Remove a device from the list of MTD devices present in the system,
+ * and notify each currently active MTD 'user' of its departure.
+ * Returns zero on success, -ENODEV if the device is not present in the
+ * list, or -EBUSY if it is still in use.
+ */
+int del_mtd_device(struct mtd_info *mtd)
+{
+ int ret;
+
+ if (mtd_table[mtd->index] != mtd) {
+ ret = -ENODEV;
+ } else if (mtd->usecount) {
+ printk(KERN_NOTICE "Removing MTD device #%d (%s)"
+ " with use count %d\n",
+ mtd->index, mtd->name, mtd->usecount);
+ ret = -EBUSY;
+ } else {
+ /* No need to get a refcount on the module containing
+ * the notifier, since we hold the mtd_table_mutex */
+ mtd_table[mtd->index] = NULL;
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * get_mtd_device - obtain a validated handle for an MTD device
+ * @mtd: last known address of the required MTD device
+ * @num: internal device number of the required MTD device
+ *
+ * Given a number and NULL address, return the num'th entry in the device
+ * table, if any. Given an address and num == -1, search the device table
+ * for a device with that address and return it if it's still present. Given
+ * both, return the num'th driver only if its address matches. Return
+ * error code if not.
+ */
+struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
+{
+ struct mtd_info *ret = NULL;
+ int i, err = -ENODEV;
+
+ if (num == -1) {
+ for (i = 0; i < MAX_MTD_DEVICES; i++)
+ if (mtd_table[i] == mtd)
+ ret = mtd_table[i];
+ } else if (num < MAX_MTD_DEVICES) {
+ ret = mtd_table[num];
+ if (mtd && mtd != ret)
+ ret = NULL;
+ }
+
+ if (!ret)
+ goto out_unlock;
+
+ ret->usecount++;
+ return ret;
+
+out_unlock:
+ return ERR_PTR(err);
+}
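+
+#if 0
+/*
+ * Usage sketch only (illustrative, not built): look up device 0 by number,
+ * use it, and drop the reference again.  IS_ERR() is assumed to be
+ * available alongside the ERR_PTR() used above.
+ */
+static void example_get_put(void)
+{
+	struct mtd_info *mtd = get_mtd_device(NULL, 0);
+
+	if (IS_ERR(mtd))
+		return;
+	/* ... mtd->read(), mtd->write(), ... */
+	put_mtd_device(mtd);
+}
+#endif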
+
+/**
+ * get_mtd_device_nm - obtain a validated handle for an MTD device by
+ * device name
+ * @name: MTD device name to open
+ *
+ * This function returns MTD device description structure in case of
+ * success and an error code in case of failure.
+ */
+struct mtd_info *get_mtd_device_nm(const char *name)
+{
+ int i, err = -ENODEV;
+ struct mtd_info *mtd = NULL;
+
+ for (i = 0; i < MAX_MTD_DEVICES; i++) {
+ if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) {
+ mtd = mtd_table[i];
+ break;
+ }
+ }
+
+ if (!mtd)
+ goto out_unlock;
+
+ mtd->usecount++;
+ return mtd;
+
+out_unlock:
+ return ERR_PTR(err);
+}
+
+void put_mtd_device(struct mtd_info *mtd)
+{
+ int c;
+
+ c = --mtd->usecount;
+ BUG_ON(c < 0);
+}
+
+#if defined(CONFIG_CMD_MTDPARTS_SPREAD)
+/**
+ * mtd_get_len_incl_bad
+ *
+ * Check if length including bad blocks fits into device.
+ *
+ * @param mtd an MTD device
+ * @param offset offset in flash
+ * @param length image length
+ * @param len_incl_bad returns the image length including bad blocks
+ * @param truncated returns 1 if the device end was reached before the
+ *        requested length was covered, 0 otherwise
+ */
+void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
+ const uint64_t length, uint64_t *len_incl_bad,
+ int *truncated)
+{
+ *truncated = 0;
+ *len_incl_bad = 0;
+
+ if (!mtd->block_isbad) {
+ *len_incl_bad = length;
+ return;
+ }
+
+ uint64_t len_excl_bad = 0;
+ uint64_t block_len;
+
+ while (len_excl_bad < length) {
+ if (offset >= mtd->size) {
+ *truncated = 1;
+ return;
+ }
+
+ block_len = mtd->erasesize - (offset & (mtd->erasesize - 1));
+
+ if (!mtd->block_isbad(mtd, offset & ~(mtd->erasesize - 1)))
+ len_excl_bad += block_len;
+
+ *len_incl_bad += block_len;
+ offset += block_len;
+ }
+}
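+
+#if 0
+/*
+ * Usage sketch only (illustrative, not built): the offset and image length
+ * below are made-up numbers.
+ */
+static void example_len_incl_bad(struct mtd_info *mtd)
+{
+	uint64_t len_incl_bad;
+	int truncated;
+
+	mtd_get_len_incl_bad(mtd, 0x100000, 0x400000,
+			     &len_incl_bad, &truncated);
+	if (truncated)
+		printf("image does not fit before the end of the device\n");
+}
+#endif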
+#endif /* defined(CONFIG_CMD_MTDPARTS_SPREAD) */
diff --git a/u-boot/drivers/mtd/mtdpart.c b/u-boot/drivers/mtd/mtdpart.c
new file mode 100644
index 0000000..f647e43
--- /dev/null
+++ b/u-boot/drivers/mtd/mtdpart.c
@@ -0,0 +1,476 @@
+/*
+ * Simple MTD partitioning layer
+ *
+ * (C) 2000 Nicolas Pitre <nico@cam.org>
+ *
+ * This code is GPL
+ *
+ * 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
+ * added support for read_oob, write_oob
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <asm/errno.h>
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/compat.h>
+
+/* Our partition linked list */
+struct list_head mtd_partitions;
+
+/* Our partition node structure */
+struct mtd_part {
+ struct mtd_info mtd;
+ struct mtd_info *master;
+ uint64_t offset;
+ int index;
+ struct list_head list;
+ int registered;
+};
+
+/*
+ * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
+ * the pointer to that structure with this macro.
+ */
+#define PART(x) ((struct mtd_part *)(x))
+
+
+/*
+ * MTD methods which simply translate the effective address and pass through
+ * to the _real_ device.
+ */
+
+static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ struct mtd_ecc_stats stats;
+ int res;
+
+ stats = part->master->ecc_stats;
+
+ if (from >= mtd->size)
+ len = 0;
+ else if (from + len > mtd->size)
+ len = mtd->size - from;
+ res = part->master->read(part->master, from + part->offset,
+ len, retlen, buf);
+ if (unlikely(res)) {
+ if (res == -EUCLEAN)
+ mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
+ if (res == -EBADMSG)
+ mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
+ }
+ return res;
+}
+
+static int part_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_part *part = PART(mtd);
+ int res;
+
+ if (from >= mtd->size)
+ return -EINVAL;
+ if (ops->datbuf && from + ops->len > mtd->size)
+ return -EINVAL;
+ res = part->master->read_oob(part->master, from + part->offset, ops);
+
+ if (unlikely(res)) {
+ if (res == -EUCLEAN)
+ mtd->ecc_stats.corrected++;
+ if (res == -EBADMSG)
+ mtd->ecc_stats.failed++;
+ }
+ return res;
+}
+
+static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->read_user_prot_reg(part->master, from,
+ len, retlen, buf);
+}
+
+static int part_get_user_prot_info(struct mtd_info *mtd,
+ struct otp_info *buf, size_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->get_user_prot_info(part->master, buf, len);
+}
+
+static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->read_fact_prot_reg(part->master, from,
+ len, retlen, buf);
+}
+
+static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
+ size_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->get_fact_prot_info(part->master, buf, len);
+}
+
+static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (to >= mtd->size)
+ len = 0;
+ else if (to + len > mtd->size)
+ len = mtd->size - to;
+ return part->master->write(part->master, to + part->offset,
+ len, retlen, buf);
+}
+
+static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (to >= mtd->size)
+ len = 0;
+ else if (to + len > mtd->size)
+ len = mtd->size - to;
+ return part->master->panic_write(part->master, to + part->offset,
+ len, retlen, buf);
+}
+
+static int part_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_part *part = PART(mtd);
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ if (to >= mtd->size)
+ return -EINVAL;
+ if (ops->datbuf && to + ops->len > mtd->size)
+ return -EINVAL;
+ return part->master->write_oob(part->master, to + part->offset, ops);
+}
+
+static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->write_user_prot_reg(part->master, from,
+ len, retlen, buf);
+}
+
+static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->lock_user_prot_reg(part->master, from, len);
+}
+
+static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mtd_part *part = PART(mtd);
+ int ret;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (instr->addr >= mtd->size)
+ return -EINVAL;
+ instr->addr += part->offset;
+ ret = part->master->erase(part->master, instr);
+ if (ret) {
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
+ }
+ return ret;
+}
+
+void mtd_erase_callback(struct erase_info *instr)
+{
+ if (instr->mtd->erase == part_erase) {
+ struct mtd_part *part = PART(instr->mtd);
+
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
+ }
+ if (instr->callback)
+ instr->callback(instr);
+}
+
+static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+ return part->master->lock(part->master, ofs + part->offset, len);
+}
+
+static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+ return part->master->unlock(part->master, ofs + part->offset, len);
+}
+
+static void part_sync(struct mtd_info *mtd)
+{
+ struct mtd_part *part = PART(mtd);
+ part->master->sync(part->master);
+}
+
+static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_part *part = PART(mtd);
+ if (ofs >= mtd->size)
+ return -EINVAL;
+ ofs += part->offset;
+ return part->master->block_isbad(part->master, ofs);
+}
+
+static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_part *part = PART(mtd);
+ int res;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (ofs >= mtd->size)
+ return -EINVAL;
+ ofs += part->offset;
+ res = part->master->block_markbad(part->master, ofs);
+ if (!res)
+ mtd->ecc_stats.badblocks++;
+ return res;
+}
+
+/*
+ * This function unregisters and destroys all slave MTD objects which are
+ * attached to the given master MTD object.
+ */
+
+int del_mtd_partitions(struct mtd_info *master)
+{
+ struct mtd_part *slave, *next;
+
+ list_for_each_entry_safe(slave, next, &mtd_partitions, list)
+ if (slave->master == master) {
+ list_del(&slave->list);
+ if (slave->registered)
+ del_mtd_device(&slave->mtd);
+ kfree(slave);
+ }
+
+ return 0;
+}
+
+static struct mtd_part *add_one_partition(struct mtd_info *master,
+ const struct mtd_partition *part, int partno,
+ uint64_t cur_offset)
+{
+ struct mtd_part *slave;
+
+ /* allocate the partition structure */
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ if (!slave) {
+ printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
+ master->name);
+ del_mtd_partitions(master);
+ return NULL;
+ }
+ list_add(&slave->list, &mtd_partitions);
+
+ /* set up the MTD object for this partition */
+ slave->mtd.type = master->type;
+ slave->mtd.flags = master->flags & ~part->mask_flags;
+ slave->mtd.size = part->size;
+ slave->mtd.writesize = master->writesize;
+ slave->mtd.oobsize = master->oobsize;
+ slave->mtd.oobavail = master->oobavail;
+ slave->mtd.subpage_sft = master->subpage_sft;
+
+ slave->mtd.name = part->name;
+ slave->mtd.owner = master->owner;
+
+ slave->mtd.read = part_read;
+ slave->mtd.write = part_write;
+
+ if (master->panic_write)
+ slave->mtd.panic_write = part_panic_write;
+
+ if (master->read_oob)
+ slave->mtd.read_oob = part_read_oob;
+ if (master->write_oob)
+ slave->mtd.write_oob = part_write_oob;
+ if (master->read_user_prot_reg)
+ slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+ if (master->read_fact_prot_reg)
+ slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+ if (master->write_user_prot_reg)
+ slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+ if (master->lock_user_prot_reg)
+ slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+ if (master->get_user_prot_info)
+ slave->mtd.get_user_prot_info = part_get_user_prot_info;
+ if (master->get_fact_prot_info)
+ slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+ if (master->sync)
+ slave->mtd.sync = part_sync;
+ if (master->lock)
+ slave->mtd.lock = part_lock;
+ if (master->unlock)
+ slave->mtd.unlock = part_unlock;
+ if (master->block_isbad)
+ slave->mtd.block_isbad = part_block_isbad;
+ if (master->block_markbad)
+ slave->mtd.block_markbad = part_block_markbad;
+ slave->mtd.erase = part_erase;
+ slave->master = master;
+ slave->offset = part->offset;
+ slave->index = partno;
+
+ if (slave->offset == MTDPART_OFS_APPEND)
+ slave->offset = cur_offset;
+ if (slave->offset == MTDPART_OFS_NXTBLK) {
+ slave->offset = cur_offset;
+ if (mtd_mod_by_eb(cur_offset, master) != 0) {
+ /* Round up to next erasesize */
+ slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+ printk(KERN_NOTICE "Moving partition %d: "
+ "0x%012llx -> 0x%012llx\n", partno,
+ (unsigned long long)cur_offset, (unsigned long long)slave->offset);
+ }
+ }
+ if (slave->mtd.size == MTDPART_SIZ_FULL)
+ slave->mtd.size = master->size - slave->offset;
+
+ printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
+ (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
+
+ /* let's do some sanity checks */
+ if (slave->offset >= master->size) {
+ /* let's register it anyway to preserve ordering */
+ slave->offset = 0;
+ slave->mtd.size = 0;
+ printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
+ part->name);
+ goto out_register;
+ }
+ if (slave->offset + slave->mtd.size > master->size) {
+ slave->mtd.size = master->size - slave->offset;
+ printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
+ part->name, master->name, (unsigned long long)slave->mtd.size);
+ }
+ if (master->numeraseregions > 1) {
+ /* Deal with variable erase size stuff */
+ int i, max = master->numeraseregions;
+ u64 end = slave->offset + slave->mtd.size;
+ struct mtd_erase_region_info *regions = master->eraseregions;
+
+		/* Find the first erase region which is part of this
+ * partition. */
+ for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
+ ;
+ /* The loop searched for the region _behind_ the first one */
+ i--;
+
+ /* Pick biggest erasesize */
+ for (; i < max && regions[i].offset < end; i++) {
+ if (slave->mtd.erasesize < regions[i].erasesize) {
+ slave->mtd.erasesize = regions[i].erasesize;
+ }
+ }
+ BUG_ON(slave->mtd.erasesize == 0);
+ } else {
+ /* Single erase size */
+ slave->mtd.erasesize = master->erasesize;
+ }
+
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ mtd_mod_by_eb(slave->offset, &slave->mtd)) {
+ /* Doesn't start on a boundary of major erase size */
+ /* FIXME: Let it be writable if it is on a boundary of
+ * _minor_ erase size though */
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+ part->name);
+ }
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+ part->name);
+ }
+
+ slave->mtd.ecclayout = master->ecclayout;
+ if (master->block_isbad) {
+ uint64_t offs = 0;
+
+ while (offs < slave->mtd.size) {
+ if (master->block_isbad(master,
+ offs + slave->offset))
+ slave->mtd.ecc_stats.badblocks++;
+ offs += slave->mtd.erasesize;
+ }
+ }
+
+out_register:
+ if (part->mtdp) {
+		/* store the object pointer (caller may or may not register it) */
+ *part->mtdp = &slave->mtd;
+ slave->registered = 0;
+ } else {
+ /* register our partition */
+ add_mtd_device(&slave->mtd);
+ slave->registered = 1;
+ }
+ return slave;
+}
+
+/*
+ * This function, given a master MTD object and a partition table, creates
+ * and registers slave MTD objects which are bound to the master according to
+ * the partition definitions.
+ *
+ * We don't register the master, or expect the caller to have done so,
+ * for reasons of data integrity.
+ */
+
+int add_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition *parts,
+ int nbparts)
+{
+ struct mtd_part *slave;
+ uint64_t cur_offset = 0;
+ int i;
+
+ /*
+ * Need to init the list here, since LIST_INIT() does not
+ * work on platforms where relocation has problems (like MIPS
+ * & PPC).
+ */
+ if (mtd_partitions.next == NULL)
+ INIT_LIST_HEAD(&mtd_partitions);
+
+ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+
+ for (i = 0; i < nbparts; i++) {
+ slave = add_one_partition(master, parts + i, i, cur_offset);
+ if (!slave)
+ return -ENOMEM;
+ cur_offset = slave->offset + slave->mtd.size;
+ }
+
+ return 0;
+}
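+
+#if 0
+/*
+ * Usage sketch only (illustrative, not built): a three-partition layout.
+ * The names, offsets and sizes are assumptions for the example; the
+ * MTDPART_* specials are the ones handled in add_one_partition() above.
+ */
+static const struct mtd_partition example_parts[] = {
+	{
+		.name	= "u-boot",
+		.offset	= 0,
+		.size	= 0x80000,
+	}, {
+		.name	= "kernel",
+		.offset	= MTDPART_OFS_APPEND,	/* directly behind "u-boot" */
+		.size	= 0x400000,
+	}, {
+		.name	= "rootfs",
+		.offset	= MTDPART_OFS_NXTBLK,	/* rounded up to an erase block */
+		.size	= MTDPART_SIZ_FULL,	/* rest of the device */
+	},
+};
+
+/* add_mtd_partitions(master, example_parts, ARRAY_SIZE(example_parts)); */
+#endif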
diff --git a/u-boot/drivers/mtd/mw_eeprom.c b/u-boot/drivers/mtd/mw_eeprom.c
new file mode 100644
index 0000000..f7791b51
--- /dev/null
+++ b/u-boot/drivers/mtd/mw_eeprom.c
@@ -0,0 +1,236 @@
+/* Three-wire (MicroWire) serial eeprom driver (for 93C46 and compatibles) */
+
+#include <common.h>
+#include <asm/ic/ssi.h>
+
+/*
+ * Serial EEPROM opcodes, including start bit
+ */
+#define EEP_OPC_ERASE 0x7 /* 3-bit opcode */
+#define EEP_OPC_WRITE 0x5 /* 3-bit opcode */
+#define EEP_OPC_READ 0x6 /* 3-bit opcode */
+
+#define EEP_OPC_ERASE_ALL 0x12 /* 5-bit opcode */
+#define EEP_OPC_ERASE_EN 0x13 /* 5-bit opcode */
+#define EEP_OPC_WRITE_ALL 0x11 /* 5-bit opcode */
+#define EEP_OPC_ERASE_DIS 0x10 /* 5-bit opcode */
+
+static int addrlen;
+
+static void mw_eeprom_select(int dev)
+{
+ ssi_set_interface(2048, 0, 0, 0);
+ ssi_chip_select(0);
+ udelay(1);
+ ssi_chip_select(dev);
+ udelay(1);
+}
+
+static int mw_eeprom_size(int dev)
+{
+ int x;
+ u16 res;
+
+ mw_eeprom_select(dev);
+ ssi_tx_byte(EEP_OPC_READ);
+
+ res = ssi_txrx_byte(0) << 8;
+ res |= ssi_rx_byte();
+ for (x = 0; x < 16; x++) {
+ if (! (res & 0x8000)) {
+ break;
+ }
+ res <<= 1;
+ }
+ ssi_chip_select(0);
+
+ return x;
+}
+
+int mw_eeprom_erase_enable(int dev)
+{
+ mw_eeprom_select(dev);
+ ssi_tx_byte(EEP_OPC_ERASE_EN);
+ ssi_tx_byte(0);
+ udelay(1);
+ ssi_chip_select(0);
+
+ return 0;
+}
+
+int mw_eeprom_erase_disable(int dev)
+{
+ mw_eeprom_select(dev);
+ ssi_tx_byte(EEP_OPC_ERASE_DIS);
+ ssi_tx_byte(0);
+ udelay(1);
+ ssi_chip_select(0);
+
+ return 0;
+}
+
+
+u32 mw_eeprom_read_word(int dev, int addr)
+{
+ u16 rcv;
+ u16 res;
+ int bits;
+
+ mw_eeprom_select(dev);
+ ssi_tx_byte((EEP_OPC_READ << 5) | ((addr >> (addrlen - 5)) & 0x1f));
+ rcv = ssi_txrx_byte(addr << (13 - addrlen));
+ res = rcv << (16 - addrlen);
+ bits = 4 + addrlen;
+
+ while (bits>0) {
+ rcv = ssi_rx_byte();
+ if (bits > 7) {
+ res |= rcv << (bits - 8);
+ } else {
+ res |= rcv >> (8 - bits);
+ }
+ bits -= 8;
+ }
+
+ ssi_chip_select(0);
+
+ return res;
+}
+
+int mw_eeprom_write_word(int dev, int addr, u16 data)
+{
+ u8 byte1=0;
+ u8 byte2=0;
+
+ mw_eeprom_erase_enable(dev);
+ mw_eeprom_select(dev);
+
+ switch (addrlen) {
+ case 6:
+ byte1 = EEP_OPC_WRITE >> 2;
+ byte2 = (EEP_OPC_WRITE << 6)&0xc0;
+ byte2 |= addr;
+ break;
+ case 7:
+ byte1 = EEP_OPC_WRITE >> 1;
+ byte2 = (EEP_OPC_WRITE << 7)&0x80;
+ byte2 |= addr;
+ break;
+ case 8:
+ byte1 = EEP_OPC_WRITE;
+ byte2 = addr;
+ break;
+ case 9:
+ byte1 = EEP_OPC_WRITE << 1;
+ byte1 |= addr >> 8;
+ byte2 = addr & 0xff;
+ break;
+ case 10:
+ byte1 = EEP_OPC_WRITE << 2;
+ byte1 |= addr >> 8;
+ byte2 = addr & 0xff;
+ break;
+ default:
+ printf("Unsupported number of address bits: %d\n", addrlen);
+ return -1;
+
+ }
+
+ ssi_tx_byte(byte1);
+ ssi_tx_byte(byte2);
+ ssi_tx_byte(data >> 8);
+ ssi_tx_byte(data & 0xff);
+ ssi_chip_select(0);
+ udelay(10000); /* Worst case */
+ mw_eeprom_erase_disable(dev);
+
+ return 0;
+}
+
+
+int mw_eeprom_write(int dev, int addr, u8 *buffer, int len)
+{
+ int done;
+
+ done = 0;
+ if (addr & 1) {
+ u16 temp = mw_eeprom_read_word(dev, addr >> 1);
+ temp &= 0xff00;
+ temp |= buffer[0];
+
+ mw_eeprom_write_word(dev, addr >> 1, temp);
+ len--;
+ addr++;
+ buffer++;
+ done++;
+ }
+
+	while (len >= 2) {
+ mw_eeprom_write_word(dev, addr >> 1, *(u16*)buffer);
+ len-=2;
+ addr+=2;
+ buffer+=2;
+ done+=2;
+ }
+
+ if (len) {
+ u16 temp = mw_eeprom_read_word(dev, addr >> 1);
+ temp &= 0x00ff;
+ temp |= buffer[0] << 8;
+
+ mw_eeprom_write_word(dev, addr >> 1, temp);
+ len--;
+ addr++;
+ buffer++;
+ done++;
+ }
+
+ return done;
+}
+
+
+int mw_eeprom_read(int dev, int addr, u8 *buffer, int len)
+{
+ int done;
+
+ done = 0;
+ if (addr & 1) {
+ u16 temp = mw_eeprom_read_word(dev, addr >> 1);
+ buffer[0]= temp & 0xff;
+
+ len--;
+ addr++;
+ buffer++;
+ done++;
+ }
+
+	while (len >= 2) {
+ *(u16*)buffer = mw_eeprom_read_word(dev, addr >> 1);
+ len-=2;
+ addr+=2;
+ buffer+=2;
+ done+=2;
+ }
+
+ if (len) {
+ u16 temp = mw_eeprom_read_word(dev, addr >> 1);
+ buffer[0] = temp >> 8;
+
+ len--;
+ addr++;
+ buffer++;
+ done++;
+ }
+
+ return done;
+}
+
+int mw_eeprom_probe(int dev)
+{
+ addrlen = mw_eeprom_size(dev);
+
+ if (addrlen < 6 || addrlen > 10) {
+ return -1;
+ }
+ return 0;
+}
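+
+#if 0
+/*
+ * Usage sketch only (illustrative, not built): probe the EEPROM first so
+ * the address length is known, then read a few bytes.  The chip-select
+ * number (1) is an assumption for the example.
+ */
+static void example_mw_eeprom(void)
+{
+	u8 buf[16];
+
+	if (mw_eeprom_probe(1) < 0)
+		return;
+	mw_eeprom_read(1, 0, buf, sizeof(buf));
+}
+#endif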
diff --git a/u-boot/drivers/mtd/nand/Makefile b/u-boot/drivers/mtd/nand/Makefile
new file mode 100644
index 0000000..8b598f6
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/Makefile
@@ -0,0 +1,71 @@
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# See file CREDITS for list of people who contributed to this
+# project.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+include $(TOPDIR)/config.mk
+
+LIB := $(obj)libnand.o
+
+ifdef CONFIG_CMD_NAND
+COBJS-y += nand.o
+COBJS-y += nand_base.o
+COBJS-y += nand_bbt.o
+COBJS-y += nand_ecc.o
+COBJS-y += nand_ids.o
+COBJS-y += nand_util.o
+
+COBJS-$(CONFIG_NAND_ATMEL) += atmel_nand.o
+COBJS-$(CONFIG_DRIVER_NAND_BFIN) += bfin_nand.o
+COBJS-$(CONFIG_NAND_DAVINCI) += davinci_nand.o
+COBJS-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_nand.o
+COBJS-$(CONFIG_NAND_FSL_UPM) += fsl_upm.o
+COBJS-$(CONFIG_NAND_KB9202) += kb9202_nand.o
+COBJS-$(CONFIG_NAND_KIRKWOOD) += kirkwood_nand.o
+COBJS-$(CONFIG_NAND_KMETER1) += kmeter1_nand.o
+COBJS-$(CONFIG_NAND_MPC5121_NFC) += mpc5121_nfc.o
+COBJS-$(CONFIG_NAND_MXC) += mxc_nand.o
+COBJS-$(CONFIG_NAND_NDFC) += ndfc.o
+COBJS-$(CONFIG_NAND_NOMADIK) += nomadik.o
+COBJS-$(CONFIG_NAND_S3C2410) += s3c2410_nand.o
+COBJS-$(CONFIG_NAND_S3C64XX) += s3c64xx.o
+COBJS-$(CONFIG_NAND_SPEAR) += spr_nand.o
+COBJS-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o
+COBJS-$(CONFIG_NAND_PLAT) += nand_plat.o
+endif
+
+COBJS := $(COBJS-y)
+SRCS := $(COBJS:.o=.c)
+OBJS := $(addprefix $(obj),$(COBJS))
+
+all: $(LIB)
+
+$(LIB): $(obj).depend $(OBJS)
+ $(call cmd_link_o_target, $(OBJS))
+
+#########################################################################
+
+# defines $(obj).depend target
+include $(SRCTREE)/rules.mk
+
+sinclude $(obj).depend
+
+#########################################################################
diff --git a/u-boot/drivers/mtd/nand/atmel_nand.c b/u-boot/drivers/mtd/nand/atmel_nand.c
new file mode 100644
index 0000000..ab8bbb3
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/atmel_nand.c
@@ -0,0 +1,343 @@
+/*
+ * (C) Copyright 2007-2008
+ * Stelian Pop <stelian.pop@leadtechdesign.com>
+ * Lead Tech Design <www.leadtechdesign.com>
+ *
+ * (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/at91_pio.h>
+
+#include <nand.h>
+
+#ifdef CONFIG_ATMEL_NAND_HWECC
+
+/* Register access macros */
+#define ecc_readl(add, reg) \
+ readl(AT91_BASE_SYS + add + ATMEL_ECC_##reg)
+#define ecc_writel(add, reg, value) \
+ writel((value), AT91_BASE_SYS + add + ATMEL_ECC_##reg)
+
+#include "atmel_nand_ecc.h" /* Hardware ECC registers */
+
+/* oob layout for large page size
+ * bad block info is on bytes 0 and 1
+ * the bytes have to be consecutive to avoid
+ * several NAND_CMD_RNDOUT during read
+ */
+static struct nand_ecclayout atmel_oobinfo_large = {
+ .eccbytes = 4,
+ .eccpos = {60, 61, 62, 63},
+ .oobfree = {
+ {2, 58}
+ },
+};
+
+/* oob layout for small page size
+ * bad block info is on bytes 4 and 5
+ * the bytes have to be consecutive to avoid
+ * several NAND_CMD_RNDOUT during read
+ */
+static struct nand_ecclayout atmel_oobinfo_small = {
+ .eccbytes = 4,
+ .eccpos = {0, 1, 2, 3},
+ .oobfree = {
+ {6, 10}
+ },
+};
+
+/*
+ * Calculate HW ECC
+ *
+ * function called after a write
+ *
+ * mtd: MTD block structure
+ * dat: raw data (unused)
+ * ecc_code: buffer for ECC
+ */
+static int atmel_nand_calculate(struct mtd_info *mtd,
+ const u_char *dat, unsigned char *ecc_code)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ unsigned int ecc_value;
+
+ /* get the first 2 ECC bytes */
+ ecc_value = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, PR);
+
+ ecc_code[0] = ecc_value & 0xFF;
+ ecc_code[1] = (ecc_value >> 8) & 0xFF;
+
+ /* get the last 2 ECC bytes */
+ ecc_value = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, NPR) & ATMEL_ECC_NPARITY;
+
+ ecc_code[2] = ecc_value & 0xFF;
+ ecc_code[3] = (ecc_value >> 8) & 0xFF;
+
+ return 0;
+}
+
+/*
+ * HW ECC read page function
+ *
+ * mtd: mtd info structure
+ * chip: nand chip info structure
+ * buf: buffer to store read data
+ */
+static int atmel_nand_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ uint8_t *ecc_pos;
+ int stat;
+
+ /* read the page */
+ chip->read_buf(mtd, p, eccsize);
+
+ /* move to ECC position if needed */
+ if (eccpos[0] != 0) {
+ /* This only works on large pages
+ * because the ECC controller waits for
+ * NAND_CMD_RNDOUTSTART after the
+ * NAND_CMD_RNDOUT.
+		 * Anyway, for small pages, eccpos[0] == 0.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + eccpos[0], -1);
+ }
+
+ /* the ECC controller needs to read the ECC just after the data */
+ ecc_pos = oob + eccpos[0];
+ chip->read_buf(mtd, ecc_pos, eccbytes);
+
+ /* check if there's an error */
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+
+ /* get back to oob start (end of page) */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+
+ /* read the oob */
+ chip->read_buf(mtd, oob, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * HW ECC Correction
+ *
+ * function called after a read
+ *
+ * mtd: MTD block structure
+ * dat: raw data read from the chip
+ * read_ecc: ECC from the chip (unused)
+ * isnull: unused
+ *
+ * Detect and correct a 1 bit error for a page
+ */
+static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *isnull)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ unsigned int ecc_status, ecc_parity, ecc_mode;
+ unsigned int ecc_word, ecc_bit;
+
+ /* get the status from the Status Register */
+ ecc_status = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, SR);
+
+ /* if there's no error */
+ if (likely(!(ecc_status & ATMEL_ECC_RECERR)))
+ return 0;
+
+ /* get error bit offset (4 bits) */
+ ecc_bit = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, PR) & ATMEL_ECC_BITADDR;
+ /* get word address (12 bits) */
+ ecc_word = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, PR) & ATMEL_ECC_WORDADDR;
+ ecc_word >>= 4;
+
+ /* if there are multiple errors */
+ if (ecc_status & ATMEL_ECC_MULERR) {
+ /* check if it is a freshly erased block
+ * (filled with 0xff) */
+ if ((ecc_bit == ATMEL_ECC_BITADDR)
+ && (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) {
+ /* the block has just been erased, return OK */
+ return 0;
+ }
+		/* it doesn't seem to be a freshly
+ * erased block.
+ * We can't correct so many errors */
+ printk(KERN_WARNING "atmel_nand : multiple errors detected."
+ " Unable to correct.\n");
+ return -EIO;
+ }
+
+ /* if there's a single bit error : we can correct it */
+ if (ecc_status & ATMEL_ECC_ECCERR) {
+ /* there's nothing much to do here.
+ * the bit error is on the ECC itself.
+ */
+ printk(KERN_WARNING "atmel_nand : one bit error on ECC code."
+ " Nothing to correct\n");
+ return 0;
+ }
+
+ printk(KERN_WARNING "atmel_nand : one bit error on data."
+ " (word offset in the page :"
+ " 0x%x bit offset : 0x%x)\n",
+ ecc_word, ecc_bit);
+ /* correct the error */
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ /* 16 bits words */
+ ((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit);
+ } else {
+ /* 8 bits words */
+ dat[ecc_word] ^= (1 << ecc_bit);
+ }
+ printk(KERN_WARNING "atmel_nand : error corrected\n");
+ return 1;
+}
+
+/*
+ * Enable HW ECC : unused on most chips
+ */
+static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
+{
+}
+#endif
+
+static void at91_nand_hwcontrol(struct mtd_info *mtd,
+ int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ ulong IO_ADDR_W = (ulong) this->IO_ADDR_W;
+ IO_ADDR_W &= ~(CONFIG_SYS_NAND_MASK_ALE
+ | CONFIG_SYS_NAND_MASK_CLE);
+
+ if (ctrl & NAND_CLE)
+ IO_ADDR_W |= CONFIG_SYS_NAND_MASK_CLE;
+ if (ctrl & NAND_ALE)
+ IO_ADDR_W |= CONFIG_SYS_NAND_MASK_ALE;
+
+ at91_set_gpio_value(CONFIG_SYS_NAND_ENABLE_PIN,
+ !(ctrl & NAND_NCE));
+ this->IO_ADDR_W = (void *) IO_ADDR_W;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+#ifdef CONFIG_SYS_NAND_READY_PIN
+static int at91_nand_ready(struct mtd_info *mtd)
+{
+ return at91_get_gpio_value(CONFIG_SYS_NAND_READY_PIN);
+}
+#endif
+
+int board_nand_init(struct nand_chip *nand)
+{
+#ifdef CONFIG_ATMEL_NAND_HWECC
+ static int chip_nr = 0;
+ struct mtd_info *mtd;
+#endif
+
+ nand->ecc.mode = NAND_ECC_SOFT;
+#ifdef CONFIG_SYS_NAND_DBW_16
+ nand->options = NAND_BUSWIDTH_16;
+#endif
+ nand->cmd_ctrl = at91_nand_hwcontrol;
+#ifdef CONFIG_SYS_NAND_READY_PIN
+ nand->dev_ready = at91_nand_ready;
+#endif
+ nand->chip_delay = 20;
+
+#ifdef CONFIG_ATMEL_NAND_HWECC
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.calculate = atmel_nand_calculate;
+ nand->ecc.correct = atmel_nand_correct;
+ nand->ecc.hwctl = atmel_nand_hwctl;
+ nand->ecc.read_page = atmel_nand_read_page;
+ nand->ecc.bytes = 4;
+#endif
+
+#ifdef CONFIG_ATMEL_NAND_HWECC
+ mtd = &nand_info[chip_nr++];
+ mtd->priv = nand;
+
+ /* Detect NAND chips */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ printk(KERN_WARNING "NAND Flash not found !\n");
+ return -ENXIO;
+ }
+
+ if (nand->ecc.mode == NAND_ECC_HW) {
+ /* ECC is calculated for the whole page (1 step) */
+ nand->ecc.size = mtd->writesize;
+
+ /* set ECC page size and oob layout */
+ switch (mtd->writesize) {
+ case 512:
+ nand->ecc.layout = &atmel_oobinfo_small;
+ ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR, ATMEL_ECC_PAGESIZE_528);
+ break;
+ case 1024:
+ nand->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR, ATMEL_ECC_PAGESIZE_1056);
+ break;
+ case 2048:
+ nand->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR, ATMEL_ECC_PAGESIZE_2112);
+ break;
+ case 4096:
+ nand->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR, ATMEL_ECC_PAGESIZE_4224);
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ nand->ecc.mode = NAND_ECC_SOFT;
+ nand->ecc.calculate = NULL;
+ nand->ecc.correct = NULL;
+ nand->ecc.hwctl = NULL;
+ nand->ecc.read_page = NULL;
+ nand->ecc.postpad = 0;
+ nand->ecc.prepad = 0;
+ nand->ecc.bytes = 0;
+ break;
+ }
+ }
+#endif
+
+ return 0;
+}
diff --git a/u-boot/drivers/mtd/nand/atmel_nand_ecc.h b/u-boot/drivers/mtd/nand/atmel_nand_ecc.h
new file mode 100644
index 0000000..1ee7f99
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/atmel_nand_ecc.h
@@ -0,0 +1,36 @@
+/*
+ * Error Corrected Code Controller (ECC) - System peripheral registers.
+ * Based on AT91SAM9260 datasheet revision B.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ATMEL_NAND_ECC_H
+#define ATMEL_NAND_ECC_H
+
+#define ATMEL_ECC_CR 0x00 /* Control register */
+#define ATMEL_ECC_RST (1 << 0) /* Reset parity */
+
+#define ATMEL_ECC_MR 0x04 /* Mode register */
+#define ATMEL_ECC_PAGESIZE (3 << 0) /* Page Size */
+#define ATMEL_ECC_PAGESIZE_528 (0)
+#define ATMEL_ECC_PAGESIZE_1056 (1)
+#define ATMEL_ECC_PAGESIZE_2112 (2)
+#define ATMEL_ECC_PAGESIZE_4224 (3)
+
+#define ATMEL_ECC_SR 0x08 /* Status register */
+#define ATMEL_ECC_RECERR (1 << 0) /* Recoverable Error */
+#define ATMEL_ECC_ECCERR (1 << 1) /* ECC Single Bit Error */
+#define ATMEL_ECC_MULERR (1 << 2) /* Multiple Errors */
+
+#define ATMEL_ECC_PR 0x0c /* Parity register */
+#define ATMEL_ECC_BITADDR (0xf << 0) /* Bit Error Address */
+#define ATMEL_ECC_WORDADDR (0xfff << 4) /* Word Error Address */
+
+#define ATMEL_ECC_NPR 0x10 /* NParity register */
+#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
+
+#endif
diff --git a/u-boot/drivers/mtd/nand/bfin_nand.c b/u-boot/drivers/mtd/nand/bfin_nand.c
new file mode 100644
index 0000000..3ee060f
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/bfin_nand.c
@@ -0,0 +1,391 @@
+/*
+ * Driver for Blackfin on-chip NAND controller.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright (c) 2007-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/* TODO:
+ * - move bit defines into mach-common/bits/nand.h
+ * - try and replace all IRQSTAT usage with STAT polling
+ * - have software ecc mode use same algo as hw ecc ?
+ */
+
+#include <common.h>
+#include <asm/io.h>
+
+#ifdef DEBUG
+# define pr_stamp() printf("%s:%s:%i: here i am\n", __FILE__, __func__, __LINE__)
+#else
+# define pr_stamp()
+#endif
+
+#include <nand.h>
+
+#include <asm/blackfin.h>
+#include <asm/portmux.h>
+
+/* Bit masks for NFC_CTL */
+
+#define WR_DLY 0xf /* Write Strobe Delay */
+#define RD_DLY 0xf0 /* Read Strobe Delay */
+#define NWIDTH 0x100 /* NAND Data Width */
+#define PG_SIZE 0x200 /* Page Size */
+
+/* Bit masks for NFC_STAT */
+
+#define NBUSY 0x1 /* Not Busy */
+#define WB_FULL 0x2 /* Write Buffer Full */
+#define PG_WR_STAT 0x4 /* Page Write Pending */
+#define PG_RD_STAT 0x8 /* Page Read Pending */
+#define WB_EMPTY 0x10 /* Write Buffer Empty */
+
+/* Bit masks for NFC_IRQSTAT */
+
+#define NBUSYIRQ 0x1 /* Not Busy IRQ */
+#define WB_OVF 0x2 /* Write Buffer Overflow */
+#define WB_EDGE 0x4 /* Write Buffer Edge Detect */
+#define RD_RDY 0x8 /* Read Data Ready */
+#define WR_DONE 0x10 /* Page Write Done */
+
+#define NAND_IS_512() (CONFIG_BFIN_NFC_CTL_VAL & 0x200)
+
+/*
+ * hardware specific access to control-lines
+ */
+static void bfin_nfc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ pr_stamp();
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ while (bfin_read_NFC_STAT() & WB_FULL)
+ continue;
+
+ if (ctrl & NAND_CLE)
+ bfin_write_NFC_CMD(cmd);
+ else
+ bfin_write_NFC_ADDR(cmd);
+ SSYNC();
+}
+
+int bfin_nfc_devready(struct mtd_info *mtd)
+{
+ pr_stamp();
+ return (bfin_read_NFC_STAT() & NBUSY) ? 1 : 0;
+}
+
+/*
+ * PIO mode for buffer writing and reading
+ */
+static void bfin_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ pr_stamp();
+
+ int i;
+
+ /*
+ * Data reads are requested by first writing to NFC_DATA_RD
+ * and then reading back from NFC_READ.
+ */
+ for (i = 0; i < len; ++i) {
+ while (bfin_read_NFC_STAT() & WB_FULL)
+ if (ctrlc())
+ return;
+
+ /* Contents do not matter */
+ bfin_write_NFC_DATA_RD(0x0000);
+ SSYNC();
+
+ while (!(bfin_read_NFC_IRQSTAT() & RD_RDY))
+ if (ctrlc())
+ return;
+
+ buf[i] = bfin_read_NFC_READ();
+
+ bfin_write_NFC_IRQSTAT(RD_RDY);
+ }
+}
+
+static uint8_t bfin_nfc_read_byte(struct mtd_info *mtd)
+{
+ pr_stamp();
+
+ uint8_t val;
+ bfin_nfc_read_buf(mtd, &val, 1);
+ return val;
+}
+
+static void bfin_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ pr_stamp();
+
+ int i;
+
+ for (i = 0; i < len; ++i) {
+ while (bfin_read_NFC_STAT() & WB_FULL)
+ if (ctrlc())
+ return;
+
+ bfin_write_NFC_DATA_WR(buf[i]);
+ }
+
+ /* Wait for the buffer to drain before we return */
+ while (!(bfin_read_NFC_STAT() & WB_EMPTY))
+ if (ctrlc())
+ return;
+}
+
+/*
+ * ECC functions
+ * These allow the bfin to use the controller's ECC
+ * generator block to ECC the data as it passes through
+ */
+
+/*
+ * ECC error correction function
+ */
+static int bfin_nfc_correct_data_256(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ u32 syndrome[5];
+ u32 calced, stored;
+ unsigned short failing_bit, failing_byte;
+ u_char data;
+
+ pr_stamp();
+
+ calced = calc_ecc[0] | (calc_ecc[1] << 8) | (calc_ecc[2] << 16);
+ stored = read_ecc[0] | (read_ecc[1] << 8) | (read_ecc[2] << 16);
+
+ syndrome[0] = (calced ^ stored);
+
+ /*
+ * syndrome 0: all zero
+ * No error in data
+ * No action
+ */
+ if (!syndrome[0] || !calced || !stored)
+ return 0;
+
+ /*
+ * syndrome 0: only one bit is one
+ * ECC data was incorrect
+ * No action
+ */
+ if (hweight32(syndrome[0]) == 1)
+ return 1;
+
+ syndrome[1] = (calced & 0x7FF) ^ (stored & 0x7FF);
+ syndrome[2] = (calced & 0x7FF) ^ ((calced >> 11) & 0x7FF);
+ syndrome[3] = (stored & 0x7FF) ^ ((stored >> 11) & 0x7FF);
+ syndrome[4] = syndrome[2] ^ syndrome[3];
+
+ /*
+ * syndrome 0: exactly 11 bits are one, each parity
+ * and parity' pair is 1 & 0 or 0 & 1.
+ * 1-bit correctable error
+ * Correct the error
+ */
+ if (hweight32(syndrome[0]) == 11 && syndrome[4] == 0x7FF) {
+ failing_bit = syndrome[1] & 0x7;
+ failing_byte = syndrome[1] >> 0x3;
+ data = *(dat + failing_byte);
+ data = data ^ (0x1 << failing_bit);
+ *(dat + failing_byte) = data;
+
+ return 0;
+ }
+
+ /*
+ * syndrome 0: random data
+ * More than 1-bit error, non-correctable error
+ * Discard data, mark bad block
+ */
+
+ return 1;
+}
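The comments above describe three syndrome shapes: all zero (nothing to do), a single set bit (the stored ECC itself was corrupted, data is intact), and exactly 11 set bits whose parity/parity' halves disagree in every position (one correctable data bit, located by the low syndrome word). A standalone sketch of that classification, using __builtin_popcount in place of the kernel's hweight32; the parity words in main() are invented and only exercise the "stored ECC was hit" branch, and the return convention is simplified for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the decision tree in bfin_nfc_correct_data_256() for one
 * 256-byte step. Simplified returns: 0 = data clean or corrected in
 * place, 1 = anything else (ECC bytes hit, or uncorrectable). */
static int classify(uint32_t calced, uint32_t stored, uint8_t *dat)
{
	uint32_t syn0 = calced ^ stored;
	uint32_t syn1, syn4;

	if (!syn0 || !calced || !stored)
		return 0;				/* no error */
	if (__builtin_popcount(syn0) == 1)
		return 1;				/* stored ECC was hit */

	syn1 = (calced & 0x7FF) ^ (stored & 0x7FF);
	syn4 = ((calced & 0x7FF) ^ ((calced >> 11) & 0x7FF)) ^
	       ((stored & 0x7FF) ^ ((stored >> 11) & 0x7FF));

	if (__builtin_popcount(syn0) == 11 && syn4 == 0x7FF) {
		dat[syn1 >> 3] ^= 1 << (syn1 & 0x7);	/* correct the bad bit */
		return 0;
	}
	return 1;					/* uncorrectable */
}

int main(void)
{
	uint8_t dat[256] = { 0 };
	uint32_t calced = 0x2AAAAA;			/* invented parity words */

	printf("%d\n", classify(calced, calced ^ 1, dat));	/* prints 1 */
	return 0;
}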
+
+static int bfin_nfc_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ int ret;
+
+ pr_stamp();
+
+ ret = bfin_nfc_correct_data_256(mtd, dat, read_ecc, calc_ecc);
+
+ /* If page size is 512, correct second 256 bytes */
+ if (NAND_IS_512()) {
+ dat += 256;
+ read_ecc += 8;
+ calc_ecc += 8;
+ ret |= bfin_nfc_correct_data_256(mtd, dat, read_ecc, calc_ecc);
+ }
+
+ return ret;
+}
+
+static void reset_ecc(void)
+{
+ bfin_write_NFC_RST(0x1);
+ while (bfin_read_NFC_RST() & 1)
+ continue;
+}
+
+static void bfin_nfc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ reset_ecc();
+}
+
+static int bfin_nfc_calculate_ecc(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ u16 ecc0, ecc1;
+ u32 code[2];
+ u8 *p;
+
+ pr_stamp();
+
+ /* first 4 bytes ECC code for 256 page size */
+ ecc0 = bfin_read_NFC_ECC0();
+ ecc1 = bfin_read_NFC_ECC1();
+
+ code[0] = (ecc0 & 0x7FF) | ((ecc1 & 0x7FF) << 11);
+
+ /* first 3 bytes in ecc_code for 256 page size */
+ p = (u8 *) code;
+ memcpy(ecc_code, p, 3);
+
+ /* second 4 bytes ECC code for 512 page size */
+ if (NAND_IS_512()) {
+ ecc0 = bfin_read_NFC_ECC2();
+ ecc1 = bfin_read_NFC_ECC3();
+ code[1] = (ecc0 & 0x7FF) | ((ecc1 & 0x7FF) << 11);
+
+ /* second 3 bytes in ecc_code for second 256
+ * bytes of 512 page size
+ */
+ p = (u8 *) (code + 1);
+ memcpy((ecc_code + 3), p, 3);
+ }
+
+ reset_ecc();
+
+ return 0;
+}
+
+#ifdef CONFIG_BFIN_NFC_BOOTROM_ECC
+# define BOOTROM_ECC 1
+#else
+# define BOOTROM_ECC 0
+#endif
+
+static uint8_t bbt_pattern[] = { 0xff };
+
+static struct nand_bbt_descr bootrom_bbt = {
+ .options = 0,
+ .offs = 63,
+ .len = 1,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_ecclayout bootrom_ecclayout = {
+ .eccbytes = 24,
+ .eccpos = {
+ 0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2,
+ 0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2,
+ 0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2,
+ 0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2,
+ 0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2,
+ 0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2,
+ 0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2,
+ 0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2
+ },
+ .oobfree = {
+ { 0x8 * 0 + 3, 5 },
+ { 0x8 * 1 + 3, 5 },
+ { 0x8 * 2 + 3, 5 },
+ { 0x8 * 3 + 3, 5 },
+ { 0x8 * 4 + 3, 5 },
+ { 0x8 * 5 + 3, 5 },
+ { 0x8 * 6 + 3, 5 },
+ { 0x8 * 7 + 3, 5 },
+ }
+};
+
+/*
+ * Board-specific NAND initialization. The following members of the
+ * argument are board-specific (per include/linux/mtd/nand.h):
+ * - IO_ADDR_R?: address to read the 8 I/O lines of the flash device
+ * - IO_ADDR_W?: address to write the 8 I/O lines of the flash device
+ * - cmd_ctrl: hardware-specific function for accessing control lines
+ * - dev_ready: hardware-specific function for accessing the device ready/busy line
+ * - enable_hwecc?: function to enable (reset) hardware ecc generator. Must
+ * only be provided if a hardware ECC is available
+ * - ecc.mode: mode of ecc, see defines
+ * - chip_delay: chip-dependent delay for transferring data from array to
+ * read regs (tR)
+ * - options: various chip options. They can partly be set to inform
+ * nand_scan about special functionality. See the defines for further
+ * explanation
+ * Members with a "?" were not set in the merged testing-NAND branch,
+ * so they are not set here either.
+ */
+int board_nand_init(struct nand_chip *chip)
+{
+ const unsigned short pins[] = {
+ P_NAND_CE, P_NAND_RB, P_NAND_D0, P_NAND_D1, P_NAND_D2,
+ P_NAND_D3, P_NAND_D4, P_NAND_D5, P_NAND_D6, P_NAND_D7,
+ P_NAND_WE, P_NAND_RE, P_NAND_CLE, P_NAND_ALE, 0,
+ };
+
+ pr_stamp();
+
+ /* set width/ecc/timings/etc... */
+ bfin_write_NFC_CTL(CONFIG_BFIN_NFC_CTL_VAL);
+
+ /* clear interrupt status */
+ bfin_write_NFC_IRQMASK(0x0);
+ bfin_write_NFC_IRQSTAT(0xffff);
+
+ /* enable GPIO function enable register */
+ peripheral_request_list(pins, "bfin_nand");
+
+ chip->cmd_ctrl = bfin_nfc_cmd_ctrl;
+ chip->read_buf = bfin_nfc_read_buf;
+ chip->write_buf = bfin_nfc_write_buf;
+ chip->read_byte = bfin_nfc_read_byte;
+
+#ifdef CONFIG_BFIN_NFC_NO_HW_ECC
+# define ECC_HW 0
+#else
+# define ECC_HW 1
+#endif
+ if (ECC_HW) {
+ if (BOOTROM_ECC) {
+ chip->badblock_pattern = &bootrom_bbt;
+ chip->ecc.layout = &bootrom_ecclayout;
+ }
+ if (!NAND_IS_512()) {
+ chip->ecc.bytes = 3;
+ chip->ecc.size = 256;
+ } else {
+ chip->ecc.bytes = 6;
+ chip->ecc.size = 512;
+ }
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.calculate = bfin_nfc_calculate_ecc;
+ chip->ecc.correct = bfin_nfc_correct_data;
+ chip->ecc.hwctl = bfin_nfc_enable_hwecc;
+ } else
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->dev_ready = bfin_nfc_devready;
+ chip->chip_delay = 0;
+
+ return 0;
+}
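board_nand_init() above leaves timing and geometry entirely to CONFIG_BFIN_NFC_CTL_VAL, and the same value's PG_SIZE bit (0x200) is what NAND_IS_512() checks when picking 256- vs 512-byte ECC steps. A hypothetical board-config fragment to make that relationship concrete; the strobe delays are placeholders, not recommended timings for any real part:

/* hypothetical include/configs/<board>.h fragment */
#define CONFIG_BFIN_NFC_CTL_VAL					\
	(0x3		/* WR_DLY: write strobe delay (placeholder) */	| \
	 (0x3 << 4)	/* RD_DLY: read strobe delay (placeholder) */	| \
	 0x200)		/* PG_SIZE: 512-byte pages, so 512-byte ECC steps */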
diff --git a/u-boot/drivers/mtd/nand/davinci_nand.c b/u-boot/drivers/mtd/nand/davinci_nand.c
new file mode 100644
index 0000000..d41579c
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/davinci_nand.c
@@ -0,0 +1,648 @@
+/*
+ * NAND driver for TI DaVinci based boards.
+ *
+ * Copyright (C) 2007 Sergey Kubushyn <ksi@koi8.net>
+ *
+ * Based on Linux DaVinci NAND driver by TI. Original copyright follows:
+ */
+
+/*
+ *
+ * linux/drivers/mtd/nand/nand_davinci.c
+ *
+ * NAND Flash Driver
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * DaVinci board which utilizes the Samsung k9k2g08 part.
+ *
+ Modifications:
+ ver. 1.0: Feb 2005, Vinod/Sudhakar
+ -
+ *
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <nand.h>
+#include <asm/arch/nand_defs.h>
+#include <asm/arch/emif_defs.h>
+
+/* Definitions for 4-bit hardware ECC */
+#define NAND_TIMEOUT 10240
+#define NAND_ECC_BUSY 0xC
+#define NAND_4BITECC_MASK 0x03FF03FF
+#define EMIF_NANDFSR_ECC_STATE_MASK 0x00000F00
+#define ECC_STATE_NO_ERR 0x0
+#define ECC_STATE_TOO_MANY_ERRS 0x1
+#define ECC_STATE_ERR_CORR_COMP_P 0x2
+#define ECC_STATE_ERR_CORR_COMP_N 0x3
+
+/*
+ * Exploit the little endianness of the ARM to do multi-byte transfers
+ * per device read. This can perform over twice as quickly as individual
+ * byte transfers when buffer alignment is conducive.
+ *
+ * NOTE: This only works if the NAND is not connected to the 2 LSBs of
+ * the address bus. On Davinci EVM platforms this has always been true.
+ */
+static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ const u32 *nand = chip->IO_ADDR_R;
+
+ /* Make sure that buf is 32 bit aligned */
+ if (((int)buf & 0x3) != 0) {
+ if (((int)buf & 0x1) != 0) {
+ if (len) {
+ *buf = readb(nand);
+ buf += 1;
+ len--;
+ }
+ }
+
+ if (((int)buf & 0x3) != 0) {
+ if (len >= 2) {
+ *(u16 *)buf = readw(nand);
+ buf += 2;
+ len -= 2;
+ }
+ }
+ }
+
+ /* copy aligned data */
+ while (len >= 4) {
+ *(u32 *)buf = __raw_readl(nand);
+ buf += 4;
+ len -= 4;
+ }
+
+ /* mop up any remaining bytes */
+ if (len) {
+ if (len >= 2) {
+ *(u16 *)buf = readw(nand);
+ buf += 2;
+ len -= 2;
+ }
+
+ if (len)
+ *buf = readb(nand);
+ }
+}
+
+static void nand_davinci_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ const u32 *nand = chip->IO_ADDR_W;
+
+ /* Make sure that buf is 32 bit aligned */
+ if (((int)buf & 0x3) != 0) {
+ if (((int)buf & 0x1) != 0) {
+ if (len) {
+ writeb(*buf, nand);
+ buf += 1;
+ len--;
+ }
+ }
+
+ if (((int)buf & 0x3) != 0) {
+ if (len >= 2) {
+ writew(*(u16 *)buf, nand);
+ buf += 2;
+ len -= 2;
+ }
+ }
+ }
+
+ /* copy aligned data */
+ while (len >= 4) {
+ __raw_writel(*(u32 *)buf, nand);
+ buf += 4;
+ len -= 4;
+ }
+
+ /* mop up any remaining bytes */
+ if (len) {
+ if (len >= 2) {
+ writew(*(u16 *)buf, nand);
+ buf += 2;
+ len -= 2;
+ }
+
+ if (len)
+ writeb(*buf, nand);
+ }
+}
+
+static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ u_int32_t IO_ADDR_W = (u_int32_t)this->IO_ADDR_W;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ IO_ADDR_W &= ~(MASK_ALE|MASK_CLE);
+
+ if (ctrl & NAND_CLE)
+ IO_ADDR_W |= MASK_CLE;
+ if (ctrl & NAND_ALE)
+ IO_ADDR_W |= MASK_ALE;
+ this->IO_ADDR_W = (void __iomem *) IO_ADDR_W;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, IO_ADDR_W);
+}
+
+#ifdef CONFIG_SYS_NAND_HW_ECC
+
+static void nand_davinci_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ u_int32_t val;
+
+ (void)__raw_readl(&(davinci_emif_regs->nandfecc[
+ CONFIG_SYS_NAND_CS - 2]));
+
+ val = __raw_readl(&davinci_emif_regs->nandfcr);
+ val |= DAVINCI_NANDFCR_NAND_ENABLE(CONFIG_SYS_NAND_CS);
+ val |= DAVINCI_NANDFCR_1BIT_ECC_START(CONFIG_SYS_NAND_CS);
+ __raw_writel(val, &davinci_emif_regs->nandfcr);
+}
+
+static u_int32_t nand_davinci_readecc(struct mtd_info *mtd, u_int32_t region)
+{
+ u_int32_t ecc = 0;
+
+ ecc = __raw_readl(&(davinci_emif_regs->nandfecc[region - 1]));
+
+ return ecc;
+}
+
+static int nand_davinci_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ u_int32_t tmp;
+ const int region = 1;
+
+ tmp = nand_davinci_readecc(mtd, region);
+
+ /* Squeeze 4 bytes ECC into 3 bytes by removing RESERVED bits
+ * and shifting. RESERVED bits are 31 to 28 and 15 to 12. */
+ tmp = (tmp & 0x00000fff) | ((tmp & 0x0fff0000) >> 4);
+
+ /* Invert so that erased block ECC is correct */
+ tmp = ~tmp;
+
+ *ecc_code++ = tmp;
+ *ecc_code++ = tmp >> 8;
+ *ecc_code++ = tmp >> 16;
+
+ /* NOTE: the above code matches mainline Linux:
+ * .PQR.stu ==> ~PQRstu
+ *
+ * MontaVista/TI kernels encode those bytes differently, use
+ * complicated (and allegedly sometimes-wrong) correction code,
+ * and were usually shipped with a U-Boot that uses software ECC:
+ * .PQR.stu ==> PsQRtu
+ *
+ * If you need MV/TI compatible NAND I/O in U-Boot, it should
+ * be possible to (a) change the mangling above, (b) reverse
+ * that mangling in nand_davinci_correct_data() below.
+ */
+
+ return 0;
+}
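For reference, undoing the packing above is the mirror image: reassemble the three stored bytes, invert, and spread the 24 bits back around the reserved nibbles so the result is directly comparable with a fresh NANDFECC read. A small sketch of that inverse; nand_davinci_unpack_ecc is an illustrative name only, the driver itself never needs it because correct_data works on the packed form:

#include <stdint.h>

/* Inverse of the squeeze in nand_davinci_calculate_ecc(): 3 stored
 * bytes -> 32-bit register layout, with the reserved nibbles cleared. */
static uint32_t nand_davinci_unpack_ecc(const uint8_t ecc_code[3])
{
	uint32_t tmp = ecc_code[0] | (ecc_code[1] << 8) | (ecc_code[2] << 16);

	tmp = ~tmp & 0x00ffffff;		/* undo the inversion */
	return (tmp & 0x00000fff) |		/* bits 11..0 stay in place */
	       ((tmp & 0x00fff000) << 4);	/* bits 23..12 back up to 27..16 */
}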
+
+static int nand_davinci_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *this = mtd->priv;
+ u_int32_t ecc_nand = read_ecc[0] | (read_ecc[1] << 8) |
+ (read_ecc[2] << 16);
+ u_int32_t ecc_calc = calc_ecc[0] | (calc_ecc[1] << 8) |
+ (calc_ecc[2] << 16);
+ u_int32_t diff = ecc_calc ^ ecc_nand;
+
+ if (diff) {
+ if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
+ /* Correctable error */
+ if ((diff >> (12 + 3)) < this->ecc.size) {
+ uint8_t find_bit = 1 << ((diff >> 12) & 7);
+ uint32_t find_byte = diff >> (12 + 3);
+
+ dat[find_byte] ^= find_bit;
+ MTDDEBUG(MTD_DEBUG_LEVEL0, "Correcting single "
+ "bit ECC error at offset: %d, bit: "
+ "%d\n", find_byte, find_bit);
+ return 1;
+ } else {
+ return -1;
+ }
+ } else if (!(diff & (diff - 1))) {
+ /* Single bit ECC error in the ECC itself,
+ nothing to fix */
+ MTDDEBUG(MTD_DEBUG_LEVEL0, "Single bit ECC error in "
+ "ECC.\n");
+ return 1;
+ } else {
+ /* Uncorrectable error */
+ MTDDEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+#endif /* CONFIG_SYS_NAND_HW_ECC */
+
+#ifdef CONFIG_SYS_NAND_4BIT_HW_ECC_OOBFIRST
+static struct nand_ecclayout nand_davinci_4bit_layout_oobfirst = {
+#if defined(CONFIG_SYS_NAND_PAGE_2K)
+ .eccbytes = 40,
+ .eccpos = {
+ 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63,
+ },
+ .oobfree = {
+ {.offset = 2, .length = 22, },
+ },
+#elif defined(CONFIG_SYS_NAND_PAGE_4K)
+ .eccbytes = 80,
+ .eccpos = {
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ },
+ .oobfree = {
+ {.offset = 2, .length = 46, },
+ },
+#endif
+};
+
+static void nand_davinci_4bit_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ u32 val;
+
+ switch (mode) {
+ case NAND_ECC_WRITE:
+ case NAND_ECC_READ:
+ /*
+ * Start a new ECC calculation for reading or writing 512 bytes
+ * of data.
+ */
+ val = __raw_readl(&davinci_emif_regs->nandfcr);
+ val &= ~DAVINCI_NANDFCR_4BIT_ECC_SEL_MASK;
+ val |= DAVINCI_NANDFCR_NAND_ENABLE(CONFIG_SYS_NAND_CS);
+ val |= DAVINCI_NANDFCR_4BIT_ECC_SEL(CONFIG_SYS_NAND_CS);
+ val |= DAVINCI_NANDFCR_4BIT_ECC_START;
+ __raw_writel(val, &davinci_emif_regs->nandfcr);
+ break;
+ case NAND_ECC_READSYN:
+ val = __raw_readl(&davinci_emif_regs->nand4bitecc[0]);
+ break;
+ default:
+ break;
+ }
+}
+
+static u32 nand_davinci_4bit_readecc(struct mtd_info *mtd, unsigned int ecc[4])
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ ecc[i] = __raw_readl(&davinci_emif_regs->nand4bitecc[i]) &
+ NAND_4BITECC_MASK;
+ }
+
+ return 0;
+}
+
+static int nand_davinci_4bit_calculate_ecc(struct mtd_info *mtd,
+ const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ unsigned int hw_4ecc[4];
+ unsigned int i;
+
+ nand_davinci_4bit_readecc(mtd, hw_4ecc);
+
+ /* Convert 10 bit ecc values to 8 bit */
+ for (i = 0; i < 2; i++) {
+ unsigned int hw_ecc_low = hw_4ecc[i * 2];
+ unsigned int hw_ecc_hi = hw_4ecc[(i * 2) + 1];
+
+ /* Take first 8 bits from val1 (count1=0) or val5 (count1=1) */
+ *ecc_code++ = hw_ecc_low & 0xFF;
+
+ /*
+ * Take 2 bits as LSB bits from val1 (count1=0) or val5
+ * (count1=1) and 6 bits from val2 (count1=0) or
+ * val5 (count1=1)
+ */
+ *ecc_code++ =
+ ((hw_ecc_low >> 8) & 0x3) | ((hw_ecc_low >> 14) & 0xFC);
+
+ /*
+ * Take 4 bits from val2 (count1=0) or val5 (count1=1) and
+ * 4 bits from val3 (count1=0) or val6 (count1=1)
+ */
+ *ecc_code++ =
+ ((hw_ecc_low >> 22) & 0xF) | ((hw_ecc_hi << 4) & 0xF0);
+
+ /*
+ * Take 6 bits from val3(count1=0) or val6 (count1=1) and
+ * 2 bits from val4 (count1=0) or val7 (count1=1)
+ */
+ *ecc_code++ =
+ ((hw_ecc_hi >> 4) & 0x3F) | ((hw_ecc_hi >> 10) & 0xC0);
+
+ /* Take 8 bits from val4 (count1=0) or val7 (count1=1) */
+ *ecc_code++ = (hw_ecc_hi >> 18) & 0xFF;
+ }
+
+ return 0;
+}
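The byte shuffling above amounts to streaming the 10-bit parity words out LSB-first into a byte buffer, five bytes per pair of syndrome registers. A generic host-side sketch of that layout, handy for checking the shifts by eye; pack10/unpack10 are illustrative names, not driver functions, and the parity words in main() are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stream 'n' 10-bit words into a byte buffer, LSB first. */
static void pack10(const uint16_t *w, int n, uint8_t *out)
{
	int i;

	memset(out, 0, (n * 10 + 7) / 8);
	for (i = 0; i < n * 10; i++)
		if (w[i / 10] & (1 << (i % 10)))
			out[i / 8] |= 1 << (i % 8);
}

/* Reverse: pull 'n' 10-bit words back out of the byte stream. */
static void unpack10(const uint8_t *in, int n, uint16_t *w)
{
	int i;

	memset(w, 0, n * sizeof(*w));
	for (i = 0; i < n * 10; i++)
		if (in[i / 8] & (1 << (i % 8)))
			w[i / 10] |= 1 << (i % 10);
}

int main(void)
{
	uint16_t parity[8] = { 0x155, 0x2AA, 0x0F0, 0x30C, 0x001, 0x3FF, 0x123, 0 };
	uint16_t back[8];
	uint8_t bytes[10];
	int i;

	pack10(parity, 8, bytes);
	unpack10(bytes, 8, back);
	for (i = 0; i < 8; i++)
		printf("%03x %s\n", back[i], back[i] == parity[i] ? "ok" : "BAD");
	return 0;
}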
+
+static int nand_davinci_4bit_correct_data(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ int i;
+ unsigned int hw_4ecc[4];
+ unsigned int iserror;
+ unsigned short *ecc16;
+ unsigned int numerrors, erroraddress, errorvalue;
+ u32 val;
+
+ /*
+ * Check for an ECC where all bytes are 0xFF. If this is the case, we
+ * will assume we are looking at an erased page and we should ignore
+ * the ECC.
+ */
+ for (i = 0; i < 10; i++) {
+ if (read_ecc[i] != 0xFF)
+ break;
+ }
+ if (i == 10)
+ return 0;
+
+ /* Convert 8 bit into 10 bit */
+ ecc16 = (unsigned short *)&read_ecc[0];
+
+ /*
+ * Write the parity values in the NAND Flash 4-bit ECC Load register.
+ * Write each parity value one at a time starting from 4bit_ecc_val8
+ * to 4bit_ecc_val1.
+ */
+
+ /*Take 2 bits from 8th byte and 8 bits from 9th byte */
+ __raw_writel(((ecc16[4]) >> 6) & 0x3FF,
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 4 bits from 7th byte and 6 bits from 8th byte */
+ __raw_writel((((ecc16[3]) >> 12) & 0xF) | ((((ecc16[4])) << 4) & 0x3F0),
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 6 bits from 6th byte and 4 bits from 7th byte */
+ __raw_writel((ecc16[3] >> 2) & 0x3FF,
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 8 bits from 5th byte and 2 bits from 6th byte */
+ __raw_writel(((ecc16[2]) >> 8) | ((((ecc16[3])) << 8) & 0x300),
+ &davinci_emif_regs->nand4biteccload);
+
+ /*Take 2 bits from 3rd byte and 8 bits from 4th byte */
+ __raw_writel((((ecc16[1]) >> 14) & 0x3) | ((((ecc16[2])) << 2) & 0x3FC),
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 4 bits from 2nd byte and 6 bits from 3rd byte */
+ __raw_writel(((ecc16[1]) >> 4) & 0x3FF,
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 6 bits from 1st byte and 4 bits from 2nd byte */
+ __raw_writel((((ecc16[0]) >> 10) & 0x3F) | (((ecc16[1]) << 6) & 0x3C0),
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 10 bits from 0th and 1st bytes */
+ __raw_writel((ecc16[0]) & 0x3FF,
+ &davinci_emif_regs->nand4biteccload);
+
+ /*
+ * Perform a dummy read to the EMIF Revision Code and Status register.
+ * This is required to ensure time for syndrome calculation after
+ * writing the ECC values in the previous step.
+ */
+
+ val = __raw_readl(&davinci_emif_regs->nandfsr);
+
+ /*
+ * Read the syndrome from the NAND Flash 4-Bit ECC 1-4 registers.
+ * A syndrome value of 0 means no bit errors. If the syndrome is
+ * non-zero then go further otherwise return.
+ */
+ nand_davinci_4bit_readecc(mtd, hw_4ecc);
+
+ if (!(hw_4ecc[0] | hw_4ecc[1] | hw_4ecc[2] | hw_4ecc[3]))
+ return 0;
+
+ /*
+ * Clear any previous address calculation by doing a dummy read of an
+ * error address register.
+ */
+ val = __raw_readl(&davinci_emif_regs->nanderradd1);
+
+ /*
+ * Set the addr_calc_st bit(bit no 13) in the NAND Flash Control
+ * register to 1.
+ */
+ __raw_writel(DAVINCI_NANDFCR_4BIT_CALC_START,
+ &davinci_emif_regs->nandfcr);
+
+ /*
+ * Wait for the corr_state field (bits 8 to 11) in the
+ * NAND Flash Status register to be not equal to 0x0, 0x1, 0x2, or 0x3.
+ * Otherwise ECC calculation has not even begun and the next loop might
+ * fail because of a false positive!
+ */
+ i = NAND_TIMEOUT;
+ do {
+ val = __raw_readl(&davinci_emif_regs->nandfsr);
+ val &= 0xc00;
+ i--;
+ } while ((i > 0) && !val);
+
+ /*
+ * Wait for the corr_state field (bits 8 to 11) in the
+ * NAND Flash Status register to be equal to 0x0, 0x1, 0x2, or 0x3.
+ */
+ i = NAND_TIMEOUT;
+ do {
+ val = __raw_readl(&davinci_emif_regs->nandfsr);
+ val &= 0xc00;
+ i--;
+ } while ((i > 0) && val);
+
+ iserror = __raw_readl(&davinci_emif_regs->nandfsr);
+ iserror &= EMIF_NANDFSR_ECC_STATE_MASK;
+ iserror = iserror >> 8;
+
+ /*
+ * ECC_STATE_TOO_MANY_ERRS (0x1) means errors cannot be
+ * corrected (five or more errors). The number of errors
+ * calculated (err_num field) differs from the number of errors
+ * searched. ECC_STATE_ERR_CORR_COMP_P (0x2) means error
+ * correction complete (errors on bit 8 or 9).
+ * ECC_STATE_ERR_CORR_COMP_N (0x3) means error correction
+ * complete (error exists).
+ */
+
+ if (iserror == ECC_STATE_NO_ERR) {
+ val = __raw_readl(&davinci_emif_regs->nanderrval1);
+ return 0;
+ } else if (iserror == ECC_STATE_TOO_MANY_ERRS) {
+ val = __raw_readl(&davinci_emif_regs->nanderrval1);
+ return -1;
+ }
+
+ numerrors = ((__raw_readl(&davinci_emif_regs->nandfsr) >> 16)
+ & 0x3) + 1;
+
+ /* Read the error address, error value and correct */
+ for (i = 0; i < numerrors; i++) {
+ if (i > 1) {
+ erroraddress =
+ ((__raw_readl(&davinci_emif_regs->nanderradd2) >>
+ (16 * (i & 1))) & 0x3FF);
+ erroraddress = ((512 + 7) - erroraddress);
+ errorvalue =
+ ((__raw_readl(&davinci_emif_regs->nanderrval2) >>
+ (16 * (i & 1))) & 0xFF);
+ } else {
+ erroraddress =
+ ((__raw_readl(&davinci_emif_regs->nanderradd1) >>
+ (16 * (i & 1))) & 0x3FF);
+ erroraddress = ((512 + 7) - erroraddress);
+ errorvalue =
+ ((__raw_readl(&davinci_emif_regs->nanderrval1) >>
+ (16 * (i & 1))) & 0xFF);
+ }
+ /* xor the corrupt data with error value */
+ if (erroraddress < 512)
+ dat[erroraddress] ^= errorvalue;
+ }
+
+ return numerrors;
+}
+#endif /* CONFIG_SYS_NAND_4BIT_HW_ECC_OOBFIRST */
+
+static int nand_davinci_dev_ready(struct mtd_info *mtd)
+{
+ return __raw_readl(&davinci_emif_regs->nandfsr) & 0x1;
+}
+
+static void nand_flash_init(void)
+{
+ /* This is for the DM6446 EVM and *very* similar boards. DO NOT GROW THIS!
+ * Instead, have your board_init() set EMIF timings, based on its
+ * knowledge of the clocks and what devices are hooked up ... and
+ * don't even do that unless no UBL handled it.
+ */
+#ifdef CONFIG_SOC_DM644X
+ u_int32_t acfg1 = 0x3ffffffc;
+
+ /*------------------------------------------------------------------*
+ * NAND FLASH CHIP TIMEOUT @ 459 MHz *
+ * *
+ * AEMIF.CLK freq = PLL1/6 = 459/6 = 76.5 MHz *
+ * AEMIF.CLK period = 1/76.5 MHz = 13.1 ns *
+ * *
+ *------------------------------------------------------------------*/
+ acfg1 = 0
+ | (0 << 31) /* selectStrobe */
+ | (0 << 30) /* extWait */
+ | (1 << 26) /* writeSetup 10 ns */
+ | (3 << 20) /* writeStrobe 40 ns */
+ | (1 << 17) /* writeHold 10 ns */
+ | (1 << 13) /* readSetup 10 ns */
+ | (5 << 7) /* readStrobe 60 ns */
+ | (1 << 4) /* readHold 10 ns */
+ | (3 << 2) /* turnAround ?? ns */
+ | (0 << 0) /* asyncSize 8-bit bus */
+ ;
+
+ __raw_writel(acfg1, &davinci_emif_regs->ab1cr); /* CS2 */
+
+ /* NAND flash on CS2 */
+ __raw_writel(0x00000101, &davinci_emif_regs->nandfcr);
+#endif
+}
+
+void davinci_nand_init(struct nand_chip *nand)
+{
+ nand->chip_delay = 0;
+#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
+ nand->options |= NAND_USE_FLASH_BBT;
+#endif
+#ifdef CONFIG_SYS_NAND_HW_ECC
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 3;
+ nand->ecc.calculate = nand_davinci_calculate_ecc;
+ nand->ecc.correct = nand_davinci_correct_data;
+ nand->ecc.hwctl = nand_davinci_enable_hwecc;
+#else
+ nand->ecc.mode = NAND_ECC_SOFT;
+#endif /* CONFIG_SYS_NAND_HW_ECC */
+#ifdef CONFIG_SYS_NAND_4BIT_HW_ECC_OOBFIRST
+ nand->ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 10;
+ nand->ecc.calculate = nand_davinci_4bit_calculate_ecc;
+ nand->ecc.correct = nand_davinci_4bit_correct_data;
+ nand->ecc.hwctl = nand_davinci_4bit_enable_hwecc;
+ nand->ecc.layout = &nand_davinci_4bit_layout_oobfirst;
+#endif
+ /* Set address of hardware control function */
+ nand->cmd_ctrl = nand_davinci_hwcontrol;
+
+ nand->read_buf = nand_davinci_read_buf;
+ nand->write_buf = nand_davinci_write_buf;
+
+ nand->dev_ready = nand_davinci_dev_ready;
+
+ nand_flash_init();
+}
+
+int board_nand_init(struct nand_chip *chip) __attribute__((weak));
+
+int board_nand_init(struct nand_chip *chip)
+{
+ davinci_nand_init(chip);
+ return 0;
+}
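Because board_nand_init() is only defined weak here, a board file can provide its own version and still reuse the common setup through davinci_nand_init(). A minimal hypothetical override; the chip_delay value is a placeholder, not taken from any real board:

/* hypothetical board file */
extern void davinci_nand_init(struct nand_chip *nand);	/* exported by the driver above */

int board_nand_init(struct nand_chip *chip)
{
	davinci_nand_init(chip);	/* common DaVinci setup */
	chip->chip_delay = 25;		/* placeholder tR delay, in microseconds */
	return 0;
}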
diff --git a/u-boot/drivers/mtd/nand/diskonchip.c b/u-boot/drivers/mtd/nand/diskonchip.c
new file mode 100644
index 0000000..edf3a09
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/diskonchip.c
@@ -0,0 +1,1779 @@
+/*
+ * drivers/mtd/nand/diskonchip.c
+ *
+ * (C) 2003 Red Hat, Inc.
+ * (C) 2004 Dan Brown <dan_brown@ieee.org>
+ * (C) 2004 Kalev Lember <kalev@smartlink.ee>
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
+ * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
+ *
+ * Error correction code lifted from the old docecc code
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright (C) 2000 Netgem S.A.
+ * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Interface to generic NAND code for M-Systems DiskOnChip devices
+ */
+
+#include <common.h>
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/rslib.h>
+#include <linux/moduleparam.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/doc2000.h>
+#include <linux/mtd/compatmac.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/inftl.h>
+
+/* Where to look for the devices? */
+#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
+#endif
+
+static unsigned long __initdata doc_locations[] = {
+#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
+ 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
+ 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
+ 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
+ 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
+ 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
+#else /* CONFIG_MTD_DOCPROBE_HIGH */
+ 0xc8000, 0xca000, 0xcc000, 0xce000,
+ 0xd0000, 0xd2000, 0xd4000, 0xd6000,
+ 0xd8000, 0xda000, 0xdc000, 0xde000,
+ 0xe0000, 0xe2000, 0xe4000, 0xe6000,
+ 0xe8000, 0xea000, 0xec000, 0xee000,
+#endif /* CONFIG_MTD_DOCPROBE_HIGH */
+#else
+#warning Unknown architecture for DiskOnChip. No default probe locations defined
+#endif
+ 0xffffffff };
+
+static struct mtd_info *doclist = NULL;
+
+struct doc_priv {
+ void __iomem *virtadr;
+ unsigned long physadr;
+ u_char ChipID;
+ u_char CDSNControl;
+ int chips_per_floor; /* The number of chips detected on each floor */
+ int curfloor;
+ int curchip;
+ int mh0_page;
+ int mh1_page;
+ struct mtd_info *nextdoc;
+};
+
+/* This is the syndrome computed by the HW ecc generator upon reading an empty
+ page, one with all 0xff for data and stored ecc code. */
+static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a };
+
+/* This is the ecc value computed by the HW ecc generator upon writing an empty
+ page, one with all 0xff for data. */
+static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
+
+#define INFTL_BBT_RESERVED_BLOCKS 4
+
+#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
+#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
+#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
+
+static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int bitmask);
+static void doc200x_select_chip(struct mtd_info *mtd, int chip);
+
+static int debug = 0;
+module_param(debug, int, 0);
+
+static int try_dword = 1;
+module_param(try_dword, int, 0);
+
+static int no_ecc_failures = 0;
+module_param(no_ecc_failures, int, 0);
+
+static int no_autopart = 0;
+module_param(no_autopart, int, 0);
+
+static int show_firmware_partition = 0;
+module_param(show_firmware_partition, int, 0);
+
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
+static int inftl_bbt_write = 1;
+#else
+static int inftl_bbt_write = 0;
+#endif
+module_param(inftl_bbt_write, int, 0);
+
+static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS;
+module_param(doc_config_location, ulong, 0);
+MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
+
+/* Sector size for HW ECC */
+#define SECTOR_SIZE 512
+/* The sector bytes are packed into NB_DATA 10 bit words */
+#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
+/* Number of roots */
+#define NROOTS 4
+/* First consecutive root */
+#define FCR 510
+/* Number of symbols */
+#define NN 1023
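Worked out, NB_DATA is (512 + 1) * 8 + 6 = 4110 bits, i.e. 411 ten-bit symbols per 512-byte sector; the extra byte presumably accounts for the parity byte carried alongside the six ECC bytes, and the 6 pad bits round the total up to a whole 10-bit symbol.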
+
+/* the Reed Solomon control structure */
+static struct rs_control *rs_decoder;
+
+/*
+ * The HW decoder in the DoC ASICs provides us an error syndrome,
+ * which we must convert to a standard syndrome usable by the generic
+ * Reed-Solomon library code.
+ *
+ * Fabrice Bellard figured this out in the old docecc code. I added
+ * some comments, improved a minor bit and converted it to make use
+ * of the generic Reed-Solomon library. tglx
+ */
+static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
+{
+ int i, j, nerr, errpos[8];
+ uint8_t parity;
+ uint16_t ds[4], s[5], tmp, errval[8], syn[4];
+
+ /* Convert the ecc bytes into words */
+ ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
+ ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
+ ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
+ ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
+ parity = ecc[1];
+
+ /* Initialize the syndrome buffer */
+ for (i = 0; i < NROOTS; i++)
+ s[i] = ds[0];
+ /*
+ * Evaluate
+ * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
+ * where x = alpha^(FCR + i)
+ */
+ for (j = 1; j < NROOTS; j++) {
+ if (ds[j] == 0)
+ continue;
+ tmp = rs->index_of[ds[j]];
+ for (i = 0; i < NROOTS; i++)
+ s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)];
+ }
+
+ /* Calc s[i] = s[i] / alpha^(v + i) */
+ for (i = 0; i < NROOTS; i++) {
+ if (syn[i])
+ syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i));
+ }
+ /* Call the decoder library */
+ nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
+
+ /* Uncorrectable errors? */
+ if (nerr < 0)
+ return nerr;
+
+ /*
+ * Correct the errors. The bitpositions are a bit of magic,
+ * but they are given by the design of the de/encoder circuit
+ * in the DoC ASIC's.
+ */
+ for (i = 0; i < nerr; i++) {
+ int index, bitpos, pos = 1015 - errpos[i];
+ uint8_t val;
+ if (pos >= NB_DATA && pos < 1019)
+ continue;
+ if (pos < NB_DATA) {
+ /* extract bit position (MSB first) */
+ pos = 10 * (NB_DATA - 1 - pos) - 6;
+ /* now correct the following 10 bits. At most two bytes
+ can be modified since pos is even */
+ index = (pos >> 3) ^ 1;
+ bitpos = pos & 7;
+ if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] >> (2 + bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ index = ((pos >> 3) + 1) ^ 1;
+ bitpos = (bitpos + 10) & 7;
+ if (bitpos == 0)
+ bitpos = 8;
+ if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] << (8 - bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ }
+ }
+ /* If the parity is wrong, no rescue possible */
+ return parity ? -EBADMSG : nerr;
+}
+
+static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
+{
+ volatile char dummy;
+ int i;
+
+ for (i = 0; i < cycles; i++) {
+ if (DoC_is_Millennium(doc))
+ dummy = ReadDOC(doc->virtadr, NOP);
+ else if (DoC_is_MillenniumPlus(doc))
+ dummy = ReadDOC(doc->virtadr, Mplus_NOP);
+ else
+ dummy = ReadDOC(doc->virtadr, DOCStatus);
+ }
+
+}
+
+#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
+
+/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
+static int _DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ unsigned long timeo = jiffies + (HZ * 10);
+
+ if (debug)
+ printk("_DoC_WaitReady...\n");
+ /* Out-of-line routine to wait for chip response */
+ if (DoC_is_MillenniumPlus(doc)) {
+ while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ } else {
+ while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ }
+
+ return 0;
+}
+
+static inline int DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ int ret = 0;
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ DoC_Delay(doc, 4);
+
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ } else {
+ DoC_Delay(doc, 4);
+
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ DoC_Delay(doc, 2);
+ }
+
+ if (debug)
+ printk("DoC_WaitReady OK\n");
+ return ret;
+}
+
+static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if (debug)
+ printk("write_byte %02x\n", datum);
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, 2k_CDSN_IO);
+}
+
+static u_char doc2000_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ u_char ret;
+
+ ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(doc, 2);
+ ret = ReadDOC(docptr, 2k_CDSN_IO);
+ if (debug)
+ printk("read_byte returns %02x\n", ret);
+ return ret;
+}
+
+static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ if (debug)
+ printk("writebuf of %d bytes: ", len);
+ for (i = 0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug)
+ printk("\n");
+}
+
+static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf of %d bytes: ", len);
+
+ for (i = 0; i < len; i++) {
+ buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+ }
+}
+
+static void doc2000_readbuf_dword(struct mtd_info *mtd,
+ u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf_dword of %d bytes: ", len);
+
+ if (unlikely((((unsigned long)buf) | len) & 3)) {
+ for (i = 0; i < len; i++) {
+ *(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
+ }
+ } else {
+ for (i = 0; i < len; i += 4) {
+ *(uint32_t*) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
+ }
+ }
+}
+
+static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
+ return -EFAULT;
+ return 0;
+}
+
+static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ uint16_t ret;
+
+ doc200x_select_chip(mtd, nr);
+ doc200x_hwcontrol(mtd, NAND_CMD_READID,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /* We can't use dev_ready here, but at least we wait for the
+ * command to complete
+ */
+ udelay(50);
+
+ ret = this->read_byte(mtd) << 8;
+ ret |= this->read_byte(mtd);
+
+ if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
+ /* First chip probe. See if we get same results by 32-bit access */
+ union {
+ uint32_t dword;
+ uint8_t byte[4];
+ } ident;
+ void __iomem *docptr = doc->virtadr;
+
+ doc200x_hwcontrol(mtd, NAND_CMD_READID,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ udelay(50);
+
+ ident.dword = readl(docptr + DoC_2k_CDSN_IO);
+ if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
+ printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n");
+ this->read_buf = &doc2000_readbuf_dword;
+ }
+ }
+
+ return ret;
+}
+
+static void __init doc2000_count_chips(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ uint16_t mfrid;
+ int i;
+
+ /* Max 4 chips per floor on DiskOnChip 2000 */
+ doc->chips_per_floor = 4;
+
+ /* Find out what the first chip is */
+ mfrid = doc200x_ident_chip(mtd, 0);
+
+ /* Find how many chips in each floor. */
+ for (i = 1; i < 4; i++) {
+ if (doc200x_ident_chip(mtd, i) != mfrid)
+ break;
+ }
+ doc->chips_per_floor = i;
+ printk(KERN_DEBUG "Detected %d chips per floor.\n", i);
+}
+
+static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
+{
+ struct doc_priv *doc = this->priv;
+
+ int status;
+
+ DoC_WaitReady(doc);
+ this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ DoC_WaitReady(doc);
+ status = (int)this->read_byte(mtd);
+
+ return status;
+}
+
+static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, Mil_CDSN_IO);
+ WriteDOC(datum, docptr, WritePipeTerm);
+}
+
+static u_char doc2001_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /*ReadDOC(docptr, CDSNSlowIO); */
+ /* 11.4.5 -- delay twice to allow extended length cycle */
+ DoC_Delay(doc, 2);
+ ReadDOC(docptr, ReadPipeInit);
+ /*return ReadDOC(docptr, Mil_CDSN_IO); */
+ return ReadDOC(docptr, LastDataRead);
+}
+
+static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ for (i = 0; i < len; i++)
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ /* Terminate write pipeline */
+ WriteDOC(0x00, docptr, WritePipeTerm);
+}
+
+static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ /* Start read pipeline */
+ ReadDOC(docptr, ReadPipeInit);
+
+ for (i = 0; i < len - 1; i++)
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
+
+ /* Terminate read pipeline */
+ buf[i] = ReadDOC(docptr, LastDataRead);
+}
+
+static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ /* Start read pipeline */
+ ReadDOC(docptr, ReadPipeInit);
+
+ for (i = 0; i < len - 1; i++)
+ if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
+ ReadDOC(docptr, LastDataRead);
+ return i;
+ }
+ if (buf[i] != ReadDOC(docptr, LastDataRead))
+ return i;
+ return 0;
+}
+
+static u_char doc2001plus_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ u_char ret;
+
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ret = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug)
+ printk("read_byte returns %02x\n", ret);
+ return ret;
+}
+
+static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("writebuf of %d bytes: ", len);
+ for (i = 0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug)
+ printk("\n");
+}
+
+static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf of %d bytes: ", len);
+
+ /* Start read pipeline */
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ for (i = 0; i < len - 2; i++) {
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+
+ /* Terminate read pipeline */
+ buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 2]);
+ buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 1]);
+ if (debug)
+ printk("\n");
+}
+
+static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("verifybuf of %d bytes: ", len);
+
+ /* Start read pipeline */
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ for (i = 0; i < len - 2; i++)
+ if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
+ ReadDOC(docptr, Mplus_LastDataRead);
+ ReadDOC(docptr, Mplus_LastDataRead);
+ return i;
+ }
+ if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead))
+ return len - 2;
+ if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead))
+ return len - 1;
+ return 0;
+}
+
+static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int floor = 0;
+
+ if (debug)
+ printk("select chip (%d)\n", chip);
+
+ if (chip == -1) {
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+ return;
+ }
+
+ floor = chip / doc->chips_per_floor;
+ chip -= (floor * doc->chips_per_floor);
+
+ /* Assert ChipEnable and deassert WriteProtect */
+ WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
+ this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ doc->curchip = chip;
+ doc->curfloor = floor;
+}
+
+static void doc200x_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int floor = 0;
+
+ if (debug)
+ printk("select chip (%d)\n", chip);
+
+ if (chip == -1)
+ return;
+
+ floor = chip / doc->chips_per_floor;
+ chip -= (floor * doc->chips_per_floor);
+
+ /* 11.4.4 -- deassert CE before changing chip */
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+
+ WriteDOC(floor, docptr, FloorSelect);
+ WriteDOC(chip, docptr, CDSNDeviceSelect);
+
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ doc->curchip = chip;
+ doc->curfloor = floor;
+}
+
+#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
+
+static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ doc->CDSNControl &= ~CDSN_CTRL_MSK;
+ doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
+ if (debug)
+ printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
+ WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+ /* 11.4.3 -- 4 NOPs after CDSNControl write */
+ DoC_Delay(doc, 4);
+ }
+ if (cmd != NAND_CMD_NONE) {
+ if (DoC_is_2000(doc))
+ doc2000_write_byte(mtd, cmd);
+ else
+ doc2001_write_byte(mtd, cmd);
+ }
+}
+
+static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /*
+ * Must terminate write pipeline before sending any commands
+ * to the device.
+ */
+ if (command == NAND_CMD_PAGEPROG) {
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ }
+
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ WriteDOC(readcmd, docptr, Mplus_FlashCmd);
+ }
+ WriteDOC(command, docptr, Mplus_FlashCmd);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+
+ if (column != -1 || page_addr != -1) {
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (this->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ WriteDOC(column, docptr, Mplus_FlashAddress);
+ }
+ if (page_addr != -1) {
+ WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
+ WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
+ /* One more address cycle for higher density devices */
+ if (this->chipsize & 0x0c000000) {
+ WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
+ printk("high density\n");
+ }
+ }
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ /* deassert ALE */
+ if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
+ command == NAND_CMD_READOOB || command == NAND_CMD_READID)
+ WriteDOC(0, docptr, Mplus_FlashControl);
+ }
+
+ /*
+ * program and erase have their own busy handlers
+ * status and sequential in needs no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (this->dev_ready)
+ break;
+ udelay(this->chip_delay);
+ WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ while (!(this->read_byte(mtd) & 0x40)) ;
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!this->dev_ready) {
+ udelay(this->chip_delay);
+ return;
+ }
+ }
+
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+ /* wait until command is processed */
+ while (!this->dev_ready(mtd)) ;
+}
+
+static int doc200x_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ /* 11.4.2 -- must NOP four times before checking FR/B# */
+ DoC_Delay(doc, 4);
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+ if (debug)
+ printk("not ready\n");
+ return 0;
+ }
+ if (debug)
+ printk("was ready\n");
+ return 1;
+ } else {
+ /* 11.4.2 -- must NOP four times before checking FR/B# */
+ DoC_Delay(doc, 4);
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (debug)
+ printk("not ready\n");
+ return 0;
+ }
+ /* 11.4.2 -- Must NOP twice if it's ready */
+ DoC_Delay(doc, 2);
+ if (debug)
+ printk("was ready\n");
+ return 1;
+ }
+}
+
+static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ /* This is our last resort if we couldn't find or create a BBT. Just
+ pretend all blocks are good. */
+ return 0;
+}
+
+static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch (mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
+ break;
+ }
+}
+
+static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch (mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
+ break;
+ }
+}
+
+/* This code is only called on write */
+static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsigned char *ecc_code)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ int emptymatch = 1;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ } else {
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ if (ecc_code[i] != empty_write_ecc[i])
+ emptymatch = 0;
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+#if 0
+ /* If emptymatch=1, we might have an all-0xff data buffer. Check. */
+ if (emptymatch) {
+ /* Note: this somewhat expensive test should not be triggered
+ often. It could be optimized away by examining the data in
+ the writebuf routine, and remembering the result. */
+ for (i = 0; i < 512; i++) {
+ if (dat[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, we do have an all-0xff data buffer.
+ Return all-0xff ecc value instead of the computed one, so
+ it'll look just like a freshly-erased page. */
+ if (emptymatch)
+ memset(ecc_code, 0xff, 6);
+#endif
+ return 0;
+}
+
+static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *isnull)
+{
+ int i, ret = 0;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint8_t calc_ecc[6];
+ volatile u_char dummy;
+ int emptymatch = 1;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ } else {
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ }
+
+ /* Error occurred? */
+ if (dummy & 0x80) {
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ if (calc_ecc[i] != empty_read_syndrome[i])
+ emptymatch = 0;
+ }
+ /* If emptymatch=1, the read syndrome is consistent with an
+ all-0xff data and stored ecc block. Check the stored ecc. */
+ if (emptymatch) {
+ for (i = 0; i < 6; i++) {
+ if (read_ecc[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, check the data block. */
+ if (emptymatch) {
+ /* Note: this somewhat expensive test should not be triggered
+ often. It could be optimized away by examining the data in
+ the readbuf routine, and remembering the result. */
+ for (i = 0; i < 512; i++) {
+ if (dat[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, this is almost certainly a freshly-
+ erased block, in which case the ECC will not come out right.
+ We'll suppress the error and tell the caller everything's
+ OK. Because it is. */
+ if (!emptymatch)
+ ret = doc_ecc_decode(rs_decoder, dat, calc_ecc);
+ if (ret > 0)
+ printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ if (no_ecc_failures && (ret == -EBADMSG)) {
+ printk(KERN_ERR "suppressing ECC failure\n");
+ ret = 0;
+ }
+ return ret;
+}
+
+/*u_char mydatabuf[528]; */
+
+/* The strange out-of-order .oobfree list below is a (possibly unneeded)
+ * attempt to retain compatibility. It used to read:
+ * .oobfree = { {8, 8} }
+ * Since that leaves two bytes unusable, it was changed. But the following
+ * scheme might affect existing jffs2 installs by moving the cleanmarker:
+ * .oobfree = { {6, 10} }
+ * jffs2 seems to handle the above gracefully, but the current scheme seems
+ * safer. The only problem with it is that any code that parses oobfree must
+ * be able to handle out-of-order segments.
+ */
+static struct nand_ecclayout doc200x_oobinfo = {
+ .eccbytes = 6,
+ .eccpos = {0, 1, 2, 3, 4, 5},
+ .oobfree = {{8, 8}, {6, 2}}
+};
+
+/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
+ On successful return, buf will contain a copy of the media header for
+ further processing. id is the string to scan for, and will presumably be
+ either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media
+ header. The page #s of the found media headers are placed in mh0_page and
+ mh1_page in the DOC private structure. */
+static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ unsigned offs;
+ int ret;
+ size_t retlen;
+
+ for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
+ ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ if (retlen != mtd->writesize)
+ continue;
+ if (ret) {
+ printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", offs);
+ }
+ if (memcmp(buf, id, 6))
+ continue;
+ printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
+ if (doc->mh0_page == -1) {
+ doc->mh0_page = offs >> this->page_shift;
+ if (!findmirror)
+ return 1;
+ continue;
+ }
+ doc->mh1_page = offs >> this->page_shift;
+ return 2;
+ }
+ if (doc->mh0_page == -1) {
+ printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
+ return 0;
+ }
+ /* Only one mediaheader was found. We want buf to contain a
+ mediaheader on return, so we'll have to re-read the one we found. */
+ offs = doc->mh0_page << this->page_shift;
+ ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ if (retlen != mtd->writesize) {
+ /* Insanity. Give up. */
+ printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
+ return 0;
+ }
+ return 1;
+}
+
+static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ int ret = 0;
+ u_char *buf;
+ struct NFTLMediaHeader *mh;
+ const unsigned psize = 1 << this->page_shift;
+ int numparts = 0;
+ unsigned blocks, maxblocks;
+ int offs, numheaders;
+
+ buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
+ return 0;
+ }
+ if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
+ goto out;
+ mh = (struct NFTLMediaHeader *)buf;
+
+ le16_to_cpus(&mh->NumEraseUnits);
+ le16_to_cpus(&mh->FirstPhysicalEUN);
+ le32_to_cpus(&mh->FormattedSize);
+
+ printk(KERN_INFO " DataOrgID = %s\n"
+ " NumEraseUnits = %d\n"
+ " FirstPhysicalEUN = %d\n"
+ " FormattedSize = %d\n"
+ " UnitSizeFactor = %d\n",
+ mh->DataOrgID, mh->NumEraseUnits,
+ mh->FirstPhysicalEUN, mh->FormattedSize,
+ mh->UnitSizeFactor);
+
+ blocks = mtd->size >> this->phys_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+
+ if (mh->UnitSizeFactor == 0x00) {
+ /* Auto-determine UnitSizeFactor. The constraints are:
+ - There can be at most 32768 virtual blocks.
+ - There can be at most (virtual block size - page size)
+ virtual blocks (because MediaHeader+BBT must fit in 1).
+ */
+ mh->UnitSizeFactor = 0xff;
+ while (blocks > maxblocks) {
+ blocks >>= 1;
+ maxblocks = min(32768U, (maxblocks << 1) + psize);
+ mh->UnitSizeFactor--;
+ }
+ printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
+ }
+
+	/* NOTE: The lines below modify internal variables of the NAND and MTD
+	   layers; variables which have already been configured by nand_scan.
+	   Unfortunately, we didn't know before this point what these values
+	   should be. Thus, this code is somewhat dependent on the exact
+	   implementation of the NAND layer. */
+ if (mh->UnitSizeFactor != 0xff) {
+ this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
+ mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
+ printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
+ blocks = mtd->size >> this->bbt_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+ }
+
+ if (blocks > maxblocks) {
+ printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
+ goto out;
+ }
+
+ /* Skip past the media headers. */
+ offs = max(doc->mh0_page, doc->mh1_page);
+ offs <<= this->page_shift;
+ offs += mtd->erasesize;
+
+ if (show_firmware_partition == 1) {
+ parts[0].name = " DiskOnChip Firmware / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = offs;
+ numparts = 1;
+ }
+
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
+
+ offs += parts[numparts].size;
+ numparts++;
+
+ if (offs < mtd->size) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = mtd->size - offs;
+ numparts++;
+ }
+
+ ret = numparts;
+ out:
+ kfree(buf);
+ return ret;
+}
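+
+/* Worked example for the UnitSizeFactor auto-detection above, using a
+ * hypothetical geometry: with 8 KiB physical erase blocks, 512-byte pages and
+ * a 128 MiB device, blocks = 16384 and maxblocks = min(32768, 8192 - 512)
+ * = 7680. One pass through the loop halves blocks to 8192 and raises
+ * maxblocks to min(32768, 15360 + 512) = 15872, so the loop stops with
+ * UnitSizeFactor = 0xfe, and the code above then doubles the virtual erase
+ * size to 16 KiB.
+ */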
+
+/* This is a stripped-down copy of the code in inftlmount.c */
+static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ int ret = 0;
+ u_char *buf;
+ struct INFTLMediaHeader *mh;
+ struct INFTLPartition *ip;
+ int numparts = 0;
+ int blocks;
+ int vshift, lastvunit = 0;
+ int i;
+ int end = mtd->size;
+
+ if (inftl_bbt_write)
+ end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
+
+ buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
+ return 0;
+ }
+
+ if (!find_media_headers(mtd, buf, "BNAND", 0))
+ goto out;
+ doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
+ mh = (struct INFTLMediaHeader *)buf;
+
+ le32_to_cpus(&mh->NoOfBootImageBlocks);
+ le32_to_cpus(&mh->NoOfBinaryPartitions);
+ le32_to_cpus(&mh->NoOfBDTLPartitions);
+ le32_to_cpus(&mh->BlockMultiplierBits);
+ le32_to_cpus(&mh->FormatFlags);
+ le32_to_cpus(&mh->PercentUsed);
+
+ printk(KERN_INFO " bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+	       " BlockMultiplierBits = %d\n"
+	       " FormatFlags = %d\n"
+ " OsakVersion = %d.%d.%d.%d\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
+ mh->PercentUsed);
+
+ vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
+
+ blocks = mtd->size >> vshift;
+ if (blocks > 32768) {
+ printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
+ goto out;
+ }
+
+ blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
+ if (inftl_bbt_write && (blocks > mtd->erasesize)) {
+ printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
+ goto out;
+ }
+
+ /* Scan the partitions */
+ for (i = 0; (i < 4); i++) {
+ ip = &(mh->Partitions[i]);
+ le32_to_cpus(&ip->virtualUnits);
+ le32_to_cpus(&ip->firstUnit);
+ le32_to_cpus(&ip->lastUnit);
+ le32_to_cpus(&ip->flags);
+ le32_to_cpus(&ip->spareUnits);
+ le32_to_cpus(&ip->Reserved0);
+
+ printk(KERN_INFO " PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
+
+ if ((show_firmware_partition == 1) &&
+ (i == 0) && (ip->firstUnit > 0)) {
+ parts[0].name = " DiskOnChip IPL / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = mtd->erasesize * ip->firstUnit;
+ numparts = 1;
+ }
+
+ if (ip->flags & INFTL_BINARY)
+ parts[numparts].name = " DiskOnChip BDK partition";
+ else
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = ip->firstUnit << vshift;
+ parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
+ numparts++;
+ if (ip->lastUnit > lastvunit)
+ lastvunit = ip->lastUnit;
+ if (ip->flags & INFTL_LAST)
+ break;
+ }
+ lastvunit++;
+ if ((lastvunit << vshift) < end) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = lastvunit << vshift;
+ parts[numparts].size = end - parts[numparts].offset;
+ numparts++;
+ }
+ ret = numparts;
+ out:
+ kfree(buf);
+ return ret;
+}
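+
+/* For illustration: vshift = phys_erase_shift + BlockMultiplierBits, so with
+ * (hypothetically) 16 KiB physical erase blocks (shift 14) and
+ * BlockMultiplierBits = 1 a virtual unit is 32 KiB, and a partition whose
+ * firstUnit is 4 starts at 4 << 15 = 128 KiB. The real values come from the
+ * INFTL media header parsed above.
+ */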
+
+static int __init nftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ struct mtd_partition parts[2];
+
+ memset((char *)parts, 0, sizeof(parts));
+ /* On NFTL, we have to find the media headers before we can read the
+ BBTs, since they're stored in the media header eraseblocks. */
+ numparts = nftl_partscan(mtd, parts);
+ if (!numparts)
+ return -EIO;
+ this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->pages[0] = doc->mh0_page + 1;
+ if (doc->mh1_page != -1) {
+ this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->pages[0] = doc->mh1_page + 1;
+ } else {
+ this->bbt_md = NULL;
+ }
+
+ /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
+ At least as nand_bbt.c is currently written. */
+ if ((ret = nand_scan_bbt(mtd, NULL)))
+ return ret;
+ add_mtd_device(mtd);
+#ifdef CONFIG_MTD_PARTITIONS
+ if (!no_autopart)
+ add_mtd_partitions(mtd, parts, numparts);
+#endif
+ return 0;
+}
+
+static int __init inftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ struct mtd_partition parts[5];
+
+ if (this->numchips > doc->chips_per_floor) {
+ printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
+ return -EIO;
+ }
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->pages[0] = 2;
+ this->bbt_md = NULL;
+ } else {
+ this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->offs = 8;
+ this->bbt_td->len = 8;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_td->reserved_block_code = 0x01;
+ this->bbt_td->pattern = "MSYS_BBT";
+
+ this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_md->options |= NAND_BBT_WRITE;
+ this->bbt_md->offs = 8;
+ this->bbt_md->len = 8;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_md->reserved_block_code = 0x01;
+ this->bbt_md->pattern = "TBB_SYSM";
+ }
+
+ /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
+ At least as nand_bbt.c is currently written. */
+ if ((ret = nand_scan_bbt(mtd, NULL)))
+ return ret;
+ memset((char *)parts, 0, sizeof(parts));
+ numparts = inftl_partscan(mtd, parts);
+ /* At least for now, require the INFTL Media Header. We could probably
+ do without it for non-INFTL use, since all it gives us is
+ autopartitioning, but I want to give it more thought. */
+ if (!numparts)
+ return -EIO;
+ add_mtd_device(mtd);
+#ifdef CONFIG_MTD_PARTITIONS
+ if (!no_autopart)
+ add_mtd_partitions(mtd, parts, numparts);
+#endif
+ return 0;
+}
+
+static inline int __init doc2000_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->read_byte = doc2000_read_byte;
+ this->write_buf = doc2000_writebuf;
+ this->read_buf = doc2000_readbuf;
+ this->verify_buf = doc2000_verifybuf;
+ this->scan_bbt = nftl_scan_bbt;
+
+ doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (NFTL Model)";
+ return (4 * doc->chips_per_floor);
+}
+
+static inline int __init doc2001_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->read_byte = doc2001_read_byte;
+ this->write_buf = doc2001_writebuf;
+ this->read_buf = doc2001_readbuf;
+ this->verify_buf = doc2001_verifybuf;
+
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
+ /* It's not a Millennium; it's one of the newer
+ DiskOnChip 2000 units with a similar ASIC.
+ Treat it like a Millennium, except that it
+ can have multiple chips. */
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (INFTL Model)";
+ this->scan_bbt = inftl_scan_bbt;
+ return (4 * doc->chips_per_floor);
+ } else {
+ /* Bog-standard Millennium */
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium";
+ this->scan_bbt = nftl_scan_bbt;
+ return 1;
+ }
+}
+
+static inline int __init doc2001plus_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->read_byte = doc2001plus_read_byte;
+ this->write_buf = doc2001plus_writebuf;
+ this->read_buf = doc2001plus_readbuf;
+ this->verify_buf = doc2001plus_verifybuf;
+ this->scan_bbt = inftl_scan_bbt;
+ this->cmd_ctrl = NULL;
+ this->select_chip = doc2001plus_select_chip;
+ this->cmdfunc = doc2001plus_command;
+ this->ecc.hwctl = doc2001plus_enable_hwecc;
+
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium Plus";
+
+ return 1;
+}
+
+static int __init doc_probe(unsigned long physadr)
+{
+ unsigned char ChipID;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct doc_priv *doc;
+ void __iomem *virtadr;
+ unsigned char save_control;
+ unsigned char tmp, tmpb, tmpc;
+ int reg, len, numchips;
+ int ret = 0;
+
+ virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
+ if (!virtadr) {
+ printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
+ return -EIO;
+ }
+
+ /* It's not possible to cleanly detect the DiskOnChip - the
+ * bootup procedure will put the device into reset mode, and
+ * it's not possible to talk to it without actually writing
+ * to the DOCControl register. So we store the current contents
+ * of the DOCControl register's location, in case we later decide
+ * that it's not a DiskOnChip, and want to put it back how we
+ * found it.
+ */
+ save_control = ReadDOC(virtadr, DOCControl);
+
+ /* Reset the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+
+ /* Enable the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_Doc2k:
+ reg = DoC_2k_ECCStatus;
+ break;
+ case DOC_ChipID_DocMil:
+ reg = DoC_ECCConf;
+ break;
+ case DOC_ChipID_DocMilPlus16:
+ case DOC_ChipID_DocMilPlus32:
+ case 0:
+ /* Possible Millennium Plus, need to do more checks */
+ /* Possibly release from power down mode */
+ for (tmp = 0; (tmp < 4); tmp++)
+ ReadDOC(virtadr, Mplus_Power);
+
+ /* Reset the Millennium Plus ASIC */
+ tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+
+ mdelay(1);
+ /* Enable the Millennium Plus ASIC */
+ tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+ mdelay(1);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_DocMilPlus16:
+ reg = DoC_Mplus_Toggle;
+ break;
+ case DOC_ChipID_DocMilPlus32:
+ printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ break;
+
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ /* Check the TOGGLE bit in the ECC register */
+ tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ if ((tmp == tmpb) || (tmp != tmpc)) {
+ printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
+ ret = -ENODEV;
+ goto notfound;
+ }
+
+ for (mtd = doclist; mtd; mtd = doc->nextdoc) {
+ unsigned char oldval;
+ unsigned char newval;
+ nand = mtd->priv;
+ doc = nand->priv;
+ /* Use the alias resolution register to determine if this is
+ in fact the same DOC aliased to a new address. If writes
+ to one chip's alias resolution register change the value on
+ the other chip, they're the same chip. */
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ newval = ReadDOC(virtadr, Mplus_AliasResolution);
+ } else {
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ newval = ReadDOC(virtadr, AliasResolution);
+ }
+ if (oldval != newval)
+ continue;
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ WriteDOC(~newval, virtadr, Mplus_AliasResolution);
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ WriteDOC(newval, virtadr, Mplus_AliasResolution); /* restore it */
+ } else {
+ WriteDOC(~newval, virtadr, AliasResolution);
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ WriteDOC(newval, virtadr, AliasResolution); /* restore it */
+ }
+ newval = ~newval;
+ if (oldval == newval) {
+ printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
+ goto notfound;
+ }
+ }
+
+ printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);
+
+ len = sizeof(struct mtd_info) +
+ sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
+ mtd = kzalloc(len, GFP_KERNEL);
+ if (!mtd) {
+ printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ nand = (struct nand_chip *) (mtd + 1);
+ doc = (struct doc_priv *) (nand + 1);
+ nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
+ nand->bbt_md = nand->bbt_td + 1;
+
+ mtd->priv = nand;
+ mtd->owner = THIS_MODULE;
+
+ nand->priv = doc;
+ nand->select_chip = doc200x_select_chip;
+ nand->cmd_ctrl = doc200x_hwcontrol;
+ nand->dev_ready = doc200x_dev_ready;
+ nand->waitfunc = doc200x_wait;
+ nand->block_bad = doc200x_block_bad;
+ nand->ecc.hwctl = doc200x_enable_hwecc;
+ nand->ecc.calculate = doc200x_calculate_ecc;
+ nand->ecc.correct = doc200x_correct_data;
+
+ nand->ecc.layout = &doc200x_oobinfo;
+ nand->ecc.mode = NAND_ECC_HW_SYNDROME;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 6;
+ nand->options = NAND_USE_FLASH_BBT;
+
+ doc->physadr = physadr;
+ doc->virtadr = virtadr;
+ doc->ChipID = ChipID;
+ doc->curfloor = -1;
+ doc->curchip = -1;
+ doc->mh0_page = -1;
+ doc->mh1_page = -1;
+ doc->nextdoc = doclist;
+
+ if (ChipID == DOC_ChipID_Doc2k)
+ numchips = doc2000_init(mtd);
+ else if (ChipID == DOC_ChipID_DocMilPlus16)
+ numchips = doc2001plus_init(mtd);
+ else
+ numchips = doc2001_init(mtd);
+
+ if ((ret = nand_scan(mtd, numchips))) {
+		/* DBB note: I believe nand_release is necessary here, as
+ buffers may have been allocated in nand_base. Check with
+ Thomas. FIX ME! */
+ /* nand_release will call del_mtd_device, but we haven't yet
+ added it. This is handled without incident by
+ del_mtd_device, as far as I can tell. */
+ nand_release(mtd);
+ kfree(mtd);
+ goto fail;
+ }
+
+ /* Success! */
+ doclist = mtd;
+ return 0;
+
+ notfound:
+ /* Put back the contents of the DOCControl register, in case it's not
+ actually a DiskOnChip. */
+ WriteDOC(save_control, virtadr, DOCControl);
+ fail:
+ iounmap(virtadr);
+ return ret;
+}
+
+static void release_nanddoc(void)
+{
+ struct mtd_info *mtd, *nextmtd;
+ struct nand_chip *nand;
+ struct doc_priv *doc;
+
+ for (mtd = doclist; mtd; mtd = nextmtd) {
+ nand = mtd->priv;
+ doc = nand->priv;
+
+ nextmtd = doc->nextdoc;
+ nand_release(mtd);
+ iounmap(doc->virtadr);
+ kfree(mtd);
+ }
+}
+
+static int __init init_nanddoc(void)
+{
+ int i, ret = 0;
+
+	/* We could create the decoder on demand, if memory is a concern.
+	 * This way we have it handy if an error happens.
+	 *
+	 * Symbol size is 10 (bits)
+	 * Primitive polynomial is x^10+x^3+1
+	 * First consecutive root is 510
+	 * Primitive element to generate roots = 1
+	 * Generator polynomial degree = 4
+	 */
+ rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
+ if (!rs_decoder) {
+ printk(KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
+ return -ENOMEM;
+ }
+
+ if (doc_config_location) {
+ printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
+ ret = doc_probe(doc_config_location);
+ if (ret < 0)
+ goto outerr;
+ } else {
+ for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ doc_probe(doc_locations[i]);
+ }
+ }
+ /* No banner message any more. Print a message if no DiskOnChip
+ found, so the user knows we at least tried. */
+ if (!doclist) {
+ printk(KERN_INFO "No valid DiskOnChip devices found\n");
+ ret = -ENODEV;
+ goto outerr;
+ }
+ return 0;
+ outerr:
+ free_rs(rs_decoder);
+ return ret;
+}
+
+static void __exit cleanup_nanddoc(void)
+{
+ /* Cleanup the nand/DoC resources */
+ release_nanddoc();
+
+ /* Free the reed solomon resources */
+ if (rs_decoder) {
+ free_rs(rs_decoder);
+ }
+}
+
+module_init(init_nanddoc);
+module_exit(cleanup_nanddoc);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
diff --git a/u-boot/drivers/mtd/nand/fsl_elbc_nand.c b/u-boot/drivers/mtd/nand/fsl_elbc_nand.c
new file mode 100644
index 0000000..acdb431
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/fsl_elbc_nand.c
@@ -0,0 +1,823 @@
+/* Freescale Enhanced Local Bus Controller FCM NAND driver
+ *
+ * Copyright (c) 2006-2008 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ * Scott Wood <scottwood@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <malloc.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+
+#include <asm/io.h>
+#include <asm/errno.h>
+
+#ifdef VERBOSE_DEBUG
+#define DEBUG_ELBC
+#define vdbg(format, arg...) printf("DEBUG: " format, ##arg)
+#else
+#define vdbg(format, arg...) do {} while (0)
+#endif
+
+/* Can't use plain old DEBUG because the linux mtd
+ * headers define it as a macro.
+ */
+#ifdef DEBUG_ELBC
+#define dbg(format, arg...) printf("DEBUG: " format, ##arg)
+#else
+#define dbg(format, arg...) do {} while (0)
+#endif
+
+#define MAX_BANKS 8
+#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
+#define FCM_TIMEOUT_MSECS 10 /* Maximum number of mSecs to wait for FCM */
+
+#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
+
+struct fsl_elbc_ctrl;
+
+/* mtd information per set */
+
+struct fsl_elbc_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct fsl_elbc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+ int page_size; /* NAND page size (0=512, 1=2048) */
+ unsigned int fmr; /* FCM Flash Mode Register value */
+};
+
+/* overview of the fsl elbc controller */
+
+struct fsl_elbc_ctrl {
+ struct nand_hw_control controller;
+ struct fsl_elbc_mtd *chips[MAX_BANKS];
+
+ /* device info */
+ fsl_lbc_t *regs;
+ u8 __iomem *addr; /* Address of assigned FCM buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes; /* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int status; /* status read from LTESR after last op */
+ unsigned int mdr; /* UPM/FCM Data Register value */
+ unsigned int use_mdr; /* Non zero if the MDR is to be set */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ uint8_t *oob_poi; /* Place to write ECC after read back */
+};
+
+/* These map to the positions used by the FCM hardware ECC generator */
+
+/* Small Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
+ .eccbytes = 3,
+ .eccpos = {6, 7, 8},
+ .oobfree = { {0, 5}, {9, 7} },
+};
+
+/* Small Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
+ .eccbytes = 3,
+ .eccpos = {8, 9, 10},
+ .oobfree = { {0, 5}, {6, 2}, {11, 5} },
+};
+
+/* Large Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
+ .eccbytes = 12,
+ .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
+ .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
+};
+
+/* Large Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
+ .eccbytes = 12,
+ .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
+ .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
+};
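+
+/* For illustration: FMR[ECCM] selects where the FCM places the 3 ECC bytes it
+ * generates per 512-byte sector -- OOB offsets 6..8 for ECCM = 0 or 8..10 for
+ * ECCM = 1. On large-page flash the 64-byte OOB is handled as four 16-byte
+ * slices, one per sector, which is why the eccpos arrays above repeat the
+ * same offsets at +16, +32 and +48.
+ */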
+
+/*
+ * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset
+ * 1, so we have to adjust bad block pattern. This pattern should be used for
+ * x8 chips only. So far hardware does not support x16 chips anyway.
+ */
+static u8 scan_ff_pattern[] = { 0xff, };
+
+static struct nand_bbt_descr largepage_memorybased = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
+
+/*
+ * ELBC may use HW ECC, so the OOB offsets that the NAND core uses for the BBT
+ * can interfere with the ECC positions; that's why we implement our own
+ * descriptors. OOB {11, 5} works for both SP and LP chips, with ECCM = 1 and
+ * ECCM = 0.
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/*=================================*/
+
+/*
+ * Set up the FCM hardware block and page address fields, and the fcm
+ * structure addr field to point to the correct FCM buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ fsl_lbc_t *lbc = ctrl->regs;
+ int buf_num;
+
+ ctrl->page = page_addr;
+
+ if (priv->page_size) {
+ out_be32(&lbc->fbar, page_addr >> 6);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
+ (oob ? FPAR_LP_MS : 0) | column);
+ buf_num = (page_addr & 1) << 2;
+ } else {
+ out_be32(&lbc->fbar, page_addr >> 5);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
+ (oob ? FPAR_SP_MS : 0) | column);
+ buf_num = page_addr & 7;
+ }
+
+ ctrl->addr = priv->vbase + buf_num * 1024;
+ ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ ctrl->index += priv->page_size ? 2048 : 512;
+
+ vdbg("set_addr: bank=%d, ctrl->addr=0x%p (0x%p), "
+ "index %x, pes %d ps %d\n",
+ buf_num, ctrl->addr, priv->vbase, ctrl->index,
+ chip->phys_erase_shift, chip->page_shift);
+}
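+
+/* For illustration: each eLBC bank has eight 1 KiB FCM buffer RAMs. With
+ * small-page flash (512 + 16 bytes) buf_num = page_addr & 7, so consecutive
+ * pages rotate through all eight buffers; with large-page flash
+ * (2048 + 64 bytes) four buffers are ganged and buf_num = (page_addr & 1) << 2,
+ * e.g. page_addr = 5 maps to buffer 4 and ctrl->addr = vbase + 4096. The OOB
+ * sits directly behind the data area, hence the extra 512/2048 added to
+ * ctrl->index above.
+ */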
+
+/*
+ * execute FCM command and wait for it to complete
+ */
+static int fsl_elbc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ fsl_lbc_t *lbc = ctrl->regs;
+ long long end_tick;
+ u32 ltesr;
+
+ /* Setup the FMR[OP] to execute without write protection */
+ out_be32(&lbc->fmr, priv->fmr | 3);
+ if (ctrl->use_mdr)
+ out_be32(&lbc->mdr, ctrl->mdr);
+
+ vdbg("fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
+ in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
+ vdbg("fsl_elbc_run_command: fbar=%08x fpar=%08x "
+ "fbcr=%08x bank=%d\n",
+ in_be32(&lbc->fbar), in_be32(&lbc->fpar),
+ in_be32(&lbc->fbcr), priv->bank);
+
+ /* execute special operation */
+ out_be32(&lbc->lsor, priv->bank);
+
+ /* wait for FCM complete flag or timeout */
+ end_tick = usec2ticks(FCM_TIMEOUT_MSECS * 1000) + get_ticks();
+
+ ltesr = 0;
+ while (end_tick > get_ticks()) {
+ ltesr = in_be32(&lbc->ltesr);
+ if (ltesr & LTESR_CC)
+ break;
+ }
+
+ ctrl->status = ltesr & LTESR_NAND_MASK;
+ out_be32(&lbc->ltesr, ctrl->status);
+ out_be32(&lbc->lteatr, 0);
+
+ /* store mdr value in case it was needed */
+ if (ctrl->use_mdr)
+ ctrl->mdr = in_be32(&lbc->mdr);
+
+ ctrl->use_mdr = 0;
+
+ vdbg("fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n",
+ ctrl->status, ctrl->mdr, in_be32(&lbc->fmr));
+
+	/* returns 0 on success, non-zero otherwise */
+ return ctrl->status == LTESR_CC ? 0 : -EIO;
+}
+
+static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ fsl_lbc_t *lbc = ctrl->regs;
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RBW << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_RBW << FIR_OP3_SHIFT));
+
+ if (oob)
+ out_be32(&lbc->fcr,
+ NAND_CMD_READOOB << FCR_CMD0_SHIFT);
+ else
+ out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
+ }
+}
+
+/* cmdfunc sends commands to the FCM */
+static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ fsl_lbc_t *lbc = ctrl->regs;
+
+ ctrl->use_mdr = 0;
+
+ /* clear the read buffer */
+ ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 and READ1 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ1:
+ column += 256;
+
+ /* fall-through */
+ case NAND_CMD_READ0:
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+ out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
+ set_addr(mtd, 0, page_addr, 0);
+
+ ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ ctrl->index += column;
+
+ fsl_elbc_do_read(chip, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+ out_be32(&lbc->fbcr, mtd->oobsize - column);
+ set_addr(mtd, column, page_addr, 1);
+
+ ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_elbc_do_read(chip, 1);
+ fsl_elbc_run_command(mtd);
+
+ return;
+
+ /* READID must read all 5 possible bytes while CEB is active */
+ case NAND_CMD_READID:
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
+
+ out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_UA << FIR_OP1_SHIFT) |
+ (FIR_OP_RBW << FIR_OP2_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
+ /* 5 bytes for manuf, device and exts */
+ out_be32(&lbc->fbcr, 5);
+ ctrl->read_bytes = 5;
+ ctrl->use_mdr = 1;
+ ctrl->mdr = 0;
+
+ set_addr(mtd, 0, 0, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
+ "page_addr: 0x%x.\n", page_addr);
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_PA << FIR_OP1_SHIFT) |
+ (FIR_OP_CM1 << FIR_OP2_SHIFT));
+
+ out_be32(&lbc->fcr,
+ (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT));
+
+ out_be32(&lbc->fbcr, 0);
+ ctrl->read_bytes = 0;
+
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ u32 fcr;
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
+ "page_addr: 0x%x, column: 0x%x.\n",
+ page_addr, column);
+
+ ctrl->column = column;
+ ctrl->oob = 0;
+
+ if (priv->page_size) {
+ fcr = (NAND_CMD_SEQIN << FCR_CMD0_SHIFT) |
+ (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT);
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_WB << FIR_OP3_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP4_SHIFT));
+ } else {
+ fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP1_SHIFT) |
+ (FIR_OP_CA << FIR_OP2_SHIFT) |
+ (FIR_OP_PA << FIR_OP3_SHIFT) |
+ (FIR_OP_WB << FIR_OP4_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP5_SHIFT));
+
+ if (column >= mtd->writesize) {
+ /* OOB area --> READOOB */
+ column -= mtd->writesize;
+ fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
+ ctrl->oob = 1;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+ } else {
+ /* Second 256 bytes --> READ1 */
+ fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT;
+ }
+ }
+
+ out_be32(&lbc->fcr, fcr);
+ set_addr(mtd, column, page_addr, ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ int full_page;
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
+ "writing %d bytes.\n", ctrl->index);
+
+ /* if the write did not start at 0 or is not a full page
+ * then set the exact length, otherwise use a full page
+ * write so the HW generates the ECC.
+ */
+ if (ctrl->oob || ctrl->column != 0 ||
+ ctrl->index != mtd->writesize + mtd->oobsize) {
+ out_be32(&lbc->fbcr, ctrl->index);
+ full_page = 0;
+ } else {
+ out_be32(&lbc->fbcr, 0);
+ full_page = 1;
+ }
+
+ fsl_elbc_run_command(mtd);
+
+ /* Read back the page in order to fill in the ECC for the
+ * caller. Is this really needed?
+ */
+ if (full_page && ctrl->oob_poi) {
+ out_be32(&lbc->fbcr, 3);
+ set_addr(mtd, 6, page_addr, 1);
+
+ ctrl->read_bytes = mtd->writesize + 9;
+
+ fsl_elbc_do_read(chip, 1);
+ fsl_elbc_run_command(mtd);
+
+ memcpy_fromio(ctrl->oob_poi + 6,
+ &ctrl->addr[ctrl->index], 3);
+ ctrl->index += 3;
+ }
+
+ ctrl->oob_poi = NULL;
+ return;
+ }
+
+ /* CMD_STATUS must read the status byte while CEB is active */
+ /* Note - it does not wait for the ready line */
+ case NAND_CMD_STATUS:
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_RBW << FIR_OP1_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+ out_be32(&lbc->fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ctrl->read_bytes = 1;
+
+ fsl_elbc_run_command(mtd);
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ out_8(ctrl->addr, in_8(ctrl->addr) | NAND_STATUS_WP);
+ return;
+
+ /* RESET without waiting for the ready line */
+ case NAND_CMD_RESET:
+ dbg("fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
+ out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
+ out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ default:
+ printf("fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
+ command);
+ }
+}
+
+static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the FCM Controller Data Buffer
+ */
+static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+		printf("write_buf of %d bytes\n", len);
+ ctrl->status = 0;
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - ctrl->index) {
+ printf("write_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, bufsize - ctrl->index);
+ len = bufsize - ctrl->index;
+ }
+
+ memcpy_toio(&ctrl->addr[ctrl->index], buf, len);
+ /*
+ * This is workaround for the weird elbc hangs during nand write,
+ * Scott Wood says: "...perhaps difference in how long it takes a
+ * write to make it through the localbus compared to a write to IMMR
+ * is causing problems, and sync isn't helping for some reason."
+ * Reading back the last byte helps though.
+ */
+ in_8(&ctrl->addr[ctrl->index] + len - 1);
+
+ ctrl->index += len;
+}
+
+/*
+ * read a byte from either the FCM hardware buffer if it has any data left
+ * otherwise issue a command to read a single byte.
+ */
+static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+
+ /* If there are still bytes in the FCM, then use the next byte. */
+ if (ctrl->index < ctrl->read_bytes)
+ return in_8(&ctrl->addr[ctrl->index++]);
+
+ printf("read_byte beyond end of buffer\n");
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the FCM Controller Data Buffer
+ */
+static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ int avail;
+
+ if (len < 0)
+ return;
+
+ avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index);
+ memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail);
+ ctrl->index += avail;
+
+ if (len > avail)
+ printf("read_buf beyond end of buffer "
+ "(%d requested, %d available)\n",
+ len, avail);
+}
+
+/*
+ * Verify buffer against the FCM Controller Data Buffer
+ */
+static int fsl_elbc_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ int i;
+
+ if (len < 0) {
+		printf("verify_buf of %d bytes\n", len);
+ return -EINVAL;
+ }
+
+ if ((unsigned int)len > ctrl->read_bytes - ctrl->index) {
+ printf("verify_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, ctrl->read_bytes - ctrl->index);
+
+ ctrl->index = ctrl->read_bytes;
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i++)
+ if (in_8(&ctrl->addr[ctrl->index + i]) != buf[i])
+ break;
+
+ ctrl->index += len;
+ return i == len && ctrl->status == LTESR_CC ? 0 : -EIO;
+}
+
+/* This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ fsl_lbc_t *lbc = ctrl->regs;
+
+ if (ctrl->status != LTESR_CC)
+ return NAND_STATUS_FAIL;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+ ctrl->use_mdr = 0;
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_RBW << FIR_OP1_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+ out_be32(&lbc->fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ctrl->read_bytes = 1;
+
+ fsl_elbc_run_command(mtd);
+
+ if (ctrl->status != LTESR_CC)
+ return NAND_STATUS_FAIL;
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ out_8(ctrl->addr, in_8(ctrl->addr) | NAND_STATUS_WP);
+ return fsl_elbc_read_byte(mtd);
+}
+
+static int fsl_elbc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static void fsl_elbc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+
+ fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ ctrl->oob_poi = chip->oob_poi;
+}
+
+static struct fsl_elbc_ctrl *elbc_ctrl;
+
+static void fsl_elbc_ctrl_init(void)
+{
+ elbc_ctrl = kzalloc(sizeof(*elbc_ctrl), GFP_KERNEL);
+ if (!elbc_ctrl)
+ return;
+
+ elbc_ctrl->regs = LBC_BASE_ADDR;
+
+ /* clear event registers */
+ out_be32(&elbc_ctrl->regs->ltesr, LTESR_NAND_MASK);
+ out_be32(&elbc_ctrl->regs->lteatr, 0);
+
+ /* Enable interrupts for any detected events */
+ out_be32(&elbc_ctrl->regs->lteir, LTESR_NAND_MASK);
+
+ elbc_ctrl->read_bytes = 0;
+ elbc_ctrl->index = 0;
+ elbc_ctrl->addr = NULL;
+}
+
+int board_nand_init(struct nand_chip *nand)
+{
+ struct fsl_elbc_mtd *priv;
+ uint32_t br = 0, or = 0;
+
+ if (!elbc_ctrl) {
+ fsl_elbc_ctrl_init();
+ if (!elbc_ctrl)
+ return -1;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ctrl = elbc_ctrl;
+ priv->vbase = nand->IO_ADDR_R;
+
+ /* Find which chip select it is connected to. It'd be nice
+ * if we could pass more than one datum to the NAND driver...
+ */
+ for (priv->bank = 0; priv->bank < MAX_BANKS; priv->bank++) {
+ phys_addr_t base_addr = virt_to_phys(nand->IO_ADDR_R);
+
+ br = in_be32(&elbc_ctrl->regs->bank[priv->bank].br);
+ or = in_be32(&elbc_ctrl->regs->bank[priv->bank].or);
+
+ if ((br & BR_V) && (br & BR_MSEL) == BR_MS_FCM &&
+ (br & or & BR_BA) == BR_PHYS_ADDR(base_addr))
+ break;
+ }
+
+ if (priv->bank >= MAX_BANKS) {
+ printf("fsl_elbc_nand: address did not match any "
+ "chip selects\n");
+ return -ENODEV;
+ }
+
+ elbc_ctrl->chips[priv->bank] = priv;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ nand->read_byte = fsl_elbc_read_byte;
+ nand->write_buf = fsl_elbc_write_buf;
+ nand->read_buf = fsl_elbc_read_buf;
+ nand->verify_buf = fsl_elbc_verify_buf;
+ nand->select_chip = fsl_elbc_select_chip;
+ nand->cmdfunc = fsl_elbc_cmdfunc;
+ nand->waitfunc = fsl_elbc_wait;
+
+	/* set up bad block table descriptors */
+ nand->bbt_td = &bbt_main_descr;
+ nand->bbt_md = &bbt_mirror_descr;
+
+ /* set up nand options */
+ nand->options = NAND_NO_READRDY | NAND_NO_AUTOINCR |
+ NAND_USE_FLASH_BBT;
+
+ nand->controller = &elbc_ctrl->controller;
+ nand->priv = priv;
+
+ nand->ecc.read_page = fsl_elbc_read_page;
+ nand->ecc.write_page = fsl_elbc_write_page;
+
+#ifdef CONFIG_FSL_ELBC_FMR
+ priv->fmr = CONFIG_FSL_ELBC_FMR;
+#else
+ priv->fmr = (15 << FMR_CWTO_SHIFT) | (2 << FMR_AL_SHIFT);
+
+ /*
+ * Hardware expects small page has ECCM0, large page has ECCM1
+ * when booting from NAND. Board config can override if not
+ * booting from NAND.
+ */
+ if (or & OR_FCM_PGS)
+ priv->fmr |= FMR_ECCM;
+#endif
+
+ /* If CS Base Register selects full hardware ECC then use it */
+ if ((br & BR_DECC) == BR_DECC_CHK_GEN) {
+ nand->ecc.mode = NAND_ECC_HW;
+
+ nand->ecc.layout = (priv->fmr & FMR_ECCM) ?
+ &fsl_elbc_oob_sp_eccm1 :
+ &fsl_elbc_oob_sp_eccm0;
+
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 3;
+ nand->ecc.steps = 1;
+ } else {
+ /* otherwise fall back to default software ECC */
+ nand->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ /* Large-page-specific setup */
+ if (or & OR_FCM_PGS) {
+ priv->page_size = 1;
+ nand->badblock_pattern = &largepage_memorybased;
+
+ /* adjust ecc setup if needed */
+ if ((br & BR_DECC) == BR_DECC_CHK_GEN) {
+ nand->ecc.steps = 4;
+ nand->ecc.layout = (priv->fmr & FMR_ECCM) ?
+ &fsl_elbc_oob_lp_eccm1 :
+ &fsl_elbc_oob_lp_eccm0;
+ }
+ }
+
+ return 0;
+}
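+
+/* A board can override the FMR value computed above via CONFIG_FSL_ELBC_FMR.
+ * A hypothetical config entry (illustrative only; the shift and flag names
+ * are the ones already used in this driver) could look like:
+ *
+ *   #define CONFIG_FSL_ELBC_FMR \
+ *           ((15 << FMR_CWTO_SHIFT) | (2 << FMR_AL_SHIFT) | FMR_ECCM)
+ *
+ * i.e. the longest command wait timeout, the same address length as the
+ * default above, and the ECCM = 1 ECC byte placement.
+ */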
diff --git a/u-boot/drivers/mtd/nand/fsl_upm.c b/u-boot/drivers/mtd/nand/fsl_upm.c
new file mode 100644
index 0000000..c33e278
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/fsl_upm.c
@@ -0,0 +1,202 @@
+/*
+ * FSL UPM NAND driver
+ *
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <config.h>
+#include <common.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/fsl_upm.h>
+#include <nand.h>
+
+static void fsl_upm_start_pattern(struct fsl_upm *upm, u32 pat_offset)
+{
+ clrsetbits_be32(upm->mxmr, MxMR_MAD_MSK, MxMR_OP_RUNP | pat_offset);
+ (void)in_be32(upm->mxmr);
+}
+
+static void fsl_upm_end_pattern(struct fsl_upm *upm)
+{
+ clrbits_be32(upm->mxmr, MxMR_OP_RUNP);
+
+ while (in_be32(upm->mxmr) & MxMR_OP_RUNP)
+ eieio();
+}
+
+static void fsl_upm_run_pattern(struct fsl_upm *upm, int width,
+ void __iomem *io_addr, u32 mar)
+{
+ out_be32(upm->mar, mar);
+ (void)in_be32(upm->mar);
+ switch (width) {
+ case 8:
+ out_8(io_addr, 0x0);
+ break;
+ case 16:
+ out_be16(io_addr, 0x0);
+ break;
+ case 32:
+ out_be32(io_addr, 0x0);
+ break;
+ }
+}
+
+static void fun_wait(struct fsl_upm_nand *fun)
+{
+ if (fun->dev_ready) {
+ while (!fun->dev_ready(fun->chip_nr))
+ debug("unexpected busy state\n");
+ } else {
+ /*
+ * If the R/B pin is not connected, like on the TQM8548,
+ * a short delay is necessary.
+ */
+ udelay(1);
+ }
+}
+
+#if CONFIG_SYS_NAND_MAX_CHIPS > 1
+static void fun_select_chip(struct mtd_info *mtd, int chip_nr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_upm_nand *fun = chip->priv;
+
+ if (chip_nr >= 0) {
+ fun->chip_nr = chip_nr;
+ chip->IO_ADDR_R = chip->IO_ADDR_W =
+ fun->upm.io_addr + fun->chip_offset * chip_nr;
+ } else if (chip_nr == -1) {
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ }
+}
+#endif
+
+static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_upm_nand *fun = chip->priv;
+ void __iomem *io_addr;
+ u32 mar;
+
+ if (!(ctrl & fun->last_ctrl)) {
+ fsl_upm_end_pattern(&fun->upm);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE);
+ }
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_ALE)
+ fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
+ else if (ctrl & NAND_CLE)
+ fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
+ }
+
+ mar = cmd << (32 - fun->width);
+ io_addr = fun->upm.io_addr;
+#if CONFIG_SYS_NAND_MAX_CHIPS > 1
+ if (fun->chip_nr > 0) {
+ io_addr += fun->chip_offset * fun->chip_nr;
+ if (fun->upm_mar_chip_offset)
+ mar |= fun->upm_mar_chip_offset * fun->chip_nr;
+ }
+#endif
+ fsl_upm_run_pattern(&fun->upm, fun->width, io_addr, mar);
+
+ /*
+	 * Some boards/chips need this. At least the MPC8360E-RDK and
+	 * TQM8548 need it. Probably a quirk of those chips, because I don't
+	 * see any need for this on MPC8555E + Samsung K9F1G08U0A. Usually
+	 * there are 0-2 unexpected busy states per block read.
+ */
+ if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN)
+ fun_wait(fun);
+}
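+
+/* For illustration: the byte to be driven onto the NAND bus is left-aligned
+ * in MAR so it lands on the correct byte lane for the configured bus width,
+ * e.g. with an 8-bit bus and cmd = NAND_CMD_RESET (0xff),
+ * mar = 0xff << (32 - 8) = 0xff000000. The dummy write issued by
+ * fsl_upm_run_pattern() then triggers the RUNP pattern that strobes the
+ * command/address cycle.
+ */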
+
+static u8 nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ return in_8(chip->IO_ADDR_R);
+}
+
+static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_upm_nand *fun = chip->priv;
+
+ for (i = 0; i < len; i++) {
+ out_8(chip->IO_ADDR_W, buf[i]);
+ if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE)
+ fun_wait(fun);
+ }
+
+ if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BUFFER)
+ fun_wait(fun);
+}
+
+static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ buf[i] = in_8(chip->IO_ADDR_R);
+}
+
+static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ for (i = 0; i < len; i++) {
+ if (buf[i] != in_8(chip->IO_ADDR_R))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int nand_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_upm_nand *fun = chip->priv;
+
+ return fun->dev_ready(fun->chip_nr);
+}
+
+int fsl_upm_nand_init(struct nand_chip *chip, struct fsl_upm_nand *fun)
+{
+ if (fun->width != 8 && fun->width != 16 && fun->width != 32)
+ return -ENOSYS;
+
+ fun->last_ctrl = NAND_CLE;
+
+ chip->priv = fun;
+ chip->chip_delay = fun->chip_delay;
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->cmd_ctrl = fun_cmd_ctrl;
+#if CONFIG_SYS_NAND_MAX_CHIPS > 1
+ chip->select_chip = fun_select_chip;
+#endif
+ chip->read_byte = nand_read_byte;
+ chip->read_buf = nand_read_buf;
+ chip->write_buf = nand_write_buf;
+ chip->verify_buf = nand_verify_buf;
+ if (fun->dev_ready)
+ chip->dev_ready = nand_dev_ready;
+
+ return 0;
+}
diff --git a/u-boot/drivers/mtd/nand/kb9202_nand.c b/u-boot/drivers/mtd/nand/kb9202_nand.c
new file mode 100644
index 0000000..b8f46fa
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/kb9202_nand.c
@@ -0,0 +1,150 @@
+/*
+ * (C) Copyright 2006
+ * KwikByte <kb9200_dev@kwikbyte.com>
+ *
+ * (C) Copyright 2009
+ * Matthias Kaehlcke <matthias@kaehlcke.net>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/arch/AT91RM9200.h>
+#include <asm/arch/hardware.h>
+
+#include <nand.h>
+
+/*
+ * hardware specific access to control-lines
+ */
+
+#define MASK_ALE (1 << 22) /* our ALE is A22 */
+#define MASK_CLE (1 << 21) /* our CLE is A21 */
+
+#define KB9202_NAND_NCE (1 << 28) /* EN* on D28 */
+#define KB9202_NAND_BUSY (1 << 29) /* RB* on D29 */
+
+#define KB9202_SMC2_NWS (1 << 2)
+#define KB9202_SMC2_TDF (1 << 8)
+#define KB9202_SMC2_RWSETUP (1 << 24)
+#define KB9202_SMC2_RWHOLD (1 << 29)
+
+/*
+ * Board-specific function to access device control signals
+ */
+static void kb9202_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ ulong IO_ADDR_W = (ulong) this->IO_ADDR_W;
+
+ /* clear ALE and CLE bits */
+ IO_ADDR_W &= ~(MASK_ALE | MASK_CLE);
+
+ if (ctrl & NAND_CLE)
+ IO_ADDR_W |= MASK_CLE;
+
+ if (ctrl & NAND_ALE)
+ IO_ADDR_W |= MASK_ALE;
+
+ this->IO_ADDR_W = (void *) IO_ADDR_W;
+
+ if (ctrl & NAND_NCE)
+ writel(KB9202_NAND_NCE, AT91C_PIOC_CODR);
+ else
+ writel(KB9202_NAND_NCE, AT91C_PIOC_SODR);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
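+
+/* For illustration: CLE and ALE are wired to address lines A21 and A22, so
+ * asserting them only requires setting those bits in the write address.
+ * Assuming (hypothetically) the NAND data path were mapped at 0x40000000, a
+ * command byte would be written to 0x40200000 (A21 set) and an address byte
+ * to 0x40400000 (A22 set), while plain data uses the base address.
+ */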
+
+
+/*
+ * Board-specific function to access the device ready signal.
+ */
+static int kb9202_nand_ready(struct mtd_info *mtd)
+{
+ return readl(AT91C_PIOC_PDSR) & KB9202_NAND_BUSY;
+}
+
+
+/*
+ * Board-specific NAND init. Copied from include/linux/mtd/nand.h for reference.
+ *
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the flash device
+ * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the flash device
+ * @hwcontrol: [BOARDSPECIFIC] hardware-specific function for accessing control lines
+ * @dev_ready: [BOARDSPECIFIC] hardware-specific function for accessing the device ready/busy line
+ * If set to NULL no access to ready/busy is available and the ready/busy information
+ * is read from the chip status register
+ * @enable_hwecc: [BOARDSPECIFIC] function to enable (reset) hardware ecc generator. Must only
+ * be provided if a hardware ECC is available
+ * @eccmode: [BOARDSPECIFIC] mode of ecc, see defines
+ * @chip_delay: [BOARDSPECIFIC] chip-dependent delay for transferring data from array to read regs (tR)
+ * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about
+ * special functionality. See the defines for further explanation
+*/
+/*
+ * This routine initializes controller and GPIOs.
+ */
+int board_nand_init(struct nand_chip *nand)
+{
+ unsigned int value;
+
+ nand->ecc.mode = NAND_ECC_SOFT;
+ nand->cmd_ctrl = kb9202_nand_hwcontrol;
+ nand->dev_ready = kb9202_nand_ready;
+
+ /* in case running outside of bootloader */
+ writel(1 << AT91C_ID_PIOC, AT91C_PMC_PCER);
+
+ /* setup nand flash access (allow ample margin) */
+ /* 4 wait states, 1 setup, 1 hold, 1 float for 8-bit device */
+ writel(AT91C_SMC2_WSEN | KB9202_SMC2_NWS | KB9202_SMC2_TDF |
+ AT91C_SMC2_DBW_8 | KB9202_SMC2_RWSETUP | KB9202_SMC2_RWHOLD,
+ AT91C_SMC_CSR3);
+
+ /* enable internal NAND controller */
+ value = readl(AT91C_EBI_CSA);
+ value |= AT91C_EBI_CS3A_SMC_SmartMedia;
+ writel(value, AT91C_EBI_CSA);
+
+ /* enable SMOE/SMWE */
+ writel(AT91C_PC1_BFRDY_SMOE | AT91C_PC3_BFBAA_SMWE, AT91C_PIOC_ASR);
+ writel(AT91C_PC1_BFRDY_SMOE | AT91C_PC3_BFBAA_SMWE, AT91C_PIOC_PDR);
+ writel(AT91C_PC1_BFRDY_SMOE | AT91C_PC3_BFBAA_SMWE, AT91C_PIOC_OER);
+
+ /* set NCE to high */
+ writel(KB9202_NAND_NCE, AT91C_PIOC_SODR);
+
+ /* disable output on pin connected to the busy line of the NAND */
+ writel(KB9202_NAND_BUSY, AT91C_PIOC_ODR);
+
+ /* enable the PIO to control NCE and BUSY */
+ writel(KB9202_NAND_NCE | KB9202_NAND_BUSY, AT91C_PIOC_PER);
+
+ /* enable output for NCE */
+ writel(KB9202_NAND_NCE, AT91C_PIOC_OER);
+
+ return (0);
+}
diff --git a/u-boot/drivers/mtd/nand/kirkwood_nand.c b/u-boot/drivers/mtd/nand/kirkwood_nand.c
new file mode 100644
index 0000000..376378e
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/kirkwood_nand.c
@@ -0,0 +1,82 @@
+/*
+ * (C) Copyright 2009
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/arch/kirkwood.h>
+#include <nand.h>
+
+/* NAND Flash Soc registers */
+struct kwnandf_registers {
+ u32 rd_params; /* 0x10418 */
+ u32 wr_param; /* 0x1041c */
+ u8 pad[0x10470 - 0x1041c - 4];
+ u32 ctrl; /* 0x10470 */
+};
+
+static struct kwnandf_registers *nf_reg =
+ (struct kwnandf_registers *)KW_NANDF_BASE;
+
+/*
+ * hardware specific access to control-lines/bits
+ */
+#define NAND_ACTCEBOOT_BIT 0x02
+
+static void kw_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *nc = mtd->priv;
+ u32 offs;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ offs = (1 << 0); /* Commands with A[1:0] == 01 */
+ else if (ctrl & NAND_ALE)
+ offs = (1 << 1); /* Addresses with A[1:0] == 10 */
+ else
+ return;
+
+ writeb(cmd, nc->IO_ADDR_W + offs);
+}
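+
+/* For illustration: the Kirkwood NAND glue decodes address bits A[1:0] of the
+ * access, so data cycles go to IO_ADDR_W + 0, command cycles to
+ * IO_ADDR_W + 1 (A[1:0] == 01) and address cycles to IO_ADDR_W + 2
+ * (A[1:0] == 10), which is exactly what the offs computation above encodes.
+ */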
+
+void kw_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ u32 data;
+
+ data = readl(&nf_reg->ctrl);
+ data |= NAND_ACTCEBOOT_BIT;
+ writel(data, &nf_reg->ctrl);
+}
+
+int board_nand_init(struct nand_chip *nand)
+{
+ nand->options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING;
+ nand->ecc.mode = NAND_ECC_SOFT;
+ nand->cmd_ctrl = kw_nand_hwcontrol;
+ nand->chip_delay = 30;
+ nand->select_chip = kw_nand_select_chip;
+ return 0;
+}
diff --git a/u-boot/drivers/mtd/nand/kmeter1_nand.c b/u-boot/drivers/mtd/nand/kmeter1_nand.c
new file mode 100644
index 0000000..e8e5b7b
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/kmeter1_nand.c
@@ -0,0 +1,135 @@
+/*
+ * (C) Copyright 2009
+ * Heiko Schocher, DENX Software Engineering, hs@denx.de
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <asm/io.h>
+
+#define CONFIG_NAND_MODE_REG (void *)(CONFIG_SYS_NAND_BASE + 0x20000)
+#define CONFIG_NAND_DATA_REG (void *)(CONFIG_SYS_NAND_BASE + 0x30000)
+
+#define read_mode() in_8(CONFIG_NAND_MODE_REG)
+#define write_mode(val) out_8(CONFIG_NAND_MODE_REG, val)
+#define read_data() in_8(CONFIG_NAND_DATA_REG)
+#define write_data(val) out_8(CONFIG_NAND_DATA_REG, val)
+
+#define KPN_RDY2 (1 << 7)
+#define KPN_RDY1 (1 << 6)
+#define KPN_WPN (1 << 4)
+#define KPN_CE2N (1 << 3)
+#define KPN_CE1N (1 << 2)
+#define KPN_ALE (1 << 1)
+#define KPN_CLE (1 << 0)
+
+#define KPN_DEFAULT_CHIP_DELAY 50
+
+static int kpn_chip_ready(void)
+{
+ if (read_mode() & KPN_RDY1)
+ return 1;
+
+ return 0;
+}
+
+static void kpn_wait_rdy(void)
+{
+ int cnt = 1000000;
+
+ while (--cnt && !kpn_chip_ready())
+ udelay(1);
+
+ if (!cnt)
+ printf ("timeout while waiting for RDY\n");
+}
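+
+/*
+ * Note: the loop above polls in 1 us steps for up to 1,000,000
+ * iterations, i.e. roughly one second, before the timeout message is
+ * printed; the access then proceeds regardless.
+ */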
+
+static void kpn_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ u8 reg_val = read_mode();
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ reg_val = reg_val & ~(KPN_ALE + KPN_CLE);
+
+ if (ctrl & NAND_CLE)
+ reg_val = reg_val | KPN_CLE;
+ if (ctrl & NAND_ALE)
+ reg_val = reg_val | KPN_ALE;
+ if (ctrl & NAND_NCE)
+ reg_val = reg_val & ~KPN_CE1N;
+ else
+ reg_val = reg_val | KPN_CE1N;
+
+ write_mode(reg_val);
+ }
+ if (cmd != NAND_CMD_NONE)
+ write_data(cmd);
+
+ /* wait until flash is ready */
+ kpn_wait_rdy();
+}
+
+static u_char kpn_nand_read_byte(struct mtd_info *mtd)
+{
+ return read_data();
+}
+
+static void kpn_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ write_data(buf[i]);
+ kpn_wait_rdy();
+ }
+}
+
+static void kpn_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = read_data();
+}
+
+static int kpn_nand_dev_ready(struct mtd_info *mtd)
+{
+ kpn_wait_rdy();
+
+ return 1;
+}
+
+int board_nand_init(struct nand_chip *nand)
+{
+ nand->ecc.mode = NAND_ECC_SOFT;
+
+ /* Reference hardware control function */
+ nand->cmd_ctrl = kpn_nand_hwcontrol;
+ nand->read_byte = kpn_nand_read_byte;
+ nand->write_buf = kpn_nand_write_buf;
+ nand->read_buf = kpn_nand_read_buf;
+ nand->dev_ready = kpn_nand_dev_ready;
+ nand->chip_delay = KPN_DEFAULT_CHIP_DELAY;
+
+ /* reset mode register */
+ write_mode(KPN_CE1N + KPN_CE2N + KPN_WPN);
+ return 0;
+}
diff --git a/u-boot/drivers/mtd/nand/mpc5121_nfc.c b/u-boot/drivers/mtd/nand/mpc5121_nfc.c
new file mode 100644
index 0000000..7fd8a35
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/mpc5121_nfc.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ * (C) Copyright 2009 Stefan Roese <sr@denx.de>
+ *
+ * Based on original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on basis
+ * of drivers/mtd/nand/mxc_nand.c. Reworked and extended
+ * Piotr Ziecik <kosmo@semihalf.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <common.h>
+#include <malloc.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/compat.h>
+
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <nand.h>
+
+#define DRV_NAME "mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
+#define NFC_TIMEOUT 2000 /* 2000 us */
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS 8
+#define NFC_SPARE_LEN 0x40
+#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
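+/* e.g. spare buffer 0 starts at offset 0x1000, buffer 7 at 0x11C0 */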
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR 0x1E04
+#define NFC_FLASH_ADDR 0x1E06
+#define NFC_FLASH_CMD 0x1E08
+#define NFC_CONFIG 0x1E0A
+#define NFC_ECC_STATUS1 0x1E0C
+#define NFC_ECC_STATUS2 0x1E0E
+#define NFC_SPAS 0x1E10
+#define NFC_WRPROT 0x1E12
+#define NFC_NF_WRPRST 0x1E18
+#define NFC_CONFIG1 0x1E1A
+#define NFC_CONFIG2 0x1E1C
+#define NFC_UNLOCKSTART_BLK0 0x1E20
+#define NFC_UNLOCKEND_BLK0 0x1E22
+#define NFC_UNLOCKSTART_BLK1 0x1E24
+#define NFC_UNLOCKEND_BLK1 0x1E26
+#define NFC_UNLOCKSTART_BLK2 0x1E28
+#define NFC_UNLOCKEND_BLK2 0x1E2A
+#define NFC_UNLOCKSTART_BLK3 0x1E2C
+#define NFC_UNLOCKEND_BLK3 0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK (7 << 0)
+#define NFC_ACTIVE_CS_SHIFT 5
+#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED (1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT (1 << 0)
+#define NFC_FULL_PAGE_DMA (1 << 1)
+#define NFC_SPARE_ONLY (1 << 2)
+#define NFC_ECC_ENABLE (1 << 3)
+#define NFC_INT_MASK (1 << 4)
+#define NFC_BIG_ENDIAN (1 << 5)
+#define NFC_RESET (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_PPB_32 (0 << 9)
+#define NFC_PPB_64 (1 << 9)
+#define NFC_PPB_128 (2 << 9)
+#define NFC_PPB_256 (3 << 9)
+#define NFC_PPB_MASK (3 << 9)
+#define NFC_FULL_PAGE_INT (1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND (1 << 0)
+#define NFC_ADDRESS (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+#define NFC_CMD_FAIL (1 << 15)
+#define NFC_INT (1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT (1 << 0)
+#define NFC_WPC_LOCK (1 << 1)
+#define NFC_WPC_UNLOCK (1 << 2)
+
+struct mpc5121_nfc_prv {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ uint column;
+ int spareonly;
+ int chipsel;
+};
+
+int mpc5121_nfc_chip = 0;
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+ nfc_write(mtd, NFC_FLASH_ADDR, addr);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+ nfc_write(mtd, NFC_FLASH_CMD, cmd);
+ nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+ mpc5121_nfc_done(mtd);
+}
+
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+ int max_retries = NFC_TIMEOUT;
+
+ while (1) {
+ max_retries--;
+ if (nfc_read(mtd, NFC_CONFIG2) & NFC_INT)
+ break;
+ udelay(1);
+ }
+
+ if (max_retries <= 0)
+ printk(KERN_WARNING DRV_NAME
+ ": Timeout while waiting for completion.\n");
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ u32 pagemask = chip->pagemask;
+
+ if (column != -1) {
+ mpc5121_nfc_send_addr(mtd, column);
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_addr(mtd, column >> 8);
+ }
+
+ if (page != -1) {
+ do {
+ mpc5121_nfc_send_addr(mtd, page & 0xFF);
+ page >>= 8;
+ pagemask >>= 8;
+ } while (pagemask);
+ }
+}
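+
+/*
+ * Example (illustrative): on a large-page chip (writesize > 512) an
+ * access to page P at column C issues two column cycles (C, then
+ * C >> 8) followed by one page-address cycle per byte of
+ * chip->pagemask, e.g. three cycles for a pagemask of 0xFFFFF.
+ */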
+
+/* Control chip select signals */
+
+/*
+ * Selecting the active device:
+ *
+ * This is different from the Linux version. Switching between chips
+ * is done via board_nand_select_device(). The Linux-style select_chip
+ * function used here in U-Boot has only two valid chip numbers:
+ * 0 select
+ * -1 deselect
+ */
+
+/*
+ * Implement it as a weak default, so that boards with a specific
+ * chip-select routine can use their own function.
+ */
+void __mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ if (chip < 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+ return;
+ }
+
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+ NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+ __attribute__((weak, alias("__mpc5121_nfc_select_chip")));
+
+void board_nand_select_device(struct nand_chip *nand, int chip)
+{
+ /*
+ * Only save this chip number in global variable here. This
+ * will be used later in mpc5121_nfc_select_chip().
+ */
+ mpc5121_nfc_chip = chip;
+}
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles ready/busy signal internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
+ int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ prv->column = (column >= 0) ? column : 0;
+ prv->spareonly = 0;
+
+ switch (command) {
+ case NAND_CMD_PAGEPROG:
+ mpc5121_nfc_send_prog_page(mtd);
+ break;
+ /*
+ * NFC does not support sub-page reads and writes,
+ * so emulate them using full page transfers.
+ */
+ case NAND_CMD_READ0:
+ column = 0;
+ break;
+
+ case NAND_CMD_READ1:
+ prv->column += 256;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_READOOB:
+ prv->spareonly = 1;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_SEQIN:
+ mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
+ column = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_READID:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RESET:
+ break;
+
+ default:
+ return;
+ }
+
+ mpc5121_nfc_send_cmd(mtd, command);
+ mpc5121_nfc_addr_cycle(mtd, column, page);
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+ mpc5121_nfc_send_read_page(mtd);
+ break;
+
+ case NAND_CMD_READID:
+ mpc5121_nfc_send_read_id(mtd);
+ break;
+
+ case NAND_CMD_STATUS:
+ mpc5121_nfc_send_read_status(mtd);
+ if (chip->options & NAND_BUSWIDTH_16)
+ prv->column = 1;
+ else
+ prv->column = 0;
+ break;
+ }
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+ u8 * buffer, uint size, int wr)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ uint o, s, sbsize, blksize;
+
+ /*
+ * NAND spare area is available through NFC spare buffers.
+ * The NFC divides spare area into (page_size / 512) chunks.
+ * Each chunk is placed into separate spare memory area, using
+ * first (spare_size / num_of_chunks) bytes of the buffer.
+ *
+	 * For NAND devices whose spare area does not divide evenly by
+	 * the number of chunks, the number of used bytes in each spare
+	 * buffer is rounded down to the nearest even number of bytes,
+ * and all remaining bytes are added to the last used spare area.
+ *
+ * For more information read section 26.6.10 of MPC5121e
+ * Microcontroller Reference Manual, Rev. 3.
+ */
+
+ /* Calculate number of valid bytes in each spare buffer */
+ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
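+	/*
+	 * Example (assumed geometry, for illustration only): a 2048-byte
+	 * page with a 64-byte spare area gives 4 chunks of 16 bytes each,
+	 * so spare offset 40 maps to buffer 2, byte 8 within that buffer.
+	 */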
+
+ while (size) {
+ /* Calculate spare buffer number */
+ s = offset / sbsize;
+ if (s > NFC_SPARE_BUFFERS - 1)
+ s = NFC_SPARE_BUFFERS - 1;
+
+ /*
+ * Calculate offset to requested data block in selected spare
+ * buffer and its size.
+ */
+ o = offset - (s * sbsize);
+ blksize = min(sbsize - o, size);
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+ buffer, blksize);
+ else
+ memcpy_fromio(buffer,
+ prv->regs + NFC_SPARE_AREA(s) + o,
+ blksize);
+
+ buffer += blksize;
+ offset += blksize;
+ size -= blksize;
+	}
+}
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char * buf, int len,
+ int wr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ uint c = prv->column;
+ uint l;
+
+ /* Handle spare area access */
+ if (prv->spareonly || c >= mtd->writesize) {
+ /* Calculate offset from beginning of spare area */
+ if (c >= mtd->writesize)
+ c -= mtd->writesize;
+
+ prv->column += len;
+ mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+ return;
+ }
+
+ /*
+ * Handle main area access - limit copy length to prevent
+ * crossing main/spare boundary.
+ */
+ l = min((uint) len, mtd->writesize - c);
+ prv->column += l;
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+ else
+ memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+ /* Handle crossing main/spare boundary */
+ if (l != len) {
+ buf += l;
+ len -= l;
+ mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+ }
+}
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char * buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
+ const u_char * buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, (u_char *) buf, len, 1);
+}
+
+/* Compare buffer with NAND flash */
+static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
+ const u_char * buf, int len)
+{
+ u_char tmp[256];
+ uint bsize;
+
+ while (len) {
+ bsize = min(len, 256);
+ mpc5121_nfc_read_buf(mtd, tmp, bsize);
+
+ if (memcmp(buf, tmp, bsize))
+ return 1;
+
+ buf += bsize;
+ len -= bsize;
+ }
+
+ return 0;
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
+{
+ u8 tmp;
+
+ mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/* Read word from NFC buffers */
+static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
+{
+ u16 tmp;
+
+ mpc5121_nfc_read_buf(mtd, (u_char *) & tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * The NFC is configured during reset on the basis of information
+ * stored in the Reset Config Word. There is no other way to set the
+ * NAND block size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+ immap_t *im = (immap_t *)CONFIG_SYS_IMMR;
+ struct nand_chip *chip = mtd->priv;
+ uint rcw_pagesize = 0;
+ uint rcw_sparesize = 0;
+ uint rcw_width;
+ uint rcwh;
+ uint romloc, ps;
+
+ rcwh = in_be32(&(im->reset.rcwh));
+
+ /* Bit 6: NFC bus width */
+ rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+ /* Bit 7: NFC Page/Spare size */
+ ps = (rcwh >> 7) & 0x1;
+
+ /* Bits [22:21]: ROM Location */
+ romloc = (rcwh >> 21) & 0x3;
+
+ /* Decode RCW bits */
+ switch ((ps << 2) | romloc) {
+ case 0x00:
+ case 0x01:
+ rcw_pagesize = 512;
+ rcw_sparesize = 16;
+ break;
+ case 0x02:
+ case 0x03:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 128;
+ break;
+ case 0x04:
+ case 0x05:
+ rcw_pagesize = 2048;
+ rcw_sparesize = 64;
+ break;
+ case 0x06:
+ case 0x07:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 218;
+ break;
+ }
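+	/*
+	 * Example (illustrative): PS = 1 and ROMLOC = 0b01 give the key
+	 * 0x05, i.e. a 2048-byte page with a 64-byte spare area.
+	 */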
+
+ mtd->writesize = rcw_pagesize;
+ mtd->oobsize = rcw_sparesize;
+ if (rcw_width == 2)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ debug(KERN_NOTICE DRV_NAME ": Configured for "
+ "%u-bit NAND, page size %u with %u spare.\n",
+ rcw_width * 8, rcw_pagesize, rcw_sparesize);
+ return 0;
+}
+
+int board_nand_init(struct nand_chip *chip)
+{
+ struct mpc5121_nfc_prv *prv;
+ struct mtd_info *mtd;
+ int resettime = 0;
+ int retval = 0;
+ int rev;
+ static int chip_nr = 0;
+
+ /*
+ * Check SoC revision. This driver supports only NFC
+ * in MPC5121 revision 2.
+ */
+ rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+ if (rev != 2) {
+ printk(KERN_ERR DRV_NAME
+ ": SoC revision %u is not supported!\n", rev);
+ return -ENXIO;
+ }
+
+ prv = malloc(sizeof(*prv));
+ if (!prv) {
+ printk(KERN_ERR DRV_NAME ": Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ mtd = &nand_info[chip_nr++];
+ mtd->priv = chip;
+ chip->priv = prv;
+
+ /* Read NFC configuration from Reset Config Word */
+ retval = mpc5121_nfc_read_hw_config(mtd);
+ if (retval) {
+ printk(KERN_ERR DRV_NAME ": Unable to read NFC config!\n");
+ return retval;
+ }
+
+ prv->regs = (void __iomem *)CONFIG_SYS_NAND_BASE;
+ chip->dev_ready = mpc5121_nfc_dev_ready;
+ chip->cmdfunc = mpc5121_nfc_command;
+ chip->read_byte = mpc5121_nfc_read_byte;
+ chip->read_word = mpc5121_nfc_read_word;
+ chip->read_buf = mpc5121_nfc_read_buf;
+ chip->write_buf = mpc5121_nfc_write_buf;
+ chip->verify_buf = mpc5121_nfc_verify_buf;
+ chip->select_chip = mpc5121_nfc_select_chip;
+ chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Reset NAND Flash controller */
+ nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+ while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+ if (resettime++ >= NFC_RESET_TIMEOUT) {
+ printk(KERN_ERR DRV_NAME
+ ": Timeout while resetting NFC!\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
+ udelay(1);
+ }
+
+ /* Enable write to NFC memory */
+ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+ /* Enable write to all NAND pages */
+ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+ nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+ nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+ /*
+ * Setup NFC:
+ * - Big Endian transfers,
+ * - Interrupt after full page read/write.
+ */
+ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+ NFC_FULL_PAGE_INT);
+
+ /* Set spare area size */
+ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
+
+ /* Detect NAND chips */
+ if (nand_scan(mtd, 1)) {
+ printk(KERN_ERR DRV_NAME ": NAND Flash not found !\n");
+ retval = -ENXIO;
+ goto error;
+ }
+
+ /* Set erase block size */
+ switch (mtd->erasesize / mtd->writesize) {
+ case 32:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+ break;
+
+ case 64:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+ break;
+
+ case 128:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+ break;
+
+ case 256:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+ break;
+
+ default:
+ printk(KERN_ERR DRV_NAME ": Unsupported NAND flash!\n");
+ retval = -ENXIO;
+ goto error;
+ }
+
+ return 0;
+error:
+ return retval;
+}
diff --git a/u-boot/drivers/mtd/nand/mxc_nand.c b/u-boot/drivers/mtd/nand/mxc_nand.c
new file mode 100644
index 0000000..2a8dd7e
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/mxc_nand.c
@@ -0,0 +1,1393 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc.
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
+ * Copyright 2009 Ilya Yanok, <yanok@emcraft.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <linux/err.h>
+#include <asm/io.h>
+#if defined(CONFIG_MX25) || defined(CONFIG_MX27) || defined(CONFIG_MX35)
+#include <asm/arch/imx-regs.h>
+#endif
+
+#define DRIVER_NAME "mxc_nand"
+
+/*
+ * TODO: Use same register defs here as nand_spl mxc nand driver.
+ */
+/*
+ * Register map and bit definitions for the Freescale NAND Flash Controller
+ * present in various i.MX devices.
+ *
+ * MX31 and MX27 have version 1 which has
+ * 4 512 byte main buffers and
+ * 4 16 byte spare buffers
+ * to support up to 2K byte pagesize nand.
+ * Reading or writing a 2K page requires 4 FDI/FDO cycles.
+ *
+ * MX25 has version 1.1 which has
+ * 8 512 byte main buffers and
+ * 8 64 byte spare buffers
+ * to support up to 4K byte pagesize nand.
+ * Reading or writing a 2K or 4K page requires only 1 FDI/FDO cycle.
+ * Also, some registers have moved and/or changed meaning, as seen below.
+ */
+#if defined(CONFIG_MX31) || defined(CONFIG_MX27)
+#define MXC_NFC_V1
+#elif defined(CONFIG_MX25) || defined(CONFIG_MX35)
+#define MXC_NFC_V1_1
+#else
+#warning "MXC NFC version not defined"
+#endif
+
+#if defined(MXC_NFC_V1)
+#define NAND_MXC_NR_BUFS 4
+#define NAND_MXC_SPARE_BUF_SIZE 16
+#define NAND_MXC_REG_OFFSET 0xe00
+#define is_mxc_nfc_11() 0
+#elif defined(MXC_NFC_V1_1)
+#define NAND_MXC_NR_BUFS 8
+#define NAND_MXC_SPARE_BUF_SIZE 64
+#define NAND_MXC_REG_OFFSET 0x1e00
+#define is_mxc_nfc_11() 1
+#else
+#error "define CONFIG_NAND_MXC_VXXX to use mtd mxc nand driver"
+#endif
+struct nfc_regs {
+ uint8_t main_area[NAND_MXC_NR_BUFS][0x200];
+ uint8_t spare_area[NAND_MXC_NR_BUFS][NAND_MXC_SPARE_BUF_SIZE];
+ /*
+ * reserved size is offset of nfc registers
+ * minus total main and spare sizes
+ */
+ uint8_t reserved1[NAND_MXC_REG_OFFSET
+ - NAND_MXC_NR_BUFS * (512 + NAND_MXC_SPARE_BUF_SIZE)];
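+	/*
+	 * e.g. NFC v1: 0xe00 - 4 * (512 + 16) = 0x5c0 pad bytes;
+	 * NFC v1.1: 0x1e00 - 8 * (512 + 64) = 0xc00 pad bytes.
+	 */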
+#if defined(MXC_NFC_V1)
+ uint16_t nfc_buf_size;
+ uint16_t reserved2;
+ uint16_t nfc_buf_addr;
+ uint16_t nfc_flash_addr;
+ uint16_t nfc_flash_cmd;
+ uint16_t nfc_config;
+ uint16_t nfc_ecc_status_result;
+ uint16_t nfc_rsltmain_area;
+ uint16_t nfc_rsltspare_area;
+ uint16_t nfc_wrprot;
+ uint16_t nfc_unlockstart_blkaddr;
+ uint16_t nfc_unlockend_blkaddr;
+ uint16_t nfc_nf_wrprst;
+ uint16_t nfc_config1;
+ uint16_t nfc_config2;
+#elif defined(MXC_NFC_V1_1)
+ uint16_t reserved2[2];
+ uint16_t nfc_buf_addr;
+ uint16_t nfc_flash_addr;
+ uint16_t nfc_flash_cmd;
+ uint16_t nfc_config;
+ uint16_t nfc_ecc_status_result;
+ uint16_t nfc_ecc_status_result2;
+ uint16_t nfc_spare_area_size;
+ uint16_t nfc_wrprot;
+ uint16_t reserved3[2];
+ uint16_t nfc_nf_wrprst;
+ uint16_t nfc_config1;
+ uint16_t nfc_config2;
+ uint16_t reserved4;
+ uint16_t nfc_unlockstart_blkaddr;
+ uint16_t nfc_unlockend_blkaddr;
+ uint16_t nfc_unlockstart_blkaddr1;
+ uint16_t nfc_unlockend_blkaddr1;
+ uint16_t nfc_unlockstart_blkaddr2;
+ uint16_t nfc_unlockend_blkaddr2;
+ uint16_t nfc_unlockstart_blkaddr3;
+ uint16_t nfc_unlockend_blkaddr3;
+#endif
+};
+
+/*
+ * Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register
+ * for Command operation
+ */
+#define NFC_CMD 0x1
+
+/*
+ * Set INT to 0, FADD to 1, rest to 0 in NFC_CONFIG2 Register
+ * for Address operation
+ */
+#define NFC_ADDR 0x2
+
+/*
+ * Set INT to 0, FDI to 1, rest to 0 in NFC_CONFIG2 Register
+ * for Input operation
+ */
+#define NFC_INPUT 0x4
+
+/*
+ * Set INT to 0, FDO to 001, rest to 0 in NFC_CONFIG2 Register
+ * for Data Output operation
+ */
+#define NFC_OUTPUT 0x8
+
+/*
+ * Set INT to 0, FDO to 010, rest to 0 in NFC_CONFIG2 Register
+ * for Read ID operation
+ */
+#define NFC_ID 0x10
+
+/*
+ * Set INT to 0, FDO to 100, rest to 0 in NFC_CONFIG2 Register
+ * for Read Status operation
+ */
+#define NFC_STATUS 0x20
+
+/*
+ * Set INT to 1, rest to 0 in NFC_CONFIG2 Register for Read
+ * Status operation
+ */
+#define NFC_INT 0x8000
+
+#ifdef MXC_NFC_V1_1
+#define NFC_4_8N_ECC (1 << 0)
+#else
+#define NFC_4_8N_ECC 0
+#endif
+#define NFC_SP_EN (1 << 2)
+#define NFC_ECC_EN (1 << 3)
+#define NFC_BIG (1 << 5)
+#define NFC_RST (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+
+typedef enum {false, true} bool;
+
+struct mxc_nand_host {
+ struct mtd_info mtd;
+ struct nand_chip *nand;
+
+ struct nfc_regs __iomem *regs;
+ int spare_only;
+ int status_request;
+ int pagesize_2k;
+ int clk_act;
+ uint16_t col_addr;
+ unsigned int page_addr;
+};
+
+static struct mxc_nand_host mxc_host;
+static struct mxc_nand_host *host = &mxc_host;
+
+/* Define delays in microsec for NAND device operations */
+#define TROP_US_DELAY 2000
+/* Macros to get byte and bit positions of ECC */
+#define COLPOS(x) ((x) >> 3)
+#define BITPOS(x) ((x) & 0xf)
+
+/* Define single bit Error positions in Main & Spare area */
+#define MAIN_SINGLEBIT_ERROR 0x4
+#define SPARE_SINGLEBIT_ERROR 0x1
+
+/* OOB placement block for use with hardware ecc generation */
+#if defined(MXC_NFC_V1)
+#ifndef CONFIG_SYS_NAND_LARGEPAGE
+static struct nand_ecclayout nand_hw_eccoob = {
+ .eccbytes = 5,
+ .eccpos = {6, 7, 8, 9, 10},
+ .oobfree = { {0, 5}, {11, 5}, }
+};
+#else
+static struct nand_ecclayout nand_hw_eccoob2k = {
+ .eccbytes = 20,
+ .eccpos = {
+ 6, 7, 8, 9, 10,
+ 22, 23, 24, 25, 26,
+ 38, 39, 40, 41, 42,
+ 54, 55, 56, 57, 58,
+ },
+ .oobfree = { {2, 4}, {11, 11}, {27, 11}, {43, 11}, {59, 5} },
+};
+#endif
+#elif defined(MXC_NFC_V1_1)
+#ifndef CONFIG_SYS_NAND_LARGEPAGE
+static struct nand_ecclayout nand_hw_eccoob = {
+ .eccbytes = 9,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {2, 5} }
+};
+#else
+static struct nand_ecclayout nand_hw_eccoob2k = {
+ .eccbytes = 36,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ },
+ .oobfree = { {2, 5}, {16, 7}, {32, 7}, {48, 7} },
+};
+#endif
+#endif
+
+#ifdef CONFIG_MX27
+static int is_16bit_nand(void)
+{
+ struct system_control_regs *sc_regs =
+ (struct system_control_regs *)IMX_SYSTEM_CTL_BASE;
+
+ if (readl(&sc_regs->fmcr) & NF_16BIT_SEL)
+ return 1;
+ else
+ return 0;
+}
+#elif defined(CONFIG_MX31)
+static int is_16bit_nand(void)
+{
+ struct clock_control_regs *sc_regs =
+ (struct clock_control_regs *)CCM_BASE;
+
+ if (readl(&sc_regs->rcsr) & CCM_RCSR_NF16B)
+ return 1;
+ else
+ return 0;
+}
+#elif defined(CONFIG_MX25) || defined(CONFIG_MX35)
+static int is_16bit_nand(void)
+{
+ struct ccm_regs *ccm =
+ (struct ccm_regs *)IMX_CCM_BASE;
+
+ if (readl(&ccm->rcsr) & CCM_RCSR_NF_16BIT_SEL)
+ return 1;
+ else
+ return 0;
+}
+#else
+#warning "8/16 bit NAND autodetection not supported"
+static int is_16bit_nand(void)
+{
+ return 0;
+}
+#endif
+
+static uint32_t *mxc_nand_memcpy32(uint32_t *dest, uint32_t *source, size_t size)
+{
+ uint32_t *d = dest;
+
+ size >>= 2;
+ while (size--)
+ __raw_writel(__raw_readl(source++), d++);
+ return dest;
+}
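+
+/*
+ * Note: size is given in bytes but copied as 32-bit words (size >> 2
+ * iterations), so callers pass multiples of 4; the raw accessors keep
+ * the NFC buffer accesses word-sized.
+ */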
+
+/*
+ * This function polls the NANDFC to wait for the basic operation to
+ * complete by checking the INT bit of config2 register.
+ */
+static void wait_op_done(struct mxc_nand_host *host, int max_retries,
+ uint16_t param)
+{
+ uint32_t tmp;
+
+ while (max_retries-- > 0) {
+ if (readw(&host->regs->nfc_config2) & NFC_INT) {
+ tmp = readw(&host->regs->nfc_config2);
+ tmp &= ~NFC_INT;
+ writew(tmp, &host->regs->nfc_config2);
+ break;
+ }
+ udelay(1);
+ }
+ if (max_retries < 0) {
+ MTDDEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n",
+ __func__, param);
+ }
+}
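+
+/*
+ * Note: each retry above costs a 1 us delay, so the usual budget of
+ * TROP_US_DELAY (2000) retries corresponds to roughly 2 ms before the
+ * "INT not set" debug message is emitted.
+ */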
+
+/*
+ * This function issues the specified command to the NAND device and
+ * waits for completion.
+ */
+static void send_cmd(struct mxc_nand_host *host, uint16_t cmd)
+{
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x)\n", cmd);
+
+ writew(cmd, &host->regs->nfc_flash_cmd);
+ writew(NFC_CMD, &host->regs->nfc_config2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, cmd);
+}
+
+/*
+ * This function sends an address (or partial address) to the
+ * NAND device. The address is used to select the source/destination for
+ * a NAND command.
+ */
+static void send_addr(struct mxc_nand_host *host, uint16_t addr)
+{
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x)\n", addr);
+
+ writew(addr, &host->regs->nfc_flash_addr);
+ writew(NFC_ADDR, &host->regs->nfc_config2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, addr);
+}
+
+/*
+ * This function requests the NANDFC to initiate the transfer
+ * of data currently in the NANDFC RAM buffer to the NAND device.
+ */
+static void send_prog_page(struct mxc_nand_host *host, uint8_t buf_id,
+ int spare_only)
+{
+ if (spare_only)
+ MTDDEBUG(MTD_DEBUG_LEVEL1, "send_prog_page (%d)\n", spare_only);
+
+ if (is_mxc_nfc_11()) {
+ int i;
+ /*
+		 * The controller takes the 64 bytes of spare data from
+		 * the first 16 bytes of each of the four 64-byte spare
+		 * buffers. Scatter the contiguous data starting in
+		 * spare_area[0] across those four buffers.
+ */
+ for (i = 1; i < 4; i++) {
+ void __iomem *src = &host->regs->spare_area[0][i * 16];
+ void __iomem *dst = &host->regs->spare_area[i][0];
+
+ mxc_nand_memcpy32(dst, src, 16);
+ }
+ }
+
+ writew(buf_id, &host->regs->nfc_buf_addr);
+
+ /* Configure spare or page+spare access */
+ if (!host->pagesize_2k) {
+ uint16_t config1 = readw(&host->regs->nfc_config1);
+ if (spare_only)
+ config1 |= NFC_SP_EN;
+ else
+ config1 &= ~(NFC_SP_EN);
+ writew(config1, &host->regs->nfc_config1);
+ }
+
+ writew(NFC_INPUT, &host->regs->nfc_config2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, spare_only);
+}
+
+/*
+ * Requests the NANDFC to initiate the transfer of data from the
+ * NAND device into the NANDFC RAM buffer.
+ */
+static void send_read_page(struct mxc_nand_host *host, uint8_t buf_id,
+ int spare_only)
+{
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "send_read_page (%d)\n", spare_only);
+
+ writew(buf_id, &host->regs->nfc_buf_addr);
+
+ /* Configure spare or page+spare access */
+ if (!host->pagesize_2k) {
+ uint32_t config1 = readw(&host->regs->nfc_config1);
+ if (spare_only)
+ config1 |= NFC_SP_EN;
+ else
+ config1 &= ~NFC_SP_EN;
+ writew(config1, &host->regs->nfc_config1);
+ }
+
+ writew(NFC_OUTPUT, &host->regs->nfc_config2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, spare_only);
+
+ if (is_mxc_nfc_11()) {
+ int i;
+
+ /*
+ * The controller copies the 64 bytes of spare data to
+ * the first 16 bytes of each of the 4 spare buffers.
+ * Make the data contiguous starting in spare_area[0].
+ */
+ for (i = 1; i < 4; i++) {
+ void __iomem *src = &host->regs->spare_area[i][0];
+ void __iomem *dst = &host->regs->spare_area[0][i * 16];
+
+ mxc_nand_memcpy32(dst, src, 16);
+ }
+ }
+}
+
+/* Request the NANDFC to perform a read of the NAND device ID. */
+static void send_read_id(struct mxc_nand_host *host)
+{
+ uint16_t tmp;
+
+ /* NANDFC buffer 0 is used for device ID output */
+ writew(0x0, &host->regs->nfc_buf_addr);
+
+ /* Read ID into main buffer */
+ tmp = readw(&host->regs->nfc_config1);
+ tmp &= ~NFC_SP_EN;
+ writew(tmp, &host->regs->nfc_config1);
+
+ writew(NFC_ID, &host->regs->nfc_config2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, 0);
+}
+
+/*
+ * This function requests the NANDFC to perform a read of the
+ * NAND device status and returns the current status.
+ */
+static uint16_t get_dev_status(struct mxc_nand_host *host)
+{
+ void __iomem *main_buf = host->regs->main_area[1];
+ uint32_t store;
+ uint16_t ret, tmp;
+ /* Issue status request to NAND device */
+
+	/* save the first word of main area 1; it is restored below */
+ store = readl(main_buf);
+ /* NANDFC buffer 1 is used for device status */
+ writew(1, &host->regs->nfc_buf_addr);
+
+ /* Read status into main buffer */
+ tmp = readw(&host->regs->nfc_config1);
+ tmp &= ~NFC_SP_EN;
+ writew(tmp, &host->regs->nfc_config1);
+
+ writew(NFC_STATUS, &host->regs->nfc_config2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, 0);
+
+ /*
+	 * Status is placed in the first word of the main buffer;
+	 * read it, then restore the saved area 1 data.
+ */
+ ret = readw(main_buf);
+ writel(store, main_buf);
+
+ return ret;
+}
+
+/* This function is used by the upper layer to check if the device is ready */
+static int mxc_nand_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles R/B internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+#ifdef CONFIG_MXC_NAND_HWECC
+static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ /*
+ * If HW ECC is enabled, we turn it on during init. There is
+ * no need to enable again here.
+ */
+}
+
+#ifdef MXC_NFC_V1_1
+static void _mxc_nand_enable_hwecc(struct mtd_info *mtd, int on)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t tmp = readw(&host->regs->nfc_config1);
+
+ if (on)
+ tmp |= NFC_ECC_EN;
+ else
+ tmp &= ~NFC_ECC_EN;
+ writew(tmp, &host->regs->nfc_config1);
+}
+
+static int mxc_nand_read_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ struct mxc_nand_host *host = chip->priv;
+ uint8_t *buf = chip->oob_poi;
+ int length = mtd->oobsize;
+ int eccpitch = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ uint8_t *bufpoi = buf;
+ int i, toread;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL0,
+ "%s: Reading OOB area of page %u to oob %p\n",
+ __FUNCTION__, host->page_addr, buf);
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, mtd->writesize, page);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ toread = min_t(int, length, chip->ecc.prepad);
+ if (toread) {
+ chip->read_buf(mtd, bufpoi, toread);
+ bufpoi += toread;
+ length -= toread;
+ }
+ bufpoi += chip->ecc.bytes;
+ host->col_addr += chip->ecc.bytes;
+ length -= chip->ecc.bytes;
+
+ toread = min_t(int, length, chip->ecc.postpad);
+ if (toread) {
+ chip->read_buf(mtd, bufpoi, toread);
+ bufpoi += toread;
+ length -= toread;
+ }
+ }
+ if (length > 0)
+ chip->read_buf(mtd, bufpoi, length);
+
+ _mxc_nand_enable_hwecc(mtd, 0);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB,
+ mtd->writesize + chip->ecc.prepad, page);
+ bufpoi = buf + chip->ecc.prepad;
+ length = mtd->oobsize - chip->ecc.prepad;
+ for (i = 0; i < chip->ecc.steps; i++) {
+ toread = min_t(int, length, chip->ecc.bytes);
+ chip->read_buf(mtd, bufpoi, toread);
+ bufpoi += eccpitch;
+ length -= eccpitch;
+ host->col_addr += chip->ecc.postpad + chip->ecc.prepad;
+ }
+ _mxc_nand_enable_hwecc(mtd, 1);
+ return 1;
+}
+
+static int mxc_nand_read_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf,
+ int page)
+{
+ struct mxc_nand_host *host = chip->priv;
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+ int n;
+
+ _mxc_nand_enable_hwecc(mtd, 0);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, host->page_addr);
+
+ for (n = 0, steps = chip->ecc.steps; steps > 0; n++, steps--) {
+ host->col_addr = n * eccsize;
+ chip->read_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ host->col_addr = mtd->writesize + n * eccpitch;
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->read_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->read_buf(mtd, oob, size);
+ _mxc_nand_enable_hwecc(mtd, 0);
+
+ return 0;
+}
+
+static int mxc_nand_read_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf,
+ int page)
+{
+ struct mxc_nand_host *host = chip->priv;
+ int n, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL1, "Reading page %u to buf %p oob %p\n",
+ host->page_addr, buf, oob);
+
+ /* first read out the data area and the available portion of OOB */
+ for (n = 0; eccsteps; n++, eccsteps--, p += eccsize) {
+ int stat;
+
+ host->col_addr = n * eccsize;
+
+ chip->read_buf(mtd, p, eccsize);
+
+ host->col_addr = mtd->writesize + n * eccpitch;
+
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ n = mtd->oobsize - (oob - chip->oob_poi);
+ if (n)
+ chip->read_buf(mtd, oob, n);
+
+ /* Then switch ECC off and read the OOB area to get the ECC code */
+ _mxc_nand_enable_hwecc(mtd, 0);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, mtd->writesize, host->page_addr);
+ eccsteps = chip->ecc.steps;
+ oob = chip->oob_poi + chip->ecc.prepad;
+ for (n = 0; eccsteps; n++, eccsteps--, p += eccsize) {
+ host->col_addr = mtd->writesize +
+ n * eccpitch +
+ chip->ecc.prepad;
+ chip->read_buf(mtd, oob, eccbytes);
+ oob += eccbytes + chip->ecc.postpad;
+ }
+ _mxc_nand_enable_hwecc(mtd, 1);
+ return 0;
+}
+
+static int mxc_nand_write_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ struct mxc_nand_host *host = chip->priv;
+ int eccpitch = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int length = mtd->oobsize;
+ int i, len, status, steps = chip->ecc.steps;
+ const uint8_t *bufpoi = chip->oob_poi;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ for (i = 0; i < steps; i++) {
+ len = min_t(int, length, eccpitch);
+
+ chip->write_buf(mtd, bufpoi, len);
+ bufpoi += len;
+ length -= len;
+ host->col_addr += chip->ecc.prepad + chip->ecc.postpad;
+ }
+ if (length > 0)
+ chip->write_buf(mtd, bufpoi, length);
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static void mxc_nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ struct mxc_nand_host *host = chip->priv;
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+ int n;
+
+ for (n = 0, steps = chip->ecc.steps; steps > 0; n++, steps--) {
+ host->col_addr = n * eccsize;
+ chip->write_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ host->col_addr = mtd->writesize + n * eccpitch;
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ host->col_addr += eccbytes;
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->write_buf(mtd, oob, size);
+}
+
+static void mxc_nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ struct mxc_nand_host *host = chip->priv;
+ int i, n, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ for (i = n = 0;
+ eccsteps;
+ n++, eccsteps--, i += eccbytes, p += eccsize) {
+ host->col_addr = n * eccsize;
+
+ chip->write_buf(mtd, p, eccsize);
+
+ host->col_addr = mtd->writesize + n * eccpitch;
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->write_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i)
+ chip->write_buf(mtd, oob, i);
+}
+
+static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t ecc_status = readw(&host->regs->nfc_ecc_status_result);
+ int subpages = mtd->writesize / nand_chip->subpagesize;
+ int pg2blk_shift = nand_chip->phys_erase_shift -
+ nand_chip->page_shift;
+
+ do {
+ if ((ecc_status & 0xf) > 4) {
+ static int last_bad = -1;
+
+ if (last_bad != host->page_addr >> pg2blk_shift) {
+ last_bad = host->page_addr >> pg2blk_shift;
+ printk(KERN_DEBUG
+ "MXC_NAND: HWECC uncorrectable ECC error"
+ " in block %u page %u subpage %d\n",
+ last_bad, host->page_addr,
+ mtd->writesize / nand_chip->subpagesize
+ - subpages);
+ }
+ return -1;
+ }
+ ecc_status >>= 4;
+ subpages--;
+ } while (subpages > 0);
+
+ return 0;
+}
+#else
+#define mxc_nand_read_page_syndrome NULL
+#define mxc_nand_read_page_raw_syndrome NULL
+#define mxc_nand_read_oob_syndrome NULL
+#define mxc_nand_write_page_syndrome NULL
+#define mxc_nand_write_page_raw_syndrome NULL
+#define mxc_nand_write_oob_syndrome NULL
+#define mxc_nfc_11_nand_correct_data NULL
+
+static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ /*
+ * 1-Bit errors are automatically corrected in HW. No need for
+ * additional correction. 2-Bit errors cannot be corrected by
+ * HW ECC, so we need to return failure
+ */
+ uint16_t ecc_status = readw(&host->regs->nfc_ecc_status_result);
+
+ if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
+ MTDDEBUG(MTD_DEBUG_LEVEL0,
+ "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ return 0;
+}
+#endif
+
+static u_char mxc_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint8_t ret = 0;
+ uint16_t col;
+ uint16_t __iomem *main_buf =
+ (uint16_t __iomem *)host->regs->main_area[0];
+ uint16_t __iomem *spare_buf =
+ (uint16_t __iomem *)host->regs->spare_area[0];
+ union {
+ uint16_t word;
+ uint8_t bytes[2];
+ } nfc_word;
+
+ /* Check for status request */
+ if (host->status_request)
+ return get_dev_status(host) & 0xFF;
+
+ /* Get column for 16-bit access */
+ col = host->col_addr >> 1;
+
+ /* If we are accessing the spare region */
+ if (host->spare_only)
+ nfc_word.word = readw(&spare_buf[col]);
+ else
+ nfc_word.word = readw(&main_buf[col]);
+
+ /* Pick upper/lower byte of word from RAM buffer */
+ ret = nfc_word.bytes[host->col_addr & 0x1];
+
+ /* Update saved column address */
+ if (nand_chip->options & NAND_BUSWIDTH_16)
+ host->col_addr += 2;
+ else
+ host->col_addr++;
+
+ return ret;
+}
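+
+/*
+ * Example (illustrative): col_addr = 5 reads 16-bit word 2 of the
+ * buffer, picks byte (5 & 1) = 1 of it and then advances col_addr by
+ * 1 (or by 2 when the chip sits on a 16-bit bus).
+ */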
+
+static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t col, ret;
+ uint16_t __iomem *p;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3,
+ "mxc_nand_read_word(col = %d)\n", host->col_addr);
+
+ col = host->col_addr;
+ /* Adjust saved column address */
+ if (col < mtd->writesize && host->spare_only)
+ col += mtd->writesize;
+
+ if (col < mtd->writesize) {
+ p = (uint16_t __iomem *)(host->regs->main_area[0] +
+ (col >> 1));
+ } else {
+ p = (uint16_t __iomem *)(host->regs->spare_area[0] +
+ ((col - mtd->writesize) >> 1));
+ }
+
+ if (col & 1) {
+ union {
+ uint16_t word;
+ uint8_t bytes[2];
+ } nfc_word[3];
+
+ nfc_word[0].word = readw(p);
+ nfc_word[1].word = readw(p + 1);
+
+ nfc_word[2].bytes[0] = nfc_word[0].bytes[1];
+ nfc_word[2].bytes[1] = nfc_word[1].bytes[0];
+
+ ret = nfc_word[2].word;
+ } else {
+ ret = readw(p);
+ }
+
+ /* Update saved column address */
+ host->col_addr = col + 2;
+
+ return ret;
+}
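+
+/*
+ * Note: for an odd column the result is stitched together from byte 1
+ * of the 16-bit word containing the column and byte 0 of the following
+ * word, since the NFC buffer is read with aligned 16-bit accesses.
+ */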
+
+/*
+ * Write data of length len from buffer buf to the NAND Flash. The data
+ * is first copied to the NFC RAM buffer; after the Data Input operation
+ * by the NFC, it is programmed into the NAND Flash.
+ */
+static void mxc_nand_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ int n, col, i = 0;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3,
+ "mxc_nand_write_buf(col = %d, len = %d)\n", host->col_addr,
+ len);
+
+ col = host->col_addr;
+
+ /* Adjust saved column address */
+ if (col < mtd->writesize && host->spare_only)
+ col += mtd->writesize;
+
+ n = mtd->writesize + mtd->oobsize - col;
+ n = min(len, n);
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3,
+ "%s:%d: col = %d, n = %d\n", __func__, __LINE__, col, n);
+
+ while (n > 0) {
+ void __iomem *p;
+
+ if (col < mtd->writesize) {
+ p = host->regs->main_area[0] + (col & ~3);
+ } else {
+ p = host->regs->spare_area[0] -
+ mtd->writesize + (col & ~3);
+ }
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "%s:%d: p = %p\n", __func__,
+ __LINE__, p);
+
+ if (((col | (unsigned long)&buf[i]) & 3) || n < 4) {
+ union {
+ uint32_t word;
+ uint8_t bytes[4];
+ } nfc_word;
+
+ nfc_word.word = readl(p);
+ nfc_word.bytes[col & 3] = buf[i++];
+ n--;
+ col++;
+
+ writel(nfc_word.word, p);
+ } else {
+ int m = mtd->writesize - col;
+
+ if (col >= mtd->writesize)
+ m += mtd->oobsize;
+
+ m = min(n, m) & ~3;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3,
+ "%s:%d: n = %d, m = %d, i = %d, col = %d\n",
+ __func__, __LINE__, n, m, i, col);
+
+ mxc_nand_memcpy32(p, (uint32_t *)&buf[i], m);
+ col += m;
+ i += m;
+ n -= m;
+ }
+ }
+ /* Update saved column address */
+ host->col_addr = col;
+}
+
+/*
+ * Read data from the NAND Flash. The NFC first initiates the data output
+ * cycle, which copies the data into its RAM buffer; len bytes are then
+ * copied from there into buffer buf.
+ */
+static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ int n, col, i = 0;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3,
+ "mxc_nand_read_buf(col = %d, len = %d)\n", host->col_addr, len);
+
+ col = host->col_addr;
+
+ /* Adjust saved column address */
+ if (col < mtd->writesize && host->spare_only)
+ col += mtd->writesize;
+
+ n = mtd->writesize + mtd->oobsize - col;
+ n = min(len, n);
+
+ while (n > 0) {
+ void __iomem *p;
+
+ if (col < mtd->writesize) {
+ p = host->regs->main_area[0] + (col & ~3);
+ } else {
+ p = host->regs->spare_area[0] -
+ mtd->writesize + (col & ~3);
+ }
+
+ if (((col | (int)&buf[i]) & 3) || n < 4) {
+ union {
+ uint32_t word;
+ uint8_t bytes[4];
+ } nfc_word;
+
+ nfc_word.word = readl(p);
+ buf[i++] = nfc_word.bytes[col & 3];
+ n--;
+ col++;
+ } else {
+ int m = mtd->writesize - col;
+
+ if (col >= mtd->writesize)
+ m += mtd->oobsize;
+
+ m = min(n, m) & ~3;
+ mxc_nand_memcpy32((uint32_t *)&buf[i], p, m);
+
+ col += m;
+ i += m;
+ n -= m;
+ }
+ }
+ /* Update saved column address */
+ host->col_addr = col;
+}
+
+/*
+ * Used by the upper layer to verify the data in NAND Flash
+ * against the data in buf.
+ */
+static int mxc_nand_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ u_char tmp[256];
+ uint bsize;
+
+ while (len) {
+ bsize = min(len, 256);
+ mxc_nand_read_buf(mtd, tmp, bsize);
+
+ if (memcmp(buf, tmp, bsize))
+ return 1;
+
+ buf += bsize;
+ len -= bsize;
+ }
+
+ return 0;
+}
+
+/*
+ * This function is used by the upper layer to select and
+ * deselect the NAND chip.
+ */
+static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ switch (chip) {
+ case -1:
+ /* TODO: Disable the NFC clock */
+ if (host->clk_act)
+ host->clk_act = 0;
+ break;
+ case 0:
+ /* TODO: Enable the NFC clock */
+ if (!host->clk_act)
+ host->clk_act = 1;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Used by the upper layer to write command to NAND Flash for
+ * different operations to be carried out on NAND Flash
+ */
+void mxc_nand_command(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3,
+ "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ command, column, page_addr);
+
+ /* Reset command state information */
+ host->status_request = false;
+
+ /* Command pre-processing step */
+ switch (command) {
+
+ case NAND_CMD_STATUS:
+ host->col_addr = 0;
+ host->status_request = true;
+ break;
+
+ case NAND_CMD_READ0:
+ host->page_addr = page_addr;
+ host->col_addr = column;
+ host->spare_only = false;
+ break;
+
+ case NAND_CMD_READOOB:
+ host->col_addr = column;
+ host->spare_only = true;
+ if (host->pagesize_2k)
+ command = NAND_CMD_READ0; /* only READ0 is valid */
+ break;
+
+ case NAND_CMD_SEQIN:
+ if (column >= mtd->writesize) {
+ /*
+			 * Before sending the SEQIN command for a partial
+			 * write, we need to read one page out first. The FSL
+			 * NFC does not support partial writes: it always
+			 * sends out 512+ecc+512+ecc ... for large page NAND
+			 * flash. For small page NAND flash it does support
+			 * the SPARE ONLY operation.
+ */
+ if (host->pagesize_2k) {
+ /* call ourself to read a page */
+ mxc_nand_command(mtd, NAND_CMD_READ0, 0,
+ page_addr);
+ }
+
+ host->col_addr = column - mtd->writesize;
+ host->spare_only = true;
+
+ /* Set program pointer to spare region */
+ if (!host->pagesize_2k)
+ send_cmd(host, NAND_CMD_READOOB);
+ } else {
+ host->spare_only = false;
+ host->col_addr = column;
+
+ /* Set program pointer to page start */
+ if (!host->pagesize_2k)
+ send_cmd(host, NAND_CMD_READ0);
+ }
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ send_prog_page(host, 0, host->spare_only);
+
+ if (host->pagesize_2k && !is_mxc_nfc_11()) {
+			/* data is spread across 4 buffer areas */
+ send_prog_page(host, 1, host->spare_only);
+ send_prog_page(host, 2, host->spare_only);
+ send_prog_page(host, 3, host->spare_only);
+ }
+
+ break;
+ }
+
+ /* Write out the command to the device. */
+ send_cmd(host, command);
+
+ /* Write out column address, if necessary */
+ if (column != -1) {
+ /*
+		 * MXC NANDFC can only perform full page+spare or
+		 * spare-only read/write. When the upper layers
+		 * perform a read/write buf operation, we use the
+		 * saved column address to index into the full page.
+ */
+ send_addr(host, 0);
+ if (host->pagesize_2k)
+ /* another col addr cycle for 2k page */
+ send_addr(host, 0);
+ }
+
+ /* Write out page address, if necessary */
+ if (page_addr != -1) {
+ u32 page_mask = nand_chip->pagemask;
+ do {
+ send_addr(host, page_addr & 0xFF);
+ page_addr >>= 8;
+ page_mask >>= 8;
+ } while (page_mask);
+ }
+
+ /* Command post-processing step */
+ switch (command) {
+
+ case NAND_CMD_RESET:
+ break;
+
+ case NAND_CMD_READOOB:
+ case NAND_CMD_READ0:
+ if (host->pagesize_2k) {
+ /* send read confirm command */
+ send_cmd(host, NAND_CMD_READSTART);
+ /* read for each AREA */
+ send_read_page(host, 0, host->spare_only);
+ if (!is_mxc_nfc_11()) {
+ send_read_page(host, 1, host->spare_only);
+ send_read_page(host, 2, host->spare_only);
+ send_read_page(host, 3, host->spare_only);
+ }
+ } else {
+ send_read_page(host, 0, host->spare_only);
+ }
+ break;
+
+ case NAND_CMD_READID:
+ host->col_addr = 0;
+ send_read_id(host);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ break;
+
+ case NAND_CMD_STATUS:
+ break;
+
+ case NAND_CMD_ERASE2:
+ break;
+ }
+}
+
+#ifdef MXC_NFC_V1_1
+static void mxc_setup_config1(void)
+{
+ uint16_t tmp;
+
+ tmp = readw(&host->regs->nfc_config1);
+ tmp |= NFC_ONE_CYCLE;
+ tmp |= NFC_4_8N_ECC;
+ writew(tmp, &host->regs->nfc_config1);
+ if (host->pagesize_2k)
+ writew(64/2, &host->regs->nfc_spare_area_size);
+ else
+ writew(16/2, &host->regs->nfc_spare_area_size);
+}
+#else
+#define mxc_setup_config1()
+#endif
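+
+/*
+ * The spare area size appears to be programmed in 16-bit words: the
+ * driver writes half of the spare byte count (64/2 or 16/2) above.
+ */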
+
+int board_nand_init(struct nand_chip *this)
+{
+ struct mtd_info *mtd;
+ uint16_t tmp;
+ int err = 0;
+
+ /* structures must be linked */
+ mtd = &host->mtd;
+ mtd->priv = this;
+ host->nand = this;
+
+ /* 5 us command delay time */
+ this->chip_delay = 5;
+
+ this->priv = host;
+ this->dev_ready = mxc_nand_dev_ready;
+ this->cmdfunc = mxc_nand_command;
+ this->select_chip = mxc_nand_select_chip;
+ this->read_byte = mxc_nand_read_byte;
+ this->read_word = mxc_nand_read_word;
+ this->write_buf = mxc_nand_write_buf;
+ this->read_buf = mxc_nand_read_buf;
+ this->verify_buf = mxc_nand_verify_buf;
+
+ host->regs = (struct nfc_regs __iomem *)CONFIG_MXC_NAND_REGS_BASE;
+ host->clk_act = 1;
+
+#ifdef CONFIG_MXC_NAND_HWECC
+ this->ecc.calculate = mxc_nand_calculate_ecc;
+ this->ecc.hwctl = mxc_nand_enable_hwecc;
+ this->ecc.correct = mxc_nand_correct_data;
+ if (is_mxc_nfc_11()) {
+ this->ecc.mode = NAND_ECC_HW_SYNDROME;
+ this->ecc.read_page = mxc_nand_read_page_syndrome;
+ this->ecc.read_page_raw = mxc_nand_read_page_raw_syndrome;
+ this->ecc.read_oob = mxc_nand_read_oob_syndrome;
+ this->ecc.write_page = mxc_nand_write_page_syndrome;
+ this->ecc.write_page_raw = mxc_nand_write_page_raw_syndrome;
+ this->ecc.write_oob = mxc_nand_write_oob_syndrome;
+ this->ecc.bytes = 9;
+ this->ecc.prepad = 7;
+ } else {
+ this->ecc.mode = NAND_ECC_HW;
+ }
+
+ host->pagesize_2k = 0;
+
+ this->ecc.size = 512;
+ tmp = readw(&host->regs->nfc_config1);
+ tmp |= NFC_ECC_EN;
+ writew(tmp, &host->regs->nfc_config1);
+#else
+ this->ecc.layout = &nand_soft_eccoob;
+ this->ecc.mode = NAND_ECC_SOFT;
+ tmp = readw(&host->regs->nfc_config1);
+ tmp &= ~NFC_ECC_EN;
+ writew(tmp, &host->regs->nfc_config1);
+#endif
+ /* Reset NAND */
+ this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /*
+ * preset operation
+ * Unlock the internal RAM Buffer
+ */
+ writew(0x2, &host->regs->nfc_config);
+
+ /* Blocks to be unlocked */
+ writew(0x0, &host->regs->nfc_unlockstart_blkaddr);
+ writew(0x4000, &host->regs->nfc_unlockend_blkaddr);
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, &host->regs->nfc_wrprot);
+
+	/* NAND bus width determines access functions used by upper layer */
+ if (is_16bit_nand())
+ this->options |= NAND_BUSWIDTH_16;
+
+#ifdef CONFIG_SYS_NAND_LARGEPAGE
+ host->pagesize_2k = 1;
+ this->ecc.layout = &nand_hw_eccoob2k;
+#else
+ host->pagesize_2k = 0;
+ this->ecc.layout = &nand_hw_eccoob;
+#endif
+ mxc_setup_config1();
+ return err;
+}
diff --git a/u-boot/drivers/mtd/nand/nand.c b/u-boot/drivers/mtd/nand/nand.c
new file mode 100644
index 0000000..d987f4c
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/nand.c
@@ -0,0 +1,98 @@
+/*
+ * (C) Copyright 2005
+ * 2N Telekomunikace, a.s. <www.2n.cz>
+ * Ladislav Michl <michl@2n.cz>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <nand.h>
+
+#ifndef CONFIG_SYS_NAND_BASE_LIST
+#define CONFIG_SYS_NAND_BASE_LIST { CONFIG_SYS_NAND_BASE }
+#endif
+
+DECLARE_GLOBAL_DATA_PTR;
+
+int nand_curr_device = -1;
+nand_info_t nand_info[CONFIG_SYS_MAX_NAND_DEVICE];
+
+static struct nand_chip nand_chip[CONFIG_SYS_MAX_NAND_DEVICE];
+static ulong base_address[CONFIG_SYS_MAX_NAND_DEVICE] = CONFIG_SYS_NAND_BASE_LIST;
+
+static const char default_nand_name[] = "nand";
+static __attribute__((unused)) char dev_name[CONFIG_SYS_MAX_NAND_DEVICE][8];
+
+static void nand_init_chip(struct mtd_info *mtd, struct nand_chip *nand,
+ ulong base_addr)
+{
+ int maxchips = CONFIG_SYS_NAND_MAX_CHIPS;
+ static int __attribute__((unused)) i = 0;
+
+ if (maxchips < 1)
+ maxchips = 1;
+ mtd->priv = nand;
+
+ nand->IO_ADDR_R = nand->IO_ADDR_W = (void __iomem *)base_addr;
+ if (board_nand_init(nand) == 0) {
+ if (nand_scan(mtd, maxchips) == 0) {
+ if (!mtd->name)
+ mtd->name = (char *)default_nand_name;
+#ifdef CONFIG_NEEDS_MANUAL_RELOC
+ else
+ mtd->name += gd->reloc_off;
+#endif
+
+#ifdef CONFIG_MTD_DEVICE
+ /*
+ * Add MTD device so that we can reference it later
+ * via the mtdcore infrastructure (e.g. ubi).
+ */
+ sprintf(dev_name[i], "nand%d", i);
+ mtd->name = dev_name[i++];
+ add_mtd_device(mtd);
+#endif
+ } else
+ mtd->name = NULL;
+ } else {
+ mtd->name = NULL;
+ mtd->size = 0;
+ }
+
+}
+
+void nand_init(void)
+{
+ int i;
+ unsigned int size = 0;
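+	/* total size is accumulated in KiB and reported in MiB below */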
+ for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++) {
+ nand_init_chip(&nand_info[i], &nand_chip[i], base_address[i]);
+ size += nand_info[i].size / 1024;
+ if (nand_curr_device == -1)
+ nand_curr_device = i;
+ }
+ printf("%u MiB\n", size / 1024);
+
+#ifdef CONFIG_SYS_NAND_SELECT_DEVICE
+ /*
+ * Select the chip in the board/cpu specific driver
+ */
+ board_nand_select_device(nand_info[nand_curr_device].priv, nand_curr_device);
+#endif
+}
diff --git a/u-boot/drivers/mtd/nand/nand_base.c b/u-boot/drivers/mtd/nand/nand_base.c
new file mode 100644
index 0000000..70c0593
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/nand_base.c
@@ -0,0 +1,2896 @@
+/*
+ * drivers/mtd/nand.c
+ *
+ * Overview:
+ * This is the generic MTD driver for NAND flash devices. It should be
+ * capable of working with almost all NAND chips currently available.
+ * Basic support for AG-AND chips is provided.
+ *
+ * Additional technical information is available on
+ * http://www.linux-mtd.infradead.org/doc/nand.html
+ *
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * TODO:
+ * Enable cached programming for 2k page size chips
+ * Check, if mtd->ecctype should be set to MTD_ECC_HW
+ * if we have HW ecc support.
+ * The AG-AND chips have nice features for speed improvement,
+ * which are not supported yet. Read / program 4 pages in one go.
+ * BBT table is not serialized, has to be fixed
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <common.h>
+
+#define ENOTSUPP 524 /* Operation is not supported */
+
+#include <malloc.h>
+#include <watchdog.h>
+#include <linux/err.h>
+#include <linux/mtd/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
+#include <asm/io.h>
+#include <asm/errno.h>
+
+/*
+ * CONFIG_SYS_NAND_RESET_CNT is used as a timeout mechanism when resetting
+ * a flash. NAND flash is initialized prior to interrupts so standard timers
+ * can't be used. CONFIG_SYS_NAND_RESET_CNT should be set to a value
+ * which is greater than (max NAND reset time / NAND status read time).
+ * A conservative value of 200000 (500 us / 25 ns) is used as the default.
+ */
+#ifndef CONFIG_SYS_NAND_RESET_CNT
+#define CONFIG_SYS_NAND_RESET_CNT 200000
+#endif
+
+/* Define default oob placement schemes for large and small page devices */
+static struct nand_ecclayout nand_oob_8 = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ {.offset = 3,
+ .length = 2},
+ {.offset = 6,
+ .length = 2}}
+};
+
+static struct nand_ecclayout nand_oob_16 = {
+ .eccbytes = 6,
+ .eccpos = {0, 1, 2, 3, 6, 7},
+ .oobfree = {
+ {.offset = 8,
+		 .length = 8}}
+};
+
+static struct nand_ecclayout nand_oob_64 = {
+ .eccbytes = 24,
+ .eccpos = {
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = {
+ {.offset = 2,
+ .length = 38}}
+};
+
+static struct nand_ecclayout nand_oob_128 = {
+ .eccbytes = 48,
+ .eccpos = {
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127},
+ .oobfree = {
+ {.offset = 2,
+ .length = 78}}
+};
+
+
+static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
+ int new_state);
+
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops);
+
+static int nand_wait(struct mtd_info *mtd, struct nand_chip *this);
+
+/**
+ * nand_release_device - [GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device
+ */
+static void nand_release_device (struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ this->select_chip(mtd, -1); /* De-select the NAND device */
+}
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 8-bit bus width
+ */
+static uint8_t nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return readb(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte, endianness aware, from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16-bit bus width with
+ * endianness conversion
+ */
+static uint8_t nand_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
+}
+
+/**
+ * nand_read_word - [DEFAULT] read one word from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16-bit bus width without
+ * endianness conversion
+ */
+static u16 nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return readw(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @mtd: MTD device structure
+ * @chipnr: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ switch (chipnr) {
+ case -1:
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8-bit bus width
+ */
+static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], chip->IO_ADDR_W);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8-bit bus width
+ */
+static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readb(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_verify_buf - [DEFAULT] Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * Default verify function for 8-bit bus width
+ */
+static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] != readb(chip->IO_ADDR_R))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16-bit bus width
+ */
+static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ writew(p[i], chip->IO_ADDR_W);
+
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 16-bit bus width
+ */
+static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ p[i] = readw(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * Default verify function for 16-bit bus width
+ */
+static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ if (p[i] != readw(chip->IO_ADDR_R))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * nand_block_bad - [DEFAULT] Read bad block marker from the chip
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ *
+ * Check, if the block is bad.
+ */
+static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ int page, chipnr, res = 0;
+ struct nand_chip *chip = mtd->priv;
+ u16 bad;
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ if (getchip) {
+ chipnr = (int)(ofs >> chip->chip_shift);
+
+ nand_get_device(chip, mtd, FL_READING);
+
+ /* Select the NAND device */
+ chip->select_chip(mtd, chipnr);
+ }
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
+ page);
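+		/* read the word holding the marker and pick the byte at badblockpos */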
+ bad = cpu_to_le16(chip->read_word(mtd));
+ if (chip->badblockpos & 0x1)
+ bad >>= 8;
+ if ((bad & 0xFF) != 0xff)
+ res = 1;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
+ if (chip->read_byte(mtd) != 0xff)
+ res = 1;
+ }
+
+ if (getchip)
+ nand_release_device(mtd);
+
+ return res;
+}
+
+/**
+ * nand_default_block_markbad - [DEFAULT] mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This is the default implementation, which can be overridden by
+ * a hardware specific driver.
+ */
+static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint8_t buf[2] = { 0, 0 };
+ int block, ret;
+
+ /* Get block number */
+ block = (int)(ofs >> chip->bbt_erase_shift);
+ if (chip->bbt)
+ chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+ /* Do we have a flash based bad block table ? */
+ if (chip->options & NAND_USE_FLASH_BBT)
+ ret = nand_update_bbt(mtd, ofs);
+ else {
+		/* We write two bytes, so we don't have to mess with 16-bit
+ * access
+ */
+ nand_get_device(chip, mtd, FL_WRITING);
+ ofs += mtd->oobsize;
+ chip->ops.len = chip->ops.ooblen = 2;
+ chip->ops.datbuf = NULL;
+ chip->ops.oobbuf = buf;
+ chip->ops.ooboffs = chip->badblockpos & ~0x01;
+
+ ret = nand_do_write_oob(mtd, ofs, &chip->ops);
+ nand_release_device(mtd);
+ }
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @mtd: MTD device structure
+ * Check, if the device is write protected
+ *
+ * The function expects, that the device is already selected
+ */
+static int nand_check_wp(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ /* Check the WP bit */
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+}
+
+/**
+ * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ * @allowbbt: 1, if it is allowed to access the bbt area
+ *
+ * Check if the block is bad, either by reading the bad block table or
+ * by calling the scan function.
+ */
+static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
+ int allowbbt)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (!(chip->options & NAND_BBT_SCANNED)) {
+ chip->options |= NAND_BBT_SCANNED;
+ chip->scan_bbt(mtd);
+ }
+
+ if (!chip->bbt)
+ return chip->block_bad(mtd, ofs, getchip);
+
+ /* Return info from the table */
+ return nand_isbad_bbt(mtd, ofs, allowbbt);
+}
+
+/*
+ * Wait for the ready pin after a command.
+ * The timeout is caught later.
+ */
+void nand_wait_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ u32 timeo = (CONFIG_SYS_HZ * 20) / 1000;
+ u32 time_start;
+
+ time_start = get_timer(0);
+
+	/* wait until command is processed or timeout occurs */
+ while (get_timer(time_start) < timeo) {
+ if (chip->dev_ready)
+ if (chip->dev_ready(mtd))
+ break;
+ }
+}
+
+/**
+ * nand_command - [DEFAULT] Send command to NAND device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This function is used for small page
+ * devices (256/512 Bytes per page)
+ */
+static void nand_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd->priv;
+ int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
+ uint32_t rst_sts_cnt = CONFIG_SYS_NAND_RESET_CNT;
+
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
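+		/*
+		 * Small-page devices need a pointer command (READ0, READ1 or
+		 * READOOB) to select the page region before SEQIN.
+		 */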
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ chip->cmd_ctrl(mtd, readcmd, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ chip->cmd_ctrl(mtd, command, ctrl);
+
+ /*
+ * Address cycle, when necessary
+ */
+ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ chip->cmd_ctrl(mtd, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (page_addr != -1) {
+ chip->cmd_ctrl(mtd, page_addr, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
+ /* One more address cycle for devices > 32MiB */
+ if (chip->chipsize > (32 << 20))
+ chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
+ }
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * program and erase have their own busy handlers
+	 * status and sequential in need no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+ chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd,
+ NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
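+		/* poll the status byte until the chip reports ready or the reset counter expires */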
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY) &&
+ (rst_sts_cnt--));
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+
+ nand_wait_ready(mtd);
+}
+
+/**
+ * nand_command_lp - [DEFAULT] Send command to NAND large page device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ */
+static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd->priv;
+ uint32_t rst_sts_cnt = CONFIG_SYS_NAND_RESET_CNT;
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Command latch cycle */
+ chip->cmd_ctrl(mtd, command & 0xff,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+
+ if (column != -1 || page_addr != -1) {
+ int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ chip->cmd_ctrl(mtd, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->cmd_ctrl(mtd, column >> 8, ctrl);
+ }
+ if (page_addr != -1) {
+ chip->cmd_ctrl(mtd, page_addr, ctrl);
+ chip->cmd_ctrl(mtd, page_addr >> 8,
+ NAND_NCE | NAND_ALE);
+ /* One more address cycle for devices > 128MiB */
+ if (chip->chipsize > (128 << 20))
+ chip->cmd_ctrl(mtd, page_addr >> 16,
+ NAND_NCE | NAND_ALE);
+ }
+ }
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * program and erase have their own busy handlers
+ * status, sequential in, and deplete1 need no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_DEPLETE1:
+ return;
+
+ /*
+ * read error status commands require only a short delay
+ */
+ case NAND_CMD_STATUS_ERROR:
+ case NAND_CMD_STATUS_ERROR0:
+ case NAND_CMD_STATUS_ERROR1:
+ case NAND_CMD_STATUS_ERROR2:
+ case NAND_CMD_STATUS_ERROR3:
+ udelay(chip->chip_delay);
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+ chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY) &&
+ (rst_sts_cnt--));
+ return;
+
+ case NAND_CMD_RNDOUT:
+ /* No ready / busy check necessary */
+ chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ return;
+
+ case NAND_CMD_READ0:
+ chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+
+ nand_wait_ready(mtd);
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int nand_get_device (struct nand_chip *this, struct mtd_info *mtd, int new_state)
+{
+ this->state = new_state;
+ return 0;
+}
+
+/**
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ *
+ * Wait for command done. This applies to erase and program only
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs
+ */
+static int nand_wait(struct mtd_info *mtd, struct nand_chip *this)
+{
+ unsigned long timeo;
+ int state = this->state;
+ u32 time_start;
+
+ if (state == FL_ERASING)
+ timeo = (CONFIG_SYS_HZ * 400) / 1000;
+ else
+ timeo = (CONFIG_SYS_HZ * 20) / 1000;
+
+ if ((state == FL_ERASING) && (this->options & NAND_IS_AND))
+ this->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
+ else
+ this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+
+ time_start = get_timer(0);
+
+ while (1) {
+ if (get_timer(time_start) > timeo) {
+ printf("Timeout!");
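+			/* report a fail status (bit 0 set) on timeout */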
+ return 0x01;
+ }
+
+ if (this->dev_ready) {
+ if (this->dev_ready(mtd))
+ break;
+ } else {
+ if (this->read_byte(mtd) & NAND_STATUS_READY)
+ break;
+ }
+ }
+#ifdef PPCHAMELON_NAND_TIMER_HACK
+ time_start = get_timer(0);
+ while (get_timer(time_start) < 10)
+ ;
+#endif /* PPCHAMELON_NAND_TIMER_HACK */
+
+ return this->read_byte(mtd);
+}
+
+/**
+ * nand_read_page_raw - [Intern] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ecc controllers, which use a special oob layout
+ */
+static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ chip->read_buf(mtd, buf, mtd->writesize);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/**
+ * nand_read_page_raw_syndrome - [Intern] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * We need a special oob layout and handling even when OOB isn't used.
+ */
+static int nand_read_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ chip->read_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->read_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->read_buf(mtd, oob, size);
+
+ return 0;
+}
+
+/**
+ * nand_read_page_swecc - [REPLACEABLE] software ecc based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ */
+static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ chip->ecc.read_page_raw(mtd, chip, buf, page);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/**
+ * nand_read_subpage - [REPLACEABLE] software ecc based sub-page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ */
+static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
+{
+ int start_step, end_step, num_steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *p;
+ int data_col_addr, i, gaps = 0;
+ int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
+ int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+
+	/* Column address within the page, aligned to the ECC size (256 bytes). */
+ start_step = data_offs / chip->ecc.size;
+ end_step = (data_offs + readlen - 1) / chip->ecc.size;
+ num_steps = end_step - start_step + 1;
+
+	/* Data size aligned to ecc.size */
+ datafrag_len = num_steps * chip->ecc.size;
+ eccfrag_len = num_steps * chip->ecc.bytes;
+
+ data_col_addr = start_step * chip->ecc.size;
+	/* If the read is not page aligned */
+ if (data_col_addr != 0)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
+
+ p = bufpoi + data_col_addr;
+ chip->read_buf(mtd, p, datafrag_len);
+
+ /* Calculate ECC */
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+ chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+
+	/* Performance is better if the ECC bytes sit at consecutive
+	   offsets in the OOB (according to eccpos). Make sure here that
+	   there are no gaps in the ECC positions */
+ for (i = 0; i < eccfrag_len - 1; i++) {
+ if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
+ eccpos[i + start_step * chip->ecc.bytes + 1]) {
+ gaps = 1;
+ break;
+ }
+ }
+ if (gaps) {
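+		/* ECC bytes are not contiguous in OOB, so read the whole OOB area */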
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ } else {
+ /* send the command to read the particular ecc bytes */
+ /* take care about buswidth alignment in read_buf */
+ aligned_pos = eccpos[start_step * chip->ecc.bytes] & ~(busw - 1);
+ aligned_len = eccfrag_len;
+ if (eccpos[start_step * chip->ecc.bytes] & (busw - 1))
+ aligned_len++;
+ if (eccpos[(start_step + num_steps) * chip->ecc.bytes] & (busw - 1))
+ aligned_len++;
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize + aligned_pos, -1);
+ chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+ }
+
+ for (i = 0; i < eccfrag_len; i++)
+ chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + start_step * chip->ecc.bytes]];
+
+ p = bufpoi + data_col_addr;
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ if (stat == -1)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/**
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ecc based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ecc controllers which need a special oob layout
+ */
+static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/**
+ * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * Hardware ECC for large page chips, which requires the OOB to be read first.
+ * For this ECC mode, the write_page method is re-used from ECC_HW.
+ * These methods read/write ECC from the OOB area, unlike the
+ * ECC_HW_SYNDROME support, which (with multiple ECC steps) follows the
+ * "infix ECC" scheme and reads/writes ECC from the data area,
+ * overwriting the NAND manufacturer's bad block markings.
+ */
+static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+
+ /* Read the OOB area first */
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/**
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore
+ * we need a special oob layout and handling.
+ */
+static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
+ chip->read_buf(mtd, oob, eccbytes);
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i)
+ chip->read_buf(mtd, oob, i);
+
+ return 0;
+}
+
+/**
+ * nand_transfer_oob - [Internal] Transfer oob to client buffer
+ * @chip: nand chip structure
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
+ */
+static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
+ struct mtd_oob_ops *ops, size_t len)
+{
+ switch(ops->mode) {
+
+ case MTD_OOB_PLACE:
+ case MTD_OOB_RAW:
+ memcpy(oob, chip->oob_poi + ops->ooboffs, len);
+ return oob + len;
+
+ case MTD_OOB_AUTO: {
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ uint32_t boffs = 0, roffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for(; free->length && len; free++, len -= bytes) {
+ /* Read request not from offset 0 ? */
+ if (unlikely(roffs)) {
+ if (roffs >= free->length) {
+ roffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + roffs;
+ bytes = min_t(size_t, len,
+ (free->length - roffs));
+ roffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(oob, chip->oob_poi + boffs, bytes);
+ oob += bytes;
+ }
+ return oob;
+ }
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/**
+ * nand_do_read_ops - [Internal] Read data with ECC
+ *
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob ops structure
+ *
+ * Internal function. Called with chip held.
+ */
+static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, realpage, col, bytes, aligned;
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_ecc_stats stats;
+ int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ int sndcmd = 1;
+ int ret = 0;
+ uint32_t readlen = ops->len;
+ uint32_t oobreadlen = ops->ooblen;
+ uint8_t *bufpoi, *oob, *buf;
+
+ stats = mtd->ecc_stats;
+
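+	/* work out which chip, page and column the start offset falls into */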
+ chipnr = (int)(from >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ col = (int)(from & (mtd->writesize - 1));
+
+ buf = ops->datbuf;
+ oob = ops->oobbuf;
+
+ while(1) {
+ bytes = min(mtd->writesize - col, readlen);
+ aligned = (bytes == mtd->writesize);
+
+ /* Is the current page in the buffer ? */
+ if (realpage != chip->pagebuf || oob) {
+ bufpoi = aligned ? buf : chip->buffers->databuf;
+
+ if (likely(sndcmd)) {
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ sndcmd = 0;
+ }
+
+ /* Now read the page into the buffer */
+ if (unlikely(ops->mode == MTD_OOB_RAW))
+ ret = chip->ecc.read_page_raw(mtd, chip,
+ bufpoi, page);
+ else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
+ ret = chip->ecc.read_subpage(mtd, chip, col, bytes, bufpoi);
+ else
+ ret = chip->ecc.read_page(mtd, chip, bufpoi,
+ page);
+ if (ret < 0)
+ break;
+
+ /* Transfer not aligned data */
+ if (!aligned) {
+ if (!NAND_SUBPAGE_READ(chip) && !oob)
+ chip->pagebuf = realpage;
+ memcpy(buf, chip->buffers->databuf + col, bytes);
+ }
+
+ buf += bytes;
+
+ if (unlikely(oob)) {
+ /* Raw mode does data:oob:data:oob */
+ if (ops->mode != MTD_OOB_RAW) {
+ int toread = min(oobreadlen,
+ chip->ecc.layout->oobavail);
+ if (toread) {
+ oob = nand_transfer_oob(chip,
+ oob, ops, toread);
+ oobreadlen -= toread;
+ }
+ } else
+ buf = nand_transfer_oob(chip,
+ buf, ops, mtd->oobsize);
+ }
+
+ if (!(chip->options & NAND_NO_READRDY)) {
+ /*
+ * Apply delay or wait for ready/busy pin. Do
+ * this before the AUTOINCR check, so no
+ * problems arise if a chip which does auto
+ * increment is marked as NOAUTOINCR by the
+ * board driver.
+ */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+ } else {
+ memcpy(buf, chip->buffers->databuf + col, bytes);
+ buf += bytes;
+ }
+
+ readlen -= bytes;
+
+ if (!readlen)
+ break;
+
+ /* For subsequent reads align to page boundary. */
+ col = 0;
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+
+ /* Check, if the chip supports auto page increment
+ * or if we have hit a block boundary.
+ */
+ if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
+ sndcmd = 1;
+ }
+
+ ops->retlen = ops->len - (size_t) readlen;
+ if (oob)
+ ops->oobretlen = ops->ooblen - oobreadlen;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
+ * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
+ *
+ * Get hold of the chip and call nand_do_read
+ */
+static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ /* Do not allow reads past end of device */
+ if ((from + len) > mtd->size)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ nand_get_device(chip, mtd, FL_READING);
+
+ chip->ops.len = len;
+ chip->ops.datbuf = buf;
+ chip->ops.oobbuf = NULL;
+
+ ret = nand_do_read_ops(mtd, from, &chip->ops);
+
+ *retlen = chip->ops.retlen;
+
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
+ */
+static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ if (sndcmd) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ sndcmd = 0;
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return sndcmd;
+}
+
+/**
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
+ * with syndromes
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
+ */
+static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ uint8_t *buf = chip->oob_poi;
+ int length = mtd->oobsize;
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size;
+ uint8_t *bufpoi = buf;
+ int i, toread, sndrnd = 0, pos;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (sndrnd) {
+ pos = eccsize + i * (eccsize + chunk);
+ if (mtd->writesize > 512)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
+ else
+ chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
+ } else
+ sndrnd = 1;
+ toread = min_t(int, length, chunk);
+ chip->read_buf(mtd, bufpoi, toread);
+ bufpoi += toread;
+ length -= toread;
+ }
+ if (length > 0)
+ chip->read_buf(mtd, bufpoi, length);
+
+ return 1;
+}
+
+/**
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ int status = 0;
+ const uint8_t *buf = chip->oob_poi;
+ int length = mtd->oobsize;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, buf, length);
+ /* Send command to program the OOB data */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/**
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ * with syndrome - only for large page flash!
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size, length = mtd->oobsize;
+ int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
+ const uint8_t *bufpoi = chip->oob_poi;
+
+ /*
+ * data-ecc-data-ecc ... ecc-oob
+ * or
+ * data-pad-ecc-pad-data-pad .... ecc-pad-oob
+ */
+ if (!chip->ecc.prepad && !chip->ecc.postpad) {
+ pos = steps * (eccsize + chunk);
+ steps = 0;
+ } else
+ pos = eccsize;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+ for (i = 0; i < steps; i++) {
+ if (sndcmd) {
+ if (mtd->writesize <= 512) {
+ uint32_t fill = 0xFFFFFFFF;
+
+ len = eccsize;
+ while (len > 0) {
+ int num = min_t(int, len, 4);
+ chip->write_buf(mtd, (uint8_t *)&fill,
+ num);
+ len -= num;
+ }
+ } else {
+ pos = eccsize + i * (eccsize + chunk);
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
+ }
+ } else
+ sndcmd = 1;
+ len = min_t(int, length, chunk);
+ chip->write_buf(mtd, bufpoi, len);
+ bufpoi += len;
+ length -= len;
+ }
+ if (length > 0)
+ chip->write_buf(mtd, bufpoi, length);
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/**
+ * nand_do_read_oob - [Intern] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * NAND read out-of-band data from the spare area
+ */
+static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int page, realpage, chipnr, sndcmd = 1;
+ struct nand_chip *chip = mtd->priv;
+ int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ int readlen = ops->ooblen;
+ int len;
+ uint8_t *buf = ops->oobbuf;
+
+ MTDDEBUG (MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08Lx, len = %i\n",
+ (unsigned long long)from, readlen);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ len = chip->ecc.layout->oobavail;
+ else
+ len = mtd->oobsize;
+
+ if (unlikely(ops->ooboffs >= len)) {
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "nand_read_oob: "
+ "Attempt to start read outside oob\n");
+ return -EINVAL;
+ }
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size ||
+ ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
+ (from >> chip->page_shift)) * len)) {
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "nand_read_oob: "
+ "Attempt read beyond end of device\n");
+ return -EINVAL;
+ }
+
+ chipnr = (int)(from >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ while(1) {
+ sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+
+ len = min(len, readlen);
+ buf = nand_transfer_oob(chip, buf, ops, len);
+
+ if (!(chip->options & NAND_NO_READRDY)) {
+ /*
+ * Apply delay or wait for ready/busy pin. Do this
+ * before the AUTOINCR check, so no problems arise if a
+ * chip which does auto increment is marked as
+ * NOAUTOINCR by the board driver.
+ */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+
+ readlen -= len;
+ if (!readlen)
+ break;
+
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+
+ /* Check, if the chip supports auto page increment
+ * or if we have hit a block boundary.
+ */
+ if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
+ sndcmd = 1;
+ }
+
+ ops->oobretlen = ops->ooblen;
+ return 0;
+}
+
+/**
+ * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read data and/or out-of-band data
+ */
+static int nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = -ENOTSUPP;
+
+ ops->retlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (ops->datbuf && (from + ops->len) > mtd->size) {
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "nand_read_oob: "
+ "Attempt read beyond end of device\n");
+ return -EINVAL;
+ }
+
+ nand_get_device(chip, mtd, FL_READING);
+
+ switch(ops->mode) {
+ case MTD_OOB_PLACE:
+ case MTD_OOB_AUTO:
+ case MTD_OOB_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_read_oob(mtd, from, ops);
+ else
+ ret = nand_do_read_ops(mtd, from, ops);
+
+ out:
+ nand_release_device(mtd);
+ return ret;
+}
+
+
+/**
+ * nand_write_page_raw - [Intern] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ *
+ * Not for syndrome calculating ecc controllers, which use a special oob layout
+ */
+static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+/**
+ * nand_write_page_raw_syndrome - [Intern] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ *
+ * We need a special oob layout and handling even when ECC isn't checked.
+ */
+static void nand_write_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ chip->write_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+		chip->write_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->write_buf(mtd, oob, size);
+}
+
+/**
+ * nand_write_page_swecc - [REPLACEABLE] software ecc based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ */
+static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ /* Software ecc calculation */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ chip->ecc.write_page_raw(mtd, chip, buf);
+}
+
+/**
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ecc based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ */
+static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->write_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+/**
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore
+ * we need a special oob layout and handling.
+ */
+static void nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->write_buf(mtd, p, eccsize);
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.calculate(mtd, p, oob);
+ chip->write_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i)
+ chip->write_buf(mtd, oob, i);
+}
+
+/**
+ * nand_write_page - [REPLACEABLE] write one page
+ * @mtd: MTD device structure
+ * @chip: NAND chip descriptor
+ * @buf: the data to write
+ * @page: page number to write
+ * @cached: cached programming
+ * @raw: use _raw version of write_page
+ */
+static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int page, int cached, int raw)
+{
+ int status;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+ if (unlikely(raw))
+ chip->ecc.write_page_raw(mtd, chip, buf);
+ else
+ chip->ecc.write_page(mtd, chip, buf);
+
+ /*
+	 * Cached programming is disabled for now. Not sure if it's worth the
+	 * trouble. The speed gain is not very impressive. (2.3->2.6 MiB/s)
+ */
+ cached = 0;
+
+ if (!cached || !(chip->options & NAND_CACHEPRG)) {
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ /*
+ * See if operation failed and additional status checks are
+ * available
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_WRITING, status,
+ page);
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
+
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+ /* Send command to read back the data */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ if (chip->verify_buf(mtd, buf, mtd->writesize))
+ return -EIO;
+#endif
+ return 0;
+}
+
+/**
+ * nand_fill_oob - [Internal] Transfer client buffer to oob
+ * @chip: nand chip structure
+ * @oob: oob data buffer
+ * @ops: oob ops structure
+ */
+static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
+ struct mtd_oob_ops *ops)
+{
+ size_t len = ops->ooblen;
+
+ switch(ops->mode) {
+
+ case MTD_OOB_PLACE:
+ case MTD_OOB_RAW:
+ memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+ return oob + len;
+
+ case MTD_OOB_AUTO: {
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ uint32_t boffs = 0, woffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for(; free->length && len; free++, len -= bytes) {
+ /* Write request not from offset 0 ? */
+ if (unlikely(woffs)) {
+ if (woffs >= free->length) {
+ woffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + woffs;
+ bytes = min_t(size_t, len,
+ (free->length - woffs));
+ woffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(chip->oob_poi + boffs, oob, bytes);
+ oob += bytes;
+ }
+ return oob;
+ }
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
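+/* true when x is not aligned to the subpage size */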
+#define NOTALIGNED(x) (x & (chip->subpagesize - 1)) != 0
+
+/**
+ * nand_do_write_ops - [Internal] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ * NAND write with ECC
+ */
+static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, realpage, page, blockmask, column;
+ struct nand_chip *chip = mtd->priv;
+ uint32_t writelen = ops->len;
+ uint8_t *oob = ops->oobbuf;
+ uint8_t *buf = ops->datbuf;
+ int ret, subpage;
+
+ ops->retlen = 0;
+ if (!writelen)
+ return 0;
+
+ column = to & (mtd->writesize - 1);
+ subpage = column || (writelen & (mtd->writesize - 1));
+
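+	/* OOB data can only be written together with aligned, full-page data */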
+ if (subpage && oob)
+ return -EINVAL;
+
+ chipnr = (int)(to >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ printk (KERN_NOTICE "nand_do_write_ops: Device is write protected\n");
+ return -EIO;
+ }
+
+ realpage = (int)(to >> chip->page_shift);
+ page = realpage & chip->pagemask;
+ blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+
+ /* Invalidate the page cache, when we write to the cached page */
+ if (to <= (chip->pagebuf << chip->page_shift) &&
+ (chip->pagebuf << chip->page_shift) < (to + ops->len))
+ chip->pagebuf = -1;
+
+ /* If we're not given explicit OOB data, let it be 0xFF */
+ if (likely(!oob))
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ while(1) {
+ int bytes = mtd->writesize;
+ int cached = writelen > bytes && page != blockmask;
+ uint8_t *wbuf = buf;
+
+ /* Partial page write ? */
+ if (unlikely(column || writelen < (mtd->writesize - 1))) {
+ cached = 0;
+ bytes = min_t(int, bytes - column, (int) writelen);
+ chip->pagebuf = -1;
+ memset(chip->buffers->databuf, 0xff, mtd->writesize);
+ memcpy(&chip->buffers->databuf[column], buf, bytes);
+ wbuf = chip->buffers->databuf;
+ }
+
+ if (unlikely(oob))
+ oob = nand_fill_oob(chip, oob, ops);
+
+ ret = chip->write_page(mtd, chip, wbuf, page, cached,
+ (ops->mode == MTD_OOB_RAW));
+ if (ret)
+ break;
+
+ writelen -= bytes;
+ if (!writelen)
+ break;
+
+ column = 0;
+ buf += bytes;
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+
+ ops->retlen = ops->len - writelen;
+ if (unlikely(oob))
+ ops->oobretlen = ops->ooblen;
+ return ret;
+}
+
+/**
+ * nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC
+ */
+static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+	/* Do not allow writes past end of device */
+ if ((to + len) > mtd->size)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ nand_get_device(chip, mtd, FL_WRITING);
+
+ chip->ops.len = len;
+ chip->ops.datbuf = (uint8_t *)buf;
+ chip->ops.oobbuf = NULL;
+
+ ret = nand_do_write_ops(mtd, to, &chip->ops);
+
+ *retlen = chip->ops.retlen;
+
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * nand_do_write_oob - [MTD Interface] NAND write out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * NAND write out-of-band
+ */
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, status, len;
+ struct nand_chip *chip = mtd->priv;
+
+ MTDDEBUG (MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n",
+ (unsigned int)to, (int)ops->ooblen);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ len = chip->ecc.layout->oobavail;
+ else
+ len = mtd->oobsize;
+
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + ops->ooblen) > len) {
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: "
+ "Attempt to write past end of page\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->ooboffs >= len)) {
+		MTDDEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: "
+ "Attempt to start write outside oob\n");
+ return -EINVAL;
+ }
+
+	/* Do not allow writes past end of device */
+ if (unlikely(to >= mtd->size ||
+ ops->ooboffs + ops->ooblen >
+ ((mtd->size >> chip->page_shift) -
+ (to >> chip->page_shift)) * len)) {
+		MTDDEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: "
+ "Attempt write beyond end of device\n");
+ return -EINVAL;
+ }
+
+ chipnr = (int)(to >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
+
+ /*
+ * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+ * of my DiskOnChip 2000 test units) will clear the whole data page too
+ * if we don't do this. I have no clue why, but I seem to have 'fixed'
+ * it in the doc2000 driver in August 1999. dwmw2.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd))
+ return -EROFS;
+
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page == chip->pagebuf)
+ chip->pagebuf = -1;
+
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+ nand_fill_oob(chip, ops->oobbuf, ops);
+ status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ if (status)
+ return status;
+
+ ops->oobretlen = ops->ooblen;
+
+ return 0;
+}
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = -ENOTSUPP;
+
+ ops->retlen = 0;
+
+ /* Do not allow writes past end of device */
+ if (ops->datbuf && (to + ops->len) > mtd->size) {
+		MTDDEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: "
+			"Attempt write beyond end of device\n");
+ return -EINVAL;
+ }
+
+ nand_get_device(chip, mtd, FL_WRITING);
+
+ switch(ops->mode) {
+ case MTD_OOB_PLACE:
+ case MTD_OOB_AUTO:
+ case MTD_OOB_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_write_oob(mtd, to, ops);
+ else
+ ret = nand_do_write_ops(mtd, to, ops);
+
+ out:
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * single_erase_cmd - [GENERIC] NAND standard block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
+ *
+ * Standard erase command for NAND chips
+ */
+static void single_erase_cmd(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ /* Send commands to erase a block */
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+}
+
+/**
+ * multi_erase_cmd - [GENERIC] AND specific block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
+ *
+ * AND multi block erase command function
+ * Erase 4 consecutive blocks
+ */
+static void multi_erase_cmd(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ /* Send commands to erase a block */
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+}
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks
+ */
+static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return nand_erase_nand(mtd, instr, 0);
+}
+
+#define BBT_PAGE_MASK 0xffffff3f
+/**
+ * nand_erase_nand - [Internal] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one or more blocks
+ */
+int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+ int allowbbt)
+{
+ int page, status, pages_per_block, ret, chipnr;
+ struct nand_chip *chip = mtd->priv;
+ loff_t rewrite_bbt[CONFIG_SYS_NAND_MAX_CHIPS] = {0};
+ unsigned int bbt_masked_page = 0xffffffff;
+ loff_t len;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%012llx, "
+ "len = %llu\n", (unsigned long long) instr->addr,
+ (unsigned long long) instr->len);
+
+ /* Start address must align on block boundary */
+ if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
+ MTDDEBUG (MTD_DEBUG_LEVEL0,
+ "nand_erase: Length not block aligned\n");
+ return -EINVAL;
+ }
+
+ /* Do not allow erase past end of device */
+ if ((instr->len + instr->addr) > mtd->size) {
+ MTDDEBUG (MTD_DEBUG_LEVEL0,
+ "nand_erase: Erase past end of device\n");
+ return -EINVAL;
+ }
+
+ instr->fail_addr = 0xffffffff;
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(chip, mtd, FL_ERASING);
+
+ /* Shift to get first page */
+ page = (int)(instr->addr >> chip->page_shift);
+ chipnr = (int)(instr->addr >> chip->chip_shift);
+
+ /* Calculate pages in each block */
+ pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+
+ /* Select the NAND device */
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ MTDDEBUG (MTD_DEBUG_LEVEL0,
+ "nand_erase: Device is write protected!!!\n");
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /*
+ * If BBT requires refresh, set the BBT page mask to see if the BBT
+ * should be rewritten. Otherwise the mask is set to 0xffffffff which
+ * cannot be matched. This is also done when the bbt is actually
+ * erased to avoid recursive updates
+ */
+ if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
+ bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
+
+ /* Loop through the pages */
+ len = instr->len;
+
+ instr->state = MTD_ERASING;
+
+ while (len) {
+ /*
+ * Check if we have a bad block, we do not erase bad blocks!
+ */
+ if (nand_block_checkbad(mtd, ((loff_t) page) <<
+ chip->page_shift, 0, allowbbt)) {
+ printk(KERN_WARNING "nand_erase: attempt to erase a "
+ "bad block at page 0x%08x\n", page);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /*
+ * Invalidate the page cache, if we erase the block which
+ * contains the current cached page
+ */
+ if (page <= chip->pagebuf && chip->pagebuf <
+ (page + pages_per_block))
+ chip->pagebuf = -1;
+
+ chip->erase_cmd(mtd, page & chip->pagemask);
+
+ status = chip->waitfunc(mtd, chip);
+
+ /*
+ * See if operation failed and additional status checks are
+ * available
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_ERASING,
+ status, page);
+
+ /* See if block erase succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "nand_erase: "
+ "Failed erase, page 0x%08x\n", page);
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = ((loff_t)page << chip->page_shift);
+ goto erase_exit;
+ }
+
+ /*
+ * If BBT requires refresh, set the BBT rewrite flag to the
+ * page being erased
+ */
+ if (bbt_masked_page != 0xffffffff &&
+ (page & BBT_PAGE_MASK) == bbt_masked_page)
+ rewrite_bbt[chipnr] =
+ ((loff_t)page << chip->page_shift);
+
+ /* Increment page address and decrement length */
+ len -= (1 << chip->phys_erase_shift);
+ page += pages_per_block;
+
+ /* Check, if we cross a chip boundary */
+ if (len && !(page & chip->pagemask)) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+
+ /*
+ * If BBT requires refresh and BBT-PERCHIP, set the BBT
+ * page mask to see if this BBT should be rewritten
+ */
+ if (bbt_masked_page != 0xffffffff &&
+ (chip->bbt_td->options & NAND_BBT_PERCHIP))
+ bbt_masked_page = chip->bbt_td->pages[chipnr] &
+ BBT_PAGE_MASK;
+ }
+ }
+ instr->state = MTD_ERASE_DONE;
+
+ erase_exit:
+
+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+
+ /* Deselect and wake up anyone waiting on the device */
+ nand_release_device(mtd);
+
+ /* Do call back function */
+ if (!ret)
+ mtd_erase_callback(instr);
+
+ /*
+ * If BBT requires refresh and erase was successful, rewrite any
+ * selected bad block tables
+ */
+ if (bbt_masked_page == 0xffffffff || ret)
+ return ret;
+
+ for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
+ if (!rewrite_bbt[chipnr])
+ continue;
+ /* update the BBT for chip */
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt "
+ "(%d:0x%0llx 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
+ chip->bbt_td->pages[chipnr]);
+ nand_update_bbt(mtd, rewrite_bbt[chipnr]);
+ }
+
+ /* Return more or less happy */
+ return ret;
+}
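+
+/*
+ * Usage sketch (illustrative only): erases normally go through mtd->erase()
+ * with a caller-supplied erase_info, roughly:
+ *
+ *	struct erase_info instr;
+ *
+ *	memset(&instr, 0, sizeof(instr));
+ *	instr.mtd  = mtd;
+ *	instr.addr = offset;            (must be block aligned)
+ *	instr.len  = mtd->erasesize;    (one block)
+ *	ret = mtd->erase(mtd, &instr);  (0 on success, -EIO/-EINVAL on failure)
+ */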
+
+/**
+ * nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait for chip ready function
+ */
+static void nand_sync(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ MTDDEBUG (MTD_DEBUG_LEVEL3, "nand_sync: called\n");
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(chip, mtd, FL_SYNCING);
+ /* Release it and go back */
+ nand_release_device(mtd);
+}
+
+/**
+ * nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ /* Check for invalid offset */
+ if (offs > mtd->size)
+ return -EINVAL;
+
+ return nand_block_checkbad(mtd, offs, 1, 0);
+}
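+
+/*
+ * Usage sketch (illustrative only): callers typically probe each erase block
+ * and skip the bad ones, e.g.
+ *
+ *	for (off = start; off < end; off += mtd->erasesize) {
+ *		if (mtd->block_isbad(mtd, off))
+ *			continue;
+ *		...access this block...
+ *	}
+ */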
+
+/**
+ * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ if ((ret = nand_block_isbad(mtd, ofs))) {
+ /* If it was bad already, return success and do nothing. */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return chip->block_markbad(mtd, ofs);
+}
+
+/*
+ * Set default functions
+ */
+static void nand_set_defaults(struct nand_chip *chip, int busw)
+{
+ /* check for proper chip_delay setup, set 20us if not */
+ if (!chip->chip_delay)
+ chip->chip_delay = 20;
+
+ /* check, if a user supplied command function given */
+ if (chip->cmdfunc == NULL)
+ chip->cmdfunc = nand_command;
+
+ /* check, if a user supplied wait function given */
+ if (chip->waitfunc == NULL)
+ chip->waitfunc = nand_wait;
+
+ if (!chip->select_chip)
+ chip->select_chip = nand_select_chip;
+ if (!chip->read_byte)
+ chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
+ if (!chip->read_word)
+ chip->read_word = nand_read_word;
+ if (!chip->block_bad)
+ chip->block_bad = nand_block_bad;
+ if (!chip->block_markbad)
+ chip->block_markbad = nand_default_block_markbad;
+ if (!chip->write_buf)
+ chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!chip->read_buf)
+ chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
+ if (!chip->verify_buf)
+ chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
+ if (!chip->scan_bbt)
+ chip->scan_bbt = nand_default_bbt;
+ if (!chip->controller)
+ chip->controller = &chip->hwcontrol;
+}
+
+/*
+ * Get the flash and manufacturer id and lookup if the type is supported
+ */
+static const struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int busw, int *maf_id,
+ const struct nand_flash_dev *type)
+{
+ int dev_id, maf_idx;
+ int tmp_id, tmp_manf;
+
+ /* Select the device */
+ chip->select_chip(mtd, 0);
+
+ /*
+ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+ * after power-up
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Send the command for reading device ID */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+ *maf_id = chip->read_byte(mtd);
+ dev_id = chip->read_byte(mtd);
+
+ /* Try again to make sure, as on some systems the bus-hold or other
+ * interface concerns can cause random data which looks like a
+ * possibly credible NAND flash to appear. If the two results do
+ * not match, ignore the device completely.
+ */
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+
+ tmp_manf = chip->read_byte(mtd);
+ tmp_id = chip->read_byte(mtd);
+
+ if (tmp_manf != *maf_id || tmp_id != dev_id) {
+ printk(KERN_INFO "%s: second ID read did not match "
+ "%02x,%02x against %02x,%02x\n", __func__,
+ *maf_id, dev_id, tmp_manf, tmp_id);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!type)
+ type = nand_flash_ids;
+
+ for (; type->name != NULL; type++)
+ if (dev_id == type->id)
+ break;
+
+ if (!type->name) {
+ /* suppress warning if there is no nand */
+ if (*maf_id != 0x00 && *maf_id != 0xff &&
+ dev_id != 0x00 && dev_id != 0xff)
+ printk(KERN_INFO "%s: unknown NAND device: "
+ "Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ __func__, *maf_id, dev_id);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!mtd->name)
+ mtd->name = type->name;
+
+ chip->chipsize = (uint64_t)type->chipsize << 20;
+
+ /* Newer devices have all the information in additional id bytes */
+ if (!type->pagesize) {
+ int extid;
+ /* The 3rd id byte holds MLC / multichip data */
+ chip->cellinfo = chip->read_byte(mtd);
+ /* The 4th id byte is the important one */
+ extid = chip->read_byte(mtd);
+ /* Calc pagesize */
+ mtd->writesize = 1024 << (extid & 0x3);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+
+ } else {
+ /*
+ * Old devices have chip data hardcoded in the device id table
+ */
+ mtd->erasesize = type->erasesize;
+ mtd->writesize = type->pagesize;
+ mtd->oobsize = mtd->writesize / 32;
+ busw = type->options & NAND_BUSWIDTH_16;
+ }
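+ /*
+ * Worked example (illustrative): an extended 4th ID byte of 0x15
+ * decodes to writesize = 1024 << 1 = 2048, oobsize = (8 << 1) * 4 = 64,
+ * erasesize = 64KiB << 1 = 128KiB and an 8 bit bus.
+ */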
+
+ /* Try to identify manufacturer */
+ for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
+ if (nand_manuf_ids[maf_idx].id == *maf_id)
+ break;
+ }
+
+ /*
+ * Check, if buswidth is correct. Hardware drivers should set
+ * the chip up correctly!
+ */
+ if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ printk(KERN_INFO "NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
+ dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
+ printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+ busw ? 16 : 8);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Calculate the address shift from the page size */
+ chip->page_shift = ffs(mtd->writesize) - 1;
+ /* Convert chipsize to number of pages per chip -1. */
+ chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+
+ chip->bbt_erase_shift = chip->phys_erase_shift =
+ ffs(mtd->erasesize) - 1;
+ if (chip->chipsize & 0xffffffff)
+ chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
+ else
+ chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 31;
+
+ /* Set the bad block position */
+ chip->badblockpos = mtd->writesize > 512 ?
+ NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+
+ /* Get chip options, preserve non chip based options */
+ chip->options &= ~NAND_CHIPOPTIONS_MSK;
+ chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
+
+ /*
+ * Set chip as a default. Board drivers can override it, if necessary
+ */
+ chip->options |= NAND_NO_AUTOINCR;
+
+ /* Check if the chip is not a Samsung device. Do not clear the
+ * options for chips which do not have an extended id.
+ */
+ if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
+ chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+
+ /* Check for AND chips with 4 page planes */
+ if (chip->options & NAND_4PAGE_ARRAY)
+ chip->erase_cmd = multi_erase_cmd;
+ else
+ chip->erase_cmd = single_erase_cmd;
+
+ /* Do not replace user supplied command function ! */
+ if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+ chip->cmdfunc = nand_command_lp;
+
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, dev_id,
+ nand_manuf_ids[maf_idx].name, type->name);
+
+ return type;
+}
+
+/**
+ * nand_scan_ident - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: Number of chips to scan for
+ * @table: Alternative NAND ID table
+ *
+ * This is the first phase of the normal nand_scan() function. It
+ * reads the flash ID and sets up MTD fields accordingly.
+ *
+ * The mtd->owner field must be set to the module of the caller.
+ */
+int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ const struct nand_flash_dev *table)
+{
+ int i, busw, nand_maf_id;
+ struct nand_chip *chip = mtd->priv;
+ const struct nand_flash_dev *type;
+
+ /* Get buswidth to select the correct functions */
+ busw = chip->options & NAND_BUSWIDTH_16;
+ /* Set the default functions */
+ nand_set_defaults(chip, busw);
+
+ /* Read the flash type */
+ type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table);
+
+ if (IS_ERR(type)) {
+#ifndef CONFIG_SYS_NAND_QUIET_TEST
+ printk(KERN_WARNING "No NAND device found!!!\n");
+#endif
+ chip->select_chip(mtd, -1);
+ return PTR_ERR(type);
+ }
+
+ /* Check for a chip array */
+ for (i = 1; i < maxchips; i++) {
+ chip->select_chip(mtd, i);
+ /* See comment in nand_get_flash_type for reset */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ /* Send the command for reading device ID */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ /* Read manufacturer and device IDs */
+ if (nand_maf_id != chip->read_byte(mtd) ||
+ type->id != chip->read_byte(mtd))
+ break;
+ }
+#ifdef DEBUG
+ if (i > 1)
+ printk(KERN_INFO "%d NAND chips detected\n", i);
+#endif
+
+ /* Store the number of chips and calc total size for mtd */
+ chip->numchips = i;
+ mtd->size = i * chip->chipsize;
+
+ return 0;
+}
+
+
+/**
+ * nand_scan_tail - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ *
+ * This is the second phase of the normal nand_scan() function. It
+ * fills out all the uninitialized function pointers with the defaults
+ * and scans for a bad block table if appropriate.
+ */
+int nand_scan_tail(struct mtd_info *mtd)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ if (!(chip->options & NAND_OWN_BUFFERS))
+ chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
+ if (!chip->buffers)
+ return -ENOMEM;
+
+ /* Set the internal oob buffer location, just after the page data */
+ chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+
+ /*
+ * If no default placement scheme is given, select an appropriate one
+ */
+ if (!chip->ecc.layout) {
+ switch (mtd->oobsize) {
+ case 8:
+ chip->ecc.layout = &nand_oob_8;
+ break;
+ case 16:
+ chip->ecc.layout = &nand_oob_16;
+ break;
+ case 64:
+ chip->ecc.layout = &nand_oob_64;
+ break;
+ case 128:
+ chip->ecc.layout = &nand_oob_128;
+ break;
+ default:
+ printk(KERN_WARNING "No oob scheme defined for "
+ "oobsize %d\n", mtd->oobsize);
+ }
+ }
+
+ if (!chip->write_page)
+ chip->write_page = nand_write_page;
+
+ /*
+ * Check ECC mode, default to software. If 3 byte/512 byte hardware ECC is
+ * selected and we have a 256 byte pagesize, fall back to software ECC
+ */
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_HW_OOB_FIRST:
+ /* Similar to NAND_ECC_HW, but a separate read_page handler */
+ if (!chip->ecc.calculate || !chip->ecc.correct ||
+ !chip->ecc.hwctl) {
+ printk(KERN_WARNING "No ECC functions supplied, "
+ "Hardware ECC not possible\n");
+ BUG();
+ }
+ if (!chip->ecc.read_page)
+ chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+
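+ /* fall through to fill in the remaining NAND_ECC_HW defaults */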
+ case NAND_ECC_HW:
+ /* Use standard hwecc read page function ? */
+ if (!chip->ecc.read_page)
+ chip->ecc.read_page = nand_read_page_hwecc;
+ if (!chip->ecc.write_page)
+ chip->ecc.write_page = nand_write_page_hwecc;
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ if (!chip->ecc.read_oob)
+ chip->ecc.read_oob = nand_read_oob_std;
+ if (!chip->ecc.write_oob)
+ chip->ecc.write_oob = nand_write_oob_std;
+
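+ /* fall through: the syndrome case below holds the shared sanity checks */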
+ case NAND_ECC_HW_SYNDROME:
+ if ((!chip->ecc.calculate || !chip->ecc.correct ||
+ !chip->ecc.hwctl) &&
+ (!chip->ecc.read_page ||
+ chip->ecc.read_page == nand_read_page_hwecc ||
+ !chip->ecc.write_page ||
+ chip->ecc.write_page == nand_write_page_hwecc)) {
+ printk(KERN_WARNING "No ECC functions supplied, "
+ "Hardware ECC not possible\n");
+ BUG();
+ }
+ /* Use standard syndrome read/write page function ? */
+ if (!chip->ecc.read_page)
+ chip->ecc.read_page = nand_read_page_syndrome;
+ if (!chip->ecc.write_page)
+ chip->ecc.write_page = nand_write_page_syndrome;
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw_syndrome;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw_syndrome;
+ if (!chip->ecc.read_oob)
+ chip->ecc.read_oob = nand_read_oob_syndrome;
+ if (!chip->ecc.write_oob)
+ chip->ecc.write_oob = nand_write_oob_syndrome;
+
+ if (mtd->writesize >= chip->ecc.size)
+ break;
+ printk(KERN_WARNING "%d byte HW ECC not possible on "
+ "%d byte page size, fallback to SW ECC\n",
+ chip->ecc.size, mtd->writesize);
+ chip->ecc.mode = NAND_ECC_SOFT;
+
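+ /* fall through to set up software ECC */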
+ case NAND_ECC_SOFT:
+ chip->ecc.calculate = nand_calculate_ecc;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.read_page = nand_read_page_swecc;
+ chip->ecc.read_subpage = nand_read_subpage;
+ chip->ecc.write_page = nand_write_page_swecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ chip->ecc.read_oob = nand_read_oob_std;
+ chip->ecc.write_oob = nand_write_oob_std;
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ break;
+
+ case NAND_ECC_NONE:
+ printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. "
+ "This is not recommended !!\n");
+ chip->ecc.read_page = nand_read_page_raw;
+ chip->ecc.write_page = nand_write_page_raw;
+ chip->ecc.read_oob = nand_read_oob_std;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ chip->ecc.write_oob = nand_write_oob_std;
+ chip->ecc.size = mtd->writesize;
+ chip->ecc.bytes = 0;
+ break;
+
+ default:
+ printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n",
+ chip->ecc.mode);
+ BUG();
+ }
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area
+ */
+ chip->ecc.layout->oobavail = 0;
+ for (i = 0; chip->ecc.layout->oobfree[i].length
+ && i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
+ chip->ecc.layout->oobavail +=
+ chip->ecc.layout->oobfree[i].length;
+ mtd->oobavail = chip->ecc.layout->oobavail;
+
+ /*
+ * Set the number of read / write steps for one page depending on ECC
+ * mode
+ */
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ if(chip->ecc.steps * chip->ecc.size != mtd->writesize) {
+ printk(KERN_WARNING "Invalid ecc parameters\n");
+ BUG();
+ }
+ chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
+
+ /*
+ * Allow subpage writes up to ecc.steps. Not possible for MLC
+ * FLASH.
+ */
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
+ switch(chip->ecc.steps) {
+ case 2:
+ mtd->subpage_sft = 1;
+ break;
+ case 4:
+ case 8:
+ case 16:
+ mtd->subpage_sft = 2;
+ break;
+ }
+ }
+ chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+ /* Initialize state */
+ chip->state = FL_READY;
+
+ /* De-select the device */
+ chip->select_chip(mtd, -1);
+
+ /* Invalidate the pagebuffer reference */
+ chip->pagebuf = -1;
+
+ /* Fill in remaining MTD driver data */
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->erase = nand_erase;
+ mtd->point = NULL;
+ mtd->unpoint = NULL;
+ mtd->read = nand_read;
+ mtd->write = nand_write;
+ mtd->read_oob = nand_read_oob;
+ mtd->write_oob = nand_write_oob;
+ mtd->sync = nand_sync;
+ mtd->lock = NULL;
+ mtd->unlock = NULL;
+ mtd->block_isbad = nand_block_isbad;
+ mtd->block_markbad = nand_block_markbad;
+
+ /* propagate ecc.layout to mtd_info */
+ mtd->ecclayout = chip->ecc.layout;
+
+ /* Check, if we should skip the bad block table scan */
+ if (chip->options & NAND_SKIP_BBTSCAN)
+ chip->options |= NAND_BBT_SCANNED;
+
+ return 0;
+}
+
+/**
+ * nand_scan - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: Number of chips to scan for
+ *
+ * This fills out all the uninitialized function pointers
+ * with the defaults.
+ * The flash ID is read and the mtd/chip structures are
+ * filled with the appropriate values.
+ * The mtd->owner field must be set to the module of the caller
+ *
+ */
+int nand_scan(struct mtd_info *mtd, int maxchips)
+{
+ int ret;
+
+ ret = nand_scan_ident(mtd, maxchips, NULL);
+ if (!ret)
+ ret = nand_scan_tail(mtd);
+ return ret;
+}
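+
+/*
+ * Usage sketch (illustrative only, not part of this driver): a board port
+ * usually fills in the low level hooks and lets the generic NAND setup code
+ * call nand_scan(), roughly:
+ *
+ *	int board_nand_init(struct nand_chip *nand)
+ *	{
+ *		nand->IO_ADDR_R  = (void __iomem *)CONFIG_SYS_NAND_BASE;
+ *		nand->IO_ADDR_W  = (void __iomem *)CONFIG_SYS_NAND_BASE;
+ *		nand->ecc.mode   = NAND_ECC_SOFT;
+ *		nand->chip_delay = 20;
+ *		return 0;
+ *	}
+ *
+ * nand_scan(mtd, 1) then identifies the chip and fills in the MTD hooks.
+ */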
+
+/**
+ * nand_release - [NAND Interface] Free resources held by the NAND device
+ * @mtd: MTD device structure
+*/
+void nand_release(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ /* Deregister partitions */
+ del_mtd_partitions(mtd);
+#endif
+
+ /* Free bad block table memory */
+ kfree(chip->bbt);
+ if (!(chip->options & NAND_OWN_BUFFERS))
+ kfree(chip->buffers);
+}
diff --git a/u-boot/drivers/mtd/nand/nand_bbt.c b/u-boot/drivers/mtd/nand/nand_bbt.c
new file mode 100644
index 0000000..521ddde
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/nand_bbt.c
@@ -0,0 +1,1220 @@
+/*
+ * drivers/mtd/nand_bbt.c
+ *
+ * Overview:
+ * Bad block table support for the NAND driver
+ *
+ * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description:
+ *
+ * When nand_scan_bbt is called, it tries to find the bad block table
+ * depending on the options in the bbt descriptor(s). If a bbt is found
+ * then the contents are read and the memory based bbt is created. If a
+ * mirrored bbt is selected then the mirror is searched too and the
+ * versions are compared. If the mirror has a greater version number,
+ * then the mirror bbt is used to build the memory based bbt.
+ * If the tables are not versioned, then we "or" the bad block information.
+ * If one of the bbt's is out of date or does not exist it is (re)created.
+ * If no bbt exists at all then the device is scanned for factory marked
+ * good / bad blocks and the bad block tables are created.
+ *
+ * For manufacturer created bbts like the one found on M-SYS DOC devices
+ * the bbt is searched and read but never created
+ *
+ * The autogenerated bad block table is located in the last good blocks
+ * of the device. The table is mirrored, so it can be updated eventually.
+ * The table is marked in the oob area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date.
+ *
+ * The table uses 2 bits per block
+ * 11b: block is good
+ * 00b: block is factory marked bad
+ * 01b, 10b: block is marked bad due to wear
+ *
+ * The memory bad block table uses the following scheme:
+ * 00b: block is good
+ * 01b: block is marked bad due to wear
+ * 10b: block is reserved (to protect the bbt area)
+ * 11b: block is factory marked bad
+ *
+ * Multichip devices like DOC store the bad block info per floor.
+ *
+ * Following assumptions are made:
+ * - bbts start at a page boundary, if autolocated on a block boundary
+ * - the space necessary for a bbt in FLASH does not exceed a block boundary
+ *
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/mtd/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+
+#include <asm/errno.h>
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block
+ * tables and good / bad block identifiers.
+ * If the NAND_BBT_SCANEMPTY option is set then check if all bytes except the
+ * pattern area contain 0xff
+ *
+*/
+static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+ int i, end = 0;
+ uint8_t *p = buf;
+
+ end = paglen + td->offs;
+ if (td->options & NAND_BBT_SCANEMPTY) {
+ for (i = 0; i < end; i++) {
+ if (p[i] != 0xff)
+ return -1;
+ }
+ }
+ p += end;
+
+ /* Compare the pattern */
+ for (i = 0; i < td->len; i++) {
+ if (p[i] != td->pattern[i])
+ return -1;
+ }
+
+ if (td->options & NAND_BBT_SCANEMPTY) {
+ p += td->len;
+ end += td->len;
+ for (i = end; i < len; i++) {
+ if (*p++ != 0xff)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block
+ * tables and good / bad block identifiers. Same as check_pattern, but
+ * no optional empty check
+ *
+*/
+static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ int i;
+ uint8_t *p = buf;
+
+ /* Compare the pattern */
+ for (i = 0; i < td->len; i++) {
+ if (p[td->offs + i] != td->pattern[i])
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * read_bbt - [GENERIC] Read the bad block table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @bits: number of bits per block
+ * @offs: offset in the memory table
+ * @reserved_block_code: Pattern to identify reserved blocks
+ *
+ * Read the bad block table starting from page.
+ *
+ */
+static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
+ int bits, int offs, int reserved_block_code)
+{
+ int res, i, j, act = 0;
+ struct nand_chip *this = mtd->priv;
+ size_t retlen, len, totlen;
+ loff_t from;
+ uint8_t msk = (uint8_t) ((1 << bits) - 1);
+
+ totlen = (num * bits) >> 3;
+ from = ((loff_t) page) << this->page_shift;
+
+ while (totlen) {
+ len = min(totlen, (size_t) (1 << this->bbt_erase_shift));
+ res = mtd->read(mtd, from, len, &retlen, buf);
+ if (res < 0) {
+ if (retlen != len) {
+ printk(KERN_INFO "nand_bbt: Error reading bad block table\n");
+ return res;
+ }
+ printk(KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
+ }
+
+ /* Analyse data */
+ for (i = 0; i < len; i++) {
+ uint8_t dat = buf[i];
+ for (j = 0; j < 8; j += bits, act += 2) {
+ uint8_t tmp = (dat >> j) & msk;
+ if (tmp == msk)
+ continue;
+ if (reserved_block_code && (tmp == reserved_block_code)) {
+ printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n",
+ (loff_t)((offs << 2) +
+ (act >> 1)) <<
+ this->bbt_erase_shift);
+ this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
+ mtd->ecc_stats.bbtblocks++;
+ continue;
+ }
+ /* Leave it for now, once it has matured we can move this
+ * message to MTD_DEBUG_LEVEL0 */
+ printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n",
+ (loff_t)((offs << 2) + (act >> 1)) <<
+ this->bbt_erase_shift);
+ /* Factory marked bad or worn out ? */
+ if (tmp == 0)
+ this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
+ else
+ this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
+ mtd->ecc_stats.badblocks++;
+ }
+ }
+ totlen -= len;
+ from += len;
+ }
+ return 0;
+}
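+
+/*
+ * Decoding note (illustrative): with the usual 2 bits per block flash table
+ * each byte read above covers four blocks, low bits first.  0xff means four
+ * good blocks, while e.g. 0xfc marks the first of the four as factory bad
+ * (00b) and leaves the other three good (11b).
+ */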
+
+/**
+ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips.
+ * Applies only if NAND_BBT_PERCHIP option is set
+ *
+ * Read the bad block table for all chips starting at a given page
+ * We assume that the bbt bits are in consecutive order.
+*/
+static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ int res = 0, i;
+ int bits;
+
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ if (td->options & NAND_BBT_PERCHIP) {
+ int offs = 0;
+ for (i = 0; i < this->numchips; i++) {
+ if (chip == -1 || chip == i)
+ res = read_bbt (mtd, buf, td->pages[i], this->chipsize >> this->bbt_erase_shift, bits, offs, td->reserved_block_code);
+ if (res)
+ return res;
+ offs += this->chipsize >> (this->bbt_erase_shift + 2);
+ }
+ } else {
+ res = read_bbt (mtd, buf, td->pages[0], mtd->size >> this->bbt_erase_shift, bits, 0, td->reserved_block_code);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+/*
+ * Scan read raw data from flash
+ */
+static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ size_t len)
+{
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OOB_RAW;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+ ops.datbuf = buf;
+ ops.len = len;
+
+ return mtd->read_oob(mtd, offs, &ops);
+}
+
+/*
+ * Scan write data with oob to flash
+ */
+static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
+ uint8_t *buf, uint8_t *oob)
+{
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.datbuf = buf;
+ ops.oobbuf = oob;
+ ops.len = len;
+
+ return mtd->write_oob(mtd, offs, &ops);
+}
+
+/**
+ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Read the bad block table(s) for all chips starting at a given page
+ * We assume that the bbt bits are in consecutive order.
+ *
+*/
+static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+ struct nand_chip *this = mtd->priv;
+
+ /* Read the primary version, if available */
+ if (td->options & NAND_BBT_VERSION) {
+ scan_read_raw(mtd, buf, (loff_t)td->pages[0] <<
+ this->page_shift, mtd->writesize);
+ td->version[0] = buf[mtd->writesize + td->veroffs];
+ printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
+ td->pages[0], td->version[0]);
+ }
+
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ scan_read_raw(mtd, buf, (loff_t)md->pages[0] <<
+ this->page_shift, mtd->writesize);
+ md->version[0] = buf[mtd->writesize + md->veroffs];
+ printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
+ }
+ return 1;
+}
+
+/*
+ * Scan a given block fully
+ */
+static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+ loff_t offs, uint8_t *buf, size_t readlen,
+ int scanlen, int len)
+{
+ int ret, j;
+
+ ret = scan_read_raw(mtd, buf, offs, readlen);
+ if (ret)
+ return ret;
+
+ for (j = 0; j < len; j++, buf += scanlen) {
+ if (check_pattern(buf, scanlen, mtd->writesize, bd))
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Scan a given block partially
+ */
+static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+ loff_t offs, uint8_t *buf, int len)
+{
+ struct mtd_oob_ops ops;
+ int j, ret;
+
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ for (j = 0; j < len; j++) {
+ /*
+ * Read the full oob until read_oob is fixed to
+ * handle single byte reads for 16 bit
+ * buswidth
+ */
+ ret = mtd->read_oob(mtd, offs, &ops);
+ if (ret)
+ return ret;
+
+ if (check_short_pattern(buf, bd))
+ return 1;
+
+ offs += mtd->writesize;
+ }
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips.
+ * Applies only if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device
+ * for the given good/bad block identify pattern
+ */
+static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, numblocks, len, scanlen;
+ int startblock;
+ loff_t from;
+ size_t readlen;
+
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "Scanning device for bad blocks\n");
+
+ if (bd->options & NAND_BBT_SCANALLPAGES)
+ len = 1 << (this->bbt_erase_shift - this->page_shift);
+ else {
+ if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ len = 2;
+ else
+ len = 1;
+ }
+
+ if (!(bd->options & NAND_BBT_SCANEMPTY)) {
+ /* We need to read only a few bytes from the OOB area */
+ scanlen = 0;
+ readlen = bd->len;
+ } else {
+ /* Full page content should be read */
+ scanlen = mtd->writesize + mtd->oobsize;
+ readlen = len * mtd->writesize;
+ }
+
+ if (chip == -1) {
+ /* Note that numblocks is 2 * (real numblocks) here, see i+=2
+ * below as it makes shifting and masking less painful */
+ numblocks = mtd->size >> (this->bbt_erase_shift - 1);
+ startblock = 0;
+ from = 0;
+ } else {
+ if (chip >= this->numchips) {
+ printk(KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n",
+ chip + 1, this->numchips);
+ return -EINVAL;
+ }
+ numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
+ startblock = chip * numblocks;
+ numblocks += startblock;
+ from = (loff_t)startblock << (this->bbt_erase_shift - 1);
+ }
+
+ for (i = startblock; i < numblocks;) {
+ int ret;
+
+ if (bd->options & NAND_BBT_SCANALLPAGES)
+ ret = scan_block_full(mtd, bd, from, buf, readlen,
+ scanlen, len);
+ else
+ ret = scan_block_fast(mtd, bd, from, buf, len);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ this->bbt[i >> 3] |= 0x03 << (i & 0x6);
+ MTDDEBUG (MTD_DEBUG_LEVEL0,
+ "Bad eraseblock %d at 0x%012llx\n",
+ i >> 1, (unsigned long long)from);
+ mtd->ecc_stats.badblocks++;
+ }
+
+ i += 2;
+ from += (1 << this->bbt_erase_shift);
+ }
+ return 0;
+}
+
+/**
+ * search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern.
+ * The search is performed either from the beginning up or from the end of
+ * the device downwards. The search always starts at the start of a
+ * block.
+ * If the option NAND_BBT_PERCHIP is given, each chip is searched
+ * for a bbt, which contains the bad block information of this chip.
+ * This is necessary to provide support for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page
+ * in a block.
+ */
+static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, chips;
+ int bits, startblock, block, dir;
+ int scanlen = mtd->writesize + mtd->oobsize;
+ int bbtblocks;
+ int blocktopage = this->bbt_erase_shift - this->page_shift;
+
+ /* Search direction top -> down ? */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+ dir = -1;
+ } else {
+ startblock = 0;
+ dir = 1;
+ }
+
+ /* Do we have a bbt per chip ? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ startblock &= bbtblocks - 1;
+ } else {
+ chips = 1;
+ bbtblocks = mtd->size >> this->bbt_erase_shift;
+ }
+
+ /* Number of bits for each erase block in the bbt */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+
+ for (i = 0; i < chips; i++) {
+ /* Reset version information */
+ td->version[i] = 0;
+ td->pages[i] = -1;
+ /* Scan the maximum number of blocks */
+ for (block = 0; block < td->maxblocks; block++) {
+
+ int actblock = startblock + dir * block;
+ loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+
+ /* Read first page */
+ scan_read_raw(mtd, buf, offs, mtd->writesize);
+ if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
+ td->pages[i] = actblock << blocktopage;
+ if (td->options & NAND_BBT_VERSION) {
+ td->version[i] = buf[mtd->writesize + td->veroffs];
+ }
+ break;
+ }
+ }
+ startblock += this->chipsize >> this->bbt_erase_shift;
+ }
+ /* Check, if we found a bbt for each requested chip */
+ for (i = 0; i < chips; i++) {
+ if (td->pages[i] == -1)
+ printk(KERN_WARNING "Bad block table not found for chip %d\n", i);
+ else
+ printk(KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i],
+ td->version[i]);
+ }
+ return 0;
+}
+
+/**
+ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Search and read the bad block table(s)
+*/
+static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+ /* Search the primary table */
+ search_bbt(mtd, buf, td);
+
+ /* Search the mirror table */
+ if (md)
+ search_bbt(mtd, buf, md);
+
+ /* Force result check */
+ return 1;
+}
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ *
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table
+ *
+*/
+static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+ int chipsel)
+{
+ struct nand_chip *this = mtd->priv;
+ struct erase_info einfo;
+ int i, j, res, chip = 0;
+ int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+ int nrchips, bbtoffs, pageoffs, ooboffs;
+ uint8_t msk[4];
+ uint8_t rcode = td->reserved_block_code;
+ size_t retlen, len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops;
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (!rcode)
+ rcode = 0xff;
+ /* Write bad block table per chip rather than per device ? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ /* Full device write or specific chip ? */
+ if (chipsel == -1) {
+ nrchips = this->numchips;
+ } else {
+ nrchips = chipsel + 1;
+ chip = chipsel;
+ }
+ } else {
+ numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ nrchips = 1;
+ }
+
+ /* Loop through the chips */
+ for (; chip < nrchips; chip++) {
+
+ /* If there was already a version of the table, reuse the page.
+ * This applies for absolute placement too, as we have the
+ * page nr. in td->pages.
+ */
+ if (td->pages[chip] != -1) {
+ page = td->pages[chip];
+ goto write;
+ }
+
+ /* Automatic placement of the bad block table */
+ /* Search direction top -> down ? */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+ /* Check, if the block is bad */
+ switch ((this->bbt[block >> 2] >>
+ (2 * (block & 0x03))) & 0x03) {
+ case 0x01:
+ case 0x03:
+ continue;
+ }
+ page = block <<
+ (this->bbt_erase_shift - this->page_shift);
+ /* Check, if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ goto write;
+ }
+ printk(KERN_ERR "No space left to write bad block table\n");
+ return -ENOSPC;
+ write:
+
+ /* Set up shift count and masks for the flash table */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ msk[2] = ~rcode;
+ switch (bits) {
+ case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x01;
+ break;
+ case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x03;
+ break;
+ case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
+ msk[3] = 0x0f;
+ break;
+ case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
+ msk[3] = 0xff;
+ break;
+ default: return -EINVAL;
+ }
+
+ bbtoffs = chip * (numblocks >> 2);
+
+ to = ((loff_t) page) << this->page_shift;
+
+ /* Must we save the block contents ? */
+ if (td->options & NAND_BBT_SAVECONTENT) {
+ /* Make it block aligned */
+ to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
+ len = 1 << this->bbt_erase_shift;
+ res = mtd->read(mtd, to, len, &retlen, buf);
+ if (res < 0) {
+ if (retlen != len) {
+ printk(KERN_INFO "nand_bbt: Error "
+ "reading block for writing "
+ "the bad block table\n");
+ return res;
+ }
+ printk(KERN_WARNING "nand_bbt: ECC error "
+ "while reading block for writing "
+ "bad block table\n");
+ }
+ /* Read oob data */
+ ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ ops.oobbuf = &buf[len];
+ res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
+ if (res < 0 || ops.oobretlen != ops.ooblen)
+ goto outerr;
+
+ /* Calc the byte offset in the buffer */
+ pageoffs = page - (int)(to >> this->page_shift);
+ offs = pageoffs << this->page_shift;
+ /* Preset the bbt area with 0xff */
+ memset(&buf[offs], 0xff, (size_t) (numblocks >> sft));
+ ooboffs = len + (pageoffs * mtd->oobsize);
+
+ } else {
+ /* Calc length */
+ len = (size_t) (numblocks >> sft);
+ /* Make it page aligned ! */
+ len = (len + (mtd->writesize - 1)) &
+ ~(mtd->writesize - 1);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len +
+ (len >> this->page_shift)* mtd->oobsize);
+ offs = 0;
+ ooboffs = len;
+ /* Pattern is located in oob area of first page */
+ memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+ }
+
+ if (td->options & NAND_BBT_VERSION)
+ buf[ooboffs + td->veroffs] = td->version[chip];
+
+ /* walk through the memory table */
+ for (i = 0; i < numblocks;) {
+ uint8_t dat;
+ dat = this->bbt[bbtoffs + (i >> 2)];
+ for (j = 0; j < 4; j++, i++) {
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ /* Do not store the reserved bbt blocks ! */
+ buf[offs + (i >> sft)] &=
+ ~(msk[dat & 0x03] << sftcnt);
+ dat >>= 2;
+ }
+ }
+
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = to;
+ einfo.len = 1 << this->bbt_erase_shift;
+ res = nand_erase_nand(mtd, &einfo, 1);
+ if (res < 0)
+ goto outerr;
+
+ res = scan_write_bbt(mtd, to, len, buf, &buf[len]);
+ if (res < 0)
+ goto outerr;
+
+ printk(KERN_DEBUG "Bad block table written to 0x%012llx, "
+ "version 0x%02X\n", (unsigned long long)to,
+ td->version[chip]);
+
+ /* Mark it as used */
+ td->pages[chip] = page;
+ }
+ return 0;
+
+ outerr:
+ printk(KERN_WARNING
+ "nand_bbt: Error while writing bad block table %d\n", res);
+ return res;
+}
+
+/**
+ * nand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device
+ * for manufacturer / software marked good / bad blocks
+*/
+static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ bd->options &= ~NAND_BBT_SCANEMPTY;
+ return create_bbt(mtd, this->buffers->databuf, bd, -1);
+}
+
+/**
+ * check_create - [GENERIC] create and write bbt(s) if necessary
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to read_bbt
+ * and creates / updates the bbt(s) if necessary
+ * Creation is necessary if no bbt was found for the chip/device
+ * Update is necessary if one of the tables is missing or the
+ * version nr. of one table is less than the other
+*/
+static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
+{
+ int i, chips, writeops, chipsel, res;
+ struct nand_chip *this = mtd->priv;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+ struct nand_bbt_descr *rd, *rd2;
+
+ /* Do we have a bbt per chip ? */
+ if (td->options & NAND_BBT_PERCHIP)
+ chips = this->numchips;
+ else
+ chips = 1;
+
+ for (i = 0; i < chips; i++) {
+ writeops = 0;
+ rd = NULL;
+ rd2 = NULL;
+ /* Per chip or per device ? */
+ chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
+ /* Mirrored table available ? */
+ if (md) {
+ if (td->pages[i] == -1 && md->pages[i] == -1) {
+ writeops = 0x03;
+ goto create;
+ }
+
+ if (td->pages[i] == -1) {
+ rd = md;
+ td->version[i] = md->version[i];
+ writeops = 1;
+ goto writecheck;
+ }
+
+ if (md->pages[i] == -1) {
+ rd = td;
+ md->version[i] = td->version[i];
+ writeops = 2;
+ goto writecheck;
+ }
+
+ if (td->version[i] == md->version[i]) {
+ rd = td;
+ if (!(td->options & NAND_BBT_VERSION))
+ rd2 = md;
+ goto writecheck;
+ }
+
+ if (((int8_t) (td->version[i] - md->version[i])) > 0) {
+ rd = td;
+ md->version[i] = td->version[i];
+ writeops = 2;
+ } else {
+ rd = md;
+ td->version[i] = md->version[i];
+ writeops = 1;
+ }
+
+ goto writecheck;
+
+ } else {
+ if (td->pages[i] == -1) {
+ writeops = 0x01;
+ goto create;
+ }
+ rd = td;
+ goto writecheck;
+ }
+ create:
+ /* Create the bad block table by scanning the device ? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ create_bbt(mtd, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ writecheck:
+ /* read back first ? */
+ if (rd)
+ read_abs_bbt(mtd, buf, rd, chipsel);
+ /* If they weren't versioned, read both. */
+ if (rd2)
+ read_abs_bbt(mtd, buf, rd2, chipsel);
+
+ /* Write the bad block table to the device ? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, td, md, chipsel);
+ if (res < 0)
+ return res;
+ }
+
+ /* Write the mirror bad block table to the device ? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, md, td, chipsel);
+ if (res < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+/**
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as "bad" to prevent
+ * accidental erasures / writes. The regions are identified by
+ * the mark 0x02.
+*/
+static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval, newval;
+
+ /* Do we have a bbt per chip ? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ } else {
+ chips = 1;
+ nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ }
+
+ for (i = 0; i < chips; i++) {
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1)
+ continue;
+ block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ block <<= 1;
+ oldval = this->bbt[(block >> 3)];
+ newval = oldval | (0x2 << (block & 0x06));
+ this->bbt[(block >> 3)] = newval;
+ if ((oldval != newval) && td->reserved_block_code)
+ nand_update_bbt(mtd, (loff_t)block <<
+ (this->bbt_erase_shift - 1));
+ continue;
+ }
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK)
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ else
+ block = i * nrblocks;
+ block <<= 1;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = this->bbt[(block >> 3)];
+ newval = oldval | (0x2 << (block & 0x06));
+ this->bbt[(block >> 3)] = newval;
+ if (oldval != newval)
+ update = 1;
+ block += 2;
+ }
+ /* If we want reserved blocks to be recorded to flash, and some
+ new ones have been marked, then we need to update the stored
+ bbts. This should only happen once. */
+ if (update && td->reserved_block_code)
+ nand_update_bbt(mtd, (loff_t)(block - 2) <<
+ (this->bbt_erase_shift - 1));
+ }
+}
+
+/**
+ * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks, if a bad block table(s) is/are already
+ * available. If not it scans the device for manufacturer
+ * marked good / bad blocks and writes the bad block table(s) to
+ * the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed
+ * by calling the nand_free_bbt function.
+ *
+*/
+int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+ int len, res = 0;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ len = mtd->size >> (this->bbt_erase_shift + 2);
+ /* Allocate memory (2bit per block) and clear the memory bad block table */
+ this->bbt = kzalloc(len, GFP_KERNEL);
+ if (!this->bbt) {
+ printk(KERN_ERR "nand_scan_bbt: Out of memory\n");
+ return -ENOMEM;
+ }
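+ /*
+ * Size example (illustrative): at 2 bits per block a 256 MiB device with
+ * 128 KiB erase blocks has 2048 blocks, so the table allocated above
+ * needs 512 bytes.
+ */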
+
+ /* If no primary table descriptor is given, scan the device
+ * to build a memory based bad block table
+ */
+ if (!td) {
+ if ((res = nand_memory_bbt(mtd, bd))) {
+ printk(KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n");
+ kfree(this->bbt);
+ this->bbt = NULL;
+ }
+ return res;
+ }
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = vmalloc(len);
+ if (!buf) {
+ printk(KERN_ERR "nand_bbt: Out of memory\n");
+ kfree(this->bbt);
+ this->bbt = NULL;
+ return -ENOMEM;
+ }
+
+ /* Is the bbt at a given page ? */
+ if (td->options & NAND_BBT_ABSPAGE) {
+ res = read_abs_bbts(mtd, buf, td, md);
+ } else {
+ /* Search the bad block table using a pattern in oob */
+ res = search_read_bbts(mtd, buf, td, md);
+ }
+
+ if (res)
+ res = check_create(mtd, buf, bd);
+
+ /* Prevent the bbt regions from erasing / writing */
+ mark_bbt_region(mtd, td);
+ if (md)
+ mark_bbt_region(mtd, md);
+
+ vfree(buf);
+ return res;
+}
+
+/**
+ * nand_update_bbt - [NAND Interface] update bad block table(s)
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s)
+*/
+int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd->priv;
+ int len, res = 0, writeops = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "nand_update_bbt: Out of memory\n");
+ return -ENOMEM;
+ }
+
+ writeops = md != NULL ? 0x03 : 0x01;
+
+ /* Do we have a bbt per chip ? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chip = (int)(offs >> this->chip_shift);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device ? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device ? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, md, td, chipsel);
+ }
+
+ out:
+ kfree(buf);
+ return res;
+}
+
+/* Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks. */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr smallpage_memorybased = {
+ .options = NAND_BBT_SCAN2NDPAGE,
+ .offs = 5,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+static struct nand_bbt_descr largepage_memorybased = {
+ .options = 0,
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern
+};
+
+static struct nand_bbt_descr smallpage_flashbased = {
+ .options = NAND_BBT_SCAN2NDPAGE,
+ .offs = 5,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+static struct nand_bbt_descr largepage_flashbased = {
+ .options = NAND_BBT_SCAN2NDPAGE,
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern
+};
+
+static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
+
+static struct nand_bbt_descr agand_flashbased = {
+ .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .offs = 0x20,
+ .len = 6,
+ .pattern = scan_agand_pattern
+};
+
+/* Generic flash bbt descriptors
+*/
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+/**
+ * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
+ * @mtd: MTD device structure
+ *
+ * This function selects the default bad block table
+ * support for the device and calls the nand_scan_bbt function
+ *
+*/
+int nand_default_bbt(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ /* Default for AG-AND. We must use a flash based
+ * bad block table as the devices have factory marked
+ * _good_ blocks. Erasing those blocks leads to loss
+ * of the good / bad information, so we _must_ store
+ * this information in a good / bad table during
+ * startup
+ */
+ if (this->options & NAND_IS_AND) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ this->options |= NAND_USE_FLASH_BBT;
+ return nand_scan_bbt(mtd, &agand_flashbased);
+ }
+
+ /* Is a flash based bad block table requested ? */
+ if (this->options & NAND_USE_FLASH_BBT) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ if (!this->badblock_pattern) {
+ this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
+ }
+ } else {
+ this->bbt_td = NULL;
+ this->bbt_md = NULL;
+ if (!this->badblock_pattern) {
+ this->badblock_pattern = (mtd->writesize > 512) ?
+ &largepage_memorybased : &smallpage_memorybased;
+ }
+ }
+ return nand_scan_bbt(mtd, this->badblock_pattern);
+}
+
+/**
+ * nand_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ *
+*/
+int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+ struct nand_chip *this = mtd->priv;
+ int block;
+ uint8_t res;
+
+ /* Get block number * 2 */
+ block = (int)(offs >> (this->bbt_erase_shift - 1));
+ res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+
+ MTDDEBUG (MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: "
+ "(block %d) 0x%02x\n", (unsigned int)offs, res, block >> 1);
+
+ switch ((int)res) {
+ case 0x00:
+ return 0;
+ case 0x01:
+ return 1;
+ case 0x02:
+ return allowbbt ? 0 : 1;
+ }
+ return 1;
+}
diff --git a/u-boot/drivers/mtd/nand/nand_ecc.c b/u-boot/drivers/mtd/nand/nand_ecc.c
new file mode 100644
index 0000000..52bc916
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/nand_ecc.c
@@ -0,0 +1,202 @@
+/*
+ * This file contains an ECC algorithm from Toshiba that detects and
+ * corrects 1 bit errors in a 256 byte block of data.
+ *
+ * drivers/mtd/nand/nand_ecc.c
+ *
+ * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com)
+ * Toshiba America Electronics Components, Inc.
+ *
+ * Copyright (C) 2006 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this file; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * As a special exception, if other files instantiate templates or use
+ * macros or inline functions from these files, or you compile these
+ * files and link them with other works to produce a work based on these
+ * files, these files do not by themselves cause the resulting work to be
+ * covered by the GNU General Public License. However the source code for
+ * these files must still be made available in accordance with section (3)
+ * of the GNU General Public License.
+ *
+ * This exception does not invalidate any other reasons why a work based on
+ * this file might be covered by the GNU General Public License.
+ */
+
+#include <common.h>
+
+#include <asm/errno.h>
+#include <linux/mtd/mtd.h>
+
+/* The PPC4xx NDFC uses Smart Media (SMC) byte order */
+#ifdef CONFIG_NAND_NDFC
+#define CONFIG_MTD_NAND_ECC_SMC
+#endif
+
+/*
+ * NAND-SPL has no software ECC for now, so don't include nand_calculate_ecc();
+ * only nand_correct_data() is needed.
+ */
+
+#ifndef CONFIG_NAND_SPL
+/*
+ * Pre-calculated 256-way 1 byte column parity
+ */
+static const u_char nand_ecc_precalc_table[] = {
+ 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00,
+ 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
+ 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
+ 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
+ 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
+ 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
+ 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
+ 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
+ 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
+ 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
+ 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
+ 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
+ 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
+ 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
+ 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
+ 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00
+};
+
+/**
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block
+ * @mtd: MTD block structure
+ * @dat: raw data
+ * @ecc_code: buffer for ECC
+ */
+int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ uint8_t idx, reg1, reg2, reg3, tmp1, tmp2;
+ int i;
+
+ /* Initialize variables */
+ reg1 = reg2 = reg3 = 0;
+
+ /* Build up column parity */
+ for(i = 0; i < 256; i++) {
+ /* Get CP0 - CP5 from table */
+ idx = nand_ecc_precalc_table[*dat++];
+ reg1 ^= (idx & 0x3f);
+
+ /* All bit XOR = 1 ? */
+ if (idx & 0x40) {
+ reg3 ^= (uint8_t) i;
+ reg2 ^= ~((uint8_t) i);
+ }
+ }
+
+ /* Create non-inverted ECC code from line parity */
+ tmp1 = (reg3 & 0x80) >> 0; /* B7 -> B7 */
+ tmp1 |= (reg2 & 0x80) >> 1; /* B7 -> B6 */
+ tmp1 |= (reg3 & 0x40) >> 1; /* B6 -> B5 */
+ tmp1 |= (reg2 & 0x40) >> 2; /* B6 -> B4 */
+ tmp1 |= (reg3 & 0x20) >> 2; /* B5 -> B3 */
+ tmp1 |= (reg2 & 0x20) >> 3; /* B5 -> B2 */
+ tmp1 |= (reg3 & 0x10) >> 3; /* B4 -> B1 */
+ tmp1 |= (reg2 & 0x10) >> 4; /* B4 -> B0 */
+
+ tmp2 = (reg3 & 0x08) << 4; /* B3 -> B7 */
+ tmp2 |= (reg2 & 0x08) << 3; /* B3 -> B6 */
+ tmp2 |= (reg3 & 0x04) << 3; /* B2 -> B5 */
+ tmp2 |= (reg2 & 0x04) << 2; /* B2 -> B4 */
+ tmp2 |= (reg3 & 0x02) << 2; /* B1 -> B3 */
+ tmp2 |= (reg2 & 0x02) << 1; /* B1 -> B2 */
+ tmp2 |= (reg3 & 0x01) << 1; /* B0 -> B1 */
+ tmp2 |= (reg2 & 0x01) << 0; /* B7 -> B0 */
+
+ /* Calculate final ECC code */
+#ifdef CONFIG_MTD_NAND_ECC_SMC
+ ecc_code[0] = ~tmp2;
+ ecc_code[1] = ~tmp1;
+#else
+ ecc_code[0] = ~tmp1;
+ ecc_code[1] = ~tmp2;
+#endif
+ ecc_code[2] = ((~reg1) << 2) | 0x03;
+
+ return 0;
+}
+#endif /* CONFIG_NAND_SPL */
+
+static inline int countbits(uint32_t byte)
+{
+ int res = 0;
+
+ for (;byte; byte >>= 1)
+ res += byte & 0x01;
+ return res;
+}
+
+/**
+ * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @dat: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct a 1 bit error for 256 byte block
+ */
+int nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ uint8_t s0, s1, s2;
+
+#ifdef CONFIG_MTD_NAND_ECC_SMC
+ s0 = calc_ecc[0] ^ read_ecc[0];
+ s1 = calc_ecc[1] ^ read_ecc[1];
+ s2 = calc_ecc[2] ^ read_ecc[2];
+#else
+ s1 = calc_ecc[0] ^ read_ecc[0];
+ s0 = calc_ecc[1] ^ read_ecc[1];
+ s2 = calc_ecc[2] ^ read_ecc[2];
+#endif
+ if ((s0 | s1 | s2) == 0)
+ return 0;
+
+ /* Check for a single bit error */
+ if( ((s0 ^ (s0 >> 1)) & 0x55) == 0x55 &&
+ ((s1 ^ (s1 >> 1)) & 0x55) == 0x55 &&
+ ((s2 ^ (s2 >> 1)) & 0x54) == 0x54) {
+
+ uint32_t byteoffs, bitnum;
+
+ byteoffs = (s1 << 0) & 0x80;
+ byteoffs |= (s1 << 1) & 0x40;
+ byteoffs |= (s1 << 2) & 0x20;
+ byteoffs |= (s1 << 3) & 0x10;
+
+ byteoffs |= (s0 >> 4) & 0x08;
+ byteoffs |= (s0 >> 3) & 0x04;
+ byteoffs |= (s0 >> 2) & 0x02;
+ byteoffs |= (s0 >> 1) & 0x01;
+
+ bitnum = (s2 >> 5) & 0x04;
+ bitnum |= (s2 >> 4) & 0x02;
+ bitnum |= (s2 >> 3) & 0x01;
+
+ dat[byteoffs] ^= (1 << bitnum);
+
+ return 1;
+ }
+
+ if(countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 <<16)) == 1)
+ return 1;
+
+ return -EBADMSG;
+}
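A minimal sketch of how the two routines above are usually paired over one 256-byte chunk. The buffer contents and the injected bit flip are invented for illustration; the mtd pointer is not dereferenced by these software routines, so NULL is passed here:

#include <string.h>

struct mtd_info;	/* opaque; unused by the software ECC routines above */

int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *dat,
		       unsigned char *ecc_code);
int nand_correct_data(struct mtd_info *mtd, unsigned char *dat,
		      unsigned char *read_ecc, unsigned char *calc_ecc);

int ecc_roundtrip_demo(void)
{
	unsigned char buf[256], stored_ecc[3], new_ecc[3];

	memset(buf, 0xA5, sizeof(buf));
	nand_calculate_ecc(NULL, buf, stored_ecc);	/* as if written to flash */

	buf[17] ^= 0x08;				/* simulate a single-bit read error */
	nand_calculate_ecc(NULL, buf, new_ecc);

	/* Repairs buf[17] and returns 1 for a correctable single-bit error */
	return nand_correct_data(NULL, buf, stored_ecc, new_ecc);
}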
diff --git a/u-boot/drivers/mtd/nand/nand_ids.c b/u-boot/drivers/mtd/nand/nand_ids.c
new file mode 100644
index 0000000..8d7ea76
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/nand_ids.c
@@ -0,0 +1,146 @@
+/*
+ * drivers/mtd/nandids.c
+ *
+ * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <common.h>
+#include <linux/mtd/nand.h>
+/*
+* Chip ID list
+*
+* Name, ID code, pagesize, chipsize in MegaByte, eraseblock size,
+* options
+*
+* Pagesize: 0, 256, 512
+*	0	get this information from the extended chip ID
+*	256	256 Byte page size
+*	512	512 Byte page size
+*/
+const struct nand_flash_dev nand_flash_ids[] = {
+
+#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
+ {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0},
+ {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0},
+ {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0},
+ {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0},
+ {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0},
+ {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0},
+ {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0},
+ {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0},
+ {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0},
+ {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0},
+
+ {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0},
+ {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0},
+ {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
+ {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
+#endif
+
+ {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0},
+ {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
+ {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0},
+ {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0},
+ {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0},
+ {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0},
+ {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0},
+ {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0},
+ {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0},
+ {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0},
+
+ /*
+	 * These are the new chips with large page size. The pagesize and the
+	 * erasesize are determined from the extended id bytes.
+ */
+#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
+
+	/* 512 Megabit */
+ {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
+ {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
+
+ /* 1 Gigabit */
+ {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
+ {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
+ {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
+ {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
+ {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
+
+ /* 2 Gigabit */
+ {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS},
+ {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, LP_OPTIONS},
+ {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, LP_OPTIONS16},
+ {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, LP_OPTIONS16},
+
+ /* 4 Gigabit */
+ {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, LP_OPTIONS},
+ {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, LP_OPTIONS},
+ {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, LP_OPTIONS16},
+ {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, LP_OPTIONS16},
+
+ /* 8 Gigabit */
+ {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, LP_OPTIONS},
+ {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, LP_OPTIONS},
+ {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, LP_OPTIONS16},
+ {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, LP_OPTIONS16},
+
+ /* 16 Gigabit */
+ {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, LP_OPTIONS},
+ {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, LP_OPTIONS},
+ {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16},
+ {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16},
+
+ /*
+	 * Renesas AND 1 Gigabit. Those chips do not support extended id and
+	 * have a strange page/block layout! The chosen minimum erasesize is
+	 * 4 * 2 * 2048 = 16384 bytes, as those chips have an array of 4 page
+	 * planes and 1 block = 2 pages; due to the plane arrangement, blocks
+	 * 0-3 consist of pages 0 + 4, 1 + 5, 2 + 6 and 3 + 7. Anyway, JFFS2
+	 * would increase the eraseblock size, so we chose a combined one that
+	 * can be erased in one go. Further speed improvements for reads and
+	 * writes are possible, but not implemented for now.
+ */
+ {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
+ NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
+ BBT_AUTO_REFRESH
+ },
+
+ {NULL,}
+};
+
+/*
+* Manufacturer ID list
+*/
+const struct nand_manufacturers nand_manuf_ids[] = {
+ {NAND_MFR_TOSHIBA, "Toshiba"},
+ {NAND_MFR_SAMSUNG, "Samsung"},
+ {NAND_MFR_FUJITSU, "Fujitsu"},
+ {NAND_MFR_NATIONAL, "National"},
+ {NAND_MFR_RENESAS, "Renesas"},
+ {NAND_MFR_STMICRO, "ST Micro"},
+ {NAND_MFR_HYNIX, "Hynix"},
+ {NAND_MFR_MICRON, "Micron"},
+ {NAND_MFR_AMD, "AMD"},
+ {0x0, "Unknown"}
+};
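A minimal sketch of how these tables are typically consulted to turn a raw device-ID byte into a name; the helper below is illustrative only and is not the actual lookup done in nand_get_flash_type():

#include <common.h>
#include <linux/mtd/nand.h>

/* Return the human-readable name for a raw device-ID byte, or NULL if unknown */
static const char *lookup_nand_name(unsigned char dev_id)
{
	int i;

	for (i = 0; nand_flash_ids[i].name != NULL; i++)
		if (nand_flash_ids[i].id == dev_id)
			return nand_flash_ids[i].name;

	return NULL;
}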
diff --git a/u-boot/drivers/mtd/nand/nand_plat.c b/u-boot/drivers/mtd/nand/nand_plat.c
new file mode 100644
index 0000000..37a0206
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/nand_plat.c
@@ -0,0 +1,64 @@
+/*
+ * Genericish driver for memory mapped NAND devices
+ *
+ * Copyright (c) 2006-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+/* Your board must implement the following macros:
+ * NAND_PLAT_WRITE_CMD(chip, cmd)
+ * NAND_PLAT_WRITE_ADR(chip, cmd)
+ * NAND_PLAT_INIT()
+ *
+ * It may also implement the following:
+ * NAND_PLAT_DEV_READY(chip)
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#ifdef NAND_PLAT_GPIO_DEV_READY
+# include <asm/gpio.h>
+# define NAND_PLAT_DEV_READY(chip) gpio_get_value(NAND_PLAT_GPIO_DEV_READY)
+#endif
+
+#include <nand.h>
+
+static void plat_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ NAND_PLAT_WRITE_CMD(this, cmd);
+ else
+ NAND_PLAT_WRITE_ADR(this, cmd);
+}
+
+#ifdef NAND_PLAT_DEV_READY
+static int plat_dev_ready(struct mtd_info *mtd)
+{
+ return NAND_PLAT_DEV_READY((struct nand_chip *)mtd->priv);
+}
+#else
+# define plat_dev_ready NULL
+#endif
+
+int board_nand_init(struct nand_chip *nand)
+{
+#ifdef NAND_PLAT_GPIO_DEV_READY
+ gpio_request(NAND_PLAT_GPIO_DEV_READY, "nand-plat");
+ gpio_direction_input(NAND_PLAT_GPIO_DEV_READY);
+#endif
+
+#ifdef NAND_PLAT_INIT
+ NAND_PLAT_INIT();
+#endif
+
+ nand->cmd_ctrl = plat_cmd_ctrl;
+ nand->dev_ready = plat_dev_ready;
+ nand->ecc.mode = NAND_ECC_SOFT;
+
+ return 0;
+}
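The header comment of nand_plat.c lists the macros a board configuration has to provide. A minimal sketch of what such a definition could look like for a simple memory-mapped chip; the base address, register offsets and GPIO number below are invented for illustration:

/* Hypothetical include/configs/<board>.h fragment for the nand_plat driver */
#include <asm/io.h>

#define BOARD_NAND_BASE		0x30000000UL		/* made-up chip base address */

#define NAND_PLAT_WRITE_CMD(chip, cmd) \
	writeb(cmd, (void *)(BOARD_NAND_BASE + 0x10))	/* latch via CLE address */
#define NAND_PLAT_WRITE_ADR(chip, cmd) \
	writeb(cmd, (void *)(BOARD_NAND_BASE + 0x08))	/* latch via ALE address */
#define NAND_PLAT_INIT()	do { /* board-specific pin mux, if any */ } while (0)

/* Optional: ready/busy line wired to a GPIO */
#define NAND_PLAT_GPIO_DEV_READY	42		/* made-up GPIO number */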
diff --git a/u-boot/drivers/mtd/nand/nand_util.c b/u-boot/drivers/mtd/nand/nand_util.c
new file mode 100644
index 0000000..8b4f738
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/nand_util.c
@@ -0,0 +1,655 @@
+/*
+ * drivers/mtd/nand/nand_util.c
+ *
+ * Copyright (C) 2006 by Weiss-Electronic GmbH.
+ * All rights reserved.
+ *
+ * @author: Guido Classen <clagix@gmail.com>
+ * @descr: NAND Flash support
+ * @references: borrowed heavily from Linux mtd-utils code:
+ * flash_eraseall.c by Arcom Control System Ltd
+ * nandwrite.c by Steven J. Hill (sjhill@realitydiluted.com)
+ * and Thomas Gleixner (tglx@linutronix.de)
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * Copyright 2010 Freescale Semiconductor
+ * The portions of this file whose copyright is held by Freescale and which
+ * are not considered a derived work of GPL v2-only code may be distributed
+ * and/or modified under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#include <common.h>
+#include <command.h>
+#include <watchdog.h>
+#include <malloc.h>
+#include <div64.h>
+
+#include <asm/errno.h>
+#include <linux/mtd/mtd.h>
+#include <nand.h>
+#include <jffs2/jffs2.h>
+
+typedef struct erase_info erase_info_t;
+typedef struct mtd_info mtd_info_t;
+
+/* support only for native endian JFFS2 */
+#define cpu_to_je16(x) (x)
+#define cpu_to_je32(x) (x)
+
+/*****************************************************************************/
+static int nand_block_bad_scrub(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ return 0;
+}
+
+/**
+ * nand_erase_opts: - erase NAND flash with support for various options
+ *		(JFFS2 formatting)
+ *
+ * @param meminfo NAND device to erase
+ * @param opts options, @see struct nand_erase_options
+ * @return 0 in case of success
+ *
+ * This code is ported from flash_eraseall.c from Linux mtd utils by
+ * Arcom Control System Ltd.
+ */
+int nand_erase_opts(nand_info_t *meminfo, const nand_erase_options_t *opts)
+{
+ struct jffs2_unknown_node cleanmarker;
+ erase_info_t erase;
+ unsigned long erase_length, erased_length; /* in blocks */
+ int bbtest = 1;
+ int result;
+ int percent_complete = -1;
+ int (*nand_block_bad_old)(struct mtd_info *, loff_t, int) = NULL;
+ const char *mtd_device = meminfo->name;
+ struct mtd_oob_ops oob_opts;
+ struct nand_chip *chip = meminfo->priv;
+
+ if ((opts->offset & (meminfo->writesize - 1)) != 0) {
+ printf("Attempt to erase non page aligned data\n");
+ return -1;
+ }
+
+ memset(&erase, 0, sizeof(erase));
+ memset(&oob_opts, 0, sizeof(oob_opts));
+
+ erase.mtd = meminfo;
+ erase.len = meminfo->erasesize;
+ erase.addr = opts->offset;
+ erase_length = lldiv(opts->length + meminfo->erasesize - 1,
+ meminfo->erasesize);
+
+ cleanmarker.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
+ cleanmarker.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
+ cleanmarker.totlen = cpu_to_je32(8);
+
+	/* The scrub option allows erasing bad blocks. To bypass the internal
+	 * bad block check in the erase() method, set the block check method
+	 * to a dummy and disable the bad block table while erasing.
+ */
+ if (opts->scrub) {
+ struct nand_chip *priv_nand = meminfo->priv;
+
+ nand_block_bad_old = priv_nand->block_bad;
+ priv_nand->block_bad = nand_block_bad_scrub;
+ /* we don't need the bad block table anymore...
+ * after scrub, there are no bad blocks left!
+ */
+ if (priv_nand->bbt) {
+ kfree(priv_nand->bbt);
+ }
+ priv_nand->bbt = NULL;
+ }
+
+ for (erased_length = 0;
+ erased_length < erase_length;
+ erase.addr += meminfo->erasesize) {
+
+ WATCHDOG_RESET ();
+
+ if (!opts->scrub && bbtest) {
+ int ret = meminfo->block_isbad(meminfo, erase.addr);
+ if (ret > 0) {
+ if (!opts->quiet)
+ printf("\rSkipping bad block at "
+ "0x%08llx "
+ " \n",
+ erase.addr);
+
+ if (!opts->spread)
+ erased_length++;
+
+ continue;
+
+ } else if (ret < 0) {
+ printf("\n%s: MTD get bad block failed: %d\n",
+ mtd_device,
+ ret);
+ return -1;
+ }
+ }
+
+ erased_length++;
+
+ result = meminfo->erase(meminfo, &erase);
+ if (result != 0) {
+ printf("\n%s: MTD Erase failure: %d\n",
+ mtd_device, result);
+ continue;
+ }
+
+ /* format for JFFS2 ? */
+ if (opts->jffs2 && chip->ecc.layout->oobavail >= 8) {
+ chip->ops.ooblen = 8;
+ chip->ops.datbuf = NULL;
+ chip->ops.oobbuf = (uint8_t *)&cleanmarker;
+ chip->ops.ooboffs = 0;
+ chip->ops.mode = MTD_OOB_AUTO;
+
+ result = meminfo->write_oob(meminfo,
+ erase.addr,
+ &chip->ops);
+ if (result != 0) {
+ printf("\n%s: MTD writeoob failure: %d\n",
+ mtd_device, result);
+ continue;
+ }
+ }
+
+ if (!opts->quiet) {
+ unsigned long long n = erased_length * 100ULL;
+ int percent;
+
+ do_div(n, erase_length);
+ percent = (int)n;
+
+ /* output progress message only at whole percent
+ * steps to reduce the number of messages printed
+ * on (slow) serial consoles
+ */
+ if (percent != percent_complete) {
+ percent_complete = percent;
+
+ printf("\rErasing at 0x%llx -- %3d%% complete.",
+ erase.addr, percent);
+
+ if (opts->jffs2 && result == 0)
+ printf(" Cleanmarker written at 0x%llx.",
+ erase.addr);
+ }
+ }
+ }
+ if (!opts->quiet)
+ printf("\n");
+
+ if (nand_block_bad_old) {
+ struct nand_chip *priv_nand = meminfo->priv;
+
+ priv_nand->block_bad = nand_block_bad_old;
+ priv_nand->scan_bbt(meminfo);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CMD_NAND_LOCK_UNLOCK
+
+/******************************************************************************
+ * Support for locking / unlocking operations of some NAND devices
+ *****************************************************************************/
+
+#define NAND_CMD_LOCK 0x2a
+#define NAND_CMD_LOCK_TIGHT 0x2c
+#define NAND_CMD_UNLOCK1 0x23
+#define NAND_CMD_UNLOCK2 0x24
+#define NAND_CMD_LOCK_STATUS 0x7a
+
+/**
+ * nand_lock: Set all pages of NAND flash chip to the LOCK or LOCK-TIGHT
+ * state
+ *
+ * @param mtd nand mtd instance
+ * @param tight bring device in lock tight mode
+ *
+ * @return 0 on success, -1 in case of error
+ *
+ * The lock / lock-tight command only applies to the whole chip. To get some
+ * parts of the chip locked and others unlocked, use the following sequence:
+ *
+ * - Lock all pages of the chip using nand_lock(mtd, 0) (or the lockpre pin)
+ * - Call nand_unlock() once for each consecutive area to be unlocked
+ * - If desired: Bring the chip to the lock-tight state using nand_lock(mtd, 1)
+ *
+ * If the device is in the lock-tight state, software can't change the
+ * currently active lock/unlock state of any page; nand_lock() / nand_unlock()
+ * calls will fail. It is only possible to leave the lock-tight state by
+ * a hardware signal (low pulse on the _WP pin) or by powering the device down.
+ */
+int nand_lock(struct mtd_info *mtd, int tight)
+{
+ int ret = 0;
+ int status;
+ struct nand_chip *chip = mtd->priv;
+
+ /* select the NAND device */
+ chip->select_chip(mtd, 0);
+
+ chip->cmdfunc(mtd,
+ (tight ? NAND_CMD_LOCK_TIGHT : NAND_CMD_LOCK),
+ -1, -1);
+
+ /* call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+
+ /* see if device thinks it succeeded */
+ if (status & 0x01) {
+ ret = -1;
+ }
+
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+ return ret;
+}
+
+/**
+ * nand_get_lock_status: - query current lock state from one page of NAND
+ * flash
+ *
+ * @param mtd nand mtd instance
+ * @param offset	page address to query (must be page aligned!)
+ *
+ * @return -1 in case of error
+ * >0 lock status:
+ * bitfield with the following combinations:
+ * NAND_LOCK_STATUS_TIGHT: page in tight state
+ * NAND_LOCK_STATUS_LOCK: page locked
+ * NAND_LOCK_STATUS_UNLOCK: page unlocked
+ *
+ */
+int nand_get_lock_status(struct mtd_info *mtd, loff_t offset)
+{
+ int ret = 0;
+ int chipnr;
+ int page;
+ struct nand_chip *chip = mtd->priv;
+
+ /* select the NAND device */
+ chipnr = (int)(offset >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+
+ if ((offset & (mtd->writesize - 1)) != 0) {
+ printf ("nand_get_lock_status: "
+ "Start address must be beginning of "
+ "nand page!\n");
+ ret = -1;
+ goto out;
+ }
+
+ /* check the Lock Status */
+ page = (int)(offset >> chip->page_shift);
+ chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);
+
+ ret = chip->read_byte(mtd) & (NAND_LOCK_STATUS_TIGHT
+ | NAND_LOCK_STATUS_LOCK
+ | NAND_LOCK_STATUS_UNLOCK);
+
+ out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+ return ret;
+}
+
+/**
+ * nand_unlock: - Unlock area of NAND pages
+ * only one consecutive area can be unlocked at one time!
+ *
+ * @param mtd nand mtd instance
+ * @param start start byte address
+ * @param length number of bytes to unlock (must be a multiple of
+ * page size nand->writesize)
+ *
+ * @return 0 on success, -1 in case of error
+ */
+int nand_unlock(struct mtd_info *mtd, ulong start, ulong length)
+{
+ int ret = 0;
+ int chipnr;
+ int status;
+ int page;
+ struct nand_chip *chip = mtd->priv;
+ printf ("nand_unlock: start: %08x, length: %d!\n",
+ (int)start, (int)length);
+
+ /* select the NAND device */
+ chipnr = (int)(start >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* check the WP bit */
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ if (!(chip->read_byte(mtd) & NAND_STATUS_WP)) {
+ printf ("nand_unlock: Device is write protected!\n");
+ ret = -1;
+ goto out;
+ }
+
+ if ((start & (mtd->erasesize - 1)) != 0) {
+ printf ("nand_unlock: Start address must be beginning of "
+ "nand block!\n");
+ ret = -1;
+ goto out;
+ }
+
+ if (length == 0 || (length & (mtd->erasesize - 1)) != 0) {
+ printf ("nand_unlock: Length must be a multiple of nand block "
+ "size %08x!\n", mtd->erasesize);
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * Set length so that the last address is set to the
+ * starting address of the last block
+ */
+ length -= mtd->erasesize;
+
+ /* submit address of first page to unlock */
+ page = (int)(start >> chip->page_shift);
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+ /* submit ADDRESS of LAST page to unlock */
+ page += (int)(length >> chip->page_shift);
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, page & chip->pagemask);
+
+ /* call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ /* see if device thinks it succeeded */
+ if (status & 0x01) {
+ /* there was an error */
+ ret = -1;
+ goto out;
+ }
+
+ out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+ return ret;
+}
+#endif
+
+/**
+ * check_skip_len
+ *
+ * Check if there are any bad blocks, and whether length including bad
+ * blocks fits into device
+ *
+ * @param nand NAND device
+ * @param offset offset in flash
+ * @param length image length
+ * @return 0 if the image fits and there are no bad blocks
+ * 1 if the image fits, but there are bad blocks
+ * -1 if the image does not fit
+ */
+static int check_skip_len(nand_info_t *nand, loff_t offset, size_t length)
+{
+ size_t len_excl_bad = 0;
+ int ret = 0;
+
+ while (len_excl_bad < length) {
+ size_t block_len, block_off;
+ loff_t block_start;
+
+ if (offset >= nand->size)
+ return -1;
+
+ block_start = offset & ~(loff_t)(nand->erasesize - 1);
+ block_off = offset & (nand->erasesize - 1);
+ block_len = nand->erasesize - block_off;
+
+ if (!nand_block_isbad(nand, block_start))
+ len_excl_bad += block_len;
+ else
+ ret = 1;
+
+ offset += block_len;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_write_skip_bad:
+ *
+ * Write image to NAND flash.
+ * Blocks that are marked bad are skipped and the data is written to the
+ * next block instead, as long as the image is short enough to fit even
+ * after skipping the bad blocks.
+ *
+ * @param nand NAND device
+ * @param offset offset in flash
+ * @param length buffer length
+ * @param buffer buffer to read from
+ * @param withoob	whether to write with the yaffs format (data + OOB)
+ * @return 0 in case of success
+ */
+int nand_write_skip_bad(nand_info_t *nand, loff_t offset, size_t *length,
+ u_char *buffer, int withoob)
+{
+ int rval = 0, blocksize;
+ size_t left_to_write = *length;
+ u_char *p_buffer = buffer;
+ int need_skip;
+
+#ifdef CONFIG_CMD_NAND_YAFFS
+ if (withoob) {
+ int pages;
+ pages = nand->erasesize / nand->writesize;
+ blocksize = (pages * nand->oobsize) + nand->erasesize;
+ if (*length % (nand->writesize + nand->oobsize)) {
+ printf ("Attempt to write incomplete page"
+ " in yaffs mode\n");
+ return -EINVAL;
+ }
+ } else
+#endif
+ {
+ blocksize = nand->erasesize;
+ }
+
+ /*
+ * nand_write() handles unaligned, partial page writes.
+ *
+ * We allow length to be unaligned, for convenience in
+ * using the $filesize variable.
+ *
+ * However, starting at an unaligned offset makes the
+ * semantics of bad block skipping ambiguous (really,
+ * you should only start a block skipping access at a
+ * partition boundary). So don't try to handle that.
+ */
+ if ((offset & (nand->writesize - 1)) != 0) {
+ printf ("Attempt to write non page aligned data\n");
+ *length = 0;
+ return -EINVAL;
+ }
+
+ need_skip = check_skip_len(nand, offset, *length);
+ if (need_skip < 0) {
+ printf ("Attempt to write outside the flash area\n");
+ *length = 0;
+ return -EINVAL;
+ }
+
+ if (!need_skip) {
+ rval = nand_write (nand, offset, length, buffer);
+ if (rval == 0)
+ return 0;
+
+ *length = 0;
+ printf ("NAND write to offset %llx failed %d\n",
+ offset, rval);
+ return rval;
+ }
+
+ while (left_to_write > 0) {
+ size_t block_offset = offset & (nand->erasesize - 1);
+ size_t write_size;
+
+ WATCHDOG_RESET ();
+
+ if (nand_block_isbad (nand, offset & ~(nand->erasesize - 1))) {
+ printf ("Skip bad block 0x%08llx\n",
+ offset & ~(nand->erasesize - 1));
+ offset += nand->erasesize - block_offset;
+ continue;
+ }
+
+ if (left_to_write < (blocksize - block_offset))
+ write_size = left_to_write;
+ else
+ write_size = blocksize - block_offset;
+
+#ifdef CONFIG_CMD_NAND_YAFFS
+ if (withoob) {
+ int page, pages;
+ size_t pagesize = nand->writesize;
+ size_t pagesize_oob = pagesize + nand->oobsize;
+ struct mtd_oob_ops ops;
+
+ ops.len = pagesize;
+ ops.ooblen = nand->oobsize;
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooboffs = 0;
+
+ pages = write_size / pagesize_oob;
+ for (page = 0; page < pages; page++) {
+ ops.datbuf = p_buffer;
+ ops.oobbuf = ops.datbuf + pagesize;
+
+ rval = nand->write_oob(nand, offset, &ops);
+				if (rval != 0)
+					break;
+
+ offset += pagesize;
+ p_buffer += pagesize_oob;
+ }
+ }
+ else
+#endif
+ {
+ rval = nand_write (nand, offset, &write_size, p_buffer);
+ offset += write_size;
+ p_buffer += write_size;
+ }
+
+ if (rval != 0) {
+ printf ("NAND write to offset %llx failed %d\n",
+ offset, rval);
+ *length -= left_to_write;
+ return rval;
+ }
+
+ left_to_write -= write_size;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_read_skip_bad:
+ *
+ * Read image from NAND flash.
+ * Blocks that are marked bad are skipped and the data is read from the next
+ * block instead, as long as the image is short enough to fit even after
+ * skipping the bad blocks.
+ *
+ * @param nand NAND device
+ * @param offset offset in flash
+ * @param length buffer length, on return holds remaining bytes to read
+ * @param buffer buffer to write to
+ * @return 0 in case of success
+ */
+int nand_read_skip_bad(nand_info_t *nand, loff_t offset, size_t *length,
+ u_char *buffer)
+{
+ int rval;
+ size_t left_to_read = *length;
+ u_char *p_buffer = buffer;
+ int need_skip;
+
+ if ((offset & (nand->writesize - 1)) != 0) {
+ printf ("Attempt to read non page aligned data\n");
+ *length = 0;
+ return -EINVAL;
+ }
+
+ need_skip = check_skip_len(nand, offset, *length);
+ if (need_skip < 0) {
+ printf ("Attempt to read outside the flash area\n");
+ *length = 0;
+ return -EINVAL;
+ }
+
+ if (!need_skip) {
+ rval = nand_read (nand, offset, length, buffer);
+ if (!rval || rval == -EUCLEAN)
+ return 0;
+
+ *length = 0;
+ printf ("NAND read from offset %llx failed %d\n",
+ offset, rval);
+ return rval;
+ }
+
+ while (left_to_read > 0) {
+ size_t block_offset = offset & (nand->erasesize - 1);
+ size_t read_length;
+
+ WATCHDOG_RESET ();
+
+ if (nand_block_isbad (nand, offset & ~(nand->erasesize - 1))) {
+ printf ("Skipping bad block 0x%08llx\n",
+ offset & ~(nand->erasesize - 1));
+ offset += nand->erasesize - block_offset;
+ continue;
+ }
+
+ if (left_to_read < (nand->erasesize - block_offset))
+ read_length = left_to_read;
+ else
+ read_length = nand->erasesize - block_offset;
+
+ rval = nand_read (nand, offset, &read_length, p_buffer);
+ if (rval && rval != -EUCLEAN) {
+ printf ("NAND read from offset %llx failed %d\n",
+ offset, rval);
+ *length -= left_to_read;
+ return rval;
+ }
+
+ left_to_read -= read_length;
+ offset += read_length;
+ p_buffer += read_length;
+ }
+
+ return 0;
+}
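A minimal sketch of how a caller might drive the skip-bad helpers above, for example from a board-specific update routine; the device index, offset and buffer are placeholders:

#include <common.h>
#include <nand.h>

/* Write an image to offset 0x80000 of NAND device 0, skipping bad blocks */
static int write_image_demo(u_char *image, size_t image_len)
{
	nand_info_t *nand = &nand_info[0];
	size_t len = image_len;		/* on error, reports how much was written */
	int ret;

	ret = nand_write_skip_bad(nand, 0x80000, &len, image, 0);
	if (ret)
		printf("wrote only %u of %u bytes (err %d)\n",
		       (unsigned int)len, (unsigned int)image_len, ret);

	return ret;
}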
diff --git a/u-boot/drivers/mtd/nand/ndfc.c b/u-boot/drivers/mtd/nand/ndfc.c
new file mode 100644
index 0000000..0729e0c
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/ndfc.c
@@ -0,0 +1,217 @@
+/*
+ * Overview:
+ * Platform independent driver for NDFC (NanD Flash Controller)
+ * integrated into IBM/AMCC PPC4xx cores
+ *
+ * (C) Copyright 2006-2009
+ * Stefan Roese, DENX Software Engineering, sr@denx.de.
+ *
+ * Based on original work by
+ * Thomas Gleixner
+ * Copyright 2006 IBM
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <linux/mtd/ndfc.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/ppc4xx.h>
+
+/*
+ * We need to store which chip-select (CS) is used for each chip number.
+ * For example, on Sequoia NAND chip #0 uses CS #3.
+ */
+static int ndfc_cs[NDFC_MAX_BANKS];
+
+static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ ulong base = (ulong) this->IO_ADDR_W & 0xffffff00;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ out_8((u8 *)(base + NDFC_CMD), cmd & 0xFF);
+ else
+ out_8((u8 *)(base + NDFC_ALE), cmd & 0xFF);
+}
+
+static int ndfc_dev_ready(struct mtd_info *mtdinfo)
+{
+ struct nand_chip *this = mtdinfo->priv;
+ ulong base = (ulong) this->IO_ADDR_W & 0xffffff00;
+
+ return (in_be32((u32 *)(base + NDFC_STAT)) & NDFC_STAT_IS_READY);
+}
+
+static void ndfc_enable_hwecc(struct mtd_info *mtdinfo, int mode)
+{
+ struct nand_chip *this = mtdinfo->priv;
+ ulong base = (ulong) this->IO_ADDR_W & 0xffffff00;
+ u32 ccr;
+
+ ccr = in_be32((u32 *)(base + NDFC_CCR));
+ ccr |= NDFC_CCR_RESET_ECC;
+ out_be32((u32 *)(base + NDFC_CCR), ccr);
+}
+
+static int ndfc_calculate_ecc(struct mtd_info *mtdinfo,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct nand_chip *this = mtdinfo->priv;
+ ulong base = (ulong) this->IO_ADDR_W & 0xffffff00;
+ u32 ecc;
+ u8 *p = (u8 *)&ecc;
+
+ ecc = in_be32((u32 *)(base + NDFC_ECC));
+
+	/* The NDFC uses Smart Media (SMC) byte order */
+ ecc_code[0] = p[1];
+ ecc_code[1] = p[2];
+ ecc_code[2] = p[3];
+
+ return 0;
+}
+
+/*
+ * Speedups for buffer read/write/verify
+ *
+ * NDFC allows 32bit read/write of data. So we can speed up the buffer
+ * functions. No further checking, as nand_base will always read/write
+ * page aligned.
+ */
+static void ndfc_read_buf(struct mtd_info *mtdinfo, uint8_t *buf, int len)
+{
+ struct nand_chip *this = mtdinfo->priv;
+ ulong base = (ulong) this->IO_ADDR_W & 0xffffff00;
+ uint32_t *p = (uint32_t *) buf;
+
+ for (;len > 0; len -= 4)
+ *p++ = in_be32((u32 *)(base + NDFC_DATA));
+}
+
+#ifndef CONFIG_NAND_SPL
+/*
+ * Don't use these speedup functions in NAND boot image, since the image
+ * has to fit into 4kByte.
+ */
+static void ndfc_write_buf(struct mtd_info *mtdinfo, const uint8_t *buf, int len)
+{
+ struct nand_chip *this = mtdinfo->priv;
+ ulong base = (ulong) this->IO_ADDR_W & 0xffffff00;
+ uint32_t *p = (uint32_t *) buf;
+
+ for (; len > 0; len -= 4)
+ out_be32((u32 *)(base + NDFC_DATA), *p++);
+}
+
+static int ndfc_verify_buf(struct mtd_info *mtdinfo, const uint8_t *buf, int len)
+{
+ struct nand_chip *this = mtdinfo->priv;
+ ulong base = (ulong) this->IO_ADDR_W & 0xffffff00;
+ uint32_t *p = (uint32_t *) buf;
+
+ for (; len > 0; len -= 4)
+ if (*p++ != in_be32((u32 *)(base + NDFC_DATA)))
+ return -1;
+
+ return 0;
+}
+#endif /* #ifndef CONFIG_NAND_SPL */
+
+#ifndef CONFIG_SYS_NAND_BCR
+#define CONFIG_SYS_NAND_BCR 0x80002222
+#endif
+
+void board_nand_select_device(struct nand_chip *nand, int chip)
+{
+ /*
+ * Don't use "chip" to address the NAND device,
+ * generate the cs from the address where it is encoded.
+ */
+ ulong base = (ulong)nand->IO_ADDR_W & 0xffffff00;
+ int cs = ndfc_cs[chip];
+
+ /* Set NandFlash Core Configuration Register */
+ /* 1 col x 2 rows */
+ out_be32((u32 *)(base + NDFC_CCR), 0x00000000 | (cs << 24));
+ out_be32((u32 *)(base + NDFC_BCFG0 + (cs << 2)), CONFIG_SYS_NAND_BCR);
+}
+
+static void ndfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ /*
+ * Nothing to do here!
+ */
+}
+
+int board_nand_init(struct nand_chip *nand)
+{
+ int cs = (ulong)nand->IO_ADDR_W & 0x00000003;
+ ulong base = (ulong)nand->IO_ADDR_W & 0xffffff00;
+ static int chip = 0;
+
+ /*
+ * Save chip-select for this chip #
+ */
+ ndfc_cs[chip] = cs;
+
+ /*
+ * Select required NAND chip in NDFC
+ */
+ board_nand_select_device(nand, chip);
+
+ nand->IO_ADDR_R = (void __iomem *)(base + NDFC_DATA);
+ nand->IO_ADDR_W = (void __iomem *)(base + NDFC_DATA);
+ nand->cmd_ctrl = ndfc_hwcontrol;
+ nand->chip_delay = 50;
+ nand->read_buf = ndfc_read_buf;
+ nand->dev_ready = ndfc_dev_ready;
+ nand->ecc.correct = nand_correct_data;
+ nand->ecc.hwctl = ndfc_enable_hwecc;
+ nand->ecc.calculate = ndfc_calculate_ecc;
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.size = 256;
+ nand->ecc.bytes = 3;
+ nand->select_chip = ndfc_select_chip;
+
+#ifndef CONFIG_NAND_SPL
+ nand->write_buf = ndfc_write_buf;
+ nand->verify_buf = ndfc_verify_buf;
+
+ chip++;
+#else
+ /*
+ * Setup EBC (CS0 only right now)
+ */
+ mtebc(EBC0_CFG, 0xb8400000);
+
+ mtebc(PB0CR, CONFIG_SYS_EBC_PB0CR);
+ mtebc(PB0AP, CONFIG_SYS_EBC_PB0AP);
+#endif
+
+ return 0;
+}
diff --git a/u-boot/drivers/mtd/nand/nomadik.c b/u-boot/drivers/mtd/nand/nomadik.c
new file mode 100644
index 0000000..b76f4cb
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/nomadik.c
@@ -0,0 +1,221 @@
+/*
+ * (C) Copyright 2007 STMicroelectronics, <www.st.com>
+ * (C) Copyright 2009 Alessandro Rubini <rubini@unipv.it>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <asm/io.h>
+
+static inline int parity(int b) /* b is really a byte; returns 0 or ~0 */
+{
+ __asm__ __volatile__(
+ "eor %0, %0, %0, lsr #4\n\t"
+ "eor %0, %0, %0, lsr #2\n\t"
+ "eor %0, %0, %0, lsr #1\n\t"
+ "ands %0, %0, #1\n\t"
+ "subne %0, %0, #2\t"
+ : "=r" (b) : "0" (b));
+ return b;
+}
+
+/*
+ * This is the ECC routine used in hardware, according to the manual.
+ * HW claims to make the calculation but not the correction; so we must
+ * recalculate the bytes for a comparison.
+ */
+static int ecc512(const unsigned char *data, unsigned char *ecc)
+{
+ int gpar = 0;
+ int i, val, par;
+ int pbits = 0; /* P8, P16, ... P2048 */
+ int pprime = 0; /* P8', P16', ... P2048' */
+ int lowbits; /* P1, P2, P4 and primes */
+
+ for (i = 0; i < 512; i++) {
+ par = parity((val = data[i]));
+ gpar ^= val;
+ pbits ^= (i & par);
+ }
+ /*
+ * Ok, now gpar is global parity (xor of all bytes)
+ * pbits are all the parity bits (non-prime ones)
+ */
+ par = parity(gpar);
+ pprime = pbits ^ par;
+ /* Put low bits in the right position for ecc[2] (bits 7..2) */
+ lowbits = 0
+ | (parity(gpar & 0xf0) & 0x80) /* P4 */
+ | (parity(gpar & 0x0f) & 0x40) /* P4' */
+ | (parity(gpar & 0xcc) & 0x20) /* P2 */
+ | (parity(gpar & 0x33) & 0x10) /* P2' */
+ | (parity(gpar & 0xaa) & 0x08) /* P1 */
+ | (parity(gpar & 0x55) & 0x04); /* P1' */
+
+ ecc[2] = ~(lowbits | ((pbits & 0x100) >> 7) | ((pprime & 0x100) >> 8));
+ /* now intermix bits for ecc[1] (P1024..P128') and ecc[0] (P64..P8') */
+ ecc[1] = ~( (pbits & 0x80) >> 0 | ((pprime & 0x80) >> 1)
+ | ((pbits & 0x40) >> 1) | ((pprime & 0x40) >> 2)
+ | ((pbits & 0x20) >> 2) | ((pprime & 0x20) >> 3)
+ | ((pbits & 0x10) >> 3) | ((pprime & 0x10) >> 4));
+
+ ecc[0] = ~( (pbits & 0x8) << 4 | ((pprime & 0x8) << 3)
+ | ((pbits & 0x4) << 3) | ((pprime & 0x4) << 2)
+ | ((pbits & 0x2) << 2) | ((pprime & 0x2) << 1)
+ | ((pbits & 0x1) << 1) | ((pprime & 0x1) << 0));
+ return 0;
+}
+
+/* This is the method in the chip->ecc field */
+static int nomadik_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ return ecc512(dat, ecc_code);
+}
+
+static int nomadik_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *r_ecc, uint8_t *c_ecc)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint32_t r, c, d, diff; /*read, calculated, xor of them */
+
+ if (!memcmp(r_ecc, c_ecc, chip->ecc.bytes))
+ return 0;
+
+ /* Reorder the bytes into ascending-order 24 bits -- see manual */
+ r = r_ecc[2] << 22 | r_ecc[1] << 14 | r_ecc[0] << 6 | r_ecc[2] >> 2;
+ c = c_ecc[2] << 22 | c_ecc[1] << 14 | c_ecc[0] << 6 | c_ecc[2] >> 2;
+ diff = (r ^ c) & ((1<<24)-1); /* use 24 bits only */
+
+ /* If 12 bits are different, one per pair, it's correctable */
+ if (((diff | (diff>>1)) & 0x555555) == 0x555555) {
+ int bit = ((diff & 2) >> 1)
+ | ((diff & 0x8) >> 2) | ((diff & 0x20) >> 3);
+ int byte;
+
+ d = diff >> 6; /* remove bit-order info */
+ byte = ((d & 2) >> 1)
+ | ((d & 0x8) >> 2) | ((d & 0x20) >> 3)
+ | ((d & 0x80) >> 4) | ((d & 0x200) >> 5)
+ | ((d & 0x800) >> 6) | ((d & 0x2000) >> 7)
+ | ((d & 0x8000) >> 8) | ((d & 0x20000) >> 9);
+ /* correct the single bit */
+ dat[byte] ^= 1<<bit;
+ return 0;
+ }
+ /* If 1 bit only differs, it's one bit error in ECC, ignore */
+ if ((diff ^ (1 << (ffs(diff) - 1))) == 0)
+ return 0;
+ /* Otherwise, uncorrectable */
+ return -1;
+}
+
+static void nomadik_ecc_hwctl(struct mtd_info *mtd, int mode)
+{ /* mandatory in the structure but not used here */ }
+
+
+/* This is the layout used by older installations, we keep compatible */
+struct nand_ecclayout nomadik_ecc_layout = {
+ .eccbytes = 3 * 4,
+ .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */
+ 0x02, 0x03, 0x04,
+ 0x12, 0x13, 0x14,
+ 0x22, 0x23, 0x24,
+ 0x32, 0x33, 0x34},
+ .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} },
+};
+
+#define MASK_ALE (1 << 24) /* our ALE is AD21 */
+#define MASK_CLE (1 << 23) /* our CLE is AD22 */
+
+/* This is copied from the AT91SAM9 devices (Stelian Pop, Lead Tech Design) */
+static void nomadik_nand_hwcontrol(struct mtd_info *mtd,
+ int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ u32 pcr0 = readl(REG_FSMC_PCR0);
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ ulong IO_ADDR_W = (ulong) this->IO_ADDR_W;
+ IO_ADDR_W &= ~(MASK_ALE | MASK_CLE);
+
+ if (ctrl & NAND_CLE)
+ IO_ADDR_W |= MASK_CLE;
+ if (ctrl & NAND_ALE)
+ IO_ADDR_W |= MASK_ALE;
+
+ if (ctrl & NAND_NCE)
+ writel(pcr0 | 0x4, REG_FSMC_PCR0);
+ else
+ writel(pcr0 & ~0x4, REG_FSMC_PCR0);
+
+ this->IO_ADDR_W = (void *) IO_ADDR_W;
+ this->IO_ADDR_R = (void *) IO_ADDR_W;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+/* Returns 1 when ready; upper layers timeout at 20ms with timer routines */
+static int nomadik_nand_ready(struct mtd_info *mtd)
+{
+ return 1; /* The ready bit is handled in hardware */
+}
+
+/* Copy a buffer 32 bits at a time: faster than the default method, which is 8-bit */
+static void nomadik_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+ u32 *p = (u32 *) buf;
+
+ len >>= 2;
+ writel(0, REG_FSMC_ECCR0);
+ for (i = 0; i < len; i++)
+ p[i] = readl(chip->IO_ADDR_R);
+}
+
+int board_nand_init(struct nand_chip *chip)
+{
+	/* Set up the FSMC_PCR0 for nand access */
+ writel(0x0000004a, REG_FSMC_PCR0);
+ /* Set up FSMC_PMEM0, FSMC_PATT0 with timing data for access */
+ writel(0x00020401, REG_FSMC_PMEM0);
+ writel(0x00020404, REG_FSMC_PATT0);
+
+ chip->options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING;
+ chip->cmd_ctrl = nomadik_nand_hwcontrol;
+ chip->dev_ready = nomadik_nand_ready;
+ /* The chip allows 32bit reads, so avoid the default 8bit copy */
+ chip->read_buf = nomadik_nand_read_buf;
+
+	/* ECC: follow the hardware-defined rules, but do it in sw */
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.bytes = 3;
+ chip->ecc.size = 512;
+ chip->ecc.layout = &nomadik_ecc_layout;
+ chip->ecc.calculate = nomadik_ecc_calculate;
+ chip->ecc.hwctl = nomadik_ecc_hwctl;
+ chip->ecc.correct = nomadik_ecc_correct;
+
+ return 0;
+}
diff --git a/u-boot/drivers/mtd/nand/omap_gpmc.c b/u-boot/drivers/mtd/nand/omap_gpmc.c
new file mode 100644
index 0000000..99b9cef
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/omap_gpmc.c
@@ -0,0 +1,344 @@
+/*
+ * (C) Copyright 2004-2008 Texas Instruments, <www.ti.com>
+ * Rohit Choraria <rohitkc@ti.com>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <asm/arch/mem.h>
+#include <asm/arch/omap_gpmc.h>
+#include <linux/mtd/nand_ecc.h>
+#include <nand.h>
+
+static uint8_t cs;
+static struct nand_ecclayout hw_nand_oob = GPMC_NAND_HW_ECC_LAYOUT;
+
+/*
+ * omap_nand_hwcontrol - Set the address pointers correctly for the
+ * following address/data/command operation
+ */
+static void omap_nand_hwcontrol(struct mtd_info *mtd, int32_t cmd,
+ uint32_t ctrl)
+{
+ register struct nand_chip *this = mtd->priv;
+
+ /*
+ * Point the IO_ADDR to DATA and ADDRESS registers instead
+ * of chip address
+ */
+ switch (ctrl) {
+ case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
+ this->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_cmd;
+ break;
+ case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
+ this->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_adr;
+ break;
+ case NAND_CTRL_CHANGE | NAND_NCE:
+ this->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_dat;
+ break;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+/*
+ * omap_hwecc_init - Initialize the Hardware ECC for NAND flash in
+ * GPMC controller
+ * @chip: NAND chip structure
+ *
+ */
+static void omap_hwecc_init(struct nand_chip *chip)
+{
+ /*
+ * Init ECC Control Register
+ * Clear all ECC | Enable Reg1
+ */
+ writel(ECCCLEAR | ECCRESULTREG1, &gpmc_cfg->ecc_control);
+ writel(ECCSIZE1 | ECCSIZE0 | ECCSIZE0SEL, &gpmc_cfg->ecc_size_config);
+}
+
+/*
+ * gen_true_ecc - This function will generate true ECC value, which
+ * can be used when correcting data read from NAND flash memory core
+ *
+ * @ecc_buf: buffer to store ecc code
+ *
+ * @return: re-formatted ECC value
+ */
+static uint32_t gen_true_ecc(uint8_t *ecc_buf)
+{
+ return ecc_buf[0] | (ecc_buf[1] << 16) | ((ecc_buf[2] & 0xF0) << 20) |
+ ((ecc_buf[2] & 0x0F) << 8);
+}
+
+/*
+ * omap_correct_data - Compares the ecc read from the nand spare area with the
+ * ECC register values and corrects a one-bit error if one has occurred.
+ * Further details can be had from OMAP TRM and the following selected links:
+ * http://en.wikipedia.org/wiki/Hamming_code
+ * http://www.cs.utexas.edu/users/plaxton/c/337/05f/slides/ErrorCorrection-4.pdf
+ *
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from ECC registers
+ *
+ * @return 0 if data is OK or corrected, else returns -1
+ */
+static int omap_correct_data(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ uint32_t orig_ecc, new_ecc, res, hm;
+ uint16_t parity_bits, byte;
+ uint8_t bit;
+
+	/* Regenerate the original ECC */
+ orig_ecc = gen_true_ecc(read_ecc);
+ new_ecc = gen_true_ecc(calc_ecc);
+ /* Get the XOR of real ecc */
+ res = orig_ecc ^ new_ecc;
+ if (res) {
+ /* Get the hamming width */
+ hm = hweight32(res);
+ /* Single bit errors can be corrected! */
+ if (hm == 12) {
+ /* Correctable data! */
+ parity_bits = res >> 16;
+ bit = (parity_bits & 0x7);
+ byte = (parity_bits >> 3) & 0x1FF;
+ /* Flip the bit to correct */
+ dat[byte] ^= (0x1 << bit);
+ } else if (hm == 1) {
+ printf("Error: Ecc is wrong\n");
+ /* ECC itself is corrupted */
+ return 2;
+ } else {
+ /*
+			 * A Hamming distance other than 12 or 1 could mean a
+			 * 2-bit error, or the page could simply be blank.
+			 * orig_ecc: contains the spare area data from nand flash.
+			 * new_ecc: ecc generated while reading the data area.
+			 * Note: if the ecc = 0, all data bits from which it was
+			 * generated are 0xFF.
+			 * The 3-byte (24-bit) ecc is generated per 512-byte
+			 * chunk of a page. If orig_ecc (from the spare area)
+			 * is 0xFF and new_ecc (computed now from the data area)
+			 * is 0x0, the data area is 0xFF and the spare area is
+			 * 0xFF. A sure sign of an erased page!
+ */
+ if ((orig_ecc == 0x0FFF0FFF) && (new_ecc == 0x00000000))
+ return 0;
+ printf("Error: Bad compare! failed\n");
+ /* detected 2 bit error */
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ *
+ * Using non-inverted ECC can be considered ugly since writing a blank
+ * page (i.e. padding) will clear the ECC bytes. This is no problem as
+ * long as nobody tries to write data to the seemingly unused page.
+ * Reading an erased page will produce an ECC mismatch between
+ * generated and read ECC bytes that has to be dealt with separately.
+ * E.g. if page is 0xFF (fresh erased), and if HW ECC engine within GPMC
+ * is used, the result of read will be 0x0 while the ECC offsets of the
+ * spare area will be 0xFF which will result in an ECC mismatch.
+ * @mtd: MTD structure
+ * @dat: unused
+ * @ecc_code: ecc_code buffer
+ */
+static int omap_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ u_int32_t val;
+
+ /* Start Reading from HW ECC1_Result = 0x200 */
+ val = readl(&gpmc_cfg->ecc1_result);
+
+ ecc_code[0] = val & 0xFF;
+ ecc_code[1] = (val >> 16) & 0xFF;
+ ecc_code[2] = ((val >> 8) & 0x0F) | ((val >> 20) & 0xF0);
+
+ /*
+	 * Stop reading any more ECC values and clear the old results;
+	 * enable will be called again if more reads are required
+ */
+ writel(0x000, &gpmc_cfg->ecc_config);
+
+ return 0;
+}
+
+/*
+ * omap_enable_ecc - This function enables the hardware ecc functionality
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct mtd_info *mtd, int32_t mode)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint32_t val, dev_width = (chip->options & NAND_BUSWIDTH_16) >> 1;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ /* Clear the ecc result registers, select ecc reg as 1 */
+ writel(ECCCLEAR | ECCRESULTREG1, &gpmc_cfg->ecc_control);
+
+ /*
+ * Size 0 = 0xFF, Size1 is 0xFF - both are 512 bytes
+ * tell all regs to generate size0 sized regs
+ * we just have a single ECC engine for all CS
+ */
+ writel(ECCSIZE1 | ECCSIZE0 | ECCSIZE0SEL,
+ &gpmc_cfg->ecc_size_config);
+ val = (dev_width << 7) | (cs << 1) | (0x1);
+ writel(val, &gpmc_cfg->ecc_config);
+ break;
+ default:
+ printf("Error: Unrecognized Mode[%d]!\n", mode);
+ break;
+ }
+}
+
+/*
+ * omap_nand_switch_ecc - switch the ECC operation b/w h/w ecc and s/w ecc.
+ * The default is to come up on s/w ecc
+ *
+ * @hardware - 1 - switch to h/w ecc, 0 - s/w ecc
+ *
+ */
+void omap_nand_switch_ecc(int32_t hardware)
+{
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+
+ if (nand_curr_device < 0 ||
+ nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
+ !nand_info[nand_curr_device].name) {
+ printf("Error: Can't switch ecc, no devices available\n");
+ return;
+ }
+
+ mtd = &nand_info[nand_curr_device];
+ nand = mtd->priv;
+
+ nand->options |= NAND_OWN_BUFFERS;
+
+ /* Reset ecc interface */
+ nand->ecc.read_page = NULL;
+ nand->ecc.write_page = NULL;
+ nand->ecc.read_oob = NULL;
+ nand->ecc.write_oob = NULL;
+ nand->ecc.hwctl = NULL;
+ nand->ecc.correct = NULL;
+ nand->ecc.calculate = NULL;
+
+ /* Setup the ecc configurations again */
+ if (hardware) {
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.layout = &hw_nand_oob;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 3;
+ nand->ecc.hwctl = omap_enable_hwecc;
+ nand->ecc.correct = omap_correct_data;
+ nand->ecc.calculate = omap_calculate_ecc;
+ omap_hwecc_init(nand);
+ printf("HW ECC selected\n");
+ } else {
+ nand->ecc.mode = NAND_ECC_SOFT;
+ /* Use mtd default settings */
+ nand->ecc.layout = NULL;
+ printf("SW ECC selected\n");
+ }
+
+ /* Update NAND handling after ECC mode switch */
+ nand_scan_tail(mtd);
+
+ nand->options &= ~NAND_OWN_BUFFERS;
+}
+
+/*
+ * Board-specific NAND initialization. The following members of the
+ * argument are board-specific:
+ * - IO_ADDR_R: address to read the 8 I/O lines of the flash device
+ * - IO_ADDR_W: address to write the 8 I/O lines of the flash device
+ * - cmd_ctrl: hardware-specific function for accessing control lines
+ * - waitfunc: hardware-specific function for accessing the device ready/busy line
+ * - ecc.hwctl: function to enable (reset) hardware ecc generator
+ * - ecc.mode: mode of ecc, see defines
+ * - chip_delay: chip-dependent delay for transferring data from the array to
+ * read regs (tR)
+ * - options: various chip options. They can partly be set to inform
+ * nand_scan about special functionality. See the defines for further
+ * explanation
+ */
+int board_nand_init(struct nand_chip *nand)
+{
+ int32_t gpmc_config = 0;
+ cs = 0;
+
+ /*
+	 * The x-loader/U-Boot GPMC configuration will already have set up GPMC
+	 * for NAND-type memory. The following logic scans and latches on to
+	 * the first CS with NAND-type memory.
+ * TBD: need to make this logic generic to handle multiple CS NAND
+ * devices.
+ */
+ while (cs < GPMC_MAX_CS) {
+ /* Check if NAND type is set */
+ if ((readl(&gpmc_cfg->cs[cs].config1) & 0xC00) == 0x800) {
+ /* Found it!! */
+ break;
+ }
+ cs++;
+ }
+ if (cs >= GPMC_MAX_CS) {
+ printf("NAND: Unable to find NAND settings in "
+ "GPMC Configuration - quitting\n");
+ return -ENODEV;
+ }
+
+ gpmc_config = readl(&gpmc_cfg->config);
+ /* Disable Write protect */
+ gpmc_config |= 0x10;
+ writel(gpmc_config, &gpmc_cfg->config);
+
+ nand->IO_ADDR_R = (void __iomem *)&gpmc_cfg->cs[cs].nand_dat;
+ nand->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_cmd;
+
+ nand->cmd_ctrl = omap_nand_hwcontrol;
+ nand->options = NAND_NO_PADDING | NAND_CACHEPRG | NAND_NO_AUTOINCR;
+ /* If we are 16 bit dev, our gpmc config tells us that */
+ if ((readl(&gpmc_cfg->cs[cs].config1) & 0x3000) == 0x1000)
+ nand->options |= NAND_BUSWIDTH_16;
+
+ nand->chip_delay = 100;
+ /* Default ECC mode */
+ nand->ecc.mode = NAND_ECC_SOFT;
+
+ return 0;
+}
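To illustrate the repacking done by gen_true_ecc() and the Hamming-weight test in omap_correct_data(), here is a small standalone sketch with made-up ECC triplets; it only demonstrates the weight calculation, not a real correction:

#include <stdint.h>
#include <stdio.h>

/* Same re-packing as gen_true_ecc() above, repeated for a standalone demo */
static uint32_t true_ecc(const uint8_t *e)
{
	return e[0] | (e[1] << 16) | ((e[2] & 0xF0) << 20) | ((e[2] & 0x0F) << 8);
}

int main(void)
{
	/* Made-up triplets differing in a single ECC bit */
	uint8_t read_ecc[3] = { 0x55, 0xAA, 0x3C };
	uint8_t calc_ecc[3] = { 0x54, 0xAA, 0x3C };
	uint32_t res = true_ecc(read_ecc) ^ true_ecc(calc_ecc);

	/* Weight 12 means a correctable data bit, 1 means the ECC itself is bad */
	printf("differing bits: %d\n", __builtin_popcount(res));
	return 0;
}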
diff --git a/u-boot/drivers/mtd/nand/s3c2410_nand.c b/u-boot/drivers/mtd/nand/s3c2410_nand.c
new file mode 100644
index 0000000..27351fb
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/s3c2410_nand.c
@@ -0,0 +1,189 @@
+/*
+ * (C) Copyright 2006 OpenMoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+
+#include <nand.h>
+#include <asm/arch/s3c24x0_cpu.h>
+#include <asm/io.h>
+
+#define S3C2410_NFCONF_EN (1<<15)
+#define S3C2410_NFCONF_512BYTE (1<<14)
+#define S3C2410_NFCONF_4STEP (1<<13)
+#define S3C2410_NFCONF_INITECC (1<<12)
+#define S3C2410_NFCONF_nFCE (1<<11)
+#define S3C2410_NFCONF_TACLS(x) ((x)<<8)
+#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4)
+#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0)
+
+#define S3C2410_ADDR_NALE 4
+#define S3C2410_ADDR_NCLE 8
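+
+/*
+ * In the S3C2410 register map NFCMD sits at +0x04 and NFADDR at +0x08 from
+ * the controller base; s3c2410_hwcontrol() ORs these offsets into IO_ADDR_W
+ * so that command and address cycles reach the right register.
+ */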
+
+#ifdef CONFIG_NAND_SPL
+
+/* in the early stage of NAND flash booting, printf() is not available */
+#define printf(fmt, args...)
+
+static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readb(this->IO_ADDR_R);
+}
+#endif
+
+static void s3c2410_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct s3c2410_nand *nand = s3c2410_get_base_nand();
+
+ debugX(1, "hwcontrol(): 0x%02x 0x%02x\n", cmd, ctrl);
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ ulong IO_ADDR_W = (ulong)nand;
+
+ if (!(ctrl & NAND_CLE))
+ IO_ADDR_W |= S3C2410_ADDR_NCLE;
+ if (!(ctrl & NAND_ALE))
+ IO_ADDR_W |= S3C2410_ADDR_NALE;
+
+ chip->IO_ADDR_W = (void *)IO_ADDR_W;
+
+ if (ctrl & NAND_NCE)
+ writel(readl(&nand->nfconf) & ~S3C2410_NFCONF_nFCE,
+ &nand->nfconf);
+ else
+ writel(readl(&nand->nfconf) | S3C2410_NFCONF_nFCE,
+ &nand->nfconf);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->IO_ADDR_W);
+}
+
+static int s3c2410_dev_ready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand *nand = s3c2410_get_base_nand();
+ debugX(1, "dev_ready\n");
+ return readl(&nand->nfstat) & 0x01;
+}
+
+#ifdef CONFIG_S3C2410_NAND_HWECC
+void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct s3c2410_nand *nand = s3c2410_get_base_nand();
+ debugX(1, "s3c2410_nand_enable_hwecc(%p, %d)\n", mtd, mode);
+ writel(readl(&nand->nfconf) | S3C2410_NFCONF_INITECC, &nand->nfconf);
+}
+
+static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct s3c2410_nand *nand = s3c2410_get_base_nand();
+ ecc_code[0] = readb(&nand->nfecc);
+ ecc_code[1] = readb(&nand->nfecc + 1);
+ ecc_code[2] = readb(&nand->nfecc + 2);
+ debugX(1, "s3c2410_nand_calculate_hwecc(%p,): 0x%02x 0x%02x 0x%02x\n",
+ mtd , ecc_code[0], ecc_code[1], ecc_code[2]);
+
+ return 0;
+}
+
+static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ if (read_ecc[0] == calc_ecc[0] &&
+ read_ecc[1] == calc_ecc[1] &&
+ read_ecc[2] == calc_ecc[2])
+ return 0;
+
+ printf("s3c2410_nand_correct_data: not implemented\n");
+ return -1;
+}
+#endif
+
+int board_nand_init(struct nand_chip *nand)
+{
+ u_int32_t cfg;
+ u_int8_t tacls, twrph0, twrph1;
+ struct s3c24x0_clock_power *clk_power = s3c24x0_get_base_clock_power();
+ struct s3c2410_nand *nand_reg = s3c2410_get_base_nand();
+
+ debugX(1, "board_nand_init()\n");
+
+ writel(readl(&clk_power->clkcon) | (1 << 4), &clk_power->clkcon);
+
+ /* initialize hardware */
+#if defined(CONFIG_S3C24XX_CUSTOM_NAND_TIMING)
+ tacls = CONFIG_S3C24XX_TACLS;
+ twrph0 = CONFIG_S3C24XX_TWRPH0;
+ twrph1 = CONFIG_S3C24XX_TWRPH1;
+#else
+ tacls = 4;
+ twrph0 = 8;
+ twrph1 = 8;
+#endif
+
+ cfg = S3C2410_NFCONF_EN;
+ cfg |= S3C2410_NFCONF_TACLS(tacls - 1);
+ cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
+ cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
+ writel(cfg, &nand_reg->nfconf);
+
+ /* initialize nand_chip data structure */
+ nand->IO_ADDR_R = (void *)&nand_reg->nfdata;
+ nand->IO_ADDR_W = (void *)&nand_reg->nfdata;
+
+ nand->select_chip = NULL;
+
+ /* read_buf and write_buf are default */
+ /* read_byte and write_byte are default */
+#ifdef CONFIG_NAND_SPL
+ nand->read_buf = nand_read_buf;
+#endif
+
+ /* hwcontrol always must be implemented */
+ nand->cmd_ctrl = s3c2410_hwcontrol;
+
+ nand->dev_ready = s3c2410_dev_ready;
+
+#ifdef CONFIG_S3C2410_NAND_HWECC
+ nand->ecc.hwctl = s3c2410_nand_enable_hwecc;
+ nand->ecc.calculate = s3c2410_nand_calculate_ecc;
+ nand->ecc.correct = s3c2410_nand_correct_data;
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.size = CONFIG_SYS_NAND_ECCSIZE;
+ nand->ecc.bytes = CONFIG_SYS_NAND_ECCBYTES;
+#else
+ nand->ecc.mode = NAND_ECC_SOFT;
+#endif
+
+#ifdef CONFIG_S3C2410_NAND_BBT
+ nand->options = NAND_USE_FLASH_BBT;
+#else
+ nand->options = 0;
+#endif
+
+ debugX(1, "end of nand_init\n");
+
+ return 0;
+}
diff --git a/u-boot/drivers/mtd/nand/s3c64xx.c b/u-boot/drivers/mtd/nand/s3c64xx.c
new file mode 100644
index 0000000..084e475
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/s3c64xx.c
@@ -0,0 +1,319 @@
+/*
+ * (C) Copyright 2006 DENX Software Engineering
+ *
+ * Implementation for U-Boot 1.1.6 by Samsung
+ *
+ * (C) Copyright 2008
+ * Guennadi Liakhovetki, DENX Software Engineering, <lg@denx.de>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+
+#include <nand.h>
+#include <asm/arch/s3c6400.h>
+
+#include <asm/io.h>
+#include <asm/errno.h>
+
+#define MAX_CHIPS 2
+static int nand_cs[MAX_CHIPS] = {0, 1};
+
+#ifdef CONFIG_NAND_SPL
+#define printf(arg...) do {} while (0)
+#endif
+
+/* Nand flash definition values by jsgood */
+#ifdef S3C_NAND_DEBUG
+/*
+ * Function to print out oob buffer for debugging
+ * Written by jsgood
+ */
+static void print_oob(const char *header, struct mtd_info *mtd)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ printf("%s:\t", header);
+
+ for (i = 0; i < 64; i++)
+ printf("%02x ", chip->oob_poi[i]);
+
+ printf("\n");
+}
+#endif /* S3C_NAND_DEBUG */
+
+#ifdef CONFIG_NAND_SPL
+static u_char nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ return readb(this->IO_ADDR_R);
+}
+
+static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], this->IO_ADDR_W);
+}
+
+static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readb(this->IO_ADDR_R);
+}
+#endif
+
+static void s3c_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ int ctrl = readl(NFCONT);
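+	/* NFCONT bits 1 and 2 are the (active-low) chip enables for CE0 and CE1 */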
+
+ switch (chip) {
+ case -1:
+ ctrl |= 6;
+ break;
+ case 0:
+ ctrl &= ~2;
+ break;
+ case 1:
+ ctrl &= ~4;
+ break;
+ default:
+ return;
+ }
+
+ writel(ctrl, NFCONT);
+}
+
+/*
+ * Hardware-specific function for accessing the control lines
+ * Written by jsgood
+ */
+static void s3c_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_CLE)
+ this->IO_ADDR_W = (void __iomem *)NFCMMD;
+ else if (ctrl & NAND_ALE)
+ this->IO_ADDR_W = (void __iomem *)NFADDR;
+ else
+ this->IO_ADDR_W = (void __iomem *)NFDATA;
+ if (ctrl & NAND_NCE)
+ s3c_nand_select_chip(mtd, *(int *)this->priv);
+ else
+ s3c_nand_select_chip(mtd, -1);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+/*
+ * Function for checking device ready pin
+ * Written by jsgood
+ */
+static int s3c_nand_device_ready(struct mtd_info *mtdinfo)
+{
+ return !!(readl(NFSTAT) & NFSTAT_RnB);
+}
+
+#ifdef CONFIG_SYS_S3C_NAND_HWECC
+/*
+ * This function is called before encoding ecc codes to ready ecc engine.
+ * Written by jsgood
+ */
+static void s3c_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ u_long nfcont, nfconf;
+
+ /*
+ * The original driver used 4-bit ECC for "new" MLC chips, i.e., for
+ * those with non-zero ID[3][3:2], which anyway only holds for ST
+ * (Numonyx) chips
+ */
+ nfconf = readl(NFCONF) & ~NFCONF_ECC_4BIT;
+
+ writel(nfconf, NFCONF);
+
+ /* Initialize & unlock */
+ nfcont = readl(NFCONT);
+ nfcont |= NFCONT_INITECC;
+ nfcont &= ~NFCONT_MECCLOCK;
+
+ if (mode == NAND_ECC_WRITE)
+ nfcont |= NFCONT_ECC_ENC;
+ else if (mode == NAND_ECC_READ)
+ nfcont &= ~NFCONT_ECC_ENC;
+
+ writel(nfcont, NFCONT);
+}
+
+/*
+ * This function is called immediately after encoding ecc codes.
+ * This function returns encoded ecc codes.
+ * Written by jsgood
+ */
+static int s3c_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ u_long nfcont, nfmecc0;
+
+ /* Lock */
+ nfcont = readl(NFCONT);
+ nfcont |= NFCONT_MECCLOCK;
+ writel(nfcont, NFCONT);
+
+ nfmecc0 = readl(NFMECC0);
+
+ ecc_code[0] = nfmecc0 & 0xff;
+ ecc_code[1] = (nfmecc0 >> 8) & 0xff;
+ ecc_code[2] = (nfmecc0 >> 16) & 0xff;
+ ecc_code[3] = (nfmecc0 >> 24) & 0xff;
+
+ return 0;
+}
+
+/*
+ * This function determines whether read data is good or not.
+ * If SLC, must write ecc codes to controller before reading status bit.
+ * If MLC, status bit is already set, so only reading is needed.
+ * If status bit is good, return 0.
+ * If correctable errors occurred, correct the data and return 1.
+ * If uncorrectable errors occurred, return -1.
+ * Written by jsgood
+ */
+static int s3c_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ int ret = -1;
+ u_long nfestat0, nfmeccdata0, nfmeccdata1, err_byte_addr;
+ u_char err_type, repaired;
+
+ /* SLC: Write ecc to compare */
+ nfmeccdata0 = (calc_ecc[1] << 16) | calc_ecc[0];
+ nfmeccdata1 = (calc_ecc[3] << 16) | calc_ecc[2];
+ writel(nfmeccdata0, NFMECCDATA0);
+ writel(nfmeccdata1, NFMECCDATA1);
+
+ /* Read ecc status */
+ nfestat0 = readl(NFESTAT0);
+ err_type = nfestat0 & 0x3;
+
+ switch (err_type) {
+ case 0: /* No error */
+ ret = 0;
+ break;
+
+ case 1:
+ /*
+ * 1 bit error (Correctable)
+ * (nfestat0 >> 7) & 0x7ff :error byte number
+ * (nfestat0 >> 4) & 0x7 :error bit number
+ */
+ err_byte_addr = (nfestat0 >> 7) & 0x7ff;
+ repaired = dat[err_byte_addr] ^ (1 << ((nfestat0 >> 4) & 0x7));
+
+ printf("S3C NAND: 1 bit error detected at byte %ld. "
+ "Correcting from 0x%02x to 0x%02x...OK\n",
+ err_byte_addr, dat[err_byte_addr], repaired);
+
+ dat[err_byte_addr] = repaired;
+
+ ret = 1;
+ break;
+
+ case 2: /* Multiple error */
+ case 3: /* ECC area error */
+ printf("S3C NAND: ECC uncorrectable error detected. "
+ "Not correctable.\n");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+#endif /* CONFIG_SYS_S3C_NAND_HWECC */
+
+/*
+ * Board-specific NAND initialization. The following members of the
+ * argument are board-specific (per include/linux/mtd/nand.h):
+ * - IO_ADDR_R?: address to read the 8 I/O lines of the flash device
+ * - IO_ADDR_W?: address to write the 8 I/O lines of the flash device
+ * - hwcontrol: hardware-specific function for accessing control lines
+ * - dev_ready: hardware-specific function for accessing the device ready/busy line
+ * - enable_hwecc?: function to enable (reset) hardware ecc generator. Must
+ * only be provided if a hardware ECC is available
+ * - eccmode: mode of ecc, see defines
+ * - chip_delay: chip-dependent delay for transferring data from the array to
+ *   the read registers (tR)
+ * - options: various chip options. They can partly be set to inform
+ * nand_scan about special functionality. See the defines for further
+ * explanation
+ * Members with a "?" were not set in the merged testing-NAND branch,
+ * so they are not set here either.
+ */
+int board_nand_init(struct nand_chip *nand)
+{
+ static int chip_n;
+
+ if (chip_n >= MAX_CHIPS)
+ return -ENODEV;
+
+ NFCONT_REG = (NFCONT_REG & ~NFCONT_WP) | NFCONT_ENABLE | 0x6;
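+	/* the 0x6 sets both nCE bits, i.e. leaves both chips deselected until
+	 * s3c_nand_select_chip() picks one */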
+
+ nand->IO_ADDR_R = (void __iomem *)NFDATA;
+ nand->IO_ADDR_W = (void __iomem *)NFDATA;
+ nand->cmd_ctrl = s3c_nand_hwcontrol;
+ nand->dev_ready = s3c_nand_device_ready;
+ nand->select_chip = s3c_nand_select_chip;
+ nand->options = 0;
+#ifdef CONFIG_NAND_SPL
+ nand->read_byte = nand_read_byte;
+ nand->write_buf = nand_write_buf;
+ nand->read_buf = nand_read_buf;
+#endif
+
+#ifdef CONFIG_SYS_S3C_NAND_HWECC
+ nand->ecc.hwctl = s3c_nand_enable_hwecc;
+ nand->ecc.calculate = s3c_nand_calculate_ecc;
+ nand->ecc.correct = s3c_nand_correct_data;
+
+ /*
+ * If you get more than 1 NAND-chip with different page-sizes on the
+ * board one day, it will get more complicated...
+ */
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.size = CONFIG_SYS_NAND_ECCSIZE;
+ nand->ecc.bytes = CONFIG_SYS_NAND_ECCBYTES;
+#else
+ nand->ecc.mode = NAND_ECC_SOFT;
+#endif /* ! CONFIG_SYS_S3C_NAND_HWECC */
+
+ nand->priv = nand_cs + chip_n++;
+
+ return 0;
+}
diff --git a/u-boot/drivers/mtd/nand/spr_nand.c b/u-boot/drivers/mtd/nand/spr_nand.c
new file mode 100644
index 0000000..097d0c6
--- /dev/null
+++ b/u-boot/drivers/mtd/nand/spr_nand.c
@@ -0,0 +1,124 @@
+/*
+ * (C) Copyright 2009
+ * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/io.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/spr_nand.h>
+
+static struct fsmc_regs *const fsmc_regs_p =
+ (struct fsmc_regs *)CONFIG_SPEAR_FSMCBASE;
+
+static struct nand_ecclayout spear_nand_ecclayout = {
+ .eccbytes = 24,
+ .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
+ 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ {.offset = 24, .length = 8},
+ {.offset = 40, .length = 8},
+ {.offset = 56, .length = 8},
+ {.offset = 72, .length = 8},
+ {.offset = 88, .length = 8},
+ {.offset = 104, .length = 8},
+ {.offset = 120, .length = 8}
+ }
+};
+
+static void spear_nand_hwcontrol(struct mtd_info *mtd, int cmd, uint ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ ulong IO_ADDR_W;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ IO_ADDR_W = (ulong)this->IO_ADDR_W;
+
+ IO_ADDR_W &= ~(CONFIG_SYS_NAND_CLE | CONFIG_SYS_NAND_ALE);
+ if (ctrl & NAND_CLE)
+ IO_ADDR_W |= CONFIG_SYS_NAND_CLE;
+ if (ctrl & NAND_ALE)
+ IO_ADDR_W |= CONFIG_SYS_NAND_ALE;
+
+ if (ctrl & NAND_NCE) {
+ writel(readl(&fsmc_regs_p->genmemctrl_pc) |
+ FSMC_ENABLE, &fsmc_regs_p->genmemctrl_pc);
+ } else {
+ writel(readl(&fsmc_regs_p->genmemctrl_pc) &
+ ~FSMC_ENABLE, &fsmc_regs_p->genmemctrl_pc);
+ }
+ this->IO_ADDR_W = (void *)IO_ADDR_W;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+static int spear_read_hwecc(struct mtd_info *mtd,
+ const u_char *data, u_char ecc[3])
+{
+ u_int ecc_tmp;
+
+ /* read the h/w ECC */
+ ecc_tmp = readl(&fsmc_regs_p->genmemctrl_ecc);
+
+ ecc[0] = (u_char) (ecc_tmp & 0xFF);
+ ecc[1] = (u_char) ((ecc_tmp & 0xFF00) >> 8);
+ ecc[2] = (u_char) ((ecc_tmp & 0xFF0000) >> 16);
+
+ return 0;
+}
+
+void spear_enable_hwecc(struct mtd_info *mtd, int mode)
+{
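+	/* clear and then set FSMC_ECCEN to restart the hardware ECC computation */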
+ writel(readl(&fsmc_regs_p->genmemctrl_pc) & ~0x80,
+ &fsmc_regs_p->genmemctrl_pc);
+ writel(readl(&fsmc_regs_p->genmemctrl_pc) & ~FSMC_ECCEN,
+ &fsmc_regs_p->genmemctrl_pc);
+ writel(readl(&fsmc_regs_p->genmemctrl_pc) | FSMC_ECCEN,
+ &fsmc_regs_p->genmemctrl_pc);
+}
+
+int spear_nand_init(struct nand_chip *nand)
+{
+ writel(FSMC_DEVWID_8 | FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON,
+ &fsmc_regs_p->genmemctrl_pc);
+ writel(readl(&fsmc_regs_p->genmemctrl_pc) | FSMC_TCLR_1 | FSMC_TAR_1,
+ &fsmc_regs_p->genmemctrl_pc);
+ writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
+ &fsmc_regs_p->genmemctrl_comm);
+ writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
+ &fsmc_regs_p->genmemctrl_attrib);
+
+ nand->options = 0;
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.layout = &spear_nand_ecclayout;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 3;
+ nand->ecc.calculate = spear_read_hwecc;
+ nand->ecc.hwctl = spear_enable_hwecc;
+ nand->ecc.correct = nand_correct_data;
+ nand->cmd_ctrl = spear_nand_hwcontrol;
+ return 0;
+}
diff --git a/u-boot/drivers/mtd/onenand/Makefile b/u-boot/drivers/mtd/onenand/Makefile
new file mode 100644
index 0000000..b984bd4
--- /dev/null
+++ b/u-boot/drivers/mtd/onenand/Makefile
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2005-2007 Samsung Electronics.
+# Kyungmin Park <kyungmin.park@samsung.com>
+#
+# See file CREDITS for list of people who contributed to this
+# project.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+include $(TOPDIR)/config.mk
+
+LIB := $(obj)libonenand.o
+
+COBJS-$(CONFIG_CMD_ONENAND) := onenand_uboot.o onenand_base.o onenand_bbt.o
+COBJS-$(CONFIG_SAMSUNG_ONENAND) += samsung.o
+
+COBJS := $(COBJS-y)
+SRCS := $(COBJS:.o=.c)
+OBJS := $(addprefix $(obj),$(COBJS))
+
+all: $(LIB)
+
+$(LIB): $(obj).depend $(OBJS)
+ $(call cmd_link_o_target, $(OBJS))
+
+#########################################################################
+
+include $(SRCTREE)/rules.mk
+
+sinclude $(obj).depend
+
+#########################################################################
diff --git a/u-boot/drivers/mtd/onenand/onenand_base.c b/u-boot/drivers/mtd/onenand/onenand_base.c
new file mode 100644
index 0000000..24e02c2
--- /dev/null
+++ b/u-boot/drivers/mtd/onenand/onenand_base.c
@@ -0,0 +1,2755 @@
+/*
+ * linux/drivers/mtd/onenand/onenand_base.c
+ *
+ * Copyright (C) 2005-2007 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Credits:
+ * Adrian Hunter <ext-adrian.hunter@nokia.com>:
+ * auto-placement support, read-while load support, various fixes
+ * Copyright (C) Nokia Corporation, 2007
+ *
+ * Rohit Hagargundgi <h.rohit at samsung.com>,
+ * Amul Kumar Saha <amul.saha@samsung.com>:
+ * Flex-OneNAND support
+ * Copyright (C) Samsung Electronics, 2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <common.h>
+#include <linux/mtd/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <malloc.h>
+
+/* OneNAND BufferRAM must be accessed 16 bits wide rather than byte-wise, hence this 16-bit memcpy */
+static void *memcpy_16(void *dst, const void *src, unsigned int len)
+{
+ void *ret = dst;
+ short *d = dst;
+ const short *s = src;
+
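+	/* halve the byte count for 16-bit copies (any odd trailing byte is not copied) */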
+ len >>= 1;
+ while (len-- > 0)
+ *d++ = *s++;
+ return ret;
+}
+
+/**
+ * onenand_oob_128 - oob info for Flex-OneNAND with 4KB page
+ * For now, we expose only 64 out of 80 ecc bytes
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 102, 103, 104, 105
+ },
+ .oobfree = {
+ {2, 4}, {18, 4}, {34, 4}, {50, 4},
+ {66, 4}, {82, 4}, {98, 4}, {114, 4}
+ }
+};
+
+/**
+ * onenand_oob_64 - oob info for large (2KB) page
+ */
+static struct nand_ecclayout onenand_oob_64 = {
+ .eccbytes = 20,
+ .eccpos = {
+ 8, 9, 10, 11, 12,
+ 24, 25, 26, 27, 28,
+ 40, 41, 42, 43, 44,
+ 56, 57, 58, 59, 60,
+ },
+ .oobfree = {
+ {2, 3}, {14, 2}, {18, 3}, {30, 2},
+ {34, 3}, {46, 2}, {50, 3}, {62, 2}
+ }
+};
+
+/**
+ * onenand_oob_32 - oob info for middle (1KB) page
+ */
+static struct nand_ecclayout onenand_oob_32 = {
+ .eccbytes = 10,
+ .eccpos = {
+ 8, 9, 10, 11, 12,
+ 24, 25, 26, 27, 28,
+ },
+ .oobfree = { {2, 3}, {14, 2}, {18, 3}, {30, 2} }
+};
+
+static const unsigned char ffchars[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 16 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 32 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
+};
+
+/**
+ * onenand_readw - [OneNAND Interface] Read OneNAND register
+ * @param addr address to read
+ *
+ * Read OneNAND register
+ */
+static unsigned short onenand_readw(void __iomem * addr)
+{
+ return readw(addr);
+}
+
+/**
+ * onenand_writew - [OneNAND Interface] Write OneNAND register with value
+ * @param value value to write
+ * @param addr address to write
+ *
+ * Write OneNAND register with value
+ */
+static void onenand_writew(unsigned short value, void __iomem * addr)
+{
+ writew(value, addr);
+}
+
+/**
+ * onenand_block_address - [DEFAULT] Get block address
+ * @param device the device id
+ * @param block the block
+ * @return translated block address if DDP, otherwise same
+ *
+ * Setup Start Address 1 Register (F100h)
+ */
+static int onenand_block_address(struct onenand_chip *this, int block)
+{
+ /* Device Flash Core select, NAND Flash Block Address */
+ if (block & this->density_mask)
+ return ONENAND_DDP_CHIP1 | (block ^ this->density_mask);
+
+ return block;
+}
+
+/**
+ * onenand_bufferram_address - [DEFAULT] Get bufferram address
+ * @param device the device id
+ * @param block the block
+ * @return set DBS value if DDP, otherwise 0
+ *
+ * Setup Start Address 2 Register (F101h) for DDP
+ */
+static int onenand_bufferram_address(struct onenand_chip *this, int block)
+{
+ /* Device BufferRAM Select */
+ if (block & this->density_mask)
+ return ONENAND_DDP_CHIP1;
+
+ return ONENAND_DDP_CHIP0;
+}
+
+/**
+ * onenand_page_address - [DEFAULT] Get page address
+ * @param page the page address
+ * @param sector the sector address
+ * @return combined page and sector address
+ *
+ * Setup Start Address 8 Register (F107h)
+ */
+static int onenand_page_address(int page, int sector)
+{
+ /* Flash Page Address, Flash Sector Address */
+ int fpa, fsa;
+
+ fpa = page & ONENAND_FPA_MASK;
+ fsa = sector & ONENAND_FSA_MASK;
+
+ return ((fpa << ONENAND_FPA_SHIFT) | fsa);
+}
+
+/**
+ * onenand_buffer_address - [DEFAULT] Get buffer address
+ * @param dataram1 DataRAM index
+ * @param sectors the sector address
+ * @param count the number of sectors
+ * @return the start buffer value
+ *
+ * Setup Start Buffer Register (F200h)
+ */
+static int onenand_buffer_address(int dataram1, int sectors, int count)
+{
+ int bsa, bsc;
+
+ /* BufferRAM Sector Address */
+ bsa = sectors & ONENAND_BSA_MASK;
+
+ if (dataram1)
+ bsa |= ONENAND_BSA_DATARAM1; /* DataRAM1 */
+ else
+ bsa |= ONENAND_BSA_DATARAM0; /* DataRAM0 */
+
+ /* BufferRAM Sector Count */
+ bsc = count & ONENAND_BSC_MASK;
+
+ return ((bsa << ONENAND_BSA_SHIFT) | bsc);
+}
+
+/**
+ * flexonenand_block - Return block number for flash address
+ * @param this - OneNAND device structure
+ * @param addr - Address for which block number is needed
+ */
+static unsigned int flexonenand_block(struct onenand_chip *this, loff_t addr)
+{
+ unsigned int boundary, blk, die = 0;
+
+ if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
+ die = 1;
+ addr -= this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+
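+	/* SLC blocks are half the size of MLC blocks, hence erase_shift - 1 */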
+ blk = addr >> (this->erase_shift - 1);
+ if (blk > boundary)
+ blk = (blk + boundary + 1) >> 1;
+
+ blk += die ? this->density_mask : 0;
+ return blk;
+}
+
+unsigned int onenand_block(struct onenand_chip *this, loff_t addr)
+{
+ if (!FLEXONENAND(this))
+ return addr >> this->erase_shift;
+ return flexonenand_block(this, addr);
+}
+
+/**
+ * flexonenand_addr - Return address of the block
+ * @this: OneNAND device structure
+ * @block: Block number on Flex-OneNAND
+ *
+ * Return address of the block
+ */
+static loff_t flexonenand_addr(struct onenand_chip *this, int block)
+{
+ loff_t ofs = 0;
+ int die = 0, boundary;
+
+ if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
+ block -= this->density_mask;
+ die = 1;
+ ofs = this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+ ofs += (loff_t) block << (this->erase_shift - 1);
+ if (block > (boundary + 1))
+ ofs += (loff_t) (block - boundary - 1)
+ << (this->erase_shift - 1);
+ return ofs;
+}
+
+loff_t onenand_addr(struct onenand_chip *this, int block)
+{
+ if (!FLEXONENAND(this))
+ return (loff_t) block << this->erase_shift;
+ return flexonenand_addr(this, block);
+}
+
+/**
+ * flexonenand_region - [Flex-OneNAND] Return erase region of addr
+ * @param mtd MTD device structure
+ * @param addr address whose erase region needs to be identified
+ */
+int flexonenand_region(struct mtd_info *mtd, loff_t addr)
+{
+ int i;
+
+ for (i = 0; i < mtd->numeraseregions; i++)
+ if (addr < mtd->eraseregions[i].offset)
+ break;
+ return i - 1;
+}
+
+/**
+ * onenand_get_density - [DEFAULT] Get OneNAND density
+ * @param dev_id OneNAND device ID
+ *
+ * Get OneNAND density from device ID
+ */
+static inline int onenand_get_density(int dev_id)
+{
+ int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ return (density & ONENAND_DEVICE_DENSITY_MASK);
+}
+
+/**
+ * onenand_command - [DEFAULT] Send command to OneNAND device
+ * @param mtd MTD device structure
+ * @param cmd the command to be sent
+ * @param addr offset to read from or write to
+ * @param len number of bytes to read or write
+ *
+ * Send command to OneNAND device. This function is used for middle/large page
+ * devices (1KB/2KB page size)
+ */
+static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int value;
+ int block, page;
+
+ /* Now we use page size operation */
+ int sectors = 0, count = 0;
+
+ /* Address translation */
+ switch (cmd) {
+ case ONENAND_CMD_UNLOCK:
+ case ONENAND_CMD_LOCK:
+ case ONENAND_CMD_LOCK_TIGHT:
+ case ONENAND_CMD_UNLOCK_ALL:
+ block = -1;
+ page = -1;
+ break;
+
+ case FLEXONENAND_CMD_PI_ACCESS:
+ /* addr contains die index */
+ block = addr * this->density_mask;
+ page = -1;
+ break;
+
+ case ONENAND_CMD_ERASE:
+ case ONENAND_CMD_BUFFERRAM:
+ block = onenand_block(this, addr);
+ page = -1;
+ break;
+
+ case FLEXONENAND_CMD_READ_PI:
+ cmd = ONENAND_CMD_READ;
+ block = addr * this->density_mask;
+ page = 0;
+ break;
+
+ default:
+ block = onenand_block(this, addr);
+ page = (int) (addr
+ - onenand_addr(this, block)) >> this->page_shift;
+ page &= this->page_mask;
+ break;
+ }
+
+ /* NOTE: The setting order of the registers is very important! */
+ if (cmd == ONENAND_CMD_BUFFERRAM) {
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value,
+ this->base + ONENAND_REG_START_ADDRESS2);
+
+ if (ONENAND_IS_MLC(this))
+ ONENAND_SET_BUFFERRAM0(this);
+ else
+ /* Switch to the next data buffer */
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ return 0;
+ }
+
+ if (block != -1) {
+ /* Write 'DFS, FBA' of Flash */
+ value = onenand_block_address(this, block);
+ this->write_word(value,
+ this->base + ONENAND_REG_START_ADDRESS1);
+
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value,
+ this->base + ONENAND_REG_START_ADDRESS2);
+ }
+
+ if (page != -1) {
+ int dataram;
+
+ switch (cmd) {
+ case FLEXONENAND_CMD_RECOVER_LSB:
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ if (ONENAND_IS_MLC(this))
+ dataram = ONENAND_SET_BUFFERRAM0(this);
+ else
+ dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ break;
+
+ default:
+ dataram = ONENAND_CURRENT_BUFFERRAM(this);
+ break;
+ }
+
+ /* Write 'FPA, FSA' of Flash */
+ value = onenand_page_address(page, sectors);
+ this->write_word(value,
+ this->base + ONENAND_REG_START_ADDRESS8);
+
+ /* Write 'BSA, BSC' of DataRAM */
+ value = onenand_buffer_address(dataram, sectors, count);
+ this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
+ }
+
+ /* Interrupt clear */
+ this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
+ /* Write command */
+ this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
+
+ return 0;
+}
+
+/**
+ * onenand_read_ecc - return ecc status
+ * @param this onenand chip structure
+ */
+static int onenand_read_ecc(struct onenand_chip *this)
+{
+ int ecc, i;
+
+ if (!FLEXONENAND(this))
+ return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+
+ for (i = 0; i < 4; i++) {
+ ecc = this->read_word(this->base
+ + ((ONENAND_REG_ECC_STATUS + i) << 1));
+ if (likely(!ecc))
+ continue;
+ if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
+ return ONENAND_ECC_2BIT_ALL;
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_wait - [DEFAULT] wait until the command is done
+ * @param mtd MTD device structure
+ * @param state state to select the max. timeout value
+ *
+ * Wait for command done. This applies to all OneNAND commands.
+ * Read can take up to 30us, erase up to 2ms and program up to 350us
+ * according to the general OneNAND specs.
+ */
+static int onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int flags = ONENAND_INT_MASTER;
+ unsigned int interrupt = 0;
+ unsigned int ctrl;
+
+ while (1) {
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ if (interrupt & flags)
+ break;
+ }
+
+ ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+
+ if (interrupt & ONENAND_INT_READ) {
+ int ecc = onenand_read_ecc(this);
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk("onenand_wait: ECC error = 0x%04x\n", ecc);
+ return -EBADMSG;
+ }
+ }
+
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ printk("onenand_wait: controller error = 0x%04x\n", ctrl);
+ if (ctrl & ONENAND_CTRL_LOCK)
+ printk("onenand_wait: it's locked error = 0x%04x\n",
+ ctrl);
+
+ return -EIO;
+ }
+
+
+ return 0;
+}
+
+/**
+ * onenand_bufferram_offset - [DEFAULT] BufferRAM offset
+ * @param mtd MTD data structure
+ * @param area BufferRAM area
+ * @return offset of the given area
+ *
+ * Return the BufferRAM offset for the given area
+ */
+static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ if (area == ONENAND_DATARAM)
+ return mtd->writesize;
+ if (area == ONENAND_SPARERAM)
+ return mtd->oobsize;
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_read_bufferram - [OneNAND Interface] Read the bufferram area
+ * @param mtd MTD data structure
+ * @param area BufferRAM area
+ * @param buffer the databuffer to put/get data
+ * @param offset offset to read from or write to
+ * @param count number of bytes to read/write
+ *
+ * Read the BufferRAM area
+ */
+static int onenand_read_bufferram(struct mtd_info *mtd, loff_t addr, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ memcpy_16(buffer, bufferram + offset, count);
+
+ return 0;
+}
+
+/**
+ * onenand_sync_read_bufferram - [OneNAND Interface] Read the bufferram area with Sync. Burst mode
+ * @param mtd MTD data structure
+ * @param area BufferRAM area
+ * @param buffer the databuffer to put/get data
+ * @param offset offset to read from or write to
+ * @param count number of bytes to read/write
+ *
+ * Read the BufferRAM area with Sync. Burst Mode
+ */
+static int onenand_sync_read_bufferram(struct mtd_info *mtd, loff_t addr, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ this->mmcontrol(mtd, ONENAND_SYS_CFG1_SYNC_READ);
+
+ memcpy_16(buffer, bufferram + offset, count);
+
+ this->mmcontrol(mtd, 0);
+
+ return 0;
+}
+
+/**
+ * onenand_write_bufferram - [OneNAND Interface] Write the bufferram area
+ * @param mtd MTD data structure
+ * @param area BufferRAM area
+ * @param buffer the databuffer to put/get data
+ * @param offset offset to read from or write to
+ * @param count number of bytes to read/write
+ *
+ * Write the BufferRAM area
+ */
+static int onenand_write_bufferram(struct mtd_info *mtd, loff_t addr, int area,
+ const unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ memcpy_16(bufferram + offset, buffer, count);
+
+ return 0;
+}
+
+/**
+ * onenand_get_2x_blockpage - [GENERIC] Get blockpage at 2x program mode
+ * @param mtd MTD data structure
+ * @param addr address to check
+ * @return blockpage address
+ *
+ * Get blockpage address at 2x program mode
+ */
+static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage, block, page;
+
+ /* Calculate the even block number */
+ block = (int) (addr >> this->erase_shift) & ~1;
+ /* Is it the odd plane? */
+ if (addr & this->writesize)
+ block++;
+ page = (int) (addr >> (this->page_shift + 1)) & this->page_mask;
+ blockpage = (block << 7) | page;
+
+ return blockpage;
+}
+
+/**
+ * onenand_check_bufferram - [GENERIC] Check BufferRAM information
+ * @param mtd MTD data structure
+ * @param addr address to check
+ * @return 1 if there is valid data, otherwise 0
+ *
+ * Check whether the BufferRAM already holds the data we need
+ */
+static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage, found = 0;
+ unsigned int i;
+
+#ifdef CONFIG_S3C64XX
+ return 0;
+#endif
+
+ if (ONENAND_IS_2PLANE(this))
+ blockpage = onenand_get_2x_blockpage(mtd, addr);
+ else
+ blockpage = (int) (addr >> this->page_shift);
+
+ /* Is there valid data? */
+ i = ONENAND_CURRENT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage)
+ found = 1;
+ else {
+ /* Check another BufferRAM */
+ i = ONENAND_NEXT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage) {
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ found = 1;
+ }
+ }
+
+ if (found && ONENAND_IS_DDP(this)) {
+ /* Select DataRAM for DDP */
+ int block = onenand_block(this, addr);
+ int value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ }
+
+ return found;
+}
+
+/**
+ * onenand_update_bufferram - [GENERIC] Update BufferRAM information
+ * @param mtd MTD data structure
+ * @param addr address to update
+ * @param valid valid flag
+ *
+ * Update BufferRAM information
+ */
+static int onenand_update_bufferram(struct mtd_info *mtd, loff_t addr,
+ int valid)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage;
+ unsigned int i;
+
+ if (ONENAND_IS_2PLANE(this))
+ blockpage = onenand_get_2x_blockpage(mtd, addr);
+ else
+ blockpage = (int)(addr >> this->page_shift);
+
+ /* Invalidate another BufferRAM */
+ i = ONENAND_NEXT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage)
+ this->bufferram[i].blockpage = -1;
+
+ /* Update BufferRAM */
+ i = ONENAND_CURRENT_BUFFERRAM(this);
+ if (valid)
+ this->bufferram[i].blockpage = blockpage;
+ else
+ this->bufferram[i].blockpage = -1;
+
+ return 0;
+}
+
+/**
+ * onenand_invalidate_bufferram - [GENERIC] Invalidate BufferRAM information
+ * @param mtd MTD data structure
+ * @param addr start address to invalidate
+ * @param len length to invalidate
+ *
+ * Invalidate BufferRAM information
+ */
+static void onenand_invalidate_bufferram(struct mtd_info *mtd, loff_t addr,
+ unsigned int len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+ loff_t end_addr = addr + len;
+
+ /* Invalidate BufferRAM */
+ for (i = 0; i < MAX_BUFFERRAM; i++) {
+ loff_t buf_addr = this->bufferram[i].blockpage << this->page_shift;
+
+ if (buf_addr >= addr && buf_addr < end_addr)
+ this->bufferram[i].blockpage = -1;
+ }
+}
+
+/**
+ * onenand_get_device - [GENERIC] Get chip for selected access
+ * @param mtd MTD device structure
+ * @param new_state the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static void onenand_get_device(struct mtd_info *mtd, int new_state)
+{
+ /* Do nothing */
+}
+
+/**
+ * onenand_release_device - [GENERIC] release chip
+ * @param mtd MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device
+ */
+static void onenand_release_device(struct mtd_info *mtd)
+{
+ /* Do nothing */
+}
+
+/**
+ * onenand_transfer_auto_oob - [Internal] oob auto-placement transfer
+ * @param mtd MTD device structure
+ * @param buf destination address
+ * @param column oob offset to read from
+ * @param thislen oob length to read
+ */
+static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf,
+ int column, int thislen)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct nand_oobfree *free;
+ int readcol = column;
+ int readend = column + thislen;
+ int lastgap = 0;
+ unsigned int i;
+ uint8_t *oob_buf = this->oob_buf;
+
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
+ if (readcol >= lastgap)
+ readcol += free->offset - lastgap;
+ if (readend >= lastgap)
+ readend += free->offset - lastgap;
+ lastgap = free->offset + free->length;
+ }
+ this->read_bufferram(mtd, 0, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
+ int free_end = free->offset + free->length;
+ if (free->offset < readend && free_end > readcol) {
+ int st = max_t(int,free->offset,readcol);
+ int ed = min_t(int,free_end,readend);
+ int n = ed - st;
+ memcpy(buf, oob_buf + st, n);
+ buf += n;
+ } else if (column == 0)
+ break;
+ }
+ return 0;
+}
+
+/**
+ * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
+ * @param mtd MTD device structure
+ * @param addr address to recover
+ * @param status return value from onenand_wait
+ *
+ * An MLC NAND flash cell has paired pages - an LSB page and an MSB page. The
+ * LSB page has the lower page address and the MSB page the higher one. If a
+ * power loss occurs while the MSB page is being programmed, the paired LSB
+ * page data can become corrupt. LSB page recovery read is a way to read the
+ * LSB page even though its data is corrupted. When an uncorrectable error
+ * occurs on an LSB page read after power up, issue an LSB page recovery read.
+ */
+static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+
+ /* Recovery is only for Flex-OneNAND */
+ if (!FLEXONENAND(this))
+ return status;
+
+ /* check if we failed due to uncorrectable error */
+ if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
+ return status;
+
+ /* check if address lies in MLC region */
+ i = flexonenand_region(mtd, addr);
+ if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
+ return status;
+
+ printk("onenand_recover_lsb:"
+ "Attempting to recover from uncorrectable read\n");
+
+ /* Issue the LSB page recovery command */
+ this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
+ return this->wait(mtd, FL_READING);
+}
+
+/**
+ * onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * OneNAND read main and/or out-of-band data
+ */
+static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0, boundary = 0;
+ int writesize = this->writesize;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if ((from + len) > mtd->size) {
+ printk(KERN_ERR "onenand_read_ops_nolock: Attempt read beyond end of device\n");
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ /* Read-while-load method */
+ /* Note: We can't use this feature in MLC */
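+	/* i.e. start loading the next page while the previous one is copied out of BufferRAM */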
+
+ /* Do first load to bufferRAM */
+ if (read < len) {
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->main_buf = buf;
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
+ }
+ }
+
+ thislen = min_t(int, writesize, len - read);
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ while (!ret) {
+ /* If there is more to load then start next load */
+ from += thislen;
+ if (!ONENAND_IS_MLC(this) && read + thislen < len) {
+ this->main_buf = buf + thislen;
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+ /*
+ * Chip boundary handling in DDP
+ * Now we issued chip 1 read and pointed chip 1
+ * bufferam so we have to point chip 0 bufferam.
+ */
+ if (ONENAND_IS_DDP(this) &&
+ unlikely(from == (this->chipsize >> 1))) {
+ this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
+ boundary = 1;
+ } else
+ boundary = 0;
+ ONENAND_SET_PREV_BUFFERRAM(this);
+ }
+
+ /* While load is going, read from last bufferRAM */
+ this->read_bufferram(mtd, from - thislen, ONENAND_DATARAM, buf, column, thislen);
+
+ /* Read oob area if needed */
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, 0, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ if (ONENAND_IS_MLC(this) && (read + thislen < len)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
+ }
+
+ /* See if we are done */
+ read += thislen;
+ if (read == len)
+ break;
+ /* Set up for next read from bufferRAM */
+ if (unlikely(boundary))
+ this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
+ if (!ONENAND_IS_MLC(this))
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ buf += thislen;
+ thislen = min_t(int, writesize, len - read);
+ column = 0;
+
+ if (!ONENAND_IS_MLC(this)) {
+ /* Now wait for load */
+ ret = this->wait(mtd, FL_READING);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
+ }
+ }
+
+ /*
+	 * Return success if there were no ECC failures, else -EBADMSG;
+	 * the fs driver will take care of that, because
+	 * retlen == desired len and result == -EBADMSG
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
+ * onenand_read_oob_nolock - [MTD Interface] OneNAND read out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * OneNAND read out-of-band data from the spare area
+ */
+static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ int read = 0, thislen, column, oobsize;
+ size_t len = ops->ooblen;
+ mtd_oob_mode_t mode = ops->mode;
+ u_char *buf = ops->oobbuf;
+ int ret = 0, readcmd;
+
+ from += ops->ooboffs;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_read_oob_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+
+ /* Initialize return length value */
+ ops->oobretlen = 0;
+
+ if (mode == MTD_OOB_AUTO)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ column = from & (mtd->oobsize - 1);
+
+ if (unlikely(column >= oobsize)) {
+ printk(KERN_ERR "onenand_read_oob_nolock: Attempted to start read outside oob\n");
+ return -EINVAL;
+ }
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size ||
+ column + len > ((mtd->size >> this->page_shift) -
+ (from >> this->page_shift)) * oobsize)) {
+ printk(KERN_ERR "onenand_read_oob_nolock: Attempted to read beyond end of device\n");
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ while (read < len) {
+ thislen = oobsize - column;
+ thislen = min_t(int, thislen, len);
+
+ this->spare_buf = buf;
+ this->command(mtd, readcmd, from, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, from, 0);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
+ if (ret && ret != -EBADMSG) {
+ printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
+ break;
+ }
+
+ if (mode == MTD_OOB_AUTO)
+ onenand_transfer_auto_oob(mtd, buf, column, thislen);
+ else
+ this->read_bufferram(mtd, 0, ONENAND_SPARERAM, buf, column, thislen);
+
+ read += thislen;
+
+ if (read == len)
+ break;
+
+ buf += thislen;
+
+ /* Read more? */
+ if (read < len) {
+ /* Page size */
+ from += mtd->writesize;
+ column = 0;
+ }
+ }
+
+ ops->oobretlen = read;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return 0;
+}
+
+/**
+ * onenand_read - [MTD Interface] MTD compatibility function for onenand_read_ecc
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param len number of bytes to read
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put data
+ *
+ * This function simply wraps onenand_read_ops_nolock() with no oob buffer
+*/
+int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
+{
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .ooblen = 0,
+ .datbuf = buf,
+ .oobbuf = NULL,
+ };
+ int ret;
+
+ onenand_get_device(mtd, FL_READING);
+ ret = onenand_read_ops_nolock(mtd, from, &ops);
+ onenand_release_device(mtd);
+
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/**
+ * onenand_read_oob - [MTD Interface] OneNAND read out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operations description structure
+ *
+ * Read OneNAND main and/or out-of-band data
+ */
+int onenand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int ret;
+
+ switch (ops->mode) {
+ case MTD_OOB_PLACE:
+ case MTD_OOB_AUTO:
+ break;
+ case MTD_OOB_RAW:
+ /* Not implemented yet */
+ default:
+ return -EINVAL;
+ }
+
+ onenand_get_device(mtd, FL_READING);
+ if (ops->datbuf)
+ ret = onenand_read_ops_nolock(mtd, from, ops);
+ else
+ ret = onenand_read_oob_nolock(mtd, from, ops);
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_bbt_wait - [DEFAULT] wait until the command is done
+ * @param mtd MTD device structure
+ * @param state state to select the max. timeout value
+ *
+ * Wait for command done.
+ */
+static int onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int flags = ONENAND_INT_MASTER;
+ unsigned int interrupt;
+ unsigned int ctrl;
+
+ while (1) {
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ if (interrupt & flags)
+ break;
+ }
+
+ /* To get correct interrupt status in timeout case */
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+
+ if (interrupt & ONENAND_INT_READ) {
+ int ecc = onenand_read_ecc(this);
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
+ ", controller = 0x%04x\n", ecc, ctrl);
+ return ONENAND_BBT_READ_ERROR;
+ }
+ } else {
+ printk(KERN_ERR "onenand_bbt_wait: read timeout!"
+ "ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
+ return ONENAND_BBT_READ_FATAL_ERROR;
+ }
+
+ /* Initial bad block case: 0x2400 or 0x0400 */
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl);
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_bbt_read_oob - [MTD Interface] OneNAND read out-of-band for bbt scan
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * OneNAND read out-of-band data from the spare area for bbt scan
+ */
+int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int read = 0, thislen, column;
+ int ret = 0, readcmd;
+ size_t len = ops->ooblen;
+ u_char *buf = ops->oobbuf;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_bbt_read_oob: from = 0x%08x, len = %zi\n", (unsigned int) from, len);
+
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ /* Initialize return value */
+ ops->oobretlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (unlikely((from + len) > mtd->size)) {
+ printk(KERN_ERR "onenand_bbt_read_oob: Attempt read beyond end of device\n");
+ return ONENAND_BBT_READ_FATAL_ERROR;
+ }
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_READING);
+
+ column = from & (mtd->oobsize - 1);
+
+ while (read < len) {
+
+ thislen = mtd->oobsize - column;
+ thislen = min_t(int, thislen, len);
+
+ this->spare_buf = buf;
+ this->command(mtd, readcmd, from, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, from, 0);
+
+ ret = this->bbt_wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
+ if (ret)
+ break;
+
+ this->read_bufferram(mtd, 0, ONENAND_SPARERAM, buf, column, thislen);
+ read += thislen;
+ if (read == len)
+ break;
+
+ buf += thislen;
+
+ /* Read more? */
+ if (read < len) {
+ /* Update Page size */
+ from += this->writesize;
+ column = 0;
+ }
+ }
+
+ /* Deselect and wake up anyone waiting on the device */
+ onenand_release_device(mtd);
+
+ ops->oobretlen = read;
+ return ret;
+}
+
+
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+/**
+ * onenand_verify_oob - [GENERIC] verify the oob contents after a write
+ * @param mtd MTD device structure
+ * @param buf the databuffer to verify
+ * @param to offset to read from
+ */
+static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
+{
+ struct onenand_chip *this = mtd->priv;
+ u_char *oob_buf = this->oob_buf;
+ int status, i, readcmd;
+
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ this->command(mtd, readcmd, to, mtd->oobsize);
+ onenand_update_bufferram(mtd, to, 0);
+ status = this->wait(mtd, FL_READING);
+ if (status)
+ return status;
+
+ this->read_bufferram(mtd, 0, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
+ for (i = 0; i < mtd->oobsize; i++)
+ if (buf[i] != 0xFF && buf[i] != oob_buf[i])
+ return -EBADMSG;
+
+ return 0;
+}
+
+/**
+ * onenand_verify - [GENERIC] verify the chip contents after a write
+ * @param mtd MTD device structure
+ * @param buf the databuffer to verify
+ * @param addr offset to read from
+ * @param len number of bytes to read and compare
+ */
+static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *dataram;
+ int ret = 0;
+ int thislen, column;
+
+ while (len != 0) {
+ thislen = min_t(int, this->writesize, len);
+ column = addr & (this->writesize - 1);
+ if (column + thislen > this->writesize)
+ thislen = this->writesize - column;
+
+ this->command(mtd, ONENAND_CMD_READ, addr, this->writesize);
+
+ onenand_update_bufferram(mtd, addr, 0);
+
+ ret = this->wait(mtd, FL_READING);
+ if (ret)
+ return ret;
+
+ onenand_update_bufferram(mtd, addr, 1);
+
+ dataram = this->base + ONENAND_DATARAM;
+ dataram += onenand_bufferram_offset(mtd, ONENAND_DATARAM);
+
+ if (memcmp(buf, dataram + column, thislen))
+ return -EBADMSG;
+
+ len -= thislen;
+ buf += thislen;
+ addr += thislen;
+ }
+
+ return 0;
+}
+#else
+#define onenand_verify(...) (0)
+#define onenand_verify_oob(...) (0)
+#endif
+
+#define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0)
+
+/**
+ * onenand_fill_auto_oob - [Internal] oob auto-placement transfer
+ * @param mtd MTD device structure
+ * @param oob_buf oob buffer
+ * @param buf source address
+ * @param column oob offset to write to
+ * @param thislen oob length to write
+ */
+static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
+ const u_char *buf, int column, int thislen)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct nand_oobfree *free;
+ int writecol = column;
+ int writeend = column + thislen;
+ int lastgap = 0;
+ unsigned int i;
+
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
+ if (writecol >= lastgap)
+ writecol += free->offset - lastgap;
+ if (writeend >= lastgap)
+ writeend += free->offset - lastgap;
+ lastgap = free->offset + free->length;
+ }
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
+ int free_end = free->offset + free->length;
+ if (free->offset < writeend && free_end > writecol) {
+ int st = max_t(int,free->offset,writecol);
+ int ed = min_t(int,free_end,writeend);
+ int n = ed - st;
+ memcpy(oob_buf + st, buf, n);
+ buf += n;
+ } else if (column == 0)
+ break;
+ }
+ return 0;
+}
+
+/**
+ * onenand_write_ops_nolock - [OneNAND Interface] write main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param ops oob operation description structure
+ *
+ * Write main and/or oob with ECC
+ */
+static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int written = 0, column, thislen, subpage;
+ int oobwritten = 0, oobcolumn, thisooblen, oobsize;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ const u_char *buf = ops->datbuf;
+ const u_char *oob = ops->oobbuf;
+ u_char *oobbuf;
+ int ret = 0;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_write_ops_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+
+ /* Initialize retlen, in case of early exit */
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ /* Do not allow writes past end of device */
+ if (unlikely((to + len) > mtd->size)) {
+ printk(KERN_ERR "onenand_write_ops_nolock: Attempt write to past end of device\n");
+ return -EINVAL;
+ }
+
+	/* Reject writes which are not page aligned */
+ if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
+ printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n");
+ return -EINVAL;
+ }
+
+ if (ops->mode == MTD_OOB_AUTO)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = to & (mtd->oobsize - 1);
+
+ column = to & (mtd->writesize - 1);
+
+	/* Loop until all data is written */
+ while (written < len) {
+ u_char *wbuf = (u_char *) buf;
+
+ thislen = min_t(int, mtd->writesize - column, len - written);
+ thisooblen = min_t(int, oobsize - oobcolumn, ooblen - oobwritten);
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
+
+ /* Partial page write */
+ subpage = thislen < mtd->writesize;
+ if (subpage) {
+ memset(this->page_buf, 0xff, mtd->writesize);
+ memcpy(this->page_buf + column, buf, thislen);
+ wbuf = this->page_buf;
+ }
+
+ this->write_bufferram(mtd, to, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
+
+ if (oob) {
+ oobbuf = this->oob_buf;
+
+ /* We send data to spare ram with oobsize
+ * to prevent byte access */
+ memset(oobbuf, 0xff, mtd->oobsize);
+ if (ops->mode == MTD_OOB_AUTO)
+ onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
+ else
+ memcpy(oobbuf + oobcolumn, oob, thisooblen);
+
+ oobwritten += thisooblen;
+ oob += thisooblen;
+ oobcolumn = 0;
+ } else
+ oobbuf = (u_char *) ffchars;
+
+ this->write_bufferram(mtd, 0, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+
+ this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+
+ ret = this->wait(mtd, FL_WRITING);
+
+ /* In partial page write we don't update bufferram */
+ onenand_update_bufferram(mtd, to, !ret && !subpage);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage);
+ }
+
+ if (ret) {
+ printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret);
+ break;
+ }
+
+ /* Verification only happens when verify-write support is enabled */
+ ret = onenand_verify(mtd, buf, to, thislen);
+ if (ret) {
+ printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
+ break;
+ }
+
+ written += thislen;
+
+ if (written == len)
+ break;
+
+ column = 0;
+ to += thislen;
+ buf += thislen;
+ }
+
+ ops->retlen = written;
+
+ return ret;
+}
+
+/**
+ * onenand_write_oob_nolock - [Internal] OneNAND write out-of-band
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param len number of bytes to write
+ * @param retlen pointer to variable to store the number of written bytes
+ * @param buf the data to write
+ * @param mode operation mode
+ *
+ * OneNAND write out-of-band
+ */
+static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int column, ret = 0, oobsize;
+ int written = 0, oobcmd;
+ u_char *oobbuf;
+ size_t len = ops->ooblen;
+ const u_char *buf = ops->oobbuf;
+ mtd_oob_mode_t mode = ops->mode;
+
+ to += ops->ooboffs;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+
+ /* Initialize retlen, in case of early exit */
+ ops->oobretlen = 0;
+
+ if (mode == MTD_OOB_AUTO)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ column = to & (mtd->oobsize - 1);
+
+ if (unlikely(column >= oobsize)) {
+ printk(KERN_ERR "onenand_write_oob_nolock: Attempted to start write outside oob\n");
+ return -EINVAL;
+ }
+
+ /* For compatibility with NAND: Do not allow write past end of page */
+ if (unlikely(column + len > oobsize)) {
+ printk(KERN_ERR "onenand_write_oob_nolock: "
+ "Attempt to write past end of page\n");
+ return -EINVAL;
+ }
+
+ /* Do not allow writes past end of device */
+ if (unlikely(to >= mtd->size ||
+ column + len > ((mtd->size >> this->page_shift) -
+ (to >> this->page_shift)) * oobsize)) {
+ printk(KERN_ERR "onenand_write_oob_nolock: Attempted to write past end of device\n");
+ return -EINVAL;
+ }
+
+ oobbuf = this->oob_buf;
+
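+ /*
+ * MLC parts apparently cannot program the spare area on its own, so a
+ * full page program command is used and the main DataRAM is padded
+ * with 0xff below.
+ */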
+ oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+
+ /* Loop until all data is written */
+ while (written < len) {
+ int thislen = min_t(int, oobsize, len - written);
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize);
+
+ /* We send data to spare ram with oobsize
+ * to prevent byte access */
+ memset(oobbuf, 0xff, mtd->oobsize);
+ if (mode == MTD_OOB_AUTO)
+ onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
+ else
+ memcpy(oobbuf + column, buf, thislen);
+ this->write_bufferram(mtd, 0, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+
+ if (ONENAND_IS_MLC(this)) {
+ /* Set main area of DataRAM to 0xff*/
+ memset(this->page_buf, 0xff, mtd->writesize);
+ this->write_bufferram(mtd, 0, ONENAND_DATARAM,
+ this->page_buf, 0, mtd->writesize);
+ }
+
+ this->command(mtd, oobcmd, to, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, to, 0);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, 0);
+ }
+
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "onenand_write_oob_nolock: write failed %d\n", ret);
+ break;
+ }
+
+ ret = onenand_verify_oob(mtd, oobbuf, to);
+ if (ret) {
+ printk(KERN_ERR "onenand_write_oob_nolock: verify failed %d\n", ret);
+ break;
+ }
+
+ written += thislen;
+ if (written == len)
+ break;
+
+ to += mtd->writesize;
+ buf += thislen;
+ column = 0;
+ }
+
+ ops->oobretlen = written;
+
+ return ret;
+}
+
+/**
+ * onenand_write - [MTD Interface] compatibility function for onenand_write_ecc
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param len number of bytes to write
+ * @param retlen pointer to variable to store the number of written bytes
+ * @param buf the data to write
+ *
+ * Write with ECC
+ */
+int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .ooblen = 0,
+ .datbuf = (u_char *) buf,
+ .oobbuf = NULL,
+ };
+ int ret;
+
+ onenand_get_device(mtd, FL_WRITING);
+ ret = onenand_write_ops_nolock(mtd, to, &ops);
+ onenand_release_device(mtd);
+
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/**
+ * onenand_write_oob - [MTD Interface] OneNAND write out-of-band
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param ops oob operation description structure
+ *
+ * OneNAND write main and/or out-of-band
+ */
+int onenand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ret;
+
+ switch (ops->mode) {
+ case MTD_OOB_PLACE:
+ case MTD_OOB_AUTO:
+ break;
+ case MTD_OOB_RAW:
+ /* Not implemented yet */
+ default:
+ return -EINVAL;
+ }
+
+ onenand_get_device(mtd, FL_WRITING);
+ if (ops->datbuf)
+ ret = onenand_write_ops_nolock(mtd, to, ops);
+ else
+ ret = onenand_write_oob_nolock(mtd, to, ops);
+ onenand_release_device(mtd);
+
+ return ret;
+
+}
+
+/**
+ * onenand_block_isbad_nolock - [GENERIC] Check if a block is marked bad
+ * @param mtd MTD device structure
+ * @param ofs offset from device start
+ * @param allowbbt 1, if it is allowed to access the bbt area
+ *
+ * Check if the block is bad, either by reading the bad block table or
+ * by calling the scan function.
+ */
+static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allowbbt)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+
+ /* Return info from the table */
+ return bbm->isbad_bbt(mtd, ofs, allowbbt);
+}
+
+
+/**
+ * onenand_erase - [MTD Interface] erase block(s)
+ * @param mtd MTD device structure
+ * @param instr erase instruction
+ *
+ * Erase one or more blocks
+ */
+int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int block_size;
+ loff_t addr = instr->addr;
+ unsigned int len = instr->len;
+ int ret = 0, i;
+ struct mtd_erase_region_info *region = NULL;
+ unsigned int region_end = 0;
+
+ MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%08x, len = %i\n",
+ (unsigned int) addr, len);
+
+ /* Do not allow erase past end of device */
+ if (unlikely((len + addr) > mtd->size)) {
+ MTDDEBUG(MTD_DEBUG_LEVEL0, "onenand_erase:"
+ "Erase past end of device\n");
+ return -EINVAL;
+ }
+
+ if (FLEXONENAND(this)) {
+ /* Find the eraseregion of this address */
+ i = flexonenand_region(mtd, addr);
+ region = &mtd->eraseregions[i];
+
+ block_size = region->erasesize;
+ region_end = region->offset
+ + region->erasesize * region->numblocks;
+
+ /* Start address within region must align on block boundary.
+ * Erase region's start offset is always block start address.
+ */
+ if (unlikely((addr - region->offset) & (block_size - 1))) {
+ MTDDEBUG(MTD_DEBUG_LEVEL0, "onenand_erase:"
+ " Unaligned address\n");
+ return -EINVAL;
+ }
+ } else {
+ block_size = 1 << this->erase_shift;
+
+ /* Start address must align on block boundary */
+ if (unlikely(addr & (block_size - 1))) {
+ MTDDEBUG(MTD_DEBUG_LEVEL0, "onenand_erase:"
+ "Unaligned address\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Length must align on block boundary */
+ if (unlikely(len & (block_size - 1))) {
+ MTDDEBUG (MTD_DEBUG_LEVEL0,
+ "onenand_erase: Length not block aligned\n");
+ return -EINVAL;
+ }
+
+ instr->fail_addr = 0xffffffff;
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_ERASING);
+
+ /* Loop through the blocks */
+ instr->state = MTD_ERASING;
+
+ while (len) {
+
+ /* Check if we have a bad block; we do not erase bad blocks */
+ if (instr->priv == 0 && onenand_block_isbad_nolock(mtd, addr, 0)) {
+ printk(KERN_WARNING "onenand_erase: attempt to erase"
+ " a bad block at addr 0x%08x\n",
+ (unsigned int) addr);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
+
+ onenand_invalidate_bufferram(mtd, addr, block_size);
+
+ ret = this->wait(mtd, FL_ERASING);
+ /* Check, if it is write protected */
+ if (ret) {
+ if (ret == -EPERM)
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "onenand_erase: "
+ "Device is write protected!!!\n");
+ else
+ MTDDEBUG (MTD_DEBUG_LEVEL0, "onenand_erase: "
+ "Failed erase, block %d\n",
+ onenand_block(this, addr));
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = addr;
+
+ goto erase_exit;
+ }
+
+ len -= block_size;
+ addr += block_size;
+
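+ /* Flex-OneNAND only: crossing into the next erase region, which may
+ * have a different block size. */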
+ if (addr == region_end) {
+ if (!len)
+ break;
+ region++;
+
+ block_size = region->erasesize;
+ region_end = region->offset
+ + region->erasesize * region->numblocks;
+
+ if (len & (block_size - 1)) {
+ /* This has been checked at MTD
+ * partitioning level. */
+ printk("onenand_erase: Unaligned address\n");
+ goto erase_exit;
+ }
+ }
+ }
+
+ instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+ /* Do callback function */
+ if (!ret)
+ mtd_erase_callback(instr);
+
+ /* Deselect and wake up anyone waiting on the device */
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_sync - [MTD Interface] sync
+ * @param mtd MTD device structure
+ *
+ * Sync is actually a wait for chip ready function
+ */
+void onenand_sync(struct mtd_info *mtd)
+{
+ MTDDEBUG (MTD_DEBUG_LEVEL3, "onenand_sync: called\n");
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_SYNCING);
+
+ /* Release it and go back */
+ onenand_release_device(mtd);
+}
+
+/**
+ * onenand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ *
+ * Check whether the block is bad
+ */
+int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ /* Check for invalid offset */
+ if (ofs > mtd->size)
+ return -EINVAL;
+
+ onenand_get_device(mtd, FL_READING);
+ ret = onenand_block_isbad_nolock(mtd,ofs, 0);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_default_block_markbad - [DEFAULT] mark a block bad
+ * @param mtd MTD device structure
+ * @param ofs offset from device start
+ *
+ * This is the default implementation, which can be overridden by
+ * a hardware specific driver.
+ */
+static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ u_char buf[2] = {0, 0};
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OOB_PLACE,
+ .ooblen = 2,
+ .oobbuf = buf,
+ .ooboffs = 0,
+ };
+ int block;
+
+ /* Get block number */
+ block = onenand_block(this, ofs);
+ if (bbm->bbt)
+ bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+ /* We write two bytes, so we don't have to mess with 16-bit access */
+ ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
+ return onenand_write_oob_nolock(mtd, ofs, &ops);
+}
+
+/**
+ * onenand_block_markbad - [MTD Interface] Mark the block at the given offset as bad
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ *
+ * Mark the block as bad
+ */
+int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret;
+
+ ret = onenand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ ret = this->block_markbad(mtd, ofs);
+ return ret;
+}
+
+/**
+ * onenand_do_lock_cmd - [OneNAND Interface] Lock or unlock block(s)
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ * @param len number of bytes to lock or unlock
+ * @param cmd lock or unlock command
+ *
+ * Lock or unlock one or more blocks
+ */
+static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, block, value, status;
+ int wp_status_mask;
+
+ start = onenand_block(this, ofs);
+ end = onenand_block(this, ofs + len);
+
+ if (cmd == ONENAND_CMD_LOCK)
+ wp_status_mask = ONENAND_WP_LS;
+ else
+ wp_status_mask = ONENAND_WP_US;
+
+ /* Continuous lock scheme */
+ if (this->options & ONENAND_HAS_CONT_LOCK) {
+ /* Set start block address */
+ this->write_word(start,
+ this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Set end block address */
+ this->write_word(end - 1,
+ this->base + ONENAND_REG_END_BLOCK_ADDRESS);
+ /* Write lock/unlock command */
+ this->command(mtd, cmd, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_UNLOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & ONENAND_WP_US))
+ printk(KERN_ERR "wp status = 0x%x\n", status);
+
+ return 0;
+ }
+
+ /* Block lock scheme */
+ for (block = start; block < end; block++) {
+ /* Set block address */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+
+ /* Set start block address */
+ this->write_word(block,
+ this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_UNLOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & ONENAND_WP_US))
+ printk(KERN_ERR "block = %d, wp status = 0x%x\n",
+ block, status);
+ }
+
+ return 0;
+}
+
+#ifdef ONENAND_LINUX
+/**
+ * onenand_lock - [MTD Interface] Lock block(s)
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ * @param len number of bytes to lock
+ *
+ * Lock one or more blocks
+ */
+static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ int ret;
+
+ onenand_get_device(mtd, FL_LOCKING);
+ ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_unlock - [MTD Interface] Unlock block(s)
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ * @param len number of bytes to unlock
+ *
+ * Unlock one or more blocks
+ */
+static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ int ret;
+
+ onenand_get_device(mtd, FL_LOCKING);
+ ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+ onenand_release_device(mtd);
+ return ret;
+}
+#endif
+
+/**
+ * onenand_check_lock_status - [OneNAND Interface] Check lock status
+ * @param this onenand chip data structure
+ *
+ * Check lock status
+ */
+static int onenand_check_lock_status(struct onenand_chip *this)
+{
+ unsigned int value, block, status;
+ unsigned int end;
+
+ end = this->chipsize >> this->erase_shift;
+ for (block = 0; block < end; block++) {
+ /* Set block address */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ /* Set start block address */
+ this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & ONENAND_WP_US)) {
+ printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * onenand_unlock_all - [OneNAND Interface] unlock all blocks
+ * @param mtd MTD device structure
+ *
+ * Unlock all blocks
+ */
+static void onenand_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ size_t len = mtd->size;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Set start block address */
+ this->write_word(0, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Check lock status */
+ if (onenand_check_lock_status(this))
+ return;
+
+ /* Workaround for all block unlock in DDP */
+ if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
+ /* All blocks on another chip */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+ }
+
+ onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+}
+
+
+/**
+ * onenand_check_features - Check and set OneNAND features
+ * @param mtd MTD data structure
+ *
+ * Check and set OneNAND features
+ * - lock scheme
+ * - two plane
+ */
+static void onenand_check_features(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int density, process;
+
+ /* Lock scheme depends on density and process */
+ density = onenand_get_density(this->device_id);
+ process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
+
+ /* Lock scheme */
+ switch (density) {
+ case ONENAND_DEVICE_DENSITY_4Gb:
+ this->options |= ONENAND_HAS_2PLANE;
+
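+ /* Fall through: 4Gb parts also get the 2Gb and 1Gb features */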
+ case ONENAND_DEVICE_DENSITY_2Gb:
+ /* 2Gb DDP does not have 2-plane */
+ if (!ONENAND_IS_DDP(this))
+ this->options |= ONENAND_HAS_2PLANE;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+
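+ /* Fall through to the 1Gb checks */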
+ case ONENAND_DEVICE_DENSITY_1Gb:
+ /* A-Die has all block unlock */
+ if (process)
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ break;
+
+ default:
+ /* Some OneNAND parts have a continuous lock scheme */
+ if (!process)
+ this->options |= ONENAND_HAS_CONT_LOCK;
+ break;
+ }
+
+ if (ONENAND_IS_MLC(this))
+ this->options &= ~ONENAND_HAS_2PLANE;
+
+ if (FLEXONENAND(this)) {
+ this->options &= ~ONENAND_HAS_CONT_LOCK;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ }
+
+ if (this->options & ONENAND_HAS_CONT_LOCK)
+ printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
+ if (this->options & ONENAND_HAS_UNLOCK_ALL)
+ printk(KERN_DEBUG "Chip support all block unlock\n");
+ if (this->options & ONENAND_HAS_2PLANE)
+ printk(KERN_DEBUG "Chip has 2 plane\n");
+}
+
+/**
+ * onenand_print_device_info - Print device ID
+ * @param device device ID
+ * @param version version ID
+ *
+ * Print device ID
+ */
+char *onenand_print_device_info(int device, int version)
+{
+ int vcc, demuxed, ddp, density, flexonenand;
+ char *dev_info = malloc(80);
+ char *p = dev_info;
+
+ vcc = device & ONENAND_DEVICE_VCC_MASK;
+ demuxed = device & ONENAND_DEVICE_IS_DEMUX;
+ ddp = device & ONENAND_DEVICE_IS_DDP;
+ density = onenand_get_density(device);
+ flexonenand = device & DEVICE_IS_FLEXONENAND;
+ p += sprintf(dev_info, "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)",
+ demuxed ? "" : "Muxed ",
+ flexonenand ? "Flex-" : "",
+ ddp ? "(DDP)" : "",
+ (16 << density), vcc ? "2.65/3.3" : "1.8", device);
+
+ sprintf(p, "\nOneNAND version = 0x%04x", version);
+ printk("%s\n", dev_info);
+
+ return dev_info;
+}
+
+static const struct onenand_manufacturers onenand_manuf_ids[] = {
+ {ONENAND_MFR_NUMONYX, "Numonyx"},
+ {ONENAND_MFR_SAMSUNG, "Samsung"},
+};
+
+/**
+ * onenand_check_maf - Check manufacturer ID
+ * @param manuf manufacturer ID
+ *
+ * Check manufacturer ID
+ */
+static int onenand_check_maf(int manuf)
+{
+ int size = ARRAY_SIZE(onenand_manuf_ids);
+ char *name;
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (manuf == onenand_manuf_ids[i].id)
+ break;
+
+ if (i < size)
+ name = onenand_manuf_ids[i].name;
+ else
+ name = "Unknown";
+
+#ifdef ONENAND_DEBUG
+ printk(KERN_DEBUG "OneNAND Manufacturer: %s (0x%0x)\n", name, manuf);
+#endif
+
+ return i == size;
+}
+
+/**
+ * flexonenand_get_boundary - Reads the SLC boundary
+ * @param mtd - MTD device structure
+ *
+ * Fill up the boundary[] field in onenand_chip
+ */
+static int flexonenand_get_boundary(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int die, bdry;
+ int ret, syscfg, locked;
+
+ /* Disable ECC */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
+
+ for (die = 0; die < this->dies; die++) {
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ bdry = this->read_word(this->base + ONENAND_DATARAM);
+ if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
+ locked = 0;
+ else
+ locked = 1;
+ this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
+
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ ret = this->wait(mtd, FL_RESETING);
+
+ printk(KERN_INFO "Die %d boundary: %d%s\n", die,
+ this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
+ }
+
+ /* Enable ECC */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ return 0;
+}
+
+/**
+ * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
+ * boundary[], diesize[], mtd->size, mtd->erasesize,
+ * mtd->eraseregions
+ * @param mtd - MTD device structure
+ */
+static void flexonenand_get_size(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int die, i, eraseshift, density;
+ int blksperdie, maxbdry;
+ loff_t ofs;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+ maxbdry = blksperdie - 1;
+ eraseshift = this->erase_shift - 1;
+
+ mtd->numeraseregions = this->dies << 1;
+
+ /* This fills up the device boundary */
+ flexonenand_get_boundary(mtd);
+ die = 0;
+ ofs = 0;
+ i = -1;
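+ /*
+ * Each die contributes up to two erase regions: an SLC region up to
+ * the boundary (half-size erase blocks) and an MLC region above it.
+ * Regions that would be empty are dropped from numeraseregions.
+ */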
+ for (; die < this->dies; die++) {
+ if (!die || this->boundary[die-1] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks =
+ this->boundary[die] + 1;
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift++;
+ } else {
+ mtd->numeraseregions -= 1;
+ mtd->eraseregions[i].numblocks +=
+ this->boundary[die] + 1;
+ ofs += (this->boundary[die] + 1) << (eraseshift - 1);
+ }
+ if (this->boundary[die] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks = maxbdry ^
+ this->boundary[die];
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift--;
+ } else
+ mtd->numeraseregions -= 1;
+ }
+
+ /* Expose MLC erase size except when all blocks are SLC */
+ mtd->erasesize = 1 << this->erase_shift;
+ if (mtd->numeraseregions == 1)
+ mtd->erasesize >>= 1;
+
+ printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
+ for (i = 0; i < mtd->numeraseregions; i++)
+ printk(KERN_INFO "[offset: 0x%08llx, erasesize: 0x%05x,"
+ " numblocks: %04u]\n", mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+
+ for (die = 0, mtd->size = 0; die < this->dies; die++) {
+ this->diesize[die] = (loff_t) (blksperdie << this->erase_shift);
+ this->diesize[die] -= (loff_t) (this->boundary[die] + 1)
+ << (this->erase_shift - 1);
+ mtd->size += this->diesize[die];
+ }
+}
+
+/**
+ * flexonenand_check_blocks_erased - Check if blocks are erased
+ * @param mtd - MTD device structure
+ * @param start - first erase block to check
+ * @param end - last erase block to check
+ *
+ * Converting an unerased block from MLC to SLC
+ * causes byte values to change. Since both data and its ECC
+ * have changed, reads on the block give an uncorrectable error.
+ * This might lead to the block being detected as bad.
+ *
+ * Avoid this by ensuring that the block to be converted is
+ * erased.
+ */
+static int flexonenand_check_blocks_erased(struct mtd_info *mtd,
+ int start, int end)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i, ret;
+ int block;
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OOB_PLACE,
+ .ooboffs = 0,
+ .ooblen = mtd->oobsize,
+ .datbuf = NULL,
+ .oobbuf = this->oob_buf,
+ };
+ loff_t addr;
+
+ printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
+
+ for (block = start; block <= end; block++) {
+ addr = flexonenand_addr(this, block);
+ if (onenand_block_isbad_nolock(mtd, addr, 0))
+ continue;
+
+ /*
+ * Since main area write results in ECC write to spare,
+ * it is sufficient to check only ECC bytes for change.
+ */
+ ret = onenand_read_oob_nolock(mtd, addr, &ops);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mtd->oobsize; i++)
+ if (this->oob_buf[i] != 0xff)
+ break;
+
+ if (i != mtd->oobsize) {
+ printk(KERN_WARNING "Block %d not erased.\n", block);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * flexonenand_set_boundary - Writes the SLC boundary
+ * @param mtd - MTD device structure
+ * @param die - die whose boundary is to be changed
+ * @param boundary - new SLC boundary block number
+ * @param lock - non-zero to leave the new boundary locked
+ */
+int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+ int boundary, int lock)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret, density, blksperdie, old, new, thisboundary;
+ loff_t addr;
+
+ if (die >= this->dies)
+ return -EINVAL;
+
+ if (boundary == this->boundary[die])
+ return 0;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((16 << density) << 20) >> this->erase_shift;
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+
+ if (boundary >= blksperdie) {
+ printk("flexonenand_set_boundary:"
+ "Invalid boundary value. "
+ "Boundary not changed.\n");
+ return -EINVAL;
+ }
+
+ /* Check if converting blocks are erased */
+ old = this->boundary[die] + (die * this->density_mask);
+ new = boundary + (die * this->density_mask);
+ ret = flexonenand_check_blocks_erased(mtd, min(old, new)
+ + 1, max(old, new));
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
+ return ret;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ /* Check if boundary is locked */
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ thisboundary = this->read_word(this->base + ONENAND_DATARAM);
+ if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
+ printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
+ goto out;
+ }
+
+ printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
+ die, boundary, lock ? "(Locked)" : "(Unlocked)");
+
+ boundary &= FLEXONENAND_PI_MASK;
+ boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
+
+ addr = die ? this->diesize[0] : 0;
+ this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
+ ret = this->wait(mtd, FL_ERASING);
+ if (ret) {
+ printk("flexonenand_set_boundary:"
+ "Failed PI erase for Die %d\n", die);
+ goto out;
+ }
+
+ this->write_word(boundary, this->base + ONENAND_DATARAM);
+ this->command(mtd, ONENAND_CMD_PROG, addr, 0);
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk("flexonenand_set_boundary:"
+ "Failed PI write for Die %d\n", die);
+ goto out;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
+ ret = this->wait(mtd, FL_WRITING);
+out:
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
+ this->wait(mtd, FL_RESETING);
+ if (!ret)
+ /* Recalculate device size on boundary change */
+ flexonenand_get_size(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_probe - [OneNAND Interface] Probe the OneNAND device
+ * @param mtd MTD device structure
+ *
+ * OneNAND detection method:
+ * Compare the values obtained via command with those from the registers
+ */
+static int onenand_probe(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int bram_maf_id, bram_dev_id, maf_id, dev_id, ver_id;
+ int density;
+ int syscfg;
+
+ /* Save system configuration 1 */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ /* Clear Sync. Burst Read mode to read BootRAM */
+ this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ), this->base + ONENAND_REG_SYS_CFG1);
+
+ /* Send the command for reading device ID from BootRAM */
+ this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
+
+ /* Read manufacturer and device IDs from BootRAM */
+ bram_maf_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x0);
+ bram_dev_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x2);
+
+ /* Reset OneNAND to read default register values */
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
+
+ /* Wait for reset */
+ this->wait(mtd, FL_RESETING);
+
+ /* Restore system configuration 1 */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+
+ /* Check manufacturer ID */
+ if (onenand_check_maf(bram_maf_id))
+ return -ENXIO;
+
+ /* Read manufacturer and device IDs from Register */
+ maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+ ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
+ this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
+
+ /* Check OneNAND device */
+ if (maf_id != bram_maf_id || dev_id != bram_dev_id)
+ return -ENXIO;
+
+ /* Flash device information */
+ mtd->name = onenand_print_device_info(dev_id, ver_id);
+ this->device_id = dev_id;
+ this->version_id = ver_id;
+
+ density = onenand_get_density(dev_id);
+ if (FLEXONENAND(this)) {
+ this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
+ /* Maximum possible erase regions */
+ mtd->numeraseregions = this->dies << 1;
+ mtd->eraseregions = malloc(sizeof(struct mtd_erase_region_info)
+ * (this->dies << 1));
+ if (!mtd->eraseregions)
+ return -ENOMEM;
+ }
+
+ /*
+ * For Flex-OneNAND, chipsize represents maximum possible device size.
+ * mtd->size represents the actual device size.
+ */
+ this->chipsize = (16 << density) << 20;
+
+ /* OneNAND page size & block size */
+ /* The data buffer size is equal to page size */
+ mtd->writesize =
+ this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
+ /* We use the full BufferRAM */
+ if (ONENAND_IS_MLC(this))
+ mtd->writesize <<= 1;
+
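+ /* The spare area is 1/32 of the page size, e.g. 64 bytes of OOB for
+ * a 2KiB page. */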
+ mtd->oobsize = mtd->writesize >> 5;
+ /* Pages per block is always 64 in OneNAND */
+ mtd->erasesize = mtd->writesize << 6;
+ /*
+ * Flex-OneNAND SLC area has 64 pages per block.
+ * Flex-OneNAND MLC area has 128 pages per block.
+ * Expose MLC erase size to find erase_shift and page_mask.
+ */
+ if (FLEXONENAND(this))
+ mtd->erasesize <<= 1;
+
+ this->erase_shift = ffs(mtd->erasesize) - 1;
+ this->page_shift = ffs(mtd->writesize) - 1;
+ this->ppb_shift = (this->erase_shift - this->page_shift);
+ this->page_mask = (mtd->erasesize / mtd->writesize) - 1;
+ /* Set density mask. It is used for DDP */
+ if (ONENAND_IS_DDP(this))
+ this->density_mask = this->chipsize >> (this->erase_shift + 1);
+ /* It's real page size */
+ this->writesize = mtd->writesize;
+
+ /* REVISIT: Multichip handling */
+
+ if (FLEXONENAND(this))
+ flexonenand_get_size(mtd);
+ else
+ mtd->size = this->chipsize;
+
+ /* Check OneNAND features */
+ onenand_check_features(mtd);
+
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->erase = onenand_erase;
+ mtd->read = onenand_read;
+ mtd->write = onenand_write;
+ mtd->read_oob = onenand_read_oob;
+ mtd->write_oob = onenand_write_oob;
+ mtd->sync = onenand_sync;
+ mtd->block_isbad = onenand_block_isbad;
+ mtd->block_markbad = onenand_block_markbad;
+
+ return 0;
+}
+
+/**
+ * onenand_scan - [OneNAND Interface] Scan for the OneNAND device
+ * @param mtd MTD device structure
+ * @param maxchips Number of chips to scan for
+ *
+ * This fills out all the uninitialized function pointers
+ * with the defaults.
+ * The flash ID is read and the mtd/chip structures are
+ * filled with the appropriate values.
+ */
+int onenand_scan(struct mtd_info *mtd, int maxchips)
+{
+ int i;
+ struct onenand_chip *this = mtd->priv;
+
+ if (!this->read_word)
+ this->read_word = onenand_readw;
+ if (!this->write_word)
+ this->write_word = onenand_writew;
+
+ if (!this->command)
+ this->command = onenand_command;
+ if (!this->wait)
+ this->wait = onenand_wait;
+ if (!this->bbt_wait)
+ this->bbt_wait = onenand_bbt_wait;
+
+ if (!this->read_bufferram)
+ this->read_bufferram = onenand_read_bufferram;
+ if (!this->write_bufferram)
+ this->write_bufferram = onenand_write_bufferram;
+
+ if (!this->block_markbad)
+ this->block_markbad = onenand_default_block_markbad;
+ if (!this->scan_bbt)
+ this->scan_bbt = onenand_default_bbt;
+
+ if (onenand_probe(mtd))
+ return -ENXIO;
+
+ /* Set Sync. Burst Read after probing */
+ if (this->mmcontrol) {
+ printk(KERN_INFO "OneNAND Sync. Burst Read support\n");
+ this->read_bufferram = onenand_sync_read_bufferram;
+ }
+
+ /* Allocate buffers, if necessary */
+ if (!this->page_buf) {
+ this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!this->page_buf) {
+ printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n");
+ return -ENOMEM;
+ }
+ this->options |= ONENAND_PAGEBUF_ALLOC;
+ }
+ if (!this->oob_buf) {
+ this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!this->oob_buf) {
+ printk(KERN_ERR "onenand_scan: Can't allocate oob_buf\n");
+ if (this->options & ONENAND_PAGEBUF_ALLOC) {
+ this->options &= ~ONENAND_PAGEBUF_ALLOC;
+ kfree(this->page_buf);
+ }
+ return -ENOMEM;
+ }
+ this->options |= ONENAND_OOBBUF_ALLOC;
+ }
+
+ this->state = FL_READY;
+
+ /*
+ * Allow subpage writes up to oobsize.
+ */
+ switch (mtd->oobsize) {
+ case 128:
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 0;
+ break;
+
+ case 64:
+ this->ecclayout = &onenand_oob_64;
+ mtd->subpage_sft = 2;
+ break;
+
+ case 32:
+ this->ecclayout = &onenand_oob_32;
+ mtd->subpage_sft = 1;
+ break;
+
+ default:
+ printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ mtd->subpage_sft = 0;
+ /* To prevent kernel oops */
+ this->ecclayout = &onenand_oob_32;
+ break;
+ }
+
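+ /* e.g. a 2KiB page with a 64-byte OOB gets subpage_sft = 2,
+ * i.e. 512-byte subpages. */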
+ this->subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area
+ */
+ this->ecclayout->oobavail = 0;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES &&
+ this->ecclayout->oobfree[i].length; i++)
+ this->ecclayout->oobavail +=
+ this->ecclayout->oobfree[i].length;
+ mtd->oobavail = this->ecclayout->oobavail;
+
+ mtd->ecclayout = this->ecclayout;
+
+ /* Unlock all blocks */
+ onenand_unlock_all(mtd);
+
+ return this->scan_bbt(mtd);
+}
+
+/**
+ * onenand_release - [OneNAND Interface] Free resources held by the OneNAND device
+ * @param mtd MTD device structure
+ */
+void onenand_release(struct mtd_info *mtd)
+{
+}
diff --git a/u-boot/drivers/mtd/onenand/onenand_bbt.c b/u-boot/drivers/mtd/onenand/onenand_bbt.c
new file mode 100644
index 0000000..1354877
--- /dev/null
+++ b/u-boot/drivers/mtd/onenand/onenand_bbt.c
@@ -0,0 +1,268 @@
+/*
+ * linux/drivers/mtd/onenand/onenand_bbt.c
+ *
+ * Bad Block Table support for the OneNAND driver
+ *
+ * Copyright(c) 2005-2008 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * TODO:
+ * Split BBT core and chip specific BBT.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <common.h>
+#include <linux/mtd/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <malloc.h>
+
+#include <asm/errno.h>
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @param buf the buffer to search
+ * @param len the length of buffer to search
+ * @param paglen the pagelength
+ * @param td search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block
+ * tables and good / bad block identifiers. Same as check_pattern, but
+ * no optional empty check and the pattern is expected to start
+ * at offset 0.
+ */
+static int check_short_pattern(uint8_t * buf, int len, int paglen,
+ struct nand_bbt_descr *td)
+{
+ int i;
+ uint8_t *p = buf;
+
+ /* Compare the pattern */
+ for (i = 0; i < td->len; i++) {
+ if (p[i] != td->pattern[i])
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @param mtd MTD device structure
+ * @param buf temporary buffer
+ * @param bd descriptor for the good/bad block search pattern
+ * @param chip create the table for a specific chip, -1 read all chips.
+ * Applies only if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device
+ * for the given good/bad block identify pattern
+ */
+static int create_bbt(struct mtd_info *mtd, uint8_t * buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int i, j, numblocks, len, scanlen;
+ int startblock;
+ loff_t from;
+ size_t readlen, ooblen;
+ struct mtd_oob_ops ops;
+ int rgn;
+
+ printk(KERN_INFO "Scanning device for bad blocks\n");
+
+ len = 1;
+
+ /* We need to read only a few bytes from the OOB area */
+ scanlen = ooblen = 0;
+ readlen = bd->len;
+
+ /* chip == -1 case only */
+ /* Note that numblocks is 2 * (real numblocks) here;
+ * see i += 2 below as it makes shifting and masking less painful
+ */
+ numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
+ startblock = 0;
+ from = 0;
+
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooblen = readlen;
+ ops.oobbuf = buf;
+ ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
+
+ for (i = startblock; i < numblocks;) {
+ int ret;
+
+ for (j = 0; j < len; j++) {
+ /* No need to read pages fully,
+ * just read required OOB bytes */
+ ret = onenand_bbt_read_oob(mtd,
+ from + j * mtd->writesize +
+ bd->offs, &ops);
+
+ /* If it is an initial bad block, just ignore it */
+ if (ret == ONENAND_BBT_READ_FATAL_ERROR)
+ return -EIO;
+
+ if (ret || check_short_pattern
+ (&buf[j * scanlen], scanlen, mtd->writesize, bd)) {
+ bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
+ printk(KERN_WARNING
+ "Bad eraseblock %d at 0x%08x\n", i >> 1,
+ (unsigned int)from);
+ break;
+ }
+ }
+ i += 2;
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, from);
+ from += mtd->eraseregions[rgn].erasesize;
+ } else
+ from += (1 << bbm->bbt_erase_shift);
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @param mtd MTD device structure
+ * @param bd descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device
+ * for manufacturer / software marked good / bad blocks
+ */
+static inline int onenand_memory_bbt(struct mtd_info *mtd,
+ struct nand_bbt_descr *bd)
+{
+ unsigned char data_buf[MAX_ONENAND_PAGESIZE];
+
+ bd->options &= ~NAND_BBT_SCANEMPTY;
+ return create_bbt(mtd, data_buf, bd, -1);
+}
+
+/**
+ * onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad
+ * @param mtd MTD device structure
+ * @param offs offset in the device
+ * @param allowbbt allow access to bad block table region
+ */
+static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int block;
+ uint8_t res;
+
+ /* Get block number * 2 */
+ block = (int) (onenand_block(this, offs) << 1);
+ res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+
+ MTDDEBUG (MTD_DEBUG_LEVEL2,
+ "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ (unsigned int)offs, block >> 1, res);
+
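+ /*
+ * As used here, the 2-bit codes are: 00 = good, 01 = marked bad at
+ * runtime, 10 = reserved (treated as good only when allowbbt is set),
+ * 11 = factory bad found by create_bbt().
+ */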
+ switch ((int)res) {
+ case 0x00:
+ return 0;
+ case 0x01:
+ return 1;
+ case 0x02:
+ return allowbbt ? 0 : 1;
+ }
+
+ return 1;
+}
+
+/**
+ * onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s)
+ * @param mtd MTD device structure
+ * @param bd descriptor for the good/bad block search pattern
+ *
+ * The function checks whether a bad block table is already
+ * available. If not, it scans the device for manufacturer-
+ * marked good / bad blocks and writes the bad block table(s) to
+ * the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed
+ * by calling the onenand_free_bbt function.
+ *
+ */
+int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int len, ret = 0;
+
+ len = this->chipsize >> (this->erase_shift + 2);
+ /* Allocate memory (2 bits per block) */
+ bbm->bbt = malloc(len);
+ if (!bbm->bbt) {
+ printk(KERN_ERR "onenand_scan_bbt: Out of memory\n");
+ return -ENOMEM;
+ }
+ /* Clear the memory bad block table */
+ memset(bbm->bbt, 0x00, len);
+
+ /* Set the bad block position */
+ bbm->badblockpos = ONENAND_BADBLOCK_POS;
+
+ /* Set erase shift */
+ bbm->bbt_erase_shift = this->erase_shift;
+
+ if (!bbm->isbad_bbt)
+ bbm->isbad_bbt = onenand_isbad_bbt;
+
+ /* Scan the device to build a memory based bad block table */
+ if ((ret = onenand_memory_bbt(mtd, bd))) {
+ printk(KERN_ERR
+ "onenand_scan_bbt: Can't scan flash and build the RAM-based BBT\n");
+ free(bbm->bbt);
+ bbm->bbt = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr largepage_memorybased = {
+ .options = 0,
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern,
+};
+
+/**
+ * onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device
+ * @param mtd MTD device structure
+ *
+ * This function selects the default bad block table
+ * support for the device and calls the onenand_scan_bbt function
+ */
+int onenand_default_bbt(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm;
+
+ this->bbm = malloc(sizeof(struct bbm_info));
+ if (!this->bbm)
+ return -ENOMEM;
+
+ bbm = this->bbm;
+
+ memset(bbm, 0, sizeof(struct bbm_info));
+
+ /* 1KB page has the same configuration as 2KB page */
+ if (!bbm->badblock_pattern)
+ bbm->badblock_pattern = &largepage_memorybased;
+
+ return onenand_scan_bbt(mtd, bbm->badblock_pattern);
+}
diff --git a/u-boot/drivers/mtd/onenand/onenand_uboot.c b/u-boot/drivers/mtd/onenand/onenand_uboot.c
new file mode 100644
index 0000000..c642016
--- /dev/null
+++ b/u-boot/drivers/mtd/onenand/onenand_uboot.c
@@ -0,0 +1,56 @@
+/*
+ * drivers/mtd/onenand/onenand_uboot.c
+ *
+ * Copyright (C) 2005-2008 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * OneNAND initialization at U-Boot
+ */
+
+#include <common.h>
+#include <linux/mtd/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+
+struct mtd_info onenand_mtd;
+struct onenand_chip onenand_chip;
+static __attribute__((unused)) char dev_name[] = "onenand0";
+
+void onenand_init(void)
+{
+ memset(&onenand_mtd, 0, sizeof(struct mtd_info));
+ memset(&onenand_chip, 0, sizeof(struct onenand_chip));
+
+ onenand_mtd.priv = &onenand_chip;
+
+#ifdef CONFIG_USE_ONENAND_BOARD_INIT
+ /*
+ * Used when some board-specific init is required
+ */
+ onenand_board_init(&onenand_mtd);
+#else
+ onenand_chip.base = (void *) CONFIG_SYS_ONENAND_BASE;
+#endif
+
+ onenand_scan(&onenand_mtd, 1);
+
+ if (onenand_chip.device_id & DEVICE_IS_FLEXONENAND)
+ puts("Flex-");
+ puts("OneNAND: ");
+ print_size(onenand_chip.chipsize, "\n");
+
+#ifdef CONFIG_MTD_DEVICE
+ /*
+ * Add MTD device so that we can reference it later
+ * via the mtdcore infrastructure (e.g. ubi).
+ */
+ onenand_mtd.name = dev_name;
+ add_mtd_device(&onenand_mtd);
+#endif
+}
diff --git a/u-boot/drivers/mtd/onenand/samsung.c b/u-boot/drivers/mtd/onenand/samsung.c
new file mode 100644
index 0000000..20b4912
--- /dev/null
+++ b/u-boot/drivers/mtd/onenand/samsung.c
@@ -0,0 +1,636 @@
+/*
+ * S3C64XX/S5PC100 OneNAND driver at U-Boot
+ *
+ * Copyright (C) 2008-2009 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Implementation:
+ * Emulate the pseudo BufferRAM
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/mtd/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/samsung_onenand.h>
+
+#include <asm/io.h>
+#include <asm/errno.h>
+
+#ifdef ONENAND_DEBUG
+#define DPRINTK(format, args...) \
+do { \
+ printf("%s[%d]: " format "\n", __func__, __LINE__, ##args); \
+} while (0)
+#else
+#define DPRINTK(...) do { } while (0)
+#endif
+
+#define ONENAND_ERASE_STATUS 0x00
+#define ONENAND_MULTI_ERASE_SET 0x01
+#define ONENAND_ERASE_START 0x03
+#define ONENAND_UNLOCK_START 0x08
+#define ONENAND_UNLOCK_END 0x09
+#define ONENAND_LOCK_START 0x0A
+#define ONENAND_LOCK_END 0x0B
+#define ONENAND_LOCK_TIGHT_START 0x0C
+#define ONENAND_LOCK_TIGHT_END 0x0D
+#define ONENAND_UNLOCK_ALL 0x0E
+#define ONENAND_OTP_ACCESS 0x12
+#define ONENAND_SPARE_ACCESS_ONLY 0x13
+#define ONENAND_MAIN_ACCESS_ONLY 0x14
+#define ONENAND_ERASE_VERIFY 0x15
+#define ONENAND_MAIN_SPARE_ACCESS 0x16
+#define ONENAND_PIPELINE_READ 0x4000
+
+#if defined(CONFIG_S3C64XX)
+#define MAP_00 (0x0 << 24)
+#define MAP_01 (0x1 << 24)
+#define MAP_10 (0x2 << 24)
+#define MAP_11 (0x3 << 24)
+#elif defined(CONFIG_S5P)
+#define MAP_00 (0x0 << 26)
+#define MAP_01 (0x1 << 26)
+#define MAP_10 (0x2 << 26)
+#define MAP_11 (0x3 << 26)
+#endif
+
+/* read/write of XIP buffer */
+#define CMD_MAP_00(mem_addr) (MAP_00 | ((mem_addr) << 1))
+/* read/write to the memory device */
+#define CMD_MAP_01(mem_addr) (MAP_01 | (mem_addr))
+/* control special functions of the memory device */
+#define CMD_MAP_10(mem_addr) (MAP_10 | (mem_addr))
+/* direct interface (direct access) with the memory device */
+#define CMD_MAP_11(mem_addr) (MAP_11 | ((mem_addr) << 2))
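+
+/*
+ * Usage sketch: reading the main area of one page issues repeated 32-bit
+ * reads of CMD_MAP_01(mem_addr(fba, fpa, fsa)), while special functions
+ * such as erase or unlock are written through CMD_MAP_10(); see
+ * s3c_onenand_command() below.
+ */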
+
+struct s3c_onenand {
+ struct mtd_info *mtd;
+ void __iomem *base;
+ void __iomem *ahb_addr;
+ int bootram_command;
+ void __iomem *page_buf;
+ void __iomem *oob_buf;
+ unsigned int (*mem_addr)(int fba, int fpa, int fsa);
+ struct samsung_onenand *reg;
+};
+
+static struct s3c_onenand *onenand;
+
+static int s3c_read_cmd(unsigned int cmd)
+{
+ return readl(onenand->ahb_addr + cmd);
+}
+
+static void s3c_write_cmd(int value, unsigned int cmd)
+{
+ writel(value, onenand->ahb_addr + cmd);
+}
+
+/*
+ * MEM_ADDR
+ *
+ * fba: flash block address
+ * fpa: flash page address
+ * fsa: flash sector address
+ *
+ * Returns the buffer address on the memory device;
+ * it will be combined with CMD_MAP_XX
+ */
+#if defined(CONFIG_S3C64XX)
+static unsigned int s3c_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << 12) | (fpa << 6) | (fsa << 4);
+}
+#elif defined(CONFIG_S5P)
+static unsigned int s3c_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << 13) | (fpa << 7) | (fsa << 5);
+}
+#endif
+
+static void s3c_onenand_reset(void)
+{
+ unsigned long timeout = 0x10000;
+ int stat;
+
+ writel(ONENAND_MEM_RESET_COLD, &onenand->reg->mem_reset);
+ while (timeout--) {
+ stat = readl(&onenand->reg->int_err_stat);
+ if (stat & RST_CMP)
+ break;
+ }
+ stat = readl(&onenand->reg->int_err_stat);
+ writel(stat, &onenand->reg->int_err_ack);
+
+ /* Clear interrupt */
+ writel(0x0, &onenand->reg->int_err_ack);
+ /* Clear the ECC status */
+ writel(0x0, &onenand->reg->ecc_err_stat);
+}
+
+static unsigned short s3c_onenand_readw(void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ int reg = addr - this->base;
+ int word_addr = reg >> 1;
+ int value;
+
+ /* It's used at probing time */
+ switch (reg) {
+ case ONENAND_REG_MANUFACTURER_ID:
+ return readl(&onenand->reg->manufact_id);
+ case ONENAND_REG_DEVICE_ID:
+ return readl(&onenand->reg->device_id);
+ case ONENAND_REG_VERSION_ID:
+ return readl(&onenand->reg->flash_ver_id);
+ case ONENAND_REG_DATA_BUFFER_SIZE:
+ return readl(&onenand->reg->data_buf_size);
+ case ONENAND_REG_TECHNOLOGY:
+ return readl(&onenand->reg->tech);
+ case ONENAND_REG_SYS_CFG1:
+ return readl(&onenand->reg->mem_cfg);
+
+ /* Used when checking the unlock-all status */
+ case ONENAND_REG_CTRL_STATUS:
+ return 0;
+
+ case ONENAND_REG_WP_STATUS:
+ return ONENAND_WP_US;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if (reg < ONENAND_DATARAM && onenand->bootram_command) {
+ if (word_addr == 0)
+ return readl(&onenand->reg->manufact_id);
+ if (word_addr == 1)
+ return readl(&onenand->reg->device_id);
+ if (word_addr == 2)
+ return readl(&onenand->reg->flash_ver_id);
+ }
+
+ value = s3c_read_cmd(CMD_MAP_11(word_addr)) & 0xffff;
+ printk(KERN_INFO "s3c_onenand_readw: Illegal access"
+ " at reg 0x%x, value 0x%x\n", word_addr, value);
+ return value;
+}
+
+static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ int reg = addr - this->base;
+ int word_addr = reg >> 1;
+
+ /* It's used at probing time */
+ switch (reg) {
+ case ONENAND_REG_SYS_CFG1:
+ writel(value, &onenand->reg->mem_cfg);
+ return;
+
+ case ONENAND_REG_START_ADDRESS1:
+ case ONENAND_REG_START_ADDRESS2:
+ return;
+
+ /* Lock/lock-tight/unlock/unlock_all */
+ case ONENAND_REG_START_BLOCK_ADDRESS:
+ return;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if (reg < ONENAND_DATARAM) {
+ if (value == ONENAND_CMD_READID) {
+ onenand->bootram_command = 1;
+ return;
+ }
+ if (value == ONENAND_CMD_RESET) {
+ writel(ONENAND_MEM_RESET_COLD,
+ &onenand->reg->mem_reset);
+ onenand->bootram_command = 0;
+ return;
+ }
+ }
+
+ printk(KERN_INFO "s3c_onenand_writew: Illegal access"
+ " at reg 0x%x, value 0x%x\n", word_addr, value);
+
+ s3c_write_cmd(value, CMD_MAP_11(word_addr));
+}
+
+static int s3c_onenand_wait(struct mtd_info *mtd, int state)
+{
+ unsigned int flags = INT_ACT;
+ unsigned int stat, ecc;
+ unsigned long timeout = 0x100000;
+
+ switch (state) {
+ case FL_READING:
+ flags |= BLK_RW_CMP | LOAD_CMP;
+ break;
+ case FL_WRITING:
+ flags |= BLK_RW_CMP | PGM_CMP;
+ break;
+ case FL_ERASING:
+ flags |= BLK_RW_CMP | ERS_CMP;
+ break;
+ case FL_LOCKING:
+ flags |= BLK_RW_CMP;
+ break;
+ default:
+ break;
+ }
+
+ while (timeout--) {
+ stat = readl(&onenand->reg->int_err_stat);
+ if (stat & flags)
+ break;
+ }
+
+ /* To get correct interrupt status in timeout case */
+ stat = readl(&onenand->reg->int_err_stat);
+ writel(stat, &onenand->reg->int_err_ack);
+
+ /*
+ * The spec says to check the controller status first.
+ * However, to get correct information in the case of the
+ * power-off recovery (POR) test, the ECC status should be read first.
+ */
+ if (stat & LOAD_CMP) {
+ ecc = readl(&onenand->reg->ecc_err_stat);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ printk(KERN_INFO "%s: ECC error = 0x%04x\n",
+ __func__, ecc);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ }
+ }
+
+ if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
+ printk(KERN_INFO "%s: controller error = 0x%04x\n",
+ __func__, stat);
+ if (stat & LOCKED_BLK)
+ printk(KERN_INFO "%s: it's locked error = 0x%04x\n",
+ __func__, stat);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int s3c_onenand_command(struct mtd_info *mtd, int cmd,
+ loff_t addr, size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int *m, *s;
+ int fba, fpa, fsa = 0;
+ unsigned int mem_addr;
+ int i, mcount, scount;
+ int index;
+
+ fba = (int) (addr >> this->erase_shift);
+ fpa = (int) (addr >> this->page_shift);
+ fpa &= this->page_mask;
+
+ mem_addr = onenand->mem_addr(fba, fpa, fsa);
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ case ONENAND_CMD_BUFFERRAM:
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ default:
+ break;
+ }
+
+ index = ONENAND_CURRENT_BUFFERRAM(this);
+
+ /*
+ * Emulate two BufferRAMs and access them with a 4-byte pointer
+ */
+ m = (unsigned int *) onenand->page_buf;
+ s = (unsigned int *) onenand->oob_buf;
+
+ if (index) {
+ m += (this->writesize >> 2);
+ s += (mtd->oobsize >> 2);
+ }
+
+ mcount = mtd->writesize >> 2;
+ scount = mtd->oobsize >> 2;
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(CMD_MAP_01(mem_addr));
+ return 0;
+
+ case ONENAND_CMD_READOOB:
+ writel(TSRF, &onenand->reg->trans_spare);
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(CMD_MAP_01(mem_addr));
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ *s++ = s3c_read_cmd(CMD_MAP_01(mem_addr));
+
+ writel(0, &onenand->reg->trans_spare);
+ return 0;
+
+ case ONENAND_CMD_PROG:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(*m++, CMD_MAP_01(mem_addr));
+ return 0;
+
+ case ONENAND_CMD_PROGOOB:
+ writel(TSRF, &onenand->reg->trans_spare);
+
+ /* Main - dummy write */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(0xffffffff, CMD_MAP_01(mem_addr));
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ s3c_write_cmd(*s++, CMD_MAP_01(mem_addr));
+
+ writel(0, &onenand->reg->trans_spare);
+ return 0;
+
+ case ONENAND_CMD_UNLOCK_ALL:
+ s3c_write_cmd(ONENAND_UNLOCK_ALL, CMD_MAP_10(mem_addr));
+ return 0;
+
+ case ONENAND_CMD_ERASE:
+ s3c_write_cmd(ONENAND_ERASE_START, CMD_MAP_10(mem_addr));
+ return 0;
+
+ case ONENAND_CMD_MULTIBLOCK_ERASE:
+ s3c_write_cmd(ONENAND_MULTI_ERASE_SET, CMD_MAP_10(mem_addr));
+ return 0;
+
+ case ONENAND_CMD_ERASE_VERIFY:
+ s3c_write_cmd(ONENAND_ERASE_VERIFY, CMD_MAP_10(mem_addr));
+ return 0;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+ int index = ONENAND_CURRENT_BUFFERRAM(this);
+ unsigned char *p;
+
+ if (area == ONENAND_DATARAM) {
+ p = (unsigned char *) onenand->page_buf;
+ if (index == 1)
+ p += this->writesize;
+ } else {
+ p = (unsigned char *) onenand->oob_buf;
+ if (index == 1)
+ p += mtd->oobsize;
+ }
+
+ return p;
+}
+
+static int onenand_read_bufferram(struct mtd_info *mtd, loff_t addr, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(buffer, p + offset, count);
+ return 0;
+}
+
+static int onenand_write_bufferram(struct mtd_info *mtd, loff_t addr, int area,
+ const unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(p + offset, buffer, count);
+ return 0;
+}
+
+static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ struct samsung_onenand *reg = (struct samsung_onenand *)onenand->base;
+ unsigned int flags = INT_ACT | LOAD_CMP;
+ unsigned int stat;
+ unsigned long timeout = 0x10000;
+
+ while (timeout--) {
+ stat = readl(&reg->int_err_stat);
+ if (stat & flags)
+ break;
+ }
+ /* To get correct interrupt status in timeout case */
+ stat = readl(&onenand->reg->int_err_stat);
+ writel(stat, &onenand->reg->int_err_ack);
+
+ if (stat & LD_FAIL_ECC_ERR) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ if (stat & LOAD_CMP) {
+ int ecc = readl(&onenand->reg->ecc_err_stat);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int block, end;
+ int tmp;
+
+ end = this->chipsize >> this->erase_shift;
+
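+ /*
+ * A dummy read from each block is expected to raise LOCKED_BLK in the
+ * interrupt status register if that block is write-protected.
+ */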
+ for (block = 0; block < end; block++) {
+ tmp = s3c_read_cmd(CMD_MAP_01(onenand->mem_addr(block, 0, 0)));
+
+ if (readl(&onenand->reg->int_err_stat) & LOCKED_BLK) {
+ printf("block %d is write-protected!\n", block);
+ writel(LOCKED_BLK, &onenand->reg->int_err_ack);
+ }
+ }
+}
+
+static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
+ size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, start_mem_addr, end_mem_addr;
+
+ start = ofs >> this->erase_shift;
+ start_mem_addr = onenand->mem_addr(start, 0, 0);
+ end = start + (len >> this->erase_shift) - 1;
+ end_mem_addr = onenand->mem_addr(end, 0, 0);
+
+ if (cmd == ONENAND_CMD_LOCK) {
+ s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(start_mem_addr));
+ s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(end_mem_addr));
+ } else {
+ s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(start_mem_addr));
+ s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(end_mem_addr));
+ }
+
+ this->wait(mtd, FL_LOCKING);
+}
+
+static void s3c_onenand_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ size_t len = this->chipsize;
+
+ /* FIXME workaround */
+ this->subpagesize = mtd->writesize;
+ mtd->subpage_sft = 0;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* No need to check return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Workaround for all block unlock in DDP */
+ if (!ONENAND_IS_DDP(this)) {
+ s3c_onenand_check_lock_status(mtd);
+ return;
+ }
+
+ /* All blocks on another chip */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+
+ s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+ s3c_onenand_check_lock_status(mtd);
+}
+
+#ifdef CONFIG_S3C64XX
+static void s3c_set_width_regs(struct onenand_chip *this)
+{
+ int dev_id, density;
+ int fba, fpa, fsa;
+ int dbs_dfs;
+
+ dev_id = DEVICE_ID0_REG;
+
+ density = (dev_id >> ONENAND_DEVICE_DENSITY_SHIFT) & 0xf;
+ dbs_dfs = !!(dev_id & ONENAND_DEVICE_IS_DDP);
+
+ fba = density + 7;
+ if (dbs_dfs)
+ fba--; /* Decrease the fba */
+ fpa = 6;
+ if (density >= ONENAND_DEVICE_DENSITY_512Mb)
+ fsa = 2;
+ else
+ fsa = 1;
+
+ DPRINTK("FBA %lu, FPA %lu, FSA %lu, DDP %lu",
+ FBA_WIDTH0_REG, FPA_WIDTH0_REG, FSA_WIDTH0_REG,
+ DDP_DEVICE_REG);
+
+ DPRINTK("mem_cfg0 0x%lx, sync mode %lu, "
+ "dev_page_size %lu, BURST LEN %lu",
+ MEM_CFG0_REG, SYNC_MODE_REG,
+ DEV_PAGE_SIZE_REG, BURST_LEN0_REG);
+
+ DEV_PAGE_SIZE_REG = 0x1;
+
+ FBA_WIDTH0_REG = fba;
+ FPA_WIDTH0_REG = fpa;
+ FSA_WIDTH0_REG = fsa;
+ DBS_DFS_WIDTH0_REG = dbs_dfs;
+}
+#endif
+
+void s3c_onenand_init(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ u32 size = (4 << 10); /* 4 KiB */
+
+ onenand = malloc(sizeof(struct s3c_onenand));
+ if (!onenand)
+ return;
+
+ onenand->page_buf = malloc(size * sizeof(char));
+ if (!onenand->page_buf)
+ return;
+ memset(onenand->page_buf, 0xff, size);
+
+ onenand->oob_buf = malloc(128 * sizeof(char));
+ if (!onenand->oob_buf)
+ return;
+ memset(onenand->oob_buf, 0xff, 128);
+
+ onenand->mtd = mtd;
+
+#if defined(CONFIG_S3C64XX)
+ onenand->base = (void *)0x70100000;
+ onenand->ahb_addr = (void *)0x20000000;
+#elif defined(CONFIG_S5P)
+ onenand->base = (void *)0xE7100000;
+ onenand->ahb_addr = (void *)0xB0000000;
+#endif
+ onenand->mem_addr = s3c_mem_addr;
+ onenand->reg = (struct samsung_onenand *)onenand->base;
+
+ this->read_word = s3c_onenand_readw;
+ this->write_word = s3c_onenand_writew;
+
+ this->wait = s3c_onenand_wait;
+ this->bbt_wait = s3c_onenand_bbt_wait;
+ this->unlock_all = s3c_onenand_unlock_all;
+ this->command = s3c_onenand_command;
+
+ this->read_bufferram = onenand_read_bufferram;
+ this->write_bufferram = onenand_write_bufferram;
+
+ this->options |= ONENAND_RUNTIME_BADBLOCK_CHECK;
+}
diff --git a/u-boot/drivers/mtd/spi/Makefile b/u-boot/drivers/mtd/spi/Makefile
new file mode 100644
index 0000000..57112af
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/Makefile
@@ -0,0 +1,55 @@
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# See file CREDITS for list of people who contributed to this
+# project.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+include $(TOPDIR)/config.mk
+
+LIB := $(obj)libspi_flash.o
+
+COBJS-$(CONFIG_SPI_FLASH) += spi_flash.o
+COBJS-$(CONFIG_SPI_FLASH_ATMEL) += atmel.o
+COBJS-$(CONFIG_SPI_FLASH_EON) += eon.o
+COBJS-$(CONFIG_SPI_FLASH_MACRONIX) += macronix.o
+COBJS-$(CONFIG_SPI_FLASH_SPANSION) += spansion.o
+COBJS-$(CONFIG_SPI_FLASH_SST) += sst.o
+COBJS-$(CONFIG_SPI_FLASH_STMICRO) += stmicro.o
+COBJS-$(CONFIG_SPI_FLASH_WINBOND) += winbond.o
+COBJS-$(CONFIG_SPI_FRAM_RAMTRON) += ramtron.o
+COBJS-$(CONFIG_SPI_M95XXX) += eeprom_m95xxx.o
+
+COBJS := $(COBJS-y)
+SRCS := $(COBJS:.o=.c)
+OBJS := $(addprefix $(obj),$(COBJS))
+
+all: $(LIB)
+
+$(LIB): $(obj).depend $(OBJS)
+ $(call cmd_link_o_target, $(OBJS))
+
+#########################################################################
+
+# defines $(obj).depend target
+include $(SRCTREE)/rules.mk
+
+sinclude $(obj).depend
+
+#########################################################################
diff --git a/u-boot/drivers/mtd/spi/atmel.c b/u-boot/drivers/mtd/spi/atmel.c
new file mode 100644
index 0000000..8d02169
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/atmel.c
@@ -0,0 +1,552 @@
+/*
+ * Atmel SPI DataFlash support
+ *
+ * Copyright (C) 2008 Atmel Corporation
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <spi_flash.h>
+
+#include "spi_flash_internal.h"
+
+/* AT45-specific commands */
+#define CMD_AT45_READ_STATUS 0xd7
+#define CMD_AT45_ERASE_PAGE 0x81
+#define CMD_AT45_LOAD_PROG_BUF1 0x82
+#define CMD_AT45_LOAD_BUF1 0x84
+#define CMD_AT45_LOAD_PROG_BUF2 0x85
+#define CMD_AT45_LOAD_BUF2 0x87
+#define CMD_AT45_PROG_BUF1 0x88
+#define CMD_AT45_PROG_BUF2 0x89
+
+/* AT45 status register bits */
+#define AT45_STATUS_P2_PAGE_SIZE (1 << 0)
+#define AT45_STATUS_READY (1 << 7)
+
+/* DataFlash family IDs, as obtained from the second idcode byte */
+#define DF_FAMILY_AT26F 0
+#define DF_FAMILY_AT45 1
+#define DF_FAMILY_AT26DF 2 /* AT25DF and AT26DF */
+
+struct atmel_spi_flash_params {
+ u8 idcode1;
+ /* Log2 of page size in power-of-two mode */
+ u8 l2_page_size;
+ u8 pages_per_block;
+ u8 blocks_per_sector;
+ u8 nr_sectors;
+ const char *name;
+};
+
+/* spi_flash needs to be first so upper layers can free() it */
+struct atmel_spi_flash {
+ struct spi_flash flash;
+ const struct atmel_spi_flash_params *params;
+};
+
+static inline struct atmel_spi_flash *
+to_atmel_spi_flash(struct spi_flash *flash)
+{
+ return container_of(flash, struct atmel_spi_flash, flash);
+}
+
+static const struct atmel_spi_flash_params atmel_spi_flash_table[] = {
+ {
+ .idcode1 = 0x22,
+ .l2_page_size = 8,
+ .pages_per_block = 8,
+ .blocks_per_sector = 16,
+ .nr_sectors = 4,
+ .name = "AT45DB011D",
+ },
+ {
+ .idcode1 = 0x23,
+ .l2_page_size = 8,
+ .pages_per_block = 8,
+ .blocks_per_sector = 16,
+ .nr_sectors = 8,
+ .name = "AT45DB021D",
+ },
+ {
+ .idcode1 = 0x24,
+ .l2_page_size = 8,
+ .pages_per_block = 8,
+ .blocks_per_sector = 32,
+ .nr_sectors = 8,
+ .name = "AT45DB041D",
+ },
+ {
+ .idcode1 = 0x25,
+ .l2_page_size = 8,
+ .pages_per_block = 8,
+ .blocks_per_sector = 32,
+ .nr_sectors = 16,
+ .name = "AT45DB081D",
+ },
+ {
+ .idcode1 = 0x26,
+ .l2_page_size = 9,
+ .pages_per_block = 8,
+ .blocks_per_sector = 32,
+ .nr_sectors = 16,
+ .name = "AT45DB161D",
+ },
+ {
+ .idcode1 = 0x27,
+ .l2_page_size = 9,
+ .pages_per_block = 8,
+ .blocks_per_sector = 64,
+ .nr_sectors = 64,
+ .name = "AT45DB321D",
+ },
+ {
+ .idcode1 = 0x28,
+ .l2_page_size = 10,
+ .pages_per_block = 8,
+ .blocks_per_sector = 32,
+ .nr_sectors = 32,
+ .name = "AT45DB642D",
+ },
+};
+
+static int at45_wait_ready(struct spi_flash *flash, unsigned long timeout)
+{
+ struct spi_slave *spi = flash->spi;
+ unsigned long timebase;
+ int ret;
+ u8 cmd = CMD_AT45_READ_STATUS;
+ u8 status;
+
+ timebase = get_timer(0);
+
+ ret = spi_xfer(spi, 8, &cmd, NULL, SPI_XFER_BEGIN);
+ if (ret)
+ return -1;
+
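+ /*
+ * The AT45 keeps clocking out its status register for as long as CS
+ * stays asserted, so the loop below can poll without re-issuing the
+ * command.
+ */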
+ do {
+ ret = spi_xfer(spi, 8, NULL, &status, 0);
+ if (ret)
+ return -1;
+
+ if (status & AT45_STATUS_READY)
+ break;
+ } while (get_timer(timebase) < timeout);
+
+ /* Deactivate CS */
+ spi_xfer(spi, 0, NULL, NULL, SPI_XFER_END);
+
+ if (status & AT45_STATUS_READY)
+ return 0;
+
+ /* Timed out */
+ return -1;
+}
+
+/*
+ * Assemble the address part of a command for AT45 devices in
+ * non-power-of-two page size mode.
+ */
+static void at45_build_address(struct atmel_spi_flash *asf, u8 *cmd, u32 offset)
+{
+ unsigned long page_addr;
+ unsigned long byte_addr;
+ unsigned long page_size;
+ unsigned int page_shift;
+
+ /*
+ * The "extra" space per page is the power-of-two page size
+ * divided by 32.
+ */
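+ /*
+ * For example, l2_page_size = 8 gives 256 + 8 = 264-byte pages, and
+ * the assembled 24-bit address field is (page_addr << 9) | byte_addr.
+ */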
+ page_shift = asf->params->l2_page_size;
+ page_size = (1 << page_shift) + (1 << (page_shift - 5));
+ page_shift++;
+ page_addr = offset / page_size;
+ byte_addr = offset % page_size;
+
+ cmd[0] = page_addr >> (16 - page_shift);
+ cmd[1] = page_addr << (page_shift - 8) | (byte_addr >> 8);
+ cmd[2] = byte_addr;
+}
+
+static int dataflash_read_fast_p2(struct spi_flash *flash,
+ u32 offset, size_t len, void *buf)
+{
+ u8 cmd[5];
+
+ cmd[0] = CMD_READ_ARRAY_FAST;
+ cmd[1] = offset >> 16;
+ cmd[2] = offset >> 8;
+ cmd[3] = offset;
+ cmd[4] = 0x00;
+
+ return spi_flash_read_common(flash, cmd, sizeof(cmd), buf, len);
+}
+
+static int dataflash_read_fast_at45(struct spi_flash *flash,
+ u32 offset, size_t len, void *buf)
+{
+ struct atmel_spi_flash *asf = to_atmel_spi_flash(flash);
+ u8 cmd[5];
+
+ cmd[0] = CMD_READ_ARRAY_FAST;
+ at45_build_address(asf, cmd + 1, offset);
+ cmd[4] = 0x00;
+
+ return spi_flash_read_common(flash, cmd, sizeof(cmd), buf, len);
+}
+
+/*
+ * TODO: the two write functions (_p2/_at45) should be unified ...
+ */
+static int dataflash_write_p2(struct spi_flash *flash,
+ u32 offset, size_t len, const void *buf)
+{
+ struct atmel_spi_flash *asf = to_atmel_spi_flash(flash);
+ unsigned long page_size;
+ u32 addr = offset;
+ size_t chunk_len;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * TODO: This function currently uses only page buffer #1. We can
+ * speed this up by using both buffers and loading one buffer while
+ * the other is being programmed into main memory.
+ */
+
+ page_size = (1 << asf->params->l2_page_size);
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ for (actual = 0; actual < len; actual += chunk_len) {
+ chunk_len = min(len - actual, page_size - (addr % page_size));
+
+ /* Use the same address bits for both commands */
+ cmd[0] = CMD_AT45_LOAD_BUF1;
+ cmd[1] = addr >> 16;
+ cmd[2] = addr >> 8;
+ cmd[3] = addr;
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4,
+ buf + actual, chunk_len);
+ if (ret < 0) {
+ debug("SF: Loading AT45 buffer failed\n");
+ goto out;
+ }
+
+ cmd[0] = CMD_AT45_PROG_BUF1;
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: AT45 page programming failed\n");
+ goto out;
+ }
+
+ ret = at45_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: AT45 page programming timed out\n");
+ goto out;
+ }
+
+ addr += chunk_len;
+ }
+
+ debug("SF: AT45: Successfully programmed %zu bytes @ 0x%x\n",
+ len, offset);
+ ret = 0;
+
+out:
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+static int dataflash_write_at45(struct spi_flash *flash,
+ u32 offset, size_t len, const void *buf)
+{
+ struct atmel_spi_flash *asf = to_atmel_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long byte_addr;
+ unsigned long page_size;
+ unsigned int page_shift;
+ size_t chunk_len;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * TODO: This function currently uses only page buffer #1. We can
+ * speed this up by using both buffers and loading one buffer while
+ * the other is being programmed into main memory.
+ */
+
+ page_shift = asf->params->l2_page_size;
+ page_size = (1 << page_shift) + (1 << (page_shift - 5));
+ page_shift++;
+ page_addr = offset / page_size;
+ byte_addr = offset % page_size;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ for (actual = 0; actual < len; actual += chunk_len) {
+ chunk_len = min(len - actual, page_size - byte_addr);
+
+ /* Use the same address bits for both commands */
+ cmd[0] = CMD_AT45_LOAD_BUF1;
+ cmd[1] = page_addr >> (16 - page_shift);
+ cmd[2] = page_addr << (page_shift - 8) | (byte_addr >> 8);
+ cmd[3] = byte_addr;
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4,
+ buf + actual, chunk_len);
+ if (ret < 0) {
+ debug("SF: Loading AT45 buffer failed\n");
+ goto out;
+ }
+
+ cmd[0] = CMD_AT45_PROG_BUF1;
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: AT45 page programming failed\n");
+ goto out;
+ }
+
+ ret = at45_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: AT45 page programming timed out\n");
+ goto out;
+ }
+
+ page_addr++;
+ byte_addr = 0;
+ }
+
+ debug("SF: AT45: Successfully programmed %zu bytes @ 0x%x\n",
+ len, offset);
+ ret = 0;
+
+out:
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+/*
+ * TODO: the two erase functions (_p2/_at45) should be unified ...
+ */
+int dataflash_erase_p2(struct spi_flash *flash, u32 offset, size_t len)
+{
+ struct atmel_spi_flash *asf = to_atmel_spi_flash(flash);
+ unsigned long page_size;
+
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * TODO: This function currently uses page erase only. We can
+ * probably speed things up by using block and/or sector erase
+ * when possible.
+ */
+
+ page_size = (1 << asf->params->l2_page_size);
+
+ if (offset % page_size || len % page_size) {
+ debug("SF: Erase offset/length not multiple of page size\n");
+ return -1;
+ }
+
+ cmd[0] = CMD_AT45_ERASE_PAGE;
+ cmd[3] = 0x00;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ for (actual = 0; actual < len; actual += page_size) {
+ cmd[1] = offset >> 16;
+ cmd[2] = offset >> 8;
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: AT45 page erase failed\n");
+ goto out;
+ }
+
+ ret = at45_wait_ready(flash, SPI_FLASH_PAGE_ERASE_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: AT45 page erase timed out\n");
+ goto out;
+ }
+
+ offset += page_size;
+ }
+
+ debug("SF: AT45: Successfully erased %zu bytes @ 0x%x\n",
+ len, offset);
+ ret = 0;
+
+out:
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+int dataflash_erase_at45(struct spi_flash *flash, u32 offset, size_t len)
+{
+ struct atmel_spi_flash *asf = to_atmel_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long page_size;
+ unsigned int page_shift;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * TODO: This function currently uses page erase only. We can
+ * probably speed things up by using block and/or sector erase
+ * when possible.
+ */
+
+ page_shift = asf->params->l2_page_size;
+ page_size = (1 << page_shift) + (1 << (page_shift - 5));
+ page_shift++;
+ page_addr = offset / page_size;
+
+ if (offset % page_size || len % page_size) {
+ debug("SF: Erase offset/length not multiple of page size\n");
+ return -1;
+ }
+
+ cmd[0] = CMD_AT45_ERASE_PAGE;
+ cmd[3] = 0x00;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ for (actual = 0; actual < len; actual += page_size) {
+ cmd[1] = page_addr >> (16 - page_shift);
+ cmd[2] = page_addr << (page_shift - 8);
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: AT45 page erase failed\n");
+ goto out;
+ }
+
+ ret = at45_wait_ready(flash, SPI_FLASH_PAGE_ERASE_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: AT45 page erase timed out\n");
+ goto out;
+ }
+
+ page_addr++;
+ }
+
+ debug("SF: AT45: Successfully erased %zu bytes @ 0x%x\n",
+ len, offset);
+ ret = 0;
+
+out:
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+struct spi_flash *spi_flash_probe_atmel(struct spi_slave *spi, u8 *idcode)
+{
+ const struct atmel_spi_flash_params *params;
+ unsigned page_size;
+ unsigned int family;
+ struct atmel_spi_flash *asf;
+ unsigned int i;
+ int ret;
+ u8 status;
+
+ for (i = 0; i < ARRAY_SIZE(atmel_spi_flash_table); i++) {
+ params = &atmel_spi_flash_table[i];
+ if (params->idcode1 == idcode[1])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(atmel_spi_flash_table)) {
+ debug("SF: Unsupported DataFlash ID %02x\n",
+ idcode[1]);
+ return NULL;
+ }
+
+ asf = malloc(sizeof(struct atmel_spi_flash));
+ if (!asf) {
+ debug("SF: Failed to allocate memory\n");
+ return NULL;
+ }
+
+ asf->params = params;
+ asf->flash.spi = spi;
+ asf->flash.name = params->name;
+
+ /* Assuming power-of-two page size initially. */
+ page_size = 1 << params->l2_page_size;
+
+ family = idcode[1] >> 5;
+
+ switch (family) {
+ case DF_FAMILY_AT45:
+ /*
+ * AT45 chips have configurable page size. The status
+ * register indicates which configuration is active.
+ */
+ ret = spi_flash_cmd(spi, CMD_AT45_READ_STATUS, &status, 1);
+ if (ret)
+ goto err;
+
+ debug("SF: AT45 status register: %02x\n", status);
+
+ if (!(status & AT45_STATUS_P2_PAGE_SIZE)) {
+ asf->flash.read = dataflash_read_fast_at45;
+ asf->flash.write = dataflash_write_at45;
+ asf->flash.erase = dataflash_erase_at45;
+ page_size += 1 << (params->l2_page_size - 5);
+ } else {
+ asf->flash.read = dataflash_read_fast_p2;
+ asf->flash.write = dataflash_write_p2;
+ asf->flash.erase = dataflash_erase_p2;
+ }
+
+ break;
+
+ case DF_FAMILY_AT26F:
+ case DF_FAMILY_AT26DF:
+ asf->flash.read = dataflash_read_fast_p2;
+ break;
+
+ default:
+ debug("SF: Unsupported DataFlash family %u\n", family);
+ goto err;
+ }
+
+ asf->flash.size = page_size * params->pages_per_block
+ * params->blocks_per_sector
+ * params->nr_sectors;
+
+ printf("SF: Detected %s with page size %u, total ",
+ params->name, page_size);
+ print_size(asf->flash.size, "\n");
+
+ return &asf->flash;
+
+err:
+ free(asf);
+ return NULL;
+}
diff --git a/u-boot/drivers/mtd/spi/eeprom_m95xxx.c b/u-boot/drivers/mtd/spi/eeprom_m95xxx.c
new file mode 100644
index 0000000..632db4e
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/eeprom_m95xxx.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2009
+ * Albin Tonnerre, Free Electrons <albin.tonnerre@free-electrons.com>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <spi.h>
+
+#define SPI_EEPROM_WREN 0x06
+#define SPI_EEPROM_RDSR 0x05
+#define SPI_EEPROM_READ 0x03
+#define SPI_EEPROM_WRITE 0x02
+
+#ifndef CONFIG_DEFAULT_SPI_BUS
+#define CONFIG_DEFAULT_SPI_BUS 0
+#endif
+
+#ifndef CONFIG_DEFAULT_SPI_MODE
+#define CONFIG_DEFAULT_SPI_MODE SPI_MODE_0
+#endif
+
+ssize_t spi_read (uchar *addr, int alen, uchar *buffer, int len)
+{
+ struct spi_slave *slave;
+ u8 cmd = SPI_EEPROM_READ;
+
+ slave = spi_setup_slave(CONFIG_DEFAULT_SPI_BUS, 1, 1000000,
+ CONFIG_DEFAULT_SPI_MODE);
+ if(!slave)
+ return 0;
+
+ spi_claim_bus(slave);
+
+ /* command */
+ if(spi_xfer(slave, 8, &cmd, NULL, SPI_XFER_BEGIN))
+ return -1;
+
+ /*
+ * If alen == 3, addr[0] is the block number; we never use it here. All we
+ * need are the lower 16 bits.
+ */
+ if (alen == 3)
+ addr++;
+
+ /* address, and data */
+ if(spi_xfer(slave, 16, addr, NULL, 0))
+ return -1;
+ if(spi_xfer(slave, 8 * len, NULL, buffer, SPI_XFER_END))
+ return -1;
+
+ spi_release_bus(slave);
+ spi_free_slave(slave);
+ return len;
+}
+
+ssize_t spi_write (uchar *addr, int alen, uchar *buffer, int len)
+{
+ struct spi_slave *slave;
+ char buf[3];
+
+ slave = spi_setup_slave(CONFIG_DEFAULT_SPI_BUS, 1, 1000000,
+ CONFIG_DEFAULT_SPI_MODE);
+ if (!slave)
+ return 0;
+
+ spi_claim_bus(slave);
+
+ buf[0] = SPI_EEPROM_WREN;
+ if(spi_xfer(slave, 8, buf, NULL, SPI_XFER_BEGIN | SPI_XFER_END))
+ return -1;
+
+ buf[0] = SPI_EEPROM_WRITE;
+
+ /* As for reading, drop addr[0] if alen is 3 */
+ if (alen == 3) {
+ alen--;
+ addr++;
+ }
+
+ memcpy(buf + 1, addr, alen);
+ /* command + addr, then data */
+ if(spi_xfer(slave, 24, buf, NULL, SPI_XFER_BEGIN))
+ return -1;
+ if(spi_xfer(slave, len * 8, buffer, NULL, SPI_XFER_END))
+ return -1;
+
+ reset_timer_masked();
+ do {
+ buf[0] = SPI_EEPROM_RDSR;
+ buf[1] = 0;
+ spi_xfer(slave, 16, buf, buf, SPI_XFER_BEGIN | SPI_XFER_END);
+
+ if (!(buf[1] & 1))
+ break;
+
+ } while (get_timer_masked() < CONFIG_SYS_SPI_WRITE_TOUT);
+
+ if (buf[1] & 1)
+ printf ("*** spi_write: Timed out while writing!\n");
+
+ spi_release_bus(slave);
+ spi_free_slave(slave);
+ return len;
+}
diff --git a/u-boot/drivers/mtd/spi/eon.c b/u-boot/drivers/mtd/spi/eon.c
new file mode 100644
index 0000000..02c3bb9
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/eon.c
@@ -0,0 +1,275 @@
+/*
+ * (C) Copyright 2010, ucRobotics Inc.
+ * Author: Chong Huang <chuang@ucrobotics.com>
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <spi_flash.h>
+
+#include "spi_flash_internal.h"
+
+/* EN25Q128-specific commands */
+#define CMD_EN25Q128_WREN 0x06 /* Write Enable */
+#define CMD_EN25Q128_WRDI 0x04 /* Write Disable */
+#define CMD_EN25Q128_RDSR 0x05 /* Read Status Register */
+#define CMD_EN25Q128_WRSR 0x01 /* Write Status Register */
+#define CMD_EN25Q128_READ 0x03 /* Read Data Bytes */
+#define CMD_EN25Q128_FAST_READ 0x0b /* Read Data Bytes at Higher Speed */
+#define CMD_EN25Q128_PP 0x02 /* Page Program */
+#define CMD_EN25Q128_SE 0x20 /* Sector Erase */
+#define CMD_EN25Q128_BE 0xd8 /* Block Erase */
+#define CMD_EN25Q128_DP 0xb9 /* Deep Power-down */
+#define CMD_EN25Q128_RES 0xab /* Release from DP, and Read Signature */
+
+#define EON_ID_EN25Q128 0x18
+
+#define EON_SR_WIP (1 << 0) /* Write-in-Progress */
+
+struct eon_spi_flash_params {
+ u8 idcode1;
+ u16 page_size;
+ u16 pages_per_sector;
+ u16 sectors_per_block;
+ u16 nr_sectors;
+ const char *name;
+};
+
+/* spi_flash needs to be first so upper layers can free() it */
+struct eon_spi_flash {
+ struct spi_flash flash;
+ const struct eon_spi_flash_params *params;
+};
+
+static inline struct eon_spi_flash *to_eon_spi_flash(struct spi_flash *flash)
+{
+ return container_of(flash, struct eon_spi_flash, flash);
+}
+
+static const struct eon_spi_flash_params eon_spi_flash_table[] = {
+ {
+ .idcode1 = EON_ID_EN25Q128,
+ .page_size = 256,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_sectors = 4096,
+ .name = "EN25Q128",
+ },
+};
+
+static int eon_wait_ready(struct spi_flash *flash, unsigned long timeout)
+{
+ struct spi_slave *spi = flash->spi;
+ unsigned long timebase;
+ int ret;
+ u8 cmd = CMD_EN25Q128_RDSR;
+ u8 status;
+
+ ret = spi_xfer(spi, 8, &cmd, NULL, SPI_XFER_BEGIN);
+ if (ret) {
+ debug("SF: Failed to send command %02x: %d\n", cmd, ret);
+ return ret;
+ }
+
+ timebase = get_timer(0);
+ do {
+ ret = spi_xfer(spi, 8, NULL, &status, 0);
+ if (ret)
+ return -1;
+
+ if ((status & EON_SR_WIP) == 0)
+ break;
+
+ } while (get_timer(timebase) < timeout);
+
+ spi_xfer(spi, 0, NULL, NULL, SPI_XFER_END);
+
+ if ((status & EON_SR_WIP) == 0)
+ return 0;
+
+ /* Timed out */
+ return -1;
+}
+
+static int eon_read_fast(struct spi_flash *flash,
+ u32 offset, size_t len, void *buf)
+{
+ struct eon_spi_flash *eon = to_eon_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long page_size;
+ u8 cmd[5];
+
+ page_size = eon->params->page_size;
+ page_addr = offset / page_size;
+
+ cmd[0] = CMD_READ_ARRAY_FAST;
+ cmd[1] = page_addr >> 8;
+ cmd[2] = page_addr;
+ cmd[3] = offset % page_size;
+ cmd[4] = 0x00;
+
+ return spi_flash_read_common(flash, cmd, sizeof(cmd), buf, len);
+}
+
+static int eon_write(struct spi_flash *flash,
+ u32 offset, size_t len, const void *buf)
+{
+ struct eon_spi_flash *eon = to_eon_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long byte_addr;
+ unsigned long page_size;
+ size_t chunk_len;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ page_size = eon->params->page_size;
+ page_addr = offset / page_size;
+ byte_addr = offset % page_size;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (actual = 0; actual < len; actual += chunk_len) {
+ chunk_len = min(len - actual, page_size - byte_addr);
+
+ cmd[0] = CMD_EN25Q128_PP;
+ cmd[1] = page_addr >> 8;
+ cmd[2] = page_addr;
+ cmd[3] = byte_addr;
+
+ debug
+ ("PP: 0x%p => cmd = { 0x%02x 0x%02x%02x%02x } chunk_len = %d\n",
+ buf + actual, cmd[0], cmd[1], cmd[2], cmd[3], chunk_len);
+
+ ret = spi_flash_cmd(flash->spi, CMD_EN25Q128_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ break;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4,
+ buf + actual, chunk_len);
+ if (ret < 0) {
+ debug("SF: EON Page Program failed\n");
+ break;
+ }
+
+ ret = eon_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: EON page programming timed out\n");
+ break;
+ }
+
+ page_addr++;
+ byte_addr = 0;
+ }
+
+ debug("SF: EON: Successfully programmed %u bytes @ 0x%x\n",
+ len, offset);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+int eon_erase(struct spi_flash *flash, u32 offset, size_t len)
+{
+ /* block erase */
+ struct eon_spi_flash *eon = to_eon_spi_flash(flash);
+ unsigned long block_size;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ block_size = eon->params->page_size * eon->params->pages_per_sector
+ * eon->params->sectors_per_block;
+
+ if (offset % block_size || len % block_size) {
+ debug("SF: Erase offset/length not multiple of block size\n");
+ return -1;
+ }
+
+ len /= block_size;
+ cmd[0] = CMD_EN25Q128_BE;
+ cmd[2] = 0x00;
+ cmd[3] = 0x00;
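+ /*
+ * block_size is 64 KiB here (256 * 16 * 16), so the block index written
+ * to cmd[1] in each iteration is simply the high byte of the 24-bit
+ * erase address.
+ */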
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (actual = 0; actual < len; actual++) {
+ cmd[1] = (offset / block_size) + actual;
+ ret = spi_flash_cmd(flash->spi, CMD_EN25Q128_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ break;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: EON page erase failed\n");
+ break;
+ }
+
+ ret = eon_wait_ready(flash, SPI_FLASH_PAGE_ERASE_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: EON page erase timed out\n");
+ break;
+ }
+ }
+
+ debug("SF: EON: Successfully erased %u bytes @ 0x%x\n",
+ len * block_size, offset);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+struct spi_flash *spi_flash_probe_eon(struct spi_slave *spi, u8 *idcode)
+{
+ const struct eon_spi_flash_params *params;
+ struct eon_spi_flash *eon;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(eon_spi_flash_table); ++i) {
+ params = &eon_spi_flash_table[i];
+ if (params->idcode1 == idcode[2])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(eon_spi_flash_table)) {
+ debug("SF: Unsupported EON ID %02x\n", idcode[2]);
+ return NULL;
+ }
+
+ eon = malloc(sizeof(*eon));
+ if (!eon) {
+ debug("SF: Failed to allocate memory\n");
+ return NULL;
+ }
+
+ eon->params = params;
+ eon->flash.spi = spi;
+ eon->flash.name = params->name;
+
+ eon->flash.write = eon_write;
+ eon->flash.erase = eon_erase;
+ eon->flash.read = eon_read_fast;
+ eon->flash.size = params->page_size * params->pages_per_sector
+ * params->nr_sectors;
+
+ debug("SF: Detected %s with page size %u, total %u bytes\n",
+ params->name, params->page_size, eon->flash.size);
+
+ return &eon->flash;
+}
diff --git a/u-boot/drivers/mtd/spi/macronix.c b/u-boot/drivers/mtd/spi/macronix.c
new file mode 100644
index 0000000..76d5284
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/macronix.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2009(C) Marvell International Ltd. and its affiliates
+ * Prafulla Wadaskar <prafulla@marvell.com>
+ *
+ * Based on drivers/mtd/spi/stmicro.c
+ *
+ * Copyright 2008, Network Appliance Inc.
+ * Jason McMullan <mcmullan@netapp.com>
+ *
+ * Copyright (C) 2004-2007 Freescale Semiconductor, Inc.
+ * TsiChung Liew (Tsi-Chung.Liew@freescale.com)
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <spi_flash.h>
+
+#include "spi_flash_internal.h"
+
+/* MX25xx-specific commands */
+#define CMD_MX25XX_WREN 0x06 /* Write Enable */
+#define CMD_MX25XX_WRDI 0x04 /* Write Disable */
+#define CMD_MX25XX_RDSR 0x05 /* Read Status Register */
+#define CMD_MX25XX_WRSR 0x01 /* Write Status Register */
+#define CMD_MX25XX_READ 0x03 /* Read Data Bytes */
+#define CMD_MX25XX_FAST_READ 0x0b /* Read Data Bytes at Higher Speed */
+#define CMD_MX25XX_PP 0x02 /* Page Program */
+#define CMD_MX25XX_SE 0x20 /* Sector Erase */
+#define CMD_MX25XX_BE 0xD8 /* Block Erase */
+#define CMD_MX25XX_CE 0xc7 /* Chip Erase */
+#define CMD_MX25XX_DP 0xb9 /* Deep Power-down */
+#define CMD_MX25XX_RES 0xab /* Release from DP, and Read Signature */
+
+#define MACRONIX_SR_WIP (1 << 0) /* Write-in-Progress */
+
+struct macronix_spi_flash_params {
+ u16 idcode;
+ u16 page_size;
+ u16 pages_per_sector;
+ u16 sectors_per_block;
+ u16 nr_blocks;
+ const char *name;
+};
+
+struct macronix_spi_flash {
+ struct spi_flash flash;
+ const struct macronix_spi_flash_params *params;
+};
+
+static inline struct macronix_spi_flash *to_macronix_spi_flash(struct spi_flash
+ *flash)
+{
+ return container_of(flash, struct macronix_spi_flash, flash);
+}
+
+static const struct macronix_spi_flash_params macronix_spi_flash_table[] = {
+ {
+ .idcode = 0x2015,
+ .page_size = 256,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 32,
+ .name = "MX25L1605D",
+ },
+ {
+ .idcode = 0x2016,
+ .page_size = 256,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 64,
+ .name = "MX25L3205D",
+ },
+ {
+ .idcode = 0x2017,
+ .page_size = 256,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 128,
+ .name = "MX25L6405D",
+ },
+ {
+ .idcode = 0x2018,
+ .page_size = 256,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 256,
+ .name = "MX25L12805D",
+ },
+ {
+ .idcode = 0x2618,
+ .page_size = 256,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 256,
+ .name = "MX25L12855E",
+ },
+};
+
+static int macronix_wait_ready(struct spi_flash *flash, unsigned long timeout)
+{
+ struct spi_slave *spi = flash->spi;
+ unsigned long timebase;
+ int ret;
+ u8 status;
+ u8 cmd = CMD_MX25XX_RDSR;
+
+ ret = spi_xfer(spi, 8, &cmd, NULL, SPI_XFER_BEGIN);
+ if (ret) {
+ debug("SF: Failed to send command %02x: %d\n", cmd, ret);
+ return ret;
+ }
+
+ timebase = get_timer(0);
+ do {
+ ret = spi_xfer(spi, 8, NULL, &status, 0);
+ if (ret)
+ return -1;
+
+ if ((status & MACRONIX_SR_WIP) == 0)
+ break;
+
+ } while (get_timer(timebase) < timeout);
+
+ spi_xfer(spi, 0, NULL, NULL, SPI_XFER_END);
+
+ if ((status & MACRONIX_SR_WIP) == 0)
+ return 0;
+
+ /* Timed out */
+ return -1;
+}
+
+static int macronix_read_fast(struct spi_flash *flash,
+ u32 offset, size_t len, void *buf)
+{
+ struct macronix_spi_flash *mcx = to_macronix_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long page_size;
+ u8 cmd[5];
+
+ page_size = mcx->params->page_size;
+ page_addr = offset / page_size;
+
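+ /*
+ * page_size is 256 for every part in the table above, so this
+ * page/byte split just reproduces the plain 24-bit byte address.
+ */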
+ cmd[0] = CMD_READ_ARRAY_FAST;
+ cmd[1] = page_addr >> 8;
+ cmd[2] = page_addr;
+ cmd[3] = offset % page_size;
+ cmd[4] = 0x00;
+
+ return spi_flash_read_common(flash, cmd, sizeof(cmd), buf, len);
+}
+
+static int macronix_write(struct spi_flash *flash,
+ u32 offset, size_t len, const void *buf)
+{
+ struct macronix_spi_flash *mcx = to_macronix_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long byte_addr;
+ unsigned long page_size;
+ size_t chunk_len;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ page_size = mcx->params->page_size;
+ page_addr = offset / page_size;
+ byte_addr = offset % page_size;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (actual = 0; actual < len; actual += chunk_len) {
+ chunk_len = min(len - actual, page_size - byte_addr);
+
+ cmd[0] = CMD_MX25XX_PP;
+ cmd[1] = page_addr >> 8;
+ cmd[2] = page_addr;
+ cmd[3] = byte_addr;
+
+ debug
+ ("PP: 0x%p => cmd = { 0x%02x 0x%02x%02x%02x } chunk_len = %d\n",
+ buf + actual, cmd[0], cmd[1], cmd[2], cmd[3], chunk_len);
+
+ ret = spi_flash_cmd(flash->spi, CMD_MX25XX_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ break;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4,
+ buf + actual, chunk_len);
+ if (ret < 0) {
+ debug("SF: Macronix Page Program failed\n");
+ break;
+ }
+
+ ret = macronix_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: Macronix page programming timed out\n");
+ break;
+ }
+
+ page_addr++;
+ byte_addr = 0;
+ }
+
+ debug("SF: Macronix: Successfully programmed %u bytes @ 0x%x\n",
+ len, offset);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+int macronix_erase(struct spi_flash *flash, u32 offset, size_t len)
+{
+ struct macronix_spi_flash *mcx = to_macronix_spi_flash(flash);
+ unsigned long sector_size;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * This function currently uses sector erase only.
+ * We can probably speed things up by using bulk erase
+ * when possible.
+ */
+
+ sector_size = mcx->params->page_size * mcx->params->pages_per_sector
+ * mcx->params->sectors_per_block;
+
+ if (offset % sector_size || len % sector_size) {
+ debug("SF: Erase offset/length not multiple of sector size\n");
+ return -1;
+ }
+
+ len /= sector_size;
+ cmd[0] = CMD_MX25XX_BE;
+ cmd[2] = 0x00;
+ cmd[3] = 0x00;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (actual = 0; actual < len; actual++) {
+ cmd[1] = (offset / sector_size) + actual;
+
+ ret = spi_flash_cmd(flash->spi, CMD_MX25XX_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ break;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Macronix page erase failed\n");
+ break;
+ }
+
+ ret = macronix_wait_ready(flash, SPI_FLASH_PAGE_ERASE_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: Macronix page erase timed out\n");
+ break;
+ }
+ }
+
+ debug("SF: Macronix: Successfully erased %u bytes @ 0x%x\n",
+ len * sector_size, offset);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+struct spi_flash *spi_flash_probe_macronix(struct spi_slave *spi, u8 *idcode)
+{
+ const struct macronix_spi_flash_params *params;
+ struct macronix_spi_flash *mcx;
+ unsigned int i;
+ u16 id = idcode[2] | idcode[1] << 8;
+
+ for (i = 0; i < ARRAY_SIZE(macronix_spi_flash_table); i++) {
+ params = &macronix_spi_flash_table[i];
+ if (params->idcode == id)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(macronix_spi_flash_table)) {
+ debug("SF: Unsupported Macronix ID %04x\n", id);
+ return NULL;
+ }
+
+ mcx = malloc(sizeof(*mcx));
+ if (!mcx) {
+ debug("SF: Failed to allocate memory\n");
+ return NULL;
+ }
+
+ mcx->params = params;
+ mcx->flash.spi = spi;
+ mcx->flash.name = params->name;
+
+ mcx->flash.write = macronix_write;
+ mcx->flash.erase = macronix_erase;
+ mcx->flash.read = macronix_read_fast;
+ mcx->flash.size = params->page_size * params->pages_per_sector
+ * params->sectors_per_block * params->nr_blocks;
+
+ printf("SF: Detected %s with page size %u, total ",
+ params->name, params->page_size);
+ print_size(mcx->flash.size, "\n");
+
+ return &mcx->flash;
+}
diff --git a/u-boot/drivers/mtd/spi/ramtron.c b/u-boot/drivers/mtd/spi/ramtron.c
new file mode 100644
index 0000000..171390d
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/ramtron.c
@@ -0,0 +1,319 @@
+/*
+ * (C) Copyright 2010
+ * Reinhard Meyer, EMK Elektronik, reinhard.meyer@emk-elektronik.de
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Note: RAMTRON SPI FRAMs are ferroelectric, nonvolatile RAMs
+ * with an interface identical to SPI flash devices.
+ * However since they behave like RAM there are no delays or
+ * busy polls required. They can sustain read or write at the
+ * allowed SPI bus speed, which can be 40 MHz for some devices.
+ *
+ * Unfortunately some RAMTRON devices do not have a means of
+ * identifying themselves. They leave the SO line undriven when
+ * the READ-ID command is issued. It is therefore mandatory
+ * that the MISO line has a proper pull-up, so that READ-ID
+ * returns a row of 0xff. This 0xff pseudo-id then triggers
+ * probes by all vendor-specific functions that are designed
+ * to handle it. If the MISO line is not pulled up, READ-ID
+ * could return random noise, even mimicking another
+ * device.
+ *
+ * We use CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC
+ * to define which device will be assumed after a simple status
+ * register verify. This method is prone to false positive
+ * detection and should therefore be the last to be tried.
+ * Enter it in the last position in the table in spi_flash.c!
+ *
+ * The define CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC both activates
+ * compilation of the special handler and defines the device
+ * to assume.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <spi_flash.h>
+#include "spi_flash_internal.h"
+
+/* RAMTRON commands common to all devices */
+#define CMD_RAMTRON_WREN 0x06 /* Write Enable */
+#define CMD_RAMTRON_WRDI 0x04 /* Write Disable */
+#define CMD_RAMTRON_RDSR 0x05 /* Read Status Register */
+#define CMD_RAMTRON_WRSR 0x01 /* Write Status Register */
+#define CMD_RAMTRON_READ 0x03 /* Read Data Bytes */
+#define CMD_RAMTRON_WRITE 0x02 /* Write Data Bytes */
+/* not all have those: */
+#define CMD_RAMTRON_FSTRD 0x0b /* Fast Read (for compatibility - not used here) */
+#define CMD_RAMTRON_SLEEP 0xb9 /* Enter Sleep Mode */
+#define CMD_RAMTRON_RDID 0x9f /* Read ID */
+#define CMD_RAMTRON_SNR 0xc3 /* Read Serial Number */
+
+/*
+ * Properties of supported FRAMs
+ * Note: speed is currently not used because we have no method to deliver that
+ * value to the upper layers
+ */
+struct ramtron_spi_fram_params {
+ u32 size; /* size in bytes */
+ u8 addr_len; /* number of address bytes */
+ u8 merge_cmd; /* some address bits are in the command byte */
+ u8 id1; /* device ID 1 (family, density) */
+ u8 id2; /* device ID 2 (sub, rev, rsvd) */
+ u32 speed; /* max. SPI clock in Hz */
+ const char *name; /* name for display and/or matching */
+};
+
+struct ramtron_spi_fram {
+ struct spi_flash flash;
+ const struct ramtron_spi_fram_params *params;
+};
+
+static inline struct ramtron_spi_fram *to_ramtron_spi_fram(struct spi_flash
+ *flash)
+{
+ return container_of(flash, struct ramtron_spi_fram, flash);
+}
+
+/*
+ * table describing supported FRAM chips:
+ * chips without RDID command must have the values 0xff for id1 and id2
+ */
+static const struct ramtron_spi_fram_params ramtron_spi_fram_table[] = {
+ {
+ .size = 32*1024,
+ .addr_len = 2,
+ .merge_cmd = 0,
+ .id1 = 0x22,
+ .id2 = 0x00,
+ .speed = 40000000,
+ .name = "FM25V02",
+ },
+ {
+ .size = 32*1024,
+ .addr_len = 2,
+ .merge_cmd = 0,
+ .id1 = 0x22,
+ .id2 = 0x01,
+ .speed = 40000000,
+ .name = "FM25VN02",
+ },
+ {
+ .size = 64*1024,
+ .addr_len = 2,
+ .merge_cmd = 0,
+ .id1 = 0x23,
+ .id2 = 0x00,
+ .speed = 40000000,
+ .name = "FM25V05",
+ },
+ {
+ .size = 64*1024,
+ .addr_len = 2,
+ .merge_cmd = 0,
+ .id1 = 0x23,
+ .id2 = 0x01,
+ .speed = 40000000,
+ .name = "FM25VN05",
+ },
+ {
+ .size = 128*1024,
+ .addr_len = 3,
+ .merge_cmd = 0,
+ .id1 = 0x24,
+ .id2 = 0x00,
+ .speed = 40000000,
+ .name = "FM25V10",
+ },
+ {
+ .size = 128*1024,
+ .addr_len = 3,
+ .merge_cmd = 0,
+ .id1 = 0x24,
+ .id2 = 0x01,
+ .speed = 40000000,
+ .name = "FM25VN10",
+ },
+#ifdef CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC
+ {
+ .size = 256*1024,
+ .addr_len = 3,
+ .merge_cmd = 0,
+ .id1 = 0xff,
+ .id2 = 0xff,
+ .speed = 40000000,
+ .name = "FM25H20",
+ },
+#endif
+};
+
+static int ramtron_common(struct spi_flash *flash,
+ u32 offset, size_t len, void *buf, u8 command)
+{
+ struct ramtron_spi_fram *sn = to_ramtron_spi_fram(flash);
+ u8 cmd[4];
+ int cmd_len;
+ int ret;
+
+ if (sn->params->addr_len == 3 && sn->params->merge_cmd == 0) {
+ cmd[0] = command;
+ cmd[1] = offset >> 16;
+ cmd[2] = offset >> 8;
+ cmd[3] = offset;
+ cmd_len = 4;
+ } else if (sn->params->addr_len == 2 && sn->params->merge_cmd == 0) {
+ cmd[0] = command;
+ cmd[1] = offset >> 8;
+ cmd[2] = offset;
+ cmd_len = 3;
+ } else {
+ printf("SF: unsupported addr_len or merge_cmd\n");
+ return -1;
+ }
+
+ /* claim the bus */
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ if (command == CMD_RAMTRON_WRITE) {
+ /* send WREN */
+ ret = spi_flash_cmd(flash->spi, CMD_RAMTRON_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ goto releasebus;
+ }
+ }
+
+ /* do the transaction */
+ if (command == CMD_RAMTRON_WRITE)
+ ret = spi_flash_cmd_write(flash->spi, cmd, cmd_len, buf, len);
+ else
+ ret = spi_flash_cmd_read(flash->spi, cmd, cmd_len, buf, len);
+ if (ret < 0)
+ debug("SF: Transaction failed\n");
+
+releasebus:
+ /* release the bus */
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+static int ramtron_read(struct spi_flash *flash,
+ u32 offset, size_t len, void *buf)
+{
+ return ramtron_common(flash, offset, len, buf,
+ CMD_RAMTRON_READ);
+}
+
+static int ramtron_write(struct spi_flash *flash,
+ u32 offset, size_t len, const void *buf)
+{
+ return ramtron_common(flash, offset, len, (void *)buf,
+ CMD_RAMTRON_WRITE);
+}
+
+int ramtron_erase(struct spi_flash *flash, u32 offset, size_t len)
+{
+ debug("SF: Erase of RAMTRON FRAMs is pointless\n");
+ return -1;
+}
+
+/*
+ * Note: we are called here with idcode pointing to the first non-0x7f byte
+ * already!
+ */
+struct spi_flash *spi_fram_probe_ramtron(struct spi_slave *spi, u8 *idcode)
+{
+ const struct ramtron_spi_fram_params *params;
+ struct ramtron_spi_fram *sn;
+ unsigned int i;
+#ifdef CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC
+ int ret;
+ u8 sr;
+#endif
+
+ /* NOTE: the bus has been claimed before this function is called! */
+ switch (idcode[0]) {
+ case 0xc2:
+ /* JEDEC conformant RAMTRON id */
+ for (i = 0; i < ARRAY_SIZE(ramtron_spi_fram_table); i++) {
+ params = &ramtron_spi_fram_table[i];
+ if (idcode[1] == params->id1 && idcode[2] == params->id2)
+ goto found;
+ }
+ break;
+#ifdef CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC
+ case 0xff:
+ /*
+ * Probably an open MISO line, pulled up.
+ * We COULD have a non-JEDEC-conformant FRAM here;
+ * read the status register to verify.
+ */
+ ret = spi_flash_cmd(spi, CMD_RAMTRON_RDSR, &sr, 1);
+ if (ret)
+ return NULL;
+
+ /* Bits 5,4,0 are fixed 0 for all devices */
+ if ((sr & 0x31) != 0x00)
+ return NULL;
+ /* now find the device */
+ for (i = 0; i < ARRAY_SIZE(ramtron_spi_fram_table); i++) {
+ params = &ramtron_spi_fram_table[i];
+ if (!strcmp(params->name, CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC))
+ goto found;
+ }
+ debug("SF: Unsupported non-JEDEC RAMTRON device "
+ CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC "\n");
+ break;
+#endif
+ default:
+ break;
+ }
+
+ /* arriving here means no method has found a device we can handle */
+ debug("SF/ramtron: unsupported device id0=%02x id1=%02x id2=%02x\n",
+ idcode[0], idcode[1], idcode[2]);
+ return NULL;
+
+found:
+ sn = malloc(sizeof(*sn));
+ if (!sn) {
+ debug("SF: Failed to allocate memory\n");
+ return NULL;
+ }
+
+ sn->params = params;
+ sn->flash.spi = spi;
+ sn->flash.name = params->name;
+
+ sn->flash.write = ramtron_write;
+ sn->flash.read = ramtron_read;
+ sn->flash.erase = ramtron_erase;
+ sn->flash.size = params->size;
+
+ printf("SF: Detected %s with size ", params->name);
+ print_size(sn->flash.size, "\n");
+
+ return &sn->flash;
+}
diff --git a/u-boot/drivers/mtd/spi/spansion.c b/u-boot/drivers/mtd/spi/spansion.c
new file mode 100644
index 0000000..c0900f9
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/spansion.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2009 Freescale Semiconductor, Inc.
+ *
+ * Author: Mingkai Hu (Mingkai.hu@freescale.com)
+ * Based on stmicro.c by Wolfgang Denk (wd@denx.de),
+ * TsiChung Liew (Tsi-Chung.Liew@freescale.com),
+ * and Jason McMullan (mcmullan@netapp.com)
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <spi_flash.h>
+
+#include "spi_flash_internal.h"
+
+/* S25FLxx-specific commands */
+#define CMD_S25FLXX_READ 0x03 /* Read Data Bytes */
+#define CMD_S25FLXX_FAST_READ 0x0b /* Read Data Bytes at Higher Speed */
+#define CMD_S25FLXX_READID 0x90 /* Read Manufacture ID and Device ID */
+#define CMD_S25FLXX_WREN 0x06 /* Write Enable */
+#define CMD_S25FLXX_WRDI 0x04 /* Write Disable */
+#define CMD_S25FLXX_RDSR 0x05 /* Read Status Register */
+#define CMD_S25FLXX_WRSR 0x01 /* Write Status Register */
+#define CMD_S25FLXX_PP 0x02 /* Page Program */
+#define CMD_S25FLXX_SE 0xd8 /* Sector Erase */
+#define CMD_S25FLXX_BE 0xc7 /* Bulk Erase */
+#define CMD_S25FLXX_DP 0xb9 /* Deep Power-down */
+#define CMD_S25FLXX_RES 0xab /* Release from DP, and Read Signature */
+
+#define SPSN_ID_S25FL008A 0x0213
+#define SPSN_ID_S25FL016A 0x0214
+#define SPSN_ID_S25FL032A 0x0215
+#define SPSN_ID_S25FL064A 0x0216
+#define SPSN_ID_S25FL128P 0x2018
+#define SPSN_EXT_ID_S25FL128P_256KB 0x0300
+#define SPSN_EXT_ID_S25FL128P_64KB 0x0301
+#define SPSN_EXT_ID_S25FL032P 0x4d00
+
+#define SPANSION_SR_WIP (1 << 0) /* Write-in-Progress */
+
+struct spansion_spi_flash_params {
+ u16 idcode1;
+ u16 idcode2;
+ u16 page_size;
+ u16 pages_per_sector;
+ u16 nr_sectors;
+ const char *name;
+};
+
+struct spansion_spi_flash {
+ struct spi_flash flash;
+ const struct spansion_spi_flash_params *params;
+};
+
+static inline struct spansion_spi_flash *to_spansion_spi_flash(struct spi_flash
+ *flash)
+{
+ return container_of(flash, struct spansion_spi_flash, flash);
+}
+
+static const struct spansion_spi_flash_params spansion_spi_flash_table[] = {
+ {
+ .idcode1 = SPSN_ID_S25FL008A,
+ .idcode2 = 0,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 16,
+ .name = "S25FL008A",
+ },
+ {
+ .idcode1 = SPSN_ID_S25FL016A,
+ .idcode2 = 0,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 32,
+ .name = "S25FL016A",
+ },
+ {
+ .idcode1 = SPSN_ID_S25FL032A,
+ .idcode2 = 0,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 64,
+ .name = "S25FL032A",
+ },
+ {
+ .idcode1 = SPSN_ID_S25FL064A,
+ .idcode2 = 0,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 128,
+ .name = "S25FL064A",
+ },
+ {
+ .idcode1 = SPSN_ID_S25FL128P,
+ .idcode2 = SPSN_EXT_ID_S25FL128P_64KB,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 256,
+ .name = "S25FL128P_64K",
+ },
+ {
+ .idcode1 = SPSN_ID_S25FL128P,
+ .idcode2 = SPSN_EXT_ID_S25FL128P_256KB,
+ .page_size = 256,
+ .pages_per_sector = 1024,
+ .nr_sectors = 64,
+ .name = "S25FL128P_256K",
+ },
+ {
+ .idcode1 = SPSN_ID_S25FL032A,
+ .idcode2 = SPSN_EXT_ID_S25FL032P,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 64,
+ .name = "S25FL032P",
+ },
+};
+
+static int spansion_wait_ready(struct spi_flash *flash, unsigned long timeout)
+{
+ struct spi_slave *spi = flash->spi;
+ unsigned long timebase;
+ int ret;
+ u8 status;
+
+ timebase = get_timer(0);
+ do {
+ ret = spi_flash_cmd(spi, CMD_S25FLXX_RDSR, &status, sizeof(status));
+ if (ret)
+ return -1;
+
+ if ((status & SPANSION_SR_WIP) == 0)
+ break;
+
+ } while (get_timer(timebase) < timeout);
+
+ if ((status & SPANSION_SR_WIP) == 0)
+ return 0;
+
+ /* Timed out */
+ return -1;
+}
+
+static int spansion_read_fast(struct spi_flash *flash,
+ u32 offset, size_t len, void *buf)
+{
+ struct spansion_spi_flash *spsn = to_spansion_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long page_size;
+ u8 cmd[5];
+
+ page_size = spsn->params->page_size;
+ page_addr = offset / page_size;
+
+ cmd[0] = CMD_READ_ARRAY_FAST;
+ cmd[1] = page_addr >> 8;
+ cmd[2] = page_addr;
+ cmd[3] = offset % page_size;
+ cmd[4] = 0x00;
+
+ debug
+ ("READ: 0x%x => cmd = { 0x%02x 0x%02x%02x%02x%02x } len = 0x%x\n",
+ offset, cmd[0], cmd[1], cmd[2], cmd[3], cmd[4], len);
+
+ return spi_flash_read_common(flash, cmd, sizeof(cmd), buf, len);
+}
+
+static int spansion_write(struct spi_flash *flash,
+ u32 offset, size_t len, const void *buf)
+{
+ struct spansion_spi_flash *spsn = to_spansion_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long byte_addr;
+ unsigned long page_size;
+ size_t chunk_len;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ page_size = spsn->params->page_size;
+ page_addr = offset / page_size;
+ byte_addr = offset % page_size;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (actual = 0; actual < len; actual += chunk_len) {
+ chunk_len = min(len - actual, page_size - byte_addr);
+
+ cmd[0] = CMD_S25FLXX_PP;
+ cmd[1] = page_addr >> 8;
+ cmd[2] = page_addr;
+ cmd[3] = byte_addr;
+
+ debug
+ ("PP: 0x%p => cmd = { 0x%02x 0x%02x%02x%02x } chunk_len = %d\n",
+ buf + actual, cmd[0], cmd[1], cmd[2], cmd[3], chunk_len);
+
+ ret = spi_flash_cmd(flash->spi, CMD_S25FLXX_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ break;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4,
+ buf + actual, chunk_len);
+ if (ret < 0) {
+ debug("SF: SPANSION Page Program failed\n");
+ break;
+ }
+
+ ret = spansion_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: SPANSION page programming timed out\n");
+ break;
+ }
+
+ page_addr++;
+ byte_addr = 0;
+ }
+
+ debug("SF: SPANSION: Successfully programmed %u bytes @ 0x%x\n",
+ len, offset);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+int spansion_erase(struct spi_flash *flash, u32 offset, size_t len)
+{
+ struct spansion_spi_flash *spsn = to_spansion_spi_flash(flash);
+ unsigned long sector_size;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * This function currently uses sector erase only.
+ * We can probably speed things up by using bulk erase
+ * when possible.
+ */
+
+ sector_size = spsn->params->page_size * spsn->params->pages_per_sector;
+
+ if (offset % sector_size || len % sector_size) {
+ debug("SF: Erase offset/length not multiple of sector size\n");
+ return -1;
+ }
+
+ cmd[0] = CMD_S25FLXX_SE;
+ cmd[2] = 0x00;
+ cmd[3] = 0x00;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (actual = 0; actual < len; actual += sector_size) {
+ cmd[1] = (offset + actual) >> 16;
+
+ ret = spi_flash_cmd(flash->spi, CMD_S25FLXX_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ break;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: SPANSION page erase failed\n");
+ break;
+ }
+
+ /* Up to 2 seconds */
+ ret = spansion_wait_ready(flash, SPI_FLASH_PAGE_ERASE_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: SPANSION page erase timed out\n");
+ break;
+ }
+ }
+
+ debug("SF: SPANSION: Successfully erased %u bytes @ 0x%x\n",
+ len, offset);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+struct spi_flash *spi_flash_probe_spansion(struct spi_slave *spi, u8 *idcode)
+{
+ const struct spansion_spi_flash_params *params;
+ struct spansion_spi_flash *spsn;
+ unsigned int i;
+ unsigned short jedec, ext_jedec;
+
+ jedec = idcode[1] << 8 | idcode[2];
+ ext_jedec = idcode[3] << 8 | idcode[4];
+
+ for (i = 0; i < ARRAY_SIZE(spansion_spi_flash_table); i++) {
+ params = &spansion_spi_flash_table[i];
+ if (params->idcode1 == jedec) {
+ if (params->idcode2 == ext_jedec)
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(spansion_spi_flash_table)) {
+ debug("SF: Unsupported SPANSION ID %04x %04x\n", jedec, ext_jedec);
+ return NULL;
+ }
+
+ spsn = malloc(sizeof(struct spansion_spi_flash));
+ if (!spsn) {
+ debug("SF: Failed to allocate memory\n");
+ return NULL;
+ }
+
+ spsn->params = params;
+ spsn->flash.spi = spi;
+ spsn->flash.name = params->name;
+
+ spsn->flash.write = spansion_write;
+ spsn->flash.erase = spansion_erase;
+ spsn->flash.read = spansion_read_fast;
+ spsn->flash.size = params->page_size * params->pages_per_sector
+ * params->nr_sectors;
+
+ printf("SF: Detected %s with page size %u, total ",
+ params->name, params->page_size);
+ print_size(spsn->flash.size, "\n");
+
+ return &spsn->flash;
+}
diff --git a/u-boot/drivers/mtd/spi/spi_flash.c b/u-boot/drivers/mtd/spi/spi_flash.c
new file mode 100644
index 0000000..b61d219
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/spi_flash.c
@@ -0,0 +1,233 @@
+/*
+ * SPI flash interface
+ *
+ * Copyright (C) 2008 Atmel Corporation
+ * Copyright (C) 2010 Reinhard Meyer, EMK Elektronik
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi_flash.h>
+
+#include "spi_flash_internal.h"
+
+int spi_flash_cmd(struct spi_slave *spi, u8 cmd, void *response, size_t len)
+{
+ unsigned long flags = SPI_XFER_BEGIN;
+ int ret;
+
+ if (len == 0)
+ flags |= SPI_XFER_END;
+
+ ret = spi_xfer(spi, 8, &cmd, NULL, flags);
+ if (ret) {
+ debug("SF: Failed to send command %02x: %d\n", cmd, ret);
+ return ret;
+ }
+
+ if (len) {
+ ret = spi_xfer(spi, len * 8, NULL, response, SPI_XFER_END);
+ if (ret)
+ debug("SF: Failed to read response (%zu bytes): %d\n",
+ len, ret);
+ }
+
+ return ret;
+}
+
+int spi_flash_cmd_read(struct spi_slave *spi, const u8 *cmd,
+ size_t cmd_len, void *data, size_t data_len)
+{
+ unsigned long flags = SPI_XFER_BEGIN;
+ int ret;
+
+ if (data_len == 0)
+ flags |= SPI_XFER_END;
+
+ ret = spi_xfer(spi, cmd_len * 8, cmd, NULL, flags);
+ if (ret) {
+ debug("SF: Failed to send read command (%zu bytes): %d\n",
+ cmd_len, ret);
+ } else if (data_len != 0) {
+ ret = spi_xfer(spi, data_len * 8, NULL, data, SPI_XFER_END);
+ if (ret)
+ debug("SF: Failed to read %zu bytes of data: %d\n",
+ data_len, ret);
+ }
+
+ return ret;
+}
+
+int spi_flash_cmd_write(struct spi_slave *spi, const u8 *cmd, size_t cmd_len,
+ const void *data, size_t data_len)
+{
+ unsigned long flags = SPI_XFER_BEGIN;
+ int ret;
+
+ if (data_len == 0)
+ flags |= SPI_XFER_END;
+
+ ret = spi_xfer(spi, cmd_len * 8, cmd, NULL, flags);
+ if (ret) {
+		debug("SF: Failed to send write command (%zu bytes): %d\n",
+				cmd_len, ret);
+ } else if (data_len != 0) {
+ ret = spi_xfer(spi, data_len * 8, data, NULL, SPI_XFER_END);
+ if (ret)
+			debug("SF: Failed to write %zu bytes of data: %d\n",
+					data_len, ret);
+ }
+
+ return ret;
+}
+
+
+int spi_flash_read_common(struct spi_flash *flash, const u8 *cmd,
+ size_t cmd_len, void *data, size_t data_len)
+{
+ struct spi_slave *spi = flash->spi;
+ int ret;
+
+ spi_claim_bus(spi);
+ ret = spi_flash_cmd_read(spi, cmd, cmd_len, data, data_len);
+ spi_release_bus(spi);
+
+ return ret;
+}
+
+/*
+ * The following table holds all device probe functions
+ *
+ * shift: number of continuation bytes before the ID
+ * idcode: the expected IDCODE or 0xff for non JEDEC devices
+ * probe: the function to call
+ *
+ * Non-JEDEC devices should be ordered in the table such that
+ * the probe functions with the best detection algorithms come first.
+ *
+ * Several matching entries are permitted; they will be tried
+ * in sequence until a probe function returns non-NULL.
+ *
+ * IDCODE_CONT_LEN may be redefined if a device needs to declare a
+ * larger "shift" value. IDCODE_PART_LEN generally shouldn't be
+ * changed. This is the max number of bytes probe functions may
+ * examine when looking up part-specific identification info.
+ *
+ * Probe functions will be given the idcode buffer starting at their
+ * manu id byte (the "idcode" in the table below). In other words,
+ * all of the continuation bytes will be skipped (the "shift" below).
+ */
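+/*
+ * For illustration: a Ramtron FRAM answers READ_ID with six 0x7f
+ * continuation bytes followed by the manufacturer byte 0xc2, so it is
+ * matched by the { 6, 0xc2, spi_fram_probe_ramtron } entry below, while a
+ * Macronix part with no continuation bytes matches { 0, 0xc2 }.
+ */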
+#define IDCODE_CONT_LEN 0
+#define IDCODE_PART_LEN 5
+static const struct {
+ const u8 shift;
+ const u8 idcode;
+ struct spi_flash *(*probe) (struct spi_slave *spi, u8 *idcode);
+} flashes[] = {
+ /* Keep it sorted by define name */
+#ifdef CONFIG_SPI_FLASH_ATMEL
+ { 0, 0x1f, spi_flash_probe_atmel, },
+#endif
+#ifdef CONFIG_SPI_FLASH_EON
+ { 0, 0x1c, spi_flash_probe_eon, },
+#endif
+#ifdef CONFIG_SPI_FLASH_MACRONIX
+ { 0, 0xc2, spi_flash_probe_macronix, },
+#endif
+#ifdef CONFIG_SPI_FLASH_SPANSION
+ { 0, 0x01, spi_flash_probe_spansion, },
+#endif
+#ifdef CONFIG_SPI_FLASH_SST
+ { 0, 0xbf, spi_flash_probe_sst, },
+#endif
+#ifdef CONFIG_SPI_FLASH_STMICRO
+ { 0, 0x20, spi_flash_probe_stmicro, },
+#endif
+#ifdef CONFIG_SPI_FLASH_WINBOND
+ { 0, 0xef, spi_flash_probe_winbond, },
+#endif
+#ifdef CONFIG_SPI_FRAM_RAMTRON
+ { 6, 0xc2, spi_fram_probe_ramtron, },
+# undef IDCODE_CONT_LEN
+# define IDCODE_CONT_LEN 6
+#endif
+ /* Keep it sorted by best detection */
+#ifdef CONFIG_SPI_FLASH_STMICRO
+ { 0, 0xff, spi_flash_probe_stmicro, },
+#endif
+#ifdef CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC
+ { 0, 0xff, spi_fram_probe_ramtron, },
+#endif
+};
+#define IDCODE_LEN (IDCODE_CONT_LEN + IDCODE_PART_LEN)
+
+struct spi_flash *spi_flash_probe(unsigned int bus, unsigned int cs,
+ unsigned int max_hz, unsigned int spi_mode)
+{
+ struct spi_slave *spi;
+ struct spi_flash *flash = NULL;
+ int ret, i, shift;
+ u8 idcode[IDCODE_LEN], *idp;
+
+ spi = spi_setup_slave(bus, cs, max_hz, spi_mode);
+ if (!spi) {
+ printf("SF: Failed to set up slave\n");
+ return NULL;
+ }
+
+ ret = spi_claim_bus(spi);
+ if (ret) {
+ debug("SF: Failed to claim SPI bus: %d\n", ret);
+ goto err_claim_bus;
+ }
+
+ /* Read the ID codes */
+ ret = spi_flash_cmd(spi, CMD_READ_ID, idcode, sizeof(idcode));
+ if (ret)
+ goto err_read_id;
+
+#ifdef DEBUG
+ printf("SF: Got idcodes\n");
+ print_buffer(0, idcode, 1, sizeof(idcode), 0);
+#endif
+
+ /* count the number of continuation bytes */
+ for (shift = 0, idp = idcode;
+ shift < IDCODE_CONT_LEN && *idp == 0x7f;
+ ++shift, ++idp)
+ continue;
+
+ /* search the table for matches in shift and id */
+ for (i = 0; i < ARRAY_SIZE(flashes); ++i)
+ if (flashes[i].shift == shift && flashes[i].idcode == *idp) {
+ /* we have a match, call probe */
+ flash = flashes[i].probe(spi, idp);
+ if (flash)
+ break;
+ }
+
+ if (!flash) {
+ printf("SF: Unsupported manufacturer %02x\n", *idp);
+ goto err_manufacturer_probe;
+ }
+
+ spi_release_bus(spi);
+
+ return flash;
+
+err_manufacturer_probe:
+err_read_id:
+ spi_release_bus(spi);
+err_claim_bus:
+ spi_free_slave(spi);
+ return NULL;
+}
+
+void spi_flash_free(struct spi_flash *flash)
+{
+ spi_free_slave(flash->spi);
+ free(flash);
+}
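+
+/*
+ * Typical call sequence (sketch only, assuming the flash sits on bus 0,
+ * chip select 0):
+ *
+ *	struct spi_flash *flash;
+ *	u8 buf[256];
+ *
+ *	flash = spi_flash_probe(0, 0, 1000000, SPI_MODE_3);
+ *	if (flash) {
+ *		flash->read(flash, 0, sizeof(buf), buf);
+ *		spi_flash_free(flash);
+ *	}
+ */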
diff --git a/u-boot/drivers/mtd/spi/spi_flash_internal.h b/u-boot/drivers/mtd/spi/spi_flash_internal.h
new file mode 100644
index 0000000..68dcffb
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/spi_flash_internal.h
@@ -0,0 +1,54 @@
+/*
+ * SPI flash internal definitions
+ *
+ * Copyright (C) 2008 Atmel Corporation
+ */
+
+/* Common timeouts -- deliberately generous, since they should only matter
+ * when something is already wrong; err on the side of caution in case we
+ * are dealing with slower SPI buses and/or processors.
+ */
+#define SPI_FLASH_PROG_TIMEOUT (2 * CONFIG_SYS_HZ)
+#define SPI_FLASH_PAGE_ERASE_TIMEOUT (5 * CONFIG_SYS_HZ)
+#define SPI_FLASH_SECTOR_ERASE_TIMEOUT (10 * CONFIG_SYS_HZ)
+
+/* Common commands */
+#define CMD_READ_ID 0x9f
+
+#define CMD_READ_ARRAY_SLOW 0x03
+#define CMD_READ_ARRAY_FAST 0x0b
+#define CMD_READ_ARRAY_LEGACY 0xe8
+
+/* Send a single-byte command to the device and read the response */
+int spi_flash_cmd(struct spi_slave *spi, u8 cmd, void *response, size_t len);
+
+/*
+ * Send a multi-byte command to the device and read the response. Used
+ * for flash array reads, etc.
+ */
+int spi_flash_cmd_read(struct spi_slave *spi, const u8 *cmd,
+ size_t cmd_len, void *data, size_t data_len);
+
+/*
+ * Send a multi-byte command to the device followed by (optional)
+ * data. Used for programming the flash array, etc.
+ */
+int spi_flash_cmd_write(struct spi_slave *spi, const u8 *cmd, size_t cmd_len,
+ const void *data, size_t data_len);
+
+/*
+ * Same as spi_flash_cmd_read() except it also claims/releases the SPI
+ * bus. Used as common part of the ->read() operation.
+ */
+int spi_flash_read_common(struct spi_flash *flash, const u8 *cmd,
+ size_t cmd_len, void *data, size_t data_len);
+
+/* Manufacturer-specific probe functions */
+struct spi_flash *spi_flash_probe_spansion(struct spi_slave *spi, u8 *idcode);
+struct spi_flash *spi_flash_probe_atmel(struct spi_slave *spi, u8 *idcode);
+struct spi_flash *spi_flash_probe_eon(struct spi_slave *spi, u8 *idcode);
+struct spi_flash *spi_flash_probe_macronix(struct spi_slave *spi, u8 *idcode);
+struct spi_flash *spi_flash_probe_sst(struct spi_slave *spi, u8 *idcode);
+struct spi_flash *spi_flash_probe_stmicro(struct spi_slave *spi, u8 *idcode);
+struct spi_flash *spi_flash_probe_winbond(struct spi_slave *spi, u8 *idcode);
+struct spi_flash *spi_fram_probe_ramtron(struct spi_slave *spi, u8 *idcode);
diff --git a/u-boot/drivers/mtd/spi/sst.c b/u-boot/drivers/mtd/spi/sst.c
new file mode 100644
index 0000000..2557891
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/sst.c
@@ -0,0 +1,375 @@
+/*
+ * Driver for SST serial flashes
+ *
+ * (C) Copyright 2000-2002
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ * Copyright 2008, Network Appliance Inc.
+ * Jason McMullan <mcmullan@netapp.com>
+ * Copyright (C) 2004-2007 Freescale Semiconductor, Inc.
+ * TsiChung Liew (Tsi-Chung.Liew@freescale.com)
+ * Copyright (c) 2008-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <spi_flash.h>
+
+#include "spi_flash_internal.h"
+
+#define CMD_SST_WREN 0x06 /* Write Enable */
+#define CMD_SST_WRDI 0x04 /* Write Disable */
+#define CMD_SST_RDSR 0x05 /* Read Status Register */
+#define CMD_SST_WRSR 0x01 /* Write Status Register */
+#define CMD_SST_READ 0x03 /* Read Data Bytes */
+#define CMD_SST_FAST_READ 0x0b /* Read Data Bytes at Higher Speed */
+#define CMD_SST_BP 0x02 /* Byte Program */
+#define CMD_SST_AAI_WP 0xAD /* Auto Address Increment Word Program */
+#define CMD_SST_SE 0x20 /* Sector Erase */
+
+#define SST_SR_WIP (1 << 0) /* Write-in-Progress */
+#define SST_SR_WEL (1 << 1) /* Write enable */
+#define SST_SR_BP0 (1 << 2) /* Block Protection 0 */
+#define SST_SR_BP1 (1 << 3) /* Block Protection 1 */
+#define SST_SR_BP2 (1 << 4) /* Block Protection 2 */
+#define SST_SR_AAI (1 << 6) /* Addressing mode */
+#define SST_SR_BPL (1 << 7) /* BP bits lock */
+
+struct sst_spi_flash_params {
+ u8 idcode1;
+ u16 nr_sectors;
+ const char *name;
+};
+
+struct sst_spi_flash {
+ struct spi_flash flash;
+ const struct sst_spi_flash_params *params;
+};
+
+static inline struct sst_spi_flash *to_sst_spi_flash(struct spi_flash *flash)
+{
+ return container_of(flash, struct sst_spi_flash, flash);
+}
+
+#define SST_SECTOR_SIZE (4 * 1024)
+static const struct sst_spi_flash_params sst_spi_flash_table[] = {
+ {
+ .idcode1 = 0x8d,
+ .nr_sectors = 128,
+ .name = "SST25VF040B",
+ },{
+ .idcode1 = 0x8e,
+ .nr_sectors = 256,
+ .name = "SST25VF080B",
+ },{
+ .idcode1 = 0x41,
+ .nr_sectors = 512,
+ .name = "SST25VF016B",
+ },{
+ .idcode1 = 0x4a,
+ .nr_sectors = 1024,
+ .name = "SST25VF032B",
+ },{
+ .idcode1 = 0x01,
+ .nr_sectors = 16,
+ .name = "SST25WF512",
+ },{
+ .idcode1 = 0x02,
+ .nr_sectors = 32,
+ .name = "SST25WF010",
+ },{
+ .idcode1 = 0x03,
+ .nr_sectors = 64,
+ .name = "SST25WF020",
+ },{
+ .idcode1 = 0x04,
+ .nr_sectors = 128,
+ .name = "SST25WF040",
+ },
+};
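+
+/*
+ * Each part's total size is SST_SECTOR_SIZE * nr_sectors; e.g. the
+ * SST25VF032B entry above gives 4 KiB * 1024 = 4 MiB.
+ */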
+
+static int
+sst_wait_ready(struct spi_flash *flash, unsigned long timeout)
+{
+ struct spi_slave *spi = flash->spi;
+ unsigned long timebase;
+ int ret;
+ u8 byte = CMD_SST_RDSR;
+
+ ret = spi_xfer(spi, sizeof(byte) * 8, &byte, NULL, SPI_XFER_BEGIN);
+ if (ret) {
+ debug("SF: Failed to send command %02x: %d\n", byte, ret);
+ return ret;
+ }
+
+ timebase = get_timer(0);
+ do {
+ ret = spi_xfer(spi, sizeof(byte) * 8, NULL, &byte, 0);
+ if (ret)
+ break;
+
+ if ((byte & SST_SR_WIP) == 0)
+ break;
+
+ } while (get_timer(timebase) < timeout);
+
+ spi_xfer(spi, 0, NULL, NULL, SPI_XFER_END);
+
+ if (!ret && (byte & SST_SR_WIP) != 0)
+ ret = -1;
+
+ if (ret)
+ debug("SF: sst wait for ready timed out\n");
+ return ret;
+}
+
+static int
+sst_enable_writing(struct spi_flash *flash)
+{
+ int ret = spi_flash_cmd(flash->spi, CMD_SST_WREN, NULL, 0);
+ if (ret)
+ debug("SF: Enabling Write failed\n");
+ return ret;
+}
+
+static int
+sst_disable_writing(struct spi_flash *flash)
+{
+ int ret = spi_flash_cmd(flash->spi, CMD_SST_WRDI, NULL, 0);
+ if (ret)
+ debug("SF: Disabling Write failed\n");
+ return ret;
+}
+
+static int
+sst_read_fast(struct spi_flash *flash, u32 offset, size_t len, void *buf)
+{
+ u8 cmd[5] = {
+ CMD_READ_ARRAY_FAST,
+ offset >> 16,
+ offset >> 8,
+ offset,
+ 0x00,
+ };
+ return spi_flash_read_common(flash, cmd, sizeof(cmd), buf, len);
+}
+
+static int
+sst_byte_write(struct spi_flash *flash, u32 offset, const void *buf)
+{
+ int ret;
+ u8 cmd[4] = {
+ CMD_SST_BP,
+ offset >> 16,
+ offset >> 8,
+ offset,
+ };
+
+ debug("BP[%02x]: 0x%p => cmd = { 0x%02x 0x%06x }\n",
+ spi_w8r8(flash->spi, CMD_SST_RDSR), buf, cmd[0], offset);
+
+ ret = sst_enable_writing(flash);
+ if (ret)
+ return ret;
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, sizeof(cmd), buf, 1);
+ if (ret)
+ return ret;
+
+ return sst_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+}
+
+static int
+sst_write(struct spi_flash *flash, u32 offset, size_t len, const void *buf)
+{
+ size_t actual, cmd_len;
+ int ret;
+ u8 cmd[4];
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ /* If the data is not word aligned, write out leading single byte */
+ actual = offset % 2;
+ if (actual) {
+ ret = sst_byte_write(flash, offset, buf);
+ if (ret)
+ goto done;
+ }
+ offset += actual;
+
+ ret = sst_enable_writing(flash);
+ if (ret)
+ goto done;
+
+ cmd_len = 4;
+ cmd[0] = CMD_SST_AAI_WP;
+ cmd[1] = offset >> 16;
+ cmd[2] = offset >> 8;
+ cmd[3] = offset;
+
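+
+	/*
+	 * Auto Address Increment programming: the first AAI_WP command
+	 * carries the 3-byte start address plus two data bytes; every
+	 * following AAI_WP carries only two data bytes and the device
+	 * advances the address itself, which is why cmd_len drops from
+	 * 4 to 1 after the first iteration.  WRDI ends the sequence.
+	 */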
+ for (; actual < len - 1; actual += 2) {
+ debug("WP[%02x]: 0x%p => cmd = { 0x%02x 0x%06x }\n",
+ spi_w8r8(flash->spi, CMD_SST_RDSR), buf + actual, cmd[0],
+ offset);
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, cmd_len,
+ buf + actual, 2);
+ if (ret) {
+ debug("SF: sst word program failed\n");
+ break;
+ }
+
+ ret = sst_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+ if (ret)
+ break;
+
+ cmd_len = 1;
+ offset += 2;
+ }
+
+ if (!ret)
+ ret = sst_disable_writing(flash);
+
+ /* If there is a single trailing byte, write it out */
+ if (!ret && actual != len)
+ ret = sst_byte_write(flash, offset, buf + actual);
+
+ done:
+ debug("SF: sst: program %s %zu bytes @ 0x%zx\n",
+ ret ? "failure" : "success", len, offset - actual);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+int
+sst_erase(struct spi_flash *flash, u32 offset, size_t len)
+{
+ unsigned long sector_size;
+ u32 start, end;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * This function currently uses sector erase only.
+ * Probably speed things up by using bulk erase
+ * when possible.
+ */
+
+ sector_size = SST_SECTOR_SIZE;
+
+ if (offset % sector_size) {
+ debug("SF: Erase offset not multiple of sector size\n");
+ return -1;
+ }
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ cmd[0] = CMD_SST_SE;
+ cmd[3] = 0;
+ start = offset;
+ end = start + len;
+
+ ret = 0;
+ while (offset < end) {
+ cmd[1] = offset >> 16;
+ cmd[2] = offset >> 8;
+ offset += sector_size;
+
+ debug("SF: erase %2x %2x %2x %2x (%x)\n", cmd[0], cmd[1],
+ cmd[2], cmd[3], offset);
+
+ ret = sst_enable_writing(flash);
+ if (ret)
+ break;
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, sizeof(cmd), NULL, 0);
+ if (ret) {
+ debug("SF: sst page erase failed\n");
+ break;
+ }
+
+ ret = sst_wait_ready(flash, SPI_FLASH_PAGE_ERASE_TIMEOUT);
+ if (ret)
+ break;
+ }
+
+	debug("SF: sst: erase %s %zu bytes @ 0x%x\n",
+	      ret ? "failed" : "succeeded", len, start);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+static int
+sst_unlock(struct spi_flash *flash)
+{
+ int ret;
+ u8 cmd, status;
+
+ ret = sst_enable_writing(flash);
+ if (ret)
+ return ret;
+
+ cmd = CMD_SST_WRSR;
+ status = 0;
+ ret = spi_flash_cmd_write(flash->spi, &cmd, 1, &status, 1);
+ if (ret)
+ debug("SF: Unable to set status byte\n");
+
+ debug("SF: sst: status = %x\n", spi_w8r8(flash->spi, CMD_SST_RDSR));
+
+ return ret;
+}
+
+struct spi_flash *
+spi_flash_probe_sst(struct spi_slave *spi, u8 *idcode)
+{
+ const struct sst_spi_flash_params *params;
+ struct sst_spi_flash *stm;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(sst_spi_flash_table); ++i) {
+ params = &sst_spi_flash_table[i];
+ if (params->idcode1 == idcode[2])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(sst_spi_flash_table)) {
+		debug("SF: Unsupported SST ID %02x\n", idcode[2]);
+ return NULL;
+ }
+
+ stm = malloc(sizeof(*stm));
+ if (!stm) {
+ debug("SF: Failed to allocate memory\n");
+ return NULL;
+ }
+
+ stm->params = params;
+ stm->flash.spi = spi;
+ stm->flash.name = params->name;
+
+ stm->flash.write = sst_write;
+ stm->flash.erase = sst_erase;
+ stm->flash.read = sst_read_fast;
+ stm->flash.size = SST_SECTOR_SIZE * params->nr_sectors;
+
+	printf("SF: Detected %s with sector size %u, total ",
+	       params->name, SST_SECTOR_SIZE);
+ print_size(stm->flash.size, "\n");
+
+ /* Flash powers up read-only, so clear BP# bits */
+ sst_unlock(&stm->flash);
+
+ return &stm->flash;
+}
diff --git a/u-boot/drivers/mtd/spi/stmicro.c b/u-boot/drivers/mtd/spi/stmicro.c
new file mode 100644
index 0000000..3134027
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/stmicro.c
@@ -0,0 +1,373 @@
+/*
+ * (C) Copyright 2000-2002
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ *
+ * Copyright 2008, Network Appliance Inc.
+ * Jason McMullan <mcmullan@netapp.com>
+ *
+ * Copyright (C) 2004-2007 Freescale Semiconductor, Inc.
+ * TsiChung Liew (Tsi-Chung.Liew@freescale.com)
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <spi_flash.h>
+
+#include "spi_flash_internal.h"
+
+/* M25Pxx-specific commands */
+#define CMD_M25PXX_WREN 0x06 /* Write Enable */
+#define CMD_M25PXX_WRDI 0x04 /* Write Disable */
+#define CMD_M25PXX_RDSR 0x05 /* Read Status Register */
+#define CMD_M25PXX_WRSR 0x01 /* Write Status Register */
+#define CMD_M25PXX_READ 0x03 /* Read Data Bytes */
+#define CMD_M25PXX_FAST_READ 0x0b /* Read Data Bytes at Higher Speed */
+#define CMD_M25PXX_PP 0x02 /* Page Program */
+#define CMD_M25PXX_SE 0xd8 /* Sector Erase */
+#define CMD_M25PXX_BE 0xc7 /* Bulk Erase */
+#define CMD_M25PXX_DP 0xb9 /* Deep Power-down */
+#define CMD_M25PXX_RES 0xab /* Release from DP, and Read Signature */
+
+#define STM_ID_M25P10 0x11
+#define STM_ID_M25P16 0x15
+#define STM_ID_M25P20 0x12
+#define STM_ID_M25P32 0x16
+#define STM_ID_M25P40 0x13
+#define STM_ID_M25P64 0x17
+#define STM_ID_M25P80 0x14
+#define STM_ID_M25P128 0x18
+
+#define STMICRO_SR_WIP (1 << 0) /* Write-in-Progress */
+
+struct stmicro_spi_flash_params {
+ u8 idcode1;
+ u16 page_size;
+ u16 pages_per_sector;
+ u16 nr_sectors;
+ const char *name;
+};
+
+/* spi_flash needs to be first so upper layers can free() it */
+struct stmicro_spi_flash {
+ struct spi_flash flash;
+ const struct stmicro_spi_flash_params *params;
+};
+
+static inline struct stmicro_spi_flash *to_stmicro_spi_flash(struct spi_flash
+ *flash)
+{
+ return container_of(flash, struct stmicro_spi_flash, flash);
+}
+
+static const struct stmicro_spi_flash_params stmicro_spi_flash_table[] = {
+ {
+ .idcode1 = STM_ID_M25P10,
+ .page_size = 256,
+ .pages_per_sector = 128,
+ .nr_sectors = 4,
+ .name = "M25P10",
+ },
+ {
+ .idcode1 = STM_ID_M25P16,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 32,
+ .name = "M25P16",
+ },
+ {
+ .idcode1 = STM_ID_M25P20,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 4,
+ .name = "M25P20",
+ },
+ {
+ .idcode1 = STM_ID_M25P32,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 64,
+ .name = "M25P32",
+ },
+ {
+ .idcode1 = STM_ID_M25P40,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 8,
+ .name = "M25P40",
+ },
+ {
+ .idcode1 = STM_ID_M25P64,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 128,
+ .name = "M25P64",
+ },
+ {
+ .idcode1 = STM_ID_M25P80,
+ .page_size = 256,
+ .pages_per_sector = 256,
+ .nr_sectors = 16,
+ .name = "M25P80",
+ },
+ {
+ .idcode1 = STM_ID_M25P128,
+ .page_size = 256,
+ .pages_per_sector = 1024,
+ .nr_sectors = 64,
+ .name = "M25P128",
+ },
+};
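+
+/*
+ * The usable size of each part is page_size * pages_per_sector * nr_sectors;
+ * e.g. the M25P64 entry above gives 256 * 256 * 128 = 8 MiB.
+ */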
+
+static int stmicro_wait_ready(struct spi_flash *flash, unsigned long timeout)
+{
+ struct spi_slave *spi = flash->spi;
+ unsigned long timebase;
+ int ret;
+ u8 cmd = CMD_M25PXX_RDSR;
+ u8 status;
+
+ ret = spi_xfer(spi, 8, &cmd, NULL, SPI_XFER_BEGIN);
+ if (ret) {
+ debug("SF: Failed to send command %02x: %d\n", cmd, ret);
+ return ret;
+ }
+
+ timebase = get_timer(0);
+ do {
+ ret = spi_xfer(spi, 8, NULL, &status, 0);
+ if (ret)
+ return -1;
+
+ if ((status & STMICRO_SR_WIP) == 0)
+ break;
+
+ } while (get_timer(timebase) < timeout);
+
+ spi_xfer(spi, 0, NULL, NULL, SPI_XFER_END);
+
+ if ((status & STMICRO_SR_WIP) == 0)
+ return 0;
+
+ /* Timed out */
+ return -1;
+}
+
+static int stmicro_read_fast(struct spi_flash *flash,
+ u32 offset, size_t len, void *buf)
+{
+ struct stmicro_spi_flash *stm = to_stmicro_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long page_size;
+ u8 cmd[5];
+
+ page_size = stm->params->page_size;
+ page_addr = offset / page_size;
+
+ cmd[0] = CMD_READ_ARRAY_FAST;
+ cmd[1] = page_addr >> 8;
+ cmd[2] = page_addr;
+ cmd[3] = offset % page_size;
+ cmd[4] = 0x00;
+
+ return spi_flash_read_common(flash, cmd, sizeof(cmd), buf, len);
+}
+
+static int stmicro_write(struct spi_flash *flash,
+ u32 offset, size_t len, const void *buf)
+{
+ struct stmicro_spi_flash *stm = to_stmicro_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long byte_addr;
+ unsigned long page_size;
+ size_t chunk_len;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ page_size = stm->params->page_size;
+ page_addr = offset / page_size;
+ byte_addr = offset % page_size;
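+
+	/*
+	 * With the 256-byte pages used by every part in the table above,
+	 * page_addr/byte_addr is just the 24-bit offset split into bytes:
+	 * e.g. offset 0x12345 gives page_addr 0x123 and byte_addr 0x45,
+	 * i.e. a command of { CMD_M25PXX_PP, 0x01, 0x23, 0x45 }.
+	 */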
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (actual = 0; actual < len; actual += chunk_len) {
+ chunk_len = min(len - actual, page_size - byte_addr);
+
+ cmd[0] = CMD_M25PXX_PP;
+ cmd[1] = page_addr >> 8;
+ cmd[2] = page_addr;
+ cmd[3] = byte_addr;
+
+		debug("PP: 0x%p => cmd = { 0x%02x 0x%02x%02x%02x } chunk_len = %d\n",
+		      buf + actual, cmd[0], cmd[1], cmd[2], cmd[3], chunk_len);
+
+ ret = spi_flash_cmd(flash->spi, CMD_M25PXX_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ break;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4,
+ buf + actual, chunk_len);
+ if (ret < 0) {
+ debug("SF: STMicro Page Program failed\n");
+ break;
+ }
+
+ ret = stmicro_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: STMicro page programming timed out\n");
+ break;
+ }
+
+ page_addr++;
+ byte_addr = 0;
+ }
+
+	debug("SF: STMicro: page program %s, %zu bytes @ 0x%x\n",
+	      ret ? "failed" : "succeeded", len, offset);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+int stmicro_erase(struct spi_flash *flash, u32 offset, size_t len)
+{
+ struct stmicro_spi_flash *stm = to_stmicro_spi_flash(flash);
+ unsigned long sector_size;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * This function currently uses sector erase only.
+	 * It could probably be sped up by using bulk erase
+ * when possible.
+ */
+
+ sector_size = stm->params->page_size * stm->params->pages_per_sector;
+
+ if (offset % sector_size || len % sector_size) {
+ debug("SF: Erase offset/length not multiple of sector size\n");
+ return -1;
+ }
+
+ len /= sector_size;
+ cmd[0] = CMD_M25PXX_SE;
+ cmd[2] = 0x00;
+ cmd[3] = 0x00;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (actual = 0; actual < len; actual++) {
+ cmd[1] = offset >> 16;
+ offset += sector_size;
+
+ ret = spi_flash_cmd(flash->spi, CMD_M25PXX_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ break;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: STMicro page erase failed\n");
+ break;
+ }
+
+ ret = stmicro_wait_ready(flash, SPI_FLASH_PAGE_ERASE_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: STMicro page erase timed out\n");
+ break;
+ }
+ }
+
+	debug("SF: STMicro: erase %s %u bytes @ 0x%x\n",
+	      ret ? "failed" : "succeeded", len * sector_size, offset);
+
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+struct spi_flash *spi_flash_probe_stmicro(struct spi_slave *spi, u8 *idcode)
+{
+ const struct stmicro_spi_flash_params *params;
+ struct stmicro_spi_flash *stm;
+ unsigned int i;
+
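+	/*
+	 * Old devices that do not answer READ_ID (idcode[0] == 0xff) are
+	 * probed with RES (release from deep power-down and read electronic
+	 * signature).  For illustration: an M25P80 returns the signature
+	 * 0x13, so a synthetic idcode of { 0x20, 0x20, 0x14 } is built,
+	 * which matches STM_ID_M25P80 in the table above.
+	 */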
+ if (idcode[0] == 0xff) {
+ i = spi_flash_cmd(spi, CMD_M25PXX_RES,
+ idcode, 4);
+ if (i)
+ return NULL;
+ if ((idcode[3] & 0xf0) == 0x10) {
+ idcode[0] = 0x20;
+ idcode[1] = 0x20;
+ idcode[2] = idcode[3] + 1;
+ } else
+ return NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(stmicro_spi_flash_table); i++) {
+ params = &stmicro_spi_flash_table[i];
+ if (params->idcode1 == idcode[2]) {
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(stmicro_spi_flash_table)) {
+		debug("SF: Unsupported STMicro ID %02x\n", idcode[2]);
+ return NULL;
+ }
+
+ stm = malloc(sizeof(struct stmicro_spi_flash));
+ if (!stm) {
+ debug("SF: Failed to allocate memory\n");
+ return NULL;
+ }
+
+ stm->params = params;
+ stm->flash.spi = spi;
+ stm->flash.name = params->name;
+
+ stm->flash.write = stmicro_write;
+ stm->flash.erase = stmicro_erase;
+ stm->flash.read = stmicro_read_fast;
+ stm->flash.size = params->page_size * params->pages_per_sector
+ * params->nr_sectors;
+
+ printf("SF: Detected %s with page size %u, total ",
+ params->name, params->page_size);
+ print_size(stm->flash.size, "\n");
+
+ return &stm->flash;
+}
diff --git a/u-boot/drivers/mtd/spi/winbond.c b/u-boot/drivers/mtd/spi/winbond.c
new file mode 100644
index 0000000..4452355
--- /dev/null
+++ b/u-boot/drivers/mtd/spi/winbond.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright 2008, Network Appliance Inc.
+ * Author: Jason McMullan <mcmullan <at> netapp.com>
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <spi_flash.h>
+
+#include "spi_flash_internal.h"
+
+/* W25-specific commands */
+#define CMD_W25_WREN 0x06 /* Write Enable */
+#define CMD_W25_WRDI 0x04 /* Write Disable */
+#define CMD_W25_RDSR 0x05 /* Read Status Register */
+#define CMD_W25_WRSR 0x01 /* Write Status Register */
+#define CMD_W25_READ 0x03 /* Read Data Bytes */
+#define CMD_W25_FAST_READ 0x0b /* Read Data Bytes at Higher Speed */
+#define CMD_W25_PP 0x02 /* Page Program */
+#define CMD_W25_SE 0x20 /* Sector (4K) Erase */
+#define CMD_W25_BE 0xd8 /* Block (64K) Erase */
+#define CMD_W25_CE 0xc7 /* Chip Erase */
+#define CMD_W25_DP 0xb9 /* Deep Power-down */
+#define CMD_W25_RES 0xab /* Release from DP, and Read Signature */
+
+#define WINBOND_SR_WIP (1 << 0) /* Write-in-Progress */
+
+struct winbond_spi_flash_params {
+ uint16_t id;
+ /* Log2 of page size in power-of-two mode */
+ uint8_t l2_page_size;
+ uint16_t pages_per_sector;
+ uint16_t sectors_per_block;
+ uint16_t nr_blocks;
+ const char *name;
+};
+
+/* spi_flash needs to be first so upper layers can free() it */
+struct winbond_spi_flash {
+ struct spi_flash flash;
+ const struct winbond_spi_flash_params *params;
+};
+
+static inline struct winbond_spi_flash *
+to_winbond_spi_flash(struct spi_flash *flash)
+{
+ return container_of(flash, struct winbond_spi_flash, flash);
+}
+
+static const struct winbond_spi_flash_params winbond_spi_flash_table[] = {
+ {
+ .id = 0x3015,
+ .l2_page_size = 8,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 32,
+ .name = "W25X16",
+ },
+ {
+ .id = 0x3016,
+ .l2_page_size = 8,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 64,
+ .name = "W25X32",
+ },
+ {
+ .id = 0x3017,
+ .l2_page_size = 8,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 128,
+ .name = "W25X64",
+ },
+ {
+ .id = 0x4015,
+ .l2_page_size = 8,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 32,
+ .name = "W25Q16",
+ },
+ {
+ .id = 0x4016,
+ .l2_page_size = 8,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 64,
+ .name = "W25Q32",
+ },
+ {
+ .id = 0x4017,
+ .l2_page_size = 8,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 128,
+ .name = "W25Q64",
+ },
+ {
+ .id = 0x4018,
+ .l2_page_size = 8,
+ .pages_per_sector = 16,
+ .sectors_per_block = 16,
+ .nr_blocks = 256,
+ .name = "W25Q128",
+ },
+};
+
+static int winbond_wait_ready(struct spi_flash *flash, unsigned long timeout)
+{
+ struct spi_slave *spi = flash->spi;
+ unsigned long timebase;
+ int ret;
+ u8 status;
+ u8 cmd[4] = { CMD_W25_RDSR, 0xff, 0xff, 0xff };
+
+ ret = spi_xfer(spi, 32, &cmd[0], NULL, SPI_XFER_BEGIN);
+ if (ret) {
+		debug("SF: Failed to send command %02x: %d\n", cmd[0], ret);
+ return ret;
+ }
+
+ timebase = get_timer(0);
+ do {
+ ret = spi_xfer(spi, 8, NULL, &status, 0);
+ if (ret) {
+			debug("SF: Failed to get status for cmd %02x: %d\n", cmd[0], ret);
+ return -1;
+ }
+
+ if ((status & WINBOND_SR_WIP) == 0)
+ break;
+
+ } while (get_timer(timebase) < timeout);
+
+ spi_xfer(spi, 0, NULL, NULL, SPI_XFER_END);
+
+ if ((status & WINBOND_SR_WIP) == 0)
+ return 0;
+
+	debug("SF: Timed out on command %02x: %d\n", cmd[0], ret);
+ /* Timed out */
+ return -1;
+}
+
+/*
+ * Assemble the three address bytes of a command from a linear byte offset,
+ * using the device's power-of-two page size.
+ */
+static void winbond_build_address(struct winbond_spi_flash *stm, u8 *cmd, u32 offset)
+{
+ unsigned long page_addr;
+ unsigned long byte_addr;
+ unsigned long page_size;
+ unsigned int page_shift;
+
+	/*
+	 * Split the offset into a page address and a byte address within
+	 * the page, then pack both into the 24-bit command address.
+	 */
+ page_shift = stm->params->l2_page_size;
+ page_size = (1 << page_shift);
+ page_addr = offset / page_size;
+ byte_addr = offset % page_size;
+
+ cmd[0] = page_addr >> (16 - page_shift);
+ cmd[1] = page_addr << (page_shift - 8) | (byte_addr >> 8);
+ cmd[2] = byte_addr;
+}
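+
+/*
+ * Example (for illustration): with l2_page_size = 8 the page is 256 bytes,
+ * so offset 0x123456 gives page_addr 0x1234 and byte_addr 0x56, and the
+ * three address bytes become { 0x12, 0x34, 0x56 }, i.e. the plain 24-bit
+ * byte address.
+ */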
+
+static int winbond_read_fast(struct spi_flash *flash,
+ u32 offset, size_t len, void *buf)
+{
+ struct winbond_spi_flash *stm = to_winbond_spi_flash(flash);
+ u8 cmd[5];
+
+ cmd[0] = CMD_READ_ARRAY_FAST;
+ winbond_build_address(stm, cmd + 1, offset);
+ cmd[4] = 0x00;
+
+ return spi_flash_read_common(flash, cmd, sizeof(cmd), buf, len);
+}
+
+static int winbond_write(struct spi_flash *flash,
+ u32 offset, size_t len, const void *buf)
+{
+ struct winbond_spi_flash *stm = to_winbond_spi_flash(flash);
+ unsigned long page_addr;
+ unsigned long byte_addr;
+ unsigned long page_size;
+ unsigned int page_shift;
+ size_t chunk_len;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ page_shift = stm->params->l2_page_size;
+ page_size = (1 << page_shift);
+ page_addr = offset / page_size;
+ byte_addr = offset % page_size;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ for (actual = 0; actual < len; actual += chunk_len) {
+ chunk_len = min(len - actual, page_size - byte_addr);
+
+ cmd[0] = CMD_W25_PP;
+ cmd[1] = page_addr >> (16 - page_shift);
+ cmd[2] = page_addr << (page_shift - 8) | (byte_addr >> 8);
+ cmd[3] = byte_addr;
+ debug("PP: 0x%p => cmd = { 0x%02x 0x%02x%02x%02x } chunk_len = %d\n",
+ buf + actual,
+ cmd[0], cmd[1], cmd[2], cmd[3], chunk_len);
+
+ ret = spi_flash_cmd(flash->spi, CMD_W25_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ goto out;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4,
+ buf + actual, chunk_len);
+ if (ret < 0) {
+ debug("SF: Winbond Page Program failed\n");
+ goto out;
+ }
+
+ ret = winbond_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: Winbond page programming timed out\n");
+ goto out;
+ }
+
+ page_addr++;
+ byte_addr = 0;
+ }
+
+ debug("SF: Winbond: Successfully programmed %u bytes @ 0x%x\n",
+ len, offset);
+ ret = 0;
+
+out:
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+int winbond_erase(struct spi_flash *flash, u32 offset, size_t len)
+{
+ struct winbond_spi_flash *stm = to_winbond_spi_flash(flash);
+ unsigned long sector_size;
+ unsigned int page_shift;
+ size_t actual;
+ int ret;
+ u8 cmd[4];
+
+ /*
+ * This function currently uses sector erase only.
+	 * It could probably be sped up by using bulk erase
+ * when possible.
+ */
+
+ page_shift = stm->params->l2_page_size;
+ sector_size = (1 << page_shift) * stm->params->pages_per_sector;
+
+ if (offset % sector_size || len % sector_size) {
+ debug("SF: Erase offset/length not multiple of sector size\n");
+ return -1;
+ }
+
+ len /= sector_size;
+ cmd[0] = CMD_W25_SE;
+
+ ret = spi_claim_bus(flash->spi);
+ if (ret) {
+ debug("SF: Unable to claim SPI bus\n");
+ return ret;
+ }
+
+ for (actual = 0; actual < len; actual++) {
+ winbond_build_address(stm, &cmd[1], offset + actual * sector_size);
+		debug("SF: erase %02x %02x %02x %02x\n",
+		      cmd[0], cmd[1], cmd[2], cmd[3]);
+
+ ret = spi_flash_cmd(flash->spi, CMD_W25_WREN, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Enabling Write failed\n");
+ goto out;
+ }
+
+ ret = spi_flash_cmd_write(flash->spi, cmd, 4, NULL, 0);
+ if (ret < 0) {
+ debug("SF: Winbond sector erase failed\n");
+ goto out;
+ }
+
+ ret = winbond_wait_ready(flash, SPI_FLASH_PAGE_ERASE_TIMEOUT);
+ if (ret < 0) {
+ debug("SF: Winbond sector erase timed out\n");
+ goto out;
+ }
+ }
+
+ debug("SF: Winbond: Successfully erased %u bytes @ 0x%x\n",
+ len * sector_size, offset);
+ ret = 0;
+
+out:
+ spi_release_bus(flash->spi);
+ return ret;
+}
+
+struct spi_flash *spi_flash_probe_winbond(struct spi_slave *spi, u8 *idcode)
+{
+ const struct winbond_spi_flash_params *params;
+ unsigned page_size;
+ struct winbond_spi_flash *stm;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(winbond_spi_flash_table); i++) {
+ params = &winbond_spi_flash_table[i];
+ if (params->id == ((idcode[1] << 8) | idcode[2]))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(winbond_spi_flash_table)) {
+ debug("SF: Unsupported Winbond ID %02x%02x\n",
+ idcode[1], idcode[2]);
+ return NULL;
+ }
+
+ stm = malloc(sizeof(struct winbond_spi_flash));
+ if (!stm) {
+ debug("SF: Failed to allocate memory\n");
+ return NULL;
+ }
+
+ stm->params = params;
+ stm->flash.spi = spi;
+ stm->flash.name = params->name;
+
+ /* Assuming power-of-two page size initially. */
+ page_size = 1 << params->l2_page_size;
+
+ stm->flash.write = winbond_write;
+ stm->flash.erase = winbond_erase;
+ stm->flash.read = winbond_read_fast;
+ stm->flash.size = page_size * params->pages_per_sector
+ * params->sectors_per_block
+ * params->nr_blocks;
+
+ printf("SF: Detected %s with page size %u, total ",
+ params->name, page_size);
+ print_size(stm->flash.size, "\n");
+
+ return &stm->flash;
+}
diff --git a/u-boot/drivers/mtd/spr_smi.c b/u-boot/drivers/mtd/spr_smi.c
new file mode 100644
index 0000000..9a70a19
--- /dev/null
+++ b/u-boot/drivers/mtd/spr_smi.c
@@ -0,0 +1,523 @@
+/*
+ * (C) Copyright 2009
+ * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <flash.h>
+#include <linux/err.h>
+
+#include <asm/io.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/spr_smi.h>
+
+#if !defined(CONFIG_SYS_NO_FLASH)
+
+static struct smi_regs *const smicntl =
+ (struct smi_regs * const)CONFIG_SYS_SMI_BASE;
+static ulong bank_base[CONFIG_SYS_MAX_FLASH_BANKS] =
+ CONFIG_SYS_FLASH_ADDR_BASE;
+flash_info_t flash_info[CONFIG_SYS_MAX_FLASH_BANKS];
+
+#define ST_M25Pxx_ID 0x00002020
+
+static struct flash_dev flash_ids[] = {
+ {0x10, 0x10000, 2}, /* 64K Byte */
+ {0x11, 0x20000, 4}, /* 128K Byte */
+ {0x12, 0x40000, 4}, /* 256K Byte */
+ {0x13, 0x80000, 8}, /* 512K Byte */
+ {0x14, 0x100000, 16}, /* 1M Byte */
+ {0x15, 0x200000, 32}, /* 2M Byte */
+ {0x16, 0x400000, 64}, /* 4M Byte */
+ {0x17, 0x800000, 128}, /* 8M Byte */
+ {0x18, 0x1000000, 64}, /* 16M Byte */
+ {0x00,}
+};
+
+/*
+ * smi_wait_xfer_finish - Wait until TFF is set in status register
+ * @timeout: timeout in milliseconds
+ *
+ * Wait until TFF is set in status register
+ */
+static void smi_wait_xfer_finish(int timeout)
+{
+ while (timeout--) {
+ if (readl(&smicntl->smi_sr) & TFF)
+ break;
+ udelay(1000);
+ }
+}
+
+/*
+ * smi_read_id - Read flash id
+ * @info: flash_info structure pointer
+ * @banknum: bank number
+ *
+ * Read the flash id present at bank #banknum
+ */
+static unsigned int smi_read_id(flash_info_t *info, int banknum)
+{
+ unsigned int value;
+
+ writel(readl(&smicntl->smi_cr1) | SW_MODE, &smicntl->smi_cr1);
+ writel(READ_ID, &smicntl->smi_tr);
+ writel((banknum << BANKSEL_SHIFT) | SEND | TX_LEN_1 | RX_LEN_3,
+ &smicntl->smi_cr2);
+ smi_wait_xfer_finish(XFER_FINISH_TOUT);
+
+ value = (readl(&smicntl->smi_rr) & 0x00FFFFFF);
+
+ writel(readl(&smicntl->smi_sr) & ~TFF, &smicntl->smi_sr);
+ writel(readl(&smicntl->smi_cr1) & ~SW_MODE, &smicntl->smi_cr1);
+
+ return value;
+}
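+
+/*
+ * For illustration (assuming the usual ST JEDEC ID layout in the receive
+ * register): an M25P64 yields value 0x00172020, i.e. density 0x17 in bits
+ * 23:16 and flash_id 0x2020, which flash_get_size() below maps to 8 MiB in
+ * 128 sectors via the flash_ids[] table.
+ */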
+
+/*
+ * flash_get_size - Detect the SMI flash by reading the ID.
+ * @base: Base address of the flash area bank #banknum
+ * @banknum: Bank number
+ *
+ * Detect the SMI flash by reading the ID. Initializes the flash_info structure
+ * with size, sector count etc.
+ */
+static ulong flash_get_size(ulong base, int banknum)
+{
+ flash_info_t *info = &flash_info[banknum];
+ struct flash_dev *dev;
+ unsigned int value;
+ unsigned int density;
+ int i;
+
+ value = smi_read_id(info, banknum);
+ density = (value >> 16) & 0xff;
+
+ for (i = 0, dev = &flash_ids[0]; dev->density != 0x0;
+ i++, dev = &flash_ids[i]) {
+ if (dev->density == density) {
+ info->size = dev->size;
+ info->sector_count = dev->sector_count;
+ break;
+ }
+ }
+
+ if (dev->density == 0x0)
+ return 0;
+
+ info->flash_id = value & 0xffff;
+ info->start[0] = base;
+
+ return info->size;
+}
+
+/*
+ * smi_read_sr - Read status register of SMI
+ * @bank: bank number
+ *
+ * This routine will get the status register of the flash chip present at the
+ * given bank
+ */
+static unsigned int smi_read_sr(int bank)
+{
+ u32 ctrlreg1;
+
+ /* store the CTRL REG1 state */
+ ctrlreg1 = readl(&smicntl->smi_cr1);
+
+ /* Program SMI in HW Mode */
+ writel(readl(&smicntl->smi_cr1) & ~(SW_MODE | WB_MODE),
+ &smicntl->smi_cr1);
+
+ /* Performing a RSR instruction in HW mode */
+ writel((bank << BANKSEL_SHIFT) | RD_STATUS_REG, &smicntl->smi_cr2);
+
+ smi_wait_xfer_finish(XFER_FINISH_TOUT);
+
+ /* Restore the CTRL REG1 state */
+ writel(ctrlreg1, &smicntl->smi_cr1);
+
+ return readl(&smicntl->smi_sr);
+}
+
+/*
+ * smi_wait_till_ready - Wait till last operation is over.
+ * @bank: bank number shifted.
+ * @timeout: timeout in milliseconds.
+ *
+ * This routine polls the WIP (write in progress) bit in the status register
+ * (SMSR bit 0) for up to #timeout loops, each at an interval of 1 millisecond.
+ * If the flash becomes ready in time, the routine returns 0.
+ */
+static int smi_wait_till_ready(int bank, int timeout)
+{
+ int count;
+ unsigned int sr;
+
+ /* One chip guarantees max 5 msec wait here after page writes,
+ but potentially three seconds (!) after page erase. */
+ for (count = 0; count < timeout; count++) {
+
+ sr = smi_read_sr(bank);
+ if (sr < 0)
+ break;
+ else if (!(sr & WIP_BIT))
+ return 0;
+
+		/* Try again after 1 msec */
+ udelay(1000);
+ }
+ printf("SMI controller is still in wait, timeout=%d\n", timeout);
+ return -EIO;
+}
+
+/*
+ * smi_write_enable - Enable the flash to do write operation
+ * @bank: bank number
+ *
+ * Set write enable latch with Write Enable command.
+ * Returns negative if error occurred.
+ */
+static int smi_write_enable(int bank)
+{
+ u32 ctrlreg1;
+ int timeout = WMODE_TOUT;
+
+ /* Store the CTRL REG1 state */
+ ctrlreg1 = readl(&smicntl->smi_cr1);
+
+ /* Program SMI in H/W Mode */
+ writel(readl(&smicntl->smi_cr1) & ~SW_MODE, &smicntl->smi_cr1);
+
+ /* Give the Flash, Write Enable command */
+ writel((bank << BANKSEL_SHIFT) | WE, &smicntl->smi_cr2);
+
+ smi_wait_xfer_finish(XFER_FINISH_TOUT);
+
+ /* Restore the CTRL REG1 state */
+ writel(ctrlreg1, &smicntl->smi_cr1);
+
+	while (timeout--) {
+		if (smi_read_sr(bank) & (1 << (bank + WM_SHIFT)))
+			return 0;
+		udelay(1000);
+	}
+
+	return -1;
+}
+
+/*
+ * smi_init - SMI initialization routine
+ *
+ * SMI initialization routine. Sets SMI control register1.
+ */
+static void smi_init(void)
+{
+ /* Setting the fast mode values. SMI working at 166/4 = 41.5 MHz */
+ writel(HOLD1 | FAST_MODE | BANK_EN | DSEL_TIME | PRESCAL4,
+ &smicntl->smi_cr1);
+}
+
+/*
+ * smi_sector_erase - Erase flash sector
+ * @info: flash_info structure pointer
+ * @sector: sector number
+ *
+ * Set write enable latch with Write Enable command.
+ * Returns negative if error occurred.
+ */
+static int smi_sector_erase(flash_info_t *info, unsigned int sector)
+{
+ int bank;
+ unsigned int sect_add;
+ unsigned int instruction;
+
+ switch (info->start[0]) {
+ case SMIBANK0_BASE:
+ bank = BANK0;
+ break;
+ case SMIBANK1_BASE:
+ bank = BANK1;
+ break;
+ case SMIBANK2_BASE:
+ bank = BANK2;
+ break;
+ case SMIBANK3_BASE:
+ bank = BANK3;
+ break;
+ default:
+ return -1;
+ }
+
+ sect_add = sector * (info->size / info->sector_count);
+ instruction = ((sect_add >> 8) & 0x0000FF00) | SECTOR_ERASE;
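+	/*
+	 * The transmit word assembled above holds the erase opcode in its
+	 * low byte and address bits 23:16 in the next byte; the remaining
+	 * bytes stay zero.  Example (for illustration, 64 KiB sectors):
+	 * sector 3 gives sect_add 0x30000 and instruction
+	 * 0x0300 | SECTOR_ERASE.
+	 */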
+
+ writel(readl(&smicntl->smi_sr) & ~(ERF1 | ERF2), &smicntl->smi_sr);
+
+ if (info->flash_id == ST_M25Pxx_ID) {
+ /* Wait until finished previous write command. */
+ if (smi_wait_till_ready(bank, CONFIG_SYS_FLASH_ERASE_TOUT))
+ return -EBUSY;
+
+ /* Send write enable, before erase commands. */
+ if (smi_write_enable(bank))
+ return -EIO;
+
+ /* Put SMI in SW mode */
+ writel(readl(&smicntl->smi_cr1) | SW_MODE, &smicntl->smi_cr1);
+
+ /* Send Sector Erase command in SW Mode */
+ writel(instruction, &smicntl->smi_tr);
+ writel((bank << BANKSEL_SHIFT) | SEND | TX_LEN_4,
+ &smicntl->smi_cr2);
+ smi_wait_xfer_finish(XFER_FINISH_TOUT);
+
+ if (smi_wait_till_ready(bank, CONFIG_SYS_FLASH_ERASE_TOUT))
+ return -EBUSY;
+
+ /* Put SMI in HW mode */
+ writel(readl(&smicntl->smi_cr1) & ~SW_MODE,
+ &smicntl->smi_cr1);
+
+ return 0;
+ } else {
+ /* Put SMI in HW mode */
+ writel(readl(&smicntl->smi_cr1) & ~SW_MODE,
+ &smicntl->smi_cr1);
+ return -EINVAL;
+ }
+}
+
+/*
+ * smi_write - Write to SMI flash
+ * @src_addr: source buffer
+ * @dst_addr: destination buffer
+ * @length: length to write in words
+ * @bank: bank base address
+ *
+ * Write to SMI flash
+ */
+static int smi_write(unsigned int *src_addr, unsigned int *dst_addr,
+ unsigned int length, ulong bank_addr)
+{
+ int banknum;
+ unsigned int WM;
+
+ switch (bank_addr) {
+ case SMIBANK0_BASE:
+ banknum = BANK0;
+ WM = WM0;
+ break;
+ case SMIBANK1_BASE:
+ banknum = BANK1;
+ WM = WM1;
+ break;
+ case SMIBANK2_BASE:
+ banknum = BANK2;
+ WM = WM2;
+ break;
+ case SMIBANK3_BASE:
+ banknum = BANK3;
+ WM = WM3;
+ break;
+ default:
+ return -1;
+ }
+
+ if (smi_wait_till_ready(banknum, CONFIG_SYS_FLASH_WRITE_TOUT))
+ return -EBUSY;
+
+ /* Set SMI in Hardware Mode */
+ writel(readl(&smicntl->smi_cr1) & ~SW_MODE, &smicntl->smi_cr1);
+
+ if (smi_write_enable(banknum))
+ return -EIO;
+
+ /* Perform the write command */
+ while (length--) {
+ if (((ulong) (dst_addr) % SFLASH_PAGE_SIZE) == 0) {
+ if (smi_wait_till_ready(banknum,
+ CONFIG_SYS_FLASH_WRITE_TOUT))
+ return -EBUSY;
+
+ if (smi_write_enable(banknum))
+ return -EIO;
+ }
+
+ *dst_addr++ = *src_addr++;
+
+ if ((readl(&smicntl->smi_sr) & (ERF1 | ERF2)))
+ return -EIO;
+ }
+
+ if (smi_wait_till_ready(banknum, CONFIG_SYS_FLASH_WRITE_TOUT))
+ return -EBUSY;
+
+ writel(readl(&smicntl->smi_sr) & ~(WCF), &smicntl->smi_sr);
+
+ return 0;
+}
+
+/*
+ * write_buff - Write to SMI flash
+ * @info: flash info structure
+ * @src: source buffer
+ * @dest_addr: destination flash address
+ * @length: length to write in bytes
+ *
+ * Write to SMI flash
+ */
+int write_buff(flash_info_t *info, uchar *src, ulong dest_addr, ulong length)
+{
+ return smi_write((unsigned int *)src, (unsigned int *)dest_addr,
+ (length + 3) / 4, info->start[0]);
+}
+
+/*
+ * flash_init - SMI flash initialization
+ *
+ * SMI flash initialization
+ */
+unsigned long flash_init(void)
+{
+ unsigned long size = 0;
+ int i, j;
+
+ smi_init();
+
+ for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
+ flash_info[i].flash_id = FLASH_UNKNOWN;
+ size += flash_info[i].size = flash_get_size(bank_base[i], i);
+ }
+
+ for (j = 0; j < CONFIG_SYS_MAX_FLASH_BANKS; j++) {
+ for (i = 1; i < flash_info[j].sector_count; i++)
+ flash_info[j].start[i] =
+ flash_info[j].start[i - 1] +
+			    flash_info[j].size / flash_info[j].sector_count;
+
+ }
+
+ return size;
+}
+
+/*
+ * flash_print_info - Print SMI flash information
+ *
+ * Print SMI flash information
+ */
+void flash_print_info(flash_info_t *info)
+{
+ int i;
+ if (info->flash_id == FLASH_UNKNOWN) {
+ puts("missing or unknown FLASH type\n");
+ return;
+ }
+ printf(" Size: %ld MB in %d Sectors\n",
+ info->size >> 20, info->sector_count);
+
+ puts(" Sector Start Addresses:");
+ for (i = 0; i < info->sector_count; ++i) {
+#ifdef CONFIG_SYS_FLASH_EMPTY_INFO
+ int size;
+ int erased;
+ u32 *flash;
+
+ /*
+ * Check if whole sector is erased
+ */
+ size = (info->size) / (info->sector_count);
+ flash = (u32 *) info->start[i];
+ size = size / sizeof(int);
+
+ while ((size--) && (*flash++ == ~0))
+ ;
+
+ size++;
+ if (size)
+ erased = 0;
+ else
+ erased = 1;
+
+ if ((i % 5) == 0)
+ printf("\n");
+
+ printf(" %08lX%s%s",
+ info->start[i],
+ erased ? " E" : " ", info->protect[i] ? "RO " : " ");
+#else
+ if ((i % 5) == 0)
+ printf("\n ");
+ printf(" %08lX%s",
+ info->start[i], info->protect[i] ? " (RO) " : " ");
+#endif
+ }
+ putc('\n');
+ return;
+}
+
+/*
+ * flash_erase - Erase SMI flash
+ *
+ * Erase SMI flash
+ */
+int flash_erase(flash_info_t *info, int s_first, int s_last)
+{
+ int rcode = 0;
+ int prot = 0;
+ flash_sect_t sect;
+
+ if (info->flash_id != ST_M25Pxx_ID) {
+ puts("Can't erase unknown flash type - aborted\n");
+ return 1;
+ }
+
+ if ((s_first < 0) || (s_first > s_last)) {
+ puts("- no sectors to erase\n");
+ return 1;
+ }
+
+ for (sect = s_first; sect <= s_last; ++sect) {
+ if (info->protect[sect])
+ prot++;
+ }
+ if (prot) {
+ printf("- Warning: %d protected sectors will not be erased!\n",
+ prot);
+ } else {
+ putc('\n');
+ }
+
+ for (sect = s_first; sect <= s_last; sect++) {
+ if (info->protect[sect] == 0) {
+ if (smi_sector_erase(info, sect))
+ rcode = 1;
+ else
+ putc('.');
+ }
+ }
+ puts(" done\n");
+ return rcode;
+}
+#endif
diff --git a/u-boot/drivers/mtd/ubi/Makefile b/u-boot/drivers/mtd/ubi/Makefile
new file mode 100644
index 0000000..b2e6014
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/Makefile
@@ -0,0 +1,51 @@
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# See file CREDITS for list of people who contributed to this
+# project.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+include $(TOPDIR)/config.mk
+
+LIB := $(obj)libubi.o
+
+ifdef CONFIG_CMD_UBI
+COBJS-y += build.o vtbl.o vmt.o upd.o kapi.o eba.o io.o wl.o scan.o crc32.o
+
+COBJS-y += misc.o
+COBJS-y += debug.o
+endif
+
+COBJS := $(COBJS-y)
+SRCS := $(COBJS:.o=.c)
+OBJS := $(addprefix $(obj),$(COBJS))
+
+all: $(LIB)
+
+$(LIB): $(obj).depend $(OBJS)
+ $(call cmd_link_o_target, $(OBJS))
+
+#########################################################################
+
+# defines $(obj).depend target
+include $(SRCTREE)/rules.mk
+
+sinclude $(obj).depend
+
+#########################################################################
diff --git a/u-boot/drivers/mtd/ubi/build.c b/u-boot/drivers/mtd/ubi/build.c
new file mode 100644
index 0000000..3ea0e6c
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/build.c
@@ -0,0 +1,1193 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём),
+ * Frank Haverkamp
+ */
+
+/*
+ * This file includes UBI initialization and building of UBI devices.
+ *
+ * When UBI is initialized, it attaches all the MTD devices specified as the
+ * module load parameters or the kernel boot parameters. If MTD devices were
+ * not specified, UBI does not attach any MTD device, but it is possible to do
+ * that later using the "UBI control device".
+ *
+ * At the moment we only attach UBI devices by scanning, which will become a
+ * bottleneck when flashes reach a certain large size. Then one may improve
+ * UBI and add other methods, although it does not seem to be easy to do.
+ */
+
+#ifdef UBI_LINUX
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/stringify.h>
+#include <linux/stat.h>
+#include <linux/miscdevice.h>
+#include <linux/log2.h>
+#include <linux/kthread.h>
+#endif
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+#if (CONFIG_SYS_MALLOC_LEN < (512 << 10))
+#error Malloc area too small for UBI, increase CONFIG_SYS_MALLOC_LEN to >= 512k
+#endif
+
+/* Maximum length of the 'mtd=' parameter */
+#define MTD_PARAM_LEN_MAX 64
+
+/**
+ * struct mtd_dev_param - MTD device parameter description data structure.
+ * @name: MTD device name or number string
+ * @vid_hdr_offs: VID header offset
+ */
+struct mtd_dev_param
+{
+ char name[MTD_PARAM_LEN_MAX];
+ int vid_hdr_offs;
+};
+
+/* Number of elements set in the @mtd_dev_param array */
+static int mtd_devs = 0;
+
+/* MTD devices specification parameters */
+static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
+
+/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
+struct class *ubi_class;
+
+#ifdef UBI_LINUX
+/* Slab cache for wear-leveling entries */
+struct kmem_cache *ubi_wl_entry_slab;
+
+/* UBI control character device */
+static struct miscdevice ubi_ctrl_cdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ubi_ctrl",
+ .fops = &ubi_ctrl_cdev_operations,
+};
+#endif
+
+/* All UBI devices in system */
+struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
+
+#ifdef UBI_LINUX
+/* Serializes UBI devices creations and removals */
+DEFINE_MUTEX(ubi_devices_mutex);
+
+/* Protects @ubi_devices and @ubi->ref_count */
+static DEFINE_SPINLOCK(ubi_devices_lock);
+
+/* "Show" method for files in '/<sysfs>/class/ubi/' */
+static ssize_t ubi_version_show(struct class *class, char *buf)
+{
+ return sprintf(buf, "%d\n", UBI_VERSION);
+}
+
+/* UBI version attribute ('/<sysfs>/class/ubi/version') */
+static struct class_attribute ubi_version =
+ __ATTR(version, S_IRUGO, ubi_version_show, NULL);
+
+static ssize_t dev_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
+static struct device_attribute dev_eraseblock_size =
+ __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_avail_eraseblocks =
+ __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_total_eraseblocks =
+ __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_volumes_count =
+ __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_max_ec =
+ __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_reserved_for_bad =
+ __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_bad_peb_count =
+ __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_max_vol_count =
+ __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_min_io_size =
+ __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_bgt_enabled =
+ __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_num =
+ __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+#endif
+
+/**
+ * ubi_get_device - get UBI device.
+ * @ubi_num: UBI device number
+ *
+ * This function returns UBI device description object for UBI device number
+ * @ubi_num, or %NULL if the device does not exist. This function increases the
+ * device reference count to prevent removal of the device. In other words, the
+ * device cannot be removed if its reference count is not zero.
+ */
+struct ubi_device *ubi_get_device(int ubi_num)
+{
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ ubi = ubi_devices[ubi_num];
+ if (ubi) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi;
+}
+
+/**
+ * ubi_put_device - drop an UBI device reference.
+ * @ubi: UBI device description object
+ */
+void ubi_put_device(struct ubi_device *ubi)
+{
+ spin_lock(&ubi_devices_lock);
+ ubi->ref_count -= 1;
+ put_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+}
+
+/**
+ * ubi_get_by_major - get UBI device description object by character device
+ * major number.
+ * @major: major number
+ *
+ * This function is similar to 'ubi_get_device()', but it searches the device
+ * by its major number.
+ */
+struct ubi_device *ubi_get_by_major(int major)
+{
+ int i;
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+ return ubi;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return NULL;
+}
+
+/**
+ * ubi_major2num - get UBI device number by character device major number.
+ * @major: major number
+ *
+ * This function searches UBI device number object by its major number. If UBI
+ * device was not found, this function returns -ENODEV, otherwise the UBI device
+ * number is returned.
+ */
+int ubi_major2num(int major)
+{
+ int i, ubi_num = -ENODEV;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_num = ubi->ubi_num;
+ break;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi_num;
+}
+
+#ifdef UBI_LINUX
+/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
+static ssize_t dev_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct ubi_device *ubi;
+
+ /*
+ * The below code looks weird, but it actually makes sense. We get the
+ * UBI device reference from the contained 'struct ubi_device'. But it
+ * is unclear if the device was removed or not yet. Indeed, if the
+ * device was removed before we increased its reference count,
+ * 'ubi_get_device()' will return -ENODEV and we fail.
+ *
+ * Remember, 'struct ubi_device' is freed in the release function, so
+ * we still can use 'ubi->ubi_num'.
+ */
+ ubi = container_of(dev, struct ubi_device, dev);
+ ubi = ubi_get_device(ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ if (attr == &dev_eraseblock_size)
+ ret = sprintf(buf, "%d\n", ubi->leb_size);
+ else if (attr == &dev_avail_eraseblocks)
+ ret = sprintf(buf, "%d\n", ubi->avail_pebs);
+ else if (attr == &dev_total_eraseblocks)
+ ret = sprintf(buf, "%d\n", ubi->good_peb_count);
+ else if (attr == &dev_volumes_count)
+ ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
+ else if (attr == &dev_max_ec)
+ ret = sprintf(buf, "%d\n", ubi->max_ec);
+ else if (attr == &dev_reserved_for_bad)
+ ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
+ else if (attr == &dev_bad_peb_count)
+ ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
+ else if (attr == &dev_max_vol_count)
+ ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
+ else if (attr == &dev_min_io_size)
+ ret = sprintf(buf, "%d\n", ubi->min_io_size);
+ else if (attr == &dev_bgt_enabled)
+ ret = sprintf(buf, "%d\n", ubi->thread_enabled);
+ else if (attr == &dev_mtd_num)
+ ret = sprintf(buf, "%d\n", ubi->mtd->index);
+ else
+ ret = -EINVAL;
+
+ ubi_put_device(ubi);
+ return ret;
+}
+
+/* Fake "release" method for UBI devices */
+static void dev_release(struct device *dev) { }
+
+/**
+ * ubi_sysfs_init - initialize sysfs for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int ubi_sysfs_init(struct ubi_device *ubi)
+{
+ int err;
+
+ ubi->dev.release = dev_release;
+ ubi->dev.devt = ubi->cdev.dev;
+ ubi->dev.class = ubi_class;
+ sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
+ err = device_register(&ubi->dev);
+ if (err)
+ return err;
+
+ err = device_create_file(&ubi->dev, &dev_eraseblock_size);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_volumes_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_max_ec);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_bad_peb_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_max_vol_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_min_io_size);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_bgt_enabled);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_mtd_num);
+ return err;
+}
+
+/**
+ * ubi_sysfs_close - close sysfs for an UBI device.
+ * @ubi: UBI device description object
+ */
+static void ubi_sysfs_close(struct ubi_device *ubi)
+{
+ device_remove_file(&ubi->dev, &dev_mtd_num);
+ device_remove_file(&ubi->dev, &dev_bgt_enabled);
+ device_remove_file(&ubi->dev, &dev_min_io_size);
+ device_remove_file(&ubi->dev, &dev_max_vol_count);
+ device_remove_file(&ubi->dev, &dev_bad_peb_count);
+ device_remove_file(&ubi->dev, &dev_reserved_for_bad);
+ device_remove_file(&ubi->dev, &dev_max_ec);
+ device_remove_file(&ubi->dev, &dev_volumes_count);
+ device_remove_file(&ubi->dev, &dev_total_eraseblocks);
+ device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
+ device_remove_file(&ubi->dev, &dev_eraseblock_size);
+ device_unregister(&ubi->dev);
+}
+#endif
+
+/**
+ * kill_volumes - destroy all volumes.
+ * @ubi: UBI device description object
+ */
+static void kill_volumes(struct ubi_device *ubi)
+{
+ int i;
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i])
+ ubi_free_volume(ubi, ubi->volumes[i]);
+}
+
+/**
+ * uif_init - initialize user interfaces for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int uif_init(struct ubi_device *ubi)
+{
+ int i, err;
+#ifdef UBI_LINUX
+ dev_t dev;
+#endif
+
+ sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
+
+ /*
+ * Major numbers for the UBI character devices are allocated
+ * dynamically. Major numbers of volume character devices are
+ * equivalent to ones of the corresponding UBI character device. Minor
+ * numbers of UBI character devices are 0, while minor numbers of
+ * volume character devices start from 1. Thus, we allocate one major
+ * number and ubi->vtbl_slots + 1 minor numbers.
+ */
+ err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
+ if (err) {
+ ubi_err("cannot register UBI character devices");
+ return err;
+ }
+
+ ubi_assert(MINOR(dev) == 0);
+ cdev_init(&ubi->cdev, &ubi_cdev_operations);
+ dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev));
+ ubi->cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&ubi->cdev, dev, 1);
+ if (err) {
+ ubi_err("cannot add character device");
+ goto out_unreg;
+ }
+
+ err = ubi_sysfs_init(ubi);
+ if (err)
+ goto out_sysfs;
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i]) {
+ err = ubi_add_volume(ubi, ubi->volumes[i]);
+ if (err) {
+ ubi_err("cannot add volume %d", i);
+ goto out_volumes;
+ }
+ }
+
+ return 0;
+
+out_volumes:
+ kill_volumes(ubi);
+out_sysfs:
+ ubi_sysfs_close(ubi);
+ cdev_del(&ubi->cdev);
+out_unreg:
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+ ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
+ return err;
+}
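+
+/*
+ * To illustrate the numbering scheme described in uif_init() above: if the
+ * dynamically allocated major number happened to be 253 (a purely
+ * hypothetical value), the ubi0 device itself would be character device
+ * (253, 0), and its volume character devices would get minors 1, 2, and so
+ * on, within the ubi->vtbl_slots + 1 minors reserved above.
+ */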
+
+/**
+ * uif_close - close user interfaces for an UBI device.
+ * @ubi: UBI device description object
+ */
+static void uif_close(struct ubi_device *ubi)
+{
+ kill_volumes(ubi);
+ ubi_sysfs_close(ubi);
+ cdev_del(&ubi->cdev);
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+}
+
+/**
+ * attach_by_scanning - attach an MTD device using scanning method.
+ * @ubi: UBI device descriptor
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ *
+ * Note, currently this is the only method to attach UBI devices. Hopefully in
+ * the future we'll have more scalable attaching methods and avoid full media
+ * scanning. But even in this case scanning will be needed as a fall-back
+ * attaching method if there are some on-flash table corruptions.
+ */
+static int attach_by_scanning(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_scan_info *si;
+
+ si = ubi_scan(ubi);
+ if (IS_ERR(si))
+ return PTR_ERR(si);
+
+ ubi->bad_peb_count = si->bad_peb_count;
+ ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+ ubi->max_ec = si->max_ec;
+ ubi->mean_ec = si->mean_ec;
+
+ err = ubi_read_volume_table(ubi, si);
+ if (err)
+ goto out_si;
+
+ err = ubi_wl_init_scan(ubi, si);
+ if (err)
+ goto out_vtbl;
+
+ err = ubi_eba_init_scan(ubi, si);
+ if (err)
+ goto out_wl;
+
+ ubi_scan_destroy_si(si);
+ return 0;
+
+out_wl:
+ ubi_wl_close(ubi);
+out_vtbl:
+ vfree(ubi->vtbl);
+out_si:
+ ubi_scan_destroy_si(si);
+ return err;
+}
+
+/**
+ * io_init - initialize I/O unit for a given UBI device.
+ * @ubi: UBI device description object
+ *
+ * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
+ * assumed:
+ * o EC header is always at offset zero - this cannot be changed;
+ * o VID header starts just after the EC header at the closest address
+ * aligned to @ubi->hdrs_min_io_size;
+ * o data starts just after the VID header at the closest address aligned to
+ * @ubi->min_io_size.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int io_init(struct ubi_device *ubi)
+{
+ if (ubi->mtd->numeraseregions != 0) {
+ /*
+ * Some flashes have several erase regions. Different regions
+ * may have different eraseblock size and other
+ * characteristics. It looks like mostly multi-region flashes
+ * have one "main" region and one or more small regions to
+ * store boot loader code or boot parameters or whatever. I
+ * guess we should just pick the largest region. But this is
+ * not implemented.
+ */
+ ubi_err("multiple regions, not implemented");
+ return -EINVAL;
+ }
+
+ if (ubi->vid_hdr_offset < 0)
+ return -EINVAL;
+
+ /*
+ * Note, in this implementation we support MTD devices with 0x7FFFFFFF
+ * physical eraseblocks maximum.
+ */
+
+ ubi->peb_size = ubi->mtd->erasesize;
+ ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
+ ubi->flash_size = ubi->mtd->size;
+
+ if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
+ ubi->bad_allowed = 1;
+
+ ubi->min_io_size = ubi->mtd->writesize;
+ ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
+
+ /*
+ * Make sure minimal I/O unit is power of 2. Note, there is no
+ * fundamental reason for this assumption. It is just an optimization
+ * which allows us to avoid costly division operations.
+ */
+ if (!is_power_of_2(ubi->min_io_size)) {
+ ubi_err("min. I/O unit (%d) is not power of 2",
+ ubi->min_io_size);
+ return -EINVAL;
+ }
+
+ ubi_assert(ubi->hdrs_min_io_size > 0);
+ ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
+ ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
+
+ /* Calculate default aligned sizes of EC and VID headers */
+ ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
+ ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
+
+ dbg_msg("min_io_size %d", ubi->min_io_size);
+ dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+ dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
+ dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
+
+ if (ubi->vid_hdr_offset == 0)
+ /* Default offset */
+ ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
+ ubi->ec_hdr_alsize;
+ else {
+ ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
+ ~(ubi->hdrs_min_io_size - 1);
+ ubi->vid_hdr_shift = ubi->vid_hdr_offset -
+ ubi->vid_hdr_aloffset;
+ }
+
+ /* Similar for the data offset */
+ ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
+ ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
+
+ dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
+ dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+ dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
+ dbg_msg("leb_start %d", ubi->leb_start);
+
+ /* The shift must be aligned to 32-bit boundary */
+ if (ubi->vid_hdr_shift % 4) {
+ ubi_err("unaligned VID header shift %d",
+ ubi->vid_hdr_shift);
+ return -EINVAL;
+ }
+
+ /* Check sanity */
+ if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
+ ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
+ ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
+ ubi->leb_start & (ubi->min_io_size - 1)) {
+ ubi_err("bad VID header (%d) or data offsets (%d)",
+ ubi->vid_hdr_offset, ubi->leb_start);
+ return -EINVAL;
+ }
+
+ /*
+ * It may happen that EC and VID headers are situated in one minimal
+ * I/O unit. In this case we can only accept this UBI image in
+ * read-only mode.
+ */
+ if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
+ ubi_warn("EC and VID headers are in the same minimal I/O unit, "
+ "switch to read-only mode");
+ ubi->ro_mode = 1;
+ }
+
+ ubi->leb_size = ubi->peb_size - ubi->leb_start;
+
+ if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
+ ubi_msg("MTD device %d is write-protected, attach in "
+ "read-only mode", ubi->mtd->index);
+ ubi->ro_mode = 1;
+ }
+
+ ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
+ ubi->peb_size, ubi->peb_size >> 10);
+ ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
+ ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
+ if (ubi->hdrs_min_io_size != ubi->min_io_size)
+ ubi_msg("sub-page size: %d",
+ ubi->hdrs_min_io_size);
+ ubi_msg("VID header offset: %d (aligned %d)",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
+ ubi_msg("data offset: %d", ubi->leb_start);
+
+ /*
+ * Note, ideally, we have to initialize ubi->bad_peb_count here. But
+ * unfortunately, MTD does not provide this information. We should loop
+ * over all physical eraseblocks and invoke mtd->block_is_bad() for
+ * each physical eraseblock. So, we skip ubi->bad_peb_count
+ * uninitialized and initialize it after scanning.
+ */
+
+ return 0;
+}
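+
+/*
+ * A worked example of the defaults computed by io_init() above, assuming a
+ * hypothetical NAND chip with 128 KiB eraseblocks, 2048-byte pages and
+ * 512-byte sub-pages (min_io_size = 2048, hdrs_min_io_size = 512), and the
+ * 64-byte EC/VID header sizes enforced by the BUILD_BUG_ON checks in
+ * ubi_init():
+ *
+ *	ec_hdr_alsize  = ALIGN(64, 512)        = 512
+ *	vid_hdr_offset = ec_hdr_alsize         = 512
+ *	leb_start      = ALIGN(512 + 64, 2048) = 2048
+ *	leb_size       = 131072 - 2048         = 129024 bytes (126 KiB)
+ *
+ * Since 512 + 64 exceeds the 512-byte sub-page holding the EC header, the
+ * EC and VID headers land in different minimal I/O units, so the read-only
+ * fallback near the end of io_init() is not triggered.
+ */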
+
+/**
+ * autoresize - re-size the volume which has the "auto-resize" flag set.
+ * @ubi: UBI device description object
+ * @vol_id: ID of the volume to re-size
+ *
+ * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
+ * the volume table to the largest possible size. See comments in ubi-header.h
+ * for more description of the flag. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+static int autoresize(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_volume_desc desc;
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err, old_reserved_pebs = vol->reserved_pebs;
+
+ /*
+ * Clear the auto-resize flag in the in-memory copy of the volume
+ * table, and 'ubi_resize_volume()' will propagate this change to
+ * the flash.
+ */
+ ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
+
+ if (ubi->avail_pebs == 0) {
+ struct ubi_vtbl_record vtbl_rec;
+
+ /*
+ * No available PEBs to re-size the volume, clear the flag on
+ * flash and exit.
+ */
+ memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
+ sizeof(struct ubi_vtbl_record));
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ ubi_err("cannot clean auto-resize flag for volume %d",
+ vol_id);
+ } else {
+ desc.vol = vol;
+ err = ubi_resize_volume(&desc,
+ old_reserved_pebs + ubi->avail_pebs);
+ if (err)
+ ubi_err("cannot auto-resize volume %d", vol_id);
+ }
+
+ if (err)
+ return err;
+
+ ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
+ vol->name, old_reserved_pebs, vol->reserved_pebs);
+ return 0;
+}
+
+/**
+ * ubi_attach_mtd_dev - attach an MTD device.
+ * @mtd_dev: MTD device description object
+ * @ubi_num: number to assign to the new UBI device
+ * @vid_hdr_offset: VID header offset
+ *
+ * This function attaches MTD device @mtd_dev to UBI and assigns @ubi_num to
+ * the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
+ * which case this function finds a vacant device number and assigns it
+ * automatically. Returns the new UBI device number in case of success and a
+ * negative error code in case of failure.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
+ */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
+{
+ struct ubi_device *ubi;
+ int i, err;
+
+ /*
+ * Check if we already have the same MTD device attached.
+ *
+ * Note, this function assumes that UBI devices creations and deletions
+ * are serialized, so it does not take the &ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && mtd->index == ubi->mtd->index) {
+ dbg_err("mtd%d is already attached to ubi%d",
+ mtd->index, i);
+ return -EEXIST;
+ }
+ }
+
+ /*
+ * Make sure this MTD device is not emulated on top of an UBI volume
+ * already. Well, generally this recursion works fine, but there are
+ * different problems like the UBI module takes a reference to itself
+ * by attaching (and thus, opening) the emulated MTD device. This
+ * results in inability to unload the module. And in general it makes
+ * no sense to attach emulated MTD devices, so we prohibit this.
+ */
+ if (mtd->type == MTD_UBIVOLUME) {
+ ubi_err("refuse attaching mtd%d - it is already emulated on "
+ "top of UBI", mtd->index);
+ return -EINVAL;
+ }
+
+ if (ubi_num == UBI_DEV_NUM_AUTO) {
+ /* Search for an empty slot in the @ubi_devices array */
+ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+ if (!ubi_devices[ubi_num])
+ break;
+ if (ubi_num == UBI_MAX_DEVICES) {
+ dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
+ return -ENFILE;
+ }
+ } else {
+ if (ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ /* Make sure ubi_num is not busy */
+ if (ubi_devices[ubi_num]) {
+ dbg_err("ubi%d already exists", ubi_num);
+ return -EEXIST;
+ }
+ }
+
+ ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
+ if (!ubi)
+ return -ENOMEM;
+
+ ubi->mtd = mtd;
+ ubi->ubi_num = ubi_num;
+ ubi->vid_hdr_offset = vid_hdr_offset;
+ ubi->autoresize_vol_id = -1;
+
+ mutex_init(&ubi->buf_mutex);
+ mutex_init(&ubi->ckvol_mutex);
+ mutex_init(&ubi->volumes_mutex);
+ spin_lock_init(&ubi->volumes_lock);
+
+ ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
+
+ err = io_init(ubi);
+ if (err)
+ goto out_free;
+
+ err = -ENOMEM;
+ ubi->peb_buf1 = vmalloc(ubi->peb_size);
+ if (!ubi->peb_buf1)
+ goto out_free;
+
+ ubi->peb_buf2 = vmalloc(ubi->peb_size);
+ if (!ubi->peb_buf2)
+ goto out_free;
+
+#ifdef CONFIG_MTD_UBI_DEBUG
+ mutex_init(&ubi->dbg_buf_mutex);
+ ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
+ if (!ubi->dbg_peb_buf)
+ goto out_free;
+#endif
+
+ err = attach_by_scanning(ubi);
+ if (err) {
+ dbg_err("failed to attach by scanning, error %d", err);
+ goto out_free;
+ }
+
+ if (ubi->autoresize_vol_id != -1) {
+ err = autoresize(ubi, ubi->autoresize_vol_id);
+ if (err)
+ goto out_detach;
+ }
+
+ err = uif_init(ubi);
+ if (err)
+ goto out_detach;
+
+ ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
+ if (IS_ERR(ubi->bgt_thread)) {
+ err = PTR_ERR(ubi->bgt_thread);
+ ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
+ err);
+ goto out_uif;
+ }
+
+ ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
+ ubi_msg("MTD device name: \"%s\"", mtd->name);
+ ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
+ ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
+ ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
+ ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
+ ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
+ ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
+ ubi_msg("number of user volumes: %d",
+ ubi->vol_count - UBI_INT_VOL_COUNT);
+ ubi_msg("available PEBs: %d", ubi->avail_pebs);
+ ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
+ ubi_msg("number of PEBs reserved for bad PEB handling: %d",
+ ubi->beb_rsvd_pebs);
+ ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
+
+ /* Enable the background thread */
+ if (!DBG_DISABLE_BGT) {
+ ubi->thread_enabled = 1;
+ wake_up_process(ubi->bgt_thread);
+ }
+
+ ubi_devices[ubi_num] = ubi;
+ return ubi_num;
+
+out_uif:
+ uif_close(ubi);
+out_detach:
+ ubi_eba_close(ubi);
+ ubi_wl_close(ubi);
+ vfree(ubi->vtbl);
+out_free:
+ vfree(ubi->peb_buf1);
+ vfree(ubi->peb_buf2);
+#ifdef CONFIG_MTD_UBI_DEBUG
+ vfree(ubi->dbg_peb_buf);
+#endif
+ kfree(ubi);
+ return err;
+}
+
+/**
+ * ubi_detach_mtd_dev - detach an MTD device.
+ * @ubi_num: UBI device number to detach from
+ * @anyway: detach MTD even if device reference count is not zero
+ *
+ * This function destroys an UBI device number @ubi_num and detaches the
+ * underlying MTD device. Returns zero in case of success and %-EBUSY if the
+ * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
+ * exist.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
+ */
+int ubi_detach_mtd_dev(int ubi_num, int anyway)
+{
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ spin_lock(&ubi_devices_lock);
+ ubi = ubi_devices[ubi_num];
+ if (!ubi) {
+ spin_unlock(&ubi_devices_lock);
+ return -EINVAL;
+ }
+
+ if (ubi->ref_count) {
+ if (!anyway) {
+ spin_unlock(&ubi_devices_lock);
+ return -EBUSY;
+ }
+ /* This may only happen if there is a bug */
+ ubi_err("%s reference count %d, destroy anyway",
+ ubi->ubi_name, ubi->ref_count);
+ }
+ ubi_devices[ubi_num] = NULL;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_assert(ubi_num == ubi->ubi_num);
+ dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+
+ /*
+ * Before freeing anything, we have to stop the background thread to
+ * prevent it from doing anything on this device while we are freeing.
+ */
+ if (ubi->bgt_thread)
+ kthread_stop(ubi->bgt_thread);
+
+ uif_close(ubi);
+ ubi_eba_close(ubi);
+ ubi_wl_close(ubi);
+ vfree(ubi->vtbl);
+ put_mtd_device(ubi->mtd);
+ vfree(ubi->peb_buf1);
+ vfree(ubi->peb_buf2);
+#ifdef CONFIG_MTD_UBI_DEBUG
+ vfree(ubi->dbg_peb_buf);
+#endif
+ ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
+ kfree(ubi);
+ return 0;
+}
+
+/**
+ * open_mtd_device - open an MTD device by its name or number.
+ * @mtd_dev: name or number of the device
+ *
+ * This function tries to open an MTD device described by the @mtd_dev string,
+ * which is first treated as an ASCII number; if that fails, it is treated as
+ * an MTD device name. Returns the MTD device description object in case of
+ * success and a negative error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
+{
+ struct mtd_info *mtd;
+ int mtd_num;
+ char *endp;
+
+ mtd_num = simple_strtoul(mtd_dev, &endp, 0);
+ if (*endp != '\0' || mtd_dev == endp) {
+ /*
+ * This does not look like an ASCII integer, probably this is
+ * MTD device name.
+ */
+ mtd = get_mtd_device_nm(mtd_dev);
+ } else
+ mtd = get_mtd_device(NULL, mtd_num);
+
+ return mtd;
+}
+
+int __init ubi_init(void)
+{
+ int err, i, k;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
+ if (IS_ERR(ubi_class)) {
+ err = PTR_ERR(ubi_class);
+ ubi_err("cannot create UBI class");
+ goto out;
+ }
+
+ err = class_create_file(ubi_class, &ubi_version);
+ if (err) {
+ ubi_err("cannot create sysfs file");
+ goto out_class;
+ }
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ ubi_err("cannot register device");
+ goto out_version;
+ }
+
+#ifdef UBI_LINUX
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab)
+ goto out_dev_unreg;
+#endif
+
+ /* Attach MTD devices */
+ for (i = 0; i < mtd_devs; i++) {
+ struct mtd_dev_param *p = &mtd_dev_param[i];
+ struct mtd_info *mtd;
+
+ cond_resched();
+
+ mtd = open_mtd_device(p->name);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ goto out_detach;
+ }
+
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
+ p->vid_hdr_offs);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0) {
+ put_mtd_device(mtd);
+ ubi_err("cannot attach mtd%d", mtd->index);
+ goto out_detach;
+ }
+ }
+
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+#ifdef UBI_LINUX
+ kmem_cache_destroy(ubi_wl_entry_slab);
+out_dev_unreg:
+#endif
+ misc_deregister(&ubi_ctrl_cdev);
+out_version:
+ class_remove_file(ubi_class, &ubi_version);
+out_class:
+ class_destroy(ubi_class);
+out:
+ mtd_devs = 0;
+ ubi_err("UBI error: cannot initialize UBI, error %d", err);
+ return err;
+}
+module_init(ubi_init);
+
+void __exit ubi_exit(void)
+{
+ int i;
+
+ for (i = 0; i < UBI_MAX_DEVICES; i++)
+ if (ubi_devices[i]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ kmem_cache_destroy(ubi_wl_entry_slab);
+ misc_deregister(&ubi_ctrl_cdev);
+ class_remove_file(ubi_class, &ubi_version);
+ class_destroy(ubi_class);
+ mtd_devs = 0;
+}
+module_exit(ubi_exit);
+
+/**
+ * bytes_str_to_int - convert a string representing number of bytes to an
+ * integer.
+ * @str: the string to convert
+ *
+ * This function returns the resulting positive integer in case of success and a
+ * negative error code in case of failure.
+ */
+static int __init bytes_str_to_int(const char *str)
+{
+ char *endp;
+ unsigned long result;
+
+ result = simple_strtoul(str, &endp, 0);
+ if (str == endp || result < 0) {
+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
+ str);
+ return -EINVAL;
+ }
+
+ switch (*endp) {
+ case 'G':
+ result *= 1024;
+ case 'M':
+ result *= 1024;
+ case 'K':
+ result *= 1024;
+ if (endp[1] == 'i' && endp[2] == 'B')
+ endp += 2;
+ case '\0':
+ break;
+ default:
+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
+ str);
+ return -EINVAL;
+ }
+
+ return result;
+}
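+
+/*
+ * Parsing sketch for the suffix handling above (note the intentional
+ * fall-through, so the multipliers accumulate): a hypothetical "512KiB"
+ * yields 512 * 1024 = 524288, "2M" yields 2 * 1024 * 1024 = 2097152, and a
+ * plain "1984" is returned unchanged. Values exceeding the positive range
+ * of the int return type must be avoided by the caller.
+ */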
+
+/**
+ * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
+ * @val: the parameter value to parse
+ * @kp: not used
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of error.
+ */
+int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
+{
+ int i, len;
+ struct mtd_dev_param *p;
+ char buf[MTD_PARAM_LEN_MAX];
+ char *pbuf = &buf[0];
+ char *tokens[2] = {NULL, NULL};
+
+ if (!val)
+ return -EINVAL;
+
+ if (mtd_devs == UBI_MAX_DEVICES) {
+ printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ len = strnlen(val, MTD_PARAM_LEN_MAX);
+ if (len == MTD_PARAM_LEN_MAX) {
+ printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
+ "max. is %d\n", val, MTD_PARAM_LEN_MAX);
+ return -EINVAL;
+ }
+
+ if (len == 0) {
+ printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
+ "ignored\n");
+ return 0;
+ }
+
+ strcpy(buf, val);
+
+ /* Get rid of the final newline */
+ if (buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ for (i = 0; i < 2; i++)
+ tokens[i] = strsep(&pbuf, ",");
+
+ if (pbuf) {
+ printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
+ val);
+ return -EINVAL;
+ }
+
+ p = &mtd_dev_param[mtd_devs];
+ strcpy(&p->name[0], tokens[0]);
+
+ if (tokens[1])
+ p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
+
+ if (p->vid_hdr_offs < 0)
+ return p->vid_hdr_offs;
+
+ mtd_devs += 1;
+ return 0;
+}
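+
+/*
+ * For illustration: a parameter string such as "content,1984" is split by
+ * strsep() above into tokens[0] = "content" and tokens[1] = "1984", giving
+ * the entry the name "content" and vid_hdr_offs 1984. A bare "4" leaves
+ * tokens[1] NULL, so no VID header offset is set here and io_init() later
+ * applies the default offsets.
+ */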
+
+module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
+MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
+ "mtd=<name|num>[,<vid_hdr_offs>].\n"
+ "Multiple \"mtd\" parameters may be specified.\n"
+ "MTD devices may be specified by their number or name.\n"
+ "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
+ "header position and data starting position to be used "
+ "by UBI.\n"
+ "Example: mtd=content,1984 mtd=4 - attach MTD device "
+ "with name \"content\" using VID header offset 1984, and "
+ "MTD device number 4 with default VID header offset.");
+
+MODULE_VERSION(__stringify(UBI_VERSION));
+MODULE_DESCRIPTION("UBI - Unsorted Block Images");
+MODULE_AUTHOR("Artem Bityutskiy");
+MODULE_LICENSE("GPL");
diff --git a/u-boot/drivers/mtd/ubi/crc32.c b/u-boot/drivers/mtd/ubi/crc32.c
new file mode 100644
index 0000000..a7e26b0
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/crc32.c
@@ -0,0 +1,518 @@
+/*
+ * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
+ * Code was from the public domain, copyright abandoned. Code was
+ * subsequently included in the kernel, thus was re-licensed under the
+ * GNU GPL v2.
+ *
+ * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Same crc32 function was used in 5 other places in the kernel.
+ * I made one version, and deleted the others.
+ * There are various incantations of crc32(). Some use a seed of 0 or ~0.
+ * Some xor at the end with ~0. The generic crc32() function takes
+ * seed as an argument, and doesn't xor at the end. Then individual
+ * users can do whatever they need.
+ * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
+ * fs/jffs2 uses seed 0, doesn't xor with ~0.
+ * fs/partitions/efi.c uses seed ~0, xor's with ~0.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#ifdef UBI_LINUX
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/compiler.h>
+#endif
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#ifdef UBI_LINUX
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+#endif
+#include "crc32defs.h"
+#define CRC_LE_BITS 8
+
+# define __force
+#ifndef __constant_cpu_to_le32
+#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#endif
+#ifndef __constant_le32_to_cpu
+#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#endif
+
+#if CRC_LE_BITS == 8
+#define tole(x) __constant_cpu_to_le32(x)
+#define tobe(x) __constant_cpu_to_be32(x)
+#else
+#define tole(x) (x)
+#define tobe(x) (x)
+#endif
+#include "crc32table.h"
+#ifdef UBI_LINUX
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
+MODULE_DESCRIPTION("Ethernet CRC32 calculations");
+MODULE_LICENSE("GPL");
+#endif
+/**
+ * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
+ * other uses, or the previous crc32 value if computing incrementally.
+ * @p: pointer to buffer over which CRC is run
+ * @len: length of buffer @p
+ */
+u32 crc32_le(u32 crc, unsigned char const *p, size_t len);
+
+#if CRC_LE_BITS == 1
+/*
+ * In fact, the table-based code will work in this case, but it can be
+ * simplified by inlining the table in ?: form.
+ */
+
+u32 crc32_le(u32 crc, unsigned char const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
+ }
+ return crc;
+}
+#else /* Table-based approach */
+
+u32 crc32_le(u32 crc, unsigned char const *p, size_t len)
+{
+# if CRC_LE_BITS == 8
+ const u32 *b = (u32 *)p;
+ const u32 *tab = crc32table_le;
+
+# ifdef __LITTLE_ENDIAN
+# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
+# else
+# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
+# endif
+ /* printf("Crc32_le crc=%x\n",crc); */
+ crc = __cpu_to_le32(crc);
+ /* Align it */
+ if((((long)b)&3 && len)){
+ do {
+ u8 *p = (u8 *)b;
+ DO_CRC(*p++);
+ b = (void *)p;
+ } while ((--len) && ((long)b)&3 );
+ }
+ if((len >= 4)){
+ /* load data 32 bits wide, xor data 32 bits wide. */
+ size_t save_len = len & 3;
+ len = len >> 2;
+ --b; /* use pre increment below(*++b) for speed */
+ do {
+ crc ^= *++b;
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ } while (--len);
+ b++; /* point to next byte(s) */
+ len = save_len;
+ }
+ /* And the last few bytes */
+ if(len){
+ do {
+ u8 *p = (u8 *)b;
+ DO_CRC(*p++);
+ b = (void *)p;
+ } while (--len);
+ }
+
+ return __le32_to_cpu(crc);
+#undef ENDIAN_SHIFT
+#undef DO_CRC
+
+# elif CRC_LE_BITS == 4
+ while (len--) {
+ crc ^= *p++;
+ crc = (crc >> 4) ^ crc32table_le[crc & 15];
+ crc = (crc >> 4) ^ crc32table_le[crc & 15];
+ }
+ return crc;
+# elif CRC_LE_BITS == 2
+ while (len--) {
+ crc ^= *p++;
+ crc = (crc >> 2) ^ crc32table_le[crc & 3];
+ crc = (crc >> 2) ^ crc32table_le[crc & 3];
+ crc = (crc >> 2) ^ crc32table_le[crc & 3];
+ crc = (crc >> 2) ^ crc32table_le[crc & 3];
+ }
+ return crc;
+# endif
+}
+#endif
+#ifdef UBI_LINUX
+/**
+ * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
+ * other uses, or the previous crc32 value if computing incrementally.
+ * @p: pointer to buffer over which CRC is run
+ * @len: length of buffer @p
+ */
+u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len);
+
+#if CRC_BE_BITS == 1
+/*
+ * In fact, the table-based code will work in this case, but it can be
+ * simplified by inlining the table in ?: form.
+ */
+
+u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 24;
+ for (i = 0; i < 8; i++)
+ crc =
+ (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE :
+ 0);
+ }
+ return crc;
+}
+
+#else /* Table-based approach */
+u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
+{
+# if CRC_BE_BITS == 8
+ const u32 *b = (u32 *)p;
+ const u32 *tab = crc32table_be;
+
+# ifdef __LITTLE_ENDIAN
+# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
+# else
+# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
+# endif
+
+ crc = __cpu_to_be32(crc);
+ /* Align it */
+ if(unlikely(((long)b)&3 && len)){
+ do {
+ u8 *p = (u8 *)b;
+ DO_CRC(*p++);
+ b = (u32 *)p;
+ } while ((--len) && ((long)b)&3 );
+ }
+ if(likely(len >= 4)){
+ /* load data 32 bits wide, xor data 32 bits wide. */
+ size_t save_len = len & 3;
+ len = len >> 2;
+ --b; /* use pre increment below(*++b) for speed */
+ do {
+ crc ^= *++b;
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ } while (--len);
+ b++; /* point to next byte(s) */
+ len = save_len;
+ }
+ /* And the last few bytes */
+ if(len){
+ do {
+ u8 *p = (u8 *)b;
+ DO_CRC(*p++);
+ b = (void *)p;
+ } while (--len);
+ }
+ return __be32_to_cpu(crc);
+#undef ENDIAN_SHIFT
+#undef DO_CRC
+
+# elif CRC_BE_BITS == 4
+ while (len--) {
+ crc ^= *p++ << 24;
+ crc = (crc << 4) ^ crc32table_be[crc >> 28];
+ crc = (crc << 4) ^ crc32table_be[crc >> 28];
+ }
+ return crc;
+# elif CRC_BE_BITS == 2
+ while (len--) {
+ crc ^= *p++ << 24;
+ crc = (crc << 2) ^ crc32table_be[crc >> 30];
+ crc = (crc << 2) ^ crc32table_be[crc >> 30];
+ crc = (crc << 2) ^ crc32table_be[crc >> 30];
+ crc = (crc << 2) ^ crc32table_be[crc >> 30];
+ }
+ return crc;
+# endif
+}
+#endif
+
+EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(crc32_be);
+#endif
+/*
+ * A brief CRC tutorial.
+ *
+ * A CRC is a long-division remainder. You add the CRC to the message,
+ * and the whole thing (message+CRC) is a multiple of the given
+ * CRC polynomial. To check the CRC, you can either check that the
+ * CRC matches the recomputed value, *or* you can check that the
+ * remainder computed on the message+CRC is 0. This latter approach
+ * is used by a lot of hardware implementations, and is why so many
+ * protocols put the end-of-frame flag after the CRC.
+ *
+ * It's actually the same long division you learned in school, except that
+ * - We're working in binary, so the digits are only 0 and 1, and
+ * - When dividing polynomials, there are no carries. Rather than add and
+ * subtract, we just xor. Thus, we tend to get a bit sloppy about
+ * the difference between adding and subtracting.
+ *
+ * A 32-bit CRC polynomial is actually 33 bits long. But since it's
+ * 33 bits long, bit 32 is always going to be set, so usually the CRC
+ * is written in hex with the most significant bit omitted. (If you're
+ * familiar with the IEEE 754 floating-point format, it's the same idea.)
+ *
+ * Note that a CRC is computed over a string of *bits*, so you have
+ * to decide on the endianness of the bits within each byte. To get
+ * the best error-detecting properties, this should correspond to the
+ * order they're actually sent. For example, standard RS-232 serial is
+ * little-endian; the most significant bit (sometimes used for parity)
+ * is sent last. And when appending a CRC word to a message, you should
+ * do it in the right order, matching the endianness.
+ *
+ * Just like with ordinary division, the remainder is always smaller than
+ * the divisor (the CRC polynomial) you're dividing by. Each step of the
+ * division, you take one more digit (bit) of the dividend and append it
+ * to the current remainder. Then you figure out the appropriate multiple
+ * of the divisor to subtract to bring the remainder back into range.
+ * In binary, it's easy - it has to be either 0 or 1, and to make the
+ * XOR cancel, it's just a copy of bit 32 of the remainder.
+ *
+ * When computing a CRC, we don't care about the quotient, so we can
+ * throw the quotient bit away, but subtract the appropriate multiple of
+ * the polynomial from the remainder and we're back to where we started,
+ * ready to process the next bit.
+ *
+ * A big-endian CRC written this way would be coded like:
+ * for (i = 0; i < input_bits; i++) {
+ * multiple = remainder & 0x80000000 ? CRCPOLY : 0;
+ * remainder = (remainder << 1 | next_input_bit()) ^ multiple;
+ * }
+ * Notice how, to get at bit 32 of the shifted remainder, we look
+ * at bit 31 of the remainder *before* shifting it.
+ *
+ * But also notice how the next_input_bit() bits we're shifting into
+ * the remainder don't actually affect any decision-making until
+ * 32 bits later. Thus, the first 32 cycles of this are pretty boring.
+ * Also, to add the CRC to a message, we need a 32-bit-long hole for it at
+ * the end, so we have to add 32 extra cycles shifting in zeros at the
+ * end of every message.
+ *
+ * So the standard trick is to rearrange merging in the next_input_bit()
+ * until the moment it's needed. Then the first 32 cycles can be precomputed,
+ * and merging in the final 32 zero bits to make room for the CRC can be
+ * skipped entirely.
+ * This changes the code to:
+ * for (i = 0; i < input_bits; i++) {
+ * remainder ^= next_input_bit() << 31;
+ * multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ * remainder = (remainder << 1) ^ multiple;
+ * }
+ * With this optimization, the little-endian code is simpler:
+ * for (i = 0; i < input_bits; i++) {
+ * remainder ^= next_input_bit();
+ * multiple = (remainder & 1) ? CRCPOLY : 0;
+ * remainder = (remainder >> 1) ^ multiple;
+ * }
+ *
+ * Note that the other details of endianness have been hidden in CRCPOLY
+ * (which must be bit-reversed) and next_input_bit().
+ *
+ * However, as long as next_input_bit is returning the bits in a sensible
+ * order, we can actually do the merging 8 or more bits at a time rather
+ * than one bit at a time:
+ * for (i = 0; i < input_bytes; i++) {
+ * remainder ^= next_input_byte() << 24;
+ * for (j = 0; j < 8; j++) {
+ * multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ * remainder = (remainder << 1) ^ multiple;
+ * }
+ * }
+ * Or in little-endian:
+ * for (i = 0; i < input_bytes; i++) {
+ * remainder ^= next_input_byte();
+ * for (j = 0; j < 8; j++) {
+ * multiple = (remainder & 1) ? CRCPOLY : 0;
+ * remainder = (remainder << 1) ^ multiple;
+ * }
+ * }
+ * If the input is a multiple of 32 bits, you can even XOR in a 32-bit
+ * word at a time and increase the inner loop count to 32.
+ *
+ * You can also mix and match the two loop styles, for example doing the
+ * bulk of a message byte-at-a-time and adding bit-at-a-time processing
+ * for any fractional bytes at the end.
+ *
+ * The only remaining optimization is to the byte-at-a-time table method.
+ * Here, rather than just shifting one bit of the remainder to decide
+ * on the correct multiple to subtract, we can shift a byte at a time.
+ * This produces a 40-bit (rather than a 33-bit) intermediate remainder,
+ * but again the multiple of the polynomial to subtract depends only on
+ * the high bits, the high 8 bits in this case.
+ *
+ * The multiple we need in that case is the low 32 bits of a 40-bit
+ * value whose high 8 bits are given, and which is a multiple of the
+ * generator polynomial. This is simply the CRC-32 of the given
+ * one-byte message.
+ *
+ * Two more details: normally, appending zero bits to a message which
+ * is already a multiple of a polynomial produces a larger multiple of that
+ * polynomial. To enable a CRC to detect this condition, it's common to
+ * invert the CRC before appending it. This makes the remainder of the
+ * message+crc come out not as zero, but some fixed non-zero value.
+ *
+ * The same problem applies to zero bits prepended to the message, and
+ * a similar solution is used. Instead of starting with a remainder of
+ * 0, an initial remainder of all ones is used. As long as you start
+ * the same way on decoding, it doesn't make a difference.
+ */
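+
+/*
+ * Relating the tutorial above to the table-driven code earlier in this
+ * file: with CRC_LE_BITS == 8, one DO_CRC() step consumes a whole input
+ * byte by looking up the precomputed CRC of that one-byte message (the
+ * "multiple of the polynomial" discussed above) in crc32table_le[] and
+ * folding it in with a single xor and an 8-bit shift of the remainder.
+ */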
+
+#ifdef UNITTEST
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef UBI_LINUX /*Not used at present */
+static void
+buf_dump(char const *prefix, unsigned char const *buf, size_t len)
+{
+ fputs(prefix, stdout);
+ while (len--)
+ printf(" %02x", *buf++);
+ putchar('\n');
+
+}
+#endif
+
+static void bytereverse(unsigned char *buf, size_t len)
+{
+ while (len--) {
+ unsigned char x = bitrev8(*buf);
+ *buf++ = x;
+ }
+}
+
+static void random_garbage(unsigned char *buf, size_t len)
+{
+ while (len--)
+ *buf++ = (unsigned char) random();
+}
+
+#ifdef UBI_LINUX /* Not used at present */
+static void store_le(u32 x, unsigned char *buf)
+{
+ buf[0] = (unsigned char) x;
+ buf[1] = (unsigned char) (x >> 8);
+ buf[2] = (unsigned char) (x >> 16);
+ buf[3] = (unsigned char) (x >> 24);
+}
+#endif
+
+static void store_be(u32 x, unsigned char *buf)
+{
+ buf[0] = (unsigned char) (x >> 24);
+ buf[1] = (unsigned char) (x >> 16);
+ buf[2] = (unsigned char) (x >> 8);
+ buf[3] = (unsigned char) x;
+}
+
+/*
+ * This checks that CRC(buf + CRC(buf)) = 0, and that
+ * CRC commutes with bit-reversal. This has the side effect
+ * of bytewise bit-reversing the input buffer, and returns
+ * the CRC of the reversed buffer.
+ */
+static u32 test_step(u32 init, unsigned char *buf, size_t len)
+{
+ u32 crc1, crc2;
+ size_t i;
+
+ crc1 = crc32_be(init, buf, len);
+ store_be(crc1, buf + len);
+ crc2 = crc32_be(init, buf, len + 4);
+ if (crc2)
+ printf("\nCRC cancellation fail: 0x%08x should be 0\n",
+ crc2);
+
+ for (i = 0; i <= len + 4; i++) {
+ crc2 = crc32_be(init, buf, i);
+ crc2 = crc32_be(crc2, buf + i, len + 4 - i);
+ if (crc2)
+ printf("\nCRC split fail: 0x%08x\n", crc2);
+ }
+
+ /* Now swap it around for the other test */
+
+ bytereverse(buf, len + 4);
+ init = bitrev32(init);
+ crc2 = bitrev32(crc1);
+ if (crc1 != bitrev32(crc2))
+ printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n",
+ crc1, crc2, bitrev32(crc2));
+ crc1 = crc32_le(init, buf, len);
+ if (crc1 != crc2)
+ printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1,
+ crc2);
+ crc2 = crc32_le(init, buf, len + 4);
+ if (crc2)
+ printf("\nCRC cancellation fail: 0x%08x should be 0\n",
+ crc2);
+
+ for (i = 0; i <= len + 4; i++) {
+ crc2 = crc32_le(init, buf, i);
+ crc2 = crc32_le(crc2, buf + i, len + 4 - i);
+ if (crc2)
+ printf("\nCRC split fail: 0x%08x\n", crc2);
+ }
+
+ return crc1;
+}
+
+#define SIZE 64
+#define INIT1 0
+#define INIT2 0
+
+int main(void)
+{
+ unsigned char buf1[SIZE + 4];
+ unsigned char buf2[SIZE + 4];
+ unsigned char buf3[SIZE + 4];
+ int i, j;
+ u32 crc1, crc2, crc3;
+
+ for (i = 0; i <= SIZE; i++) {
+ printf("\rTesting length %d...", i);
+ fflush(stdout);
+ random_garbage(buf1, i);
+ random_garbage(buf2, i);
+ for (j = 0; j < i; j++)
+ buf3[j] = buf1[j] ^ buf2[j];
+
+ crc1 = test_step(INIT1, buf1, i);
+ crc2 = test_step(INIT2, buf2, i);
+ /* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */
+ crc3 = test_step(INIT1 ^ INIT2, buf3, i);
+ if (crc3 != (crc1 ^ crc2))
+ printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n",
+ crc3, crc1, crc2);
+ }
+ printf("\nAll tests complete. No failures expected.\n");
+ return 0;
+}
+
+#endif /* UNITTEST */
diff --git a/u-boot/drivers/mtd/ubi/crc32defs.h b/u-boot/drivers/mtd/ubi/crc32defs.h
new file mode 100644
index 0000000..f5a5401
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/crc32defs.h
@@ -0,0 +1,32 @@
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRCPOLY_LE 0xedb88320
+#define CRCPOLY_BE 0x04c11db7
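+
+/*
+ * Note that CRCPOLY_LE is simply CRCPOLY_BE with its 32 bits reversed,
+ * matching the lsbit-first bit ordering used by the little-endian variant
+ * described below.
+ */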
+
+/* How many bits at a time to use. Requires a table of 4<<CRC_xx_BITS bytes. */
+/* For less performance-sensitive applications, use 4 */
+#ifndef CRC_LE_BITS
+# define CRC_LE_BITS 8
+#endif
+#ifndef CRC_BE_BITS
+# define CRC_BE_BITS 8
+#endif
+
+/*
+ * Little-endian CRC computation. Used with serial bit streams sent
+ * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
+ */
+#if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1
+# error CRC_LE_BITS must be a power of 2 between 1 and 8
+#endif
+
+/*
+ * Big-endian CRC computation. Used with serial bit streams sent
+ * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
+ */
+#if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1
+# error CRC_BE_BITS must be a power of 2 between 1 and 8
+#endif
diff --git a/u-boot/drivers/mtd/ubi/crc32table.h b/u-boot/drivers/mtd/ubi/crc32table.h
new file mode 100644
index 0000000..0438af4
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/crc32table.h
@@ -0,0 +1,136 @@
+/* this file is generated - do not edit */
+
+static const u32 crc32table_le[] = {
+tole(0x00000000L), tole(0x77073096L), tole(0xee0e612cL), tole(0x990951baL),
+tole(0x076dc419L), tole(0x706af48fL), tole(0xe963a535L), tole(0x9e6495a3L),
+tole(0x0edb8832L), tole(0x79dcb8a4L), tole(0xe0d5e91eL), tole(0x97d2d988L),
+tole(0x09b64c2bL), tole(0x7eb17cbdL), tole(0xe7b82d07L), tole(0x90bf1d91L),
+tole(0x1db71064L), tole(0x6ab020f2L), tole(0xf3b97148L), tole(0x84be41deL),
+tole(0x1adad47dL), tole(0x6ddde4ebL), tole(0xf4d4b551L), tole(0x83d385c7L),
+tole(0x136c9856L), tole(0x646ba8c0L), tole(0xfd62f97aL), tole(0x8a65c9ecL),
+tole(0x14015c4fL), tole(0x63066cd9L), tole(0xfa0f3d63L), tole(0x8d080df5L),
+tole(0x3b6e20c8L), tole(0x4c69105eL), tole(0xd56041e4L), tole(0xa2677172L),
+tole(0x3c03e4d1L), tole(0x4b04d447L), tole(0xd20d85fdL), tole(0xa50ab56bL),
+tole(0x35b5a8faL), tole(0x42b2986cL), tole(0xdbbbc9d6L), tole(0xacbcf940L),
+tole(0x32d86ce3L), tole(0x45df5c75L), tole(0xdcd60dcfL), tole(0xabd13d59L),
+tole(0x26d930acL), tole(0x51de003aL), tole(0xc8d75180L), tole(0xbfd06116L),
+tole(0x21b4f4b5L), tole(0x56b3c423L), tole(0xcfba9599L), tole(0xb8bda50fL),
+tole(0x2802b89eL), tole(0x5f058808L), tole(0xc60cd9b2L), tole(0xb10be924L),
+tole(0x2f6f7c87L), tole(0x58684c11L), tole(0xc1611dabL), tole(0xb6662d3dL),
+tole(0x76dc4190L), tole(0x01db7106L), tole(0x98d220bcL), tole(0xefd5102aL),
+tole(0x71b18589L), tole(0x06b6b51fL), tole(0x9fbfe4a5L), tole(0xe8b8d433L),
+tole(0x7807c9a2L), tole(0x0f00f934L), tole(0x9609a88eL), tole(0xe10e9818L),
+tole(0x7f6a0dbbL), tole(0x086d3d2dL), tole(0x91646c97L), tole(0xe6635c01L),
+tole(0x6b6b51f4L), tole(0x1c6c6162L), tole(0x856530d8L), tole(0xf262004eL),
+tole(0x6c0695edL), tole(0x1b01a57bL), tole(0x8208f4c1L), tole(0xf50fc457L),
+tole(0x65b0d9c6L), tole(0x12b7e950L), tole(0x8bbeb8eaL), tole(0xfcb9887cL),
+tole(0x62dd1ddfL), tole(0x15da2d49L), tole(0x8cd37cf3L), tole(0xfbd44c65L),
+tole(0x4db26158L), tole(0x3ab551ceL), tole(0xa3bc0074L), tole(0xd4bb30e2L),
+tole(0x4adfa541L), tole(0x3dd895d7L), tole(0xa4d1c46dL), tole(0xd3d6f4fbL),
+tole(0x4369e96aL), tole(0x346ed9fcL), tole(0xad678846L), tole(0xda60b8d0L),
+tole(0x44042d73L), tole(0x33031de5L), tole(0xaa0a4c5fL), tole(0xdd0d7cc9L),
+tole(0x5005713cL), tole(0x270241aaL), tole(0xbe0b1010L), tole(0xc90c2086L),
+tole(0x5768b525L), tole(0x206f85b3L), tole(0xb966d409L), tole(0xce61e49fL),
+tole(0x5edef90eL), tole(0x29d9c998L), tole(0xb0d09822L), tole(0xc7d7a8b4L),
+tole(0x59b33d17L), tole(0x2eb40d81L), tole(0xb7bd5c3bL), tole(0xc0ba6cadL),
+tole(0xedb88320L), tole(0x9abfb3b6L), tole(0x03b6e20cL), tole(0x74b1d29aL),
+tole(0xead54739L), tole(0x9dd277afL), tole(0x04db2615L), tole(0x73dc1683L),
+tole(0xe3630b12L), tole(0x94643b84L), tole(0x0d6d6a3eL), tole(0x7a6a5aa8L),
+tole(0xe40ecf0bL), tole(0x9309ff9dL), tole(0x0a00ae27L), tole(0x7d079eb1L),
+tole(0xf00f9344L), tole(0x8708a3d2L), tole(0x1e01f268L), tole(0x6906c2feL),
+tole(0xf762575dL), tole(0x806567cbL), tole(0x196c3671L), tole(0x6e6b06e7L),
+tole(0xfed41b76L), tole(0x89d32be0L), tole(0x10da7a5aL), tole(0x67dd4accL),
+tole(0xf9b9df6fL), tole(0x8ebeeff9L), tole(0x17b7be43L), tole(0x60b08ed5L),
+tole(0xd6d6a3e8L), tole(0xa1d1937eL), tole(0x38d8c2c4L), tole(0x4fdff252L),
+tole(0xd1bb67f1L), tole(0xa6bc5767L), tole(0x3fb506ddL), tole(0x48b2364bL),
+tole(0xd80d2bdaL), tole(0xaf0a1b4cL), tole(0x36034af6L), tole(0x41047a60L),
+tole(0xdf60efc3L), tole(0xa867df55L), tole(0x316e8eefL), tole(0x4669be79L),
+tole(0xcb61b38cL), tole(0xbc66831aL), tole(0x256fd2a0L), tole(0x5268e236L),
+tole(0xcc0c7795L), tole(0xbb0b4703L), tole(0x220216b9L), tole(0x5505262fL),
+tole(0xc5ba3bbeL), tole(0xb2bd0b28L), tole(0x2bb45a92L), tole(0x5cb36a04L),
+tole(0xc2d7ffa7L), tole(0xb5d0cf31L), tole(0x2cd99e8bL), tole(0x5bdeae1dL),
+tole(0x9b64c2b0L), tole(0xec63f226L), tole(0x756aa39cL), tole(0x026d930aL),
+tole(0x9c0906a9L), tole(0xeb0e363fL), tole(0x72076785L), tole(0x05005713L),
+tole(0x95bf4a82L), tole(0xe2b87a14L), tole(0x7bb12baeL), tole(0x0cb61b38L),
+tole(0x92d28e9bL), tole(0xe5d5be0dL), tole(0x7cdcefb7L), tole(0x0bdbdf21L),
+tole(0x86d3d2d4L), tole(0xf1d4e242L), tole(0x68ddb3f8L), tole(0x1fda836eL),
+tole(0x81be16cdL), tole(0xf6b9265bL), tole(0x6fb077e1L), tole(0x18b74777L),
+tole(0x88085ae6L), tole(0xff0f6a70L), tole(0x66063bcaL), tole(0x11010b5cL),
+tole(0x8f659effL), tole(0xf862ae69L), tole(0x616bffd3L), tole(0x166ccf45L),
+tole(0xa00ae278L), tole(0xd70dd2eeL), tole(0x4e048354L), tole(0x3903b3c2L),
+tole(0xa7672661L), tole(0xd06016f7L), tole(0x4969474dL), tole(0x3e6e77dbL),
+tole(0xaed16a4aL), tole(0xd9d65adcL), tole(0x40df0b66L), tole(0x37d83bf0L),
+tole(0xa9bcae53L), tole(0xdebb9ec5L), tole(0x47b2cf7fL), tole(0x30b5ffe9L),
+tole(0xbdbdf21cL), tole(0xcabac28aL), tole(0x53b39330L), tole(0x24b4a3a6L),
+tole(0xbad03605L), tole(0xcdd70693L), tole(0x54de5729L), tole(0x23d967bfL),
+tole(0xb3667a2eL), tole(0xc4614ab8L), tole(0x5d681b02L), tole(0x2a6f2b94L),
+tole(0xb40bbe37L), tole(0xc30c8ea1L), tole(0x5a05df1bL), tole(0x2d02ef8dL)
+};
+#ifdef UBI_LINUX
+static const u32 crc32table_be[] = {
+tobe(0x00000000L), tobe(0x04c11db7L), tobe(0x09823b6eL), tobe(0x0d4326d9L),
+tobe(0x130476dcL), tobe(0x17c56b6bL), tobe(0x1a864db2L), tobe(0x1e475005L),
+tobe(0x2608edb8L), tobe(0x22c9f00fL), tobe(0x2f8ad6d6L), tobe(0x2b4bcb61L),
+tobe(0x350c9b64L), tobe(0x31cd86d3L), tobe(0x3c8ea00aL), tobe(0x384fbdbdL),
+tobe(0x4c11db70L), tobe(0x48d0c6c7L), tobe(0x4593e01eL), tobe(0x4152fda9L),
+tobe(0x5f15adacL), tobe(0x5bd4b01bL), tobe(0x569796c2L), tobe(0x52568b75L),
+tobe(0x6a1936c8L), tobe(0x6ed82b7fL), tobe(0x639b0da6L), tobe(0x675a1011L),
+tobe(0x791d4014L), tobe(0x7ddc5da3L), tobe(0x709f7b7aL), tobe(0x745e66cdL),
+tobe(0x9823b6e0L), tobe(0x9ce2ab57L), tobe(0x91a18d8eL), tobe(0x95609039L),
+tobe(0x8b27c03cL), tobe(0x8fe6dd8bL), tobe(0x82a5fb52L), tobe(0x8664e6e5L),
+tobe(0xbe2b5b58L), tobe(0xbaea46efL), tobe(0xb7a96036L), tobe(0xb3687d81L),
+tobe(0xad2f2d84L), tobe(0xa9ee3033L), tobe(0xa4ad16eaL), tobe(0xa06c0b5dL),
+tobe(0xd4326d90L), tobe(0xd0f37027L), tobe(0xddb056feL), tobe(0xd9714b49L),
+tobe(0xc7361b4cL), tobe(0xc3f706fbL), tobe(0xceb42022L), tobe(0xca753d95L),
+tobe(0xf23a8028L), tobe(0xf6fb9d9fL), tobe(0xfbb8bb46L), tobe(0xff79a6f1L),
+tobe(0xe13ef6f4L), tobe(0xe5ffeb43L), tobe(0xe8bccd9aL), tobe(0xec7dd02dL),
+tobe(0x34867077L), tobe(0x30476dc0L), tobe(0x3d044b19L), tobe(0x39c556aeL),
+tobe(0x278206abL), tobe(0x23431b1cL), tobe(0x2e003dc5L), tobe(0x2ac12072L),
+tobe(0x128e9dcfL), tobe(0x164f8078L), tobe(0x1b0ca6a1L), tobe(0x1fcdbb16L),
+tobe(0x018aeb13L), tobe(0x054bf6a4L), tobe(0x0808d07dL), tobe(0x0cc9cdcaL),
+tobe(0x7897ab07L), tobe(0x7c56b6b0L), tobe(0x71159069L), tobe(0x75d48ddeL),
+tobe(0x6b93dddbL), tobe(0x6f52c06cL), tobe(0x6211e6b5L), tobe(0x66d0fb02L),
+tobe(0x5e9f46bfL), tobe(0x5a5e5b08L), tobe(0x571d7dd1L), tobe(0x53dc6066L),
+tobe(0x4d9b3063L), tobe(0x495a2dd4L), tobe(0x44190b0dL), tobe(0x40d816baL),
+tobe(0xaca5c697L), tobe(0xa864db20L), tobe(0xa527fdf9L), tobe(0xa1e6e04eL),
+tobe(0xbfa1b04bL), tobe(0xbb60adfcL), tobe(0xb6238b25L), tobe(0xb2e29692L),
+tobe(0x8aad2b2fL), tobe(0x8e6c3698L), tobe(0x832f1041L), tobe(0x87ee0df6L),
+tobe(0x99a95df3L), tobe(0x9d684044L), tobe(0x902b669dL), tobe(0x94ea7b2aL),
+tobe(0xe0b41de7L), tobe(0xe4750050L), tobe(0xe9362689L), tobe(0xedf73b3eL),
+tobe(0xf3b06b3bL), tobe(0xf771768cL), tobe(0xfa325055L), tobe(0xfef34de2L),
+tobe(0xc6bcf05fL), tobe(0xc27dede8L), tobe(0xcf3ecb31L), tobe(0xcbffd686L),
+tobe(0xd5b88683L), tobe(0xd1799b34L), tobe(0xdc3abdedL), tobe(0xd8fba05aL),
+tobe(0x690ce0eeL), tobe(0x6dcdfd59L), tobe(0x608edb80L), tobe(0x644fc637L),
+tobe(0x7a089632L), tobe(0x7ec98b85L), tobe(0x738aad5cL), tobe(0x774bb0ebL),
+tobe(0x4f040d56L), tobe(0x4bc510e1L), tobe(0x46863638L), tobe(0x42472b8fL),
+tobe(0x5c007b8aL), tobe(0x58c1663dL), tobe(0x558240e4L), tobe(0x51435d53L),
+tobe(0x251d3b9eL), tobe(0x21dc2629L), tobe(0x2c9f00f0L), tobe(0x285e1d47L),
+tobe(0x36194d42L), tobe(0x32d850f5L), tobe(0x3f9b762cL), tobe(0x3b5a6b9bL),
+tobe(0x0315d626L), tobe(0x07d4cb91L), tobe(0x0a97ed48L), tobe(0x0e56f0ffL),
+tobe(0x1011a0faL), tobe(0x14d0bd4dL), tobe(0x19939b94L), tobe(0x1d528623L),
+tobe(0xf12f560eL), tobe(0xf5ee4bb9L), tobe(0xf8ad6d60L), tobe(0xfc6c70d7L),
+tobe(0xe22b20d2L), tobe(0xe6ea3d65L), tobe(0xeba91bbcL), tobe(0xef68060bL),
+tobe(0xd727bbb6L), tobe(0xd3e6a601L), tobe(0xdea580d8L), tobe(0xda649d6fL),
+tobe(0xc423cd6aL), tobe(0xc0e2d0ddL), tobe(0xcda1f604L), tobe(0xc960ebb3L),
+tobe(0xbd3e8d7eL), tobe(0xb9ff90c9L), tobe(0xb4bcb610L), tobe(0xb07daba7L),
+tobe(0xae3afba2L), tobe(0xaafbe615L), tobe(0xa7b8c0ccL), tobe(0xa379dd7bL),
+tobe(0x9b3660c6L), tobe(0x9ff77d71L), tobe(0x92b45ba8L), tobe(0x9675461fL),
+tobe(0x8832161aL), tobe(0x8cf30badL), tobe(0x81b02d74L), tobe(0x857130c3L),
+tobe(0x5d8a9099L), tobe(0x594b8d2eL), tobe(0x5408abf7L), tobe(0x50c9b640L),
+tobe(0x4e8ee645L), tobe(0x4a4ffbf2L), tobe(0x470cdd2bL), tobe(0x43cdc09cL),
+tobe(0x7b827d21L), tobe(0x7f436096L), tobe(0x7200464fL), tobe(0x76c15bf8L),
+tobe(0x68860bfdL), tobe(0x6c47164aL), tobe(0x61043093L), tobe(0x65c52d24L),
+tobe(0x119b4be9L), tobe(0x155a565eL), tobe(0x18197087L), tobe(0x1cd86d30L),
+tobe(0x029f3d35L), tobe(0x065e2082L), tobe(0x0b1d065bL), tobe(0x0fdc1becL),
+tobe(0x3793a651L), tobe(0x3352bbe6L), tobe(0x3e119d3fL), tobe(0x3ad08088L),
+tobe(0x2497d08dL), tobe(0x2056cd3aL), tobe(0x2d15ebe3L), tobe(0x29d4f654L),
+tobe(0xc5a92679L), tobe(0xc1683bceL), tobe(0xcc2b1d17L), tobe(0xc8ea00a0L),
+tobe(0xd6ad50a5L), tobe(0xd26c4d12L), tobe(0xdf2f6bcbL), tobe(0xdbee767cL),
+tobe(0xe3a1cbc1L), tobe(0xe760d676L), tobe(0xea23f0afL), tobe(0xeee2ed18L),
+tobe(0xf0a5bd1dL), tobe(0xf464a0aaL), tobe(0xf9278673L), tobe(0xfde69bc4L),
+tobe(0x89b8fd09L), tobe(0x8d79e0beL), tobe(0x803ac667L), tobe(0x84fbdbd0L),
+tobe(0x9abc8bd5L), tobe(0x9e7d9662L), tobe(0x933eb0bbL), tobe(0x97ffad0cL),
+tobe(0xafb010b1L), tobe(0xab710d06L), tobe(0xa6322bdfL), tobe(0xa2f33668L),
+tobe(0xbcb4666dL), tobe(0xb8757bdaL), tobe(0xb5365d03L), tobe(0xb1f740b4L)
+};
+#endif
diff --git a/u-boot/drivers/mtd/ubi/debug.c b/u-boot/drivers/mtd/ubi/debug.c
new file mode 100644
index 0000000..492ab5c
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/debug.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * Here we keep all the UBI debugging stuff which should normally be disabled
+ * and compiled-out, but it is extremely helpful when hunting bugs or doing big
+ * changes.
+ */
+#include <ubi_uboot.h>
+
+#ifdef CONFIG_MTD_UBI_DEBUG_MSG
+
+#include "ubi.h"
+
+/**
+ * ubi_dbg_dump_ec_hdr - dump an erase counter header.
+ * @ec_hdr: the erase counter header to dump
+ */
+void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+{
+ dbg_msg("erase counter header dump:");
+ dbg_msg("magic %#08x", be32_to_cpu(ec_hdr->magic));
+ dbg_msg("version %d", (int)ec_hdr->version);
+ dbg_msg("ec %llu", (long long)be64_to_cpu(ec_hdr->ec));
+ dbg_msg("vid_hdr_offset %d", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset));
+ dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc));
+ dbg_msg("erase counter header hexdump:");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ ec_hdr, UBI_EC_HDR_SIZE, 1);
+}
+
+/**
+ * ubi_dbg_dump_vid_hdr - dump a volume identifier header.
+ * @vid_hdr: the volume identifier header to dump
+ */
+void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+{
+ dbg_msg("volume identifier header dump:");
+ dbg_msg("magic %08x", be32_to_cpu(vid_hdr->magic));
+ dbg_msg("version %d", (int)vid_hdr->version);
+ dbg_msg("vol_type %d", (int)vid_hdr->vol_type);
+ dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag);
+ dbg_msg("compat %d", (int)vid_hdr->compat);
+ dbg_msg("vol_id %d", be32_to_cpu(vid_hdr->vol_id));
+ dbg_msg("lnum %d", be32_to_cpu(vid_hdr->lnum));
+ dbg_msg("leb_ver %u", be32_to_cpu(vid_hdr->leb_ver));
+ dbg_msg("data_size %d", be32_to_cpu(vid_hdr->data_size));
+ dbg_msg("used_ebs %d", be32_to_cpu(vid_hdr->used_ebs));
+ dbg_msg("data_pad %d", be32_to_cpu(vid_hdr->data_pad));
+ dbg_msg("sqnum %llu",
+ (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
+ dbg_msg("hdr_crc %08x", be32_to_cpu(vid_hdr->hdr_crc));
+ dbg_msg("volume identifier header hexdump:");
+}
+
+/**
+ * ubi_dbg_dump_vol_info - dump volume information.
+ * @vol: UBI volume description object
+ */
+void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
+{
+ dbg_msg("volume information dump:");
+ dbg_msg("vol_id %d", vol->vol_id);
+ dbg_msg("reserved_pebs %d", vol->reserved_pebs);
+ dbg_msg("alignment %d", vol->alignment);
+ dbg_msg("data_pad %d", vol->data_pad);
+ dbg_msg("vol_type %d", vol->vol_type);
+ dbg_msg("name_len %d", vol->name_len);
+ dbg_msg("usable_leb_size %d", vol->usable_leb_size);
+ dbg_msg("used_ebs %d", vol->used_ebs);
+ dbg_msg("used_bytes %lld", vol->used_bytes);
+ dbg_msg("last_eb_bytes %d", vol->last_eb_bytes);
+ dbg_msg("corrupted %d", vol->corrupted);
+ dbg_msg("upd_marker %d", vol->upd_marker);
+
+ if (vol->name_len <= UBI_VOL_NAME_MAX &&
+ strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
+ dbg_msg("name %s", vol->name);
+ } else {
+ dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c",
+ vol->name[0], vol->name[1], vol->name[2],
+ vol->name[3], vol->name[4]);
+ }
+}
+
+/**
+ * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * @r: the object to dump
+ * @idx: volume table index
+ */
+void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+{
+ int name_len = be16_to_cpu(r->name_len);
+
+ dbg_msg("volume table record %d dump:", idx);
+ dbg_msg("reserved_pebs %d", be32_to_cpu(r->reserved_pebs));
+ dbg_msg("alignment %d", be32_to_cpu(r->alignment));
+ dbg_msg("data_pad %d", be32_to_cpu(r->data_pad));
+ dbg_msg("vol_type %d", (int)r->vol_type);
+ dbg_msg("upd_marker %d", (int)r->upd_marker);
+ dbg_msg("name_len %d", name_len);
+
+ if (r->name[0] == '\0') {
+ dbg_msg("name NULL");
+ return;
+ }
+
+ if (name_len <= UBI_VOL_NAME_MAX &&
+ strnlen(&r->name[0], name_len + 1) == name_len) {
+ dbg_msg("name %s", &r->name[0]);
+ } else {
+ dbg_msg("1st 5 characters of the name: %c%c%c%c%c",
+ r->name[0], r->name[1], r->name[2], r->name[3],
+ r->name[4]);
+ }
+ dbg_msg("crc %#08x", be32_to_cpu(r->crc));
+}
+
+/**
+ * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object.
+ * @sv: the object to dump
+ */
+void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
+{
+ dbg_msg("volume scanning information dump:");
+ dbg_msg("vol_id %d", sv->vol_id);
+ dbg_msg("highest_lnum %d", sv->highest_lnum);
+ dbg_msg("leb_count %d", sv->leb_count);
+ dbg_msg("compat %d", sv->compat);
+ dbg_msg("vol_type %d", sv->vol_type);
+ dbg_msg("used_ebs %d", sv->used_ebs);
+ dbg_msg("last_data_size %d", sv->last_data_size);
+ dbg_msg("data_pad %d", sv->data_pad);
+}
+
+/**
+ * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object.
+ * @seb: the object to dump
+ * @type: object type: 0 - not corrupted, 1 - corrupted
+ */
+void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
+{
+ dbg_msg("eraseblock scanning information dump:");
+ dbg_msg("ec %d", seb->ec);
+ dbg_msg("pnum %d", seb->pnum);
+ if (type == 0) {
+ dbg_msg("lnum %d", seb->lnum);
+ dbg_msg("scrub %d", seb->scrub);
+ dbg_msg("sqnum %llu", seb->sqnum);
+ dbg_msg("leb_ver %u", seb->leb_ver);
+ }
+}
+
+/**
+ * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * @req: the object to dump
+ */
+void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
+{
+ char nm[17];
+
+ dbg_msg("volume creation request dump:");
+ dbg_msg("vol_id %d", req->vol_id);
+ dbg_msg("alignment %d", req->alignment);
+ dbg_msg("bytes %lld", (long long)req->bytes);
+ dbg_msg("vol_type %d", req->vol_type);
+ dbg_msg("name_len %d", req->name_len);
+
+ memcpy(nm, req->name, 16);
+ nm[16] = 0;
+ dbg_msg("the 1st 16 characters of the name: %s", nm);
+}
+
+#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
diff --git a/u-boot/drivers/mtd/ubi/debug.h b/u-boot/drivers/mtd/ubi/debug.h
new file mode 100644
index 0000000..b44380b
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/debug.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __UBI_DEBUG_H__
+#define __UBI_DEBUG_H__
+
+#ifdef CONFIG_MTD_UBI_DEBUG
+#ifdef UBI_LINUX
+#include <linux/random.h>
+#endif
+
+#define ubi_assert(expr) BUG_ON(!(expr))
+#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
+#else
+#define ubi_assert(expr) ({})
+#define dbg_err(fmt, ...) ({})
+#endif
+
+#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
+#define DBG_DISABLE_BGT 1
+#else
+#define DBG_DISABLE_BGT 0
+#endif
+
+#ifdef CONFIG_MTD_UBI_DEBUG_MSG
+/* Generic debugging message */
+#define dbg_msg(fmt, ...) \
+ printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", \
+ __FUNCTION__, ##__VA_ARGS__)
+
+#define ubi_dbg_dump_stack() dump_stack()
+
+struct ubi_ec_hdr;
+struct ubi_vid_hdr;
+struct ubi_volume;
+struct ubi_vtbl_record;
+struct ubi_scan_volume;
+struct ubi_scan_leb;
+struct ubi_mkvol_req;
+
+void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
+void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
+void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
+void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
+
+#else
+
+#define dbg_msg(fmt, ...) ({})
+#define ubi_dbg_dump_stack() ({})
+#define ubi_dbg_dump_ec_hdr(ec_hdr) ({})
+#define ubi_dbg_dump_vid_hdr(vid_hdr) ({})
+#define ubi_dbg_dump_vol_info(vol) ({})
+#define ubi_dbg_dump_vtbl_record(r, idx) ({})
+#define ubi_dbg_dump_sv(sv) ({})
+#define ubi_dbg_dump_seb(seb, type) ({})
+#define ubi_dbg_dump_mkvol_req(req) ({})
+
+#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
+
+#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
+/* Messages from the eraseblock association unit */
+#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#else
+#define dbg_eba(fmt, ...) ({})
+#endif
+
+#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
+/* Messages from the wear-leveling unit */
+#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#else
+#define dbg_wl(fmt, ...) ({})
+#endif
+
+#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
+/* Messages from the input/output unit */
+#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#else
+#define dbg_io(fmt, ...) ({})
+#endif
+
+#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
+/* Initialization and build messages */
+#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#else
+#define dbg_bld(fmt, ...) ({})
+#endif
+
+#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS
+/**
+ * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
+ *
+ * Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
+ */
+static inline int ubi_dbg_is_bitflip(void)
+{
+ return !(random32() % 200);
+}
+#else
+#define ubi_dbg_is_bitflip() 0
+#endif
+
+#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
+/**
+ * ubi_dbg_is_write_failure - if it is time to emulate a write failure.
+ *
+ * Returns non-zero if a write failure should be emulated, otherwise returns
+ * zero.
+ */
+static inline int ubi_dbg_is_write_failure(void)
+{
+ return !(random32() % 500);
+}
+#else
+#define ubi_dbg_is_write_failure() 0
+#endif
+
+#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
+/**
+ * ubi_dbg_is_erase_failure - if it's time to emulate an erase failure.
+ *
+ * Returns non-zero if an erase failure should be emulated, otherwise returns
+ * zero.
+ */
+static inline int ubi_dbg_is_erase_failure(void)
+{
+ return !(random32() % 400);
+}
+#else
+#define ubi_dbg_is_erase_failure() 0
+#endif
+
+#endif /* !__UBI_DEBUG_H__ */
diff --git a/u-boot/drivers/mtd/ubi/eba.c b/u-boot/drivers/mtd/ubi/eba.c
new file mode 100644
index 0000000..d523c94
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/eba.c
@@ -0,0 +1,1256 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * The UBI Eraseblock Association (EBA) unit.
+ *
+ * This unit is responsible for I/O to/from logical eraseblock.
+ *
+ * Although in this implementation the EBA table is fully kept and managed in
+ * RAM, which assumes poor scalability, it might be (partially) maintained on
+ * flash in future implementations.
+ *
+ * The EBA unit implements per-logical eraseblock locking. Before accessing a
+ * logical eraseblock it is locked for reading or writing. The per-logical
+ * eraseblock locking is implemented by means of the lock tree. The lock tree
+ * is an RB-tree which refers all the currently locked logical eraseblocks. The
+ * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
+ * (@vol_id, @lnum) pairs.
+ *
+ * EBA also maintains the global sequence counter which is incremented each
+ * time a logical eraseblock is mapped to a physical eraseblock and it is
+ * stored in the volume identifier header. This means that each VID header has
+ * a unique sequence number. The sequence number is only increased and we assume
+ * 64 bits is enough to never overflow.
+ */
+
+#ifdef UBI_LINUX
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/err.h>
+#endif
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
+/**
+ * next_sqnum - get next sequence number.
+ * @ubi: UBI device description object
+ *
+ * This function returns the next sequence number to use, which is just the current
+ * global sequence counter value. It also increases the global sequence
+ * counter.
+ */
+static unsigned long long next_sqnum(struct ubi_device *ubi)
+{
+ unsigned long long sqnum;
+
+ spin_lock(&ubi->ltree_lock);
+ sqnum = ubi->global_sqnum++;
+ spin_unlock(&ubi->ltree_lock);
+
+ return sqnum;
+}
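+
+/*
+ * Back-of-the-envelope sketch (illustrative only, not used by the driver):
+ * even at one million LEB-to-PEB mappings per second, exhausting the 64-bit
+ * counter above would take 2^64 / 10^6 ~ 1.8 * 10^13 seconds, i.e. roughly
+ * 580,000 years, which is why no overflow handling is done.
+ */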
+
+/**
+ * ubi_get_compat - get compatibility flags of a volume.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ *
+ * This function returns compatibility flags for an internal volume. User
+ * volumes have no compatibility flags, so %0 is returned.
+ */
+static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
+{
+ if (vol_id == UBI_LAYOUT_VOLUME_ID)
+ return UBI_LAYOUT_VOLUME_COMPAT;
+ return 0;
+}
+
+/**
+ * ltree_lookup - look up the lock tree.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
+ * object if the logical eraseblock is locked and %NULL if it is not.
+ * @ubi->ltree_lock has to be locked.
+ */
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+ int lnum)
+{
+ struct rb_node *p;
+
+ p = ubi->ltree.rb_node;
+ while (p) {
+ struct ubi_ltree_entry *le;
+
+ le = rb_entry(p, struct ubi_ltree_entry, rb);
+
+ if (vol_id < le->vol_id)
+ p = p->rb_left;
+ else if (vol_id > le->vol_id)
+ p = p->rb_right;
+ else {
+ if (lnum < le->lnum)
+ p = p->rb_left;
+ else if (lnum > le->lnum)
+ p = p->rb_right;
+ else
+ return le;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * ltree_add_entry - add new entry to the lock tree.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
+ * lock tree. If such entry is already there, its usage counter is increased.
+ * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
+ * failed.
+ */
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+ int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le, *le1, *le_free;
+
+ le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
+ if (!le)
+ return ERR_PTR(-ENOMEM);
+
+ le->users = 0;
+ init_rwsem(&le->mutex);
+ le->vol_id = vol_id;
+ le->lnum = lnum;
+
+ spin_lock(&ubi->ltree_lock);
+ le1 = ltree_lookup(ubi, vol_id, lnum);
+
+ if (le1) {
+ /*
+ * This logical eraseblock is already locked. The newly
+ * allocated lock entry is not needed.
+ */
+ le_free = le;
+ le = le1;
+ } else {
+ struct rb_node **p, *parent = NULL;
+
+ /*
+ * No lock entry, add the newly allocated one to the
+ * @ubi->ltree RB-tree.
+ */
+ le_free = NULL;
+
+ p = &ubi->ltree.rb_node;
+ while (*p) {
+ parent = *p;
+ le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
+
+ if (vol_id < le1->vol_id)
+ p = &(*p)->rb_left;
+ else if (vol_id > le1->vol_id)
+ p = &(*p)->rb_right;
+ else {
+ ubi_assert(lnum != le1->lnum);
+ if (lnum < le1->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ }
+
+ rb_link_node(&le->rb, parent, p);
+ rb_insert_color(&le->rb, &ubi->ltree);
+ }
+ le->users += 1;
+ spin_unlock(&ubi->ltree_lock);
+
+ if (le_free)
+ kfree(le_free);
+
+ return le;
+}
+
+/**
+ * leb_read_lock - lock logical eraseblock for reading.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for reading. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ down_read(&le->mutex);
+ return 0;
+}
+
+/**
+ * leb_read_unlock - unlock logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ */
+static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ int _free = 0;
+ struct ubi_ltree_entry *le;
+
+ spin_lock(&ubi->ltree_lock);
+ le = ltree_lookup(ubi, vol_id, lnum);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ _free = 1;
+ }
+ spin_unlock(&ubi->ltree_lock);
+
+ up_read(&le->mutex);
+ if (_free)
+ kfree(le);
+}
+
+/**
+ * leb_write_lock - lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ down_write(&le->mutex);
+ return 0;
+}
+
+/**
+ * leb_write_trylock - try to lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing if there is no
+ * contention and does nothing if there is contention. Returns %0 in case of
+ * success, %1 in case of contention, and a negative error code in case of
+ * failure.
+ */
+static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ int _free;
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ if (down_write_trylock(&le->mutex))
+ return 0;
+
+ /* Contention, cancel */
+ spin_lock(&ubi->ltree_lock);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ _free = 1;
+ } else
+ _free = 0;
+ spin_unlock(&ubi->ltree_lock);
+ if (_free)
+ kfree(le);
+
+ return 1;
+}
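+
+/*
+ * Illustrative caller pattern for the 0/1/negative convention above (a sketch
+ * only; 'ubi_eba_copy_leb()' below is the real user):
+ *
+ *    err = leb_write_trylock(ubi, vol_id, lnum);
+ *    if (err < 0)
+ *            return err;    (a real error)
+ *    if (err == 1)
+ *            return 1;      (contention, retry the operation later)
+ *    (err == 0 means the LEB is now locked for writing)
+ */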
+
+/**
+ * leb_write_unlock - unlock logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ */
+static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ int _free;
+ struct ubi_ltree_entry *le;
+
+ spin_lock(&ubi->ltree_lock);
+ le = ltree_lookup(ubi, vol_id, lnum);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ _free = 1;
+ } else
+ _free = 0;
+ spin_unlock(&ubi->ltree_lock);
+
+ up_write(&le->mutex);
+ if (_free)
+ kfree(le);
+}
+
+/**
+ * ubi_eba_unmap_leb - un-map logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and schedules corresponding
+ * physical eraseblock for erasure. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum)
+{
+ int err, pnum, vol_id = vol->vol_id;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum < 0)
+ /* This logical eraseblock is already unmapped */
+ goto out_unlock;
+
+ dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
+
+ vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
+ err = ubi_wl_put_peb(ubi, pnum, 0);
+
+out_unlock:
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+/**
+ * ubi_eba_read_leb - read data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: buffer to store the read data
+ * @offset: offset from where to read
+ * @len: how many bytes to read
+ * @check: data CRC check flag
+ *
+ * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
+ * bytes. The @check flag only makes sense for static volumes and forces
+ * eraseblock data CRC checking.
+ *
+ * In case of success this function returns zero. In case of a static volume,
+ * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
+ * returned for any volume type if an ECC error was detected by the MTD device
+ * driver. Other negative error codes may be returned in case of other errors.
+ */
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check)
+{
+ int err, pnum, scrub = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t uninitialized_var(crc);
+
+ err = leb_read_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum < 0) {
+ /*
+ * The logical eraseblock is not mapped, fill the whole buffer
+ * with 0xFF bytes. The exception is static volumes for which
+ * it is an error to read unmapped logical eraseblocks.
+ */
+ dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
+ len, offset, vol_id, lnum);
+ leb_read_unlock(ubi, vol_id, lnum);
+ ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
+ memset(buf, 0xFF, len);
+ return 0;
+ }
+
+ dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ check = 0;
+
+retry:
+ if (check) {
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err > 0) {
+ /*
+ * The header is either absent or corrupted.
+ * The former case means there is a bug -
+ * switch to read-only mode just in case.
+ * The latter case means a real corruption - we
+ * may try to recover data. FIXME: but this is
+ * not implemented.
+ */
+ if (err == UBI_IO_BAD_VID_HDR) {
+ ubi_warn("bad VID header at PEB %d, LEB"
+ "%d:%d", pnum, vol_id, lnum);
+ err = -EBADMSG;
+ } else
+ ubi_ro_mode(ubi);
+ }
+ goto out_free;
+ } else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
+ ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
+
+ crc = be32_to_cpu(vid_hdr->data_crc);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ }
+
+ err = ubi_io_read_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ if (err == UBI_IO_BITFLIPS) {
+ scrub = 1;
+ err = 0;
+ } else if (err == -EBADMSG) {
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ goto out_unlock;
+ scrub = 1;
+ if (!check) {
+ ubi_msg("force data checking");
+ check = 1;
+ goto retry;
+ }
+ } else
+ goto out_unlock;
+ }
+
+ if (check) {
+ uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
+ if (crc1 != crc) {
+ ubi_warn("CRC error: calculated %#08x, must be %#08x",
+ crc1, crc);
+ err = -EBADMSG;
+ goto out_unlock;
+ }
+ }
+
+ if (scrub)
+ err = ubi_wl_scrub_peb(ubi, pnum);
+
+ leb_read_unlock(ubi, vol_id, lnum);
+ return err;
+
+out_free:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+out_unlock:
+ leb_read_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+/**
+ * recover_peb - recover from write failure.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to recover
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ * @buf: data which was not written because of the write failure
+ * @offset: offset of the failed write
+ * @len: how many bytes should have been written
+ *
+ * This function is called in case of a write failure and moves all good data
+ * from the potentially bad physical eraseblock to a good physical eraseblock.
+ * This function also writes the data which was not written due to the failure.
+ * Returns zero in case of success, and a negative
+ * error code in case of failure.
+ */
+static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
+ const void *buf, int offset, int len)
+{
+ int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
+ struct ubi_volume *vol = ubi->volumes[idx];
+ struct ubi_vid_hdr *vid_hdr;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr) {
+ return -ENOMEM;
+ }
+
+ mutex_lock(&ubi->buf_mutex);
+
+retry:
+ new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
+ if (new_pnum < 0) {
+ mutex_unlock(&ubi->buf_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return new_pnum;
+ }
+
+ ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err > 0)
+ err = -EIO;
+ goto out_put;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+ if (err)
+ goto write_error;
+
+ data_size = offset + len;
+ memset(ubi->peb_buf1 + offset, 0xFF, len);
+
+ /* Read everything before the area where the write failure happened */
+ if (offset > 0) {
+ err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
+ if (err && err != UBI_IO_BITFLIPS)
+ goto out_put;
+ }
+
+ memcpy(ubi->peb_buf1 + offset, buf, len);
+
+ err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
+ if (err)
+ goto write_error;
+
+ mutex_unlock(&ubi->buf_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+
+ vol->eba_tbl[lnum] = new_pnum;
+ ubi_wl_put_peb(ubi, pnum, 1);
+
+ ubi_msg("data was successfully recovered");
+ return 0;
+
+out_put:
+ mutex_unlock(&ubi->buf_mutex);
+ ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+write_error:
+ /*
+ * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
+ * get another one.
+ */
+ ubi_warn("failed to write to PEB %d", new_pnum);
+ ubi_wl_put_peb(ubi, new_pnum, 1);
+ if (++tries > UBI_IO_RETRIES) {
+ mutex_unlock(&ubi->buf_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+ ubi_msg("try again");
+ goto retry;
+}
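+
+/*
+ * Layout of the recovered PEB produced above (illustrative note, not code):
+ *
+ *   bytes 0 .. offset-1            old data re-read from the failing PEB
+ *   bytes offset .. offset+len-1   the @buf data whose write originally failed
+ *   rest of the PEB                left erased (0xFF)
+ */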
+
+/**
+ * ubi_eba_write_leb - write data to dynamic volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: the data to write
+ * @offset: offset within the logical eraseblock where to write
+ * @len: how many bytes to write
+ * @dtype: data type
+ *
+ * This function writes data to logical eraseblock @lnum of a dynamic volume
+ * @vol. Returns zero in case of success and a negative error code in case
+ * of failure. In case of error, it is possible that something was still
+ * written to the flash media, but it may be garbage.
+ */
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ const void *buf, int offset, int len, int dtype)
+{
+ int err, pnum, tries = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum >= 0) {
+ dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn("failed to write data to PEB %d", pnum);
+ if (err == -EIO && ubi->bad_allowed)
+ err = recover_peb(ubi, pnum, vol_id, lnum, buf,
+ offset, len);
+ if (err)
+ ubi_ro_mode(ubi);
+ }
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+ }
+
+ /*
+ * The logical eraseblock is not mapped. We have to get a free physical
+ * eraseblock and write the volume identifier header there first.
+ */
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr) {
+ leb_write_unlock(ubi, vol_id, lnum);
+ return -ENOMEM;
+ }
+
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+retry:
+ pnum = ubi_wl_get_peb(ubi, dtype);
+ if (pnum < 0) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ leb_write_unlock(ubi, vol_id, lnum);
+ return pnum;
+ }
+
+ dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ goto write_error;
+ }
+
+ if (len) {
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn("failed to write %d bytes at offset %d of "
+ "LEB %d:%d, PEB %d", len, offset, vol_id,
+ lnum, pnum);
+ goto write_error;
+ }
+ }
+
+ vol->eba_tbl[lnum] = pnum;
+
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ /*
+ * Fortunately, this is the first write operation to this physical
+ * eraseblock, so just put it and request a new one. We assume that if
+ * this physical eraseblock went bad, the erase code will handle that.
+ */
+ err = ubi_wl_put_peb(ubi, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ ubi_msg("try another PEB");
+ goto retry;
+}
+
+/**
+ * ubi_eba_write_leb_st - write data to static volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: how many bytes to write
+ * @dtype: data type
+ * @used_ebs: how many logical eraseblocks will this volume contain
+ *
+ * This function writes data to logical eraseblock @lnum of static volume
+ * @vol. The @used_ebs argument should contain the total number of logical
+ * eraseblocks in this static volume.
+ *
+ * When writing to the last logical eraseblock, the @len argument doesn't have
+ * to be aligned to the minimal I/O unit size. Instead, it has to be equal to
+ * the real data size, although the @buf buffer still has to be padded out to
+ * the aligned length. In all other cases, @len has to be aligned.
+ *
+ * It is prohibited to write more than once to logical eraseblocks of static
+ * volumes. This function returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype,
+ int used_ebs)
+{
+ int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (lnum == used_ebs - 1)
+ /* If this is the last LEB @len may be unaligned */
+ len = ALIGN(data_size, ubi->min_io_size);
+ else
+ ubi_assert(!(len & (ubi->min_io_size - 1)));
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+ crc = crc32(UBI_CRC32_INIT, buf, data_size);
+ vid_hdr->vol_type = UBI_VID_STATIC;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->used_ebs = cpu_to_be32(used_ebs);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+
+retry:
+ pnum = ubi_wl_get_peb(ubi, dtype);
+ if (pnum < 0) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ leb_write_unlock(ubi, vol_id, lnum);
+ return pnum;
+ }
+
+ dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
+ len, vol_id, lnum, pnum, used_ebs);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ goto write_error;
+ }
+
+ err = ubi_io_write_data(ubi, buf, pnum, 0, len);
+ if (err) {
+ ubi_warn("failed to write %d bytes of data to PEB %d",
+ len, pnum);
+ goto write_error;
+ }
+
+ ubi_assert(vol->eba_tbl[lnum] < 0);
+ vol->eba_tbl[lnum] = pnum;
+
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ /*
+ * This flash device does not admit of bad eraseblocks or
+ * something nasty and unexpected happened. Switch to read-only
+ * mode just in case.
+ */
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ err = ubi_wl_put_peb(ubi, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ ubi_msg("try another PEB");
+ goto retry;
+}
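+
+/*
+ * Worked example of the last-LEB rule above (illustrative): with a 2048-byte
+ * minimal I/O unit and 1000 bytes of real data in the last LEB, the caller
+ * passes len = 1000; the function then writes ALIGN(1000, 2048) = 2048 bytes,
+ * while data_size and data_crc in the VID header cover only the first 1000.
+ */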
+
+/**
+ * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: how many bytes to write
+ * @dtype: data type
+ *
+ * This function changes the contents of a logical eraseblock atomically. @buf
+ * has to contain new logical eraseblock data, and @len - the length of the
+ * data, which has to be aligned. This function guarantees that in case of an
+ * unclean reboot the old contents are preserved. Returns zero in case of
+ * success and a negative error code in case of failure.
+ *
+ * UBI reserves one LEB for the "atomic LEB change" operation, so only one
+ * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
+ */
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype)
+{
+ int err, pnum, tries = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (len == 0) {
+ /*
+ * Special case when data length is zero. In this case the LEB
+ * has to be unmapped and mapped somewhere else.
+ */
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ return err;
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ }
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ mutex_lock(&ubi->alc_mutex);
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ goto out_mutex;
+
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+ crc = crc32(UBI_CRC32_INIT, buf, len);
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->data_size = cpu_to_be32(len);
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_crc = cpu_to_be32(crc);
+
+retry:
+ pnum = ubi_wl_get_peb(ubi, dtype);
+ if (pnum < 0) {
+ err = pnum;
+ goto out_leb_unlock;
+ }
+
+ dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
+ vol_id, lnum, vol->eba_tbl[lnum], pnum);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ goto write_error;
+ }
+
+ err = ubi_io_write_data(ubi, buf, pnum, 0, len);
+ if (err) {
+ ubi_warn("failed to write %d bytes of data to PEB %d",
+ len, pnum);
+ goto write_error;
+ }
+
+ if (vol->eba_tbl[lnum] >= 0) {
+ err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
+ if (err)
+ goto out_leb_unlock;
+ }
+
+ vol->eba_tbl[lnum] = pnum;
+
+out_leb_unlock:
+ leb_write_unlock(ubi, vol_id, lnum);
+out_mutex:
+ mutex_unlock(&ubi->alc_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ /*
+ * This flash device does not admit of bad eraseblocks or
+ * something nasty and unexpected happened. Switch to read-only
+ * mode just in case.
+ */
+ ubi_ro_mode(ubi);
+ goto out_leb_unlock;
+ }
+
+ err = ubi_wl_put_peb(ubi, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ goto out_leb_unlock;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ ubi_msg("try another PEB");
+ goto retry;
+}
+
+/**
+ * ubi_eba_copy_leb - copy logical eraseblock.
+ * @ubi: UBI device description object
+ * @from: physical eraseblock number from where to copy
+ * @to: physical eraseblock number where to copy
+ * @vid_hdr: VID header of the @from physical eraseblock
+ *
+ * This function copies logical eraseblock from physical eraseblock @from to
+ * physical eraseblock @to. The @vid_hdr buffer may be changed by this
+ * function. Returns:
+ * o %0 in case of success;
+ * o %1 if the operation was canceled and should be tried later (e.g.,
+ * because a bit-flip was detected at the target PEB);
+ * o %2 if the volume is being deleted and this LEB should not be moved.
+ */
+int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ struct ubi_vid_hdr *vid_hdr)
+{
+ int err, vol_id, lnum, data_size, aldata_size, idx;
+ struct ubi_volume *vol;
+ uint32_t crc;
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
+ dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
+
+ if (vid_hdr->vol_type == UBI_VID_STATIC) {
+ data_size = be32_to_cpu(vid_hdr->data_size);
+ aldata_size = ALIGN(data_size, ubi->min_io_size);
+ } else
+ data_size = aldata_size =
+ ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
+
+ idx = vol_id2idx(ubi, vol_id);
+ spin_lock(&ubi->volumes_lock);
+ /*
+ * Note, we may race with volume deletion, which means that the volume
+ * this logical eraseblock belongs to might be being deleted. Since the
+ * volume deletion unmaps all the volume's logical eraseblocks, the deletion
+ * task will block in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
+ */
+ vol = ubi->volumes[idx];
+ if (!vol) {
+ /* No need to do further work, cancel */
+ dbg_eba("volume %d is being removed, cancel", vol_id);
+ spin_unlock(&ubi->volumes_lock);
+ return 2;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ /*
+ * We do not want anybody to write to this logical eraseblock while we
+ * are moving it, so lock it.
+ *
+ * Note, we are using non-waiting locking here, because we cannot sleep
+ * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
+ * unmapping the LEB which is mapped to the PEB we are going to move
+ * (@from). This task locks the LEB and goes sleep in the
+ * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+ * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
+ * LEB is already locked, we just do not move it and return %1.
+ */
+ err = leb_write_trylock(ubi, vol_id, lnum);
+ if (err) {
+ dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
+ return err;
+ }
+
+ /*
+ * The LEB might have been put meanwhile, and the task which put it is
+ * probably waiting on @ubi->move_mutex. No need to continue the work,
+ * cancel it.
+ */
+ if (vol->eba_tbl[lnum] != from) {
+ dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
+ "PEB %d, cancel", vol_id, lnum, from,
+ vol->eba_tbl[lnum]);
+ err = 1;
+ goto out_unlock_leb;
+ }
+
+ /*
+ * OK, now the LEB is locked and we can safely start moving it. This
+ * function utilizes the @ubi->peb_buf1 buffer which is shared with some
+ * other functions, so lock the buffer by taking @ubi->buf_mutex.
+ */
+ mutex_lock(&ubi->buf_mutex);
+ dbg_eba("read %d bytes of data", aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_warn("error %d while reading data from PEB %d",
+ err, from);
+ goto out_unlock_buf;
+ }
+
+ /*
+ * Now we have got to calculate how much data we have to copy. In
+ * case of a static volume it is fairly easy - the VID header contains
+ * the data size. In case of a dynamic volume it is more difficult - we
+ * have to read the contents, cut 0xFF bytes from the end and copy only
+ * the first part. We must do this to avoid writing 0xFF bytes as it
+ * may have some side-effects. And not only this. It is important not
+ * to include those 0xFFs in the CRC because later they may be filled
+ * with data.
+ */
+ if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
+ aldata_size = data_size =
+ ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
+
+ cond_resched();
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
+ cond_resched();
+
+ /*
+ * It may turn out that the whole @from physical eraseblock contains
+ * only 0xFF bytes. Then we only have to write the VID header and no
+ * data. This also means we should not set
+ * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
+ */
+ if (data_size > 0) {
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+ }
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+
+ err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
+ if (err)
+ goto out_unlock_buf;
+
+ cond_resched();
+
+ /* Read the VID header back and check if it was written correctly */
+ err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
+ if (err) {
+ if (err != UBI_IO_BITFLIPS)
+ ubi_warn("cannot read VID header back from PEB %d", to);
+ else
+ err = 1;
+ goto out_unlock_buf;
+ }
+
+ if (data_size > 0) {
+ err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
+ if (err)
+ goto out_unlock_buf;
+
+ cond_resched();
+
+ /*
+ * We've written the data and are going to read it back to make
+ * sure it was written correctly.
+ */
+
+ err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
+ if (err) {
+ if (err != UBI_IO_BITFLIPS)
+ ubi_warn("cannot read data back from PEB %d",
+ to);
+ else
+ err = 1;
+ goto out_unlock_buf;
+ }
+
+ cond_resched();
+
+ if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
+ ubi_warn("read data back from PEB %d - it is different",
+ to);
+ goto out_unlock_buf;
+ }
+ }
+
+ ubi_assert(vol->eba_tbl[lnum] == from);
+ vol->eba_tbl[lnum] = to;
+
+out_unlock_buf:
+ mutex_unlock(&ubi->buf_mutex);
+out_unlock_leb:
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+}
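+
+/*
+ * Sketch of how a caller (the wear-leveling unit, which lives outside this
+ * file) might act on the return codes documented above (illustrative only):
+ *
+ *    err = ubi_eba_copy_leb(ubi, from, to, vid_hdr);
+ *    if (err == 2)
+ *            (the volume is being deleted - drop the move)
+ *    else if (err == 1)
+ *            (cancelled, e.g. contention or a bit-flip - retry later)
+ *    else if (err)
+ *            (a real error)
+ */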
+
+/**
+ * ubi_eba_init_scan - initialize the EBA unit using scanning information.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+{
+ int i, j, err, num_volumes;
+ struct ubi_scan_volume *sv;
+ struct ubi_volume *vol;
+ struct ubi_scan_leb *seb;
+ struct rb_node *rb;
+
+ dbg_eba("initialize EBA unit");
+
+ spin_lock_init(&ubi->ltree_lock);
+ mutex_init(&ubi->alc_mutex);
+ ubi->ltree = RB_ROOT;
+
+ ubi->global_sqnum = si->max_sqnum + 1;
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ for (i = 0; i < num_volumes; i++) {
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ cond_resched();
+
+ vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
+ GFP_KERNEL);
+ if (!vol->eba_tbl) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
+
+ sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
+ if (!sv)
+ continue;
+
+ ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
+ if (seb->lnum >= vol->reserved_pebs)
+ /*
+ * This may happen in case of an unclean reboot
+ * during re-size.
+ */
+ ubi_scan_move_to_list(sv, seb, &si->erase);
+ vol->eba_tbl[seb->lnum] = seb->pnum;
+ }
+ }
+
+ if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+ ubi_err("no enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, EBA_RESERVED_PEBS);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= EBA_RESERVED_PEBS;
+ ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
+ if (ubi->bad_allowed) {
+ ubi_calculate_reserved(ubi);
+
+ if (ubi->avail_pebs < ubi->beb_rsvd_level) {
+ /* Not enough free physical eraseblocks */
+ ubi->beb_rsvd_pebs = ubi->avail_pebs;
+ ubi_warn("cannot reserve enough PEBs for bad PEB "
+ "handling, reserved %d, need %d",
+ ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+ } else
+ ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
+
+ ubi->avail_pebs -= ubi->beb_rsvd_pebs;
+ ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
+ }
+
+ dbg_eba("EBA unit is initialized");
+ return 0;
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+ kfree(ubi->volumes[i]->eba_tbl);
+ }
+ return err;
+}
+
+/**
+ * ubi_eba_close - close EBA unit.
+ * @ubi: UBI device description object
+ */
+void ubi_eba_close(const struct ubi_device *ubi)
+{
+ int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ dbg_eba("close EBA unit");
+
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+ kfree(ubi->volumes[i]->eba_tbl);
+ }
+}
diff --git a/u-boot/drivers/mtd/ubi/io.c b/u-boot/drivers/mtd/ubi/io.c
new file mode 100644
index 0000000..8423894
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/io.c
@@ -0,0 +1,1274 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI input/output unit.
+ *
+ * This unit provides a uniform way to work with all kinds of the underlying
+ * MTD devices. It also implements handy functions for reading and writing UBI
+ * headers.
+ *
+ * We are trying to have a paranoid mindset and not to trust what we read
+ * from the flash media in order to be more secure and robust. So this unit
+ * validates every single header it reads from the flash media.
+ *
+ * Some words about how the eraseblock headers are stored.
+ *
+ * The erase counter header is always stored at offset zero. By default, the
+ * VID header is stored after the EC header at the closest aligned offset
+ * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID
+ * header at the closest aligned offset. But this default layout may be
+ * changed. For example, for different reasons (e.g., optimization) UBI may be
+ * asked to put the VID header at a further offset, and even at an unaligned
+ * offset. Of course, if the offset of the VID header is unaligned, UBI adds
+ * proper padding in front of it. Data offset may also be changed but it has to
+ * be aligned.
+ *
+ * About minimal I/O units. In general, UBI assumes flash device model where
+ * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1,
+ * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the
+ * @ubi->mtd->writesize field. But as an exception, UBI admits of using another
+ * (smaller) minimal I/O unit size for EC and VID headers to make it possible
+ * to do different optimizations.
+ *
+ * This is extremely useful in case of NAND flashes which admit of several
+ * write operations to one NAND page. In this case UBI can fit EC and VID
+ * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal
+ * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still
+ * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI
+ * users.
+ *
+ * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so
+ * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID
+ * headers.
+ *
+ * Q: why not just to treat sub-page as a minimal I/O unit of this flash
+ * device, e.g., make @ubi->min_io_size = 512 in the example above?
+ *
+ * A: because when writing a sub-page, MTD still writes a full 2K page but the
+ * bytes which are not relevant to the sub-page are 0xFF. So, basically, writing
+ * 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page. Thus, we
+ * prefer to use sub-pages only for EC and VID headers.
+ *
+ * As it was noted above, the VID header may start at a non-aligned offset.
+ * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
+ * the VID header may reside at offset 1984 which is the last 64 bytes of the
+ * last sub-page (EC header is always at offset zero). This causes some
+ * difficulties when reading and writing VID headers.
+ *
+ * Suppose we have a 64-byte buffer and we read a VID header into it. We change
+ * the data and want to write this VID header out. As we can only write in
+ * 512-byte chunks, we have to allocate one more buffer and copy our VID header
+ * to offset 448 of this buffer.
+ *
+ * The I/O unit does the following trick in order to avoid this extra copy.
+ * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header
+ * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the
+ * VID header is being written out, it shifts the VID header pointer back and
+ * writes the whole sub-page.
+ */
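+
+/*
+ * Minimal sketch of the pointer trick described above (illustrative only;
+ * @ubi->vid_hdr_alsize and @ubi->vid_hdr_shift are the fields the text
+ * refers to):
+ *
+ *    buf = kmalloc(ubi->vid_hdr_alsize, ...);   allocate the whole sub-page
+ *    vid_hdr = buf + ubi->vid_hdr_shift;        pointer handed to callers
+ *    ...                                        caller fills in the header
+ *    write ubi->vid_hdr_alsize bytes starting at buf, not at vid_hdr
+ */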
+
+#ifdef UBI_LINUX
+#include <linux/crc32.h>
+#include <linux/err.h>
+#endif
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr);
+static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr);
+static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+ int len);
+#else
+#define paranoid_check_not_bad(ubi, pnum) 0
+#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
+#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
+#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
+#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
+#define paranoid_check_all_ff(ubi, pnum, offset, len) 0
+#endif
+
+/**
+ * ubi_io_read - read data from a physical eraseblock.
+ * @ubi: UBI device description object
+ * @buf: buffer where to store the read data
+ * @pnum: physical eraseblock number to read from
+ * @offset: offset within the physical eraseblock from where to read
+ * @len: how many bytes to read
+ *
+ * This function reads data from offset @offset of physical eraseblock @pnum
+ * and stores the read data in the @buf buffer. The following return codes are
+ * possible:
+ *
+ * o %0 if all the requested data were successfully read;
+ * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
+ * correctable bit-flips were detected; this is harmless but may indicate
+ * that this eraseblock may become bad soon (but does not have to);
+ * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
+ * example it can be an ECC error in case of NAND; this most probably means
+ * that the data is corrupted;
+ * o %-EIO if some I/O error occurred;
+ * o other negative error codes in case of other errors.
+ */
+int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
+ int len)
+{
+ int err, retries = 0;
+ size_t read;
+ loff_t addr;
+
+ dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
+ ubi_assert(len > 0);
+
+ err = paranoid_check_not_bad(ubi, pnum);
+ if (err)
+ return err > 0 ? -EINVAL : err;
+
+ addr = (loff_t)pnum * ubi->peb_size + offset;
+retry:
+ err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ if (err) {
+ if (err == -EUCLEAN) {
+ /*
+ * -EUCLEAN is reported if there was a bit-flip which
+ * was corrected, so this is harmless.
+ */
+ ubi_msg("fixable bit-flip detected at PEB %d", pnum);
+ ubi_assert(len == read);
+ return UBI_IO_BITFLIPS;
+ }
+
+ if (read != len && retries++ < UBI_IO_RETRIES) {
+ dbg_io("error %d while reading %d bytes from PEB %d:%d, "
+ "read only %zd bytes, retry",
+ err, len, pnum, offset, read);
+ yield();
+ goto retry;
+ }
+
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, "
+ "read %zd bytes", err, len, pnum, offset, read);
+ ubi_dbg_dump_stack();
+
+ /*
+ * The driver should never return -EBADMSG if it failed to read
+ * all the requested data. But some buggy drivers might do
+ * this, so we change it to -EIO.
+ */
+ if (read != len && err == -EBADMSG) {
+ ubi_assert(0);
+ printk("%s[%d] not here\n", __func__, __LINE__);
+/* err = -EIO; */
+ }
+ } else {
+ ubi_assert(len == read);
+
+ if (ubi_dbg_is_bitflip()) {
+ dbg_msg("bit-flip (emulated)");
+ err = UBI_IO_BITFLIPS;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ubi_io_write - write data to a physical eraseblock.
+ * @ubi: UBI device description object
+ * @buf: buffer with the data to write
+ * @pnum: physical eraseblock number to write to
+ * @offset: offset within the physical eraseblock where to write
+ * @len: how many bytes to write
+ *
+ * This function writes @len bytes of data from buffer @buf to offset @offset
+ * of physical eraseblock @pnum. If all the data were successfully written,
+ * zero is returned. If an error occurred, this function returns a negative
+ * error code. If %-EIO is returned, the physical eraseblock most probably went
+ * bad.
+ *
+ * Note, in case of an error, it is possible that something was still written
+ * to the flash media, but it may be garbage.
+ */
+int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
+ int len)
+{
+ int err;
+ size_t written;
+ loff_t addr;
+
+ dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
+ ubi_assert(offset % ubi->hdrs_min_io_size == 0);
+ ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
+
+ if (ubi->ro_mode) {
+ ubi_err("read-only mode");
+ return -EROFS;
+ }
+
+ /* The below has to be compiled out if paranoid checks are disabled */
+
+ err = paranoid_check_not_bad(ubi, pnum);
+ if (err)
+ return err > 0 ? -EINVAL : err;
+
+ /* The area we are writing to has to contain all 0xFF bytes */
+ err = paranoid_check_all_ff(ubi, pnum, offset, len);
+ if (err)
+ return err > 0 ? -EINVAL : err;
+
+ if (offset >= ubi->leb_start) {
+ /*
+ * We write to the data area of the physical eraseblock. Make
+ * sure it has valid EC and VID headers.
+ */
+ err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ if (err)
+ return err > 0 ? -EINVAL : err;
+ err = paranoid_check_peb_vid_hdr(ubi, pnum);
+ if (err)
+ return err > 0 ? -EINVAL : err;
+ }
+
+ if (ubi_dbg_is_write_failure()) {
+ dbg_err("cannot write %d bytes to PEB %d:%d "
+ "(emulated)", len, pnum, offset);
+ ubi_dbg_dump_stack();
+ return -EIO;
+ }
+
+ addr = (loff_t)pnum * ubi->peb_size + offset;
+ err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
+ if (err) {
+ ubi_err("error %d while writing %d bytes to PEB %d:%d, written"
+ " %zd bytes", err, len, pnum, offset, written);
+ ubi_dbg_dump_stack();
+ } else
+ ubi_assert(written == len);
+
+ return err;
+}
+
+/**
+ * erase_callback - MTD erasure call-back.
+ * @ei: MTD erase information object.
+ *
+ * Note, even though the MTD erase interface is asynchronous, all the current
+ * implementations are synchronous anyway.
+ */
+static void erase_callback(struct erase_info *ei)
+{
+ wake_up_interruptible((wait_queue_head_t *)ei->priv);
+}
+
+/**
+ * do_sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to erase
+ *
+ * This function synchronously erases physical eraseblock @pnum and returns
+ * zero in case of success and a negative error code in case of failure. If
+ * %-EIO is returned, the physical eraseblock most probably went bad.
+ */
+static int do_sync_erase(struct ubi_device *ubi, int pnum)
+{
+ int err, retries = 0;
+ struct erase_info ei;
+ wait_queue_head_t wq;
+
+ dbg_io("erase PEB %d", pnum);
+
+retry:
+ init_waitqueue_head(&wq);
+ memset(&ei, 0, sizeof(struct erase_info));
+
+ ei.mtd = ubi->mtd;
+ ei.addr = (loff_t)pnum * ubi->peb_size;
+ ei.len = ubi->peb_size;
+ ei.callback = erase_callback;
+ ei.priv = (unsigned long)&wq;
+
+ err = ubi->mtd->erase(ubi->mtd, &ei);
+ if (err) {
+ if (retries++ < UBI_IO_RETRIES) {
+ dbg_io("error %d while erasing PEB %d, retry",
+ err, pnum);
+ yield();
+ goto retry;
+ }
+ ubi_err("cannot erase PEB %d, error %d", pnum, err);
+ ubi_dbg_dump_stack();
+ return err;
+ }
+
+ err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
+ ei.state == MTD_ERASE_FAILED);
+ if (err) {
+ ubi_err("interrupted PEB %d erasure", pnum);
+ return -EINTR;
+ }
+
+ if (ei.state == MTD_ERASE_FAILED) {
+ if (retries++ < UBI_IO_RETRIES) {
+ dbg_io("error while erasing PEB %d, retry", pnum);
+ yield();
+ goto retry;
+ }
+ ubi_err("cannot erase PEB %d", pnum);
+ ubi_dbg_dump_stack();
+ return -EIO;
+ }
+
+ err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ if (err)
+ return err > 0 ? -EINVAL : err;
+
+ if (ubi_dbg_is_erase_failure() && !err) {
+ dbg_err("cannot erase PEB %d (emulated)", pnum);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * check_pattern - check if buffer contains only a certain byte pattern.
+ * @buf: buffer to check
+ * @patt: the pattern to check
+ * @size: buffer size in bytes
+ *
+ * This function returns %1 if there are only @patt bytes in @buf, and %0 if
+ * something else was also found.
+ */
+static int check_pattern(const void *buf, uint8_t patt, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (((const uint8_t *)buf)[i] != patt)
+ return 0;
+ return 1;
+}
+
+/* Patterns to write to a physical eraseblock when torturing it */
+static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
+
+/**
+ * torture_peb - test a supposedly bad physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to test
+ *
+ * This function returns %-EIO if the physical eraseblock did not pass the
+ * test, a positive number of erase operations done if the test was
+ * successfully passed, and other negative error codes in case of other errors.
+ */
+static int torture_peb(struct ubi_device *ubi, int pnum)
+{
+ int err, i, patt_count;
+
+ patt_count = ARRAY_SIZE(patterns);
+ ubi_assert(patt_count > 0);
+
+ mutex_lock(&ubi->buf_mutex);
+ for (i = 0; i < patt_count; i++) {
+ err = do_sync_erase(ubi, pnum);
+ if (err)
+ goto out;
+
+ /* Make sure the PEB contains only 0xFF bytes */
+ err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ err = check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
+ if (err == 0) {
+ ubi_err("erased PEB %d, but a non-0xFF byte found",
+ pnum);
+ err = -EIO;
+ goto out;
+ }
+
+ /* Write a pattern and check it */
+ memset(ubi->peb_buf1, patterns[i], ubi->peb_size);
+ err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size);
+ err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ err = check_pattern(ubi->peb_buf1, patterns[i], ubi->peb_size);
+ if (err == 0) {
+ ubi_err("pattern %x checking failed for PEB %d",
+ patterns[i], pnum);
+ err = -EIO;
+ goto out;
+ }
+ }
+
+ err = patt_count;
+
+out:
+ mutex_unlock(&ubi->buf_mutex);
+ if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
+ /*
+ * If a bit-flip or data integrity error was detected, the test
+ * has not passed because it happened on a freshly erased
+ * physical eraseblock which means something is wrong with it.
+ */
+ ubi_err("read problems on freshly erased PEB %d, must be bad",
+ pnum);
+ err = -EIO;
+ }
+ return err;
+}
+
+/**
+ * ubi_io_sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to erase
+ * @torture: if this physical eraseblock has to be tortured
+ *
+ * This function synchronously erases physical eraseblock @pnum. If @torture
+ * flag is not zero, the physical eraseblock is checked by means of writing
+ * different patterns to it and reading them back. If the torturing is enabled,
+ * the physical eraseblock is erased more than once.
+ *
+ * This function returns the number of erasures made in case of success, %-EIO
+ * if the erasure failed or the torturing test failed, and other negative error
+ * codes in case of other errors. Note, %-EIO means that the physical
+ * eraseblock is bad.
+ */
+int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
+{
+ int err, ret = 0;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ err = paranoid_check_not_bad(ubi, pnum);
+ if (err != 0)
+ return err > 0 ? -EINVAL : err;
+
+ if (ubi->ro_mode) {
+ ubi_err("read-only mode");
+ return -EROFS;
+ }
+
+ if (torture) {
+ ret = torture_peb(ubi, pnum);
+ if (ret < 0)
+ return ret;
+ }
+
+ err = do_sync_erase(ubi, pnum);
+ if (err)
+ return err;
+
+ return ret + 1;
+}
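+
+/*
+ * Illustrative sketch, not from the original file: how a caller might pair
+ * ubi_io_sync_erase() with ubi_io_mark_bad(). The wrapper name
+ * erase_or_retire() is an assumption made for this example.
+ */
+static int erase_or_retire(struct ubi_device *ubi, int pnum, int torture)
+{
+	int err;
+
+	err = ubi_io_sync_erase(ubi, pnum, torture);
+	if (err >= 0)
+		/* 'err' erase operations were performed */
+		return 0;
+
+	if (err == -EIO && ubi->bad_allowed)
+		/* The PEB is most probably bad - retire it */
+		return ubi_io_mark_bad(ubi, pnum);
+
+	return err;
+}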
+
+/**
+ * ubi_io_is_bad - check if a physical eraseblock is bad.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns a positive number if the physical eraseblock is bad,
+ * zero if not, and a negative error code if an error occurred.
+ */
+int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
+{
+ struct mtd_info *mtd = ubi->mtd;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->bad_allowed) {
+ int ret;
+
+ ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
+ if (ret < 0)
+ ubi_err("error %d while checking if PEB %d is bad",
+ ret, pnum);
+ else if (ret)
+ dbg_io("PEB %d is bad", pnum);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_io_mark_bad - mark a physical eraseblock as bad.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to mark
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ struct mtd_info *mtd = ubi->mtd;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->ro_mode) {
+ ubi_err("read-only mode");
+ return -EROFS;
+ }
+
+ if (!ubi->bad_allowed)
+ return 0;
+
+ err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
+ if (err)
+ ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
+ return err;
+}
+
+/**
+ * validate_ec_hdr - validate an erase counter header.
+ * @ubi: UBI device description object
+ * @ec_hdr: the erase counter header to check
+ *
+ * This function returns zero if the erase counter header is OK, and %1 if
+ * not.
+ */
+static int validate_ec_hdr(const struct ubi_device *ubi,
+ const struct ubi_ec_hdr *ec_hdr)
+{
+ long long ec;
+ int vid_hdr_offset, leb_start;
+
+ ec = be64_to_cpu(ec_hdr->ec);
+ vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
+ leb_start = be32_to_cpu(ec_hdr->data_offset);
+
+ if (ec_hdr->version != UBI_VERSION) {
+ ubi_err("node with incompatible UBI version found: "
+ "this UBI version is %d, image version is %d",
+ UBI_VERSION, (int)ec_hdr->version);
+ goto bad;
+ }
+
+ if (vid_hdr_offset != ubi->vid_hdr_offset) {
+ ubi_err("bad VID header offset %d, expected %d",
+ vid_hdr_offset, ubi->vid_hdr_offset);
+ goto bad;
+ }
+
+ if (leb_start != ubi->leb_start) {
+ ubi_err("bad data offset %d, expected %d",
+ leb_start, ubi->leb_start);
+ goto bad;
+ }
+
+ if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
+ ubi_err("bad erase counter %lld", ec);
+ goto bad;
+ }
+
+ return 0;
+
+bad:
+ ubi_err("bad EC header");
+ ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_dbg_dump_stack();
+ return 1;
+}
+
+/**
+ * ubi_io_read_ec_hdr - read and check an erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to read from
+ * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
+ * header
+ * @verbose: be verbose if the header is corrupted or was not found
+ *
+ * This function reads erase counter header from physical eraseblock @pnum and
+ * stores it in @ec_hdr. This function also checks CRC checksum of the read
+ * erase counter header. The following codes may be returned:
+ *
+ * o %0 if the CRC checksum is correct and the header was successfully read;
+ * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
+ * and corrected by the flash driver; this is harmless but may indicate that
+ * this eraseblock may become bad soon (though it may not);
+ * o %UBI_IO_BAD_EC_HDR if the erase counter header is corrupted (a CRC error);
+ * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty;
+ * o a negative error code in case of failure.
+ */
+int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr, int verbose)
+{
+ int err, read_err = 0;
+ uint32_t crc, magic, hdr_crc;
+
+ dbg_io("read EC header from PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ if (UBI_IO_DEBUG)
+ verbose = 1;
+
+ err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
+ if (err) {
+ if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ return err;
+
+ /*
+ * We read all the data, but either a correctable bit-flip
+ * occurred, or MTD reported about some data integrity error,
+ * like an ECC error in case of NAND. The former is harmless,
+	 * the latter may mean that the read data is corrupted. But we
+	 * have a CRC check-sum and we will detect this. If the EC
+	 * header is still OK, we just report it as if there was a
+ * bit-flip.
+ */
+ read_err = err;
+ }
+
+ magic = be32_to_cpu(ec_hdr->magic);
+ if (magic != UBI_EC_HDR_MAGIC) {
+ /*
+ * The magic field is wrong. Let's check if we have read all
+ * 0xFF. If yes, this physical eraseblock is assumed to be
+ * empty.
+ *
+ * But if there was a read error, we do not test it for all
+ * 0xFFs. Even if it does contain all 0xFFs, this error
+ * indicates that something is still wrong with this physical
+ * eraseblock and we anyway cannot treat it as empty.
+ */
+ if (read_err != -EBADMSG &&
+ check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
+ /* The physical eraseblock is supposedly empty */
+
+ /*
+ * The below is just a paranoid check, it has to be
+ * compiled out if paranoid checks are disabled.
+ */
+ err = paranoid_check_all_ff(ubi, pnum, 0,
+ ubi->peb_size);
+ if (err)
+ return err > 0 ? UBI_IO_BAD_EC_HDR : err;
+
+ if (verbose)
+ ubi_warn("no EC header found at PEB %d, "
+ "only 0xFF bytes", pnum);
+ return UBI_IO_PEB_EMPTY;
+ }
+
+ /*
+ * This is not a valid erase counter header, and these are not
+ * 0xFF bytes. Report that the header is corrupted.
+ */
+ if (verbose) {
+ ubi_warn("bad magic number at PEB %d: %08x instead of "
+ "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+ ubi_dbg_dump_ec_hdr(ec_hdr);
+ }
+ return UBI_IO_BAD_EC_HDR;
+ }
+
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
+
+ if (hdr_crc != crc) {
+ if (verbose) {
+ ubi_warn("bad EC header CRC at PEB %d, calculated %#08x,"
+ " read %#08x", pnum, crc, hdr_crc);
+ ubi_dbg_dump_ec_hdr(ec_hdr);
+ }
+ return UBI_IO_BAD_EC_HDR;
+ }
+
+ /* And of course validate what has just been read from the media */
+ err = validate_ec_hdr(ubi, ec_hdr);
+ if (err) {
+ ubi_err("validation failed for PEB %d", pnum);
+ return -EINVAL;
+ }
+
+ return read_err ? UBI_IO_BITFLIPS : 0;
+}
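+
+/*
+ * Illustrative sketch, not from the original file: dispatching on the return
+ * codes documented above, roughly the way scanning code would. The helper
+ * name and its return-value convention are assumptions for this example.
+ */
+static int classify_peb_by_ec(struct ubi_device *ubi, int pnum,
+			      struct ubi_ec_hdr *ec_hdr)
+{
+	int err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+
+	switch (err) {
+	case 0:
+	case UBI_IO_BITFLIPS:
+		/* Valid header; bit-flips just suggest scrubbing the PEB */
+		return 0;
+	case UBI_IO_PEB_EMPTY:
+		/* Never used by UBI - may be erased and re-used */
+		return 1;
+	case UBI_IO_BAD_EC_HDR:
+		/* Corrupted header - the erase counter is lost */
+		return 2;
+	default:
+		/* A real read error */
+		return err;
+	}
+}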
+
+/**
+ * ubi_io_write_ec_hdr - write an erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to write to
+ * @ec_hdr: the erase counter header to write
+ *
+ * This function writes erase counter header described by @ec_hdr to physical
+ * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so
+ * the caller do not have to fill them. Callers must only fill the @ec_hdr->ec
+ * field.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If %-EIO is returned, the physical eraseblock most probably
+ * went bad.
+ */
+int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr)
+{
+ int err;
+ uint32_t crc;
+
+ dbg_io("write EC header to PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
+ ec_hdr->version = UBI_VERSION;
+ ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
+ ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ ec_hdr->hdr_crc = cpu_to_be32(crc);
+
+ err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ if (err)
+ return -EINVAL;
+
+ err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
+ return err;
+}
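+
+/*
+ * Illustrative sketch, not from the original file: per the comment above, a
+ * caller fills only @ec and lets ubi_io_write_ec_hdr() fill the rest. The
+ * helper name is an assumption made for this example.
+ */
+static int write_fresh_ec_hdr(struct ubi_device *ubi, int pnum, long long ec)
+{
+	struct ubi_ec_hdr *ec_hdr;
+	int err;
+
+	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+	if (!ec_hdr)
+		return -ENOMEM;
+
+	ec_hdr->ec = cpu_to_be64(ec);
+	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+	kfree(ec_hdr);
+	return err;
+}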
+
+/**
+ * validate_vid_hdr - validate a volume identifier header.
+ * @ubi: UBI device description object
+ * @vid_hdr: the volume identifier header to check
+ *
+ * This function checks the data stored in the volume identifier header
+ * @vid_hdr. It returns zero if the VID header is OK and %1 if not.
+ */
+static int validate_vid_hdr(const struct ubi_device *ubi,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ int vol_type = vid_hdr->vol_type;
+ int copy_flag = vid_hdr->copy_flag;
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int lnum = be32_to_cpu(vid_hdr->lnum);
+ int compat = vid_hdr->compat;
+ int data_size = be32_to_cpu(vid_hdr->data_size);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+ int data_crc = be32_to_cpu(vid_hdr->data_crc);
+ int usable_leb_size = ubi->leb_size - data_pad;
+
+ if (copy_flag != 0 && copy_flag != 1) {
+ dbg_err("bad copy_flag");
+ goto bad;
+ }
+
+ if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
+ data_pad < 0) {
+ dbg_err("negative values");
+ goto bad;
+ }
+
+ if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
+ dbg_err("bad vol_id");
+ goto bad;
+ }
+
+ if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
+ dbg_err("bad compat");
+ goto bad;
+ }
+
+ if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
+ compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
+ compat != UBI_COMPAT_REJECT) {
+ dbg_err("bad compat");
+ goto bad;
+ }
+
+ if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
+ dbg_err("bad vol_type");
+ goto bad;
+ }
+
+ if (data_pad >= ubi->leb_size / 2) {
+ dbg_err("bad data_pad");
+ goto bad;
+ }
+
+ if (vol_type == UBI_VID_STATIC) {
+ /*
+		 * Although from a high-level point of view static volumes may
+		 * contain zero bytes of data, no VID header can contain
+		 * zeros in these fields, because empty volumes do not have
+		 * mapped logical eraseblocks.
+ */
+ if (used_ebs == 0) {
+ dbg_err("zero used_ebs");
+ goto bad;
+ }
+ if (data_size == 0) {
+ dbg_err("zero data_size");
+ goto bad;
+ }
+ if (lnum < used_ebs - 1) {
+ if (data_size != usable_leb_size) {
+ dbg_err("bad data_size");
+ goto bad;
+ }
+ } else if (lnum == used_ebs - 1) {
+ if (data_size == 0) {
+ dbg_err("bad data_size at last LEB");
+ goto bad;
+ }
+ } else {
+ dbg_err("too high lnum");
+ goto bad;
+ }
+ } else {
+ if (copy_flag == 0) {
+ if (data_crc != 0) {
+ dbg_err("non-zero data CRC");
+ goto bad;
+ }
+ if (data_size != 0) {
+ dbg_err("non-zero data_size");
+ goto bad;
+ }
+ } else {
+ if (data_size == 0) {
+ dbg_err("zero data_size of copy");
+ goto bad;
+ }
+ }
+ if (used_ebs != 0) {
+ dbg_err("bad used_ebs");
+ goto bad;
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err("bad VID header");
+ ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_dbg_dump_stack();
+ return 1;
+}
+
+/**
+ * ubi_io_read_vid_hdr - read and check a volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to read from
+ * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume
+ * identifier header
+ * @verbose: be verbose if the header is corrupted or wasn't found
+ *
+ * This function reads the volume identifier header from physical eraseblock
+ * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
+ * volume identifier header. The following codes may be returned:
+ *
+ * o %0 if the CRC checksum is correct and the header was successfully read;
+ * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
+ * and corrected by the flash driver; this is harmless but may indicate that
+ * this eraseblock may become bad soon;
+ * o %UBI_IO_BAD_VID_HDR if the volume identifier header is corrupted (a CRC
+ * error detected);
+ * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID
+ * header there);
+ * o a negative error code in case of failure.
+ */
+int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr, int verbose)
+{
+ int err, read_err = 0;
+ uint32_t crc, magic, hdr_crc;
+ void *p;
+
+ dbg_io("read VID header from PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ if (UBI_IO_DEBUG)
+ verbose = 1;
+
+ p = (char *)vid_hdr - ubi->vid_hdr_shift;
+ err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ if (err) {
+ if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ return err;
+
+ /*
+ * We read all the data, but either a correctable bit-flip
+ * occurred, or MTD reported about some data integrity error,
+ * like an ECC error in case of NAND. The former is harmless,
+	 * the latter may mean the read data is corrupted. But we have a
+ * CRC check-sum and we will identify this. If the VID header is
+ * still OK, we just report this as there was a bit-flip.
+ */
+ read_err = err;
+ }
+
+ magic = be32_to_cpu(vid_hdr->magic);
+ if (magic != UBI_VID_HDR_MAGIC) {
+ /*
+ * If we have read all 0xFF bytes, the VID header probably does
+ * not exist and the physical eraseblock is assumed to be free.
+ *
+ * But if there was a read error, we do not test the data for
+ * 0xFFs. Even if it does contain all 0xFFs, this error
+ * indicates that something is still wrong with this physical
+ * eraseblock and it cannot be regarded as free.
+ */
+ if (read_err != -EBADMSG &&
+ check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
+ /* The physical eraseblock is supposedly free */
+
+ /*
+ * The below is just a paranoid check, it has to be
+ * compiled out if paranoid checks are disabled.
+ */
+ err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start,
+ ubi->leb_size);
+ if (err)
+ return err > 0 ? UBI_IO_BAD_VID_HDR : err;
+
+ if (verbose)
+ ubi_warn("no VID header found at PEB %d, "
+ "only 0xFF bytes", pnum);
+ return UBI_IO_PEB_FREE;
+ }
+
+ /*
+ * This is not a valid VID header, and these are not 0xFF
+ * bytes. Report that the header is corrupted.
+ */
+ if (verbose) {
+ ubi_warn("bad magic number at PEB %d: %08x instead of "
+ "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+ ubi_dbg_dump_vid_hdr(vid_hdr);
+ }
+ return UBI_IO_BAD_VID_HDR;
+ }
+
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
+
+ if (hdr_crc != crc) {
+ if (verbose) {
+ ubi_warn("bad CRC at PEB %d, calculated %#08x, "
+ "read %#08x", pnum, crc, hdr_crc);
+ ubi_dbg_dump_vid_hdr(vid_hdr);
+ }
+ return UBI_IO_BAD_VID_HDR;
+ }
+
+ /* Validate the VID header that we have just read */
+ err = validate_vid_hdr(ubi, vid_hdr);
+ if (err) {
+ ubi_err("validation failed for PEB %d", pnum);
+ return -EINVAL;
+ }
+
+ return read_err ? UBI_IO_BITFLIPS : 0;
+}
+
+/**
+ * ubi_io_write_vid_hdr - write a volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to write to
+ * @vid_hdr: the volume identifier header to write
+ *
+ * This function writes the volume identifier header described by @vid_hdr to
+ * physical eraseblock @pnum. This function automatically fills the
+ * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates
+ * header CRC checksum and stores it at vid_hdr->hdr_crc.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If %-EIO is returned, the physical eraseblock probably went
+ * bad.
+ */
+int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr)
+{
+ int err;
+ uint32_t crc;
+ void *p;
+
+ dbg_io("write VID header to PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ if (err)
+ return err > 0 ? -EINVAL: err;
+
+ vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
+ vid_hdr->version = UBI_VERSION;
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ vid_hdr->hdr_crc = cpu_to_be32(crc);
+
+ err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ if (err)
+ return -EINVAL;
+
+ p = (char *)vid_hdr - ubi->vid_hdr_shift;
+ err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ return err;
+}
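+
+/*
+ * Illustrative sketch, not from the original file: writing a minimal VID
+ * header for a dynamic volume. Only the non-zero fields are set here; the
+ * zeroed defaults from ubi_zalloc_vid_hdr() satisfy the remaining checks in
+ * validate_vid_hdr(), and magic, version and CRC are filled automatically.
+ * The helper name is an assumption made for this example.
+ */
+static int write_minimal_vid_hdr(struct ubi_device *ubi, int pnum,
+				 int vol_id, int lnum)
+{
+	struct ubi_vid_hdr *vid_hdr;
+	int err;
+
+	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+	if (!vid_hdr)
+		return -ENOMEM;
+
+	vid_hdr->vol_type = UBI_VID_DYNAMIC;
+	vid_hdr->vol_id = cpu_to_be32(vol_id);
+	vid_hdr->lnum = cpu_to_be32(lnum);
+
+	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	return err;
+}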
+
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+
+/**
+ * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to check
+ *
+ * This function returns zero if the physical eraseblock is good, a positive
+ * number if it is bad and a negative error code if an error occurred.
+ */
+static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+
+ err = ubi_io_is_bad(ubi, pnum);
+ if (!err)
+ return err;
+
+ ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_dbg_dump_stack();
+ return err;
+}
+
+/**
+ * paranoid_check_ec_hdr - check if an erase counter header is all right.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number the erase counter header belongs to
+ * @ec_hdr: the erase counter header to check
+ *
+ * This function returns zero if the erase counter header contains valid
+ * values, and %1 if not.
+ */
+static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr)
+{
+ int err;
+ uint32_t magic;
+
+ magic = be32_to_cpu(ec_hdr->magic);
+ if (magic != UBI_EC_HDR_MAGIC) {
+ ubi_err("bad magic %#08x, must be %#08x",
+ magic, UBI_EC_HDR_MAGIC);
+ goto fail;
+ }
+
+ err = validate_ec_hdr(ubi, ec_hdr);
+ if (err) {
+ ubi_err("paranoid check failed for PEB %d", pnum);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_dbg_dump_stack();
+ return 1;
+}
+
+/**
+ * paranoid_check_peb_ec_hdr - check that the erase counter header of a
+ * physical eraseblock is in-place and is all right.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns zero if the erase counter header is all right, %1 if
+ * not, and a negative error code if an error occurred.
+ */
+static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ uint32_t crc, hdr_crc;
+ struct ubi_ec_hdr *ec_hdr;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
+ if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ goto exit;
+
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
+ if (hdr_crc != crc) {
+ ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
+ ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_dbg_dump_stack();
+ err = 1;
+ goto exit;
+ }
+
+ err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+
+exit:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * paranoid_check_vid_hdr - check that a volume identifier header is all right.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number the volume identifier header belongs to
+ * @vid_hdr: the volume identifier header to check
+ *
+ * This function returns zero if the volume identifier header is all right, and
+ * %1 if not.
+ */
+static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ int err;
+ uint32_t magic;
+
+ magic = be32_to_cpu(vid_hdr->magic);
+ if (magic != UBI_VID_HDR_MAGIC) {
+ ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
+ magic, pnum, UBI_VID_HDR_MAGIC);
+ goto fail;
+ }
+
+ err = validate_vid_hdr(ubi, vid_hdr);
+ if (err) {
+ ubi_err("paranoid check failed for PEB %d", pnum);
+ goto fail;
+ }
+
+ return err;
+
+fail:
+ ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_dbg_dump_stack();
+ return 1;
+
+}
+
+/**
+ * paranoid_check_peb_vid_hdr - check that the volume identifier header of a
+ * physical eraseblock is in-place and is all right.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns zero if the volume identifier header is all right,
+ * %1 if not, and a negative error code if an error occurred.
+ */
+static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ uint32_t crc, hdr_crc;
+ struct ubi_vid_hdr *vid_hdr;
+ void *p;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ p = (char *)vid_hdr - ubi->vid_hdr_shift;
+ err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ goto exit;
+
+	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
+ if (hdr_crc != crc) {
+ ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
+ "read %#08x", pnum, crc, hdr_crc);
+ ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_dbg_dump_stack();
+ err = 1;
+ goto exit;
+ }
+
+ err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+
+exit:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+}
+
+/**
+ * paranoid_check_all_ff - check that a region of flash is empty.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ * @offset: the starting offset within the physical eraseblock to check
+ * @len: the length of the region to check
+ *
+ * This function returns zero if only 0xFF bytes are present at offset
+ * @offset of the physical eraseblock @pnum, %1 if not, and a negative error
+ * code if an error occurred.
+ */
+static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+ int len)
+{
+ size_t read;
+ int err;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ mutex_lock(&ubi->dbg_buf_mutex);
+ err = ubi->mtd->read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf);
+ if (err && err != -EUCLEAN) {
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, "
+ "read %zd bytes", err, len, pnum, offset, read);
+ goto error;
+ }
+
+ err = check_pattern(ubi->dbg_peb_buf, 0xFF, len);
+ if (err == 0) {
+ ubi_err("flash region at PEB %d:%d, length %d does not "
+ "contain all 0xFF bytes", pnum, offset, len);
+ goto fail;
+ }
+ mutex_unlock(&ubi->dbg_buf_mutex);
+
+ return 0;
+
+fail:
+ ubi_err("paranoid check failed for PEB %d", pnum);
+ dbg_msg("hex dump of the %d-%d region", offset, offset + len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ ubi->dbg_peb_buf, len, 1);
+ err = 1;
+error:
+ ubi_dbg_dump_stack();
+ mutex_unlock(&ubi->dbg_buf_mutex);
+ return err;
+}
+
+#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/u-boot/drivers/mtd/ubi/kapi.c b/u-boot/drivers/mtd/ubi/kapi.c
new file mode 100644
index 0000000..423d479
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/kapi.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/* This file mostly implements UBI kernel API functions */
+
+#ifdef UBI_LINUX
+#include <linux/module.h>
+#include <linux/err.h>
+#include <asm/div64.h>
+#endif
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+/**
+ * ubi_get_device_info - get information about UBI device.
+ * @ubi_num: UBI device number
+ * @di: the information is stored here
+ *
+ * This function returns %0 in case of success, %-EINVAL if the UBI device
+ * number is invalid, and %-ENODEV if there is no such UBI device.
+ */
+int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
+{
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ di->ubi_num = ubi->ubi_num;
+ di->leb_size = ubi->leb_size;
+ di->min_io_size = ubi->min_io_size;
+ di->ro_mode = ubi->ro_mode;
+ di->cdev = ubi->cdev.dev;
+
+ ubi_put_device(ubi);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ubi_get_device_info);
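+
+/*
+ * Illustrative sketch, not from the original file: a caller using the device
+ * geometry reported above to validate an I/O request. The helper name is an
+ * assumption made for this example.
+ */
+static int leb_write_size_ok(int ubi_num, int len)
+{
+	struct ubi_device_info di;
+	int err;
+
+	err = ubi_get_device_info(ubi_num, &di);
+	if (err)
+		return err;
+
+	/* Writes must be min_io_size-aligned and must fit into one LEB */
+	return len <= di.leb_size && !(len % di.min_io_size);
+}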
+
+/**
+ * ubi_get_volume_info - get information about UBI volume.
+ * @desc: volume descriptor
+ * @vi: the information is stored here
+ */
+void ubi_get_volume_info(struct ubi_volume_desc *desc,
+ struct ubi_volume_info *vi)
+{
+ const struct ubi_volume *vol = desc->vol;
+ const struct ubi_device *ubi = vol->ubi;
+
+ vi->vol_id = vol->vol_id;
+ vi->ubi_num = ubi->ubi_num;
+ vi->size = vol->reserved_pebs;
+ vi->used_bytes = vol->used_bytes;
+ vi->vol_type = vol->vol_type;
+ vi->corrupted = vol->corrupted;
+ vi->upd_marker = vol->upd_marker;
+ vi->alignment = vol->alignment;
+ vi->usable_leb_size = vol->usable_leb_size;
+ vi->name_len = vol->name_len;
+ vi->name = vol->name;
+ vi->cdev = vol->cdev.dev;
+}
+EXPORT_SYMBOL_GPL(ubi_get_volume_info);
+
+/**
+ * ubi_open_volume - open UBI volume.
+ * @ubi_num: UBI device number
+ * @vol_id: volume ID
+ * @mode: open mode
+ *
+ * The @mode parameter specifies if the volume should be opened in read-only
+ * mode, read-write mode, or exclusive mode. The exclusive mode guarantees that
+ * nobody else will be able to open this volume. UBI allows many volume
+ * readers but only one writer at a time.
+ *
+ * If a static volume is being opened for the first time since boot, it will be
+ * checked by this function, which means it will be fully read and the CRC
+ * checksum of each logical eraseblock will be checked.
+ *
+ * This function returns volume descriptor in case of success and a negative
+ * error code in case of failure.
+ */
+struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
+{
+ int err;
+ struct ubi_volume_desc *desc;
+ struct ubi_device *ubi;
+ struct ubi_volume *vol;
+
+ dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ if (mode != UBI_READONLY && mode != UBI_READWRITE &&
+ mode != UBI_EXCLUSIVE)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * First of all, we have to get the UBI device to prevent its removal.
+ */
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
+ err = -EINVAL;
+ goto out_put_ubi;
+ }
+
+ desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_put_ubi;
+ }
+
+ err = -ENODEV;
+ if (!try_module_get(THIS_MODULE))
+ goto out_free;
+
+ spin_lock(&ubi->volumes_lock);
+ vol = ubi->volumes[vol_id];
+ if (!vol)
+ goto out_unlock;
+
+ err = -EBUSY;
+ switch (mode) {
+ case UBI_READONLY:
+ if (vol->exclusive)
+ goto out_unlock;
+ vol->readers += 1;
+ break;
+
+ case UBI_READWRITE:
+ if (vol->exclusive || vol->writers > 0)
+ goto out_unlock;
+ vol->writers += 1;
+ break;
+
+ case UBI_EXCLUSIVE:
+ if (vol->exclusive || vol->writers || vol->readers)
+ goto out_unlock;
+ vol->exclusive = 1;
+ break;
+ }
+ get_device(&vol->dev);
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ desc->vol = vol;
+ desc->mode = mode;
+
+ mutex_lock(&ubi->ckvol_mutex);
+ if (!vol->checked) {
+ /* This is the first open - check the volume */
+ err = ubi_check_volume(ubi, vol_id);
+ if (err < 0) {
+ mutex_unlock(&ubi->ckvol_mutex);
+ ubi_close_volume(desc);
+ return ERR_PTR(err);
+ }
+ if (err == 1) {
+ ubi_warn("volume %d on UBI device %d is corrupted",
+ vol_id, ubi->ubi_num);
+ vol->corrupted = 1;
+ }
+ vol->checked = 1;
+ }
+ mutex_unlock(&ubi->ckvol_mutex);
+
+ return desc;
+
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ module_put(THIS_MODULE);
+out_free:
+ kfree(desc);
+out_put_ubi:
+ ubi_put_device(ubi);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume);
+
+/**
+ * ubi_open_volume_nm - open UBI volume by name.
+ * @ubi_num: UBI device number
+ * @name: volume name
+ * @mode: open mode
+ *
+ * This function is similar to 'ubi_open_volume()', but opens a volume by name.
+ */
+struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
+ int mode)
+{
+ int i, vol_id = -1, len;
+ struct ubi_device *ubi;
+ struct ubi_volume_desc *ret;
+
+ dbg_msg("open volume %s, mode %d", name, mode);
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len > UBI_VOL_NAME_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
+
+ spin_lock(&ubi->volumes_lock);
+ /* Walk all volumes of this UBI device */
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ struct ubi_volume *vol = ubi->volumes[i];
+
+ if (vol && len == vol->name_len && !strcmp(name, vol->name)) {
+ vol_id = i;
+ break;
+ }
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ if (vol_id >= 0)
+ ret = ubi_open_volume(ubi_num, vol_id, mode);
+ else
+ ret = ERR_PTR(-ENODEV);
+
+ /*
+ * We should put the UBI device even in case of success, because
+ * 'ubi_open_volume()' took a reference as well.
+ */
+ ubi_put_device(ubi);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
+
+/**
+ * ubi_close_volume - close UBI volume.
+ * @desc: volume descriptor
+ */
+void ubi_close_volume(struct ubi_volume_desc *desc)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
+
+ spin_lock(&ubi->volumes_lock);
+ switch (desc->mode) {
+ case UBI_READONLY:
+ vol->readers -= 1;
+ break;
+ case UBI_READWRITE:
+ vol->writers -= 1;
+ break;
+ case UBI_EXCLUSIVE:
+ vol->exclusive = 0;
+ }
+ vol->ref_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ kfree(desc);
+ put_device(&vol->dev);
+ ubi_put_device(ubi);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(ubi_close_volume);
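+
+/*
+ * Illustrative sketch, not from the original file: the usual open/use/close
+ * pattern for the two functions above. Note that ubi_open_volume() returns
+ * an error pointer rather than NULL on failure. The function name is an
+ * assumption made for this example.
+ */
+static int dump_volume_info(int ubi_num, int vol_id)
+{
+	struct ubi_volume_desc *desc;
+	struct ubi_volume_info vi;
+
+	desc = ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+
+	ubi_get_volume_info(desc, &vi);
+	dbg_msg("volume \"%s\": %d LEBs of %d usable bytes",
+		vi.name, vi.size, vi.usable_leb_size);
+
+	ubi_close_volume(desc);
+	return 0;
+}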
+
+/**
+ * ubi_leb_read - read data.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @buf: buffer where to store the read data
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ * @check: whether UBI has to check the read data's CRC or not.
+ *
+ * This function reads data from offset @offset of logical eraseblock @lnum and
+ * stores the data at @buf. When reading from static volumes, @check specifies
+ * whether the data has to be checked or not. If yes, the whole logical
+ * eraseblock will be read and its CRC checksum will be checked (i.e., the CRC
+ * checksum is per-eraseblock). So checking may substantially slow down the
+ * read speed. The @check argument is ignored for dynamic volumes.
+ *
+ * In case of success, this function returns zero. In case of failure, this
+ * function returns a negative error code.
+ *
+ * %-EBADMSG error code is returned:
+ * o for both static and dynamic volumes if MTD driver has detected a data
+ * integrity problem (unrecoverable ECC checksum mismatch in case of NAND);
+ * o for static volumes in case of data CRC mismatch.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF error code.
+ */
+int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
+ int len, int check)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, vol_id = vol->vol_id;
+
+ dbg_msg("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
+ lnum >= vol->used_ebs || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size)
+ return -EINVAL;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ if (vol->used_ebs == 0)
+ /* Empty static UBI volume */
+ return 0;
+ if (lnum == vol->used_ebs - 1 &&
+ offset + len > vol->last_eb_bytes)
+ return -EINVAL;
+ }
+
+ if (vol->upd_marker)
+ return -EBADF;
+ if (len == 0)
+ return 0;
+
+ err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
+ if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
+ ubi_warn("mark volume %d as corrupted", vol_id);
+ vol->corrupted = 1;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_leb_read);
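+
+/*
+ * Illustrative sketch, not from the original file: reading @len bytes from
+ * the start of a LEB with CRC checking enabled, as described above. The
+ * helper name is an assumption made for this example.
+ */
+static int read_leb_start(struct ubi_volume_desc *desc, int lnum, char *buf,
+			  int len)
+{
+	int err;
+
+	/* check=1: on a static volume the whole LEB's CRC is verified */
+	err = ubi_leb_read(desc, lnum, buf, 0, len, 1);
+	if (err == -EBADMSG)
+		dbg_msg("LEB %d of volume %d is corrupted",
+			lnum, desc->vol->vol_id);
+	return err;
+}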
+
+/**
+ * ubi_leb_write - write data.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to write to
+ * @buf: data to write
+ * @offset: offset within the logical eraseblock where to write
+ * @len: how many bytes to write
+ * @dtype: expected data type
+ *
+ * This function writes @len bytes of data from @buf to offset @offset of
+ * logical eraseblock @lnum. The @dtype argument describes expected lifetime of
+ * the data.
+ *
+ * This function takes care of physical eraseblock write failures. If the write
+ * to the physical eraseblock fails, the logical eraseblock is
+ * re-mapped to another physical eraseblock, the data is recovered, and the
+ * write finishes. UBI has a pool of reserved physical eraseblocks for this.
+ *
+ * If all the data were successfully written, zero is returned. If an error
+ * occurred and UBI has not been able to recover from it, this function returns
+ * a negative error code. Note, in case of an error, it is possible that
+ * something was still written to the flash media, but that may be some
+ * garbage.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF code.
+ */
+int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int offset, int len, int dtype)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ dbg_msg("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size ||
+ offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
+ return -EINVAL;
+
+ if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
+ dtype != UBI_UNKNOWN)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (len == 0)
+ return 0;
+
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_write);
+
+/**
+ * ubi_leb_change - change logical eraseblock atomically.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to change
+ * @buf: data to write
+ * @len: how many bytes to write
+ * @dtype: expected data type
+ *
+ * This function changes the contents of a logical eraseblock atomically. @buf
+ * has to contain new logical eraseblock data, and @len - the length of the
+ * data, which has to be aligned. The length may be shorter than the logical
+ * eraseblock size, and the logical eraseblock may be appended to more times
+ * later on. This function guarantees that in case of an unclean reboot the old
+ * contents are preserved. Returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int len, int dtype)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 ||
+ len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
+ return -EINVAL;
+
+ if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
+ dtype != UBI_UNKNOWN)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (len == 0)
+ return 0;
+
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_change);
+
+/**
+ * ubi_leb_erase - erase logical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and synchronously erases the
+ * correspondent physical eraseblock. Returns zero in case of success and a
+ * negative error code in case of failure.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF code.
+ */
+int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err;
+
+ dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ return err;
+
+ return ubi_wl_flush(ubi);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_erase);
+
+/**
+ * ubi_leb_unmap - un-map logical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and schedules the
+ * corresponding physical eraseblock for erasure, so that it will eventually be
+ * physically erased in the background. This operation is much faster than the
+ * erase operation.
+ *
+ * Unlike erase, the un-map operation does not guarantee that the logical
+ * eraseblock will contain all 0xFF bytes when UBI is initialized again. For
+ * example, if several logical eraseblocks are un-mapped, and an unclean reboot
+ * happens after this, the logical eraseblocks will not necessarily be
+ * un-mapped again when this MTD device is attached. They may actually be
+ * mapped to the same physical eraseblocks again. So, this function has to be
+ * used with care.
+ *
+ * In other words, when un-mapping a logical eraseblock, UBI does not store
+ * any information about this on the flash media, it just marks the logical
+ * eraseblock as "un-mapped" in RAM. If UBI is detached before the physical
+ * eraseblock is physically erased, it will be mapped again to the same logical
+ * eraseblock when the MTD device is attached again.
+ *
+ * The main and obvious use-case of this function is when the contents of a
+ * logical eraseblock has to be re-written. Then it is much more efficient to
+ * first un-map it, then write new data, rather than first erase it, then write
+ * new data. Note, once new data has been written to the logical eraseblock,
+ * UBI guarantees that the old contents are gone forever. In other words, if an
+ * unclean reboot happens after the logical eraseblock has been un-mapped and
+ * then written to, it will contain the last written data.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If the volume is damaged because of an interrupted update
+ * this function just returns immediately with %-EBADF code.
+ */
+int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return ubi_eba_unmap_leb(ubi, vol, lnum);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_unmap);
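+
+/*
+ * Illustrative sketch, not from the original file: the re-write pattern
+ * described above - un-map the LEB and then write fresh data instead of
+ * doing a slow synchronous erase first. The helper name is an assumption
+ * made for this example.
+ */
+static int rewrite_leb(struct ubi_volume_desc *desc, int lnum,
+		       const void *buf, int len)
+{
+	int err;
+
+	err = ubi_leb_unmap(desc, lnum);
+	if (err)
+		return err;
+
+	/* @len must be aligned to the minimal I/O unit of the device */
+	return ubi_leb_write(desc, lnum, buf, 0, len, UBI_UNKNOWN);
+}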
+
+/**
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ * @dtype: expected data type
+ *
+ * This function maps an un-mapped logical eraseblock @lnum to a physical
+ * eraseblock. This means that after a successful invocation of this
+ * function the logical eraseblock @lnum will be empty (contain only %0xFF
+ * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
+ * happens.
+ *
+ * This function returns zero in case of success, %-EBADF if the volume is
+ * damaged because of an interrupted update, %-EBADMSG if the logical
+ * eraseblock is already mapped, and other negative error codes in case of
+ * other failures.
+ */
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+	dbg_msg("map LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
+ dtype != UBI_UNKNOWN)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (vol->eba_tbl[lnum] >= 0)
+ return -EBADMSG;
+
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_map);
+
+/**
+ * ubi_is_mapped - check if logical eraseblock is mapped.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function checks if logical eraseblock @lnum is mapped to a physical
+ * eraseblock. If a logical eraseblock is un-mapped, this does not necessarily
+ * mean it will still be un-mapped after the UBI device is re-attached. The
+ * logical eraseblock may become mapped to the physical eraseblock it was last
+ * mapped to.
+ *
+ * This function returns %1 if the LEB is mapped, %0 if not, and a negative
+ * error code in case of failure. If the volume is damaged because of an
+ * interrupted update this function just returns immediately with %-EBADF error
+ * code.
+ */
+int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+
+ dbg_msg("test LEB %d:%d", vol->vol_id, lnum);
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return vol->eba_tbl[lnum] >= 0;
+}
+EXPORT_SYMBOL_GPL(ubi_is_mapped);
diff --git a/u-boot/drivers/mtd/ubi/misc.c b/u-boot/drivers/mtd/ubi/misc.c
new file mode 100644
index 0000000..a6410bf
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/misc.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/* Here we keep miscellaneous functions which are used all over the UBI code */
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+/**
+ * calc_data_len - calculate how much real data is stored in a buffer.
+ * @ubi: UBI device description object
+ * @buf: a buffer with the contents of the physical eraseblock
+ * @length: the buffer length
+ *
+ * This function calculates how much "real data" is stored in @buf and returns
+ * the length. Continuous 0xFF bytes at the end of the buffer are not
+ * considered as "real data".
+ */
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
+ int length)
+{
+ int i;
+
+ ubi_assert(!(length & (ubi->min_io_size - 1)));
+
+ for (i = length - 1; i >= 0; i--)
+ if (((const uint8_t *)buf)[i] != 0xFF)
+ break;
+
+ /* The resulting length must be aligned to the minimum flash I/O size */
+ length = ALIGN(i + 1, ubi->min_io_size);
+ return length;
+}
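+
+/*
+ * Illustrative sketch, not from the original file: trimming trailing 0xFF
+ * bytes before an atomic LEB change so that only the "real data" is written.
+ * For example, a 4096-byte buffer whose last 2048 bytes are 0xFF gives a
+ * trimmed length of 2048 when min_io_size is 512. The helper name is an
+ * assumption made for this example.
+ */
+static int change_leb_trimmed(struct ubi_device *ubi,
+			      struct ubi_volume_desc *desc, int lnum,
+			      const void *buf, int length)
+{
+	/* @length must be a multiple of the minimal flash I/O unit */
+	int len = ubi_calc_data_len(ubi, buf, length);
+
+	return ubi_leb_change(desc, lnum, buf, len, UBI_UNKNOWN);
+}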
+
+/**
+ * ubi_check_volume - check the contents of a static volume.
+ * @ubi: UBI device description object
+ * @vol_id: ID of the volume to check
+ *
+ * This function checks if static volume @vol_id is corrupted by fully reading
+ * it and checking data CRC. This function returns %0 if the volume is not
+ * corrupted, %1 if it is corrupted and a negative error code in case of
+ * failure. Dynamic volumes are not checked and zero is returned immediately.
+ */
+int ubi_check_volume(struct ubi_device *ubi, int vol_id)
+{
+ void *buf;
+ int err = 0, i;
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+
+ if (vol->vol_type != UBI_STATIC_VOLUME)
+ return 0;
+
+ buf = vmalloc(vol->usable_leb_size);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < vol->used_ebs; i++) {
+ int size;
+
+ if (i == vol->used_ebs - 1)
+ size = vol->last_eb_bytes;
+ else
+ size = vol->usable_leb_size;
+
+ err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
+ if (err) {
+ if (err == -EBADMSG)
+ err = 1;
+ break;
+ }
+ }
+
+ vfree(buf);
+ return err;
+}
+
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
+ * eraseblock handling.
+ * @ubi: UBI device description object
+ */
+void ubi_calculate_reserved(struct ubi_device *ubi)
+{
+ ubi->beb_rsvd_level = ubi->good_peb_count/100;
+ ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE;
+ if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
+ ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
+}
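+
+/*
+ * Worked example, not from the original file: with 4096 good PEBs and the
+ * reserve percentage CONFIG_MTD_UBI_BEB_RESERVE assumed to be 1, the pool is
+ * 4096 / 100 * 1 = 40 PEBs. A very small device (say 150 good PEBs) would
+ * compute 1 and be rounded up to MIN_RESEVED_PEBS.
+ */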
diff --git a/u-boot/drivers/mtd/ubi/scan.c b/u-boot/drivers/mtd/ubi/scan.c
new file mode 100644
index 0000000..d5c1d27
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/scan.c
@@ -0,0 +1,1360 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI scanning unit.
+ *
+ * This unit is responsible for scanning the flash media, checking UBI
+ * headers and providing complete information about the UBI flash image.
+ *
+ * The scanning information is represented by a &struct ubi_scan_info object.
+ * Information about found volumes is represented by &struct ubi_scan_volume
+ * objects which are kept in volume RB-tree with root at the @volumes field.
+ * The RB-tree is indexed by the volume ID.
+ *
+ * Found logical eraseblocks are represented by &struct ubi_scan_leb objects.
+ * These objects are kept in per-volume RB-trees with the root at the
+ * corresponding &struct ubi_scan_volume object. To put it differently, we keep
+ * an RB-tree of per-volume objects and each of these objects is the root of
+ * RB-tree of per-eraseblock objects.
+ *
+ * Corrupted physical eraseblocks are put to the @corr list, free physical
+ * eraseblocks are put to the @free list and the physical eraseblocks to be
+ * erased are put to the @erase list.
+ */
+
+#ifdef UBI_LINUX
+#include <linux/err.h>
+#include <linux/crc32.h>
+#include <asm/div64.h>
+#endif
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
+#else
+#define paranoid_check_si(ubi, si) 0
+#endif
+
+/* Temporary variables used during scanning */
+static struct ubi_ec_hdr *ech;
+static struct ubi_vid_hdr *vidh;
+
+/**
+ * add_to_list - add physical eraseblock to a list.
+ * @si: scanning information
+ * @pnum: physical eraseblock number to add
+ * @ec: erase counter of the physical eraseblock
+ * @list: the list to add to
+ *
+ * This function adds physical eraseblock @pnum to free, erase, corrupted or
+ * alien lists. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
+ struct list_head *list)
+{
+ struct ubi_scan_leb *seb;
+
+ if (list == &si->free)
+ dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
+ else if (list == &si->erase)
+ dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
+ else if (list == &si->corr)
+ dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
+ else if (list == &si->alien)
+ dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
+ else
+ BUG();
+
+ seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ if (!seb)
+ return -ENOMEM;
+
+ seb->pnum = pnum;
+ seb->ec = ec;
+ list_add_tail(&seb->u.list, list);
+ return 0;
+}
+
+/**
+ * validate_vid_hdr - check that volume identifier header is correct and
+ * consistent.
+ * @vid_hdr: the volume identifier header to check
+ * @sv: information about the volume this logical eraseblock belongs to
+ * @pnum: physical eraseblock number the VID header came from
+ *
+ * This function checks that data stored in @vid_hdr is consistent. Returns
+ * non-zero if an inconsistency was found and zero if not.
+ *
+ * Note, UBI does sanity check of everything it reads from the flash media.
+ * Most of the checks are done in the I/O unit. Here we check that the
+ * information in the VID header is consistent to the information in other VID
+ * headers of the same volume.
+ */
+static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
+ const struct ubi_scan_volume *sv, int pnum)
+{
+ int vol_type = vid_hdr->vol_type;
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+
+ if (sv->leb_count != 0) {
+ int sv_vol_type;
+
+ /*
+ * This is not the first logical eraseblock belonging to this
+ * volume. Ensure that the data in its VID header is consistent
+ * to the data in previous logical eraseblock headers.
+ */
+
+ if (vol_id != sv->vol_id) {
+ dbg_err("inconsistent vol_id");
+ goto bad;
+ }
+
+ if (sv->vol_type == UBI_STATIC_VOLUME)
+ sv_vol_type = UBI_VID_STATIC;
+ else
+ sv_vol_type = UBI_VID_DYNAMIC;
+
+ if (vol_type != sv_vol_type) {
+ dbg_err("inconsistent vol_type");
+ goto bad;
+ }
+
+ if (used_ebs != sv->used_ebs) {
+ dbg_err("inconsistent used_ebs");
+ goto bad;
+ }
+
+ if (data_pad != sv->data_pad) {
+ dbg_err("inconsistent data_pad");
+ goto bad;
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err("inconsistent VID header at PEB %d", pnum);
+ ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_dbg_dump_sv(sv);
+ return -EINVAL;
+}
+
+/**
+ * add_volume - add volume to the scanning information.
+ * @si: scanning information
+ * @vol_id: ID of the volume to add
+ * @pnum: physical eraseblock number
+ * @vid_hdr: volume identifier header
+ *
+ * If the volume corresponding to the @vid_hdr logical eraseblock is already
+ * present in the scanning information, this function does nothing. Otherwise
+ * it adds corresponding volume to the scanning information. Returns a pointer
+ * to the scanning volume object in case of success and a negative error code
+ * in case of failure.
+ */
+static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
+ int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ struct ubi_scan_volume *sv;
+ struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
+
+ ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
+
+ /* Walk the volume RB-tree to look if this volume is already present */
+ while (*p) {
+ parent = *p;
+ sv = rb_entry(parent, struct ubi_scan_volume, rb);
+
+ if (vol_id == sv->vol_id)
+ return sv;
+
+ if (vol_id > sv->vol_id)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ /* The volume is absent - add it */
+ sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL);
+ if (!sv)
+ return ERR_PTR(-ENOMEM);
+
+ sv->highest_lnum = sv->leb_count = 0;
+ sv->vol_id = vol_id;
+ sv->root = RB_ROOT;
+ sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
+ sv->compat = vid_hdr->compat;
+ sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+ : UBI_STATIC_VOLUME;
+ if (vol_id > si->highest_vol_id)
+ si->highest_vol_id = vol_id;
+
+ rb_link_node(&sv->rb, parent, p);
+ rb_insert_color(&sv->rb, &si->volumes);
+ si->vols_found += 1;
+ dbg_bld("added volume %d", vol_id);
+ return sv;
+}
+
+/**
+ * compare_lebs - find out which logical eraseblock is newer.
+ * @ubi: UBI device description object
+ * @seb: first logical eraseblock to compare
+ * @pnum: physical eraseblock number of the second logical eraseblock to
+ * compare
+ * @vid_hdr: volume identifier header of the second logical eraseblock
+ *
+ * This function compares 2 copies of a LEB and informs which one is newer. In
+ * case of success this function returns a non-negative value, in case of failure, a
+ * negative error code is returned. The success return codes use the following
+ * bits:
+ * o bit 0 is cleared: the first PEB (described by @seb) is newer then the
+ * second PEB (described by @pnum and @vid_hdr);
+ * o bit 0 is set: the second PEB is newer;
+ * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
+ * o bit 1 is set: bit-flips were detected in the newer LEB;
+ * o bit 2 is cleared: the older LEB is not corrupted;
+ * o bit 2 is set: the older LEB is corrupted.
+ */
+static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr)
+{
+ void *buf;
+ int len, err, second_is_newer, bitflips = 0, corrupted = 0;
+ uint32_t data_crc, crc;
+ struct ubi_vid_hdr *vh = NULL;
+ unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
+
+ if (seb->sqnum == 0 && sqnum2 == 0) {
+ long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver);
+
+ /*
+ * UBI constantly increases the logical eraseblock version
+ * number and it can overflow. Thus, we have to bear in mind
+ * that versions that are close to %0xFFFFFFFF are less then
+		 * that versions that are close to %0xFFFFFFFF are less than
+		 * versions that are close to %0.
+		 *
+		 * The UBI WL unit guarantees that the number of pending tasks
+		 * is not greater than %0x7FFFFFFF. So, if the difference
+		 * between any two versions is greater than or equal to
+		 * %0x7FFFFFFF, there was an overflow and the logical
+		 * eraseblock with the lower version is actually newer than the one
+ *
+ * FIXME: but this is anyway obsolete and will be removed at
+ * some point.
+ */
+ dbg_bld("using old crappy leb_ver stuff");
+
+ if (v1 == v2) {
+ ubi_err("PEB %d and PEB %d have the same version %lld",
+ seb->pnum, pnum, v1);
+ return -EINVAL;
+ }
+
+ abs = v1 - v2;
+ if (abs < 0)
+ abs = -abs;
+
+ if (abs < 0x7FFFFFFF)
+ /* Non-overflow situation */
+ second_is_newer = (v2 > v1);
+ else
+ second_is_newer = (v2 < v1);
+ } else
+ /* Obviously the LEB with lower sequence counter is older */
+ second_is_newer = sqnum2 > seb->sqnum;
+
+ /*
+ * Now we know which copy is newer. If the copy flag of the PEB with
+ * newer version is not set, then we just return, otherwise we have to
+ * check data CRC. For the second PEB we already have the VID header,
+ * for the first one - we'll need to re-read it from flash.
+ *
+ * FIXME: this may be optimized so that we wouldn't read twice.
+ */
+
+ if (second_is_newer) {
+ if (!vid_hdr->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("second PEB %d is newer, copy_flag is unset",
+ pnum);
+ return 1;
+ }
+ } else {
+ pnum = seb->pnum;
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh)
+ return -ENOMEM;
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err) {
+ if (err == UBI_IO_BITFLIPS)
+ bitflips = 1;
+ else {
+ dbg_err("VID of PEB %d header is bad, but it "
+ "was OK earlier", pnum);
+ if (err > 0)
+ err = -EIO;
+
+ goto out_free_vidh;
+ }
+ }
+
+ if (!vh->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("first PEB %d is newer, copy_flag is unset",
+ pnum);
+ err = bitflips << 1;
+ goto out_free_vidh;
+ }
+
+ vid_hdr = vh;
+ }
+
+ /* Read the data of the copy and check the CRC */
+
+ len = be32_to_cpu(vid_hdr->data_size);
+ buf = vmalloc(len);
+ if (!buf) {
+ err = -ENOMEM;
+ goto out_free_vidh;
+ }
+
+ err = ubi_io_read_data(ubi, buf, pnum, 0, len);
+ if (err && err != UBI_IO_BITFLIPS)
+ goto out_free_buf;
+
+ data_crc = be32_to_cpu(vid_hdr->data_crc);
+ crc = crc32(UBI_CRC32_INIT, buf, len);
+ if (crc != data_crc) {
+ dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
+ pnum, crc, data_crc);
+ corrupted = 1;
+ bitflips = 0;
+ second_is_newer = !second_is_newer;
+ } else {
+ dbg_bld("PEB %d CRC is OK", pnum);
+ bitflips = !!err;
+ }
+
+ vfree(buf);
+ ubi_free_vid_hdr(ubi, vh);
+
+ if (second_is_newer)
+ dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
+ else
+ dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
+
+ return second_is_newer | (bitflips << 1) | (corrupted << 2);
+
+out_free_buf:
+ vfree(buf);
+out_free_vidh:
+ ubi_free_vid_hdr(ubi, vh);
+ return err;
+}
+
+/**
+ * ubi_scan_add_used - add information about a physical eraseblock to the
+ * scanning information.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ * @pnum: the physical eraseblock number
+ * @ec: erase counter
+ * @vid_hdr: the volume identifier header
+ * @bitflips: if bit-flips were detected when this physical eraseblock was read
+ *
+ * This function adds information about a used physical eraseblock to the
+ * 'used' tree of the corresponding volume. The function is rather complex
+ * because it has to handle cases when this is not the first physical
+ * eraseblock belonging to the same logical eraseblock, and the newer one has
+ * to be picked, while the older one has to be dropped. This function returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
+ int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
+ int bitflips)
+{
+ int err, vol_id, lnum;
+ uint32_t leb_ver;
+ unsigned long long sqnum;
+ struct ubi_scan_volume *sv;
+ struct ubi_scan_leb *seb;
+ struct rb_node **p, *parent = NULL;
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+ sqnum = be64_to_cpu(vid_hdr->sqnum);
+ leb_ver = be32_to_cpu(vid_hdr->leb_ver);
+
+ dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d",
+ pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips);
+
+ sv = add_volume(si, vol_id, pnum, vid_hdr);
+ if (IS_ERR(sv))
+ return PTR_ERR(sv);
+
+ if (si->max_sqnum < sqnum)
+ si->max_sqnum = sqnum;
+
+ /*
+ * Walk the RB-tree of logical eraseblocks of volume @vol_id to see
+ * whether this is the first instance of this logical eraseblock.
+ */
+ p = &sv->root.rb_node;
+ while (*p) {
+ int cmp_res;
+
+ parent = *p;
+ seb = rb_entry(parent, struct ubi_scan_leb, u.rb);
+ if (lnum != seb->lnum) {
+ if (lnum < seb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ continue;
+ }
+
+ /*
+ * There is already a physical eraseblock describing the same
+ * logical eraseblock present.
+ */
+
+ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, "
+ "LEB ver %u, EC %d", seb->pnum, seb->sqnum,
+ seb->leb_ver, seb->ec);
+
+ /*
+ * Make sure that the logical eraseblocks have different
+ * versions. Otherwise the image is bad.
+ */
+ if (seb->leb_ver == leb_ver && leb_ver != 0) {
+ ubi_err("two LEBs with same version %u", leb_ver);
+ ubi_dbg_dump_seb(seb, 0);
+ ubi_dbg_dump_vid_hdr(vid_hdr);
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure that the logical eraseblocks have different
+ * sequence numbers. Otherwise the image is bad.
+ *
+ * FIXME: remove 'sqnum != 0' check when leb_ver is removed.
+ */
+ if (seb->sqnum == sqnum && sqnum != 0) {
+ ubi_err("two LEBs with same sequence number %llu",
+ sqnum);
+ ubi_dbg_dump_seb(seb, 0);
+ ubi_dbg_dump_vid_hdr(vid_hdr);
+ return -EINVAL;
+ }
+
+ /*
+ * Now we have to drop the older one and preserve the newer
+ * one.
+ */
+ cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ if (cmp_res & 1) {
+ /*
+ * This logical eraseblock is newer than the one
+ * found earlier.
+ */
+ err = validate_vid_hdr(vid_hdr, sv, pnum);
+ if (err)
+ return err;
+
+ if (cmp_res & 4)
+ err = add_to_list(si, seb->pnum, seb->ec,
+ &si->corr);
+ else
+ err = add_to_list(si, seb->pnum, seb->ec,
+ &si->erase);
+ if (err)
+ return err;
+
+ seb->ec = ec;
+ seb->pnum = pnum;
+ seb->scrub = ((cmp_res & 2) || bitflips);
+ seb->sqnum = sqnum;
+ seb->leb_ver = leb_ver;
+
+ if (sv->highest_lnum == lnum)
+ sv->last_data_size =
+ be32_to_cpu(vid_hdr->data_size);
+
+ return 0;
+ } else {
+ /*
+ * This logical eraseblock is older than the one found
+ * previously.
+ */
+ if (cmp_res & 4)
+ return add_to_list(si, pnum, ec, &si->corr);
+ else
+ return add_to_list(si, pnum, ec, &si->erase);
+ }
+ }
+
+ /*
+ * We've met this logical eraseblock for the first time, add it to the
+ * scanning information.
+ */
+
+ err = validate_vid_hdr(vid_hdr, sv, pnum);
+ if (err)
+ return err;
+
+ seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+ if (!seb)
+ return -ENOMEM;
+
+ seb->ec = ec;
+ seb->pnum = pnum;
+ seb->lnum = lnum;
+ seb->sqnum = sqnum;
+ seb->scrub = bitflips;
+ seb->leb_ver = leb_ver;
+
+ if (sv->highest_lnum <= lnum) {
+ sv->highest_lnum = lnum;
+ sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
+ }
+
+ sv->leb_count += 1;
+ rb_link_node(&seb->u.rb, parent, p);
+ rb_insert_color(&seb->u.rb, &sv->root);
+ return 0;
+}
+
+/**
+ * ubi_scan_find_sv - find information about a particular volume in the
+ * scanning information.
+ * @si: scanning information
+ * @vol_id: the requested volume ID
+ *
+ * This function returns a pointer to the volume description or %NULL if there
+ * are no data about this volume in the scanning information.
+ */
+struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
+ int vol_id)
+{
+ struct ubi_scan_volume *sv;
+ struct rb_node *p = si->volumes.rb_node;
+
+ while (p) {
+ sv = rb_entry(p, struct ubi_scan_volume, rb);
+
+ if (vol_id == sv->vol_id)
+ return sv;
+
+ if (vol_id > sv->vol_id)
+ p = p->rb_left;
+ else
+ p = p->rb_right;
+ }
+
+ return NULL;
+}
+
+/**
+ * ubi_scan_find_seb - find information about a particular logical
+ * eraseblock in the volume scanning information.
+ * @sv: a pointer to the volume scanning information
+ * @lnum: the requested logical eraseblock
+ *
+ * This function returns a pointer to the scanning logical eraseblock or %NULL
+ * if there are no data about it in the scanning volume information.
+ */
+struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
+ int lnum)
+{
+ struct ubi_scan_leb *seb;
+ struct rb_node *p = sv->root.rb_node;
+
+ while (p) {
+ seb = rb_entry(p, struct ubi_scan_leb, u.rb);
+
+ if (lnum == seb->lnum)
+ return seb;
+
+ if (lnum > seb->lnum)
+ p = p->rb_left;
+ else
+ p = p->rb_right;
+ }
+
+ return NULL;
+}
+
+/**
+ * ubi_scan_rm_volume - delete scanning information about a volume.
+ * @si: scanning information
+ * @sv: the volume scanning information to delete
+ */
+void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
+{
+ struct rb_node *rb;
+ struct ubi_scan_leb *seb;
+
+ dbg_bld("remove scanning information about volume %d", sv->vol_id);
+
+ while ((rb = rb_first(&sv->root))) {
+ seb = rb_entry(rb, struct ubi_scan_leb, u.rb);
+ rb_erase(&seb->u.rb, &sv->root);
+ list_add_tail(&seb->u.list, &si->erase);
+ }
+
+ rb_erase(&sv->rb, &si->volumes);
+ kfree(sv);
+ si->vols_found -= 1;
+}
+
+/**
+ * ubi_scan_erase_peb - erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ * @pnum: physical eraseblock number to erase;
+ * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown)
+ *
+ * This function erases physical eraseblock 'pnum', and writes the erase
+ * counter header to it. This function should only be used during the UBI device
+ * initialization stages, when the EBA unit has not yet been initialized. This
+ * function returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
+ int pnum, int ec)
+{
+ int err;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. Upgrade UBI and use 64-bit
+ * erase counters internally.
+ */
+ ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
+ return -EINVAL;
+ }
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ec_hdr->ec = cpu_to_be64(ec);
+
+ err = ubi_io_sync_erase(ubi, pnum, 0);
+ if (err < 0)
+ goto out_free;
+
+ err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * ubi_scan_get_free_peb - get a free physical eraseblock.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ *
+ * This function returns a free physical eraseblock. It is supposed to be
+ * called on the UBI initialization stages when the wear-leveling unit is not
+ * initialized yet. This function picks a physical eraseblock from one of the
+ * lists, writes the EC header if it is needed, and removes it from the list.
+ *
+ * This function returns scanning physical eraseblock information in case of
+ * success and an error code in case of failure.
+ */
+struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
+ struct ubi_scan_info *si)
+{
+ int err = 0, i;
+ struct ubi_scan_leb *seb;
+
+ if (!list_empty(&si->free)) {
+ seb = list_entry(si->free.next, struct ubi_scan_leb, u.list);
+ list_del(&seb->u.list);
+ dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec);
+ return seb;
+ }
+
+ for (i = 0; i < 2; i++) {
+ struct list_head *head;
+ struct ubi_scan_leb *tmp_seb;
+
+ if (i == 0)
+ head = &si->erase;
+ else
+ head = &si->corr;
+
+ /*
+ * We try to erase the first physical eraseblock from the @head
+ * list and pick it if we succeed, or try to erase the
+ * next one if not. And so forth. We don't want to deal with
+ * bad eraseblocks here - they'll be handled later.
+ */
+ list_for_each_entry_safe(seb, tmp_seb, head, u.list) {
+ if (seb->ec == UBI_SCAN_UNKNOWN_EC)
+ seb->ec = si->mean_ec;
+
+ err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1);
+ if (err)
+ continue;
+
+ seb->ec += 1;
+ list_del(&seb->u.list);
+ dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec);
+ return seb;
+ }
+ }
+
+ ubi_err("no eraseblocks found");
+ return ERR_PTR(-ENOSPC);
+}
+
+/**
+ * process_eb - read UBI headers, check them and add corresponding data
+ * to the scanning information.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ * @pnum: the physical eraseblock number
+ *
+ * This function returns zero if the physical eraseblock was successfully
+ * handled and a negative error code in case of failure.
+ */
+static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
+{
+ long long uninitialized_var(ec);
+ int err, bitflips = 0, vol_id, ec_corr = 0;
+
+ dbg_bld("scan PEB %d", pnum);
+
+ /* Skip bad physical eraseblocks */
+ err = ubi_io_is_bad(ubi, pnum);
+ if (err < 0)
+ return err;
+ else if (err) {
+ /*
+ * FIXME: it is actually the duty of the I/O unit to initialize
+ * this, but MTD does not provide enough information.
+ */
+ si->bad_peb_count += 1;
+ return 0;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err < 0)
+ return err;
+ else if (err == UBI_IO_BITFLIPS)
+ bitflips = 1;
+ else if (err == UBI_IO_PEB_EMPTY)
+ return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, &si->erase);
+ else if (err == UBI_IO_BAD_EC_HDR) {
+ /*
+ * We also have to look at the VID header, possibly it is not
+ * corrupted. Set the %bitflips flag so that this PEB is moved
+ * and its EC is re-created.
+ */
+ ec_corr = 1;
+ ec = UBI_SCAN_UNKNOWN_EC;
+ bitflips = 1;
+ }
+
+ si->is_empty = 0;
+
+ if (!ec_corr) {
+ /* Make sure UBI version is OK */
+ if (ech->version != UBI_VERSION) {
+ ubi_err("this UBI version is %d, image version is %d",
+ UBI_VERSION, (int)ech->version);
+ return -EINVAL;
+ }
+
+ ec = be64_to_cpu(ech->ec);
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. The EC headers have 64 bits
+ * reserved, but we only make use of 31-bit
+ * values, as this seems to be enough for any existing
+ * flash. Upgrade UBI and use 64-bit erase counters
+ * internally.
+ */
+ ubi_err("erase counter overflow, max is %d",
+ UBI_MAX_ERASECOUNTER);
+ ubi_dbg_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+ }
+
+ /* OK, we've done with the EC header, let's look at the VID header */
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
+ if (err < 0)
+ return err;
+ else if (err == UBI_IO_BITFLIPS)
+ bitflips = 1;
+ else if (err == UBI_IO_BAD_VID_HDR ||
+ (err == UBI_IO_PEB_FREE && ec_corr)) {
+ /* VID header is corrupted */
+ err = add_to_list(si, pnum, ec, &si->corr);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ } else if (err == UBI_IO_PEB_FREE) {
+ /* No VID header - the physical eraseblock is free */
+ err = add_to_list(si, pnum, ec, &si->free);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ }
+
+ vol_id = be32_to_cpu(vidh->vol_id);
+ if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
+ int lnum = be32_to_cpu(vidh->lnum);
+
+ /* Unsupported internal volume */
+ switch (vidh->compat) {
+ case UBI_COMPAT_DELETE:
+ ubi_msg("\"delete\" compatible internal volume %d:%d"
+ " found, remove it", vol_id, lnum);
+ err = add_to_list(si, pnum, ec, &si->corr);
+ if (err)
+ return err;
+ break;
+
+ case UBI_COMPAT_RO:
+ ubi_msg("read-only compatible internal volume %d:%d"
+ " found, switch to read-only mode",
+ vol_id, lnum);
+ ubi->ro_mode = 1;
+ break;
+
+ case UBI_COMPAT_PRESERVE:
+ ubi_msg("\"preserve\" compatible internal volume %d:%d"
+ " found", vol_id, lnum);
+ err = add_to_list(si, pnum, ec, &si->alien);
+ if (err)
+ return err;
+ si->alien_peb_count += 1;
+ return 0;
+
+ case UBI_COMPAT_REJECT:
+ ubi_err("incompatible internal volume %d:%d found",
+ vol_id, lnum);
+ return -EINVAL;
+ }
+ }
+
+ /* Both UBI headers seem to be fine */
+ err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips);
+ if (err)
+ return err;
+
+adjust_mean_ec:
+ if (!ec_corr) {
+ si->ec_sum += ec;
+ si->ec_count += 1;
+ if (ec > si->max_ec)
+ si->max_ec = ec;
+ if (ec < si->min_ec)
+ si->min_ec = ec;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_scan - scan an MTD device.
+ * @ubi: UBI device description object
+ *
+ * This function does full scanning of an MTD device and returns complete
+ * information about it. In case of failure, an error code is returned.
+ */
+struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
+{
+ int err, pnum;
+ struct rb_node *rb1, *rb2;
+ struct ubi_scan_volume *sv;
+ struct ubi_scan_leb *seb;
+ struct ubi_scan_info *si;
+
+ si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL);
+ if (!si)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&si->corr);
+ INIT_LIST_HEAD(&si->free);
+ INIT_LIST_HEAD(&si->erase);
+ INIT_LIST_HEAD(&si->alien);
+ si->volumes = RB_ROOT;
+ si->is_empty = 1;
+
+ err = -ENOMEM;
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ goto out_si;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ cond_resched();
+
+ dbg_msg("process PEB %d", pnum);
+ err = process_eb(ubi, si, pnum);
+ if (err < 0)
+ goto out_vidh;
+ }
+
+ dbg_msg("scanning is finished");
+
+ /* Calculate mean erase counter */
+ if (si->ec_count) {
+ do_div(si->ec_sum, si->ec_count);
+ si->mean_ec = si->ec_sum;
+ }
+
+ if (si->is_empty)
+ ubi_msg("empty MTD device detected");
+
+ /*
+ * In case of unknown erase counter we use the mean erase counter
+ * value.
+ */
+ ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
+ if (seb->ec == UBI_SCAN_UNKNOWN_EC)
+ seb->ec = si->mean_ec;
+ }
+
+ list_for_each_entry(seb, &si->free, u.list) {
+ if (seb->ec == UBI_SCAN_UNKNOWN_EC)
+ seb->ec = si->mean_ec;
+ }
+
+ list_for_each_entry(seb, &si->corr, u.list)
+ if (seb->ec == UBI_SCAN_UNKNOWN_EC)
+ seb->ec = si->mean_ec;
+
+ list_for_each_entry(seb, &si->erase, u.list)
+ if (seb->ec == UBI_SCAN_UNKNOWN_EC)
+ seb->ec = si->mean_ec;
+
+ err = paranoid_check_si(ubi, si);
+ if (err) {
+ if (err > 0)
+ err = -EINVAL;
+ goto out_vidh;
+ }
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ return si;
+
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+out_si:
+ ubi_scan_destroy_si(si);
+ return ERR_PTR(err);
+}
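+
+/*
+ * An illustrative sketch (not part of UBI itself) of how the attach code is
+ * expected to consume the result of ubi_scan(). The helper name
+ * build_from_scan_info() is hypothetical and stands in for the volume table,
+ * EBA and WL initialization performed elsewhere:
+ *
+ *	si = ubi_scan(ubi);
+ *	if (IS_ERR(si))
+ *		return PTR_ERR(si);
+ *	err = build_from_scan_info(ubi, si);
+ *	ubi_scan_destroy_si(si);
+ *	return err;
+ */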
+
+/**
+ * destroy_sv - free the scanning volume information
+ * @sv: scanning volume information
+ *
+ * This function destroys the volume RB-tree (@sv->root) and the scanning
+ * volume information.
+ */
+static void destroy_sv(struct ubi_scan_volume *sv)
+{
+ struct ubi_scan_leb *seb;
+ struct rb_node *this = sv->root.rb_node;
+
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ seb = rb_entry(this, struct ubi_scan_leb, u.rb);
+ this = rb_parent(this);
+ if (this) {
+ if (this->rb_left == &seb->u.rb)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+
+ kfree(seb);
+ }
+ }
+ kfree(sv);
+}
+
+/**
+ * ubi_scan_destroy_si - destroy scanning information.
+ * @si: scanning information
+ */
+void ubi_scan_destroy_si(struct ubi_scan_info *si)
+{
+ struct ubi_scan_leb *seb, *seb_tmp;
+ struct ubi_scan_volume *sv;
+ struct rb_node *rb;
+
+ list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
+ list_del(&seb->u.list);
+ kfree(seb);
+ }
+ list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
+ list_del(&seb->u.list);
+ kfree(seb);
+ }
+ list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
+ list_del(&seb->u.list);
+ kfree(seb);
+ }
+ list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
+ list_del(&seb->u.list);
+ kfree(seb);
+ }
+
+ /* Destroy the volume RB-tree */
+ rb = si->volumes.rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ sv = rb_entry(rb, struct ubi_scan_volume, rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &sv->rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ destroy_sv(sv);
+ }
+ }
+
+ kfree(si);
+}
+
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+
+/**
+ * paranoid_check_si - check if the scanning information is correct and
+ * consistent.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ *
+ * This function returns zero if the scanning information is all right, %1 if
+ * not and a negative error code if an error occurred.
+ */
+static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
+{
+ int pnum, err, vols_found = 0;
+ struct rb_node *rb1, *rb2;
+ struct ubi_scan_volume *sv;
+ struct ubi_scan_leb *seb, *last_seb;
+ uint8_t *buf;
+
+ /*
+ * At first, check that scanning information is OK.
+ */
+ ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
+ int leb_count = 0;
+
+ cond_resched();
+
+ vols_found += 1;
+
+ if (si->is_empty) {
+ ubi_err("bad is_empty flag");
+ goto bad_sv;
+ }
+
+ if (sv->vol_id < 0 || sv->highest_lnum < 0 ||
+ sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 ||
+ sv->data_pad < 0 || sv->last_data_size < 0) {
+ ubi_err("negative values");
+ goto bad_sv;
+ }
+
+ if (sv->vol_id >= UBI_MAX_VOLUMES &&
+ sv->vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err("bad vol_id");
+ goto bad_sv;
+ }
+
+ if (sv->vol_id > si->highest_vol_id) {
+ ubi_err("highest_vol_id is %d, but vol_id %d is there",
+ si->highest_vol_id, sv->vol_id);
+ goto out;
+ }
+
+ if (sv->vol_type != UBI_DYNAMIC_VOLUME &&
+ sv->vol_type != UBI_STATIC_VOLUME) {
+ ubi_err("bad vol_type");
+ goto bad_sv;
+ }
+
+ if (sv->data_pad > ubi->leb_size / 2) {
+ ubi_err("bad data_pad");
+ goto bad_sv;
+ }
+
+ last_seb = NULL;
+ ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ cond_resched();
+
+ last_seb = seb;
+ leb_count += 1;
+
+ if (seb->pnum < 0 || seb->ec < 0) {
+ ubi_err("negative values");
+ goto bad_seb;
+ }
+
+ if (seb->ec < si->min_ec) {
+ ubi_err("bad si->min_ec (%d), %d found",
+ si->min_ec, seb->ec);
+ goto bad_seb;
+ }
+
+ if (seb->ec > si->max_ec) {
+ ubi_err("bad si->max_ec (%d), %d found",
+ si->max_ec, seb->ec);
+ goto bad_seb;
+ }
+
+ if (seb->pnum >= ubi->peb_count) {
+ ubi_err("too high PEB number %d, total PEBs %d",
+ seb->pnum, ubi->peb_count);
+ goto bad_seb;
+ }
+
+ if (sv->vol_type == UBI_STATIC_VOLUME) {
+ if (seb->lnum >= sv->used_ebs) {
+ ubi_err("bad lnum or used_ebs");
+ goto bad_seb;
+ }
+ } else {
+ if (sv->used_ebs != 0) {
+ ubi_err("non-zero used_ebs");
+ goto bad_seb;
+ }
+ }
+
+ if (seb->lnum > sv->highest_lnum) {
+ ubi_err("incorrect highest_lnum or lnum");
+ goto bad_seb;
+ }
+ }
+
+ if (sv->leb_count != leb_count) {
+ ubi_err("bad leb_count, %d objects in the tree",
+ leb_count);
+ goto bad_sv;
+ }
+
+ if (!last_seb)
+ continue;
+
+ seb = last_seb;
+
+ if (seb->lnum != sv->highest_lnum) {
+ ubi_err("bad highest_lnum");
+ goto bad_seb;
+ }
+ }
+
+ if (vols_found != si->vols_found) {
+ ubi_err("bad si->vols_found %d, should be %d",
+ si->vols_found, vols_found);
+ goto out;
+ }
+
+ /* Check that scanning information is correct */
+ ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
+ last_seb = NULL;
+ ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ int vol_type;
+
+ cond_resched();
+
+ last_seb = seb;
+
+ err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("VID header is not OK (%d)", err);
+ if (err > 0)
+ err = -EIO;
+ return err;
+ }
+
+ vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
+ UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+ if (sv->vol_type != vol_type) {
+ ubi_err("bad vol_type");
+ goto bad_vid_hdr;
+ }
+
+ if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
+ ubi_err("bad sqnum %llu", seb->sqnum);
+ goto bad_vid_hdr;
+ }
+
+ if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
+ ubi_err("bad vol_id %d", sv->vol_id);
+ goto bad_vid_hdr;
+ }
+
+ if (sv->compat != vidh->compat) {
+ ubi_err("bad compat %d", vidh->compat);
+ goto bad_vid_hdr;
+ }
+
+ if (seb->lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err("bad lnum %d", seb->lnum);
+ goto bad_vid_hdr;
+ }
+
+ if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+ ubi_err("bad used_ebs %d", sv->used_ebs);
+ goto bad_vid_hdr;
+ }
+
+ if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
+ ubi_err("bad data_pad %d", sv->data_pad);
+ goto bad_vid_hdr;
+ }
+
+ if (seb->leb_ver != be32_to_cpu(vidh->leb_ver)) {
+ ubi_err("bad leb_ver %u", seb->leb_ver);
+ goto bad_vid_hdr;
+ }
+ }
+
+ if (!last_seb)
+ continue;
+
+ if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err("bad highest_lnum %d", sv->highest_lnum);
+ goto bad_vid_hdr;
+ }
+
+ if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
+ ubi_err("bad last_data_size %d", sv->last_data_size);
+ goto bad_vid_hdr;
+ }
+ }
+
+ /*
+ * Make sure that all the physical eraseblocks are in one of the lists
+ * or trees.
+ */
+ buf = kzalloc(ubi->peb_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ err = ubi_io_is_bad(ubi, pnum);
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ } else if (err)
+ buf[pnum] = 1;
+ }
+
+ ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
+ ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
+ buf[seb->pnum] = 1;
+
+ list_for_each_entry(seb, &si->free, u.list)
+ buf[seb->pnum] = 1;
+
+ list_for_each_entry(seb, &si->corr, u.list)
+ buf[seb->pnum] = 1;
+
+ list_for_each_entry(seb, &si->erase, u.list)
+ buf[seb->pnum] = 1;
+
+ list_for_each_entry(seb, &si->alien, u.list)
+ buf[seb->pnum] = 1;
+
+ err = 0;
+ for (pnum = 0; pnum < ubi->peb_count; pnum++)
+ if (!buf[pnum]) {
+ ubi_err("PEB %d is not referred", pnum);
+ err = 1;
+ }
+
+ kfree(buf);
+ if (err)
+ goto out;
+ return 0;
+
+bad_seb:
+ ubi_err("bad scanning information about LEB %d", seb->lnum);
+ ubi_dbg_dump_seb(seb, 0);
+ ubi_dbg_dump_sv(sv);
+ goto out;
+
+bad_sv:
+ ubi_err("bad scanning information about volume %d", sv->vol_id);
+ ubi_dbg_dump_sv(sv);
+ goto out;
+
+bad_vid_hdr:
+ ubi_err("bad scanning information about volume %d", sv->vol_id);
+ ubi_dbg_dump_sv(sv);
+ ubi_dbg_dump_vid_hdr(vidh);
+
+out:
+ ubi_dbg_dump_stack();
+ return 1;
+}
+
+#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/u-boot/drivers/mtd/ubi/scan.h b/u-boot/drivers/mtd/ubi/scan.h
new file mode 100644
index 0000000..966b9b6
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/scan.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __UBI_SCAN_H__
+#define __UBI_SCAN_H__
+
+/* The erase counter value for this physical eraseblock is unknown */
+#define UBI_SCAN_UNKNOWN_EC (-1)
+
+/**
+ * struct ubi_scan_leb - scanning information about a physical eraseblock.
+ * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown)
+ * @pnum: physical eraseblock number
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @sqnum: sequence number
+ * @u: union of RB-tree and @list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
+ * @u.list: link in one of the eraseblock lists
+ * @leb_ver: logical eraseblock version (obsolete)
+ *
+ * One object of this type is allocated for each physical eraseblock during
+ * scanning.
+ */
+struct ubi_scan_leb {
+ int ec;
+ int pnum;
+ int lnum;
+ int scrub;
+ unsigned long long sqnum;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+ uint32_t leb_ver;
+};
+
+/**
+ * struct ubi_scan_volume - scanning information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ * static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ * volume (always equivalent to the usable logical eraseblock size in case of
+ * dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ * are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblocks belonging to this
+ * volume (&struct ubi_scan_leb objects)
+ *
+ * One object of this type is allocated for each volume during scanning.
+ */
+struct ubi_scan_volume {
+ int vol_id;
+ int highest_lnum;
+ int leb_count;
+ int vol_type;
+ int used_ebs;
+ int last_data_size;
+ int data_pad;
+ int compat;
+ struct rb_node rb;
+ struct rb_root root;
+};
+
+/**
+ * struct ubi_scan_info - UBI scanning information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @vols_found: number of volumes found during scanning
+ * @highest_vol_id: highest volume ID
+ * @alien_peb_count: count of physical eraseblocks in the @alien list
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ *
+ * This data structure contains the result of scanning and may be used by other
+ * UBI units to build the final UBI data structures, perform further error
+ * recovery, and so on.
+ */
+struct ubi_scan_info {
+ struct rb_root volumes;
+ struct list_head corr;
+ struct list_head free;
+ struct list_head erase;
+ struct list_head alien;
+ int bad_peb_count;
+ int vols_found;
+ int highest_vol_id;
+ int alien_peb_count;
+ int is_empty;
+ int min_ec;
+ int max_ec;
+ unsigned long long max_sqnum;
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
+};
+
+struct ubi_device;
+struct ubi_vid_hdr;
+
+/*
+ * ubi_scan_move_to_list - move a physical eraseblock from the volume tree to a
+ * list.
+ *
+ * @sv: volume scanning information
+ * @seb: scanning eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
+ struct ubi_scan_leb *seb,
+ struct list_head *list)
+{
+ rb_erase(&seb->u.rb, &sv->root);
+ list_add_tail(&seb->u.list, list);
+}
+
+int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
+ int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
+ int bitflips);
+struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
+ int vol_id);
+struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
+ int lnum);
+void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
+struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
+ struct ubi_scan_info *si);
+int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
+ int pnum, int ec);
+struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
+void ubi_scan_destroy_si(struct ubi_scan_info *si);
+
+#endif /* !__UBI_SCAN_H__ */
diff --git a/u-boot/drivers/mtd/ubi/ubi-media.h b/u-boot/drivers/mtd/ubi/ubi-media.h
new file mode 100644
index 0000000..c3185d9
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/ubi-media.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Thomas Gleixner
+ * Frank Haverkamp
+ * Oliver Lohmann
+ * Andreas Arnez
+ */
+
+/*
+ * This file defines the layout of UBI headers and all the other UBI on-flash
+ * data structures.
+ */
+
+#ifndef __UBI_MEDIA_H__
+#define __UBI_MEDIA_H__
+
+#include <asm/byteorder.h>
+
+/* The version of UBI images supported by this implementation */
+#define UBI_VERSION 1
+
+/* The highest erase counter value supported by this implementation */
+#define UBI_MAX_ERASECOUNTER 0x7FFFFFFF
+
+/* The initial CRC32 value used when calculating CRC checksums */
+#define UBI_CRC32_INIT 0xFFFFFFFFU
+
+/* Erase counter header magic number (ASCII "UBI#") */
+#define UBI_EC_HDR_MAGIC 0x55424923
+/* Volume identifier header magic number (ASCII "UBI!") */
+#define UBI_VID_HDR_MAGIC 0x55424921
+
+/*
+ * Volume type constants used in the volume identifier header.
+ *
+ * @UBI_VID_DYNAMIC: dynamic volume
+ * @UBI_VID_STATIC: static volume
+ */
+enum {
+ UBI_VID_DYNAMIC = 1,
+ UBI_VID_STATIC = 2
+};
+
+/*
+ * Volume flags used in the volume table record.
+ *
+ * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
+ *
+ * %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume
+ * table. UBI automatically re-sizes the volume which has this flag and makes
+ * it as large as possible. This means that if after the
+ * initialization UBI finds out that there are available physical eraseblocks
+ * present on the device, it automatically appends all of them to the volume
+ * (the physical eraseblocks reserved for bad eraseblocks handling and other
+ * reserved physical eraseblocks are not taken). So, if there is a volume with
+ * the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical
+ * eraseblocks will be zero after UBI is loaded, because all of them will be
+ * reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared
+ * after the volume has been initialized.
+ *
+ * The auto-resize feature is useful for device production purposes. For
+ * example, different NAND flash chips may have different amounts of initial bad
+ * eraseblocks, depending on the particular chip instance. Manufacturers of NAND
+ * chips usually guarantee that the amount of initial bad eraseblocks does not
+ * exceed a certain percentage, e.g. 2%. When one creates a UBI image which will
+ * be flashed to the end devices in production, one does not know the exact
+ * number of good physical eraseblocks the NAND chip on the device will have,
+ * but this number is required to calculate the volume sizes and put them into
+ * the volume table of the UBI image. In this case, one of the volumes (e.g.,
+ * the one
+ * which will store the root file system) is marked as "auto-resizable", and
+ * UBI will adjust its size on the first boot if needed.
+ *
+ * Note, first UBI reserves some amount of physical eraseblocks for bad
+ * eraseblock handling, and then re-sizes the volume, not vice-versa. This
+ * means that the pool of reserved physical eraseblocks will always be present.
+ */
+enum {
+ UBI_VTBL_AUTORESIZE_FLG = 0x01,
+};
+
+/*
+ * Compatibility constants used by internal volumes.
+ *
+ * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
+ * to the flash
+ * @UBI_COMPAT_RO: attach this device in read-only mode
+ * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its
+ * physical eraseblocks, don't allow the wear-leveling unit to move them
+ * @UBI_COMPAT_REJECT: reject this UBI image
+ */
+enum {
+ UBI_COMPAT_DELETE = 1,
+ UBI_COMPAT_RO = 2,
+ UBI_COMPAT_PRESERVE = 4,
+ UBI_COMPAT_REJECT = 5
+};
+
+/* Sizes of UBI headers */
+#define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr)
+#define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr)
+
+/* Sizes of UBI headers without the ending CRC */
+#define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(__be32))
+#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32))
+
+/**
+ * struct ubi_ec_hdr - UBI erase counter header.
+ * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC)
+ * @version: version of UBI implementation which is supposed to accept this
+ * UBI image
+ * @padding1: reserved for future, zeroes
+ * @ec: the erase counter
+ * @vid_hdr_offset: where the VID header starts
+ * @data_offset: where the user data start
+ * @padding2: reserved for future, zeroes
+ * @hdr_crc: erase counter header CRC checksum
+ *
+ * The erase counter header takes 64 bytes and has plenty of unused space for
+ * future use. The unused fields are zeroed. The @version field is used to
+ * indicate the version of the UBI implementation which is supposed to be able
+ * to work with this UBI image. If @version is greater than the current UBI
+ * version, the image is rejected. This may be useful in future if something
+ * is changed radically. This field is duplicated in the volume identifier
+ * header.
+ *
+ * The @vid_hdr_offset and @data_offset fields contain the offset of the
+ * volume identifier header and user data, relative to the beginning of the
+ * physical eraseblock. These values have to be the same for all physical
+ * eraseblocks.
+ */
+struct ubi_ec_hdr {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be64 ec; /* Warning: the current limit is 31-bit anyway! */
+ __be32 vid_hdr_offset;
+ __be32 data_offset;
+ __u8 padding2[36];
+ __be32 hdr_crc;
+} __attribute__ ((packed));
+
+/**
+ * struct ubi_vid_hdr - on-flash UBI volume identifier header.
+ * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC)
+ * @version: UBI implementation version which is supposed to accept this UBI
+ * image (%UBI_VERSION)
+ * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC)
+ * @copy_flag: if this logical eraseblock was copied from another physical
+ * eraseblock (for wear-leveling reasons)
+ * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE,
+ * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT)
+ * @vol_id: ID of this volume
+ * @lnum: logical eraseblock number
+ * @leb_ver: version of this logical eraseblock (IMPORTANT: obsolete, to be
+ * removed, kept only for not breaking older UBI users)
+ * @data_size: how many bytes of data this logical eraseblock contains
+ * @used_ebs: total number of used logical eraseblocks in this volume
+ * @data_pad: how many bytes at the end of this physical eraseblock are not
+ * used
+ * @data_crc: CRC checksum of the data stored in this logical eraseblock
+ * @padding1: reserved for future, zeroes
+ * @sqnum: sequence number
+ * @padding2: reserved for future, zeroes
+ * @hdr_crc: volume identifier header CRC checksum
+ *
+ * The @sqnum is the value of the global sequence counter at the time when this
+ * VID header was created. The global sequence counter is incremented each time
+ * UBI writes a new VID header to the flash, i.e. when it maps a logical
+ * eraseblock to a new physical eraseblock. The global sequence counter is an
+ * unsigned 64-bit integer and we assume it never overflows. The @sqnum
+ * (sequence number) is used to distinguish between older and newer versions of
+ * logical eraseblocks.
+ *
+ * There are 2 situations when there may be more than one physical eraseblock
+ * corresponding to the same logical eraseblock, i.e., having the same @vol_id
+ * and @lnum values in the volume identifier header. Suppose we have a logical
+ * eraseblock L and it is mapped to the physical eraseblock P.
+ *
+ * 1. Because UBI may erase physical eraseblocks asynchronously, the following
+ * situation is possible: L is asynchronously erased, so P is scheduled for
+ * erasure, then L is written to, i.e., mapped to another physical eraseblock P1,
+ * so P1 is written to, then an unclean reboot happens. Result - there are 2
+ * physical eraseblocks P and P1 corresponding to the same logical eraseblock
+ * L. But P1 has greater sequence number, so UBI picks P1 when it attaches the
+ * flash.
+ *
+ * 2. From time to time UBI moves logical eraseblocks to other physical
+ * eraseblocks for wear-leveling reasons. If, for example, UBI moves L from P
+ * to P1, and an unclean reboot happens before P is physically erased, there
+ * are two physical eraseblocks P and P1 corresponding to L and UBI has to
+ * select one of them when the flash is attached. The @sqnum field says which
+ * PEB is the original (obviously P will have lower @sqnum) and the copy. But
+ * it is not enough to select the physical eraseblock with the higher sequence
+ * number, because the unclean reboot could have happened in the middle of the
+ * copying process, so the data in P1 is corrupted. It is also not enough to
+ * just select the physical eraseblock with lower sequence number, because the
+ * data there may be old (consider a case if more data was added to P1 after
+ * the copying). Moreover, the unclean reboot may happen when the erasure of P
+ * was just started, so it results in an unstable P, which is "mostly" OK, but
+ * still has unstable bits.
+ *
+ * UBI uses the @copy_flag field to indicate that this logical eraseblock is a
+ * copy. UBI also calculates data CRC when the data is moved and stores it at
+ * the @data_crc field of the copy (P1). So when UBI needs to pick one physical
+ * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is
+ * examined. If it is cleared, the situation is simple and the newer one is
+ * picked. If it is set, the data CRC of the copy (P1) is examined. If the CRC
+ * checksum is correct, this physical eraseblock is selected (P1). Otherwise
+ * the older one (P) is selected.
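+ *
+ * For example, if the wear-leveling move of L was interrupted half-way, P1
+ * would have @copy_flag set but its @data_crc would not match the (partial)
+ * data, so UBI would fall back to P even though P1 carries the higher @sqnum.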
+ *
+ * Note, there is an obsolete @leb_ver field which was used instead of @sqnum
+ * in the past. But it is not used anymore and we keep it in order to be able
+ * to deal with old UBI images. It will be removed at some point.
+ *
+ * There are 2 sorts of volumes in UBI: user volumes and internal volumes.
+ * Internal volumes are not seen from outside and are used for various internal
+ * UBI purposes. In this implementation there is only one internal volume - the
+ * layout volume. Internal volumes are the main mechanism of UBI extensions.
+ * For example, in future one may introduce a journal internal volume. Internal
+ * volumes have their own reserved range of IDs.
+ *
+ * The @compat field is only used for internal volumes and contains the "degree
+ * of their compatibility". It is always zero for user volumes. This field
+ * provides a mechanism to introduce UBI extensions and to be still compatible
+ * with older UBI binaries. For example, if someone introduced a journal in
+ * future, he would probably use %UBI_COMPAT_DELETE compatibility for the
+ * journal volume. And in this case, older UBI binaries, which know nothing
+ * about the journal volume, would just delete this volume and work perfectly
+ * fine. This is similar to what Ext2fs does when it is fed by an Ext3fs image
+ * - it just ignores the Ext3fs journal.
+ *
+ * The @data_crc field contains the CRC checksum of the contents of the logical
+ * eraseblock if this is a static volume. In case of dynamic volumes, it does
+ * not contain the CRC checksum as a rule. The only exception is when the
+ * data of the physical eraseblock was moved by the wear-leveling unit, then
+ * the wear-leveling unit calculates the data CRC and stores it in the
+ * @data_crc field. And of course, the @copy_flag is %1 in this case.
+ *
+ * The @data_size field is used only for static volumes because UBI has to know
+ * how many bytes of data are stored in this eraseblock. For dynamic volumes,
+ * this field usually contains zero. The only exception is when the data of the
+ * physical eraseblock was moved to another physical eraseblock for
+ * wear-leveling reasons. In this case, UBI calculates CRC checksum of the
+ * contents and uses both @data_crc and @data_size fields. In this case, the
+ * @data_size field contains data size.
+ *
+ * The @used_ebs field is used only for static volumes and indicates how many
+ * eraseblocks the data of the volume takes. For dynamic volumes this field is
+ * not used and always contains zero.
+ *
+ * The @data_pad is calculated when volumes are created using the alignment
+ * parameter. So, effectively, the @data_pad field reduces the size of logical
+ * eraseblocks of this volume. This is very handy when one uses block-oriented
+ * software (say, cramfs) on top of the UBI volume.
+ */
+struct ubi_vid_hdr {
+ __be32 magic;
+ __u8 version;
+ __u8 vol_type;
+ __u8 copy_flag;
+ __u8 compat;
+ __be32 vol_id;
+ __be32 lnum;
+ __be32 leb_ver; /* obsolete, to be removed, don't use */
+ __be32 data_size;
+ __be32 used_ebs;
+ __be32 data_pad;
+ __be32 data_crc;
+ __u8 padding1[4];
+ __be64 sqnum;
+ __u8 padding2[12];
+ __be32 hdr_crc;
+} __attribute__ ((packed));
+
+/* Internal UBI volumes count */
+#define UBI_INT_VOL_COUNT 1
+
+/*
+ * Starting ID of internal volumes. There is reserved room for 4096 internal
+ * volumes.
+ */
+#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
+
+/* The layout volume contains the volume table */
+
+#define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START
+#define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC
+#define UBI_LAYOUT_VOLUME_ALIGN 1
+#define UBI_LAYOUT_VOLUME_EBS 2
+#define UBI_LAYOUT_VOLUME_NAME "layout volume"
+#define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT
+
+/* The maximum number of volumes per one UBI device */
+#define UBI_MAX_VOLUMES 128
+
+/* The maximum volume name length */
+#define UBI_VOL_NAME_MAX 127
+
+/* Size of the volume table record */
+#define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record)
+
+/* Size of the volume table record without the ending CRC */
+#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32))
+
+/**
+ * struct ubi_vtbl_record - a record in the volume table.
+ * @reserved_pebs: how many physical eraseblocks are reserved for this volume
+ * @alignment: volume alignment
+ * @data_pad: how many bytes are unused at the end of each physical
+ * eraseblock to satisfy the requested alignment
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @upd_marker: if volume update was started but not finished
+ * @name_len: volume name length
+ * @name: the volume name
+ * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG)
+ * @padding: reserved, zeroes
+ * @crc: a CRC32 checksum of the record
+ *
+ * The volume table records are stored in the volume table, which is stored in
+ * the layout volume. The layout volume consists of 2 logical eraseblocks, each
+ * of which contains a copy of the volume table (i.e., the volume table is
+ * duplicated). The volume table is an array of &struct ubi_vtbl_record
+ * objects indexed by the volume ID.
+ *
+ * If the size of the logical eraseblock is large enough to fit
+ * %UBI_MAX_VOLUMES records, the volume table contains %UBI_MAX_VOLUMES
+ * records. Otherwise, it contains as many records as it can fit (i.e., size of
+ * logical eraseblock divided by sizeof(struct ubi_vtbl_record)).
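+ *
+ * For example, assuming the structure defined below (172 bytes per record), a
+ * 2048-byte logical eraseblock would hold only 2048 / 172 = 11 records,
+ * whereas a 128 KiB logical eraseblock easily fits all %UBI_MAX_VOLUMES
+ * records.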
+ *
+ * The @upd_marker flag is used to implement volume update. It is set to %1
+ * before update and set to %0 after the update. So if the update operation was
+ * interrupted, UBI knows that the volume is corrupted.
+ *
+ * The @alignment field is specified when the volume is created and cannot be
+ * changed later. It may be useful, for example, when a block-oriented file
+ * system works on top of UBI. The @data_pad field is calculated using the
+ * logical eraseblock size and @alignment. The alignment must be a multiple of the
+ * minimal flash I/O unit. If @alignment is 1, all the available space of
+ * the physical eraseblocks is used.
+ *
+ * Empty records contain all zeroes and the CRC checksum of those zeroes.
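+ * In other words, the @crc of an empty record would be the CRC32 of
+ * %UBI_VTBL_RECORD_SIZE_CRC zero bytes, computed with %UBI_CRC32_INIT as the
+ * initial value.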
+ */
+struct ubi_vtbl_record {
+ __be32 reserved_pebs;
+ __be32 alignment;
+ __be32 data_pad;
+ __u8 vol_type;
+ __u8 upd_marker;
+ __be16 name_len;
+ __u8 name[UBI_VOL_NAME_MAX+1];
+ __u8 flags;
+ __u8 padding[23];
+ __be32 crc;
+} __attribute__ ((packed));
+
+#endif /* !__UBI_MEDIA_H__ */
diff --git a/u-boot/drivers/mtd/ubi/ubi.h b/u-boot/drivers/mtd/ubi/ubi.h
new file mode 100644
index 0000000..bf77a15
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/ubi.h
@@ -0,0 +1,641 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __UBI_UBI_H__
+#define __UBI_UBI_H__
+
+#ifdef UBI_LINUX
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/ubi.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/ubi.h>
+
+#include "ubi-media.h"
+#include "scan.h"
+#include "debug.h"
+
+/* Maximum number of supported UBI devices */
+#define UBI_MAX_DEVICES 32
+
+/* UBI name used for character devices, sysfs, etc */
+#define UBI_NAME_STR "ubi"
+
+/* Normal UBI messages */
+#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
+/* UBI warning messages */
+#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
+ __func__, ##__VA_ARGS__)
+/* UBI error messages */
+#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
+ __func__, ##__VA_ARGS__)
+
+/* Lowest number of PEBs reserved for bad PEB handling */
+#define MIN_RESEVED_PEBS 2
+
+/* Background thread name pattern */
+#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
+
+/* This marker in the EBA table means that the LEB is un-mapped */
+#define UBI_LEB_UNMAPPED -1
+
+/*
+ * In case of errors, UBI tries to repeat the operation several times before
+ * returning an error. The constant below defines how many times UBI retries.
+ */
+#define UBI_IO_RETRIES 3
+
+/*
+ * Error codes returned by the I/O unit.
+ *
+ * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
+ * 0xFF bytes
+ * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a
+ * valid erase counter header, and the rest are %0xFF bytes
+ * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC)
+ * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or
+ * CRC)
+ * UBI_IO_BITFLIPS: bit-flips were detected and corrected
+ */
+enum {
+ UBI_IO_PEB_EMPTY = 1,
+ UBI_IO_PEB_FREE,
+ UBI_IO_BAD_EC_HDR,
+ UBI_IO_BAD_VID_HDR,
+ UBI_IO_BITFLIPS
+};
+
+/**
+ * struct ubi_wl_entry - wear-leveling entry.
+ * @rb: link in the corresponding RB-tree
+ * @ec: erase counter
+ * @pnum: physical eraseblock number
+ *
+ * This data structure is used in the WL unit. Each physical eraseblock has a
+ * corresponding &struct wl_entry object which may be kept in different
+ * RB-trees. See WL unit for details.
+ */
+struct ubi_wl_entry {
+ struct rb_node rb;
+ int ec;
+ int pnum;
+};
+
+/**
+ * struct ubi_ltree_entry - an entry in the lock tree.
+ * @rb: links RB-tree nodes
+ * @vol_id: volume ID of the locked logical eraseblock
+ * @lnum: locked logical eraseblock number
+ * @users: how many tasks are using this logical eraseblock or wait for it
+ * @mutex: read/write mutex to implement read/write access serialization to
+ * the (@vol_id, @lnum) logical eraseblock
+ *
+ * This data structure is used in the EBA unit to implement per-LEB locking.
+ * When a logical eraseblock is being locked, a corresponding
+ * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree).
+ * See EBA unit for details.
+ */
+struct ubi_ltree_entry {
+ struct rb_node rb;
+ int vol_id;
+ int lnum;
+ int users;
+ struct rw_semaphore mutex;
+};
+
+struct ubi_volume_desc;
+
+/**
+ * struct ubi_volume - UBI volume description data structure.
+ * @dev: device object to make use of the Linux device model
+ * @cdev: character device object to create character device
+ * @ubi: reference to the UBI device description object
+ * @vol_id: volume ID
+ * @ref_count: volume reference count
+ * @readers: number of users holding this volume in read-only mode
+ * @writers: number of users holding this volume in read-write mode
+ * @exclusive: whether somebody holds this volume in exclusive mode
+ *
+ * @reserved_pebs: how many physical eraseblocks are reserved for this volume
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @usable_leb_size: logical eraseblock size without padding
+ * @used_ebs: how many logical eraseblocks in this volume contain data
+ * @last_eb_bytes: how many bytes are stored in the last logical eraseblock
+ * @used_bytes: how many bytes of data this volume contains
+ * @alignment: volume alignment
+ * @data_pad: how many bytes are not used at the end of physical eraseblocks to
+ * satisfy the requested alignment
+ * @name_len: volume name length
+ * @name: volume name
+ *
+ * @upd_ebs: how many eraseblocks are expected to be updated
+ * @ch_lnum: LEB number which is being changed by the atomic LEB change
+ * operation
+ * @ch_dtype: data persistency type which is being changed by the atomic LEB
+ * change operation
+ * @upd_bytes: how many bytes are expected to be received for volume update or
+ * atomic LEB change
+ * @upd_received: how many bytes were already received for volume update or
+ * atomic LEB change
+ * @upd_buf: update buffer which is used to collect update data or data for
+ * atomic LEB change
+ *
+ * @eba_tbl: EBA table of this volume (LEB->PEB mapping)
+ * @checked: %1 if this static volume was checked
+ * @corrupted: %1 if the volume is corrupted (static volumes only)
+ * @upd_marker: %1 if the update marker is set for this volume
+ * @updating: %1 if the volume is being updated
+ * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
+ *
+ * @gluebi_desc: gluebi UBI volume descriptor
+ * @gluebi_refcount: reference count of the gluebi MTD device
+ * @gluebi_mtd: MTD device description object of the gluebi MTD device
+ *
+ * The @corrupted field indicates that the volume's contents are corrupted.
+ * Since UBI protects only static volumes, this field is not relevant to
+ * dynamic volumes - it is the user's responsibility to ensure their data
+ * integrity.
+ *
+ * The @upd_marker flag indicates that this volume is either being updated at
+ * the moment or is damaged because of an unclean reboot.
+ */
+struct ubi_volume {
+ struct device dev;
+ struct cdev cdev;
+ struct ubi_device *ubi;
+ int vol_id;
+ int ref_count;
+ int readers;
+ int writers;
+ int exclusive;
+
+ int reserved_pebs;
+ int vol_type;
+ int usable_leb_size;
+ int used_ebs;
+ int last_eb_bytes;
+ long long used_bytes;
+ int alignment;
+ int data_pad;
+ int name_len;
+ char name[UBI_VOL_NAME_MAX+1];
+
+ int upd_ebs;
+ int ch_lnum;
+ int ch_dtype;
+ long long upd_bytes;
+ long long upd_received;
+ void *upd_buf;
+
+ int *eba_tbl;
+ unsigned int checked:1;
+ unsigned int corrupted:1;
+ unsigned int upd_marker:1;
+ unsigned int updating:1;
+ unsigned int changing_leb:1;
+
+#ifdef CONFIG_MTD_UBI_GLUEBI
+ /*
+ * Gluebi-related stuff may be compiled out.
+ * TODO: this should not be built into UBI but should be a separate
+ * ubimtd driver which works on top of UBI and emulates MTD devices.
+ */
+ struct ubi_volume_desc *gluebi_desc;
+ int gluebi_refcount;
+ struct mtd_info gluebi_mtd;
+#endif
+};
+
+/**
+ * struct ubi_volume_desc - descriptor of the UBI volume returned when it is
+ * opened.
+ * @vol: reference to the corresponding volume description object
+ * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE)
+ */
+struct ubi_volume_desc {
+ struct ubi_volume *vol;
+ int mode;
+};
+
+struct ubi_wl_entry;
+
+/**
+ * struct ubi_device - UBI device description structure
+ * @dev: UBI device object to make use of the Linux device model
+ * @cdev: character device object to create character device
+ * @ubi_num: UBI device number
+ * @ubi_name: UBI device name
+ * @vol_count: number of volumes in this UBI device
+ * @volumes: volumes of this UBI device
+ * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, @beb_rsvd_pebs,
+ * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
+ * @vol->readers, @vol->writers, @vol->exclusive,
+ * @vol->ref_count, @vol->mapping and @vol->eba_tbl.
+ * @ref_count: count of references on the UBI device
+ *
+ * @rsvd_pebs: count of reserved physical eraseblocks
+ * @avail_pebs: count of available physical eraseblocks
+ * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
+ * handling
+ * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
+ *
+ * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
+ * of UBI initialization
+ * @vtbl_slots: how many slots are available in the volume table
+ * @vtbl_size: size of the volume table in bytes
+ * @vtbl: in-RAM volume table copy
+ * @volumes_mutex: protects on-flash volume table and serializes volume
+ * changes, like creation, deletion, update, resize
+ *
+ * @max_ec: current highest erase counter value
+ * @mean_ec: current mean erase counter value
+ *
+ * @global_sqnum: global sequence number
+ * @ltree_lock: protects the lock tree and @global_sqnum
+ * @ltree: the lock tree
+ * @alc_mutex: serializes "atomic LEB change" operations
+ *
+ * @used: RB-tree of used physical eraseblocks
+ * @free: RB-tree of free physical eraseblocks
+ * @scrub: RB-tree of physical eraseblocks which need scrubbing
+ * @prot: protection trees
+ * @prot.pnum: protection tree indexed by physical eraseblock numbers
+ * @prot.aec: protection tree indexed by absolute erase counter value
+ * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
+ * @move_to, @move_to_put, @erase_pending, @wl_scheduled, and @works
+ * fields
+ * @move_mutex: serializes eraseblock moves
+ * @wl_scheduled: non-zero if the wear-leveling was scheduled
+ * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
+ * physical eraseblock
+ * @abs_ec: absolute erase counter
+ * @move_from: physical eraseblock from where the data is being moved
+ * @move_to: physical eraseblock where the data is being moved to
+ * @move_to_put: if the "to" PEB was put
+ * @works: list of pending works
+ * @works_count: count of pending works
+ * @bgt_thread: background thread description object
+ * @thread_enabled: if the background thread is enabled
+ * @bgt_name: background thread name
+ *
+ * @flash_size: underlying MTD device size (in bytes)
+ * @peb_count: count of physical eraseblocks on the MTD device
+ * @peb_size: physical eraseblock size
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @good_peb_count: count of good physical eraseblocks
+ * @min_io_size: minimal input/output unit size of the underlying MTD device
+ * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
+ * @ro_mode: if the UBI device is in read-only mode
+ * @leb_size: logical eraseblock size
+ * @leb_start: starting offset of logical eraseblocks within physical
+ * eraseblocks
+ * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size
+ * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size
+ * @vid_hdr_offset: starting offset of the volume identifier header (might be
+ * unaligned)
+ * @vid_hdr_aloffset: starting offset of the VID header aligned to
+ * @hdrs_min_io_size
+ * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
+ * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
+ * not
+ * @mtd: MTD device descriptor
+ *
+ * @peb_buf1: a buffer of PEB size used for different purposes
+ * @peb_buf2: another buffer of PEB size used for different purposes
+ * @buf_mutex: protects @peb_buf1 and @peb_buf2
+ * @dbg_peb_buf: buffer of PEB size used for debugging
+ * @dbg_buf_mutex: protects @dbg_peb_buf
+ */
+struct ubi_device {
+ struct cdev cdev;
+ struct device dev;
+ int ubi_num;
+ char ubi_name[sizeof(UBI_NAME_STR)+5];
+ int vol_count;
+ struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
+ spinlock_t volumes_lock;
+ int ref_count;
+
+ int rsvd_pebs;
+ int avail_pebs;
+ int beb_rsvd_pebs;
+ int beb_rsvd_level;
+
+ int autoresize_vol_id;
+ int vtbl_slots;
+ int vtbl_size;
+ struct ubi_vtbl_record *vtbl;
+ struct mutex volumes_mutex;
+
+ int max_ec;
+ /* TODO: mean_ec is not updated run-time, fix */
+ int mean_ec;
+
+ /* EBA unit's stuff */
+ unsigned long long global_sqnum;
+ spinlock_t ltree_lock;
+ struct rb_root ltree;
+ struct mutex alc_mutex;
+
+ /* Wear-leveling unit's stuff */
+ struct rb_root used;
+ struct rb_root free;
+ struct rb_root scrub;
+ struct {
+ struct rb_root pnum;
+ struct rb_root aec;
+ } prot;
+ spinlock_t wl_lock;
+ struct mutex move_mutex;
+ struct rw_semaphore work_sem;
+ int wl_scheduled;
+ struct ubi_wl_entry **lookuptbl;
+ unsigned long long abs_ec;
+ struct ubi_wl_entry *move_from;
+ struct ubi_wl_entry *move_to;
+ int move_to_put;
+ struct list_head works;
+ int works_count;
+ struct task_struct *bgt_thread;
+ int thread_enabled;
+ char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
+
+ /* I/O unit's stuff */
+ long long flash_size;
+ int peb_count;
+ int peb_size;
+ int bad_peb_count;
+ int good_peb_count;
+ int min_io_size;
+ int hdrs_min_io_size;
+ int ro_mode;
+ int leb_size;
+ int leb_start;
+ int ec_hdr_alsize;
+ int vid_hdr_alsize;
+ int vid_hdr_offset;
+ int vid_hdr_aloffset;
+ int vid_hdr_shift;
+ int bad_allowed;
+ struct mtd_info *mtd;
+
+ void *peb_buf1;
+ void *peb_buf2;
+ struct mutex buf_mutex;
+ struct mutex ckvol_mutex;
+#ifdef CONFIG_MTD_UBI_DEBUG
+ void *dbg_peb_buf;
+ struct mutex dbg_buf_mutex;
+#endif
+};
+
+extern struct kmem_cache *ubi_wl_entry_slab;
+extern struct file_operations ubi_ctrl_cdev_operations;
+extern struct file_operations ubi_cdev_operations;
+extern struct file_operations ubi_vol_cdev_operations;
+extern struct class *ubi_class;
+extern struct mutex ubi_devices_mutex;
+
+/* vtbl.c */
+int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
+ struct ubi_vtbl_record *vtbl_rec);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
+
+/* vmt.c */
+int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
+int ubi_remove_volume(struct ubi_volume_desc *desc);
+int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+
+/* upd.c */
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes);
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count);
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ const struct ubi_leb_change_req *req);
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count);
+
+/* misc.c */
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length);
+int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_calculate_reserved(struct ubi_device *ubi);
+
+/* gluebi.c */
+#ifdef CONFIG_MTD_UBI_GLUEBI
+int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol);
+int ubi_destroy_gluebi(struct ubi_volume *vol);
+void ubi_gluebi_updated(struct ubi_volume *vol);
+#else
+#define ubi_create_gluebi(ubi, vol) 0
+#define ubi_destroy_gluebi(vol) 0
+#define ubi_gluebi_updated(vol)
+#endif
+
+/* eba.c */
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum);
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check);
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ const void *buf, int offset, int len, int dtype);
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype,
+ int used_ebs);
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype);
+int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ struct ubi_vid_hdr *vid_hdr);
+int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+void ubi_eba_close(const struct ubi_device *ubi);
+
+/* wl.c */
+int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
+int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi);
+int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
+int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+void ubi_wl_close(struct ubi_device *ubi);
+int ubi_thread(void *u);
+
+/* io.c */
+int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
+ int len);
+int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
+ int len);
+int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture);
+int ubi_io_is_bad(const struct ubi_device *ubi, int pnum);
+int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum);
+int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr, int verbose);
+int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr);
+int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr, int verbose);
+int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr);
+
+/* build.c */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
+int ubi_detach_mtd_dev(int ubi_num, int anyway);
+struct ubi_device *ubi_get_device(int ubi_num);
+void ubi_put_device(struct ubi_device *ubi);
+struct ubi_device *ubi_get_by_major(int major);
+int ubi_major2num(int major);
+
+/*
+ * ubi_rb_for_each_entry - walk an RB-tree.
+ * @rb: a pointer to type 'struct rb_node' to use as a loop counter
+ * @pos: a pointer to RB-tree entry type to use as a loop counter
+ * @root: RB-tree's root
+ * @member: the name of the 'struct rb_node' within the RB-tree entry
+ */
+#define ubi_rb_for_each_entry(rb, pos, root, member) \
+ for (rb = rb_first(root), \
+ pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \
+ rb; \
+ rb = rb_next(rb), pos = container_of(rb, typeof(*pos), member))
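+
+/*
+ * Illustrative sketch (not part of the original code): typical use of the
+ * iterator above.  'struct ubi_example_entry', its 'node' member and the
+ * helper name are hypothetical and exist only for this example; real users
+ * such as the wear-leveling unit supply their own entry type which embeds a
+ * 'struct rb_node'.
+ */
+struct ubi_example_entry {
+	struct rb_node node;	/* links this entry into an RB-tree */
+	int pnum;		/* physical eraseblock number */
+};
+
+static inline void ubi_example_walk_tree(struct rb_root *root)
+{
+	struct rb_node *rb;
+	struct ubi_example_entry *e;
+
+	/* Visit every entry of the tree in sorted order */
+	ubi_rb_for_each_entry(rb, e, root, node)
+		ubi_msg("found entry for PEB %d", e->pnum);
+}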
+
+/**
+ * ubi_zalloc_vid_hdr - allocate a volume identifier header object.
+ * @ubi: UBI device description object
+ * @gfp_flags: GFP flags to allocate with
+ *
+ * This function returns a pointer to the newly allocated and zero-filled
+ * volume identifier header object in case of success and %NULL in case of
+ * failure.
+ */
+static inline struct ubi_vid_hdr *
+ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags)
+{
+ void *vid_hdr;
+
+ vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags);
+ if (!vid_hdr)
+ return NULL;
+
+ /*
+ * VID headers may be stored at un-aligned flash offsets, so we shift
+ * the pointer.
+ */
+ return vid_hdr + ubi->vid_hdr_shift;
+}
+
+/**
+ * ubi_free_vid_hdr - free a volume identifier header object.
+ * @ubi: UBI device description object
+ * @vid_hdr: the object to free
+ */
+static inline void ubi_free_vid_hdr(const struct ubi_device *ubi,
+ struct ubi_vid_hdr *vid_hdr)
+{
+ void *p = vid_hdr;
+
+ if (!p)
+ return;
+
+ kfree(p - ubi->vid_hdr_shift);
+}
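+
+/*
+ * Illustrative sketch (not part of the original code): the usual pairing of
+ * the two helpers above.  The helper name is hypothetical; a real caller of
+ * this pattern is, e.g., create_vtbl() in vtbl.c.  The GFP_KERNEL flag and
+ * the non-verbose read are choices made only for this example.
+ */
+static inline int ubi_example_read_one_vid_hdr(struct ubi_device *ubi,
+					       int pnum)
+{
+	struct ubi_vid_hdr *vid_hdr;
+	int err;
+
+	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vid_hdr)
+		return -ENOMEM;
+
+	/* Read the VID header of physical eraseblock @pnum (non-verbose) */
+	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 0);
+
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	return err;
+}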
+
+/*
+ * This function is equivalent to 'ubi_io_read()', but @offset is relative to
+ * the beginning of the logical eraseblock, not to the beginning of the
+ * physical eraseblock.
+ */
+static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf,
+ int pnum, int offset, int len)
+{
+ ubi_assert(offset >= 0);
+ return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len);
+}
+
+/*
+ * This function is equivalent to 'ubi_io_write()', but @offset is relative to
+ * the beginning of the logical eraseblock, not to the beginning of the
+ * physical eraseblock.
+ */
+static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
+ int pnum, int offset, int len)
+{
+ ubi_assert(offset >= 0);
+ return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
+}
+
+/**
+ * ubi_ro_mode - switch to read-only mode.
+ * @ubi: UBI device description object
+ */
+static inline void ubi_ro_mode(struct ubi_device *ubi)
+{
+ if (!ubi->ro_mode) {
+ ubi->ro_mode = 1;
+ ubi_warn("switch to read-only mode");
+ }
+}
+
+/**
+ * vol_id2idx - get table index by volume ID.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ */
+static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id)
+{
+ if (vol_id >= UBI_INTERNAL_VOL_START)
+ return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots;
+ else
+ return vol_id;
+}
+
+/**
+ * idx2vol_id - get volume ID by table index.
+ * @ubi: UBI device description object
+ * @idx: table index
+ */
+static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
+{
+ if (idx >= ubi->vtbl_slots)
+ return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START;
+ else
+ return idx;
+}
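+
+/*
+ * Illustrative helper (not part of the original code): a trivial sanity check
+ * showing that vol_id2idx() and idx2vol_id() above are inverse mappings for
+ * both normal and internal volume IDs.  The helper name is hypothetical.
+ */
+static inline int ubi_example_vol_id_roundtrips(const struct ubi_device *ubi,
+						int vol_id)
+{
+	return idx2vol_id(ubi, vol_id2idx(ubi, vol_id)) == vol_id;
+}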
+
+#endif /* !__UBI_UBI_H__ */
diff --git a/u-boot/drivers/mtd/ubi/upd.c b/u-boot/drivers/mtd/ubi/upd.c
new file mode 100644
index 0000000..5f7ed7b
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/upd.c
@@ -0,0 +1,441 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ *
+ * Jan 2007: Alexander Schmidt, hacked per-volume update.
+ */
+
+/*
+ * This file contains implementation of the volume update and atomic LEB change
+ * functionality.
+ *
+ * The update operation is based on the per-volume update marker which is
+ * stored in the volume table. The update marker is set before the update
+ * starts, and removed after the update has been finished. So if the update was
+ * interrupted by an unclean re-boot or due to some other reasons, the update
+ * marker stays on the flash media and UBI finds it when it attaches the MTD
+ * device next time. If the update marker is set for a volume, the volume is
+ * treated as damaged and most I/O operations are prohibited. Only a new update
+ * operation is allowed.
+ *
+ * Note, in general it is possible to implement the update operation as a
+ * transaction with a roll-back capability.
+ */
+
+#ifdef UBI_LINUX
+#include <linux/err.h>
+#include <asm/uaccess.h>
+#include <asm/div64.h>
+#endif
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+/**
+ * set_update_marker - set update marker.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function sets the update marker flag for volume @vol. Returns zero
+ * in case of success and a negative error code in case of failure.
+ */
+static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err;
+ struct ubi_vtbl_record vtbl_rec;
+
+ dbg_msg("set update marker for volume %d", vol->vol_id);
+
+ if (vol->upd_marker) {
+ ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
+ dbg_msg("already set");
+ return 0;
+ }
+
+ memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
+ sizeof(struct ubi_vtbl_record));
+ vtbl_rec.upd_marker = 1;
+
+ mutex_lock(&ubi->volumes_mutex);
+ err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+ mutex_unlock(&ubi->volumes_mutex);
+ vol->upd_marker = 1;
+ return err;
+}
+
+/**
+ * clear_update_marker - clear update marker.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @bytes: new data size in bytes
+ *
+ * This function clears the update marker for volume @vol, sets new volume
+ * data size and clears the "corrupted" flag (static volumes only). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes)
+{
+ int err;
+ uint64_t tmp;
+ struct ubi_vtbl_record vtbl_rec;
+
+ dbg_msg("clear update marker for volume %d", vol->vol_id);
+
+ memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
+ sizeof(struct ubi_vtbl_record));
+ ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
+ vtbl_rec.upd_marker = 0;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ vol->corrupted = 0;
+ vol->used_bytes = tmp = bytes;
+ vol->last_eb_bytes = do_div(tmp, vol->usable_leb_size);
+ vol->used_ebs = tmp;
+ if (vol->last_eb_bytes)
+ vol->used_ebs += 1;
+ else
+ vol->last_eb_bytes = vol->usable_leb_size;
+ }
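+	/*
+	 * Worked example (illustrative, hypothetical numbers): with a usable
+	 * LEB size of 126976 bytes and @bytes == 260000, the division above
+	 * gives two full eraseblocks plus a 6048-byte remainder, so @used_ebs
+	 * becomes 3 and @last_eb_bytes 6048.  Had @bytes been an exact
+	 * multiple of the LEB size, @last_eb_bytes would have been set to the
+	 * full usable LEB size instead.
+	 */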
+
+ mutex_lock(&ubi->volumes_mutex);
+ err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+ mutex_unlock(&ubi->volumes_mutex);
+ vol->upd_marker = 0;
+ return err;
+}
+
+/**
+ * ubi_start_update - start volume update.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @bytes: update bytes
+ *
+ * This function starts volume update operation. If @bytes is zero, the volume
+ * is just wiped out. Returns zero in case of success and a negative error code
+ * in case of failure.
+ */
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes)
+{
+ int i, err;
+ uint64_t tmp;
+
+ dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes);
+ ubi_assert(!vol->updating && !vol->changing_leb);
+ vol->updating = 1;
+
+ err = set_update_marker(ubi, vol);
+ if (err)
+ return err;
+
+ /* Before updating - wipe out the volume */
+ for (i = 0; i < vol->reserved_pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, i);
+ if (err)
+ return err;
+ }
+
+ if (bytes == 0) {
+ err = clear_update_marker(ubi, vol, 0);
+ if (err)
+ return err;
+ err = ubi_wl_flush(ubi);
+ if (!err)
+ vol->updating = 0;
+ }
+
+ vol->upd_buf = vmalloc(ubi->leb_size);
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
+ tmp = bytes;
+ vol->upd_ebs = !!do_div(tmp, vol->usable_leb_size);
+ vol->upd_ebs += tmp;
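+	/*
+	 * Worked example (illustrative, hypothetical numbers): with a usable
+	 * LEB size of 126976 bytes, an update of 1000000 bytes gives a
+	 * quotient of 7 and a non-zero remainder, so @upd_ebs - the number of
+	 * eraseblocks expected to be updated - ends up as 8.
+	 */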
+ vol->upd_bytes = bytes;
+ vol->upd_received = 0;
+ return 0;
+}
+
+/**
+ * ubi_start_leb_change - start atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @req: operation request
+ *
+ * This function starts atomic LEB change operation. Returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ const struct ubi_leb_change_req *req)
+{
+ ubi_assert(!vol->updating && !vol->changing_leb);
+
+ dbg_msg("start changing LEB %d:%d, %u bytes",
+ vol->vol_id, req->lnum, req->bytes);
+ if (req->bytes == 0)
+ return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
+ req->dtype);
+
+ vol->upd_bytes = req->bytes;
+ vol->upd_received = 0;
+ vol->changing_leb = 1;
+ vol->ch_lnum = req->lnum;
+ vol->ch_dtype = req->dtype;
+
+ vol->upd_buf = vmalloc(req->bytes);
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * write_leb - write update data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: data size
+ * @used_ebs: how many logical eraseblocks will this volume contain (static
+ * volumes only)
+ *
+ * This function writes update data to the corresponding logical eraseblock.
+ * In the case of a dynamic volume, this function checks whether the data
+ * contains 0xFF bytes at the end. If yes, the 0xFF bytes are cut and not
+ * written, so if the whole buffer contains only 0xFF bytes, the LEB is left
+ * unmapped.
+ *
+ * The reason why we skip the trailing 0xFF bytes in the case of a dynamic
+ * volume is that we want to make sure that more data may be appended to the
+ * logical eraseblock in the future. Indeed, writing 0xFF bytes may have side
+ * effects and this PEB won't be writable anymore. So if one writes a
+ * file-system image to a UBI volume where 0xFFs mean free space, UBI makes
+ * sure this free space stays writable after the update.
+ *
+ * We do not do this for static volumes because they are read-only anyway.
+ * It also could not be done for them, because for static volumes we have to
+ * store the per-LEB CRC and the correct data length.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int len, int used_ebs)
+{
+ int err;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ int l = ALIGN(len, ubi->min_io_size);
+
+ memset(buf + len, 0xFF, l - len);
+ len = ubi_calc_data_len(ubi, buf, l);
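+		/*
+		 * Worked example (illustrative, hypothetical numbers): with a
+		 * 2048-byte @min_io_size, a 5000-byte chunk is first padded
+		 * with 0xFF up to 6144 bytes; if everything after the first
+		 * 3144 bytes is 0xFF, ubi_calc_data_len() trims the write
+		 * back down to 4096 bytes.
+		 */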
+ if (len == 0) {
+			dbg_msg("all %d bytes contain 0xFF - skip", l);
+ return 0;
+ }
+
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
+ } else {
+ /*
+ * When writing static volume, and this is the last logical
+ * eraseblock, the length (@len) does not have to be aligned to
+ * the minimal flash I/O unit. The 'ubi_eba_write_leb_st()'
+ * function accepts exact (unaligned) length and stores it in
+ * the VID header. And it takes care of proper alignment by
+ * padding the buffer. Here we just make sure the padding will
+ * contain zeros, not random trash.
+ */
+ memset(buf + len, 0, vol->usable_leb_size - len);
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
+ UBI_UNKNOWN, used_ebs);
+ }
+
+ return err;
+}
+
+/**
+ * ubi_more_update_data - write more update data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @buf: write data (user-space memory buffer)
+ * @count: how many bytes to write
+ *
+ * This function writes more data to the volume which is being updated. It may
+ * be called an arbitrary number of times until all the update data arrives.
+ * This function returns %0 in case of success, the number of bytes written
+ * during the last call if the whole volume update has been successfully
+ * finished, and a negative error code in case of failure.
+ */
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count)
+{
+ uint64_t tmp;
+ int lnum, offs, err = 0, len, to_write = count;
+
+ dbg_msg("write %d of %lld bytes, %lld already passed",
+ count, vol->upd_bytes, vol->upd_received);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ tmp = vol->upd_received;
+ offs = do_div(tmp, vol->usable_leb_size);
+ lnum = tmp;
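+	/*
+	 * Worked example (illustrative, hypothetical numbers): with a usable
+	 * LEB size of 126976 bytes and 300000 bytes already received, the
+	 * division above yields lnum == 2 and offs == 46048, i.e. the next
+	 * chunk continues in the middle of the third logical eraseblock.
+	 */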
+
+ if (vol->upd_received + count > vol->upd_bytes)
+ to_write = count = vol->upd_bytes - vol->upd_received;
+
+ /*
+	 * When updating volumes, we accumulate a whole logical eraseblock of
+ * data and write it at once.
+ */
+ if (offs != 0) {
+ /*
+ * This is a write to the middle of the logical eraseblock. We
+ * copy the data to our update buffer and wait for more data or
+ * flush it if the whole eraseblock is written or the update
+ * is finished.
+ */
+
+ len = vol->usable_leb_size - offs;
+ if (len > count)
+ len = count;
+
+ err = copy_from_user(vol->upd_buf + offs, buf, len);
+ if (err)
+ return -EFAULT;
+
+ if (offs + len == vol->usable_leb_size ||
+ vol->upd_received + len == vol->upd_bytes) {
+ int flush_len = offs + len;
+
+ /*
+ * OK, we gathered either the whole eraseblock or this
+ * is the last chunk, it's time to flush the buffer.
+ */
+ ubi_assert(flush_len <= vol->usable_leb_size);
+ err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
+ vol->upd_ebs);
+ if (err)
+ return err;
+ }
+
+ vol->upd_received += len;
+ count -= len;
+ buf += len;
+ lnum += 1;
+ }
+
+ /*
+ * If we've got more to write, let's continue. At this point we know we
+ * are starting from the beginning of an eraseblock.
+ */
+ while (count) {
+ if (count > vol->usable_leb_size)
+ len = vol->usable_leb_size;
+ else
+ len = count;
+
+ err = copy_from_user(vol->upd_buf, buf, len);
+ if (err)
+ return -EFAULT;
+
+ if (len == vol->usable_leb_size ||
+ vol->upd_received + len == vol->upd_bytes) {
+ err = write_leb(ubi, vol, lnum, vol->upd_buf,
+ len, vol->upd_ebs);
+ if (err)
+ break;
+ }
+
+ vol->upd_received += len;
+ count -= len;
+ lnum += 1;
+ buf += len;
+ }
+
+ ubi_assert(vol->upd_received <= vol->upd_bytes);
+ if (vol->upd_received == vol->upd_bytes) {
+ /* The update is finished, clear the update marker */
+ err = clear_update_marker(ubi, vol, vol->upd_bytes);
+ if (err)
+ return err;
+ err = ubi_wl_flush(ubi);
+ if (err == 0) {
+ vol->updating = 0;
+ err = to_write;
+ vfree(vol->upd_buf);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ubi_more_leb_change_data - accept more data for atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @buf: write data (user-space memory buffer)
+ * @count: how many bytes to write
+ *
+ * This function accepts more data for the volume which is under the
+ * "atomic LEB change" operation. It may be called an arbitrary number of
+ * times until all data arrives. This function returns %0 in case of success,
+ * the number of bytes written during the last call if the whole "atomic LEB
+ * change" operation has been successfully finished, and a negative error code
+ * in case of failure.
+ */
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count)
+{
+ int err;
+
+ dbg_msg("write %d of %lld bytes, %lld already passed",
+ count, vol->upd_bytes, vol->upd_received);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (vol->upd_received + count > vol->upd_bytes)
+ count = vol->upd_bytes - vol->upd_received;
+
+ err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
+ if (err)
+ return -EFAULT;
+
+ vol->upd_received += count;
+
+ if (vol->upd_received == vol->upd_bytes) {
+ int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
+
+ memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes);
+ len = ubi_calc_data_len(ubi, vol->upd_buf, len);
+ err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
+ vol->upd_buf, len, UBI_UNKNOWN);
+ if (err)
+ return err;
+ }
+
+ ubi_assert(vol->upd_received <= vol->upd_bytes);
+ if (vol->upd_received == vol->upd_bytes) {
+ vol->changing_leb = 0;
+ err = count;
+ vfree(vol->upd_buf);
+ }
+
+ return err;
+}
diff --git a/u-boot/drivers/mtd/ubi/vmt.c b/u-boot/drivers/mtd/ubi/vmt.c
new file mode 100644
index 0000000..061da64
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/vmt.c
@@ -0,0 +1,862 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file contains implementation of volume creation, deletion, updating and
+ * resizing.
+ */
+
+#ifdef UBI_LINUX
+#include <linux/err.h>
+#include <asm/div64.h>
+#endif
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+static void paranoid_check_volumes(struct ubi_device *ubi);
+#else
+#define paranoid_check_volumes(ubi)
+#endif
+
+#ifdef UBI_LINUX
+static ssize_t vol_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+/* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */
+static struct device_attribute attr_vol_reserved_ebs =
+ __ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_type =
+ __ATTR(type, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_name =
+ __ATTR(name, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_corrupted =
+ __ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_alignment =
+ __ATTR(alignment, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_usable_eb_size =
+ __ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_data_bytes =
+ __ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_upd_marker =
+ __ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL);
+
+/*
+ * "Show" method for files in '/<sysfs>/class/ubi/ubiX_Y/'.
+ *
+ * Consider a situation:
+ * A. process 1 opens a sysfs file related to volume Y, say
+ * /<sysfs>/class/ubi/ubiX_Y/reserved_ebs;
+ * B. process 2 removes volume Y;
+ * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
+ *
+ * In this situation, this function will return %-ENODEV because it will find
+ * out that the volume was removed from the @ubi->volumes array.
+ */
+static ssize_t vol_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+ struct ubi_device *ubi;
+
+ ubi = ubi_get_device(vol->ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ spin_lock(&ubi->volumes_lock);
+ if (!ubi->volumes[vol->vol_id]) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_put_device(ubi);
+ return -ENODEV;
+ }
+ /* Take a reference to prevent volume removal */
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ if (attr == &attr_vol_reserved_ebs)
+ ret = sprintf(buf, "%d\n", vol->reserved_pebs);
+ else if (attr == &attr_vol_type) {
+ const char *tp;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ tp = "dynamic";
+ else
+ tp = "static";
+ ret = sprintf(buf, "%s\n", tp);
+ } else if (attr == &attr_vol_name)
+ ret = sprintf(buf, "%s\n", vol->name);
+ else if (attr == &attr_vol_corrupted)
+ ret = sprintf(buf, "%d\n", vol->corrupted);
+ else if (attr == &attr_vol_alignment)
+ ret = sprintf(buf, "%d\n", vol->alignment);
+ else if (attr == &attr_vol_usable_eb_size)
+ ret = sprintf(buf, "%d\n", vol->usable_leb_size);
+ else if (attr == &attr_vol_data_bytes)
+ ret = sprintf(buf, "%lld\n", vol->used_bytes);
+ else if (attr == &attr_vol_upd_marker)
+ ret = sprintf(buf, "%d\n", vol->upd_marker);
+ else
+ /* This must be a bug */
+ ret = -EINVAL;
+
+ /* We've done the operation, drop volume and UBI device references */
+ spin_lock(&ubi->volumes_lock);
+ vol->ref_count -= 1;
+ ubi_assert(vol->ref_count >= 0);
+ spin_unlock(&ubi->volumes_lock);
+ ubi_put_device(ubi);
+ return ret;
+}
+#endif
+
+/* Release method for volume devices */
+static void vol_release(struct device *dev)
+{
+ struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+
+ kfree(vol);
+}
+
+#ifdef UBI_LINUX
+/**
+ * volume_sysfs_init - initialize sysfs for new volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ *
+ * Note, this function does not free allocated resources in case of failure -
+ * the caller does it. This is because this would cause release() here and the
+ * caller would oops.
+ */
+static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err;
+
+ err = device_create_file(&vol->dev, &attr_vol_reserved_ebs);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_type);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_name);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_corrupted);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_alignment);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_usable_eb_size);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_data_bytes);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_upd_marker);
+ return err;
+}
+
+/**
+ * volume_sysfs_close - close sysfs for a volume.
+ * @vol: volume description object
+ */
+static void volume_sysfs_close(struct ubi_volume *vol)
+{
+ device_remove_file(&vol->dev, &attr_vol_upd_marker);
+ device_remove_file(&vol->dev, &attr_vol_data_bytes);
+ device_remove_file(&vol->dev, &attr_vol_usable_eb_size);
+ device_remove_file(&vol->dev, &attr_vol_alignment);
+ device_remove_file(&vol->dev, &attr_vol_corrupted);
+ device_remove_file(&vol->dev, &attr_vol_name);
+ device_remove_file(&vol->dev, &attr_vol_type);
+ device_remove_file(&vol->dev, &attr_vol_reserved_ebs);
+ device_unregister(&vol->dev);
+}
+#endif
+
+/**
+ * ubi_create_volume - create volume.
+ * @ubi: UBI device description object
+ * @req: volume creation request
+ *
+ * This function creates the volume described by @req. If @req->vol_id is
+ * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new
+ * volume and saves it in @req->vol_id. Returns zero in case of success and a
+ * negative error code in case of failure. Note, the caller has to have the
+ * @ubi->volumes_mutex locked.
+ */
+int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
+{
+ int i, err, vol_id = req->vol_id, dont_free = 0;
+ struct ubi_volume *vol;
+ struct ubi_vtbl_record vtbl_rec;
+ uint64_t bytes;
+ dev_t dev;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol_id == UBI_VOL_NUM_AUTO) {
+ /* Find unused volume ID */
+ dbg_msg("search for vacant volume ID");
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (!ubi->volumes[i]) {
+ vol_id = i;
+ break;
+ }
+
+ if (vol_id == UBI_VOL_NUM_AUTO) {
+ dbg_err("out of volume IDs");
+ err = -ENFILE;
+ goto out_unlock;
+ }
+ req->vol_id = vol_id;
+ }
+
+ dbg_msg("volume ID %d, %llu bytes, type %d, name %s",
+ vol_id, (unsigned long long)req->bytes,
+ (int)req->vol_type, req->name);
+
+ /* Ensure that this volume does not exist */
+ err = -EEXIST;
+ if (ubi->volumes[vol_id]) {
+ dbg_err("volume %d already exists", vol_id);
+ goto out_unlock;
+ }
+
+ /* Ensure that the name is unique */
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i] &&
+ ubi->volumes[i]->name_len == req->name_len &&
+ !strcmp(ubi->volumes[i]->name, req->name)) {
+ dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
+ goto out_unlock;
+ }
+
+ /* Calculate how many eraseblocks are requested */
+ vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
+ bytes = req->bytes;
+ if (do_div(bytes, vol->usable_leb_size))
+ vol->reserved_pebs = 1;
+ vol->reserved_pebs += bytes;
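+	/*
+	 * Worked example (illustrative, hypothetical numbers): with a usable
+	 * LEB size of 126976 bytes, a request for 4194304 bytes needs 33 full
+	 * eraseblocks plus a 4096-byte remainder, so 34 PEBs are reserved.
+	 */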
+
+ /* Reserve physical eraseblocks */
+ if (vol->reserved_pebs > ubi->avail_pebs) {
+ dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
+ err = -ENOSPC;
+ goto out_unlock;
+ }
+ ubi->avail_pebs -= vol->reserved_pebs;
+ ubi->rsvd_pebs += vol->reserved_pebs;
+ spin_unlock(&ubi->volumes_lock);
+
+ vol->vol_id = vol_id;
+ vol->alignment = req->alignment;
+ vol->data_pad = ubi->leb_size % vol->alignment;
+ vol->vol_type = req->vol_type;
+ vol->name_len = req->name_len;
+ memcpy(vol->name, req->name, vol->name_len + 1);
+ vol->ubi = ubi;
+
+ /*
+ * Finish all pending erases because there may be some LEBs belonging
+ * to the same volume ID.
+ */
+ err = ubi_wl_flush(ubi);
+ if (err)
+ goto out_acc;
+
+ vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL);
+ if (!vol->eba_tbl) {
+ err = -ENOMEM;
+ goto out_acc;
+ }
+
+ for (i = 0; i < vol->reserved_pebs; i++)
+ vol->eba_tbl[i] = UBI_LEB_UNMAPPED;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ } else {
+ bytes = vol->used_bytes;
+ vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size);
+ vol->used_ebs = bytes;
+ if (vol->last_eb_bytes)
+ vol->used_ebs += 1;
+ else
+ vol->last_eb_bytes = vol->usable_leb_size;
+ }
+
+ /* Register character device for the volume */
+ cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
+ vol->cdev.owner = THIS_MODULE;
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
+ if (err) {
+ ubi_err("cannot add character device");
+ goto out_mapping;
+ }
+
+ err = ubi_create_gluebi(ubi, vol);
+ if (err)
+ goto out_cdev;
+
+ vol->dev.release = vol_release;
+ vol->dev.parent = &ubi->dev;
+ vol->dev.devt = dev;
+ vol->dev.class = ubi_class;
+
+ sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
+ err = device_register(&vol->dev);
+ if (err) {
+ ubi_err("cannot register device");
+ goto out_gluebi;
+ }
+
+ err = volume_sysfs_init(ubi, vol);
+ if (err)
+ goto out_sysfs;
+
+ /* Fill volume table record */
+ memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
+ vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
+ vtbl_rec.alignment = cpu_to_be32(vol->alignment);
+ vtbl_rec.data_pad = cpu_to_be32(vol->data_pad);
+ vtbl_rec.name_len = cpu_to_be16(vol->name_len);
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ vtbl_rec.vol_type = UBI_VID_DYNAMIC;
+ else
+ vtbl_rec.vol_type = UBI_VID_STATIC;
+ memcpy(vtbl_rec.name, vol->name, vol->name_len + 1);
+
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_sysfs;
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ paranoid_check_volumes(ubi);
+ return 0;
+
+out_sysfs:
+ /*
+	 * We have registered our device, we should not free the volume
+ * description object in this function in case of an error - it is
+ * freed by the release function.
+ *
+ * Get device reference to prevent the release function from being
+ * called just after sysfs has been closed.
+ */
+ dont_free = 1;
+ get_device(&vol->dev);
+ volume_sysfs_close(vol);
+out_gluebi:
+ if (ubi_destroy_gluebi(vol))
+ dbg_err("cannot destroy gluebi for volume %d:%d",
+ ubi->ubi_num, vol_id);
+out_cdev:
+ cdev_del(&vol->cdev);
+out_mapping:
+ kfree(vol->eba_tbl);
+out_acc:
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= vol->reserved_pebs;
+ ubi->avail_pebs += vol->reserved_pebs;
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ if (dont_free)
+ put_device(&vol->dev);
+ else
+ kfree(vol);
+ ubi_err("cannot create volume %d, error %d", vol_id, err);
+ return err;
+}
+
+/**
+ * ubi_remove_volume - remove volume.
+ * @desc: volume descriptor
+ *
+ * This function removes volume described by @desc. The volume has to be opened
+ * in "exclusive" mode. Returns zero in case of success and a negative error
+ * code in case of failure. The caller has to have the @ubi->volumes_mutex
+ * locked.
+ */
+int ubi_remove_volume(struct ubi_volume_desc *desc)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
+
+ dbg_msg("remove UBI volume %d", vol_id);
+ ubi_assert(desc->mode == UBI_EXCLUSIVE);
+ ubi_assert(vol == ubi->volumes[vol_id]);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ /*
+ * The volume is busy, probably someone is reading one of its
+ * sysfs files.
+ */
+ err = -EBUSY;
+ goto out_unlock;
+ }
+ ubi->volumes[vol_id] = NULL;
+ spin_unlock(&ubi->volumes_lock);
+
+ err = ubi_destroy_gluebi(vol);
+ if (err)
+ goto out_err;
+
+ err = ubi_change_vtbl_record(ubi, vol_id, NULL);
+ if (err)
+ goto out_err;
+
+ for (i = 0; i < vol->reserved_pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, i);
+ if (err)
+ goto out_err;
+ }
+
+ kfree(vol->eba_tbl);
+ vol->eba_tbl = NULL;
+ cdev_del(&vol->cdev);
+ volume_sysfs_close(vol);
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= reserved_pebs;
+ ubi->avail_pebs += reserved_pebs;
+ i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+ if (i > 0) {
+ i = ubi->avail_pebs >= i ? i : ubi->avail_pebs;
+ ubi->avail_pebs -= i;
+ ubi->rsvd_pebs += i;
+ ubi->beb_rsvd_pebs += i;
+ if (i > 0)
+ ubi_msg("reserve more %d PEBs", i);
+ }
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ paranoid_check_volumes(ubi);
+ return 0;
+
+out_err:
+ ubi_err("cannot remove volume %d, error %d", vol_id, err);
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ return err;
+}
+
+/**
+ * ubi_resize_volume - re-size volume.
+ * @desc: volume descriptor
+ * @reserved_pebs: new size in physical eraseblocks
+ *
+ * This function re-sizes the volume and returns zero in case of success, and a
+ * negative error code in case of failure. The caller has to have the
+ * @ubi->volumes_mutex locked.
+ */
+int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+{
+ int i, err, pebs, *new_mapping;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ struct ubi_vtbl_record vtbl_rec;
+ int vol_id = vol->vol_id;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+	dbg_msg("re-size volume %d from %d to %d PEBs",
+ vol_id, vol->reserved_pebs, reserved_pebs);
+
+ if (vol->vol_type == UBI_STATIC_VOLUME &&
+ reserved_pebs < vol->used_ebs) {
+ dbg_err("too small size %d, %d LEBs contain data",
+ reserved_pebs, vol->used_ebs);
+ return -EINVAL;
+ }
+
+ /* If the size is the same, we have nothing to do */
+ if (reserved_pebs == vol->reserved_pebs)
+ return 0;
+
+ new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL);
+ if (!new_mapping)
+ return -ENOMEM;
+
+ for (i = 0; i < reserved_pebs; i++)
+ new_mapping[i] = UBI_LEB_UNMAPPED;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ spin_unlock(&ubi->volumes_lock);
+ err = -EBUSY;
+ goto out_free;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ /* Reserve physical eraseblocks */
+ pebs = reserved_pebs - vol->reserved_pebs;
+ if (pebs > 0) {
+ spin_lock(&ubi->volumes_lock);
+ if (pebs > ubi->avail_pebs) {
+ dbg_err("not enough PEBs: requested %d, available %d",
+ pebs, ubi->avail_pebs);
+ spin_unlock(&ubi->volumes_lock);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= pebs;
+ ubi->rsvd_pebs += pebs;
+ for (i = 0; i < vol->reserved_pebs; i++)
+ new_mapping[i] = vol->eba_tbl[i];
+ kfree(vol->eba_tbl);
+ vol->eba_tbl = new_mapping;
+ spin_unlock(&ubi->volumes_lock);
+ }
+
+ /* Change volume table record */
+ memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_acc;
+
+ if (pebs < 0) {
+ for (i = 0; i < -pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
+ if (err)
+ goto out_acc;
+ }
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs += pebs;
+ ubi->avail_pebs -= pebs;
+ pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+ if (pebs > 0) {
+ pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs;
+ ubi->avail_pebs -= pebs;
+ ubi->rsvd_pebs += pebs;
+ ubi->beb_rsvd_pebs += pebs;
+ if (pebs > 0)
+ ubi_msg("reserve more %d PEBs", pebs);
+ }
+ for (i = 0; i < reserved_pebs; i++)
+ new_mapping[i] = vol->eba_tbl[i];
+ kfree(vol->eba_tbl);
+ vol->eba_tbl = new_mapping;
+ spin_unlock(&ubi->volumes_lock);
+ }
+
+ vol->reserved_pebs = reserved_pebs;
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ }
+
+ paranoid_check_volumes(ubi);
+ return 0;
+
+out_acc:
+ if (pebs > 0) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= pebs;
+ ubi->avail_pebs += pebs;
+ spin_unlock(&ubi->volumes_lock);
+ }
+out_free:
+ kfree(new_mapping);
+ return err;
+}
+
+/**
+ * ubi_add_volume - add volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function adds an existing volume and initializes all its data
+ * structures. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err, vol_id = vol->vol_id;
+ dev_t dev;
+
+ dbg_msg("add volume %d", vol_id);
+ ubi_dbg_dump_vol_info(vol);
+
+ /* Register character device for the volume */
+ cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
+ vol->cdev.owner = THIS_MODULE;
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
+ if (err) {
+ ubi_err("cannot add character device for volume %d, error %d",
+ vol_id, err);
+ return err;
+ }
+
+ err = ubi_create_gluebi(ubi, vol);
+ if (err)
+ goto out_cdev;
+
+ vol->dev.release = vol_release;
+ vol->dev.parent = &ubi->dev;
+ vol->dev.devt = dev;
+ vol->dev.class = ubi_class;
+ sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
+ err = device_register(&vol->dev);
+ if (err)
+ goto out_gluebi;
+
+ err = volume_sysfs_init(ubi, vol);
+ if (err) {
+ cdev_del(&vol->cdev);
+ err = ubi_destroy_gluebi(vol);
+ volume_sysfs_close(vol);
+ return err;
+ }
+
+ paranoid_check_volumes(ubi);
+ return 0;
+
+out_gluebi:
+ err = ubi_destroy_gluebi(vol);
+out_cdev:
+ cdev_del(&vol->cdev);
+ return err;
+}
+
+/**
+ * ubi_free_volume - free volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function frees all resources for volume @vol but does not remove it.
+ * Used only when the UBI device is detached.
+ */
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err;
+
+ dbg_msg("free volume %d", vol->vol_id);
+
+ ubi->volumes[vol->vol_id] = NULL;
+ err = ubi_destroy_gluebi(vol);
+ cdev_del(&vol->cdev);
+ volume_sysfs_close(vol);
+}
+
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+
+/**
+ * paranoid_check_volume - check volume information.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ */
+static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
+{
+ int idx = vol_id2idx(ubi, vol_id);
+ int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
+ const struct ubi_volume *vol;
+ long long n;
+ const char *name;
+
+ spin_lock(&ubi->volumes_lock);
+ reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
+ vol = ubi->volumes[idx];
+
+ if (!vol) {
+ if (reserved_pebs) {
+ ubi_err("no volume info, but volume exists");
+ goto fail;
+ }
+ spin_unlock(&ubi->volumes_lock);
+ return;
+ }
+
+ if (vol->exclusive) {
+ /*
+ * The volume may be being created at the moment, do not check
+		 * it (e.g., it may be in the middle of ubi_create_volume()).
+ */
+ spin_unlock(&ubi->volumes_lock);
+ return;
+ }
+
+ if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
+ vol->name_len < 0) {
+ ubi_err("negative values");
+ goto fail;
+ }
+ if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
+ ubi_err("bad alignment");
+ goto fail;
+ }
+
+ n = vol->alignment & (ubi->min_io_size - 1);
+ if (vol->alignment != 1 && n) {
+ ubi_err("alignment is not multiple of min I/O unit");
+ goto fail;
+ }
+
+ n = ubi->leb_size % vol->alignment;
+ if (vol->data_pad != n) {
+ ubi_err("bad data_pad, has to be %lld", n);
+ goto fail;
+ }
+
+ if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
+ vol->vol_type != UBI_STATIC_VOLUME) {
+ ubi_err("bad vol_type");
+ goto fail;
+ }
+
+ if (vol->upd_marker && vol->corrupted) {
+ dbg_err("update marker and corrupted simultaneously");
+ goto fail;
+ }
+
+ if (vol->reserved_pebs > ubi->good_peb_count) {
+ ubi_err("too large reserved_pebs");
+ goto fail;
+ }
+
+ n = ubi->leb_size - vol->data_pad;
+ if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
+ ubi_err("bad usable_leb_size, has to be %lld", n);
+ goto fail;
+ }
+
+ if (vol->name_len > UBI_VOL_NAME_MAX) {
+ ubi_err("too long volume name, max is %d", UBI_VOL_NAME_MAX);
+ goto fail;
+ }
+
+ if (!vol->name) {
+ ubi_err("NULL volume name");
+ goto fail;
+ }
+
+ n = strnlen(vol->name, vol->name_len + 1);
+ if (n != vol->name_len) {
+ ubi_err("bad name_len %lld", n);
+ goto fail;
+ }
+
+ n = (long long)vol->used_ebs * vol->usable_leb_size;
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ if (vol->corrupted) {
+ ubi_err("corrupted dynamic volume");
+ goto fail;
+ }
+ if (vol->used_ebs != vol->reserved_pebs) {
+ ubi_err("bad used_ebs");
+ goto fail;
+ }
+ if (vol->last_eb_bytes != vol->usable_leb_size) {
+ ubi_err("bad last_eb_bytes");
+ goto fail;
+ }
+ if (vol->used_bytes != n) {
+ ubi_err("bad used_bytes");
+ goto fail;
+ }
+ } else {
+ if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
+ ubi_err("bad used_ebs");
+ goto fail;
+ }
+ if (vol->last_eb_bytes < 0 ||
+ vol->last_eb_bytes > vol->usable_leb_size) {
+ ubi_err("bad last_eb_bytes");
+ goto fail;
+ }
+ if (vol->used_bytes < 0 || vol->used_bytes > n ||
+ vol->used_bytes < n - vol->usable_leb_size) {
+ ubi_err("bad used_bytes");
+ goto fail;
+ }
+ }
+
+ alignment = be32_to_cpu(ubi->vtbl[vol_id].alignment);
+ data_pad = be32_to_cpu(ubi->vtbl[vol_id].data_pad);
+ name_len = be16_to_cpu(ubi->vtbl[vol_id].name_len);
+ upd_marker = ubi->vtbl[vol_id].upd_marker;
+ name = &ubi->vtbl[vol_id].name[0];
+ if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
+ vol_type = UBI_DYNAMIC_VOLUME;
+ else
+ vol_type = UBI_STATIC_VOLUME;
+
+ if (alignment != vol->alignment || data_pad != vol->data_pad ||
+ upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
+	    name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
+ ubi_err("volume info is different");
+ goto fail;
+ }
+
+ spin_unlock(&ubi->volumes_lock);
+ return;
+
+fail:
+ ubi_err("paranoid check failed for volume %d", vol_id);
+ ubi_dbg_dump_vol_info(vol);
+ ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ spin_unlock(&ubi->volumes_lock);
+ BUG();
+}
+
+/**
+ * paranoid_check_volumes - check information about all volumes.
+ * @ubi: UBI device description object
+ */
+static void paranoid_check_volumes(struct ubi_device *ubi)
+{
+ int i;
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ paranoid_check_volume(ubi, i);
+}
+#endif
diff --git a/u-boot/drivers/mtd/ubi/vtbl.c b/u-boot/drivers/mtd/ubi/vtbl.c
new file mode 100644
index 0000000..f679f06
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/vtbl.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file includes volume table manipulation code. The volume table is an
+ * on-flash table containing volume meta-data like name, number of reserved
+ * physical eraseblocks, type, etc. The volume table is stored in the so-called
+ * "layout volume".
+ *
+ * The layout volume is an internal volume which is organized as follows. It
+ * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical
+ * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
+ * other. This redundancy guarantees robustness to unclean reboots. The volume
+ * table is basically an array of volume table records. Each record contains
+ * full information about the volume and is protected by a CRC checksum.
+ *
+ * When the volume table is changed, it is first changed in RAM. Then LEB 0 is
+ * erased, and the updated volume table is written back to LEB 0. The same is
+ * then done for LEB 1. This scheme guarantees recoverability from unclean
+ * reboots.
+ *
+ * In this UBI implementation the on-flash volume table does not contain any
+ * information about how much data static volumes contain. This information may
+ * be found in the scanning data.
+ *
+ * But it would still be beneficial to store this information in the volume
+ * table. For example, suppose we have a static volume X, and all its physical
+ * eraseblocks became bad for some reason. Suppose we are attaching the
+ * corresponding MTD device and the scan has found no logical eraseblocks
+ * corresponding to volume X. According to the volume table, volume X does
+ * exist, so we don't know whether it is just empty or all its physical
+ * eraseblocks went bad. Hence we cannot warn the user about this corruption.
+ *
+ * The volume table also stores so-called "update marker", which is used for
+ * volume updates. Before updating the volume, the update marker is set, and
+ * after the update operation is finished, the update marker is cleared. So if
+ * the update operation was interrupted (e.g. by an unclean reboot) - the
+ * update marker is still there and we know that the volume's contents are
+ * damaged.
+ */
+
+#ifdef UBI_LINUX
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <asm/div64.h>
+#endif
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+static void paranoid_vtbl_check(const struct ubi_device *ubi);
+#else
+#define paranoid_vtbl_check(ubi)
+#endif
+
+/* Empty volume table record */
+static struct ubi_vtbl_record empty_vtbl_record;
+
+/**
+ * ubi_change_vtbl_record - change volume table record.
+ * @ubi: UBI device description object
+ * @idx: table index to change
+ * @vtbl_rec: new volume table record
+ *
+ * This function changes volume table record @idx. If @vtbl_rec is %NULL, empty
+ * volume table record is written. The caller does not have to calculate CRC of
+ * the record as it is done by this function. Returns zero in case of success
+ * and a negative error code in case of failure.
+ */
+int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
+ struct ubi_vtbl_record *vtbl_rec)
+{
+ int i, err;
+ uint32_t crc;
+ struct ubi_volume *layout_vol;
+
+ ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
+ layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
+
+ if (!vtbl_rec)
+ vtbl_rec = &empty_vtbl_record;
+ else {
+ crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
+ vtbl_rec->crc = cpu_to_be32(crc);
+ }
+
+ memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ err = ubi_eba_unmap_leb(ubi, layout_vol, i);
+ if (err)
+ return err;
+
+ err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
+ ubi->vtbl_size, UBI_LONGTERM);
+ if (err)
+ return err;
+ }
+
+ paranoid_vtbl_check(ubi);
+ return 0;
+}
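+
+/*
+ * Usage note (illustrative, not part of the original code): volume removal in
+ * vmt.c calls ubi_change_vtbl_record(ubi, vol_id, NULL), which writes the
+ * empty record into slot @vol_id of both volume table copies, while volume
+ * creation and resizing pass a fully filled record and rely on this function
+ * to compute and store its CRC.
+ */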
+
+/**
+ * vtbl_check - check that the volume table is not corrupted and contains
+ * sensible data.
+ * @ubi: UBI device description object
+ * @vtbl: volume table
+ *
+ * This function returns zero if @vtbl is all right, %1 if CRC is incorrect,
+ * and %-EINVAL if it contains inconsistent data.
+ */
+static int vtbl_check(const struct ubi_device *ubi,
+ const struct ubi_vtbl_record *vtbl)
+{
+ int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len;
+ int upd_marker, err;
+ uint32_t crc;
+ const char *name;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ cond_resched();
+
+ reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ alignment = be32_to_cpu(vtbl[i].alignment);
+ data_pad = be32_to_cpu(vtbl[i].data_pad);
+ upd_marker = vtbl[i].upd_marker;
+ vol_type = vtbl[i].vol_type;
+ name_len = be16_to_cpu(vtbl[i].name_len);
+ name = (const char *) &vtbl[i].name[0];
+
+ crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
+ if (be32_to_cpu(vtbl[i].crc) != crc) {
+ ubi_err("bad CRC at record %u: %#08x, not %#08x",
+ i, crc, be32_to_cpu(vtbl[i].crc));
+ ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ return 1;
+ }
+
+ if (reserved_pebs == 0) {
+ if (memcmp(&vtbl[i], &empty_vtbl_record,
+ UBI_VTBL_RECORD_SIZE)) {
+ err = 2;
+ goto bad;
+ }
+ continue;
+ }
+
+ if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 ||
+ name_len < 0) {
+ err = 3;
+ goto bad;
+ }
+
+ if (alignment > ubi->leb_size || alignment == 0) {
+ err = 4;
+ goto bad;
+ }
+
+ n = alignment & (ubi->min_io_size - 1);
+ if (alignment != 1 && n) {
+ err = 5;
+ goto bad;
+ }
+
+ n = ubi->leb_size % alignment;
+ if (data_pad != n) {
+ dbg_err("bad data_pad, has to be %d", n);
+ err = 6;
+ goto bad;
+ }
+
+ if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
+ err = 7;
+ goto bad;
+ }
+
+ if (upd_marker != 0 && upd_marker != 1) {
+ err = 8;
+ goto bad;
+ }
+
+ if (reserved_pebs > ubi->good_peb_count) {
+ dbg_err("too large reserved_pebs, good PEBs %d",
+ ubi->good_peb_count);
+ err = 9;
+ goto bad;
+ }
+
+ if (name_len > UBI_VOL_NAME_MAX) {
+ err = 10;
+ goto bad;
+ }
+
+ if (name[0] == '\0') {
+ err = 11;
+ goto bad;
+ }
+
+ if (name_len != strnlen(name, name_len + 1)) {
+ err = 12;
+ goto bad;
+ }
+ }
+
+ /* Checks that all names are unique */
+ for (i = 0; i < ubi->vtbl_slots - 1; i++) {
+ for (n = i + 1; n < ubi->vtbl_slots; n++) {
+ int len1 = be16_to_cpu(vtbl[i].name_len);
+ int len2 = be16_to_cpu(vtbl[n].name_len);
+
+ if (len1 > 0 && len1 == len2 &&
+ !strncmp((char *)vtbl[i].name, (char *)vtbl[n].name, len1)) {
+ ubi_err("volumes %d and %d have the same name"
+ " \"%s\"", i, n, vtbl[i].name);
+ ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dbg_dump_vtbl_record(&vtbl[n], n);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err("volume table check failed: record %d, error %d", i, err);
+ ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ return -EINVAL;
+}
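+
+/*
+ * For reference (illustrative sketch derived from the checks above, not part
+ * of the original UBI sources): the CRC of a record only covers its first
+ * UBI_VTBL_RECORD_SIZE_CRC bytes, so a single record could be verified
+ * roughly like this:
+ *
+ *	uint32_t crc = crc32(UBI_CRC32_INIT, &rec, UBI_VTBL_RECORD_SIZE_CRC);
+ *
+ *	if (be32_to_cpu(rec.crc) != crc)
+ *		... the record is corrupted ...
+ *	else if (be32_to_cpu(rec.reserved_pebs) == 0)
+ *		... empty slot, all other fields must be zero ...
+ */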
+
+/**
+ * create_vtbl - create a copy of volume table.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ * @copy: number of the volume table copy
+ * @vtbl: contents of the volume table
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
+ int copy, void *vtbl)
+{
+ int err, tries = 0;
+ static struct ubi_vid_hdr *vid_hdr;
+ struct ubi_scan_volume *sv;
+ struct ubi_scan_leb *new_seb, *old_seb = NULL;
+
+ ubi_msg("create volume table (copy #%d)", copy + 1);
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ /*
+ * Check if a logical eraseblock which would have to contain this volume
+ * table copy was found during scanning. If so, it has to be wiped out.
+ */
+ sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
+ if (sv)
+ old_seb = ubi_scan_find_seb(sv, copy);
+
+retry:
+ new_seb = ubi_scan_get_free_peb(ubi, si);
+ if (IS_ERR(new_seb)) {
+ err = PTR_ERR(new_seb);
+ goto out_free;
+ }
+
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
+ vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
+ vid_hdr->data_size = vid_hdr->used_ebs =
+ vid_hdr->data_pad = cpu_to_be32(0);
+ vid_hdr->lnum = cpu_to_be32(copy);
+ vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
+ vid_hdr->leb_ver = cpu_to_be32(old_seb ? old_seb->leb_ver + 1: 0);
+
+ /* The EC header is already there, write the VID header */
+ err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
+ if (err)
+ goto write_error;
+
+ /* Write the layout volume contents */
+ err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size);
+ if (err)
+ goto write_error;
+
+ /*
+ * And add it to the scanning information. Don't delete the old
+ * @old_seb as it will be deleted and freed in 'ubi_scan_add_used()'.
+ */
+ err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
+ vid_hdr, 0);
+ kfree(new_seb);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+write_error:
+ if (err == -EIO && ++tries <= 5) {
+ /*
+ * Probably this physical eraseblock went bad, try to pick
+ * another one.
+ */
+ list_add_tail(&new_seb->u.list, &si->corr);
+ goto retry;
+ }
+ kfree(new_seb);
+out_free:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+}
+
+/**
+ * process_lvol - process the layout volume.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ * @sv: layout volume scanning information
+ *
+ * This function is responsible for reading the layout volume, ensuring it is
+ * not corrupted, and recovering from corruptions if needed. Returns the
+ * volume table in case of success and a negative error code in case of failure.
+ */
+static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
+ struct ubi_scan_info *si,
+ struct ubi_scan_volume *sv)
+{
+ int err;
+ struct rb_node *rb;
+ struct ubi_scan_leb *seb;
+ struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
+ int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
+
+ /*
+ * UBI goes through the following steps when it changes the layout
+ * volume:
+ * a. erase LEB 0;
+ * b. write new data to LEB 0;
+ * c. erase LEB 1;
+ * d. write new data to LEB 1.
+ *
+ * Before the change, both LEBs contain the same data.
+ *
+ * Due to unclean reboots, the contents of LEB 0 may be lost, but LEB 1
+ * should still be there. So it is OK if LEB 0 is corrupted while LEB 1 is not.
+ * Similarly, LEB 1 may be lost, but there should be LEB 0. And
+ * finally, unclean reboots may result in a situation when neither LEB
+ * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB
+ * 0 contains more recent information.
+ *
+ * So the plan is to first check LEB 0. Then
+ * a. if LEB 0 is OK, it must contain the most recent data; then
+ * we compare it with LEB 1, and if they are different, we copy LEB
+ * 0 to LEB 1;
+ * b. if LEB 0 is corrupted, then LEB 1 has to be OK, and we copy LEB 1
+ * to LEB 0.
+ */
+
+ dbg_msg("check layout volume");
+
+ /* Read both LEB 0 and LEB 1 into memory */
+ ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
+ leb[seb->lnum] = vmalloc(ubi->vtbl_size);
+ if (!leb[seb->lnum]) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ memset(leb[seb->lnum], 0, ubi->vtbl_size);
+
+ err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
+ ubi->vtbl_size);
+ if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
+ /*
+ * Scrub the PEB later. Note, -EBADMSG indicates an
+ * uncorrectable ECC error, but we have our own CRC and
+ * the data will be checked later. If the data is OK,
+ * the PEB will be scrubbed (because we set
+ * seb->scrub). If the data is not OK, the contents of
+ * the PEB will be recovered from the second copy, and
+ * seb->scrub will be cleared in
+ * 'ubi_scan_add_used()'.
+ */
+ seb->scrub = 1;
+ else if (err)
+ goto out_free;
+ }
+
+ err = -EINVAL;
+ if (leb[0]) {
+ leb_corrupted[0] = vtbl_check(ubi, leb[0]);
+ if (leb_corrupted[0] < 0)
+ goto out_free;
+ }
+
+ if (!leb_corrupted[0]) {
+ /* LEB 0 is OK */
+ if (leb[1])
+ leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size);
+ if (leb_corrupted[1]) {
+ ubi_warn("volume table copy #2 is corrupted");
+ err = create_vtbl(ubi, si, 1, leb[0]);
+ if (err)
+ goto out_free;
+ ubi_msg("volume table was restored");
+ }
+
+ /* Both LEB 0 and LEB 1 are OK and consistent */
+ vfree(leb[1]);
+ return leb[0];
+ } else {
+ /* LEB 0 is corrupted or does not exist */
+ if (leb[1]) {
+ leb_corrupted[1] = vtbl_check(ubi, leb[1]);
+ if (leb_corrupted[1] < 0)
+ goto out_free;
+ }
+ if (leb_corrupted[1]) {
+ /* Both LEB 0 and LEB 1 are corrupted */
+ ubi_err("both volume tables are corrupted");
+ goto out_free;
+ }
+
+ ubi_warn("volume table copy #1 is corrupted");
+ err = create_vtbl(ubi, si, 0, leb[1]);
+ if (err)
+ goto out_free;
+ ubi_msg("volume table was restored");
+
+ vfree(leb[0]);
+ return leb[1];
+ }
+
+out_free:
+ vfree(leb[0]);
+ vfree(leb[1]);
+ return ERR_PTR(err);
+}
+
+/**
+ * create_empty_lvol - create empty layout volume.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ *
+ * This function returns volume table contents in case of success and a
+ * negative error code in case of failure.
+ */
+static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
+ struct ubi_scan_info *si)
+{
+ int i;
+ struct ubi_vtbl_record *vtbl;
+
+ vtbl = vmalloc(ubi->vtbl_size);
+ if (!vtbl)
+ return ERR_PTR(-ENOMEM);
+ memset(vtbl, 0, ubi->vtbl_size);
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
+
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ int err;
+
+ err = create_vtbl(ubi, si, i, vtbl);
+ if (err) {
+ vfree(vtbl);
+ return ERR_PTR(err);
+ }
+ }
+
+ return vtbl;
+}
+
+/**
+ * init_volumes - initialize volume information for existing volumes.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ * @vtbl: volume table
+ *
+ * This function allocates volume description objects for existing volumes.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
+ const struct ubi_vtbl_record *vtbl)
+{
+ int i, reserved_pebs = 0;
+ struct ubi_scan_volume *sv;
+ struct ubi_volume *vol;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ cond_resched();
+
+ if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
+ continue; /* Empty record */
+
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ vol->alignment = be32_to_cpu(vtbl[i].alignment);
+ vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
+ vol->upd_marker = vtbl[i].upd_marker;
+ vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
+ UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+ vol->name_len = be16_to_cpu(vtbl[i].name_len);
+ vol->usable_leb_size = ubi->leb_size - vol->data_pad;
+ memcpy(vol->name, vtbl[i].name, vol->name_len);
+ vol->name[vol->name_len] = '\0';
+ vol->vol_id = i;
+
+ if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
+ /* Auto re-size flag may be set only for one volume */
+ if (ubi->autoresize_vol_id != -1) {
+ ubi_err("more then one auto-resize volume (%d "
+ "and %d)", ubi->autoresize_vol_id, i);
+ kfree(vol);
+ return -EINVAL;
+ }
+
+ ubi->autoresize_vol_id = i;
+ }
+
+ ubi_assert(!ubi->volumes[i]);
+ ubi->volumes[i] = vol;
+ ubi->vol_count += 1;
+ vol->ubi = ubi;
+ reserved_pebs += vol->reserved_pebs;
+
+ /*
+ * In case of a dynamic volume, UBI knows nothing about how much
+ * data is stored there. So assume the whole volume is used.
+ */
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ continue;
+ }
+
+ /* Static volumes only */
+ sv = ubi_scan_find_sv(si, i);
+ if (!sv) {
+ /*
+ * No eraseblocks belonging to this volume were found. We
+ * don't actually know whether this static volume is
+ * completely corrupted or just contains no data. And
+ * we cannot know this as long as data size is not
+ * stored on flash. So we just assume the volume is
+ * empty. FIXME: this should be handled.
+ */
+ continue;
+ }
+
+ if (sv->leb_count != sv->used_ebs) {
+ /*
+ * We found a static volume which is missing several
+ * eraseblocks. Treat it as corrupted.
+ */
+ ubi_warn("static volume %d misses %d LEBs - corrupted",
+ sv->vol_id, sv->used_ebs - sv->leb_count);
+ vol->corrupted = 1;
+ continue;
+ }
+
+ vol->used_ebs = sv->used_ebs;
+ vol->used_bytes =
+ (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
+ vol->used_bytes += sv->last_data_size;
+ vol->last_eb_bytes = sv->last_data_size;
+ }
+
+ /* And add the layout volume */
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
+ vol->alignment = 1;
+ vol->vol_type = UBI_DYNAMIC_VOLUME;
+ vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
+ memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
+ vol->usable_leb_size = ubi->leb_size;
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->reserved_pebs;
+ vol->used_bytes =
+ (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
+ vol->vol_id = UBI_LAYOUT_VOLUME_ID;
+ vol->ref_count = 1;
+
+ ubi_assert(!ubi->volumes[i]);
+ ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
+ reserved_pebs += vol->reserved_pebs;
+ ubi->vol_count += 1;
+ vol->ubi = ubi;
+
+ if (reserved_pebs > ubi->avail_pebs)
+ ubi_err("not enough PEBs, required %d, available %d",
+ reserved_pebs, ubi->avail_pebs);
+ ubi->rsvd_pebs += reserved_pebs;
+ ubi->avail_pebs -= reserved_pebs;
+
+ return 0;
+}
+
+/**
+ * check_sv - check volume scanning information.
+ * @vol: UBI volume description object
+ * @sv: volume scanning information
+ *
+ * This function returns zero if the volume scanning information is consistent
+ * with the data read from the volume table, and %-EINVAL if not.
+ */
+static int check_sv(const struct ubi_volume *vol,
+ const struct ubi_scan_volume *sv)
+{
+ int err;
+
+ if (sv->highest_lnum >= vol->reserved_pebs) {
+ err = 1;
+ goto bad;
+ }
+ if (sv->leb_count > vol->reserved_pebs) {
+ err = 2;
+ goto bad;
+ }
+ if (sv->vol_type != vol->vol_type) {
+ err = 3;
+ goto bad;
+ }
+ if (sv->used_ebs > vol->reserved_pebs) {
+ err = 4;
+ goto bad;
+ }
+ if (sv->data_pad != vol->data_pad) {
+ err = 5;
+ goto bad;
+ }
+ return 0;
+
+bad:
+ ubi_err("bad scanning information, error %d", err);
+ ubi_dbg_dump_sv(sv);
+ ubi_dbg_dump_vol_info(vol);
+ return -EINVAL;
+}
+
+/**
+ * check_scanning_info - check the scanning information.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ *
+ * Even though we protect on-flash data by CRC checksums, we still don't trust
+ * the media. This function ensures that the scanning information is consistent
+ * with the information read from the volume table. Returns zero if the
+ * scanning information is OK and %-EINVAL if it is not.
+ */
+static int check_scanning_info(const struct ubi_device *ubi,
+ struct ubi_scan_info *si)
+{
+ int err, i;
+ struct ubi_scan_volume *sv;
+ struct ubi_volume *vol;
+
+ if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+ ubi_err("scanning found %d volumes, maximum is %d + %d",
+ si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+ return -EINVAL;
+ }
+
+ if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+ si->highest_vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err("too large volume ID %d found by scanning",
+ si->highest_vol_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ cond_resched();
+
+ sv = ubi_scan_find_sv(si, i);
+ vol = ubi->volumes[i];
+ if (!vol) {
+ if (sv)
+ ubi_scan_rm_volume(si, sv);
+ continue;
+ }
+
+ if (vol->reserved_pebs == 0) {
+ ubi_assert(i < ubi->vtbl_slots);
+
+ if (!sv)
+ continue;
+
+ /*
+ * During scanning we found a volume which does not
+ * exist according to the information in the volume
+ * table. This must have happened due to an unclean
+ * reboot while the volume was being removed. Discard
+ * these eraseblocks.
+ */
+ ubi_msg("finish volume %d removal", sv->vol_id);
+ ubi_scan_rm_volume(si, sv);
+ } else if (sv) {
+ err = check_sv(vol, sv);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_read_volume_table - read the volume table.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ *
+ * This function reads the volume table, checks it, recovers from errors if needed,
+ * or creates it if needed. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
+{
+ int i, err;
+ struct ubi_scan_volume *sv;
+
+ empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
+
+ /*
+ * The number of supported volumes is limited by the eraseblock size
+ * and by the UBI_MAX_VOLUMES constant.
+ */
+ ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
+ if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
+ ubi->vtbl_slots = UBI_MAX_VOLUMES;
+
+ ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
+ ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
+
+ sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
+ if (!sv) {
+ /*
+ * No logical eraseblocks belonging to the layout volume were
+ * found. This could mean that the flash is just empty. In
+ * this case we create an empty layout volume.
+ *
+ * But if the flash is not empty, this must be a corruption, or
+ * the MTD device just contains garbage.
+ */
+ if (si->is_empty) {
+ ubi->vtbl = create_empty_lvol(ubi, si);
+ if (IS_ERR(ubi->vtbl))
+ return PTR_ERR(ubi->vtbl);
+ } else {
+ ubi_err("the layout volume was not found");
+ return -EINVAL;
+ }
+ } else {
+ if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+ /* This must not happen with proper UBI images */
+ dbg_err("too many LEBs (%d) in layout volume",
+ sv->leb_count);
+ return -EINVAL;
+ }
+
+ ubi->vtbl = process_lvol(ubi, si, sv);
+ if (IS_ERR(ubi->vtbl))
+ return PTR_ERR(ubi->vtbl);
+ }
+
+ ubi->avail_pebs = ubi->good_peb_count;
+
+ /*
+ * The layout volume is OK, initialize the corresponding in-RAM data
+ * structures.
+ */
+ err = init_volumes(ubi, si, ubi->vtbl);
+ if (err)
+ goto out_free;
+
+ /*
+ * Make sure that the scanning information is consistent with the
+ * information stored in the volume table.
+ */
+ err = check_scanning_info(ubi, si);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ vfree(ubi->vtbl);
+ for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++)
+ if (ubi->volumes[i]) {
+ kfree(ubi->volumes[i]);
+ ubi->volumes[i] = NULL;
+ }
+ return err;
+}
+
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+
+/**
+ * paranoid_vtbl_check - check volume table.
+ * @ubi: UBI device description object
+ */
+static void paranoid_vtbl_check(const struct ubi_device *ubi)
+{
+ if (vtbl_check(ubi, ubi->vtbl)) {
+ ubi_err("paranoid check failed");
+ BUG();
+ }
+}
+
+#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/u-boot/drivers/mtd/ubi/wl.c b/u-boot/drivers/mtd/ubi/wl.c
new file mode 100644
index 0000000..88b867a
--- /dev/null
+++ b/u-boot/drivers/mtd/ubi/wl.c
@@ -0,0 +1,1675 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
+ */
+
+/*
+ * UBI wear-leveling unit.
+ *
+ * This unit is responsible for wear-leveling. It works in terms of physical
+ * eraseblocks and erase counters and knows nothing about logical eraseblocks,
+ * volumes, etc. From this unit's perspective all physical eraseblocks are of
+ * two types - used and free. Used physical eraseblocks are those that were
+ * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
+ * those that were put by the 'ubi_wl_put_peb()' function.
+ *
+ * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase counter
+ * header. The rest of the physical eraseblock contains only 0xFF bytes.
+ *
+ * When physical eraseblocks are returned to the WL unit by means of the
+ * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
+ * done asynchronously in context of the per-UBI device background thread,
+ * which is also managed by the WL unit.
+ *
+ * The wear-leveling is ensured by means of moving the contents of used
+ * physical eraseblocks with low erase counter to free physical eraseblocks
+ * with high erase counter.
+ *
+ * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
+ * an "optimal" physical eraseblock. For example, when it is known that the
+ * physical eraseblock will be "put" soon because it contains short-term data,
+ * the WL unit may pick a free physical eraseblock with low erase counter, and
+ * so forth.
+ *
+ * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
+ *
+ * This unit is also responsible for scrubbing. If a bit-flip is detected in a
+ * physical eraseblock, it has to be moved. Technically this is the same as
+ * moving it for wear-leveling reasons.
+ *
+ * As it was said, for the UBI unit all physical eraseblocks are either "free"
+ * or "used". Free eraseblock are kept in the @wl->free RB-tree, while used
+ * eraseblocks are kept in a set of different RB-trees: @wl->used,
+ * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
+ *
+ * Note, in this implementation, we keep a small in-RAM object for each physical
+ * eraseblock. This is surely not a scalable solution. But it appears to be good
+ * enough for moderately large flashes and it is simple. In the future, one may
+ * re-work this unit and make it more scalable.
+ *
+ * At the moment this unit does not utilize the sequence number, which was
+ * introduced relatively recently. But it would be wise to do this because the
+ * sequence number of a logical eraseblock characterizes how old it is. For
+ * example, when we move a PEB with a low erase counter and we need to pick the
+ * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
+ * pick a target PEB with an average EC if our PEB is not very "old". This is
+ * room for future re-work of the WL unit.
+ *
+ * FIXME: looks too complex, should be simplified (later).
+ */
+
+#ifdef UBI_LINUX
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#endif
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+/* Number of physical eraseblocks reserved for wear-leveling purposes */
+#define WL_RESERVED_PEBS 1
+
+/*
+ * For how many erase cycles short term, unknown, and long term physical
+ * eraseblocks are protected.
+ */
+#define ST_PROTECTION 16
+#define U_PROTECTION 10
+#define LT_PROTECTION 4
+
+/*
+ * Maximum difference between two erase counters. If this threshold is
+ * exceeded, the WL unit starts moving data from used physical eraseblocks with
+ * low erase counter to free physical eraseblocks with high erase counter.
+ */
+#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
+
+/*
+ * When a physical eraseblock is moved, the WL unit has to pick the target
+ * physical eraseblock to move to. The simplest way would be just to pick the
+ * one with the highest erase counter. But in certain workloads this could lead
+ * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
+ * situation when the picked physical eraseblock is constantly erased after the
+ * data is written to it. So, we have a constant which limits the highest erase
+ * counter of the free physical eraseblock to pick. Namely, the WL unit does
+ * not pick eraseblocks with an erase counter greater than the lowest erase
+ * counter plus %WL_FREE_MAX_DIFF.
+ */
+#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
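+
+/*
+ * Worked example (numbers purely illustrative): with UBI_WL_THRESHOLD set to
+ * 4096, WL_FREE_MAX_DIFF is 8192; if the lowest erase counter among the free
+ * physical eraseblocks is 100, the WL unit will not hand out a free physical
+ * eraseblock whose erase counter is above 100 + 8192 = 8292.
+ */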
+
+/*
+ * Maximum number of consecutive background thread failures which is enough to
+ * switch to read-only mode.
+ */
+#define WL_MAX_FAILURES 32
+
+/**
+ * struct ubi_wl_prot_entry - PEB protection entry.
+ * @rb_pnum: link in the @wl->prot.pnum RB-tree
+ * @rb_aec: link in the @wl->prot.aec RB-tree
+ * @abs_ec: the absolute erase counter value when the protection ends
+ * @e: the wear-leveling entry of the physical eraseblock under protection
+ *
+ * When the WL unit returns a physical eraseblock, the physical eraseblock is
+ * protected from being moved for some "time". For this reason, the physical
+ * eraseblock is not directly moved from the @wl->free tree to the @wl->used
+ * tree. There is one more tree in between where this physical eraseblock is
+ * temporarily stored (@wl->prot).
+ *
+ * All this protection stuff is needed because:
+ * o we don't want to move physical eraseblocks just after we have given them
+ * to the user; instead, we first want to let users fill them up with data;
+ *
+ * o there is a chance that the user will put the physical eraseblock very
+ * soon, so it makes sense not to move it for some time, but wait; this is
+ * especially important in case of "short term" physical eraseblocks.
+ *
+ * Physical eraseblocks stay protected only for a limited time. But the "time" is
+ * measured in erase cycles in this case. This is implemented with help of the
+ * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
+ * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
+ * the @wl->used tree.
+ *
+ * Protected physical eraseblocks are searched by physical eraseblock number
+ * (when they are put) and by the absolute erase counter (to check if it is
+ * time to move them to the @wl->used tree). So there are actually 2 RB-trees
+ * storing the protected physical eraseblocks: @wl->prot.pnum and
+ * @wl->prot.aec. They are referred to as the "protection" trees. The
+ * first one is indexed by the physical eraseblock number. The second one is
+ * indexed by the absolute erase counter. Both trees store
+ * &struct ubi_wl_prot_entry objects.
+ *
+ * Each physical eraseblock has 2 main states: free and used. The former state
+ * corresponds to the @wl->free tree. The latter state is split up into several
+ * sub-states:
+ * o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is temporarily prohibited (@wl->prot.pnum and
+ * @wl->prot.aec trees);
+ * o scrubbing is needed (@wl->scrub tree).
+ *
+ * Depending on the sub-state, wear-leveling entries of the used physical
+ * eraseblocks may be kept in one of those trees.
+ */
+struct ubi_wl_prot_entry {
+ struct rb_node rb_pnum;
+ struct rb_node rb_aec;
+ unsigned long long abs_ec;
+ struct ubi_wl_entry *e;
+};
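+
+/*
+ * Worked example (numbers purely illustrative): if the device-wide absolute
+ * erase counter @ubi->abs_ec is 1000 when a "short term" physical eraseblock
+ * is handed out, its protection entry gets abs_ec = 1000 + ST_PROTECTION =
+ * 1016, and the eraseblock is moved back to the @wl->used tree once
+ * @ubi->abs_ec reaches 1016 (see 'check_protection_over()').
+ */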
+
+/**
+ * struct ubi_work - UBI work description data structure.
+ * @list: a link in the list of pending works
+ * @func: worker function
+ * @priv: private data of the worker function
+ *
+ * @e: physical eraseblock to erase
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * The @func pointer points to the worker function. If the @cancel argument is
+ * not zero, the worker has to free the resources and exit immediately. The
+ * worker has to return zero in case of success and a negative error code in
+ * case of failure.
+ */
+struct ubi_work {
+ struct list_head list;
+ int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
+ /* The below fields are only relevant to erasure works */
+ struct ubi_wl_entry *e;
+ int torture;
+};
+
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
+ struct rb_root *root);
+#else
+#define paranoid_check_ec(ubi, pnum, ec) 0
+#define paranoid_check_in_wl_tree(e, root)
+#endif
+
+/**
+ * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
+ * @e: the wear-leveling entry to add
+ * @root: the root of the tree
+ *
+ * Note, we use (erase counter, physical eraseblock number) pairs as keys in
+ * the @ubi->used and @ubi->free RB-trees.
+ */
+static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
+{
+ struct rb_node **p, *parent = NULL;
+
+ p = &root->rb_node;
+ while (*p) {
+ struct ubi_wl_entry *e1;
+
+ parent = *p;
+ e1 = rb_entry(parent, struct ubi_wl_entry, rb);
+
+ if (e->ec < e1->ec)
+ p = &(*p)->rb_left;
+ else if (e->ec > e1->ec)
+ p = &(*p)->rb_right;
+ else {
+ ubi_assert(e->pnum != e1->pnum);
+ if (e->pnum < e1->pnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ }
+
+ rb_link_node(&e->rb, parent, p);
+ rb_insert_color(&e->rb, root);
+}
+
+/**
+ * do_work - do one pending work.
+ * @ubi: UBI device description object
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int do_work(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_work *wrk;
+
+ cond_resched();
+
+ /*
+ * @ubi->work_sem is used to synchronize with the workers. Workers take
+ * it in read mode, so many of them may be doing works at a time. But
+ * the queue flush code has to be sure the whole queue of works is
+ * done, and it takes the mutex in write mode.
+ */
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ if (list_empty(&ubi->works)) {
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
+ return 0;
+ }
+
+ wrk = list_entry(ubi->works.next, struct ubi_work, list);
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Call the worker function. Do not touch the work structure
+ * after this call as it will have been freed or reused by that
+ * time by the worker function.
+ */
+ err = wrk->func(ubi, wrk, 0);
+ if (err)
+ ubi_err("work failed with error code %d", err);
+ up_read(&ubi->work_sem);
+
+ return err;
+}
+
+/**
+ * produce_free_peb - produce a free physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function tries to make a free PEB by means of synchronous execution of
+ * pending works. This may be needed if, for example the background thread is
+ * disabled. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int produce_free_peb(struct ubi_device *ubi)
+{
+ int err;
+
+ spin_lock(&ubi->wl_lock);
+ while (!ubi->free.rb_node) {
+ spin_unlock(&ubi->wl_lock);
+
+ dbg_wl("do one work synchronously");
+ err = do_work(ubi);
+ if (err)
+ return err;
+
+ spin_lock(&ubi->wl_lock);
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ return 0;
+}
+
+/**
+ * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
+ * @e: the wear-leveling entry to check
+ * @root: the root of the tree
+ *
+ * This function returns non-zero if @e is in the @root RB-tree and zero if it
+ * is not.
+ */
+static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
+{
+ struct rb_node *p;
+
+ p = root->rb_node;
+ while (p) {
+ struct ubi_wl_entry *e1;
+
+ e1 = rb_entry(p, struct ubi_wl_entry, rb);
+
+ if (e->pnum == e1->pnum) {
+ ubi_assert(e == e1);
+ return 1;
+ }
+
+ if (e->ec < e1->ec)
+ p = p->rb_left;
+ else if (e->ec > e1->ec)
+ p = p->rb_right;
+ else {
+ ubi_assert(e->pnum != e1->pnum);
+ if (e->pnum < e1->pnum)
+ p = p->rb_left;
+ else
+ p = p->rb_right;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * prot_tree_add - add physical eraseblock to protection trees.
+ * @ubi: UBI device description object
+ * @e: the physical eraseblock to add
+ * @pe: protection entry object to use
+ * @abs_ec: absolute erase counter value when this physical eraseblock has
+ * to be removed from the protection trees.
+ *
+ * @wl->lock has to be locked.
+ */
+static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ struct ubi_wl_prot_entry *pe, int abs_ec)
+{
+ struct rb_node **p, *parent = NULL;
+ struct ubi_wl_prot_entry *pe1;
+
+ pe->e = e;
+ pe->abs_ec = ubi->abs_ec + abs_ec;
+
+ p = &ubi->prot.pnum.rb_node;
+ while (*p) {
+ parent = *p;
+ pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
+
+ if (e->pnum < pe1->e->pnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&pe->rb_pnum, parent, p);
+ rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
+
+ p = &ubi->prot.aec.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
+
+ if (pe->abs_ec < pe1->abs_ec)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&pe->rb_aec, parent, p);
+ rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
+}
+
+/**
+ * find_wl_entry - find wear-leveling entry closest to certain erase counter.
+ * @root: the RB-tree to look in
+ * @max: highest possible erase counter
+ *
+ * This function looks for a wear-leveling entry with an erase counter closest
+ * to @max and less than @max.
+ */
+static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e;
+
+ e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
+ max += e->ec;
+
+ p = root->rb_node;
+ while (p) {
+ struct ubi_wl_entry *e1;
+
+ e1 = rb_entry(p, struct ubi_wl_entry, rb);
+ if (e1->ec >= max)
+ p = p->rb_left;
+ else {
+ p = p->rb_right;
+ e = e1;
+ }
+ }
+
+ return e;
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ * @dtype: type of data which will be stored in this physical eraseblock
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure. Might sleep.
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+{
+ int err, protect, medium_ec;
+ struct ubi_wl_entry *e, *first, *last;
+ struct ubi_wl_prot_entry *pe;
+
+ ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
+ dtype == UBI_UNKNOWN);
+
+ pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
+ if (!pe)
+ return -ENOMEM;
+
+retry:
+ spin_lock(&ubi->wl_lock);
+ if (!ubi->free.rb_node) {
+ if (ubi->works_count == 0) {
+ ubi_assert(list_empty(&ubi->works));
+ ubi_err("no free eraseblocks");
+ spin_unlock(&ubi->wl_lock);
+ kfree(pe);
+ return -ENOSPC;
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ err = produce_free_peb(ubi);
+ if (err < 0) {
+ kfree(pe);
+ return err;
+ }
+ goto retry;
+ }
+
+ switch (dtype) {
+ case UBI_LONGTERM:
+ /*
+ * For long term data we pick a physical eraseblock
+ * with high erase counter. But the highest erase
+ * counter we can pick is bounded by the lowest
+ * erase counter plus %WL_FREE_MAX_DIFF.
+ */
+ e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ protect = LT_PROTECTION;
+ break;
+ case UBI_UNKNOWN:
+ /*
+ * For unknown data we pick a physical eraseblock with
+ * medium erase counter. But by no means can we pick a
+ * physical eraseblock with an erase counter greater than
+ * or equal to the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF.
+ */
+ first = rb_entry(rb_first(&ubi->free),
+ struct ubi_wl_entry, rb);
+ last = rb_entry(rb_last(&ubi->free),
+ struct ubi_wl_entry, rb);
+
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF)
+ e = rb_entry(ubi->free.rb_node,
+ struct ubi_wl_entry, rb);
+ else {
+ medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
+ e = find_wl_entry(&ubi->free, medium_ec);
+ }
+ protect = U_PROTECTION;
+ break;
+ case UBI_SHORTTERM:
+ /*
+ * For short term data we pick a physical eraseblock
+ * with the lowest erase counter as we expect it will
+ * be erased soon.
+ */
+ e = rb_entry(rb_first(&ubi->free),
+ struct ubi_wl_entry, rb);
+ protect = ST_PROTECTION;
+ break;
+ default:
+ protect = 0;
+ e = NULL;
+ BUG();
+ }
+
+ /*
+ * Move the physical eraseblock to the protection trees where it will
+ * be protected from being moved for some time.
+ */
+ paranoid_check_in_wl_tree(e, &ubi->free);
+ rb_erase(&e->rb, &ubi->free);
+ prot_tree_add(ubi, e, pe, protect);
+
+ dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
+ spin_unlock(&ubi->wl_lock);
+
+ return e->pnum;
+}
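+
+/*
+ * Usage sketch (illustrative only, not part of the original UBI sources): a
+ * hypothetical caller allocates a PEB with a data type hint and later returns
+ * it to the wear-leveling unit:
+ *
+ *	int pnum = ubi_wl_get_peb(ubi, UBI_LONGTERM);
+ *
+ *	if (pnum < 0)
+ *		... error, e.g. %-ENOSPC when no free PEBs are left ...
+ *	... write data to the PEB via the I/O unit ...
+ *	err = ubi_wl_put_peb(ubi, pnum, 0);	<- schedule it for erasure
+ */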
+
+/**
+ * prot_tree_del - remove a physical eraseblock from the protection trees
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to remove
+ *
+ * This function removes PEB @pnum from the protection trees and returns zero
+ * in case of success and %-ENODEV if the PEB was not found in the protection
+ * trees.
+ */
+static int prot_tree_del(struct ubi_device *ubi, int pnum)
+{
+ struct rb_node *p;
+ struct ubi_wl_prot_entry *pe = NULL;
+
+ p = ubi->prot.pnum.rb_node;
+ while (p) {
+
+ pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
+
+ if (pnum == pe->e->pnum)
+ goto found;
+
+ if (pnum < pe->e->pnum)
+ p = p->rb_left;
+ else
+ p = p->rb_right;
+ }
+
+ return -ENODEV;
+
+found:
+ ubi_assert(pe->e->pnum == pnum);
+ rb_erase(&pe->rb_aec, &ubi->prot.aec);
+ rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
+ kfree(pe);
+ return 0;
+}
+
+/**
+ * sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @e: the physical eraseblock to erase
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
+{
+ int err;
+ struct ubi_ec_hdr *ec_hdr;
+ unsigned long long ec = e->ec;
+
+ dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
+
+ err = paranoid_check_ec(ubi, e->pnum, e->ec);
+ if (err > 0)
+ return -EINVAL;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_sync_erase(ubi, e->pnum, torture);
+ if (err < 0)
+ goto out_free;
+
+ ec += err;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. Upgrade UBI and use 64-bit
+ * erase counters internally.
+ */
+ ubi_err("erase counter overflow at PEB %d, EC %llu",
+ e->pnum, ec);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
+
+ ec_hdr->ec = cpu_to_be64(ec);
+
+ err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
+ if (err)
+ goto out_free;
+
+ e->ec = ec;
+ spin_lock(&ubi->wl_lock);
+ if (e->ec > ubi->max_ec)
+ ubi->max_ec = e->ec;
+ spin_unlock(&ubi->wl_lock);
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * check_protection_over - check if it is time to stop protecting some
+ * physical eraseblocks.
+ * @ubi: UBI device description object
+ *
+ * This function is called after each erase operation, when the absolute erase
+ * counter is incremented, to check if some physical eraseblocks no longer have
+ * to be protected. These physical eraseblocks are moved from the
+ * protection trees to the used tree.
+ */
+static void check_protection_over(struct ubi_device *ubi)
+{
+ struct ubi_wl_prot_entry *pe;
+
+ /*
+ * There may be several protected physical eraseblocks to remove;
+ * process them all.
+ */
+ while (1) {
+ spin_lock(&ubi->wl_lock);
+ if (!ubi->prot.aec.rb_node) {
+ spin_unlock(&ubi->wl_lock);
+ break;
+ }
+
+ pe = rb_entry(rb_first(&ubi->prot.aec),
+ struct ubi_wl_prot_entry, rb_aec);
+
+ if (pe->abs_ec > ubi->abs_ec) {
+ spin_unlock(&ubi->wl_lock);
+ break;
+ }
+
+ dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
+ pe->e->pnum, ubi->abs_ec, pe->abs_ec);
+ rb_erase(&pe->rb_aec, &ubi->prot.aec);
+ rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
+ wl_tree_add(pe->e, &ubi->used);
+ spin_unlock(&ubi->wl_lock);
+
+ kfree(pe);
+ cond_resched();
+ }
+}
+
+/**
+ * schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function enqueues a work defined by @wrk to the tail of the pending
+ * works list.
+ */
+static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ spin_lock(&ubi->wl_lock);
+ list_add_tail(&wrk->list, &ubi->works);
+ ubi_assert(ubi->works_count >= 0);
+ ubi->works_count += 1;
+
+ /*
+ * U-Boot special: We have no bgt_thread in U-Boot!
+ * So just call do_work() here directly.
+ */
+ do_work(ubi);
+
+ spin_unlock(&ubi->wl_lock);
+}
+
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ int cancel);
+
+/**
+ * schedule_erase - schedule an erase work.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * This function returns zero in case of success and a %-ENOMEM in case of
+ * failure.
+ */
+static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int torture)
+{
+ struct ubi_work *wl_wrk;
+
+ dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
+ e->pnum, e->ec, torture);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->func = &erase_worker;
+ wl_wrk->e = e;
+ wl_wrk->torture = torture;
+
+ schedule_ubi_work(ubi, wl_wrk);
+ return 0;
+}
+
+/**
+ * wear_leveling_worker - wear-leveling worker function.
+ * @ubi: UBI device description object
+ * @wrk: the work object
+ * @cancel: non-zero if the worker has to free memory and exit
+ *
+ * This function copies a more worn out physical eraseblock to a less worn out
+ * one. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ int cancel)
+{
+ int err, put = 0, scrubbing = 0, protect = 0;
+ struct ubi_wl_prot_entry *uninitialized_var(pe);
+ struct ubi_wl_entry *e1, *e2;
+ struct ubi_vid_hdr *vid_hdr;
+
+ kfree(wrk);
+
+ if (cancel)
+ return 0;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ mutex_lock(&ubi->move_mutex);
+ spin_lock(&ubi->wl_lock);
+ ubi_assert(!ubi->move_from && !ubi->move_to);
+ ubi_assert(!ubi->move_to_put);
+
+ if (!ubi->free.rb_node ||
+ (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
+ /*
+ * No free physical eraseblocks? Well, they must be waiting in
+ * the queue to be erased. Cancel movement - it will be
+ * triggered again when a free physical eraseblock appears.
+ *
+ * No used physical eraseblocks? They must be temporarily
+ * protected from being moved. They will be moved to the
+ * @ubi->used tree later and the wear-leveling will be
+ * triggered again.
+ */
+ dbg_wl("cancel WL, a list is empty: free %d, used %d",
+ !ubi->free.rb_node, !ubi->used.rb_node);
+ goto out_cancel;
+ }
+
+ if (!ubi->scrub.rb_node) {
+ /*
+ * Now pick the least worn-out used physical eraseblock and a
+ * highly worn-out free physical eraseblock. If the erase
+ * counters differ much enough, start wear-leveling.
+ */
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
+ e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+
+ if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ dbg_wl("no WL needed: min used EC %d, max free EC %d",
+ e1->ec, e2->ec);
+ goto out_cancel;
+ }
+ paranoid_check_in_wl_tree(e1, &ubi->used);
+ rb_erase(&e1->rb, &ubi->used);
+ dbg_wl("move PEB %d EC %d to PEB %d EC %d",
+ e1->pnum, e1->ec, e2->pnum, e2->ec);
+ } else {
+ /* Perform scrubbing */
+ scrubbing = 1;
+ e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
+ e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ paranoid_check_in_wl_tree(e1, &ubi->scrub);
+ rb_erase(&e1->rb, &ubi->scrub);
+ dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
+ }
+
+ paranoid_check_in_wl_tree(e2, &ubi->free);
+ rb_erase(&e2->rb, &ubi->free);
+ ubi->move_from = e1;
+ ubi->move_to = e2;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
+ * We so far do not know which logical eraseblock our physical
+ * eraseblock (@e1) belongs to. We have to read the volume identifier
+ * header first.
+ *
+ * Note, we are protected from this PEB being unmapped and erased. The
+ * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
+ * which is being moved was unmapped.
+ */
+
+ err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err == UBI_IO_PEB_FREE) {
+ /*
+ * We are trying to move a PEB without a VID header. UBI
+ * always writes VID headers shortly after the PEB was
+ * given out, so we have a situation when it did not have
+ * a chance to write it down because it was preempted.
+ * Just re-schedule the work, so that next time it will
+ * likely have the VID header in place.
+ */
+ dbg_wl("PEB %d has no VID header", e1->pnum);
+ goto out_not_moved;
+ }
+
+ ubi_err("error %d while reading VID header from PEB %d",
+ err, e1->pnum);
+ if (err > 0)
+ err = -EIO;
+ goto out_error;
+ }
+
+ err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
+ if (err) {
+
+ if (err < 0)
+ goto out_error;
+ if (err == 1)
+ goto out_not_moved;
+
+ /*
+ * For some reason the LEB was not moved - it might be because
+ * the volume is being deleted. We should prevent this PEB from
+ * being selected for wear-leveling movement for some "time",
+ * so put it into the protection tree.
+ */
+
+ dbg_wl("cancelled moving PEB %d", e1->pnum);
+ pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
+ if (!pe) {
+ err = -ENOMEM;
+ goto out_error;
+ }
+
+ protect = 1;
+ }
+
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ spin_lock(&ubi->wl_lock);
+ if (protect)
+ prot_tree_add(ubi, e1, pe, protect);
+ if (!ubi->move_to_put)
+ wl_tree_add(e2, &ubi->used);
+ else
+ put = 1;
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ if (put) {
+ /*
+ * Well, the target PEB was put meanwhile, schedule it for
+ * erasure.
+ */
+ dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
+ err = schedule_erase(ubi, e2, 0);
+ if (err)
+ goto out_error;
+ }
+
+ if (!protect) {
+ err = schedule_erase(ubi, e1, 0);
+ if (err)
+ goto out_error;
+ }
+
+
+ dbg_wl("done");
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
+
+ /*
+ * For some reason the LEB was not moved - it might be an error, or
+ * something else. @e1 was not changed, so return it back. @e2 might
+ * have been changed, so schedule it for erasure.
+ */
+out_not_moved:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ spin_lock(&ubi->wl_lock);
+ if (scrubbing)
+ wl_tree_add(e1, &ubi->scrub);
+ else
+ wl_tree_add(e1, &ubi->used);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ err = schedule_erase(ubi, e2, 0);
+ if (err)
+ goto out_error;
+
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
+
+out_error:
+ ubi_err("error %d while moving PEB %d to PEB %d",
+ err, e1->pnum, e2->pnum);
+
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ spin_lock(&ubi->wl_lock);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ kmem_cache_free(ubi_wl_entry_slab, e1);
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+ ubi_ro_mode(ubi);
+
+ mutex_unlock(&ubi->move_mutex);
+ return err;
+
+out_cancel:
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ mutex_unlock(&ubi->move_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+}
+
+/**
+ * ensure_wear_leveling - schedule wear-leveling if it is needed.
+ * @ubi: UBI device description object
+ *
+ * This function checks if it is time to start wear-leveling and schedules it
+ * if yes. This function returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+static int ensure_wear_leveling(struct ubi_device *ubi)
+{
+ int err = 0;
+ struct ubi_wl_entry *e1;
+ struct ubi_wl_entry *e2;
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled)
+ /* Wear-leveling is already in the work queue */
+ goto out_unlock;
+
+ /*
+ * If the ubi->scrub tree is not empty, scrubbing is needed, and
+ * the WL worker has to be scheduled anyway.
+ */
+ if (!ubi->scrub.rb_node) {
+ if (!ubi->used.rb_node || !ubi->free.rb_node)
+ /* No physical eraseblocks - no deal */
+ goto out_unlock;
+
+ /*
+ * We schedule wear-leveling only if the difference between the
+ * lowest erase counter of used physical eraseblocks and a high
+ * erase counter of free physical eraseblocks is greater than
+ * %UBI_WL_THRESHOLD.
+ */
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
+ e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+
+ if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
+ goto out_unlock;
+ dbg_wl("schedule wear-leveling");
+ } else
+ dbg_wl("schedule scrubbing");
+
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ err = -ENOMEM;
+ goto out_cancel;
+ }
+
+ wrk->func = &wear_leveling_worker;
+ schedule_ubi_work(ubi, wrk);
+ return err;
+
+out_cancel:
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+out_unlock:
+ spin_unlock(&ubi->wl_lock);
+ return err;
+}
+
+/**
+ * erase_worker - physical eraseblock erase worker function.
+ * @ubi: UBI device description object
+ * @wl_wrk: the work object
+ * @cancel: non-zero if the worker has to free memory and exit
+ *
+ * This function erases a physical eraseblock and performs torture testing if
+ * needed. It also takes care of marking the physical eraseblock bad if
+ * needed. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ int cancel)
+{
+ struct ubi_wl_entry *e = wl_wrk->e;
+ int pnum = e->pnum, err, need;
+
+ if (cancel) {
+ dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
+ kfree(wl_wrk);
+ kmem_cache_free(ubi_wl_entry_slab, e);
+ return 0;
+ }
+
+ dbg_wl("erase PEB %d EC %d", pnum, e->ec);
+
+ err = sync_erase(ubi, e, wl_wrk->torture);
+ if (!err) {
+ /* Fine, we've erased it successfully */
+ kfree(wl_wrk);
+
+ spin_lock(&ubi->wl_lock);
+ ubi->abs_ec += 1;
+ wl_tree_add(e, &ubi->free);
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * One more erase operation has happened, take care of protected
+ * physical eraseblocks.
+ */
+ check_protection_over(ubi);
+
+ /* And take care of wear-leveling */
+ err = ensure_wear_leveling(ubi);
+ return err;
+ }
+
+ ubi_err("failed to erase PEB %d, error %d", pnum, err);
+ kfree(wl_wrk);
+ kmem_cache_free(ubi_wl_entry_slab, e);
+
+ if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+ err == -EBUSY) {
+ int err1;
+
+ /* Re-schedule the LEB for erasure */
+ err1 = schedule_erase(ubi, e, 0);
+ if (err1) {
+ err = err1;
+ goto out_ro;
+ }
+ return err;
+ } else if (err != -EIO) {
+ /*
+ * If this is not %-EIO, we have no idea what to do. Scheduling
+ * this physical eraseblock for erasure again would cause
+ * errors again and again. Well, let's switch to RO mode.
+ */
+ goto out_ro;
+ }
+
+ /* It is %-EIO, the PEB went bad */
+
+ if (!ubi->bad_allowed) {
+ ubi_err("bad physical eraseblock %d detected", pnum);
+ goto out_ro;
+ }
+
+ spin_lock(&ubi->volumes_lock);
+ need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
+ if (need > 0) {
+ need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ if (need > 0)
+ ubi_msg("reserve more %d PEBs", need);
+ }
+
+ if (ubi->beb_rsvd_pebs == 0) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_err("no reserved physical eraseblocks");
+ goto out_ro;
+ }
+
+ spin_unlock(&ubi->volumes_lock);
+ ubi_msg("mark PEB %d as bad", pnum);
+
+ err = ubi_io_mark_bad(ubi, pnum);
+ if (err)
+ goto out_ro;
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->beb_rsvd_pebs -= 1;
+ ubi->bad_peb_count += 1;
+ ubi->good_peb_count -= 1;
+ ubi_calculate_reserved(ubi);
+ if (ubi->beb_rsvd_pebs == 0)
+ ubi_warn("last PEB from the reserved pool was used");
+ spin_unlock(&ubi->volumes_lock);
+
+ return err;
+
+out_ro:
+ ubi_ro_mode(ubi);
+ return err;
+}
+
+/**
+ * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to return
+ * @torture: if this physical eraseblock has to be tortured
+ *
+ * This function is called to return physical eraseblock @pnum to the pool of
+ * free physical eraseblocks. The @torture flag has to be set if an I/O error
+ * occurred on PEB @pnum and it has to be tested. This function returns zero
+ * in case of success, and a negative error code in case of failure.
+ */
+int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
+{
+ int err;
+ struct ubi_wl_entry *e;
+
+ dbg_wl("PEB %d", pnum);
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+retry:
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (e == ubi->move_from) {
+ /*
+ * User is putting the physical eraseblock which was selected to
+ * be moved. It will be scheduled for erasure in the
+ * wear-leveling worker.
+ */
+ dbg_wl("PEB %d is being moved, wait", pnum);
+ spin_unlock(&ubi->wl_lock);
+
+ /* Wait for the WL worker by taking the @ubi->move_mutex */
+ mutex_lock(&ubi->move_mutex);
+ mutex_unlock(&ubi->move_mutex);
+ goto retry;
+ } else if (e == ubi->move_to) {
+ /*
+ * User is putting the physical eraseblock which was selected
+ * as the target of the data move. It may happen if the EBA
+ * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
+ * the WL unit has not put the PEB to the "used" tree yet, but
+ * it is about to do this. So we just set a flag which will
+ * tell the WL worker that the PEB is not needed anymore and
+ * should be scheduled for erasure.
+ */
+ dbg_wl("PEB %d is the target of data moving", pnum);
+ ubi_assert(!ubi->move_to_put);
+ ubi->move_to_put = 1;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ } else {
+ if (in_wl_tree(e, &ubi->used)) {
+ paranoid_check_in_wl_tree(e, &ubi->used);
+ rb_erase(&e->rb, &ubi->used);
+ } else if (in_wl_tree(e, &ubi->scrub)) {
+ paranoid_check_in_wl_tree(e, &ubi->scrub);
+ rb_erase(&e->rb, &ubi->scrub);
+ } else {
+ err = prot_tree_del(ubi, e->pnum);
+ if (err) {
+ ubi_err("PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ err = schedule_erase(ubi, e, torture);
+ if (err) {
+ spin_lock(&ubi->wl_lock);
+ wl_tree_add(e, &ubi->used);
+ spin_unlock(&ubi->wl_lock);
+ }
+
+ return err;
+}
+
+/**
+ * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ *
+ * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
+ * needs scrubbing. This function schedules a physical eraseblock for
+ * scrubbing, which is done in the background. This function returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
+{
+ struct ubi_wl_entry *e;
+
+ ubi_msg("schedule PEB %d for scrubbing", pnum);
+
+retry:
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ if (e == ubi->move_to) {
+ /*
+ * This physical eraseblock was used to move data to. The data
+ * was moved but the PEB was not yet inserted into the proper
+ * tree. We should just wait a little and let the WL worker
+ * proceed.
+ */
+ spin_unlock(&ubi->wl_lock);
+ dbg_wl("the PEB %d is not in proper tree, retry", pnum);
+ yield();
+ goto retry;
+ }
+
+ if (in_wl_tree(e, &ubi->used)) {
+ paranoid_check_in_wl_tree(e, &ubi->used);
+ rb_erase(&e->rb, &ubi->used);
+ } else {
+ int err;
+
+ err = prot_tree_del(ubi, e->pnum);
+ if (err) {
+ ubi_err("PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
+
+ wl_tree_add(e, &ubi->scrub);
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Technically scrubbing is the same as wear-leveling, so it is done
+ * by the WL worker.
+ */
+ return ensure_wear_leveling(ubi);
+}
+
+/**
+ * ubi_wl_flush - flush all pending works.
+ * @ubi: UBI device description object
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_wl_flush(struct ubi_device *ubi)
+{
+ int err;
+
+ /*
+ * Erase while the pending works queue is not empty, but not more than
+ * the number of currently pending works.
+ */
+ dbg_wl("flush (%d pending works)", ubi->works_count);
+ while (ubi->works_count) {
+ err = do_work(ubi);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Make sure all the works which have been done in parallel are
+ * finished.
+ */
+ down_write(&ubi->work_sem);
+ up_write(&ubi->work_sem);
+
+ /*
+ * And in case the last one was the WL worker and it cancelled the LEB
+ * movement, flush again.
+ */
+ while (ubi->works_count) {
+ dbg_wl("flush more (%d pending works)", ubi->works_count);
+ err = do_work(ubi);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * tree_destroy - destroy an RB-tree.
+ * @root: the root of the tree to destroy
+ */
+static void tree_destroy(struct rb_root *root)
+{
+ struct rb_node *rb;
+ struct ubi_wl_entry *e;
+
+ rb = root->rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ e = rb_entry(rb, struct ubi_wl_entry, rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &e->rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ kmem_cache_free(ubi_wl_entry_slab, e);
+ }
+ }
+}
+
+/**
+ * ubi_thread - UBI background thread.
+ * @u: the UBI device description object pointer
+ */
+int ubi_thread(void *u)
+{
+ int failures = 0;
+ struct ubi_device *ubi = u;
+
+ ubi_msg("background thread \"%s\" started, PID %d",
+ ubi->bgt_name, task_pid_nr(current));
+
+ set_freezable();
+ for (;;) {
+ int err;
+
+ if (kthread_should_stop())
+ break;
+
+ if (try_to_freeze())
+ continue;
+
+ spin_lock(&ubi->wl_lock);
+ if (list_empty(&ubi->works) || ubi->ro_mode ||
+ !ubi->thread_enabled) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&ubi->wl_lock);
+ schedule();
+ continue;
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ err = do_work(ubi);
+ if (err) {
+ ubi_err("%s: work failed with error code %d",
+ ubi->bgt_name, err);
+ if (failures++ > WL_MAX_FAILURES) {
+ /*
+ * Too many failures, disable the thread and
+ * switch to read-only mode.
+ */
+ ubi_msg("%s: %d consecutive failures",
+ ubi->bgt_name, WL_MAX_FAILURES);
+ ubi_ro_mode(ubi);
+ break;
+ }
+ } else
+ failures = 0;
+
+ cond_resched();
+ }
+
+ dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
+ return 0;
+}
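+
+/*
+ * Reference sketch of how this thread is started (see build.c in the
+ * upstream Linux UBI driver); not part of this file:
+ *
+ *   ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
+ *   if (IS_ERR(ubi->bgt_thread))
+ *           err = PTR_ERR(ubi->bgt_thread);
+ *
+ * The thread is later woken with wake_up_process(ubi->bgt_thread).
+ */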
+
+/**
+ * cancel_pending - cancel all pending works.
+ * @ubi: UBI device description object
+ */
+static void cancel_pending(struct ubi_device *ubi)
+{
+ while (!list_empty(&ubi->works)) {
+ struct ubi_work *wrk;
+
+ wrk = list_entry(ubi->works.next, struct ubi_work, list);
+ list_del(&wrk->list);
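+ /*
+ * Invoke the handler with the "cancel" flag set so that it only
+ * releases its resources instead of doing the real job.
+ */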
+ wrk->func(ubi, wrk, 1);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ }
+}
+
+/**
+ * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
+ * information.
+ * @ubi: UBI device description object
+ * @si: scanning information
+ *
+ * This function returns zero in case of success, and a negative error code in
+ * case of failure.
+ */
+int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+{
+ int err;
+ struct rb_node *rb1, *rb2;
+ struct ubi_scan_volume *sv;
+ struct ubi_scan_leb *seb, *tmp;
+ struct ubi_wl_entry *e;
+
+ ubi->used = ubi->free = ubi->scrub = RB_ROOT;
+ ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
+ spin_lock_init(&ubi->wl_lock);
+ mutex_init(&ubi->move_mutex);
+ init_rwsem(&ubi->work_sem);
+ ubi->max_ec = si->max_ec;
+ INIT_LIST_HEAD(&ubi->works);
+
+ sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
+
+ err = -ENOMEM;
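+ /* @lookuptbl maps a physical eraseblock number to its ubi_wl_entry */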
+ ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
+ if (!ubi->lookuptbl)
+ return err;
+
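+ /*
+ * PEBs found on the scanning "erase" and "corrupted" lists are scheduled
+ * for erasure; PEBs on the "free" list go straight to the free RB-tree,
+ * and PEBs referenced by volumes end up in the used or scrub trees.
+ */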
+ list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
+
+ e->pnum = seb->pnum;
+ e->ec = seb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+ if (schedule_erase(ubi, e, 0)) {
+ kmem_cache_free(ubi_wl_entry_slab, e);
+ goto out_free;
+ }
+ }
+
+ list_for_each_entry(seb, &si->free, u.list) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
+
+ e->pnum = seb->pnum;
+ e->ec = seb->ec;
+ ubi_assert(e->ec >= 0);
+ wl_tree_add(e, &ubi->free);
+ ubi->lookuptbl[e->pnum] = e;
+ }
+
+ list_for_each_entry(seb, &si->corr, u.list) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
+
+ e->pnum = seb->pnum;
+ e->ec = seb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+ if (schedule_erase(ubi, e, 0)) {
+ kmem_cache_free(ubi_wl_entry_slab, e);
+ goto out_free;
+ }
+ }
+
+ ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
+
+ e->pnum = seb->pnum;
+ e->ec = seb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+ if (!seb->scrub) {
+ dbg_wl("add PEB %d EC %d to the used tree",
+ e->pnum, e->ec);
+ wl_tree_add(e, &ubi->used);
+ } else {
+ dbg_wl("add PEB %d EC %d to the scrub tree",
+ e->pnum, e->ec);
+ wl_tree_add(e, &ubi->scrub);
+ }
+ }
+ }
+
+ if (ubi->avail_pebs < WL_RESERVED_PEBS) {
+ ubi_err("no enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, WL_RESERVED_PEBS);
+ goto out_free;
+ }
+ ubi->avail_pebs -= WL_RESERVED_PEBS;
+ ubi->rsvd_pebs += WL_RESERVED_PEBS;
+
+ /* Schedule wear-leveling if needed */
+ err = ensure_wear_leveling(ubi);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ cancel_pending(ubi);
+ tree_destroy(&ubi->used);
+ tree_destroy(&ubi->free);
+ tree_destroy(&ubi->scrub);
+ kfree(ubi->lookuptbl);
+ return err;
+}
+
+/**
+ * protection_trees_destroy - destroy the protection RB-trees.
+ * @ubi: UBI device description object
+ */
+static void protection_trees_destroy(struct ubi_device *ubi)
+{
+ struct rb_node *rb;
+ struct ubi_wl_prot_entry *pe;
+
+ rb = ubi->prot.aec.rb_node;
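+ /*
+ * Same iterative post-order walk as in tree_destroy(), except that each
+ * protection entry also owns a wear-leveling entry (pe->e) which is
+ * freed as well.
+ */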
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &pe->rb_aec)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ kmem_cache_free(ubi_wl_entry_slab, pe->e);
+ kfree(pe);
+ }
+ }
+}
+
+/**
+ * ubi_wl_close - close the wear-leveling unit.
+ * @ubi: UBI device description object
+ */
+void ubi_wl_close(struct ubi_device *ubi)
+{
+ dbg_wl("close the UBI wear-leveling unit");
+
+ cancel_pending(ubi);
+ protection_trees_destroy(ubi);
+ tree_destroy(&ubi->used);
+ tree_destroy(&ubi->free);
+ tree_destroy(&ubi->scrub);
+ kfree(ubi->lookuptbl);
+}
+
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+
+/**
+ * paranoid_check_ec - make sure that the erase counter of a physical eraseblock
+ * is correct.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ * @ec: the erase counter to check
+ *
+ * This function returns zero if the erase counter of physical eraseblock @pnum
+ * equals @ec, %1 if not, and a negative error code if an error
+ * occurred.
+ */
+static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
+{
+ int err;
+ long long read_ec;
+ struct ubi_ec_hdr *ec_hdr;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ /* The header does not have to exist */
+ err = 0;
+ goto out_free;
+ }
+
+ read_ec = be64_to_cpu(ec_hdr->ec);
+ if (ec != read_ec) {
+ ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("read EC is %lld, should be %d", read_ec, ec);
+ ubi_dbg_dump_stack();
+ err = 1;
+ } else
+ err = 0;
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
+ * in a WL RB-tree.
+ * @e: the wear-leveling entry to check
+ * @root: the root of the tree
+ *
+ * This function returns zero if @e is in the @root RB-tree and %1 if it
+ * is not.
+ */
+static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
+ struct rb_root *root)
+{
+ if (in_wl_tree(e, root))
+ return 0;
+
+ ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
+ e->pnum, e->ec, root);
+ ubi_dbg_dump_stack();
+ return 1;
+}
+
+#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */