author André Fabian Silva Delgado <emulatorman@parabola.nu> 2015-08-05 17:04:01 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu> 2015-08-05 17:04:01 -0300
commit 57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree 5e910f0e82173f4ef4f51111366a3f1299037a7b /drivers/mtd
Initial import
Diffstat (limited to 'drivers/mtd')
-rw-r--r-- drivers/mtd/Kconfig | 341
-rw-r--r-- drivers/mtd/Makefile | 36
-rw-r--r-- drivers/mtd/afs.c | 282
-rw-r--r-- drivers/mtd/ar7part.c | 157
-rw-r--r-- drivers/mtd/bcm47xxpart.c | 336
-rw-r--r-- drivers/mtd/bcm63xxpart.c | 241
-rw-r--r-- drivers/mtd/chips/Kconfig | 234
-rw-r--r-- drivers/mtd/chips/Makefile | 15
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0001.c | 2666
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0002.c | 2929
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0020.c | 1400
-rw-r--r-- drivers/mtd/chips/cfi_probe.c | 420
-rw-r--r-- drivers/mtd/chips/cfi_util.c | 251
-rw-r--r-- drivers/mtd/chips/chipreg.c | 104
-rw-r--r-- drivers/mtd/chips/fwh_lock.h | 107
-rw-r--r-- drivers/mtd/chips/gen_probe.c | 264
-rw-r--r-- drivers/mtd/chips/jedec_probe.c | 2281
-rw-r--r-- drivers/mtd/chips/map_absent.c | 112
-rw-r--r-- drivers/mtd/chips/map_ram.c | 154
-rw-r--r-- drivers/mtd/chips/map_rom.c | 124
-rw-r--r-- drivers/mtd/cmdlinepart.c | 415
-rw-r--r-- drivers/mtd/devices/Kconfig | 228
-rw-r--r-- drivers/mtd/devices/Makefile | 21
-rw-r--r-- drivers/mtd/devices/bcm47xxsflash.c | 339
-rw-r--r-- drivers/mtd/devices/bcm47xxsflash.h | 76
-rw-r--r-- drivers/mtd/devices/block2mtd.c | 500
-rw-r--r-- drivers/mtd/devices/docg3.c | 2141
-rw-r--r-- drivers/mtd/devices/docg3.h | 370
-rw-r--r-- drivers/mtd/devices/lart.c | 685
-rw-r--r-- drivers/mtd/devices/m25p80.c | 332
-rw-r--r-- drivers/mtd/devices/ms02-nv.c | 311
-rw-r--r-- drivers/mtd/devices/ms02-nv.h | 105
-rw-r--r-- drivers/mtd/devices/mtd_dataflash.c | 928
-rw-r--r-- drivers/mtd/devices/mtdram.c | 158
-rw-r--r-- drivers/mtd/devices/phram.c | 328
-rw-r--r-- drivers/mtd/devices/pmc551.c | 856
-rw-r--r-- drivers/mtd/devices/serial_flash_cmds.h | 61
-rw-r--r-- drivers/mtd/devices/slram.c | 347
-rw-r--r-- drivers/mtd/devices/spear_smi.c | 1092
-rw-r--r-- drivers/mtd/devices/sst25l.c | 431
-rw-r--r-- drivers/mtd/devices/st_spi_fsm.c | 2178
-rw-r--r-- drivers/mtd/ftl.c | 1114
-rw-r--r-- drivers/mtd/inftlcore.c | 968
-rw-r--r-- drivers/mtd/inftlmount.c | 791
-rw-r--r-- drivers/mtd/lpddr/Kconfig | 29
-rw-r--r-- drivers/mtd/lpddr/Makefile | 7
-rw-r--r-- drivers/mtd/lpddr/lpddr2_nvm.c | 507
-rw-r--r-- drivers/mtd/lpddr/lpddr_cmds.c | 751
-rw-r--r-- drivers/mtd/lpddr/qinfo_probe.c | 249
-rw-r--r-- drivers/mtd/maps/Kconfig | 402
-rw-r--r-- drivers/mtd/maps/Makefile | 45
-rw-r--r-- drivers/mtd/maps/amd76xrom.c | 349
-rw-r--r-- drivers/mtd/maps/bfin-async-flash.c | 196
-rw-r--r-- drivers/mtd/maps/cfi_flagadm.c | 137
-rw-r--r-- drivers/mtd/maps/ck804xrom.c | 387
-rw-r--r-- drivers/mtd/maps/dc21285.c | 231
-rw-r--r-- drivers/mtd/maps/esb2rom.c | 452
-rw-r--r-- drivers/mtd/maps/gpio-addr-flash.c | 302
-rw-r--r-- drivers/mtd/maps/ichxrom.c | 380
-rw-r--r-- drivers/mtd/maps/impa7.c | 119
-rw-r--r-- drivers/mtd/maps/intel_vr_nor.c | 265
-rw-r--r-- drivers/mtd/maps/ixp4xx.c | 261
-rw-r--r-- drivers/mtd/maps/l440gx.c | 166
-rw-r--r-- drivers/mtd/maps/lantiq-flash.c | 216
-rw-r--r-- drivers/mtd/maps/latch-addr-flash.c | 230
-rw-r--r-- drivers/mtd/maps/map_funcs.c | 43
-rw-r--r-- drivers/mtd/maps/netsc520.c | 140
-rw-r--r-- drivers/mtd/maps/nettel.c | 454
-rw-r--r-- drivers/mtd/maps/pci.c | 333
-rw-r--r-- drivers/mtd/maps/pcmciamtd.c | 753
-rw-r--r-- drivers/mtd/maps/physmap.c | 281
-rw-r--r-- drivers/mtd/maps/physmap_of.c | 375
-rw-r--r-- drivers/mtd/maps/pismo.c | 292
-rw-r--r-- drivers/mtd/maps/plat-ram.c | 261
-rw-r--r-- drivers/mtd/maps/pmcmsp-flash.c | 229
-rw-r--r-- drivers/mtd/maps/pxa2xx-flash.c | 144
-rw-r--r-- drivers/mtd/maps/rbtx4939-flash.c | 137
-rw-r--r-- drivers/mtd/maps/sa1100-flash.c | 300
-rw-r--r-- drivers/mtd/maps/sbc_gxx.c | 236
-rw-r--r-- drivers/mtd/maps/sc520cdp.c | 302
-rw-r--r-- drivers/mtd/maps/scb2_flash.c | 238
-rw-r--r-- drivers/mtd/maps/scx200_docflash.c | 225
-rw-r--r-- drivers/mtd/maps/solutionengine.c | 95
-rw-r--r-- drivers/mtd/maps/sun_uflash.c | 159
-rw-r--r-- drivers/mtd/maps/ts5500_flash.c | 121
-rw-r--r-- drivers/mtd/maps/tsunami_flash.c | 108
-rw-r--r-- drivers/mtd/maps/uclinux.c | 143
-rw-r--r-- drivers/mtd/maps/vmu-flash.c | 824
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 603
-rw-r--r-- drivers/mtd/mtdblock.c | 396
-rw-r--r-- drivers/mtd/mtdblock_ro.c | 99
-rw-r--r-- drivers/mtd/mtdchar.c | 1164
-rw-r--r-- drivers/mtd/mtdconcat.c | 942
-rw-r--r-- drivers/mtd/mtdcore.c | 1303
-rw-r--r-- drivers/mtd/mtdcore.h | 23
-rw-r--r-- drivers/mtd/mtdoops.c | 451
-rw-r--r-- drivers/mtd/mtdpart.c | 803
-rw-r--r-- drivers/mtd/mtdsuper.c | 220
-rw-r--r-- drivers/mtd/mtdswap.c | 1591
-rw-r--r-- drivers/mtd/nand/Kconfig | 533
-rw-r--r-- drivers/mtd/nand/Makefile | 56
-rw-r--r-- drivers/mtd/nand/ams-delta.c | 296
-rw-r--r-- drivers/mtd/nand/atmel_nand.c | 2412
-rw-r--r-- drivers/mtd/nand/atmel_nand_ecc.h | 158
-rw-r--r-- drivers/mtd/nand/atmel_nand_nfc.h | 103
-rw-r--r-- drivers/mtd/nand/au1550nd.c | 515
-rw-r--r-- drivers/mtd/nand/bcm47xxnflash/Makefile | 4
-rw-r--r-- drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h | 26
-rw-r--r-- drivers/mtd/nand/bcm47xxnflash/main.c | 79
-rw-r--r-- drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c | 454
-rw-r--r-- drivers/mtd/nand/bf5xx_nand.c | 847
-rw-r--r-- drivers/mtd/nand/cafe_nand.c | 869
-rw-r--r-- drivers/mtd/nand/cmx270_nand.c | 250
-rw-r--r-- drivers/mtd/nand/cs553x_nand.c | 354
-rw-r--r-- drivers/mtd/nand/davinci_nand.c | 883
-rw-r--r-- drivers/mtd/nand/denali.c | 1621
-rw-r--r-- drivers/mtd/nand/denali.h | 483
-rw-r--r-- drivers/mtd/nand/denali_dt.c | 131
-rw-r--r-- drivers/mtd/nand/denali_pci.c | 142
-rw-r--r-- drivers/mtd/nand/diskonchip.c | 1716
-rw-r--r-- drivers/mtd/nand/docg4.c | 1393
-rw-r--r-- drivers/mtd/nand/fsl_elbc_nand.c | 963
-rw-r--r-- drivers/mtd/nand/fsl_ifc_nand.c | 1178
-rw-r--r-- drivers/mtd/nand/fsl_upm.c | 361
-rw-r--r-- drivers/mtd/nand/fsmc_nand.c | 1242
-rw-r--r-- drivers/mtd/nand/gpio.c | 321
-rw-r--r-- drivers/mtd/nand/gpmi-nand/Makefile | 3
-rw-r--r-- drivers/mtd/nand/gpmi-nand/bch-regs.h | 128
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 1508
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 2051
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 311
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-regs.h | 187
-rw-r--r-- drivers/mtd/nand/hisi504_nand.c | 890
-rw-r--r-- drivers/mtd/nand/jz4740_nand.c | 579
-rw-r--r-- drivers/mtd/nand/lpc32xx_mlc.c | 889
-rw-r--r-- drivers/mtd/nand/lpc32xx_slc.c | 1011
-rw-r--r-- drivers/mtd/nand/mpc5121_nfc.c | 858
-rw-r--r-- drivers/mtd/nand/mxc_nand.c | 1649
-rw-r--r-- drivers/mtd/nand/nand_base.c | 4286
-rw-r--r-- drivers/mtd/nand/nand_bbt.c | 1375
-rw-r--r-- drivers/mtd/nand/nand_bch.c | 243
-rw-r--r-- drivers/mtd/nand/nand_ecc.c | 533
-rw-r--r-- drivers/mtd/nand/nand_ids.c | 190
-rw-r--r-- drivers/mtd/nand/nand_timings.c | 253
-rw-r--r-- drivers/mtd/nand/nandsim.c | 2425
-rw-r--r-- drivers/mtd/nand/ndfc.c | 290
-rw-r--r-- drivers/mtd/nand/nuc900_nand.c | 311
-rw-r--r-- drivers/mtd/nand/omap2.c | 2086
-rw-r--r-- drivers/mtd/nand/omap_elm.c | 578
-rw-r--r-- drivers/mtd/nand/orion_nand.c | 219
-rw-r--r-- drivers/mtd/nand/pasemi_nand.c | 236
-rw-r--r-- drivers/mtd/nand/plat_nand.c | 150
-rw-r--r-- drivers/mtd/nand/pxa3xx_nand.c | 1940
-rw-r--r-- drivers/mtd/nand/r852.c | 1085
-rw-r--r-- drivers/mtd/nand/r852.h | 161
-rw-r--r-- drivers/mtd/nand/s3c2410.c | 1142
-rw-r--r-- drivers/mtd/nand/sh_flctl.c | 1200
-rw-r--r-- drivers/mtd/nand/sharpsl.c | 232
-rw-r--r-- drivers/mtd/nand/sm_common.c | 141
-rw-r--r-- drivers/mtd/nand/sm_common.h | 61
-rw-r--r-- drivers/mtd/nand/socrates_nand.c | 253
-rw-r--r-- drivers/mtd/nand/sunxi_nand.c | 1430
-rw-r--r-- drivers/mtd/nand/tmio_nand.c | 508
-rw-r--r-- drivers/mtd/nand/txx9ndfmc.c | 427
-rw-r--r-- drivers/mtd/nand/xway_nand.c | 201
-rw-r--r-- drivers/mtd/nftlcore.c | 829
-rw-r--r-- drivers/mtd/nftlmount.c | 790
-rw-r--r-- drivers/mtd/ofpart.c | 198
-rw-r--r-- drivers/mtd/onenand/Kconfig | 70
-rw-r--r-- drivers/mtd/onenand/Makefile | 13
-rw-r--r-- drivers/mtd/onenand/generic.c | 119
-rw-r--r-- drivers/mtd/onenand/omap2.c | 815
-rw-r--r-- drivers/mtd/onenand/onenand_base.c | 4141
-rw-r--r-- drivers/mtd/onenand/onenand_bbt.c | 252
-rw-r--r-- drivers/mtd/onenand/samsung.c | 1114
-rw-r--r-- drivers/mtd/onenand/samsung.h | 59
-rw-r--r-- drivers/mtd/redboot.c | 317
-rw-r--r-- drivers/mtd/rfd_ftl.c | 849
-rw-r--r-- drivers/mtd/sm_ftl.c | 1297
-rw-r--r-- drivers/mtd/sm_ftl.h | 94
-rw-r--r-- drivers/mtd/spi-nor/Kconfig | 31
-rw-r--r-- drivers/mtd/spi-nor/Makefile | 2
-rw-r--r-- drivers/mtd/spi-nor/fsl-quadspi.c | 1012
-rw-r--r-- drivers/mtd/spi-nor/spi-nor.c | 1225
-rw-r--r-- drivers/mtd/ssfdc.c | 458
-rw-r--r-- drivers/mtd/tests/Makefile | 18
-rw-r--r-- drivers/mtd/tests/mtd_nandecctest.c | 322
-rw-r--r-- drivers/mtd/tests/mtd_test.c | 113
-rw-r--r-- drivers/mtd/tests/mtd_test.h | 23
-rw-r--r-- drivers/mtd/tests/nandbiterrs.c | 432
-rw-r--r-- drivers/mtd/tests/oobtest.c | 724
-rw-r--r-- drivers/mtd/tests/pagetest.c | 470
-rw-r--r-- drivers/mtd/tests/readtest.c | 227
-rw-r--r-- drivers/mtd/tests/speedtest.c | 438
-rw-r--r-- drivers/mtd/tests/stresstest.c | 253
-rw-r--r-- drivers/mtd/tests/subpagetest.c | 450
-rw-r--r-- drivers/mtd/tests/torturetest.c | 494
-rw-r--r-- drivers/mtd/ubi/Kconfig | 106
-rw-r--r-- drivers/mtd/ubi/Makefile | 8
-rw-r--r-- drivers/mtd/ubi/attach.c | 1768
-rw-r--r-- drivers/mtd/ubi/block.c | 669
-rw-r--r-- drivers/mtd/ubi/build.c | 1515
-rw-r--r-- drivers/mtd/ubi/cdev.c | 1110
-rw-r--r-- drivers/mtd/ubi/debug.c | 545
-rw-r--r-- drivers/mtd/ubi/debug.h | 142
-rw-r--r-- drivers/mtd/ubi/eba.c | 1477
-rw-r--r-- drivers/mtd/ubi/fastmap-wl.c | 362
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 1631
-rw-r--r-- drivers/mtd/ubi/gluebi.c | 521
-rw-r--r-- drivers/mtd/ubi/io.c | 1435
-rw-r--r-- drivers/mtd/ubi/kapi.c | 864
-rw-r--r-- drivers/mtd/ubi/misc.c | 155
-rw-r--r-- drivers/mtd/ubi/ubi-media.h | 513
-rw-r--r-- drivers/mtd/ubi/ubi.h | 1102
-rw-r--r-- drivers/mtd/ubi/upd.c | 433
-rw-r--r-- drivers/mtd/ubi/vmt.c | 863
-rw-r--r-- drivers/mtd/ubi/vtbl.c | 864
-rw-r--r-- drivers/mtd/ubi/wl.c | 1844
-rw-r--r-- drivers/mtd/ubi/wl.h | 28
219 files changed, 131712 insertions, 0 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
new file mode 100644
index 000000000..a03ad2951
--- /dev/null
+++ b/drivers/mtd/Kconfig
@@ -0,0 +1,341 @@
+menuconfig MTD
+ tristate "Memory Technology Device (MTD) support"
+ depends on GENERIC_IO
+ help
+ Memory Technology Devices are flash, RAM and similar chips, often
+ used for solid state file systems on embedded devices. This option
+ will provide the generic support for MTD drivers to register
+ themselves with the kernel and for potential users of MTD devices
+ to enumerate the devices which are present and obtain a handle on
+ them. It will also allow you to select individual drivers for
+ particular hardware and users of MTD devices. If unsure, say N.
+
+if MTD
+
+config MTD_TESTS
+ tristate "MTD tests support (DANGEROUS)"
+ depends on m
+ help
+ This option includes various MTD tests into compilation. The tests
+ should normally be compiled as kernel modules. The modules perform
+ various checks and verifications when loaded.
+
+ WARNING: some of the tests will ERASE the entire MTD device on which
+ they run. Do not use these tests unless you really know what you are
+ doing.
+
+config MTD_REDBOOT_PARTS
+ tristate "RedBoot partition table parsing"
+ ---help---
+ RedBoot is a ROM monitor and bootloader which deals with multiple
+ 'images' in flash devices by putting a table in one of the erase
+ blocks on the device, similar to a partition table, which gives
+ the offsets, lengths and names of all the images stored in the
+ flash.
+
+ If you need code which can detect and parse this table, and register
+ MTD 'partitions' corresponding to each image in the table, enable
+ this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically. The
+ SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
+ example.
+
+if MTD_REDBOOT_PARTS
+
+config MTD_REDBOOT_DIRECTORY_BLOCK
+ int "Location of RedBoot partition table"
+ default "-1"
+ ---help---
+ This option is the Linux counterpart to the
+ CYGNUM_REDBOOT_FIS_DIRECTORY_BLOCK RedBoot compile time
+ option.
+
+ The option specifies which Flash sector holds the RedBoot
+ partition table. A zero or positive value gives an absolute
+ erase block number. A negative value specifies a number of
+ sectors before the end of the device.
+
+ For example "2" means block number 2, "-1" means the last
+ block and "-2" means the penultimate block.
+
+config MTD_REDBOOT_PARTS_UNALLOCATED
+ bool "Include unallocated flash regions"
+ help
+ If you need to register each unallocated flash region as an MTD
+ 'partition', enable this option.
+
+config MTD_REDBOOT_PARTS_READONLY
+ bool "Force read-only for RedBoot system images"
+ help
+ If you need to force read-only for 'RedBoot', 'RedBoot Config' and
+ 'FIS directory' images, enable this option.
+
+endif # MTD_REDBOOT_PARTS
+
+config MTD_CMDLINE_PARTS
+ tristate "Command line partition table parsing"
+ depends on MTD
+ ---help---
+ Allow generic configuration of the MTD partition tables via the kernel
+ command line. Multiple flash resources are supported for hardware where
+ different kinds of flash memory are available.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically. The
+ SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
+ example.
+
+ The format for the command line is as follows:
+
+ mtdparts=<mtddef>[;<mtddef>]
+ <mtddef> := <mtd-id>:<partdef>[,<partdef>]
+ <partdef> := <size>[@offset][<name>][ro]
+ <mtd-id> := unique id used in mapping driver/device
+ <size> := standard linux memsize OR "-" to denote all
+ remaining space
+ <name> := (NAME)
+
+ Due to the way Linux handles the command line, no spaces are
+ allowed in the partition definition, including mtd IDs and partition
+ names.
+
+ Examples:
+
+ 1 flash resource (mtd-id "sa1100"), with a single writable partition:
+ mtdparts=sa1100:-
+
+ Same flash, but 2 named partitions, the first one being read-only:
+ mtdparts=sa1100:256k(ARMboot)ro,-(root)
+
+ If unsure, say 'N'.
+
+config MTD_AFS_PARTS
+ tristate "ARM Firmware Suite partition parsing"
+ depends on ARM
+ ---help---
+ The ARM Firmware Suite allows the user to divide flash devices into
+ multiple 'images'. Each such image has a header containing its name
+ and offset/size etc.
+
+ If you need code which can detect and parse these tables, and
+ register MTD 'partitions' corresponding to each image detected,
+ enable this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically. The
+ 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
+
+config MTD_OF_PARTS
+ tristate "OpenFirmware partitioning information support"
+ default y
+ depends on OF
+ help
+ This provides a partition parsing function which derives
+ the partition map from the children of the flash node,
+ as described in Documentation/devicetree/bindings/mtd/partition.txt.
+
+config MTD_AR7_PARTS
+ tristate "TI AR7 partitioning support"
+ ---help---
+ This provides partition parsing support for TI AR7 based devices.
+
+config MTD_BCM63XX_PARTS
+ tristate "BCM63XX CFE partitioning support"
+ depends on BCM63XX
+ select CRC32
+ help
+ This provides partition parsing for BCM63xx devices with CFE
+ bootloaders.
+
+config MTD_BCM47XX_PARTS
+ tristate "BCM47XX partitioning support"
+ depends on BCM47XX || ARCH_BCM_5301X
+ help
+ This provides a partition parser for devices based on BCM47xx
+ boards.
+
+comment "User Modules And Translation Layers"
+
+#
+# MTD block device support is select'ed if needed
+#
+config MTD_BLKDEVS
+ tristate
+
+config MTD_BLOCK
+ tristate "Caching block device access to MTD devices"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ ---help---
+ Although most flash chips have an erase size too large to be useful
+ as block devices, it is possible to use MTD devices which are based
+ on RAM chips in this manner. This block device is a user of MTD
+ devices performing that function.
+
+ At the moment, it is also required for the Journalling Flash File
+ System(s) to obtain a handle on the MTD device when it's mounted
+ (although JFFS and JFFS2 don't actually use any of the functionality
+ of the mtdblock device).
+
+ Later, it may be extended to perform read/erase/modify/write cycles
+ on flash chips to emulate a smaller block size. Needless to say,
+ this is very unsafe, but could be useful for file systems which are
+ almost never written to.
+
+ You do not need this option for use with the DiskOnChip devices. For
+ those, enable NFTL support (CONFIG_NFTL) instead.
+
+config MTD_BLOCK_RO
+ tristate "Readonly block device access to MTD devices"
+ depends on MTD_BLOCK!=y && BLOCK
+ select MTD_BLKDEVS
+ help
+ This allows you to mount read-only file systems (such as cramfs)
+ from an MTD device, without the overhead (and danger) of the caching
+ driver.
+
+ You do not need this option for use with the DiskOnChip devices. For
+ those, enable NFTL support (CONFIG_NFTL) instead.
+
+config FTL
+ tristate "FTL (Flash Translation Layer) support"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ ---help---
+ This provides support for the original Flash Translation Layer which
+ is part of the PCMCIA specification. It uses a kind of pseudo-
+ file system on a flash device to emulate a block device with
+ 512-byte sectors, on top of which you put a 'normal' file system.
+
+ You may find that the algorithms used in this code are patented
+ unless you live in the Free World where software patents aren't
+ legal - in the USA you are only permitted to use this on PCMCIA
+ hardware, although under the terms of the GPL you're obviously
+ permitted to copy, modify and distribute the code as you wish. Just
+ not to use it.
+
+config NFTL
+ tristate "NFTL (NAND Flash Translation Layer) support"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ ---help---
+ This provides support for the NAND Flash Translation Layer which is
+ used on M-Systems' DiskOnChip devices. It uses a kind of pseudo-
+ file system on a flash device to emulate a block device with
+ 512-byte sectors, on top of which you put a 'normal' file system.
+
+ You may find that the algorithms used in this code are patented
+ unless you live in the Free World where software patents aren't
+ legal - in the USA you are only permitted to use this on DiskOnChip
+ hardware, although under the terms of the GPL you're obviously
+ permitted to copy, modify and distribute the code as you wish. Just
+ not to use it.
+
+config NFTL_RW
+ bool "Write support for NFTL"
+ depends on NFTL
+ help
+ Support for writing to the NAND Flash Translation Layer, as used
+ on the DiskOnChip.
+
+config INFTL
+ tristate "INFTL (Inverse NAND Flash Translation Layer) support"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ ---help---
+ This provides support for the Inverse NAND Flash Translation
+ Layer which is used on M-Systems' newer DiskOnChip devices. It
+ uses a kind of pseudo-file system on a flash device to emulate
+ a block device with 512-byte sectors, on top of which you put
+ a 'normal' file system.
+
+ You may find that the algorithms used in this code are patented
+ unless you live in the Free World where software patents aren't
+ legal - in the USA you are only permitted to use this on DiskOnChip
+ hardware, although under the terms of the GPL you're obviously
+ permitted to copy, modify and distribute the code as you wish. Just
+ not to use it.
+
+config RFD_FTL
+ tristate "Resident Flash Disk (Flash Translation Layer) support"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ ---help---
+ This provides support for the flash translation layer known
+ as the Resident Flash Disk (RFD), as used by the Embedded BIOS
+ of General Software. There is a blurb at:
+
+ http://www.gensw.com/pages/prod/bios/rfd.htm
+
+config SSFDC
+ tristate "NAND SSFDC (SmartMedia) read only translation layer"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ help
+ This enables read-only access to SmartMedia-formatted NAND
+ flash. You can mount it with a FAT file system.
+
+
+config SM_FTL
+ tristate "SmartMedia/xD new translation layer"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ select MTD_NAND_ECC
+ help
+ This enables EXPERIMENTAL R/W support for the SmartMedia/xD
+ FTL (flash translation layer).
+ Write support is only lightly tested, so this driver is not
+ recommended for use with valuable data (in any case, if you have
+ valuable data, keep backups regardless of the software and hardware
+ you use, because you never know what will eat your data...).
+ If you only need R/O access, you can use the older R/O driver
+ (CONFIG_SSFDC) instead.
+
+config MTD_OOPS
+ tristate "Log panic/oops to an MTD buffer"
+ help
+ This enables panic and oops messages to be logged to a circular
+ buffer in a flash partition, from which they can be read back at
+ some later point.
+
+config MTD_SWAP
+ tristate "Swap on MTD device support"
+ depends on MTD && SWAP
+ select MTD_BLKDEVS
+ help
+ Provides a volatile block device driver on top of an MTD partition,
+ suitable for swapping. The mapping of written blocks is not saved.
+ The driver provides wear leveling by storing an erase counter in
+ the OOB area.
+
+config MTD_PARTITIONED_MASTER
+ bool "Retain master device when partitioned"
+ default n
+ depends on MTD
+ help
+ For historical reasons, by default, either a master is present or
+ several partitions are present, but not both. The concern was that
+ data listed in multiple partitions was dangerous; however, SCSI does
+ this and it is frequently useful for applications. This config option
+ leaves the master in even if the device is partitioned. It also makes
+ the parent of the partition device be the master device, rather than
+ what lies behind the master.
+
+source "drivers/mtd/chips/Kconfig"
+
+source "drivers/mtd/maps/Kconfig"
+
+source "drivers/mtd/devices/Kconfig"
+
+source "drivers/mtd/nand/Kconfig"
+
+source "drivers/mtd/onenand/Kconfig"
+
+source "drivers/mtd/lpddr/Kconfig"
+
+source "drivers/mtd/spi-nor/Kconfig"
+
+source "drivers/mtd/ubi/Kconfig"
+
+endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
new file mode 100644
index 000000000..99bb9a1f6
--- /dev/null
+++ b/drivers/mtd/Makefile
@@ -0,0 +1,36 @@
+#
+# Makefile for the memory technology device drivers.
+#
+
+# Core functionality.
+obj-$(CONFIG_MTD) += mtd.o
+mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
+
+obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
+obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
+obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
+obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
+obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
+
+# 'Users' - code which presents functionality to userspace.
+obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o
+obj-$(CONFIG_MTD_BLOCK) += mtdblock.o
+obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o
+obj-$(CONFIG_FTL) += ftl.o
+obj-$(CONFIG_NFTL) += nftl.o
+obj-$(CONFIG_INFTL) += inftl.o
+obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
+obj-$(CONFIG_SSFDC) += ssfdc.o
+obj-$(CONFIG_SM_FTL) += sm_ftl.o
+obj-$(CONFIG_MTD_OOPS) += mtdoops.o
+obj-$(CONFIG_MTD_SWAP) += mtdswap.o
+
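+# nftl.o and inftl.o are composite modules, assembled from the core and
+# mount objects listed below.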
+nftl-objs := nftlcore.o nftlmount.o
+inftl-objs := inftlcore.o inftlmount.o
+
+obj-y += chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
+
+obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/
+obj-$(CONFIG_MTD_UBI) += ubi/
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
new file mode 100644
index 000000000..96a33e3f7
--- /dev/null
+++ b/drivers/mtd/afs.c
@@ -0,0 +1,282 @@
+/*======================================================================
+
+ drivers/mtd/afs.c: ARM Flash Layout/Partitioning
+
+ Copyright © 2000 ARM Limited
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ This is access code for flashes using ARM's flash partitioning
+ standards.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+struct footer_struct {
+ u32 image_info_base; /* Address of first word of ImageFooter */
+ u32 image_start; /* Start of area reserved by this footer */
+ u32 signature; /* 'Magic' number proves it's a footer */
+ u32 type; /* Area type: ARM Image, SIB, customer */
+ u32 checksum; /* Just this structure */
+};
+
+struct image_info_struct {
+ u32 bootFlags; /* Boot flags, compression etc. */
+ u32 imageNumber; /* Unique number, selects for boot etc. */
+ u32 loadAddress; /* Address program should be loaded to */
+ u32 length; /* Actual size of image */
+ u32 address; /* Image is executed from here */
+ char name[16]; /* Null terminated */
+ u32 headerBase; /* Flash Address of any stripped header */
+ u32 header_length; /* Length of header in memory */
+ u32 headerType; /* AIF, RLF, s-record etc. */
+ u32 checksum; /* Image checksum (inc. this struct) */
+};
+
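+/*
+ * Sum 'num' 32-bit words, relying on natural unsigned wrap-around.
+ * An AFS footer is considered valid when the words of the structure
+ * sum to 0xffffffff (see the checksum test in afs_read_footer below).
+ */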
+static u32 word_sum(void *words, int num)
+{
+ u32 *p = words;
+ u32 sum = 0;
+
+ while (num--)
+ sum += *p++;
+
+ return sum;
+}
+
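+/*
+ * Read and validate the image footer stored at the end of the erase
+ * block beginning at 'off'. Returns 1 if a usable footer was found,
+ * 0 if the block should be skipped, or a negative error on failure.
+ */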
+static int
+afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
+ u_int off, u_int mask)
+{
+ struct footer_struct fs;
+ u_int ptr = off + mtd->erasesize - sizeof(fs);
+ size_t sz;
+ int ret;
+
+ ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
+ if (ret >= 0 && sz != sizeof(fs))
+ ret = -EINVAL;
+
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return ret;
+ }
+
+ ret = 1;
+
+ /*
+ * Does it contain the magic number?
+ */
+ if (fs.signature != 0xa0ffff9f)
+ ret = 0;
+
+ /*
+ * Check the checksum.
+ */
+ if (word_sum(&fs, sizeof(fs) / sizeof(u32)) != 0xffffffff)
+ ret = 0;
+
+ /*
+ * Don't touch the SIB.
+ */
+ if (fs.type == 2)
+ ret = 0;
+
+ *iis_start = fs.image_info_base & mask;
+ *img_start = fs.image_start & mask;
+
+ /*
+ * Check the image info base. It cannot be
+ * located after the footer structure.
+ */
+ if (*iis_start >= ptr)
+ ret = 0;
+
+ /*
+ * Check the start of this image. The image
+ * data cannot be located after this block.
+ */
+ if (*img_start > off)
+ ret = 0;
+
+ return ret;
+}
+
+static int
+afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
+{
+ size_t sz;
+ int ret, i;
+
+ memset(iis, 0, sizeof(*iis));
+ ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis);
+ if (ret < 0)
+ goto failed;
+
+ if (sz != sizeof(*iis)) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ ret = 0;
+
+ /*
+ * Validate the name - it must be NUL terminated.
+ */
+ for (i = 0; i < sizeof(iis->name); i++)
+ if (iis->name[i] == '\0')
+ break;
+
+ if (i < sizeof(iis->name))
+ ret = 1;
+
+ return ret;
+
+ failed:
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return ret;
+}
+
+static int parse_afs_partitions(struct mtd_info *mtd,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ u_int mask, off, idx, sz;
+ int ret = 0;
+ char *str;
+
+ /*
+ * This is the address mask; we use this to mask off out of
+ * range address bits.
+ */
+ mask = mtd->size - 1;
+
+ /*
+ * First, calculate the size of the array we need for the
+ * partition information. We include in this the size of
+ * the strings.
+ */
+ for (idx = off = sz = 0; off < mtd->size; off += mtd->erasesize) {
+ struct image_info_struct iis;
+ u_int iis_ptr, img_ptr;
+
+ ret = afs_read_footer(mtd, &img_ptr, &iis_ptr, off, mask);
+ if (ret < 0)
+ break;
+ if (ret == 0)
+ continue;
+
+ ret = afs_read_iis(mtd, &iis, iis_ptr);
+ if (ret < 0)
+ break;
+ if (ret == 0)
+ continue;
+
+ sz += sizeof(struct mtd_partition);
+ sz += strlen(iis.name) + 1;
+ idx += 1;
+ }
+
+ if (!sz)
+ return ret;
+
+ parts = kzalloc(sz, GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ str = (char *)(parts + idx);
+
+ /*
+ * Identify the partitions
+ */
+ for (idx = off = 0; off < mtd->size; off += mtd->erasesize) {
+ struct image_info_struct iis;
+ u_int iis_ptr, img_ptr;
+
+ /* Read the footer. */
+ ret = afs_read_footer(mtd, &img_ptr, &iis_ptr, off, mask);
+ if (ret < 0)
+ break;
+ if (ret == 0)
+ continue;
+
+ /* Read the image info block */
+ ret = afs_read_iis(mtd, &iis, iis_ptr);
+ if (ret < 0)
+ break;
+ if (ret == 0)
+ continue;
+
+ strcpy(str, iis.name);
+
+ parts[idx].name = str;
+ parts[idx].size = (iis.length + mtd->erasesize - 1) & ~(mtd->erasesize - 1);
+ parts[idx].offset = img_ptr;
+ parts[idx].mask_flags = 0;
+
+ printk(" mtd%d: at 0x%08x, %5lluKiB, %8u, %s\n",
+ idx, img_ptr, parts[idx].size / 1024,
+ iis.imageNumber, str);
+
+ idx += 1;
+ str = str + strlen(iis.name) + 1;
+ }
+
+ if (!idx) {
+ kfree(parts);
+ parts = NULL;
+ }
+
+ *pparts = parts;
+ return idx ? idx : ret;
+}
+
+static struct mtd_part_parser afs_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_afs_partitions,
+ .name = "afs",
+};
+
+static int __init afs_parser_init(void)
+{
+ register_mtd_parser(&afs_parser);
+ return 0;
+}
+
+static void __exit afs_parser_exit(void)
+{
+ deregister_mtd_parser(&afs_parser);
+}
+
+module_init(afs_parser_init);
+module_exit(afs_parser_exit);
+
+
+MODULE_AUTHOR("ARM Ltd");
+MODULE_DESCRIPTION("ARM Firmware Suite partition parser");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
new file mode 100644
index 000000000..7c9172ad2
--- /dev/null
+++ b/drivers/mtd/ar7part.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright © 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * TI AR7 flash partition table.
+ * Based on ar7 map by Felix Fietkau <nbd@openwrt.org>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+
+#include <uapi/linux/magic.h>
+
+#define AR7_PARTS 4
+#define ROOT_OFFSET 0xe0000
+
+#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
+#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
+
+struct ar7_bin_rec {
+ unsigned int checksum;
+ unsigned int length;
+ unsigned int address;
+};
+
+static int create_mtd_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct ar7_bin_rec header;
+ unsigned int offset;
+ size_t len;
+ unsigned int pre_size = master->erasesize, post_size = 0;
+ unsigned int root_offset = ROOT_OFFSET;
+
+ int retries = 10;
+ struct mtd_partition *ar7_parts;
+
+ ar7_parts = kzalloc(sizeof(*ar7_parts) * AR7_PARTS, GFP_KERNEL);
+ if (!ar7_parts)
+ return -ENOMEM;
+ ar7_parts[0].name = "loader";
+ ar7_parts[0].offset = 0;
+ ar7_parts[0].size = master->erasesize;
+ ar7_parts[0].mask_flags = MTD_WRITEABLE;
+
+ ar7_parts[1].name = "config";
+ ar7_parts[1].offset = 0;
+ ar7_parts[1].size = master->erasesize;
+ ar7_parts[1].mask_flags = 0;
+
+ do { /* Try 10 blocks starting from master->erasesize */
+ offset = pre_size;
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
+ if (!strncmp((char *)&header, "TIENV0.8", 8))
+ ar7_parts[1].offset = pre_size;
+ if (header.checksum == LOADER_MAGIC1)
+ break;
+ if (header.checksum == LOADER_MAGIC2)
+ break;
+ pre_size += master->erasesize;
+ } while (retries--);
+
+ pre_size = offset;
+
+ if (!ar7_parts[1].offset) {
+ ar7_parts[1].offset = master->size - master->erasesize;
+ post_size = master->erasesize;
+ }
+
+ switch (header.checksum) {
+ case LOADER_MAGIC1:
+ while (header.length) {
+ offset += sizeof(header) + header.length;
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
+ }
+ root_offset = offset + sizeof(header) + 4;
+ break;
+ case LOADER_MAGIC2:
+ while (header.length) {
+ offset += sizeof(header) + header.length;
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
+ }
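+ /* Round the rootfs offset up to the next 256-byte boundary. */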
+ root_offset = offset + sizeof(header) + 4 + 0xff;
+ root_offset &= ~(uint32_t)0xff;
+ break;
+ default:
+ printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum);
+ break;
+ }
+
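+ /* If there is no squashfs magic at root_offset, assume the
+ * rootfs starts on the next erase-block boundary. */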
+ mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
+ if (header.checksum != SQUASHFS_MAGIC) {
+ root_offset += master->erasesize - 1;
+ root_offset &= ~(master->erasesize - 1);
+ }
+
+ ar7_parts[2].name = "linux";
+ ar7_parts[2].offset = pre_size;
+ ar7_parts[2].size = master->size - pre_size - post_size;
+ ar7_parts[2].mask_flags = 0;
+
+ ar7_parts[3].name = "rootfs";
+ ar7_parts[3].offset = root_offset;
+ ar7_parts[3].size = master->size - root_offset - post_size;
+ ar7_parts[3].mask_flags = 0;
+
+ *pparts = ar7_parts;
+ return AR7_PARTS;
+}
+
+static struct mtd_part_parser ar7_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = create_mtd_partitions,
+ .name = "ar7part",
+};
+
+static int __init ar7_parser_init(void)
+{
+ register_mtd_parser(&ar7_parser);
+ return 0;
+}
+
+static void __exit ar7_parser_exit(void)
+{
+ deregister_mtd_parser(&ar7_parser);
+}
+
+module_init(ar7_parser_init);
+module_exit(ar7_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
+ "Eugene Konev <ejka@openwrt.org>");
+MODULE_DESCRIPTION("MTD partitioning for TI AR7");
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
new file mode 100644
index 000000000..c0720c1ee
--- /dev/null
+++ b/drivers/mtd/bcm47xxpart.c
@@ -0,0 +1,336 @@
+/*
+ * BCM47XX MTD partitioning
+ *
+ * Copyright © 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <uapi/linux/magic.h>
+
+/*
+ * NAND flash on Netgear R6250 was verified to contain 15 partitions.
+ * This will result in allocating too big an array for some old devices, but the
+ * memory will be freed soon anyway (see mtd_device_parse_register).
+ */
+#define BCM47XXPART_MAX_PARTS 20
+
+/*
+ * Number of bytes we read when analyzing each block of flash memory.
+ * Set it big enough to allow detecting a partition and reading important data.
+ */
+#define BCM47XXPART_BYTES_TO_READ 0x4e8
+
+/* Magics */
+#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
+#define BOARD_DATA_MAGIC2 0xBD0D0BBD
+#define CFE_MAGIC 0x43464531 /* 1EFC */
+#define FACTORY_MAGIC 0x59544346 /* FCTY */
+#define NVRAM_HEADER 0x48534C46 /* FLSH */
+#define POT_MAGIC1 0x54544f50 /* POTT */
+#define POT_MAGIC2 0x504f /* OP */
+#define ML_MAGIC1 0x39685a42
+#define ML_MAGIC2 0x26594131
+#define TRX_MAGIC 0x30524448
+#define SHSQ_MAGIC 0x71736873 /* shsq (weird ZTE H218N endianness) */
+#define UBI_EC_MAGIC 0x23494255 /* UBI# */
+
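+/*
+ * TRX container header. The offset[] entries give the positions of the
+ * contained images relative to the header: typically an (optional)
+ * loader, the kernel and the rootfs; unused entries are zero.
+ */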
+struct trx_header {
+ uint32_t magic;
+ uint32_t length;
+ uint32_t crc32;
+ uint16_t flags;
+ uint16_t version;
+ uint32_t offset[3];
+} __packed;
+
+static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name,
+ u64 offset, uint32_t mask_flags)
+{
+ part->name = name;
+ part->offset = offset;
+ part->mask_flags = mask_flags;
+}
+
+static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
+ size_t offset)
+{
+ uint32_t buf;
+ size_t bytes_read;
+
+ if (mtd_read(master, offset, sizeof(buf), &bytes_read,
+ (uint8_t *)&buf) < 0) {
+ pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+ offset);
+ goto out_default;
+ }
+
+ if (buf == UBI_EC_MAGIC)
+ return "ubi";
+
+out_default:
+ return "rootfs";
+}
+
+static int bcm47xxpart_parse(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ uint8_t i, curr_part = 0;
+ uint32_t *buf;
+ size_t bytes_read;
+ uint32_t offset;
+ uint32_t blocksize = master->erasesize;
+ struct trx_header *trx;
+ int trx_part = -1;
+ int last_trx_part = -1;
+ int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+
+ /*
+ * Some really old flashes (like AT45DB*) had smaller erasesize-s, but
+ * partitions were aligned to at least 0x1000 anyway.
+ */
+ if (blocksize < 0x1000)
+ blocksize = 0x1000;
+
+ /* Alloc */
+ parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
+ GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+ if (!buf) {
+ kfree(parts);
+ return -ENOMEM;
+ }
+
+ /* Parse block by block looking for magics */
+ for (offset = 0; offset <= master->size - blocksize;
+ offset += blocksize) {
+ /* Nothing more in higher memory */
+ if (offset >= 0x2000000)
+ break;
+
+ if (curr_part >= BCM47XXPART_MAX_PARTS) {
+ pr_warn("Reached maximum number of partitions, scanning stopped!\n");
+ break;
+ }
+
+ /* Read beginning of the block */
+ if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ &bytes_read, (uint8_t *)buf) < 0) {
+ pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+ offset);
+ continue;
+ }
+
+ /* Magic or small NVRAM at 0x400 */
+ if ((buf[0x4e0 / 4] == CFE_MAGIC && buf[0x4e4 / 4] == CFE_MAGIC) ||
+ (buf[0x400 / 4] == NVRAM_HEADER)) {
+ bcm47xxpart_add_part(&parts[curr_part++], "boot",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /*
+ * board_data starts with board_id which differs across boards,
+ * but we can use 'MPFR' (hopefully) magic at 0x100
+ */
+ if (buf[0x100 / 4] == BOARD_DATA_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "board_data",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /* Found on Huawei E970 */
+ if (buf[0x000 / 4] == FACTORY_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "factory",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /* POT(TOP) */
+ if (buf[0x000 / 4] == POT_MAGIC1 &&
+ (buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "POT", offset,
+ MTD_WRITEABLE);
+ continue;
+ }
+
+ /* ML */
+ if (buf[0x010 / 4] == ML_MAGIC1 &&
+ buf[0x014 / 4] == ML_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "ML", offset,
+ MTD_WRITEABLE);
+ continue;
+ }
+
+ /* TRX */
+ if (buf[0x000 / 4] == TRX_MAGIC) {
+ if (BCM47XXPART_MAX_PARTS - curr_part < 4) {
+ pr_warn("Not enough partitions left to register trx, scanning stopped!\n");
+ break;
+ }
+
+ trx = (struct trx_header *)buf;
+
+ trx_part = curr_part;
+ bcm47xxpart_add_part(&parts[curr_part++], "firmware",
+ offset, 0);
+
+ i = 0;
+ /* We have an LZMA loader if offset[2] points to something */
+ if (trx->offset[2]) {
+ bcm47xxpart_add_part(&parts[curr_part++],
+ "loader",
+ offset + trx->offset[i],
+ 0);
+ i++;
+ }
+
+ if (trx->offset[i]) {
+ bcm47xxpart_add_part(&parts[curr_part++],
+ "linux",
+ offset + trx->offset[i],
+ 0);
+ i++;
+ }
+
+ /*
+ * Pure rootfs size is known and can be calculated as:
+ * trx->length - trx->offset[i]. We don't fill it as
+ * we want to have jffs2 (overlay) in the same mtd.
+ */
+ if (trx->offset[i]) {
+ const char *name;
+
+ name = bcm47xxpart_trx_data_part_name(master, offset + trx->offset[i]);
+ bcm47xxpart_add_part(&parts[curr_part++],
+ name,
+ offset + trx->offset[i],
+ 0);
+ i++;
+ }
+
+ last_trx_part = curr_part - 1;
+
+ /*
+ * We have whole TRX scanned, skip to the next part. Use
+ * rounddown (not roundup), as the loop will increase
+ * offset in next step.
+ */
+ offset = rounddown(offset + trx->length, blocksize);
+ continue;
+ }
+
+ /* Squashfs on devices not using TRX */
+ if (le32_to_cpu(buf[0x000 / 4]) == SQUASHFS_MAGIC ||
+ buf[0x000 / 4] == SHSQ_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
+ offset, 0);
+ continue;
+ }
+
+ /*
+ * New (ARM?) devices may have NVRAM in some middle block. Last
+ * block will be checked later, so skip it.
+ */
+ if (offset != master->size - blocksize &&
+ buf[0x000 / 4] == NVRAM_HEADER) {
+ bcm47xxpart_add_part(&parts[curr_part++], "nvram",
+ offset, 0);
+ continue;
+ }
+
+ /* Read middle of the block */
+ if (mtd_read(master, offset + 0x8000, 0x4,
+ &bytes_read, (uint8_t *)buf) < 0) {
+ pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+ offset);
+ continue;
+ }
+
+ /* Some devices (e.g. WNDR3700v3) don't have a standard 'MPFR' */
+ if (buf[0x000 / 4] == BOARD_DATA_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "board_data",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+ }
+
+ /* Look for NVRAM at the end of the last block. */
+ for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) {
+ if (curr_part >= BCM47XXPART_MAX_PARTS) {
+ pr_warn("Reached maximum number of partitions, scanning stopped!\n");
+ break;
+ }
+
+ offset = master->size - possible_nvram_sizes[i];
+ if (mtd_read(master, offset, 0x4, &bytes_read,
+ (uint8_t *)buf) < 0) {
+ pr_err("mtd_read error while reading at offset 0x%X!\n",
+ offset);
+ continue;
+ }
+
+ /* Standard NVRAM */
+ if (buf[0] == NVRAM_HEADER) {
+ bcm47xxpart_add_part(&parts[curr_part++], "nvram",
+ master->size - blocksize, 0);
+ break;
+ }
+ }
+
+ kfree(buf);
+
+ /*
+ * Assume that partitions end at the beginning of the one they are
+ * followed by.
+ */
+ for (i = 0; i < curr_part; i++) {
+ u64 next_part_offset = (i < curr_part - 1) ?
+ parts[i + 1].offset : master->size;
+
+ parts[i].size = next_part_offset - parts[i].offset;
+ if (i == last_trx_part && trx_part >= 0)
+ parts[trx_part].size = next_part_offset -
+ parts[trx_part].offset;
+ }
+
+ *pparts = parts;
+ return curr_part;
+}
+
+static struct mtd_part_parser bcm47xxpart_mtd_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = bcm47xxpart_parse,
+ .name = "bcm47xxpart",
+};
+
+static int __init bcm47xxpart_init(void)
+{
+ register_mtd_parser(&bcm47xxpart_mtd_parser);
+ return 0;
+}
+
+static void __exit bcm47xxpart_exit(void)
+{
+ deregister_mtd_parser(&bcm47xxpart_mtd_parser);
+}
+
+module_init(bcm47xxpart_init);
+module_exit(bcm47xxpart_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD partitioning for BCM47XX flash memories");
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
new file mode 100644
index 000000000..b2443f703
--- /dev/null
+++ b/drivers/mtd/bcm63xxpart.c
@@ -0,0 +1,241 @@
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011-2013 Jonas Gorski <jonas.gorski@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-bcm63xx/bcm63xx_nvram.h>
+#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+#include <asm/mach-bcm63xx/board_bcm963xx.h>
+
+#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
+
+#define BCM63XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
+
+#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
+
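+/*
+ * Returns 0 when a CFE bootloader is detected, either by the "cfe-v"
+ * version string or, for very old CFEs, by the "CFE1CFE1" magic;
+ * non-zero otherwise.
+ */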
+static int bcm63xx_detect_cfe(struct mtd_info *master)
+{
+ char buf[9];
+ int ret;
+ size_t retlen;
+
+ ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ if (ret)
+ return ret;
+
+ if (strncmp("cfe-v", buf, 5) == 0)
+ return 0;
+
+ /* very old CFE's do not have the cfe-v string, so check for magic */
+ ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ return strncmp("CFE1CFE1", buf, 8);
+}
+
+static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ /* CFE, NVRAM and global Linux are always present */
+ int nrparts = 3, curpart = 0;
+ struct bcm_tag *buf;
+ struct mtd_partition *parts;
+ int ret;
+ size_t retlen;
+ unsigned int rootfsaddr, kerneladdr, spareaddr;
+ unsigned int rootfslen, kernellen, sparelen, totallen;
+ unsigned int cfelen, nvramlen;
+ unsigned int cfe_erasesize;
+ int i;
+ u32 computed_crc;
+ bool rootfs_first = false;
+
+ if (bcm63xx_detect_cfe(master))
+ return -EINVAL;
+
+ cfe_erasesize = max_t(uint32_t, master->erasesize,
+ BCM63XX_CFE_BLOCK_SIZE);
+
+ cfelen = cfe_erasesize;
+ nvramlen = bcm63xx_nvram_get_psi_size() * SZ_1K;
+ nvramlen = roundup(nvramlen, cfe_erasesize);
+
+ /* Allocate memory for buffer */
+ buf = vmalloc(sizeof(struct bcm_tag));
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the tag */
+ ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen,
+ (void *)buf);
+
+ if (retlen != sizeof(struct bcm_tag)) {
+ vfree(buf);
+ return -EIO;
+ }
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ char *boardid = &(buf->board_id[0]);
+ char *tagversion = &(buf->tag_version[0]);
+
+ sscanf(buf->flash_image_start, "%u", &rootfsaddr);
+ sscanf(buf->kernel_address, "%u", &kerneladdr);
+ sscanf(buf->kernel_length, "%u", &kernellen);
+ sscanf(buf->total_length, "%u", &totallen);
+
+ pr_info("CFE boot tag found with version %s and board type %s\n",
+ tagversion, boardid);
+
+ kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
+ rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
+ spareaddr = roundup(totallen, master->erasesize) + cfelen;
+
+ if (rootfsaddr < kerneladdr) {
+ /* default Broadcom layout */
+ rootfslen = kerneladdr - rootfsaddr;
+ rootfs_first = true;
+ } else {
+ /* OpenWrt layout */
+ rootfsaddr = kerneladdr + kernellen;
+ rootfslen = spareaddr - rootfsaddr;
+ }
+ } else {
+ pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
+ buf->header_crc, computed_crc);
+ kernellen = 0;
+ rootfslen = 0;
+ rootfsaddr = 0;
+ spareaddr = cfelen;
+ }
+ sparelen = master->size - spareaddr - nvramlen;
+
+ /* Determine number of partitions */
+ if (rootfslen > 0)
+ nrparts++;
+
+ if (kernellen > 0)
+ nrparts++;
+
+ /* Ask kernel for more memory */
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts) {
+ vfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Start building partition list */
+ parts[curpart].name = "CFE";
+ parts[curpart].offset = 0;
+ parts[curpart].size = cfelen;
+ curpart++;
+
+ if (kernellen > 0) {
+ int kernelpart = curpart;
+
+ if (rootfslen > 0 && rootfs_first)
+ kernelpart++;
+ parts[kernelpart].name = "kernel";
+ parts[kernelpart].offset = kerneladdr;
+ parts[kernelpart].size = kernellen;
+ curpart++;
+ }
+
+ if (rootfslen > 0) {
+ int rootfspart = curpart;
+
+ if (kernellen > 0 && rootfs_first)
+ rootfspart--;
+ parts[rootfspart].name = "rootfs";
+ parts[rootfspart].offset = rootfsaddr;
+ parts[rootfspart].size = rootfslen;
+ if (sparelen > 0 && !rootfs_first)
+ parts[rootfspart].size += sparelen;
+ curpart++;
+ }
+
+ parts[curpart].name = "nvram";
+ parts[curpart].offset = master->size - nvramlen;
+ parts[curpart].size = nvramlen;
+ curpart++;
+
+ /* Global partition "linux" to make firmware upgrades easy */
+ parts[curpart].name = "linux";
+ parts[curpart].offset = cfelen;
+ parts[curpart].size = master->size - cfelen - nvramlen;
+
+ for (i = 0; i < nrparts; i++)
+ pr_info("Partition %d is %s offset %llx and length %llx\n", i,
+ parts[i].name, parts[i].offset, parts[i].size);
+
+ pr_info("Spare partition is offset %x and length %x\n", spareaddr,
+ sparelen);
+
+ *pparts = parts;
+ vfree(buf);
+
+ return nrparts;
+}
+
+static struct mtd_part_parser bcm63xx_cfe_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = bcm63xx_parse_cfe_partitions,
+ .name = "bcm63xxpart",
+};
+
+static int __init bcm63xx_cfe_parser_init(void)
+{
+ register_mtd_parser(&bcm63xx_cfe_parser);
+ return 0;
+}
+
+static void __exit bcm63xx_cfe_parser_exit(void)
+{
+ deregister_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+module_init(bcm63xx_cfe_parser_init);
+module_exit(bcm63xx_cfe_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com");
+MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
new file mode 100644
index 000000000..9f02c28c0
--- /dev/null
+++ b/drivers/mtd/chips/Kconfig
@@ -0,0 +1,234 @@
+menu "RAM/ROM/Flash chip drivers"
+ depends on MTD!=n
+
+config MTD_CFI
+ tristate "Detect flash chips by Common Flash Interface (CFI) probe"
+ select MTD_GEN_PROBE
+ select MTD_CFI_UTIL
+ help
+ The Common Flash Interface specification was developed by Intel,
+ AMD and other flash manufacturers; it provides a universal method
+ for probing the capabilities of flash devices. If you wish to
+ support any device that is CFI-compliant, you need to enable this
+ option. Visit <http://www.amd.com/products/nvd/overview/cfi.html>
+ for more information on CFI.
+
+config MTD_JEDECPROBE
+ tristate "Detect non-CFI AMD/JEDEC-compatible flash chips"
+ select MTD_GEN_PROBE
+ help
+ This option enables JEDEC-style probing of flash chips which are not
+ compatible with the Common Flash Interface, but will use the common
+ CFI-targeted flash drivers for any identified chips which are in
+ fact compatible in all but the probe method. This actually
+ covers most AMD/Fujitsu-compatible chips and also non-CFI
+ Intel chips.
+
+config MTD_GEN_PROBE
+ tristate
+
+config MTD_CFI_ADV_OPTIONS
+ bool "Flash chip driver advanced configuration options"
+ depends on MTD_GEN_PROBE
+ help
+ If you need to specify a specific endianness for access to flash
+ chips, or if you wish to reduce the size of the kernel by including
+ support for only specific arrangements of flash chips, say 'Y'. This
+ option does not directly affect the code, but will enable other
+ configuration options which allow you to do so.
+
+ If unsure, say 'N'.
+
+choice
+ prompt "Flash cmd/query data swapping"
+ depends on MTD_CFI_ADV_OPTIONS
+ default MTD_CFI_NOSWAP
+ ---help---
+ This option defines the way in which the CPU attempts to arrange
+ data bits when writing the 'magic' commands to the chips. Saying
+ 'NO', which is the default when CONFIG_MTD_CFI_ADV_OPTIONS isn't
+ enabled, means that the CPU will not do any swapping; the chips
+ are expected to be wired to the CPU in 'host-endian' form.
+ Specific arrangements are possible with the BIG_ENDIAN_BYTE and
+ LITTLE_ENDIAN_BYTE, if the bytes are reversed.
+
+config MTD_CFI_NOSWAP
+ bool "NO"
+
+config MTD_CFI_BE_BYTE_SWAP
+ bool "BIG_ENDIAN_BYTE"
+
+config MTD_CFI_LE_BYTE_SWAP
+ bool "LITTLE_ENDIAN_BYTE"
+
+endchoice
+
+config MTD_CFI_GEOMETRY
+ bool "Specific CFI Flash geometry selection"
+ depends on MTD_CFI_ADV_OPTIONS
+ help
+ This option does not affect the code directly, but will enable
+ some other configuration options which would allow you to reduce
+ the size of the kernel by including support for only certain
+ arrangements of CFI chips. If unsure, say 'N' and all options
+ which are supported by the current code will be enabled.
+
+config MTD_MAP_BANK_WIDTH_1
+ bool "Support 8-bit buswidth" if MTD_CFI_GEOMETRY
+ default y
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 8 bits wide, say 'Y'.
+
+config MTD_MAP_BANK_WIDTH_2
+ bool "Support 16-bit buswidth" if MTD_CFI_GEOMETRY
+ default y
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 16 bits wide, say 'Y'.
+
+config MTD_MAP_BANK_WIDTH_4
+ bool "Support 32-bit buswidth" if MTD_CFI_GEOMETRY
+ default y
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 32 bits wide, say 'Y'.
+
+config MTD_MAP_BANK_WIDTH_8
+ bool "Support 64-bit buswidth" if MTD_CFI_GEOMETRY
+ default n
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 64 bits wide, say 'Y'.
+
+config MTD_MAP_BANK_WIDTH_16
+ bool "Support 128-bit buswidth" if MTD_CFI_GEOMETRY
+ default n
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 128 bits wide, say 'Y'.
+
+config MTD_MAP_BANK_WIDTH_32
+ bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY
+ default n
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 256 bits wide, say 'Y'.
+
+config MTD_CFI_I1
+ bool "Support 1-chip flash interleave" if MTD_CFI_GEOMETRY
+ default y
+ help
+ If your flash chips are not interleaved - i.e. you only have one
+ flash chip addressed by each bus cycle, then say 'Y'.
+
+config MTD_CFI_I2
+ bool "Support 2-chip flash interleave" if MTD_CFI_GEOMETRY
+ default y
+ help
+ If your flash chips are interleaved in pairs - i.e. you have two
+ flash chips addressed by each bus cycle, then say 'Y'.
+
+config MTD_CFI_I4
+ bool "Support 4-chip flash interleave" if MTD_CFI_GEOMETRY
+ default n
+ help
+ If your flash chips are interleaved in fours - i.e. you have four
+ flash chips addressed by each bus cycle, then say 'Y'.
+
+config MTD_CFI_I8
+ bool "Support 8-chip flash interleave" if MTD_CFI_GEOMETRY
+ default n
+ help
+ If your flash chips are interleaved in eights - i.e. you have eight
+ flash chips addressed by each bus cycle, then say 'Y'.
+
+config MTD_OTP
+ bool "Protection Registers aka one-time programmable (OTP) bits"
+ depends on MTD_CFI_ADV_OPTIONS
+ default n
+ help
+ This enables support for reading, writing and locking so called
+ "Protection Registers" present on some flash chips.
+ A subset of them are pre-programmed at the factory with a
+ unique set of values. The rest is user-programmable.
+
+ The user-programmable Protection Registers contain one-time
+ programmable (OTP) bits; when programmed, register bits cannot be
+ erased. Each Protection Register can be accessed multiple times to
+ program individual bits, as long as the register remains unlocked.
+
+ Each Protection Register has an associated Lock Register bit. When a
+ Lock Register bit is programmed, the associated Protection Register
+ can only be read; it can no longer be programmed. Additionally,
+ because the Lock Register bits themselves are OTP, when programmed,
+ Lock Register bits cannot be erased. Therefore, when a Protection
+ Register is locked, it cannot be unlocked.
+
+ This feature should therefore be used with extreme care. Any mistake
+ in the programming of OTP bits will waste them.
+
+config MTD_CFI_INTELEXT
+ tristate "Support for CFI command set 0001 (Intel/Sharp chips)"
+ depends on MTD_GEN_PROBE
+ select MTD_CFI_UTIL
+ help
+ The Common Flash Interface defines a number of different command
+ sets which a CFI-compliant chip may claim to implement. This code
+ provides support for command set 0001, used on Intel StrataFlash
+ and other parts.
+
+config MTD_CFI_AMDSTD
+ tristate "Support for CFI command set 0002 (AMD/Fujitsu/Spansion chips)"
+ depends on MTD_GEN_PROBE
+ select MTD_CFI_UTIL
+ help
+ The Common Flash Interface defines a number of different command
+ sets which a CFI-compliant chip may claim to implement. This code
+ provides support for command set 0002, used on chips including
+ the AMD Am29LV320.
+
+config MTD_CFI_STAA
+ tristate "Support for CFI command set 0020 (ST (Advanced Architecture) chips)"
+ depends on MTD_GEN_PROBE
+ select MTD_CFI_UTIL
+ help
+ The Common Flash Interface defines a number of different command
+ sets which a CFI-compliant chip may claim to implement. This code
+ provides support for command set 0020.
+
+config MTD_CFI_UTIL
+ tristate
+
+config MTD_RAM
+ tristate "Support for RAM chips in bus mapping"
+ help
+ This option enables basic support for RAM chips accessed through
+ a bus mapping driver.
+
+config MTD_ROM
+ tristate "Support for ROM chips in bus mapping"
+ help
+ This option enables basic support for ROM chips accessed through
+ a bus mapping driver.
+
+config MTD_ABSENT
+ tristate "Support for absent chips in bus mapping"
+ help
+ This option enables support for a dummy probing driver used to
+	  allocate placeholder MTD devices on systems that have socketed
+ or removable media. Use of this driver as a fallback chip probe
+ preserves the expected registration order of MTD device nodes on
+ the system regardless of media presence. Device nodes created
+ with this driver will return -ENODEV upon access.
+
+config MTD_XIP
+ bool "XIP aware MTD support"
+ depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && ARCH_MTD_XIP
+ default y if XIP_KERNEL
+ help
+ This allows MTD support to work with flash memory which is also
+ used for XIP purposes. If you're not sure what this is all about
+ then say N.
+
+endmenu
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
new file mode 100644
index 000000000..36582412c
--- /dev/null
+++ b/drivers/mtd/chips/Makefile
@@ -0,0 +1,15 @@
+#
+# linux/drivers/mtd/chips/Makefile
+#
+
+obj-$(CONFIG_MTD) += chipreg.o
+obj-$(CONFIG_MTD_CFI) += cfi_probe.o
+obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o
+obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o
+obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o
+obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
+obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o
+obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o
+obj-$(CONFIG_MTD_RAM) += map_ram.o
+obj-$(CONFIG_MTD_ROM) += map_rom.o
+obj-$(CONFIG_MTD_ABSENT) += map_absent.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
new file mode 100644
index 000000000..286b97a30
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -0,0 +1,2666 @@
+/*
+ * Common Flash Interface support:
+ * Intel Extended Vendor Command Set (ID 0x0001)
+ *
+ * (C) 2000 Red Hat. GPL'd
+ *
+ *
+ * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
+ * - completely revamped method functions so they are aware and
+ * independent of the flash geometry (buswidth, interleave, etc.)
+ * - scalability vs code size is completely set at compile-time
+ * (see include/linux/mtd/cfi.h for selection)
+ * - optimized write buffer method
+ * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
+ * - reworked lock/unlock/erase support for var size flash
+ * 21/03/2007 Rodolfo Giometti <giometti@linux.it>
+ * - auto unlock sectors on resume for auto locking flash on power up
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+#include <linux/bitmap.h>
+#include <linux/mtd/xip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/cfi.h>
+
+/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
+/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
+
+/* debugging, turns off buffer write mode if set to 1 */
+#define FORCE_WORD_WRITE 0
+
+/* Intel chips */
+#define I82802AB 0x00ad
+#define I82802AC 0x00ac
+#define PF38F4476 0x881c
+/* STMicroelectronics chips */
+#define M50LPW080 0x002F
+#define M50FLW080A 0x0080
+#define M50FLW080B 0x0081
+/* Atmel chips */
+#define AT49BV640D 0x02de
+#define AT49BV640DT 0x02db
+/* Sharp chips */
+#define LH28F640BFHE_PTTL90 0x00b0
+#define LH28F640BFHE_PBTL90 0x00b1
+#define LH28F640BFHE_PTTL70A 0x00b2
+#define LH28F640BFHE_PBTL70A 0x00b3
+
+static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
+static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
+static void cfi_intelext_sync (struct mtd_info *);
+static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len);
+#ifdef CONFIG_MTD_OTP
+static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
+static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+#endif
+static int cfi_intelext_suspend (struct mtd_info *);
+static void cfi_intelext_resume (struct mtd_info *);
+static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
+
+static void cfi_intelext_destroy(struct mtd_info *);
+
+struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
+
+static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
+static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
+
+static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+
+static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
+#include "fwh_lock.h"
+
+
+
+/*
+ * *********** SETUP AND PROBE BITS ***********
+ */
+
+static struct mtd_chip_driver cfi_intelext_chipdrv = {
+ .probe = NULL, /* Not usable directly */
+ .destroy = cfi_intelext_destroy,
+ .name = "cfi_cmdset_0001",
+ .module = THIS_MODULE
+};
+
+/* #define DEBUG_LOCK_BITS */
+/* #define DEBUG_CFI_FEATURES */
+
+#ifdef DEBUG_CFI_FEATURES
+static void cfi_tell_features(struct cfi_pri_intelext *extp)
+{
+ int i;
+ printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
+ printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
+ printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
+ printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
+ printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
+ printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
+ printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
+ printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
+ printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
+ printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
+ printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
+ printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
+ printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
+ for (i=11; i<32; i++) {
+ if (extp->FeatureSupport & (1<<i))
+ printk(" - Unknown Bit %X: supported\n", i);
+ }
+
+ printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
+ printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
+ for (i=1; i<8; i++) {
+ if (extp->SuspendCmdSupport & (1<<i))
+ printk(" - Unknown Bit %X: supported\n", i);
+ }
+
+ printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
+ printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
+ printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
+ for (i=2; i<3; i++) {
+ if (extp->BlkStatusRegMask & (1<<i))
+ printk(" - Unknown Bit %X Active: yes\n",i);
+ }
+ printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
+ printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
+ for (i=6; i<16; i++) {
+ if (extp->BlkStatusRegMask & (1<<i))
+ printk(" - Unknown Bit %X Active: yes\n",i);
+ }
+
+ printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
+ if (extp->VppOptimal)
+ printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
+}
+#endif
+
+/* Atmel chips don't use the same PRI format as Intel chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ struct cfi_pri_atmel atmel_pri;
+ uint32_t features = 0;
+
+ /* Reverse byteswapping */
+ extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
+ extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
+ extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
+
+ memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+ memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+ printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
+
+ if (atmel_pri.Features & 0x01) /* chip erase supported */
+ features |= (1<<0);
+ if (atmel_pri.Features & 0x02) /* erase suspend supported */
+ features |= (1<<1);
+ if (atmel_pri.Features & 0x04) /* program suspend supported */
+ features |= (1<<2);
+ if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
+ features |= (1<<9);
+ if (atmel_pri.Features & 0x20) /* page mode read supported */
+ features |= (1<<7);
+ if (atmel_pri.Features & 0x40) /* queued erase supported */
+ features |= (1<<4);
+ if (atmel_pri.Features & 0x80) /* Protection bits supported */
+ features |= (1<<6);
+
+ extp->FeatureSupport = features;
+
+ /* burst write mode not supported */
+ cfi->cfiq->BufWriteTimeoutTyp = 0;
+ cfi->cfiq->BufWriteTimeoutMax = 0;
+}
+
+static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+ cfip->FeatureSupport |= (1 << 5);
+ mtd->flags |= MTD_POWERUP_LOCK;
+}
+
+#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
+/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
+static void fixup_intel_strataflash(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
+ "erase on write disabled.\n");
+ extp->SuspendCmdSupport &= ~1;
+}
+#endif
+
+#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
+static void fixup_no_write_suspend(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+ if (cfip && (cfip->FeatureSupport&4)) {
+ cfip->FeatureSupport &= ~4;
+ printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
+ }
+}
+#endif
+
+static void fixup_st_m28w320ct(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
+ cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
+}
+
+static void fixup_st_m28w320cb(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /* Note this is done after the region info is endian swapped */
+ cfi->cfiq->EraseRegionInfo[1] =
+ (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
+}
+
+static int is_LH28F640BF(struct cfi_private *cfi)
+{
+ /* Sharp LH28F640BF Family */
+ if (cfi->mfr == CFI_MFR_SHARP && (
+ cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
+ cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
+ return 1;
+ return 0;
+}
+
+static void fixup_LH28F640BF(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ /* Reset the Partition Configuration Register on LH28F640BF
+ * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
+ if (is_LH28F640BF(cfi)) {
+ printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
+ map_write(map, CMD(0x60), 0);
+ map_write(map, CMD(0x04), 0);
+
+ /* We have set one single partition thus
+ * Simultaneous Operations are not allowed */
+ printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
+ extp->FeatureSupport &= ~512;
+ }
+}
+
+static void fixup_use_point(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ if (!mtd->_point && map_is_linear(map)) {
+ mtd->_point = cfi_intelext_point;
+ mtd->_unpoint = cfi_intelext_unpoint;
+ }
+}
+
+static void fixup_use_write_buffers(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ if (cfi->cfiq->BufWriteTimeoutTyp) {
+ printk(KERN_INFO "Using buffer write method\n" );
+ mtd->_write = cfi_intelext_write_buffers;
+ mtd->_writev = cfi_intelext_writev;
+ }
+}
+
+/*
+ * Some chips power-up with all sectors locked by default.
+ */
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+ if (cfip->FeatureSupport&32) {
+ printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
+ mtd->flags |= MTD_POWERUP_LOCK;
+ }
+}
+
+static struct cfi_fixup cfi_fixup_table[] = {
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
+ { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
+ { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
+#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
+#endif
+#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
+#endif
+#if !FORCE_WORD_WRITE
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
+#endif
+ { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
+ { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
+ { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
+ { 0, 0, NULL }
+};
+
+static struct cfi_fixup jedec_fixup_table[] = {
+ { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
+ { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
+ { 0, 0, NULL }
+};
+static struct cfi_fixup fixup_table[] = {
+	/* The CFI vendor IDs and the JEDEC vendor IDs appear
+	 * to be common, and it is likely that the device IDs
+	 * are as well. This table picks out all the cases where
+	 * we know that to be true.
+	 */
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
+ { 0, 0, NULL }
+};
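All three tables are consumed by cfi_fixup() from cfi_util.c (built when the
MTD_CFI_UTIL option selected above is enabled). The matcher is a linear scan
in which CFI_MFR_ANY and CFI_ID_ANY act as wildcards; in rough outline (a
paraphrase, not the verbatim helper):

    /* Outline of the cfi_util.c walker: apply every fixup whose
     * manufacturer and device IDs match the probed chip, treating
     * the _ANY constants as wildcards. */
    static void cfi_fixup_outline(struct mtd_info *mtd, struct cfi_fixup *fixups)
    {
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_fixup *f;

        for (f = fixups; f->fixup; f++) {
            if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
                (f->id == CFI_ID_ANY || f->id == cfi->id))
                f->fixup(mtd);
        }
    }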
+
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ struct cfi_pri_intelext *extp)
+{
+ if (cfi->mfr == CFI_MFR_INTEL &&
+ cfi->id == PF38F4476 && extp->MinorVersion == '3')
+ extp->MinorVersion = '1';
+}
+
+static inline struct cfi_pri_intelext *
+read_pri_intelext(struct map_info *map, __u16 adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp;
+ unsigned int extra_size = 0;
+ unsigned int extp_size = sizeof(*extp);
+
+ again:
+ extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
+ if (!extp)
+ return NULL;
+
+ cfi_fixup_major_minor(cfi, extp);
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
+ printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ return NULL;
+ }
+
+ /* Do some byteswapping if necessary */
+ extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
+ extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
+ extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
+
+ if (extp->MinorVersion >= '0') {
+ extra_size = 0;
+
+ /* Protection Register info */
+ extra_size += (extp->NumProtectionFields - 1) *
+ sizeof(struct cfi_intelext_otpinfo);
+ }
+
+ if (extp->MinorVersion >= '1') {
+ /* Burst Read info */
+ extra_size += 2;
+ if (extp_size < sizeof(*extp) + extra_size)
+ goto need_more;
+ extra_size += extp->extra[extra_size - 1];
+ }
+
+ if (extp->MinorVersion >= '3') {
+ int nb_parts, i;
+
+	/* Number of hardware partitions */
+ extra_size += 1;
+ if (extp_size < sizeof(*extp) + extra_size)
+ goto need_more;
+ nb_parts = extp->extra[extra_size - 1];
+
+ /* skip the sizeof(partregion) field in CFI 1.4 */
+ if (extp->MinorVersion >= '4')
+ extra_size += 2;
+
+ for (i = 0; i < nb_parts; i++) {
+ struct cfi_intelext_regioninfo *rinfo;
+ rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
+ extra_size += sizeof(*rinfo);
+ if (extp_size < sizeof(*extp) + extra_size)
+ goto need_more;
+ rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
+ extra_size += (rinfo->NumBlockTypes - 1)
+ * sizeof(struct cfi_intelext_blockinfo);
+ }
+
+ if (extp->MinorVersion >= '4')
+ extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
+
+ if (extp_size < sizeof(*extp) + extra_size) {
+ need_more:
+ extp_size = sizeof(*extp) + extra_size;
+ kfree(extp);
+ if (extp_size > 4096) {
+ printk(KERN_ERR
+ "%s: cfi_pri_intelext is too fat\n",
+ __func__);
+ return NULL;
+ }
+ goto again;
+ }
+ }
+
+ return extp;
+}
+
+struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct mtd_info *mtd;
+ int i;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return NULL;
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+
+ /* Fill in the default mtd operations */
+ mtd->_erase = cfi_intelext_erase_varsize;
+ mtd->_read = cfi_intelext_read;
+ mtd->_write = cfi_intelext_write_words;
+ mtd->_sync = cfi_intelext_sync;
+ mtd->_lock = cfi_intelext_lock;
+ mtd->_unlock = cfi_intelext_unlock;
+ mtd->_is_locked = cfi_intelext_is_locked;
+ mtd->_suspend = cfi_intelext_suspend;
+ mtd->_resume = cfi_intelext_resume;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->name = map->name;
+ mtd->writesize = 1;
+ mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+
+ mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
+
+ if (cfi->cfi_mode == CFI_MODE_CFI) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure. So we read the feature
+ * table from it.
+ */
+ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
+ struct cfi_pri_intelext *extp;
+
+ extp = read_pri_intelext(map, adr);
+ if (!extp) {
+ kfree(mtd);
+ return NULL;
+ }
+
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
+
+ cfi_fixup(mtd, cfi_fixup_table);
+
+#ifdef DEBUG_CFI_FEATURES
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
+#endif
+
+ if(extp->SuspendCmdSupport & 1) {
+ printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
+ }
+ }
+ else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
+ /* Apply jedec specific fixups */
+ cfi_fixup(mtd, jedec_fixup_table);
+ }
+ /* Apply generic fixups */
+ cfi_fixup(mtd, fixup_table);
+
+ for (i=0; i< cfi->numchips; i++) {
+ if (cfi->cfiq->WordWriteTimeoutTyp)
+ cfi->chips[i].word_write_time =
+ 1<<cfi->cfiq->WordWriteTimeoutTyp;
+ else
+ cfi->chips[i].word_write_time = 50000;
+
+ if (cfi->cfiq->BufWriteTimeoutTyp)
+ cfi->chips[i].buffer_write_time =
+ 1<<cfi->cfiq->BufWriteTimeoutTyp;
+ /* No default; if it isn't specified, we won't use it */
+
+ if (cfi->cfiq->BlockEraseTimeoutTyp)
+ cfi->chips[i].erase_time =
+ 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
+ else
+ cfi->chips[i].erase_time = 2000000;
+
+ if (cfi->cfiq->WordWriteTimeoutTyp &&
+ cfi->cfiq->WordWriteTimeoutMax)
+ cfi->chips[i].word_write_time_max =
+ 1<<(cfi->cfiq->WordWriteTimeoutTyp +
+ cfi->cfiq->WordWriteTimeoutMax);
+ else
+ cfi->chips[i].word_write_time_max = 50000 * 8;
+
+ if (cfi->cfiq->BufWriteTimeoutTyp &&
+ cfi->cfiq->BufWriteTimeoutMax)
+ cfi->chips[i].buffer_write_time_max =
+ 1<<(cfi->cfiq->BufWriteTimeoutTyp +
+ cfi->cfiq->BufWriteTimeoutMax);
+
+ if (cfi->cfiq->BlockEraseTimeoutTyp &&
+ cfi->cfiq->BlockEraseTimeoutMax)
+ cfi->chips[i].erase_time_max =
+ 1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
+ cfi->cfiq->BlockEraseTimeoutMax);
+ else
+ cfi->chips[i].erase_time_max = 2000000 * 8;
+
+ cfi->chips[i].ref_point_counter = 0;
+ init_waitqueue_head(&(cfi->chips[i].wq));
+ }
+
+ map->fldrv = &cfi_intelext_chipdrv;
+
+ return cfi_intelext_setup(mtd);
+}
+struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
+struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
+EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
+
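The per-chip timing loop in cfi_cmdset_0001() above decodes the CFI timeout
fields, which the query structure stores as exponents: typical times are
1 << Typ (in microseconds for writes; erases carry an extra factor of 1000),
and the maximum scales the typical value by a further 1 << Max. A worked
example with hypothetical query values:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical CFI query exponents, decoded as the driver does. */
        unsigned word_typ = 4, word_max = 4;
        unsigned erase_typ = 10, erase_max = 3;

        printf("word write: typical %u us, max %u us\n",
               1u << word_typ,                    /* 16 us */
               1u << (word_typ + word_max));      /* 256 us */
        printf("block erase: typical %u us, max %u us\n",
               1000u << erase_typ,                /* ~1.0 s */
               1000u << (erase_typ + erase_max)); /* ~8.2 s */
        return 0;
    }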
+static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long offset = 0;
+ int i,j;
+ unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
+
+ //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
+
+ mtd->size = devsize * cfi->numchips;
+
+ mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
+ mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
+ * mtd->numeraseregions, GFP_KERNEL);
+ if (!mtd->eraseregions)
+ goto setup_err;
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ unsigned long ernum, ersize;
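+		/* Each EraseRegionInfo word packs the region geometry:
+		 * bits 31..16 give the block size in units of 256 bytes
+		 * (hence the (>>8) & ~0xff below), and bits 15..0 give
+		 * the number of blocks minus one. */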
+ ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
+ ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
+
+ if (mtd->erasesize < ersize) {
+ mtd->erasesize = ersize;
+ }
+ for (j=0; j<cfi->numchips; j++) {
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
+ }
+ offset += (ersize * ernum);
+ }
+
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ goto setup_err;
+ }
+
+ for (i=0; i<mtd->numeraseregions;i++){
+ printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
+ i,(unsigned long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+ }
+
+#ifdef CONFIG_MTD_OTP
+ mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
+ mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
+ mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
+ mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
+ mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
+#endif
+
+ /* This function has the potential to distort the reality
+ a bit and therefore should be called last. */
+ if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
+ goto setup_err;
+
+ __module_get(THIS_MODULE);
+ register_reboot_notifier(&mtd->reboot_notifier);
+ return mtd;
+
+ setup_err:
+ kfree(mtd->eraseregions);
+ kfree(mtd);
+ kfree(cfi->cmdset_priv);
+ return NULL;
+}
+
+static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
+ struct cfi_private **pcfi)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = *pcfi;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ /*
+ * Probing of multi-partition flash chips.
+ *
+ * To support multiple partitions when available, we simply arrange
+ * for each of them to have their own flchip structure even if they
+ * are on the same physical chip. This means completely recreating
+	 * a new cfi_private structure right here which is a blatant code
+ * layering violation, but this is still the least intrusive
+ * arrangement at this point. This can be rearranged in the future
+ * if someone feels motivated enough. --nico
+ */
+ if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
+ && extp->FeatureSupport & (1 << 9)) {
+ struct cfi_private *newcfi;
+ struct flchip *chip;
+ struct flchip_shared *shared;
+ int offs, numregions, numparts, partshift, numvirtchips, i, j;
+
+ /* Protection Register info */
+ offs = (extp->NumProtectionFields - 1) *
+ sizeof(struct cfi_intelext_otpinfo);
+
+ /* Burst Read info */
+ offs += extp->extra[offs+1]+2;
+
+ /* Number of partition regions */
+ numregions = extp->extra[offs];
+ offs += 1;
+
+ /* skip the sizeof(partregion) field in CFI 1.4 */
+ if (extp->MinorVersion >= '4')
+ offs += 2;
+
+ /* Number of hardware partitions */
+ numparts = 0;
+ for (i = 0; i < numregions; i++) {
+ struct cfi_intelext_regioninfo *rinfo;
+ rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
+ numparts += rinfo->NumIdentPartitions;
+ offs += sizeof(*rinfo)
+ + (rinfo->NumBlockTypes - 1) *
+ sizeof(struct cfi_intelext_blockinfo);
+ }
+
+ if (!numparts)
+ numparts = 1;
+
+ /* Programming Region info */
+ if (extp->MinorVersion >= '4') {
+ struct cfi_intelext_programming_regioninfo *prinfo;
+ prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
+ mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
+ mtd->flags &= ~MTD_BIT_WRITEABLE;
+ printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
+ map->name, mtd->writesize,
+ cfi->interleave * prinfo->ControlValid,
+ cfi->interleave * prinfo->ControlInvalid);
+ }
+
+ /*
+ * All functions below currently rely on all chips having
+ * the same geometry so we'll just assume that all hardware
+ * partitions are of the same size too.
+ */
+ partshift = cfi->chipshift - __ffs(numparts);
+
+ if ((1 << partshift) < mtd->erasesize) {
+ printk( KERN_ERR
+ "%s: bad number of hw partitions (%d)\n",
+ __func__, numparts);
+ return -EINVAL;
+ }
+
+ numvirtchips = cfi->numchips * numparts;
+ newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
+ if (!newcfi)
+ return -ENOMEM;
+ shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
+ if (!shared) {
+ kfree(newcfi);
+ return -ENOMEM;
+ }
+ memcpy(newcfi, cfi, sizeof(struct cfi_private));
+ newcfi->numchips = numvirtchips;
+ newcfi->chipshift = partshift;
+
+ chip = &newcfi->chips[0];
+ for (i = 0; i < cfi->numchips; i++) {
+ shared[i].writing = shared[i].erasing = NULL;
+ mutex_init(&shared[i].lock);
+ for (j = 0; j < numparts; j++) {
+ *chip = cfi->chips[i];
+ chip->start += j << partshift;
+ chip->priv = &shared[i];
+ /* those should be reset too since
+ they create memory references. */
+ init_waitqueue_head(&chip->wq);
+ mutex_init(&chip->mutex);
+ chip++;
+ }
+ }
+
+ printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
+ "--> %d partitions of %d KiB\n",
+ map->name, cfi->numchips, cfi->interleave,
+ newcfi->numchips, 1<<(newcfi->chipshift-10));
+
+ map->fldrv_priv = newcfi;
+ *pcfi = newcfi;
+ kfree(cfi);
+ }
+
+ return 0;
+}
+
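Once that fixup has run, each hardware partition behaves as a virtual chip of
1 << partshift bytes (installed as the new chipshift), so the offset-to-chip
arithmetic used throughout the rest of this file is unchanged. The address
split in isolation (the partition size is hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned partshift = 21;      /* hypothetical 2 MiB partitions */
        unsigned long ofs = 0x356000;

        /* High bits select the (virtual) chip, low bits the offset
         * within it, mirroring how cfi->chipshift is used above. */
        printf("virtual chip %lu, offset 0x%lx\n",
               ofs >> partshift, ofs & ((1ul << partshift) - 1));
        return 0;                     /* chip 1, offset 0x156000 */
    }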
+/*
+ * *********** CHIP ACCESS FUNCTIONS ***********
+ */
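+
+/*
+ * Status register bits used throughout this section (Intel SR
+ * conventions): 0x80 device ready, 0x40 erase suspended, 0x20 erase
+ * error, 0x10 program error, 0x08 VPP low, 0x04 program suspended,
+ * 0x02 block locked, 0x01 partition write status. The 0x1a and 0x3a
+ * masks checked after writes and erases combine the error bits above.
+ */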
+static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+ unsigned long timeo = jiffies + HZ;
+
+ /* Prevent setting state FL_SYNCING for chip in suspended state. */
+ if (mode == FL_SYNCING && chip->oldstate != FL_READY)
+ goto sleep;
+
+ switch (chip->state) {
+
+ case FL_STATUS:
+ for (;;) {
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* At this point we're fine with write operations
+ in other partitions as they don't conflict. */
+ if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
+ break;
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ /* Someone else might have been playing with it. */
+ return -EAGAIN;
+ }
+ /* Fall through */
+ case FL_READY:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ return 0;
+
+ case FL_ERASING:
+ if (!cfip ||
+ !(cfip->FeatureSupport & 2) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
+ goto sleep;
+
+
+ /* Erase suspend */
+ map_write(map, CMD(0xB0), adr);
+
+ /* If the flash has finished erasing, then 'erase suspend'
+ * appears to make some (28F320) flash devices switch to
+ * 'read' mode. Make sure that we switch to 'read status'
+ * mode so we get the right data. --rmk
+ */
+ map_write(map, CMD(0x70), adr);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ chip->erase_suspended = 1;
+ for (;;) {
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ /* Urgh. Resume and pretend we weren't here.
+ * Make sure we're in 'read status' mode if it had finished */
+ put_chip(map, chip, adr);
+ printk(KERN_ERR "%s: Chip not ready after erase "
+ "suspended: status = 0x%lx\n", map->name, status.x[0]);
+ return -EIO;
+ }
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
+ So we can just loop here. */
+ }
+ chip->state = FL_STATUS;
+ return 0;
+
+ case FL_XIP_WHILE_ERASING:
+ if (mode != FL_READY && mode != FL_POINT &&
+ (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
+ goto sleep;
+ chip->oldstate = chip->state;
+ chip->state = FL_READY;
+ return 0;
+
+ case FL_SHUTDOWN:
+		/* The machine is rebooting now, so no one can get the chip anymore */
+ return -EIO;
+ case FL_POINT:
+ /* Only if there's no operation suspended... */
+ if (mode == FL_READY && chip->oldstate == FL_READY)
+ return 0;
+ /* Fall through */
+ default:
+ sleep:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ return -EAGAIN;
+ }
+}
+
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
+{
+ int ret;
+ DECLARE_WAITQUEUE(wait, current);
+
+ retry:
+ if (chip->priv &&
+ (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
+ || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
+ /*
+ * OK. We have possibility for contention on the write/erase
+ * operations which are global to the real chip and not per
+ * partition. So let's fight it over in the partition which
+ * currently has authority on the operation.
+ *
+ * The rules are as follows:
+ *
+ * - any write operation must own shared->writing.
+ *
+ * - any erase operation must own _both_ shared->writing and
+ * shared->erasing.
+ *
+ * - contention arbitration is handled in the owner's context.
+ *
+ * The 'shared' struct can be read and/or written only when
+ * its lock is taken.
+ */
+ struct flchip_shared *shared = chip->priv;
+ struct flchip *contender;
+ mutex_lock(&shared->lock);
+ contender = shared->writing;
+ if (contender && contender != chip) {
+ /*
+ * The engine to perform desired operation on this
+ * partition is already in use by someone else.
+ * Let's fight over it in the context of the chip
+ * currently using it. If it is possible to suspend,
+ * that other partition will do just that, otherwise
+ * it'll happily send us to sleep. In any case, when
+ * get_chip returns success we're clear to go ahead.
+ */
+ ret = mutex_trylock(&contender->mutex);
+ mutex_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+ mutex_unlock(&chip->mutex);
+ ret = chip_ready(map, contender, contender->start, mode);
+ mutex_lock(&chip->mutex);
+
+ if (ret == -EAGAIN) {
+ mutex_unlock(&contender->mutex);
+ goto retry;
+ }
+ if (ret) {
+ mutex_unlock(&contender->mutex);
+ return ret;
+ }
+ mutex_lock(&shared->lock);
+
+ /* We should not own chip if it is already
+ * in FL_SYNCING state. Put contender and retry. */
+ if (chip->state == FL_SYNCING) {
+ put_chip(map, contender, contender->start);
+ mutex_unlock(&contender->mutex);
+ goto retry;
+ }
+ mutex_unlock(&contender->mutex);
+ }
+
+ /* Check if we already have suspended erase
+ * on this chip. Sleep. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+ mutex_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ goto retry;
+ }
+
+ /* We now own it */
+ shared->writing = chip;
+ if (mode == FL_ERASING)
+ shared->erasing = chip;
+ mutex_unlock(&shared->lock);
+ }
+ ret = chip_ready(map, chip, adr, mode);
+ if (ret == -EAGAIN)
+ goto retry;
+
+ return ret;
+}
+
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (chip->priv) {
+ struct flchip_shared *shared = chip->priv;
+ mutex_lock(&shared->lock);
+ if (shared->writing == chip && chip->oldstate == FL_READY) {
+ /* We own the ability to write, but we're done */
+ shared->writing = shared->erasing;
+ if (shared->writing && shared->writing != chip) {
+ /* give back ownership to who we loaned it from */
+ struct flchip *loaner = shared->writing;
+ mutex_lock(&loaner->mutex);
+ mutex_unlock(&shared->lock);
+ mutex_unlock(&chip->mutex);
+ put_chip(map, loaner, loaner->start);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
+ wake_up(&chip->wq);
+ return;
+ }
+ shared->erasing = NULL;
+ shared->writing = NULL;
+ } else if (shared->erasing == chip && shared->writing != chip) {
+ /*
+ * We own the ability to erase without the ability
+ * to write, which means the erase was suspended
+ * and some other partition is currently writing.
+ * Don't let the switch below mess things up since
+ * we don't have ownership to resume anything.
+ */
+ mutex_unlock(&shared->lock);
+ wake_up(&chip->wq);
+ return;
+ }
+ mutex_unlock(&shared->lock);
+ }
+
+ switch(chip->oldstate) {
+ case FL_ERASING:
+ /* What if one interleaved chip has finished and the
+ other hasn't? The old code would leave the finished
+ one in READY mode. That's bad, and caused -EROFS
+ errors to be returned from do_erase_oneblock because
+ that's the only bit it checked for at the time.
+ As the state machine appears to explicitly allow
+ sending the 0x70 (Read Status) command to an erasing
+ chip and expecting it to be ignored, that's what we
+ do. */
+ map_write(map, CMD(0xd0), adr);
+ map_write(map, CMD(0x70), adr);
+ chip->oldstate = FL_READY;
+ chip->state = FL_ERASING;
+ break;
+
+ case FL_XIP_WHILE_ERASING:
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ break;
+
+ case FL_READY:
+ case FL_STATUS:
+ case FL_JEDEC_QUERY:
+ break;
+ default:
+ printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
+ }
+ wake_up(&chip->wq);
+}
+
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * No interrupt whatsoever can be serviced while the flash isn't in array
+ * mode. This is ensured by the xip_disable() and xip_enable() functions
+ * enclosing any code path where the flash is known not to be in array mode.
+ * And within a XIP disabled code path, only functions marked with __xipram
+ * may be called and nothing else (it's a good thing to inspect generated
+ * assembly to make sure inline functions were actually inlined and that gcc
+ * didn't emit calls to its own support functions). Configuring MTD CFI
+ * support for a single buswidth and a single interleave is also recommended.
+ */
+
+static void xip_disable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ /* TODO: chips with no XIP use should ignore and return */
+ (void) map_read(map, adr); /* ensure mmu mapping is up to date */
+ local_irq_disable();
+}
+
+static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xff), adr);
+ chip->state = FL_READY;
+ }
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+}
+
+/*
+ * When a delay is required for the flash operation to complete, the
+ * xip_wait_for_operation() function is polling for both the given timeout
+ * and pending (but still masked) hardware interrupts. Whenever there is an
+ * interrupt pending then the flash erase or write operation is suspended,
+ * array mode restored and interrupts unmasked. Task scheduling might also
+ * happen at that point. The CPU eventually returns from the interrupt or
+ * the call to schedule() and the suspended flash operation is resumed for
+ * the remainder of the delay period.
+ *
+ * Warning: this function _will_ fool interrupt latency tracing tools.
+ */
+
+static int __xipram xip_wait_for_operation(
+ struct map_info *map, struct flchip *chip,
+ unsigned long adr, unsigned int chip_op_time_max)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+ map_word status, OK = CMD(0x80);
+ unsigned long usec, suspended, start, done;
+ flstate_t oldstate, newstate;
+
+ start = xip_currtime();
+ usec = chip_op_time_max;
+ if (usec == 0)
+ usec = 500000;
+ done = 0;
+
+ do {
+ cpu_relax();
+ if (xip_irqpending() && cfip &&
+ ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
+ (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
+ (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
+ /*
+ * Let's suspend the erase or write operation when
+ * supported. Note that we currently don't try to
+ * suspend interleaved chips if there is already
+ * another operation suspended (imagine what happens
+ * when one chip was already done with the current
+ * operation while another chip suspended it, then
+ * we resume the whole thing at once). Yes, it
+ * can happen!
+ */
+ usec -= done;
+ map_write(map, CMD(0xb0), adr);
+ map_write(map, CMD(0x70), adr);
+ suspended = xip_currtime();
+ do {
+ if (xip_elapsed_since(suspended) > 100000) {
+ /*
+ * The chip doesn't want to suspend
+ * after waiting for 100 msecs.
+ * This is a critical error but there
+ * is not much we can do here.
+ */
+ return -EIO;
+ }
+ status = map_read(map, adr);
+ } while (!map_word_andequal(map, status, OK, OK));
+
+ /* Suspend succeeded */
+ oldstate = chip->state;
+ if (oldstate == FL_ERASING) {
+ if (!map_word_bitsset(map, status, CMD(0x40)))
+ break;
+ newstate = FL_XIP_WHILE_ERASING;
+ chip->erase_suspended = 1;
+ } else {
+ if (!map_word_bitsset(map, status, CMD(0x04)))
+ break;
+ newstate = FL_XIP_WHILE_WRITING;
+ chip->write_suspended = 1;
+ }
+ chip->state = newstate;
+ map_write(map, CMD(0xff), adr);
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+ mutex_unlock(&chip->mutex);
+ xip_iprefetch();
+ cond_resched();
+
+ /*
+ * We're back. However someone else might have
+ * decided to go write to the chip if we are in
+ * a suspended erase state. If so let's wait
+ * until it's done.
+ */
+ mutex_lock(&chip->mutex);
+ while (chip->state != newstate) {
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ }
+ /* Disallow XIP again */
+ local_irq_disable();
+
+ /* Resume the write or erase operation */
+ map_write(map, CMD(0xd0), adr);
+ map_write(map, CMD(0x70), adr);
+ chip->state = oldstate;
+ start = xip_currtime();
+ } else if (usec >= 1000000/HZ) {
+ /*
+ * Try to save on CPU power when waiting delay
+ * is at least a system timer tick period.
+ * No need to be extremely accurate here.
+ */
+ xip_cpu_idle();
+ }
+ status = map_read(map, adr);
+ done = xip_elapsed_since(start);
+ } while (!map_word_andequal(map, status, OK, OK)
+ && done < usec);
+
+ return (done >= usec) ? -ETIME : 0;
+}
+
+/*
+ * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
+ * the flash is actively programming or erasing since we have to poll for
+ * the operation to complete anyway. We can't do that in a generic way with
+ * a XIP setup so do it before the actual flash operation in this case
+ * and stub it out from INVAL_CACHE_AND_WAIT.
+ */
+#define XIP_INVAL_CACHED_RANGE(map, from, size) \
+ INVALIDATE_CACHED_RANGE(map, from, size)
+
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
+ xip_wait_for_operation(map, chip, cmd_adr, usec_max)
+
+#else
+
+#define xip_disable(map, chip, adr)
+#define xip_enable(map, chip, adr)
+#define XIP_INVAL_CACHED_RANGE(x...)
+#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
+
+static int inval_cache_and_wait_for_operation(
+ struct map_info *map, struct flchip *chip,
+ unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
+ unsigned int chip_op_time, unsigned int chip_op_time_max)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK = CMD(0x80);
+ int chip_state = chip->state;
+ unsigned int timeo, sleep_time, reset_timeo;
+
+ mutex_unlock(&chip->mutex);
+ if (inval_len)
+ INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
+ mutex_lock(&chip->mutex);
+
+ timeo = chip_op_time_max;
+ if (!timeo)
+ timeo = 500000;
+ reset_timeo = timeo;
+ sleep_time = chip_op_time / 2;
+
+ for (;;) {
+ if (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ if (chip->erase_suspended && chip_state == FL_ERASING) {
+			/* Erase suspend occurred while we slept: reset the timeout */
+ timeo = reset_timeo;
+ chip->erase_suspended = 0;
+ }
+ if (chip->write_suspended && chip_state == FL_WRITING) {
+			/* Write suspend occurred while we slept: reset the timeout */
+ timeo = reset_timeo;
+ chip->write_suspended = 0;
+ }
+ if (!timeo) {
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ return -ETIME;
+ }
+
+		/* OK, still waiting. Drop the lock, wait a while and retry. */
+ mutex_unlock(&chip->mutex);
+ if (sleep_time >= 1000000/HZ) {
+ /*
+ * Half of the normal delay still remaining
+ * can be performed with a sleeping delay instead
+ * of busy waiting.
+ */
+ msleep(sleep_time/1000);
+ timeo -= sleep_time;
+ sleep_time = 1000000/HZ;
+ } else {
+ udelay(1);
+ cond_resched();
+ timeo--;
+ }
+ mutex_lock(&chip->mutex);
+ }
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ return 0;
+}
+
+#endif
+
+#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
+ INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
+
+
+static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
+{
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret = 0;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ mutex_lock(&chip->mutex);
+
+ ret = get_chip(map, chip, cmd_addr, FL_POINT);
+
+ if (!ret) {
+ if (chip->state != FL_POINT && chip->state != FL_READY)
+ map_write(map, CMD(0xff), cmd_addr);
+
+ chip->state = FL_POINT;
+ chip->ref_point_counter++;
+ }
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs, last_end = 0;
+ int chipnum;
+ int ret = 0;
+
+ if (!map->virt)
+ return -EINVAL;
+
+ /* Now lock the chip(s) to POINT state */
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ *virt = map->virt + cfi->chips[chipnum].start + ofs;
+ if (phys)
+ *phys = map->phys + cfi->chips[chipnum].start + ofs;
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ /* We cannot point across chips that are virtually disjoint */
+ if (!last_end)
+ last_end = cfi->chips[chipnum].start;
+ else if (cfi->chips[chipnum].start != last_end)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+
+ ofs = 0;
+ last_end += 1 << cfi->chipshift;
+ chipnum++;
+ }
+ return 0;
+}
+
+static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum, err = 0;
+
+ /* Now unlock the chip(s) POINT state */
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ while (len && !err) {
+ unsigned long thislen;
+ struct flchip *chip;
+
+ chip = &cfi->chips[chipnum];
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ mutex_lock(&chip->mutex);
+ if (chip->state == FL_POINT) {
+ chip->ref_point_counter--;
+ if(chip->ref_point_counter == 0)
+ chip->state = FL_READY;
+ } else {
+ printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
+ err = -EINVAL;
+ }
+
+ put_chip(map, chip, chip->start);
+ mutex_unlock(&chip->mutex);
+
+ len -= thislen;
+ ofs = 0;
+ chipnum++;
+ }
+
+ return err;
+}
+
+static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+{
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, cmd_addr, FL_READY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xff), cmd_addr);
+
+ chip->state = FL_READY;
+ }
+
+ map_copy_from(map, buf, adr, len);
+
+ put_chip(map, chip, cmd_addr);
+
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+ buf += thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return ret;
+}
+
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum, int mode)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, write_cmd;
+ int ret=0;
+
+ adr += chip->start;
+
+ switch (mode) {
+ case FL_WRITING:
+ write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
+ break;
+ case FL_OTP_WRITE:
+ write_cmd = CMD(0xc0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, mode);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+ map_write(map, write_cmd, adr);
+ map_write(map, datum, adr);
+ chip->state = mode;
+
+ ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ adr, map_bankwidth(map),
+ chip->word_write_time,
+ chip->word_write_time_max);
+ if (ret) {
+ xip_enable(map, chip, adr);
+ printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
+ goto out;
+ }
+
+ /* check for errors */
+ status = map_read(map, adr);
+ if (map_word_bitsset(map, status, CMD(0x1a))) {
+ unsigned long chipstatus = MERGESTATUS(status);
+
+ /* reset status */
+ map_write(map, CMD(0x50), adr);
+ map_write(map, CMD(0x70), adr);
+ xip_enable(map, chip, adr);
+
+ if (chipstatus & 0x02) {
+ ret = -EROFS;
+ } else if (chipstatus & 0x08) {
+ printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
+ ret = -EIO;
+ } else {
+ printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
+ ret = -EINVAL;
+ }
+
+ goto out;
+ }
+
+ xip_enable(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+
+static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret = 0;
+ int chipnum;
+ unsigned long ofs;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+
+	/* If it's not bus-aligned, do the leading partial-word write first */
+ if (ofs & (map_bankwidth(map)-1)) {
+ unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
+ int gap = ofs - bus_ofs;
+ int n;
+ map_word datum;
+
+ n = min_t(int, len, map_bankwidth(map)-gap);
+ datum = map_word_ff(map);
+ datum = map_word_load_partial(map, datum, buf, gap, n);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ bus_ofs, datum, FL_WRITING);
+ if (ret)
+ return ret;
+
+ len -= n;
+ ofs += n;
+ buf += n;
+ (*retlen) += n;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ while(len >= map_bankwidth(map)) {
+ map_word datum = map_word_load(map, buf);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ ofs, datum, FL_WRITING);
+ if (ret)
+ return ret;
+
+ ofs += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ (*retlen) += map_bankwidth(map);
+ len -= map_bankwidth(map);
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ if (len & (map_bankwidth(map)-1)) {
+ map_word datum;
+
+ datum = map_word_ff(map);
+ datum = map_word_load_partial(map, datum, buf, 0, len);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ ofs, datum, FL_WRITING);
+ if (ret)
+ return ret;
+
+ (*retlen) += len;
+ }
+
+ return 0;
+}
+
+
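cfi_intelext_write_words() above splits a request into an unaligned head,
whole bus words, and a tail, padding the partial words with 0xff so bits
outside the request are left untouched (programming NOR flash only clears
bits, and 0xff clears none). The decomposition in isolation, for a 4-byte
bankwidth and hypothetical offsets:

    #include <stdio.h>

    int main(void)
    {
        unsigned bw = 4;                 /* map_bankwidth(map) */
        unsigned long ofs = 0x1001, len = 10;
        unsigned long head = (bw - (ofs & (bw - 1))) & (bw - 1);
        unsigned long body = (len - head) / bw * bw;
        unsigned long tail = len - head - body;

        /* Prints: head 3 @0x1000, body 4 @0x1004, tail 3 @0x1008 */
        printf("head %lu @0x%lx, body %lu @0x%lx, tail %lu @0x%lx\n",
               head, ofs & ~(unsigned long)(bw - 1),
               body, ofs + head, tail, ofs + head + body);
        return 0;
    }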
+static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const struct kvec **pvec,
+ unsigned long *pvec_seek, int len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, write_cmd, datum;
+ unsigned long cmd_adr;
+ int ret, wbufsize, word_gap, words;
+ const struct kvec *vec;
+ unsigned long vec_seek;
+ unsigned long initial_adr;
+ int initial_len = len;
+
+ wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ adr += chip->start;
+ initial_adr = adr;
+ cmd_adr = adr & ~(wbufsize-1);
+
+ /* Sharp LH28F640BF chips need the first address for the
+ * Page Buffer Program command. See Table 5 of
+ * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
+ if (is_LH28F640BF(cfi))
+ cmd_adr = adr;
+
+ /* Let's determine this according to the interleave only once */
+ write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, cmd_adr, FL_WRITING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, cmd_adr);
+
+ /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
+ [...], the device will not accept any more Write to Buffer commands".
+ So we must check here and reset those bits if they're set. Otherwise
+ we're just pissing in the wind */
+ if (chip->state != FL_STATUS) {
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ }
+ status = map_read(map, cmd_adr);
+ if (map_word_bitsset(map, status, CMD(0x30))) {
+ xip_enable(map, chip, cmd_adr);
+ printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
+ xip_disable(map, chip, cmd_adr);
+ map_write(map, CMD(0x50), cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ }
+
+ chip->state = FL_WRITING_TO_BUFFER;
+ map_write(map, write_cmd, cmd_adr);
+ ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
+ if (ret) {
+ /* Argh. Not ready for write to buffer */
+ map_word Xstatus = map_read(map, cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ status = map_read(map, cmd_adr);
+ map_write(map, CMD(0x50), cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ xip_enable(map, chip, cmd_adr);
+ printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
+ map->name, Xstatus.x[0], status.x[0]);
+ goto out;
+ }
+
+ /* Figure out the number of words to write */
+ word_gap = (-adr & (map_bankwidth(map)-1));
+ words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
+ if (!word_gap) {
+ words--;
+ } else {
+ word_gap = map_bankwidth(map) - word_gap;
+ adr -= word_gap;
+ datum = map_word_ff(map);
+ }
+
+ /* Write length of data to come */
+ map_write(map, CMD(words), cmd_adr );
+
+ /* Write data */
+ vec = *pvec;
+ vec_seek = *pvec_seek;
+ do {
+ int n = map_bankwidth(map) - word_gap;
+ if (n > vec->iov_len - vec_seek)
+ n = vec->iov_len - vec_seek;
+ if (n > len)
+ n = len;
+
+ if (!word_gap && len < map_bankwidth(map))
+ datum = map_word_ff(map);
+
+ datum = map_word_load_partial(map, datum,
+ vec->iov_base + vec_seek,
+ word_gap, n);
+
+ len -= n;
+ word_gap += n;
+ if (!len || word_gap == map_bankwidth(map)) {
+ map_write(map, datum, adr);
+ adr += map_bankwidth(map);
+ word_gap = 0;
+ }
+
+ vec_seek += n;
+ if (vec_seek == vec->iov_len) {
+ vec++;
+ vec_seek = 0;
+ }
+ } while (len);
+ *pvec = vec;
+ *pvec_seek = vec_seek;
+
+ /* GO GO GO */
+ map_write(map, CMD(0xd0), cmd_adr);
+ chip->state = FL_WRITING;
+
+ ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
+ initial_adr, initial_len,
+ chip->buffer_write_time,
+ chip->buffer_write_time_max);
+ if (ret) {
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ xip_enable(map, chip, cmd_adr);
+ printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
+ goto out;
+ }
+
+ /* check for errors */
+ status = map_read(map, cmd_adr);
+ if (map_word_bitsset(map, status, CMD(0x1a))) {
+ unsigned long chipstatus = MERGESTATUS(status);
+
+ /* reset status */
+ map_write(map, CMD(0x50), cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ xip_enable(map, chip, cmd_adr);
+
+ if (chipstatus & 0x02) {
+ ret = -EROFS;
+ } else if (chipstatus & 0x08) {
+ printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
+ ret = -EIO;
+ } else {
+ printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
+ ret = -EINVAL;
+ }
+
+ goto out;
+ }
+
+ xip_enable(map, chip, cmd_adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, cmd_adr);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ int ret = 0;
+ int chipnum;
+ unsigned long ofs, vec_seek, i;
+ size_t len = 0;
+
+ for (i = 0; i < count; i++)
+ len += vecs[i].iov_len;
+
+ if (!len)
+ return 0;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+ vec_seek = 0;
+
+ do {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
+
+ if (size > len)
+ size = len;
+ ret = do_write_buffer(map, &cfi->chips[chipnum],
+ ofs, &vecs, &vec_seek, size);
+ if (ret)
+ return ret;
+
+ ofs += size;
+ (*retlen) += size;
+ len -= size;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+
+ /* Be nice and reschedule with the chip in a usable state for other
+ processes. */
+ cond_resched();
+
+ } while (len);
+
+ return 0;
+}
+
+static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct kvec vec;
+
+ vec.iov_base = (void *) buf;
+ vec.iov_len = len;
+
+ return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
+}
+
+static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status;
+ int retries = 3;
+ int ret;
+
+ adr += chip->start;
+
+ retry:
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_ERASING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ XIP_INVAL_CACHED_RANGE(map, adr, len);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ /* Clear the status register first */
+ map_write(map, CMD(0x50), adr);
+
+ /* Now erase */
+ map_write(map, CMD(0x20), adr);
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_ERASING;
+ chip->erase_suspended = 0;
+
+ ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ adr, len,
+ chip->erase_time,
+ chip->erase_time_max);
+ if (ret) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ xip_enable(map, chip, adr);
+ printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
+ goto out;
+ }
+
+ /* We've broken this before. It doesn't hurt to be safe */
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ status = map_read(map, adr);
+
+ /* check for errors */
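+	/* 0x3a = SR.5 (erase error) | SR.4 (program error) | SR.3 (VPP low)
+	   | SR.1 (block locked) */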
+ if (map_word_bitsset(map, status, CMD(0x3a))) {
+ unsigned long chipstatus = MERGESTATUS(status);
+
+ /* Reset the error bits */
+ map_write(map, CMD(0x50), adr);
+ map_write(map, CMD(0x70), adr);
+ xip_enable(map, chip, adr);
+
+ if ((chipstatus & 0x30) == 0x30) {
+ printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
+ ret = -EINVAL;
+ } else if (chipstatus & 0x02) {
+ /* Protection bit set */
+ ret = -EROFS;
+ } else if (chipstatus & 0x8) {
+ /* Voltage */
+ printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
+ ret = -EIO;
+ } else if (chipstatus & 0x20 && retries--) {
+ printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
+ DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ goto retry;
+ } else {
+ printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
+ ret = -EIO;
+ }
+
+ goto out;
+ }
+
+ xip_enable(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
+{
+ unsigned long ofs, len;
+ int ret;
+
+ ofs = instr->addr;
+ len = instr->len;
+
+ ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
+ if (ret)
+ return ret;
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+static void cfi_intelext_sync (struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_SYNCING);
+
+ if (!ret) {
+ chip->oldstate = chip->state;
+ chip->state = FL_SYNCING;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ }
+ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_SYNCING) {
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+}
+
+static int __xipram do_getlockstatus_oneblock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr,
+ int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int status, ofs_factor = cfi->interleave * cfi->device_type;
+
+ adr += chip->start;
+ xip_disable(map, chip, adr+(2*ofs_factor));
+ map_write(map, CMD(0x90), adr+(2*ofs_factor));
+ chip->state = FL_JEDEC_QUERY;
+ status = cfi_read_query(map, adr+(2*ofs_factor));
+ xip_enable(map, chip, 0);
+ return status;
+}
+
+#ifdef DEBUG_LOCK_BITS
+static int __xipram do_printlockstatus_oneblock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr,
+ int len, void *thunk)
+{
+ printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
+ adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
+ return 0;
+}
+#endif
+
+#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
+
+static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ int mdelay;
+ int ret;
+
+ adr += chip->start;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ map_write(map, CMD(0x60), adr);
+ if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+ map_write(map, CMD(0x01), adr);
+ chip->state = FL_LOCKING;
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_UNLOCKING;
+ } else
+ BUG();
+
+ /*
+	 * If Instant Individual Block Locking is supported then there is
+	 * no need to delay.
+ */
+ /*
+	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
+	 * let's use a max of 1.5 seconds (1500ms) as timeout.
+ *
+ * See "Clear Block Lock-Bits Time" on page 40 in
+ * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
+ * from February 2003
+ */
+ mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
+
+ ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
+ if (ret) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ xip_enable(map, chip, adr);
+ printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
+ goto out;
+ }
+
+ xip_enable(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+#ifdef DEBUG_LOCK_BITS
+	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
+ __func__, ofs, len);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, NULL);
+#endif
+
+ ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
+ ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
+
+#ifdef DEBUG_LOCK_BITS
+ printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
+ __func__, ret);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, NULL);
+#endif
+
+ return ret;
+}
+
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+#ifdef DEBUG_LOCK_BITS
+	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
+ __func__, ofs, len);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, NULL);
+#endif
+
+ ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
+ ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
+
+#ifdef DEBUG_LOCK_BITS
+ printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
+ __func__, ret);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, NULL);
+#endif
+
+ return ret;
+}
+
+static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
+ ofs, len, NULL) ? 1 : 0;
+}
+
+#ifdef CONFIG_MTD_OTP
+
+typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
+ u_long data_offset, u_char *buf, u_int size,
+ u_long prot_offset, u_int groupno, u_int groupsize);
+
+static int __xipram
+do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
+ u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ /* let's ensure we're not reading back cached data from array mode */
+ INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
+
+ xip_disable(map, chip, chip->start);
+ if (chip->state != FL_JEDEC_QUERY) {
+ map_write(map, CMD(0x90), chip->start);
+ chip->state = FL_JEDEC_QUERY;
+ }
+ map_copy_from(map, buf, chip->start + offset, size);
+ xip_enable(map, chip, chip->start);
+
+ /* then ensure we don't keep OTP data in the cache */
+ INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
+
+ put_chip(map, chip, chip->start);
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+static int
+do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
+ u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
+{
+ int ret;
+
+ while (size) {
+ unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
+ int gap = offset - bus_ofs;
+ int n = min_t(int, size, map_bankwidth(map)-gap);
+ map_word datum = map_word_ff(map);
+
+ datum = map_word_load_partial(map, datum, buf, gap, n);
+ ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
+ if (ret)
+ return ret;
+
+ offset += n;
+ buf += n;
+ size -= n;
+ }
+
+ return 0;
+}
+
+static int
+do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
+ u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word datum;
+
+ /* make sure area matches group boundaries */
+ if (size != grpsz)
+ return -EXDEV;
+
+ datum = map_word_ff(map);
+ datum = map_word_clr(map, datum, CMD(1 << grpno));
+ return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
+}
+
+static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf,
+ otp_op_t action, int user_regs)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ struct flchip *chip;
+ struct cfi_intelext_otpinfo *otp;
+ u_long devsize, reg_prot_offset, data_offset;
+ u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
+ u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
+ int ret;
+
+ *retlen = 0;
+
+ /* Check that we actually have some OTP registers */
+ if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
+ return -ENODATA;
+
+ /* we need real chips here not virtual ones */
+ devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
+ chip_step = devsize >> cfi->chipshift;
+ chip_num = 0;
+
+ /* Some chips have OTP located in the _top_ partition only.
+ For example: Intel 28F256L18T (T means top-parameter device) */
+ if (cfi->mfr == CFI_MFR_INTEL) {
+ switch (cfi->id) {
+ case 0x880b:
+ case 0x880c:
+ case 0x880d:
+ chip_num = chip_step - 1;
+ }
+ }
+
+ for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
+ chip = &cfi->chips[chip_num];
+ otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
+
+ /* first OTP region */
+ field = 0;
+ reg_prot_offset = extp->ProtRegAddr;
+ reg_fact_groups = 1;
+ reg_fact_size = 1 << extp->FactProtRegSize;
+ reg_user_groups = 1;
+ reg_user_size = 1 << extp->UserProtRegSize;
+
+ while (len > 0) {
+ /* flash geometry fixup */
+ data_offset = reg_prot_offset + 1;
+ data_offset *= cfi->interleave * cfi->device_type;
+ reg_prot_offset *= cfi->interleave * cfi->device_type;
+ reg_fact_size *= cfi->interleave;
+ reg_user_size *= cfi->interleave;
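+			/* e.g. two interleaved x16 chips: interleave = 2 and
+			   device_type = 2, so the register offsets scale by 4
+			   while the group sizes scale only by the interleave */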
+
+ if (user_regs) {
+ groups = reg_user_groups;
+ groupsize = reg_user_size;
+ /* skip over factory reg area */
+ groupno = reg_fact_groups;
+ data_offset += reg_fact_groups * reg_fact_size;
+ } else {
+ groups = reg_fact_groups;
+ groupsize = reg_fact_size;
+ groupno = 0;
+ }
+
+ while (len > 0 && groups > 0) {
+ if (!action) {
+ /*
+ * Special case: if action is NULL
+ * we fill buf with otp_info records.
+ */
+ struct otp_info *otpinfo;
+ map_word lockword;
+ len -= sizeof(struct otp_info);
+ if (len <= 0)
+ return -ENOSPC;
+ ret = do_otp_read(map, chip,
+ reg_prot_offset,
+ (u_char *)&lockword,
+ map_bankwidth(map),
+ 0, 0, 0);
+ if (ret)
+ return ret;
+ otpinfo = (struct otp_info *)buf;
+ otpinfo->start = from;
+ otpinfo->length = groupsize;
+ otpinfo->locked =
+ !map_word_bitsset(map, lockword,
+ CMD(1 << groupno));
+ from += groupsize;
+ buf += sizeof(*otpinfo);
+ *retlen += sizeof(*otpinfo);
+ } else if (from >= groupsize) {
+ from -= groupsize;
+ data_offset += groupsize;
+ } else {
+ int size = groupsize;
+ data_offset += from;
+ size -= from;
+ from = 0;
+ if (size > len)
+ size = len;
+ ret = action(map, chip, data_offset,
+ buf, size, reg_prot_offset,
+ groupno, groupsize);
+ if (ret < 0)
+ return ret;
+ buf += size;
+ len -= size;
+ *retlen += size;
+ data_offset += size;
+ }
+ groupno++;
+ groups--;
+ }
+
+ /* next OTP region */
+ if (++field == extp->NumProtectionFields)
+ break;
+ reg_prot_offset = otp->ProtRegAddr;
+ reg_fact_groups = otp->FactGroups;
+ reg_fact_size = 1 << otp->FactProtRegSize;
+ reg_user_groups = otp->UserGroups;
+ reg_user_size = 1 << otp->UserProtRegSize;
+ otp++;
+ }
+ }
+
+ return 0;
+}
+
+static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_intelext_otp_walk(mtd, from, len, retlen,
+ buf, do_otp_read, 0);
+}
+
+static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_intelext_otp_walk(mtd, from, len, retlen,
+ buf, do_otp_read, 1);
+}
+
+static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_intelext_otp_walk(mtd, from, len, retlen,
+ buf, do_otp_write, 1);
+}
+
+static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
+ loff_t from, size_t len)
+{
+ size_t retlen;
+ return cfi_intelext_otp_walk(mtd, from, len, &retlen,
+ NULL, do_otp_lock, 1);
+}
+
+static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 0);
+}
+
+static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 1);
+}
+
+#endif
+
+static void cfi_intelext_save_locks(struct mtd_info *mtd)
+{
+ struct mtd_erase_region_info *region;
+ int block, status, i;
+ unsigned long adr;
+ size_t len;
+
+ for (i = 0; i < mtd->numeraseregions; i++) {
+ region = &mtd->eraseregions[i];
+ if (!region->lockmap)
+ continue;
+
+ for (block = 0; block < region->numblocks; block++){
+ len = region->erasesize;
+ adr = region->offset + block * len;
+
+ status = cfi_varsize_frob(mtd,
+ do_getlockstatus_oneblock, adr, len, NULL);
+ if (status)
+ set_bit(block, region->lockmap);
+ else
+ clear_bit(block, region->lockmap);
+ }
+ }
+}
+
+static int cfi_intelext_suspend(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+
+ if ((mtd->flags & MTD_POWERUP_LOCK)
+ && extp && (extp->FeatureSupport & (1 << 5)))
+ cfi_intelext_save_locks(mtd);
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ switch (chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ if (chip->oldstate == FL_READY) {
+ /* place the chip in a known state before suspend */
+ map_write(map, CMD(0xFF), cfi->chips[i].start);
+ chip->oldstate = chip->state;
+ chip->state = FL_PM_SUSPENDED;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ } else {
+ /* There seems to be an operation pending. We must wait for it. */
+ printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
+ ret = -EAGAIN;
+ }
+ break;
+ default:
+ /* Should we actually wait? Once upon a time these routines weren't
+ allowed to. Or should we return -EAGAIN, because the upper layers
+ ought to have already shut down anything which was using the device
+ anyway? The latter for now. */
+ printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
+ ret = -EAGAIN;
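+			/* fall through */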
+ case FL_PM_SUSPENDED:
+ break;
+ }
+ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+
+ if (ret) {
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ /* No need to force it into a known state here,
+ because we're returning failure, and it didn't
+ get power cycled */
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+ }
+
+ return ret;
+}
+
+static void cfi_intelext_restore_locks(struct mtd_info *mtd)
+{
+ struct mtd_erase_region_info *region;
+ int block, i;
+ unsigned long adr;
+ size_t len;
+
+ for (i = 0; i < mtd->numeraseregions; i++) {
+ region = &mtd->eraseregions[i];
+ if (!region->lockmap)
+ continue;
+
+ for_each_clear_bit(block, region->lockmap, region->numblocks) {
+ len = region->erasesize;
+ adr = region->offset + block * len;
+ cfi_intelext_unlock(mtd, adr, len);
+ }
+ }
+}
+
+static void cfi_intelext_resume(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ int i;
+ struct flchip *chip;
+
+ for (i=0; i<cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ /* Go to known state. Chip may have been power cycled */
+ if (chip->state == FL_PM_SUSPENDED) {
+ /* Refresh LH28F640BF Partition Config. Register */
+ fixup_LH28F640BF(mtd);
+ map_write(map, CMD(0xFF), cfi->chips[i].start);
+ chip->oldstate = chip->state = FL_READY;
+ wake_up(&chip->wq);
+ }
+
+ mutex_unlock(&chip->mutex);
+ }
+
+ if ((mtd->flags & MTD_POWERUP_LOCK)
+ && extp && (extp->FeatureSupport & (1 << 5)))
+ cfi_intelext_restore_locks(mtd);
+}
+
+static int cfi_intelext_reset(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i, ret;
+
+ for (i=0; i < cfi->numchips; i++) {
+ struct flchip *chip = &cfi->chips[i];
+
+ /* force the completion of any ongoing operation
+ and switch to array mode so any bootloader in
+ flash is accessible for soft reboot. */
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+ if (!ret) {
+ map_write(map, CMD(0xff), chip->start);
+ chip->state = FL_SHUTDOWN;
+ put_chip(map, chip, chip->start);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+
+ return 0;
+}
+
+static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ cfi_intelext_reset(mtd);
+ return NOTIFY_DONE;
+}
+
+static void cfi_intelext_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct mtd_erase_region_info *region;
+ int i;
+ cfi_intelext_reset(mtd);
+ unregister_reboot_notifier(&mtd->reboot_notifier);
+ kfree(cfi->cmdset_priv);
+ kfree(cfi->cfiq);
+ kfree(cfi->chips[0].priv);
+ kfree(cfi);
+ for (i = 0; i < mtd->numeraseregions; i++) {
+ region = &mtd->eraseregions[i];
+ kfree(region->lockmap);
+ }
+ kfree(mtd->eraseregions);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
+MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
+MODULE_ALIAS("cfi_cmdset_0003");
+MODULE_ALIAS("cfi_cmdset_0200");
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
new file mode 100644
index 000000000..c50d8cf0f
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -0,0 +1,2929 @@
+/*
+ * Common Flash Interface support:
+ * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
+ *
+ * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
+ * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
+ * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
+ *
+ * 2_by_8 routines added by Simon Munton
+ *
+ * 4_by_16 work by Carolyn J. Smith
+ *
+ * XIP support hooks by Vitaly Wool (based on code for Intel flash
+ * by Nicolas Pitre)
+ *
+ * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
+ *
+ * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
+ *
+ * This code is GPL
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/xip.h>
+
+#define AMD_BOOTLOC_BUG
+#define FORCE_WORD_WRITE 0
+
+#define MAX_WORD_RETRIES 3
+
+#define SST49LF004B 0x0060
+#define SST49LF040B 0x0050
+#define SST49LF008A 0x005a
+#define AT49BV6416 0x00d6
+
+static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
+static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
+static void cfi_amdstd_sync (struct mtd_info *);
+static int cfi_amdstd_suspend (struct mtd_info *);
+static void cfi_amdstd_resume (struct mtd_info *);
+static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
+static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
+ size_t *, u_char *);
+static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
+ size_t *, u_char *);
+static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
+ size_t *, u_char *);
+static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
+
+static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+
+static void cfi_amdstd_destroy(struct mtd_info *);
+
+struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
+static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
+
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
+#include "fwh_lock.h"
+
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
+static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
+static struct mtd_chip_driver cfi_amdstd_chipdrv = {
+ .probe = NULL, /* Not usable directly */
+ .destroy = cfi_amdstd_destroy,
+ .name = "cfi_cmdset_0002",
+ .module = THIS_MODULE
+};
+
+
+/* #define DEBUG_CFI_FEATURES */
+
+
+#ifdef DEBUG_CFI_FEATURES
+static void cfi_tell_features(struct cfi_pri_amdstd *extp)
+{
+ const char* erase_suspend[3] = {
+ "Not supported", "Read only", "Read/write"
+ };
+ const char* top_bottom[6] = {
+ "No WP", "8x8KiB sectors at top & bottom, no WP",
+ "Bottom boot", "Top boot",
+ "Uniform, Bottom WP", "Uniform, Top WP"
+ };
+
+ printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
+ printk(" Address sensitive unlock: %s\n",
+ (extp->SiliconRevision & 1) ? "Not required" : "Required");
+
+ if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
+ printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
+ else
+ printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
+
+ if (extp->BlkProt == 0)
+ printk(" Block protection: Not supported\n");
+ else
+ printk(" Block protection: %d sectors per group\n", extp->BlkProt);
+
+
+ printk(" Temporary block unprotect: %s\n",
+ extp->TmpBlkUnprotect ? "Supported" : "Not supported");
+ printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
+ printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
+ printk(" Burst mode: %s\n",
+ extp->BurstMode ? "Supported" : "Not supported");
+ if (extp->PageMode == 0)
+ printk(" Page mode: Not supported\n");
+ else
+ printk(" Page mode: %d word page\n", extp->PageMode << 2);
+
+ printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
+ extp->VppMin >> 4, extp->VppMin & 0xf);
+ printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
+ extp->VppMax >> 4, extp->VppMax & 0xf);
+
+ if (extp->TopBottom < ARRAY_SIZE(top_bottom))
+ printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
+ else
+ printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
+}
+#endif
+
+#ifdef AMD_BOOTLOC_BUG
+/* Wheee. Bring me the head of someone at AMD. */
+static void fixup_amd_bootblock(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ __u8 major = extp->MajorVersion;
+ __u8 minor = extp->MinorVersion;
+
+ if (((major << 8) | minor) < 0x3131) {
+ /* CFI version 1.0 => don't trust bootloc */
+
+ pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
+ map->name, cfi->mfr, cfi->id);
+
+ /* AFAICS all 29LV400 with a bottom boot block have a device ID
+ * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
+ * These were badly detected as they have the 0x80 bit set
+ * so treat them as a special case.
+ */
+ if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
+
+ /* Macronix added CFI to their 2nd generation
+ * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
+ * Fujitsu, Spansion, EON, ESI and older Macronix)
+ * has CFI.
+ *
+ * Therefore also check the manufacturer.
+ * This reduces the risk of false detection due to
+ * the 8-bit device ID.
+ */
+ (cfi->mfr == CFI_MFR_MACRONIX)) {
+ pr_debug("%s: Macronix MX29LV400C with bottom boot block"
+ " detected\n", map->name);
+ extp->TopBottom = 2; /* bottom boot */
+ } else
+ if (cfi->id & 0x80) {
+ printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
+ extp->TopBottom = 3; /* top boot */
+ } else {
+ extp->TopBottom = 2; /* bottom boot */
+ }
+
+ pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
+ " deduced %s from Device ID\n", map->name, major, minor,
+ extp->TopBottom == 2 ? "bottom" : "top");
+ }
+}
+#endif
+
+static void fixup_use_write_buffers(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ if (cfi->cfiq->BufWriteTimeoutTyp) {
+ pr_debug("Using buffer write method\n" );
+ mtd->_write = cfi_amdstd_write_buffers;
+ }
+}
+
+/* Atmel chips don't use the same PRI format as AMD chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ struct cfi_pri_atmel atmel_pri;
+
+ memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+ memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+ if (atmel_pri.Features & 0x02)
+ extp->EraseSuspend = 2;
+
+ /* Some chips got it backwards... */
+ if (cfi->id == AT49BV6416) {
+ if (atmel_pri.BottomBoot)
+ extp->TopBottom = 3;
+ else
+ extp->TopBottom = 2;
+ } else {
+ if (atmel_pri.BottomBoot)
+ extp->TopBottom = 2;
+ else
+ extp->TopBottom = 3;
+ }
+
+ /* burst write mode not supported */
+ cfi->cfiq->BufWriteTimeoutTyp = 0;
+ cfi->cfiq->BufWriteTimeoutMax = 0;
+}
+
+static void fixup_use_secsi(struct mtd_info *mtd)
+{
+ /* Setup for chips with a secsi area */
+ mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
+ mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
+}
+
+static void fixup_use_erase_chip(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ if ((cfi->cfiq->NumEraseRegions == 1) &&
+ ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
+ mtd->_erase = cfi_amdstd_erase_chip;
+ }
+
+}
+
+/*
+ * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
+ * locked by default.
+ */
+static void fixup_use_atmel_lock(struct mtd_info *mtd)
+{
+ mtd->_lock = cfi_atmel_lock;
+ mtd->_unlock = cfi_atmel_unlock;
+ mtd->flags |= MTD_POWERUP_LOCK;
+}
+
+static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /*
+ * These flashes report two separate eraseblock regions based on the
+ * sector_erase-size and block_erase-size, although they both operate on the
+ * same memory. This is not allowed according to CFI, so we just pick the
+ * sector_erase-size.
+ */
+ cfi->cfiq->NumEraseRegions = 1;
+}
+
+static void fixup_sst39vf(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_old_sst_eraseregion(mtd);
+
+ cfi->addr_unlock1 = 0x5555;
+ cfi->addr_unlock2 = 0x2AAA;
+}
+
+static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_old_sst_eraseregion(mtd);
+
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2AA;
+
+ cfi->sector_erase_cmd = CMD(0x50);
+}
+
+static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_sst39vf_rev_b(mtd);
+
+ /*
+ * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
+ * it should report a size of 8KBytes (0x0020*256).
+ */
+ cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
+ pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
+}
+
+static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
+ cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
+ pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
+ }
+}
+
+static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
+ cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
+ pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
+ }
+}
+
+static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /*
+	 * S29NS512P flash uses more than 8 bits to report the number of sectors,
+ * which is not permitted by CFI.
+ */
+ cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
+ pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
+}
+
+/* Used to fix CFI-Tables of chips without Extended Query Tables */
+static struct cfi_fixup cfi_nopri_fixup_table[] = {
+ { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
+ { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
+ { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
+ { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
+ { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
+ { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
+ { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
+ { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
+ { 0, 0, NULL }
+};
+
+static struct cfi_fixup cfi_fixup_table[] = {
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
+#ifdef AMD_BOOTLOC_BUG
+ { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
+ { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
+ { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
+#endif
+ { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
+ { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
+ { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
+ { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+ { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
+ { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
+ { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
+ { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
+ { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
+#if !FORCE_WORD_WRITE
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
+#endif
+ { 0, 0, NULL }
+};
+static struct cfi_fixup jedec_fixup_table[] = {
+ { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
+ { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
+ { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
+ { 0, 0, NULL }
+};
+
+static struct cfi_fixup fixup_table[] = {
+	/* The CFI vendor IDs and the JEDEC vendor IDs appear
+	 * to be common.  It seems the device IDs are as
+	 * well.  This table picks all cases where
+	 * we know that is the case.
+ */
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
+ { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
+ { 0, 0, NULL }
+};
+
+
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ struct cfi_pri_amdstd *extp)
+{
+ if (cfi->mfr == CFI_MFR_SAMSUNG) {
+ if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
+ (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
+ /*
+ * Samsung K8P2815UQB and K8D6x16UxM chips
+ * report major=0 / minor=0.
+ * K8D3x16UxC chips report major=3 / minor=3.
+ */
+ printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
+ " Extended Query version to 1.%c\n",
+ extp->MinorVersion);
+ extp->MajorVersion = '1';
+ }
+ }
+
+ /*
+ * SST 38VF640x chips report major=0xFF / minor=0xFF.
+ */
+ if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
+ extp->MajorVersion = '1';
+ extp->MinorVersion = '0';
+ }
+}
+
+static int is_m29ew(struct cfi_private *cfi)
+{
+ if (cfi->mfr == CFI_MFR_INTEL &&
+ ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
+ (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
+ return 1;
+ return 0;
+}
+
+/*
+ * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
+ * Some revisions of the M29EW suffer from erase suspend hang ups. In
+ * particular, it can occur when the sequence
+ * Erase Confirm -> Suspend -> Program -> Resume
+ * causes a lockup due to internal timing issues. The consequence is that the
+ * erase cannot be resumed without inserting a dummy command after programming
+ * and prior to resuming. [...] The work-around is to issue a dummy write cycle
+ * that writes an F0 command code before the RESUME command.
+ */
+static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
+ if (is_m29ew(cfi))
+ map_write(map, CMD(0xF0), adr);
+}
+
+/*
+ * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
+ *
+ * Some revisions of the M29EW (for example, A1 and A2 step revisions)
+ * are affected by a problem that could cause a hang up when an ERASE SUSPEND
+ * command is issued after an ERASE RESUME operation without waiting for a
+ * minimum delay. The result is that once the ERASE seems to be completed
+ * (no bits are toggling), the contents of the Flash memory block on which
+ * the erase was ongoing could be inconsistent with the expected values
+ * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
+ * values), causing a consequent failure of the ERASE operation.
+ * The occurrence of this issue could be high, especially when file system
+ * operations on the Flash are intensive. As a result, it is recommended
+ * that a patch be applied. Intensive file system operations can cause many
+ * calls to the garbage routine to free Flash space (also by erasing physical
+ * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
+ * commands can occur. The problem disappears when a delay is inserted after
+ * the RESUME command by using the udelay() function available in Linux.
+ * The DELAY value must be tuned based on the customer's platform.
+ * The maximum value that fixes the problem in all cases is 500us.
+ * But, in our experience, a delay of 30 µs to 50 µs is sufficient
+ * in most cases.
+ * We have chosen 500µs because this latency is acceptable.
+ */
+static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
+{
+ /*
+	 * Resolving the Delay After Resume Issue, see Micron TN-13-07.
+	 * The worst-case delay is 500µs, but 30-50µs should be OK as well.
+ */
+ if (is_m29ew(cfi))
+ cfi_udelay(500);
+}
+
+struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct device_node __maybe_unused *np = map->device_node;
+ struct mtd_info *mtd;
+ int i;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return NULL;
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+
+ /* Fill in the default mtd operations */
+ mtd->_erase = cfi_amdstd_erase_varsize;
+ mtd->_write = cfi_amdstd_write_words;
+ mtd->_read = cfi_amdstd_read;
+ mtd->_sync = cfi_amdstd_sync;
+ mtd->_suspend = cfi_amdstd_suspend;
+ mtd->_resume = cfi_amdstd_resume;
+ mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
+ mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
+ mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
+ mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
+ mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->name = map->name;
+ mtd->writesize = 1;
+ mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+
+ pr_debug("MTD %s(): write buffer size %d\n", __func__,
+ mtd->writebufsize);
+
+ mtd->_panic_write = cfi_amdstd_panic_write;
+ mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
+
+ if (cfi->cfi_mode==CFI_MODE_CFI){
+ unsigned char bootloc;
+ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
+ struct cfi_pri_amdstd *extp;
+
+ extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
+ if (extp) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure.
+ */
+ cfi_fixup_major_minor(cfi, extp);
+
+ /*
+ * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
+ * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
+ * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
+ * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
+ * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
+ */
+ if (extp->MajorVersion != '1' ||
+ (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
+ printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
+ "version %c.%c (%#02x/%#02x).\n",
+ extp->MajorVersion, extp->MinorVersion,
+ extp->MajorVersion, extp->MinorVersion);
+ kfree(extp);
+ kfree(mtd);
+ return NULL;
+ }
+
+ printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
+ extp->MajorVersion, extp->MinorVersion);
+
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
+
+ /* Apply cfi device specific fixups */
+ cfi_fixup(mtd, cfi_fixup_table);
+
+#ifdef DEBUG_CFI_FEATURES
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
+#endif
+
+#ifdef CONFIG_OF
+ if (np && of_property_read_bool(
+ np, "use-advanced-sector-protection")
+ && extp->BlkProtUnprot == 8) {
+ printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
+ mtd->_lock = cfi_ppb_lock;
+ mtd->_unlock = cfi_ppb_unlock;
+ mtd->_is_locked = cfi_ppb_is_locked;
+ }
+#endif
+
+ bootloc = extp->TopBottom;
+ if ((bootloc < 2) || (bootloc > 5)) {
+ printk(KERN_WARNING "%s: CFI contains unrecognised boot "
+ "bank location (%d). Assuming bottom.\n",
+ map->name, bootloc);
+ bootloc = 2;
+ }
+
+ if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
+ printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
+ int j = (cfi->cfiq->NumEraseRegions-1)-i;
+ __u32 swap;
+
+ swap = cfi->cfiq->EraseRegionInfo[i];
+ cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
+ cfi->cfiq->EraseRegionInfo[j] = swap;
+ }
+ }
+ /* Set the default CFI lock/unlock addresses */
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+ }
+ cfi_fixup(mtd, cfi_nopri_fixup_table);
+
+ if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
+ kfree(mtd);
+ return NULL;
+ }
+
+ } /* CFI mode */
+ else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
+ /* Apply jedec specific fixups */
+ cfi_fixup(mtd, jedec_fixup_table);
+ }
+ /* Apply generic fixups */
+ cfi_fixup(mtd, fixup_table);
+
+ for (i=0; i< cfi->numchips; i++) {
+ cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
+ cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
+ cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
+ /*
+		 * First calculate the maximum timeout according to the timeout
+		 * field of struct cfi_ident probed from the chip's CFI area, if
+		 * available.  Specify a minimum of 2000us, in case the CFI data
+		 * is wrong.
+ */
+ if (cfi->cfiq->BufWriteTimeoutTyp &&
+ cfi->cfiq->BufWriteTimeoutMax)
+ cfi->chips[i].buffer_write_time_max =
+ 1 << (cfi->cfiq->BufWriteTimeoutTyp +
+ cfi->cfiq->BufWriteTimeoutMax);
+ else
+ cfi->chips[i].buffer_write_time_max = 0;
+
+ cfi->chips[i].buffer_write_time_max =
+ max(cfi->chips[i].buffer_write_time_max, 2000);
+
+ cfi->chips[i].ref_point_counter = 0;
+ init_waitqueue_head(&(cfi->chips[i].wq));
+ }
+
+ map->fldrv = &cfi_amdstd_chipdrv;
+
+ return cfi_amdstd_setup(mtd);
+}
+struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
+struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
+EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
+
+static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
+ unsigned long offset = 0;
+ int i,j;
+
+ printk(KERN_NOTICE "number of %s chips: %d\n",
+ (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
+ /* Select the correct geometry setup */
+ mtd->size = devsize * cfi->numchips;
+
+ mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
+ mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
+ * mtd->numeraseregions, GFP_KERNEL);
+ if (!mtd->eraseregions)
+ goto setup_err;
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ unsigned long ernum, ersize;
+ ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
+ ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
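+		/* EraseRegionInfo packs (block size / 256) in its upper 16 bits
+		   and (number of blocks - 1) in its lower 16 bits; e.g.
+		   0x002003ff decodes to 1024 blocks of 8KiB per chip */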
+
+ if (mtd->erasesize < ersize) {
+ mtd->erasesize = ersize;
+ }
+ for (j=0; j<cfi->numchips; j++) {
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
+ }
+ offset += (ersize * ernum);
+ }
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ goto setup_err;
+ }
+
+ __module_get(THIS_MODULE);
+ register_reboot_notifier(&mtd->reboot_notifier);
+ return mtd;
+
+ setup_err:
+ kfree(mtd->eraseregions);
+ kfree(mtd);
+ kfree(cfi->cmdset_priv);
+ kfree(cfi->cfiq);
+ return NULL;
+}
+
+/*
+ * Return true if the chip is ready.
+ *
+ * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
+ * non-suspended sector) and is indicated by no toggle bits toggling.
+ *
+ * Note that anything more complicated than checking if no bits are toggling
+ * (including checking DQ5 for an error status) is tricky to get working
+ * correctly and is therefore not done (particularly with interleaved chips
+ * as each chip must be checked independently of the others).
+ */
+static int __xipram chip_ready(struct map_info *map, unsigned long addr)
+{
+ map_word d, t;
+
+ d = map_read(map, addr);
+ t = map_read(map, addr);
+
+ return map_word_equal(map, d, t);
+}
+
+/*
+ * Return true if the chip is ready and has the correct value.
+ *
+ * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
+ * non-suspended sector) and it is indicated by no bits toggling.
+ *
+ * Errors are indicated by toggling bits, or by bits held at the wrong
+ * value.
+ *
+ * Note that anything more complicated than checking if no bits are toggling
+ * (including checking DQ5 for an error status) is tricky to get working
+ * correctly and is therefore not done (particularly with interleaved chips
+ * as each chip must be checked independently of the others).
+ *
+ */
+static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
+{
+ map_word oldd, curd;
+
+ oldd = map_read(map, addr);
+ curd = map_read(map, addr);
+
+ return map_word_equal(map, oldd, curd) &&
+ map_word_equal(map, curd, expected);
+}
+
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo;
+ struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
+
+ resettime:
+ timeo = jiffies + HZ;
+ retry:
+ switch (chip->state) {
+
+ case FL_STATUS:
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+ return -EIO;
+ }
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ /* Someone else might have been playing with it. */
+ goto retry;
+ }
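+		/* fall through */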
+
+ case FL_READY:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ return 0;
+
+ case FL_ERASING:
+ if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
+ goto sleep;
+
+ /* We could check to see if we're trying to access the sector
+ * that is currently being erased. However, no user will try
+ * anything like that so we just wait for the timeout. */
+
+ /* Erase suspend */
+ /* It's harmless to issue the Erase-Suspend and Erase-Resume
+ * commands when the erase algorithm isn't in progress. */
+ map_write(map, CMD(0xB0), chip->in_progress_block_addr);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ chip->erase_suspended = 1;
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ /* Should have suspended the erase by now.
+ * Send an Erase-Resume command as either
+ * there was an error (so leave the erase
+					 * routine to recover from it) or we are trying
+					 * to use the erase-in-progress sector. */
+ put_chip(map, chip, adr);
+ printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
+ return -EIO;
+ }
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
+ So we can just loop here. */
+ }
+ chip->state = FL_READY;
+ return 0;
+
+ case FL_XIP_WHILE_ERASING:
+ if (mode != FL_READY && mode != FL_POINT &&
+ (!cfip || !(cfip->EraseSuspend&2)))
+ goto sleep;
+ chip->oldstate = chip->state;
+ chip->state = FL_READY;
+ return 0;
+
+ case FL_SHUTDOWN:
+ /* The machine is rebooting */
+ return -EIO;
+
+ case FL_POINT:
+ /* Only if there's no operation suspended... */
+ if (mode == FL_READY && chip->oldstate == FL_READY)
+ return 0;
+
+ default:
+ sleep:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ goto resettime;
+ }
+}
+
+
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ switch(chip->oldstate) {
+ case FL_ERASING:
+ cfi_fixup_m29ew_erase_suspend(map,
+ chip->in_progress_block_addr);
+ map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
+ cfi_fixup_m29ew_delay_after_resume(cfi);
+ chip->oldstate = FL_READY;
+ chip->state = FL_ERASING;
+ break;
+
+ case FL_XIP_WHILE_ERASING:
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ break;
+
+ case FL_READY:
+ case FL_STATUS:
+ break;
+ default:
+ printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
+ }
+ wake_up(&chip->wq);
+}
+
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * No interrupt whatsoever can be serviced while the flash isn't in array
+ * mode. This is ensured by the xip_disable() and xip_enable() functions
+ * enclosing any code path where the flash is known not to be in array mode.
+ * And within a XIP disabled code path, only functions marked with __xipram
+ * may be called and nothing else (it's a good thing to inspect generated
+ * assembly to make sure inline functions were actually inlined and that gcc
+ * didn't emit calls to its own support functions). Configuring MTD CFI
+ * support for a single buswidth and a single interleave is also recommended.
+ */
+
+static void xip_disable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ /* TODO: chips with no XIP use should ignore and return */
+ (void) map_read(map, adr); /* ensure mmu mapping is up to date */
+ local_irq_disable();
+}
+
+static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xf0), adr);
+ chip->state = FL_READY;
+ }
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+}
+
+/*
+ * When a delay is required for the flash operation to complete, the
+ * xip_udelay() function is polling for both the given timeout and pending
+ * (but still masked) hardware interrupts. Whenever there is an interrupt
+ * pending then the flash erase operation is suspended, array mode restored
+ * and interrupts unmasked. Task scheduling might also happen at that
+ * point. The CPU eventually returns from the interrupt or the call to
+ * schedule() and the suspended flash operation is resumed for the remainder
+ * of the delay period.
+ *
+ * Warning: this function _will_ fool interrupt latency tracing tools.
+ */
+
+static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int usec)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ map_word status, OK = CMD(0x80);
+ unsigned long suspended, start = xip_currtime();
+ flstate_t oldstate;
+
+ do {
+ cpu_relax();
+ if (xip_irqpending() && extp &&
+ ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
+ (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
+ /*
+ * Let's suspend the erase operation when supported.
+ * Note that we currently don't try to suspend
+ * interleaved chips if there is already another
+ * operation suspended (imagine what happens
+ * when one chip was already done with the current
+ * operation while another chip suspended it, then
+ * we resume the whole thing at once). Yes, it
+ * can happen!
+ */
+ map_write(map, CMD(0xb0), adr);
+ usec -= xip_elapsed_since(start);
+ suspended = xip_currtime();
+ do {
+ if (xip_elapsed_since(suspended) > 100000) {
+ /*
+ * The chip doesn't want to suspend
+ * after waiting for 100 msecs.
+ * This is a critical error but there
+ * is not much we can do here.
+ */
+ return;
+ }
+ status = map_read(map, adr);
+ } while (!map_word_andequal(map, status, OK, OK));
+
+ /* Suspend succeeded */
+ oldstate = chip->state;
+ if (!map_word_bitsset(map, status, CMD(0x40)))
+ break;
+ chip->state = FL_XIP_WHILE_ERASING;
+ chip->erase_suspended = 1;
+ map_write(map, CMD(0xf0), adr);
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+ mutex_unlock(&chip->mutex);
+ xip_iprefetch();
+ cond_resched();
+
+ /*
+ * We're back. However someone else might have
+ * decided to go write to the chip if we are in
+ * a suspended erase state. If so let's wait
+ * until it's done.
+ */
+ mutex_lock(&chip->mutex);
+ while (chip->state != FL_XIP_WHILE_ERASING) {
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ }
+ /* Disallow XIP again */
+ local_irq_disable();
+
+ /* Correct Erase Suspend Hangups for M29EW */
+ cfi_fixup_m29ew_erase_suspend(map, adr);
+ /* Resume the write or erase operation */
+ map_write(map, cfi->sector_erase_cmd, adr);
+ chip->state = oldstate;
+ start = xip_currtime();
+ } else if (usec >= 1000000/HZ) {
+ /*
+ * Try to save on CPU power when waiting delay
+ * is at least a system timer tick period.
+ * No need to be extremely accurate here.
+ */
+ xip_cpu_idle();
+ }
+ status = map_read(map, adr);
+ } while (!map_word_andequal(map, status, OK, OK)
+ && xip_elapsed_since(start) < usec);
+}
+
+#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
+
+/*
+ * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
+ * the flash is actively programming or erasing since we have to poll for
+ * the operation to complete anyway. We can't do that in a generic way with
+ * a XIP setup so do it before the actual flash operation in this case
+ * and stub it out from INVALIDATE_CACHE_UDELAY.
+ */
+#define XIP_INVAL_CACHED_RANGE(map, from, size) \
+ INVALIDATE_CACHED_RANGE(map, from, size)
+
+#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+ UDELAY(map, chip, adr, usec)
+
+/*
+ * Extra notes:
+ *
+ * Activating this XIP support changes the way the code works a bit. For
+ * example the code to suspend the current process when concurrent access
+ * happens is never executed because xip_udelay() will always return with the
+ * same chip state as it was entered with. This is why there is no care for
+ * the presence of add_wait_queue() or schedule() calls from within a couple of
+ * xip_disable()'d areas of code, like in do_erase_oneblock for example.
+ * The queueing and scheduling are always happening within xip_udelay().
+ *
+ * Similarly, get_chip() and put_chip() just happen to always be executed
+ * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the flash
+ * in array mode, therefore never executing many cases therein and not
+ * causing any problem with XIP.
+ */
+
+#else
+
+#define xip_disable(map, chip, adr)
+#define xip_enable(map, chip, adr)
+#define XIP_INVAL_CACHED_RANGE(x...)
+
+#define UDELAY(map, chip, adr, usec) \
+do { \
+ mutex_unlock(&chip->mutex); \
+ cfi_udelay(usec); \
+ mutex_lock(&chip->mutex); \
+} while (0)
+
+#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+do { \
+ mutex_unlock(&chip->mutex); \
+ INVALIDATE_CACHED_RANGE(map, adr, len); \
+ cfi_udelay(usec); \
+ mutex_lock(&chip->mutex); \
+} while (0)
+
+#endif
+
+static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+{
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, cmd_addr, FL_READY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xf0), cmd_addr);
+ chip->state = FL_READY;
+ }
+
+ map_copy_from(map, buf, adr, len);
+
+ put_chip(map, chip, cmd_addr);
+
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+
+static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+ buf += thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return ret;
+}
+
+typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
+ loff_t adr, size_t len, u_char *buf, size_t grouplen);
+
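+/*
+ * The SecSi (OTP) area is mapped in with the standard AMD unlock
+ * cycles (0xAA/0x55) followed by the 0x88 entry command, and mapped
+ * out again with the unlock cycles followed by the 0x90/0x00 exit
+ * sequence. While it is mapped in, reads return OTP data instead of
+ * the normal array contents, so the cached range is invalidated on
+ * both entry and exit.
+ */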
+static inline void otp_enter(struct map_info *map, struct flchip *chip,
+ loff_t adr, size_t len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
+}
+
+static inline void otp_exit(struct map_info *map, struct flchip *chip,
+ loff_t adr, size_t len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
+}
+
+static inline int do_read_secsi_onechip(struct map_info *map,
+ struct flchip *chip, loff_t adr,
+ size_t len, u_char *buf,
+ size_t grouplen)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long timeo = jiffies + HZ;
+
+ retry:
+ mutex_lock(&chip->mutex);
+
+ if (chip->state != FL_READY){
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
+ mutex_unlock(&chip->mutex);
+
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+
+ goto retry;
+ }
+
+ adr += chip->start;
+
+ chip->state = FL_READY;
+
+ otp_enter(map, chip, adr, len);
+ map_copy_from(map, buf, adr, len);
+ otp_exit(map, chip, adr, len);
+
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+
+ return 0;
+}
+
+static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+ /* ofs: offset within the first chip that the first read should start */
+ /* 8 secsi bytes per chip */
+	chipnum = from >> 3;
+	ofs = from & 7;
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> 3)
+ thislen = (1<<3) - ofs;
+ else
+ thislen = len;
+
+ ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
+ thislen, buf, 0);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+ buf += thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return ret;
+}
+
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum,
+ int mode);
+
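+/*
+ * OTP data is programmed one bus word at a time via do_write_oneword()
+ * in FL_OTP_WRITE mode. A write that does not cover a full bus word is
+ * handled read-modify-write style: the current OTP contents are read
+ * back and merged with the new bytes before programming.
+ */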
+static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
+ size_t len, u_char *buf, size_t grouplen)
+{
+ int ret;
+ while (len) {
+ unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
+ int gap = adr - bus_ofs;
+ int n = min_t(int, len, map_bankwidth(map) - gap);
+ map_word datum;
+
+ if (n != map_bankwidth(map)) {
+ /* partial write of a word, load old contents */
+ otp_enter(map, chip, bus_ofs, map_bankwidth(map));
+ datum = map_read(map, bus_ofs);
+ otp_exit(map, chip, bus_ofs, map_bankwidth(map));
+ }
+
+ datum = map_word_load_partial(map, datum, buf, gap, n);
+ ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
+ if (ret)
+ return ret;
+
+ adr += n;
+ buf += n;
+ len -= n;
+ }
+
+ return 0;
+}
+
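+/*
+ * Locking the user OTP area is done through the chip's lock register:
+ * clearing bit 0 permanently protects the extended memory block. The
+ * operation must cover the whole OTP group at once, hence the
+ * adr/grouplen check below.
+ */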
+static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
+ size_t len, u_char *buf, size_t grouplen)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ uint8_t lockreg;
+ unsigned long timeo;
+ int ret;
+
+ /* make sure area matches group boundaries */
+ if ((adr != 0) || (len != grouplen))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+ chip->state = FL_LOCKING;
+
+ /* Enter lock register command */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ /* read lock register */
+ lockreg = cfi_read_query(map, 0);
+
+	/* clear bit 0 to protect the extended memory block */
+	lockreg &= ~0x01;
+
+ /* write lock register */
+ map_write(map, CMD(0xA0), chip->start);
+ map_write(map, CMD(lockreg), chip->start);
+
+ /* wait for chip to become ready */
+ timeo = jiffies + msecs_to_jiffies(2);
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ pr_err("Waiting for chip to be ready timed out.\n");
+ ret = -EIO;
+ break;
+ }
+ UDELAY(map, chip, 0, 1);
+ }
+
+ /* exit protection commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, chip->start);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
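+/*
+ * Walk the OTP regions of every chip. With a NULL action, one struct
+ * otp_info record (start/length/locked) is emitted per region;
+ * otherwise the action (read, write or lock) is applied to the byte
+ * range selected by 'from' and 'len'. user_regs selects between the
+ * customer-lockable and the factory-locked register set.
+ */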
+static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf,
+ otp_op_t action, int user_regs)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ofs_factor = cfi->interleave * cfi->device_type;
+ unsigned long base;
+ int chipnum;
+ struct flchip *chip;
+ uint8_t otp, lockreg;
+ int ret;
+
+ size_t user_size, factory_size, otpsize;
+ loff_t user_offset, factory_offset, otpoffset;
+ int user_locked = 0, otplocked;
+
+ *retlen = 0;
+
+ for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
+ chip = &cfi->chips[chipnum];
+ factory_size = 0;
+ user_size = 0;
+
+ /* Micron M29EW family */
+ if (is_m29ew(cfi)) {
+ base = chip->start;
+
+ /* check whether secsi area is factory locked
+ or user lockable */
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, base, FL_CFI_QUERY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+ cfi_qry_mode_on(base, map, cfi);
+ otp = cfi_read_query(map, base + 0x3 * ofs_factor);
+ cfi_qry_mode_off(base, map, cfi);
+ put_chip(map, chip, base);
+ mutex_unlock(&chip->mutex);
+
+ if (otp & 0x80) {
+ /* factory locked */
+ factory_offset = 0;
+ factory_size = 0x100;
+ } else {
+ /* customer lockable */
+ user_offset = 0;
+ user_size = 0x100;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, base, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ /* Enter lock register command */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
+ chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
+ chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
+ chip->start, map, cfi,
+ cfi->device_type, NULL);
+ /* read lock register */
+ lockreg = cfi_read_query(map, 0);
+ /* exit protection commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+ put_chip(map, chip, chip->start);
+ mutex_unlock(&chip->mutex);
+
+ user_locked = ((lockreg & 0x01) == 0x00);
+ }
+ }
+
+ otpsize = user_regs ? user_size : factory_size;
+ if (!otpsize)
+ continue;
+ otpoffset = user_regs ? user_offset : factory_offset;
+ otplocked = user_regs ? user_locked : 1;
+
+ if (!action) {
+ /* return otpinfo */
+ struct otp_info *otpinfo;
+			/*
+			 * 'len' is a size_t; check before subtracting
+			 * so it cannot wrap around zero
+			 */
+			if (len <= sizeof(*otpinfo))
+				return -ENOSPC;
+			len -= sizeof(*otpinfo);
+ otpinfo = (struct otp_info *)buf;
+ otpinfo->start = from;
+ otpinfo->length = otpsize;
+ otpinfo->locked = otplocked;
+ buf += sizeof(*otpinfo);
+ *retlen += sizeof(*otpinfo);
+ from += otpsize;
+ } else if ((from < otpsize) && (len > 0)) {
+ size_t size;
+ size = (len < otpsize - from) ? len : otpsize - from;
+ ret = action(map, chip, otpoffset + from, size, buf,
+ otpsize);
+ if (ret < 0)
+ return ret;
+
+ buf += size;
+ len -= size;
+ *retlen += size;
+ from = 0;
+ } else {
+ from -= otpsize;
+ }
+ }
+ return 0;
+}
+
+static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 0);
+}
+
+static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 1);
+}
+
+static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, from, len, retlen,
+ buf, do_read_secsi_onechip, 0);
+}
+
+static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, from, len, retlen,
+ buf, do_read_secsi_onechip, 1);
+}
+
+static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
+ do_otp_write, 1);
+}
+
+static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ size_t retlen;
+ return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
+ do_otp_lock, 1);
+}
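+
+/*
+ * The wrappers above back the generic MTD OTP interface
+ * (mtd_get_fact_prot_info(), mtd_read_user_prot_reg() and friends);
+ * the cmdset setup code is expected to point the corresponding
+ * mtd_info hooks at them for chips that expose a SecSi area.
+ */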
+
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum,
+ int mode)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo = jiffies + HZ;
+	/*
+	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
+	 * have a max write time of a few hundred usec). However, we should
+	 * use the maximum timeout value given by the chip at probe time
+	 * instead. Unfortunately, struct flchip does not have a field for
+	 * the maximum timeout, only for the typical one, which can be far
+	 * too short depending on the conditions. The ' + 1' is to avoid
+	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
+	 */
+	unsigned long uWriteTimeout = (HZ / 1000) + 1;
+ int ret = 0;
+ map_word oldd;
+ int retry_cnt = 0;
+
+ adr += chip->start;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, mode);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+ __func__, adr, datum.x[0] );
+
+ if (mode == FL_OTP_WRITE)
+ otp_enter(map, chip, adr, map_bankwidth(map));
+
+ /*
+ * Check for a NOP for the case when the datum to write is already
+ * present - it saves time and works around buggy chips that corrupt
+ * data at other locations when 0xff is written to a location that
+ * already contains 0xff.
+ */
+ oldd = map_read(map, adr);
+ if (map_word_equal(map, oldd, datum)) {
+ pr_debug("MTD %s(): NOP\n",
+ __func__);
+ goto op_done;
+ }
+
+ XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ retry:
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ map_write(map, datum, adr);
+ chip->state = mode;
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, map_bankwidth(map),
+ chip->word_write_time);
+
+ /* See comment above for timeout value. */
+ timeo = jiffies + uWriteTimeout;
+ for (;;) {
+ if (chip->state != mode) {
+ /* Someone's suspended the write. Sleep */
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+ if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
+ xip_enable(map, chip, adr);
+ printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
+ xip_disable(map, chip, adr);
+ break;
+ }
+
+ if (chip_ready(map, adr))
+ break;
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1);
+ }
+ /* Did we succeed? */
+ if (!chip_good(map, adr, datum)) {
+ /* reset on all failures. */
+ map_write( map, CMD(0xF0), chip->start );
+ /* FIXME - should have reset delay before continuing */
+
+ if (++retry_cnt <= MAX_WORD_RETRIES)
+ goto retry;
+
+ ret = -EIO;
+ }
+ xip_enable(map, chip, adr);
+ op_done:
+ if (mode == FL_OTP_WRITE)
+ otp_exit(map, chip, adr, map_bankwidth(map));
+ chip->state = FL_READY;
+ DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+
+static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret = 0;
+ int chipnum;
+ unsigned long ofs, chipstart;
+ DECLARE_WAITQUEUE(wait, current);
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+ chipstart = cfi->chips[chipnum].start;
+
+ /* If it's not bus-aligned, do the first byte write */
+ if (ofs & (map_bankwidth(map)-1)) {
+ unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
+ int i = ofs - bus_ofs;
+ int n = 0;
+ map_word tmp_buf;
+
+ retry:
+ mutex_lock(&cfi->chips[chipnum].mutex);
+
+ if (cfi->chips[chipnum].state != FL_READY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&cfi->chips[chipnum].wq, &wait);
+
+ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ schedule();
+ remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
+ goto retry;
+ }
+
+ /* Load 'tmp_buf' with old contents of flash */
+ tmp_buf = map_read(map, bus_ofs+chipstart);
+
+ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ /* Number of bytes to copy from buffer */
+ n = min_t(int, len, map_bankwidth(map)-i);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ bus_ofs, tmp_buf, FL_WRITING);
+ if (ret)
+ return ret;
+
+ ofs += n;
+ buf += n;
+ (*retlen) += n;
+ len -= n;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ /* We are now aligned, write as much as possible */
+ while(len >= map_bankwidth(map)) {
+ map_word datum;
+
+ datum = map_word_load(map, buf);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ ofs, datum, FL_WRITING);
+ if (ret)
+ return ret;
+
+ ofs += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ (*retlen) += map_bankwidth(map);
+ len -= map_bankwidth(map);
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ chipstart = cfi->chips[chipnum].start;
+ }
+ }
+
+ /* Write the trailing bytes if any */
+ if (len & (map_bankwidth(map)-1)) {
+ map_word tmp_buf;
+
+ retry1:
+ mutex_lock(&cfi->chips[chipnum].mutex);
+
+ if (cfi->chips[chipnum].state != FL_READY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&cfi->chips[chipnum].wq, &wait);
+
+ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ schedule();
+ remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
+ goto retry1;
+ }
+
+ tmp_buf = map_read(map, ofs + chipstart);
+
+ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ ofs, tmp_buf, FL_WRITING);
+ if (ret)
+ return ret;
+
+ (*retlen) += len;
+ }
+
+ return 0;
+}
+
+
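+/*
+ * Buffered writes use the Write Buffer Load (0x25) / Program Buffer to
+ * Flash (0x29) command pair: after the two unlock cycles, the word
+ * count minus one is written to the sector address, the data words
+ * follow, and the confirm command starts the actual programming.
+ */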
+/*
+ * FIXME: interleaved mode not tested, and probably not supported!
+ */
+static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const u_char *buf,
+ int len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo = jiffies + HZ;
+ /*
+ * Timeout is calculated according to CFI data, if available.
+ * See more comments in cfi_cmdset_0002().
+ */
+ unsigned long uWriteTimeout =
+ usecs_to_jiffies(chip->buffer_write_time_max);
+ int ret = -EIO;
+ unsigned long cmd_adr;
+ int z, words;
+ map_word datum;
+
+ adr += chip->start;
+ cmd_adr = adr;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_WRITING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ datum = map_word_load(map, buf);
+
+ pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+ __func__, adr, datum.x[0] );
+
+ XIP_INVAL_CACHED_RANGE(map, adr, len);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, cmd_adr);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+
+ /* Write Buffer Load */
+ map_write(map, CMD(0x25), cmd_adr);
+
+ chip->state = FL_WRITING_TO_BUFFER;
+
+ /* Write length of data to come */
+ words = len / map_bankwidth(map);
+ map_write(map, CMD(words - 1), cmd_adr);
+ /* Write data */
+ z = 0;
+ while(z < words * map_bankwidth(map)) {
+ datum = map_word_load(map, buf);
+ map_write(map, datum, adr + z);
+
+ z += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ }
+ z -= map_bankwidth(map);
+
+ adr += z;
+
+ /* Write Buffer Program Confirm: GO GO GO */
+ map_write(map, CMD(0x29), cmd_adr);
+ chip->state = FL_WRITING;
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, map_bankwidth(map),
+ chip->word_write_time);
+
+ timeo = jiffies + uWriteTimeout;
+
+ for (;;) {
+ if (chip->state != FL_WRITING) {
+ /* Someone's suspended the write. Sleep */
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+ if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+ break;
+
+ if (chip_ready(map, adr)) {
+ xip_enable(map, chip, adr);
+ goto op_done;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1);
+ }
+
+ /*
+ * Recovery from write-buffer programming failures requires
+ * the write-to-buffer-reset sequence. Since the last part
+ * of the sequence also works as a normal reset, we can run
+ * the same commands regardless of why we are here.
+ * See e.g.
+ * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
+ */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ xip_enable(map, chip, adr);
+ /* FIXME - should have reset delay before continuing */
+
+ printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
+ __func__, adr);
+
+ ret = -EIO;
+ op_done:
+ chip->state = FL_READY;
+ DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+
+static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ int ret = 0;
+ int chipnum;
+ unsigned long ofs;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+
+ /* If it's not bus-aligned, do the first word write */
+ if (ofs & (map_bankwidth(map)-1)) {
+ size_t local_len = (-ofs)&(map_bankwidth(map)-1);
+ if (local_len > len)
+ local_len = len;
+ ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
+ local_len, retlen, buf);
+ if (ret)
+ return ret;
+ ofs += local_len;
+ buf += local_len;
+ len -= local_len;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ /* Write buffer is worth it only if more than one word to write... */
+ while (len >= map_bankwidth(map) * 2) {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
+
+ if (size > len)
+ size = len;
+ if (size % map_bankwidth(map))
+ size -= size % map_bankwidth(map);
+
+ ret = do_write_buffer(map, &cfi->chips[chipnum],
+ ofs, buf, size);
+ if (ret)
+ return ret;
+
+ ofs += size;
+ buf += size;
+ (*retlen) += size;
+ len -= size;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ if (len) {
+ size_t retlen_dregs = 0;
+
+ ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
+ len, &retlen_dregs, buf);
+
+ *retlen += retlen_dregs;
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Wait for the flash chip to become ready to write data
+ *
+ * This is only called during the panic_write() path. When panic_write()
+ * is called, the kernel is in the process of a panic, and will soon be
+ * dead. Therefore we don't take any locks, and attempt to get access
+ * to the chip as soon as possible.
+ */
+static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int retries = 10;
+ int i;
+
+ /*
+ * If the driver thinks the chip is idle, and no toggle bits
+ * are changing, then the chip is actually idle for sure.
+ */
+ if (chip->state == FL_READY && chip_ready(map, adr))
+ return 0;
+
+ /*
+ * Try several times to reset the chip and then wait for it
+ * to become idle. The upper limit of a few milliseconds of
+ * delay isn't a big problem: the kernel is dying anyway. It
+ * is more important to save the messages.
+ */
+ while (retries > 0) {
+ const unsigned long timeo = (HZ / 1000) + 1;
+
+ /* send the reset command */
+ map_write(map, CMD(0xF0), chip->start);
+
+ /* wait for the chip to become ready */
+ for (i = 0; i < jiffies_to_usecs(timeo); i++) {
+ if (chip_ready(map, adr))
+ return 0;
+
+ udelay(1);
+ }
+
+ retries--;
+ }
+
+ /* the chip never became ready */
+ return -EBUSY;
+}
+
+/*
+ * Write out one word of data to a single flash chip during a kernel panic
+ *
+ * This is only called during the panic_write() path. When panic_write()
+ * is called, the kernel is in the process of a panic, and will soon be
+ * dead. Therefore we don't take any locks, and attempt to get access
+ * to the chip as soon as possible.
+ *
+ * The implementation of this routine is intentionally similar to
+ * do_write_oneword(), in order to ease code maintenance.
+ */
+static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum)
+{
+ const unsigned long uWriteTimeout = (HZ / 1000) + 1;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int retry_cnt = 0;
+ map_word oldd;
+ int ret = 0;
+ int i;
+
+ adr += chip->start;
+
+ ret = cfi_amdstd_panic_wait(map, chip, adr);
+ if (ret)
+ return ret;
+
+ pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
+ __func__, adr, datum.x[0]);
+
+ /*
+ * Check for a NOP for the case when the datum to write is already
+ * present - it saves time and works around buggy chips that corrupt
+ * data at other locations when 0xff is written to a location that
+ * already contains 0xff.
+ */
+ oldd = map_read(map, adr);
+ if (map_word_equal(map, oldd, datum)) {
+ pr_debug("MTD %s(): NOP\n", __func__);
+ goto op_done;
+ }
+
+ ENABLE_VPP(map);
+
+retry:
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ map_write(map, datum, adr);
+
+ for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
+ if (chip_ready(map, adr))
+ break;
+
+ udelay(1);
+ }
+
+ if (!chip_good(map, adr, datum)) {
+ /* reset on all failures. */
+ map_write(map, CMD(0xF0), chip->start);
+ /* FIXME - should have reset delay before continuing */
+
+ if (++retry_cnt <= MAX_WORD_RETRIES)
+ goto retry;
+
+ ret = -EIO;
+ }
+
+op_done:
+ DISABLE_VPP(map);
+ return ret;
+}
+
+/*
+ * Write out some data during a kernel panic
+ *
+ * This is used by the mtdoops driver to save the dying messages from a
+ * kernel which has panic'd.
+ *
+ * This routine ignores all of the locking used throughout the rest of the
+ * driver, in order to ensure that the data gets written out no matter what
+ * state this driver (and the flash chip itself) was in when the kernel crashed.
+ *
+ * The implementation of this routine is intentionally similar to
+ * cfi_amdstd_write_words(), in order to ease code maintenance.
+ */
+static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs, chipstart;
+ int ret = 0;
+ int chipnum;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+ chipstart = cfi->chips[chipnum].start;
+
+ /* If it's not bus aligned, do the first byte write */
+ if (ofs & (map_bankwidth(map) - 1)) {
+ unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
+ int i = ofs - bus_ofs;
+ int n = 0;
+ map_word tmp_buf;
+
+ ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
+ if (ret)
+ return ret;
+
+ /* Load 'tmp_buf' with old contents of flash */
+ tmp_buf = map_read(map, bus_ofs + chipstart);
+
+ /* Number of bytes to copy from buffer */
+ n = min_t(int, len, map_bankwidth(map) - i);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ bus_ofs, tmp_buf);
+ if (ret)
+ return ret;
+
+ ofs += n;
+ buf += n;
+ (*retlen) += n;
+ len -= n;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ /* We are now aligned, write as much as possible */
+ while (len >= map_bankwidth(map)) {
+ map_word datum;
+
+ datum = map_word_load(map, buf);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ ofs, datum);
+ if (ret)
+ return ret;
+
+ ofs += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ (*retlen) += map_bankwidth(map);
+ len -= map_bankwidth(map);
+
+ if (ofs >> cfi->chipshift) {
+ chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+
+ chipstart = cfi->chips[chipnum].start;
+ }
+ }
+
+ /* Write the trailing bytes if any */
+ if (len & (map_bankwidth(map) - 1)) {
+ map_word tmp_buf;
+
+ ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
+ if (ret)
+ return ret;
+
+ tmp_buf = map_read(map, ofs + chipstart);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ ofs, tmp_buf);
+ if (ret)
+ return ret;
+
+ (*retlen) += len;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Handle devices with one erase region, that only implement
+ * the chip erase command.
+ */
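+/*
+ * Chip erase is the six-cycle command sequence: two unlock cycles,
+ * 0x80 (erase setup), two more unlock cycles, then 0x10 (chip erase).
+ * Completion is detected by polling with chip_ready() and finally
+ * checking that the array reads back as all 0xFF.
+ */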
+static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo = jiffies + HZ;
+	unsigned long adr;
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
+ adr = cfi->addr_unlock1;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_WRITING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): ERASE 0x%.8lx\n",
+ __func__, chip->start );
+
+ XIP_INVAL_CACHED_RANGE(map, adr, map->size);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+
+ chip->state = FL_ERASING;
+ chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, map->size,
+ chip->erase_time*500);
+
+ timeo = jiffies + (HZ*20);
+
+ for (;;) {
+ if (chip->state != FL_ERASING) {
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+ if (chip->erase_suspended) {
+ /* This erase was suspended and resumed.
+ Adjust the timeout */
+ timeo = jiffies + (HZ*20); /* FIXME */
+ chip->erase_suspended = 0;
+ }
+
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_WARNING "MTD %s(): software timeout\n",
+ __func__ );
+ break;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1000000/HZ);
+ }
+ /* Did we succeed? */
+ if (!chip_good(map, adr, map_word_ff(map))) {
+ /* reset on all failures. */
+ map_write( map, CMD(0xF0), chip->start );
+ /* FIXME - should have reset delay before continuing */
+
+ ret = -EIO;
+ }
+
+ chip->state = FL_READY;
+ xip_enable(map, chip, adr);
+ DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+
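+/*
+ * Sector erase differs from chip erase only in the final cycle: the
+ * sector erase command (cfi->sector_erase_cmd, typically 0x30) is
+ * written to the address of the sector itself instead of the unlock
+ * address.
+ */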
+static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
+ adr += chip->start;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_ERASING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): ERASE 0x%.8lx\n",
+ __func__, adr );
+
+ XIP_INVAL_CACHED_RANGE(map, adr, len);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ map_write(map, cfi->sector_erase_cmd, adr);
+
+ chip->state = FL_ERASING;
+ chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, len,
+ chip->erase_time*500);
+
+ timeo = jiffies + (HZ*20);
+
+ for (;;) {
+ if (chip->state != FL_ERASING) {
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+ if (chip->erase_suspended) {
+ /* This erase was suspended and resumed.
+ Adjust the timeout */
+ timeo = jiffies + (HZ*20); /* FIXME */
+ chip->erase_suspended = 0;
+ }
+
+ if (chip_ready(map, adr)) {
+ xip_enable(map, chip, adr);
+ break;
+ }
+
+ if (time_after(jiffies, timeo)) {
+ xip_enable(map, chip, adr);
+ printk(KERN_WARNING "MTD %s(): software timeout\n",
+ __func__ );
+ break;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1000000/HZ);
+ }
+ /* Did we succeed? */
+ if (!chip_good(map, adr, map_word_ff(map))) {
+ /* reset on all failures. */
+ map_write( map, CMD(0xF0), chip->start );
+ /* FIXME - should have reset delay before continuing */
+
+ ret = -EIO;
+ }
+
+ chip->state = FL_READY;
+ DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+
+static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
+{
+ unsigned long ofs, len;
+ int ret;
+
+ ofs = instr->addr;
+ len = instr->len;
+
+ ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
+ if (ret)
+ return ret;
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+
+static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret = 0;
+
+ if (instr->addr != 0)
+ return -EINVAL;
+
+ if (instr->len != mtd->size)
+ return -EINVAL;
+
+ ret = do_erase_chip(map, &cfi->chips[0]);
+ if (ret)
+ return ret;
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+static int do_atmel_lock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ if (ret)
+ goto out_unlock;
+ chip->state = FL_LOCKING;
+
+ pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ map_write(map, CMD(0x40), chip->start + adr);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
+ if (ret)
+ goto out_unlock;
+ chip->state = FL_UNLOCKING;
+
+ pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ map_write(map, CMD(0x70), adr);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
+}
+
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
+}
+
+/*
+ * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
+ */
+
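+/*
+ * PPB bits are non-volatile per-sector protection bits. They can be
+ * set individually, but can only be cleared for the whole chip at
+ * once; cfi_ppb_unlock() therefore has to record the lock state of
+ * every sector first and re-lock the ones that should stay protected.
+ */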
+struct ppb_lock {
+ struct flchip *chip;
+ loff_t offset;
+ int locked;
+};
+
+#define MAX_SECTORS 512
+
+#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
+#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
+
+static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ /* PPB entry command */
+ cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+ chip->state = FL_LOCKING;
+ map_write(map, CMD(0xA0), chip->start + adr);
+ map_write(map, CMD(0x00), chip->start + adr);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+ /*
+ * Unlocking of one specific sector is not supported, so we
+ * have to unlock all sectors of this device instead
+ */
+ chip->state = FL_UNLOCKING;
+ map_write(map, CMD(0x80), chip->start);
+ map_write(map, CMD(0x30), chip->start);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
+ chip->state = FL_JEDEC_QUERY;
+ /* Return locked status: 0->locked, 1->unlocked */
+ ret = !cfi_read_query(map, adr);
+ } else
+ BUG();
+
+ /*
+	 * Wait for completion, as unlocking all sectors can take quite a while
+ */
+ timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+ ret = -EIO;
+ break;
+ }
+
+ UDELAY(map, chip, adr, 1);
+ }
+
+	/* Exit protection commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+}
+
+static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct ppb_lock *sect;
+ unsigned long adr;
+ loff_t offset;
+ uint64_t length;
+ int chipnum;
+ int i;
+ int sectors;
+ int ret;
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+	 * We need to re-lock all previously locked sectors. So let's
+ * first check the locking status of all sectors and save
+ * it for future use.
+ */
+	sect = kcalloc(MAX_SECTORS, sizeof(struct ppb_lock), GFP_KERNEL);
+ if (!sect)
+ return -ENOMEM;
+
+ /*
+ * This code to walk all sectors is a slightly modified version
+ * of the cfi_varsize_frob() code.
+ */
+ i = 0;
+ chipnum = 0;
+ adr = 0;
+ sectors = 0;
+ offset = 0;
+ length = mtd->size;
+
+ while (length) {
+ int size = regions[i].erasesize;
+
+ /*
+ * Only test sectors that shall not be unlocked. The other
+		 * sectors shall be unlocked, so let's keep their locking
+ * status at "unlocked" (locked=0) for the final re-locking.
+ */
+ if ((adr < ofs) || (adr >= (ofs + len))) {
+ sect[sectors].chip = &cfi->chips[chipnum];
+ sect[sectors].offset = offset;
+ sect[sectors].locked = do_ppb_xxlock(
+ map, &cfi->chips[chipnum], adr, 0,
+ DO_XXLOCK_ONEBLOCK_GETLOCK);
+ }
+
+ adr += size;
+ offset += size;
+ length -= size;
+
+ if (offset == regions[i].offset + size * regions[i].numblocks)
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+
+ sectors++;
+ if (sectors >= MAX_SECTORS) {
+ printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
+ MAX_SECTORS);
+ kfree(sect);
+ return -EINVAL;
+ }
+ }
+
+ /* Now unlock the whole chip */
+ ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_UNLOCK);
+ if (ret) {
+ kfree(sect);
+ return ret;
+ }
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+ * We need to re-lock all previously locked sectors.
+ */
+ for (i = 0; i < sectors; i++) {
+ if (sect[i].locked)
+ do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+ }
+
+ kfree(sect);
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
+}
+
+static void cfi_amdstd_sync (struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ retry:
+ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ chip->oldstate = chip->state;
+ chip->state = FL_SYNCING;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
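+			/* fall through */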
+ case FL_SYNCING:
+ mutex_unlock(&chip->mutex);
+ break;
+
+ default:
+ /* Not an idle state */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
+ mutex_unlock(&chip->mutex);
+
+ schedule();
+
+ remove_wait_queue(&chip->wq, &wait);
+
+ goto retry;
+ }
+ }
+
+ /* Unlock the chips again */
+
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_SYNCING) {
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+}
+
+
+static int cfi_amdstd_suspend(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ chip->oldstate = chip->state;
+ chip->state = FL_PM_SUSPENDED;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
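+			/* fall through */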
+ case FL_PM_SUSPENDED:
+ break;
+
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+
+ if (ret) {
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+ }
+
+ return ret;
+}
+
+
+static void cfi_amdstd_resume(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+
+ for (i=0; i<cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ chip->state = FL_READY;
+ map_write(map, CMD(0xF0), chip->start);
+ wake_up(&chip->wq);
+ }
+ else
+ printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
+
+ mutex_unlock(&chip->mutex);
+ }
+}
+
+
+/*
+ * Ensure that the flash device is put back into read array mode before
+ * unloading the driver or rebooting. On some systems, rebooting while
+ * the flash is in query/program/erase mode will prevent the CPU from
+ * fetching the bootloader code, requiring a hard reset or power cycle.
+ */
+static int cfi_amdstd_reset(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i, ret;
+ struct flchip *chip;
+
+ for (i = 0; i < cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+ if (!ret) {
+ map_write(map, CMD(0xF0), chip->start);
+ chip->state = FL_SHUTDOWN;
+ put_chip(map, chip, chip->start);
+ }
+
+ mutex_unlock(&chip->mutex);
+ }
+
+ return 0;
+}
+
+
+static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ cfi_amdstd_reset(mtd);
+ return NOTIFY_DONE;
+}
+
+
+static void cfi_amdstd_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi_amdstd_reset(mtd);
+ unregister_reboot_notifier(&mtd->reboot_notifier);
+ kfree(cfi->cmdset_priv);
+ kfree(cfi->cfiq);
+ kfree(cfi);
+ kfree(mtd->eraseregions);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
+MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
+MODULE_ALIAS("cfi_cmdset_0006");
+MODULE_ALIAS("cfi_cmdset_0701");
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
new file mode 100644
index 000000000..9a1a6ffd1
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -0,0 +1,1400 @@
+/*
+ * Common Flash Interface support:
+ * ST Advanced Architecture Command Set (ID 0x0020)
+ *
+ * (C) 2000 Red Hat. GPL'd
+ *
+ * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
+ * - completely revamped method functions so they are aware and
+ * independent of the flash geometry (buswidth, interleave, etc.)
+ * - scalability vs code size is completely set at compile-time
+ * (see include/linux/mtd/cfi.h for selection)
+ * - optimized write buffer method
+ * 06/21/2002 Joern Engel <joern@wh.fh-wedel.de> and others
+ * - modified Intel Command Set 0x0001 to support ST Advanced Architecture
+ * (command set 0x0020)
+ * - added a writev function
+ * 07/13/2005 Joern Engel <joern@wh.fh-wedel.de>
+ * - Plugged memory leak in cfi_staa_writev().
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/mtd.h>
+
+
+static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
+static void cfi_staa_sync (struct mtd_info *);
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_staa_suspend (struct mtd_info *);
+static void cfi_staa_resume (struct mtd_info *);
+
+static void cfi_staa_destroy(struct mtd_info *);
+
+struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
+
+static struct mtd_info *cfi_staa_setup (struct map_info *);
+
+static struct mtd_chip_driver cfi_staa_chipdrv = {
+ .probe = NULL, /* Not usable directly */
+ .destroy = cfi_staa_destroy,
+ .name = "cfi_cmdset_0020",
+ .module = THIS_MODULE
+};
+
+/* #define DEBUG_LOCK_BITS */
+//#define DEBUG_CFI_FEATURES
+
+#ifdef DEBUG_CFI_FEATURES
+static void cfi_tell_features(struct cfi_pri_intelext *extp)
+{
+ int i;
+ printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
+ printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
+ printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
+ printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
+ printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
+ printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
+ printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
+ printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
+ printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
+ printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
+ for (i=9; i<32; i++) {
+ if (extp->FeatureSupport & (1<<i))
+ printk(" - Unknown Bit %X: supported\n", i);
+ }
+
+ printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
+ printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
+ for (i=1; i<8; i++) {
+ if (extp->SuspendCmdSupport & (1<<i))
+ printk(" - Unknown Bit %X: supported\n", i);
+ }
+
+ printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
+ printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
+ printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
+ for (i=2; i<16; i++) {
+ if (extp->BlkStatusRegMask & (1<<i))
+ printk(" - Unknown Bit %X Active: yes\n",i);
+ }
+
+ printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
+ if (extp->VppOptimal)
+ printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
+}
+#endif
+
+/* This routine is made available to other mtd code via
+ * inter_module_register. It must only be accessed through
+ * inter_module_get which will bump the use count of this module. The
+ * addresses passed back in cfi are valid as long as the use count of
+ * this module is non-zero, i.e. between inter_module_get and
+ * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
+ */
+struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+
+ if (cfi->cfi_mode) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure. So we read the feature
+ * table from it.
+ */
+ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
+ struct cfi_pri_intelext *extp;
+
+ extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
+ if (!extp)
+ return NULL;
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
+ printk(KERN_ERR " Unknown ST Microelectronics"
+ " Extended Query version %c.%c.\n",
+ extp->MajorVersion, extp->MinorVersion);
+ kfree(extp);
+ return NULL;
+ }
+
+ /* Do some byteswapping if necessary */
+ extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
+ extp->BlkStatusRegMask = cfi32_to_cpu(map,
+ extp->BlkStatusRegMask);
+
+#ifdef DEBUG_CFI_FEATURES
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
+#endif
+
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
+ }
+
+ for (i=0; i< cfi->numchips; i++) {
+ cfi->chips[i].word_write_time = 128;
+ cfi->chips[i].buffer_write_time = 128;
+ cfi->chips[i].erase_time = 1024;
+ cfi->chips[i].ref_point_counter = 0;
+ init_waitqueue_head(&(cfi->chips[i].wq));
+ }
+
+ return cfi_staa_setup(map);
+}
+EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
+
+static struct mtd_info *cfi_staa_setup(struct map_info *map)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct mtd_info *mtd;
+ unsigned long offset = 0;
+ int i,j;
+ unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
+
+ if (!mtd) {
+ kfree(cfi->cmdset_priv);
+ return NULL;
+ }
+
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+ mtd->size = devsize * cfi->numchips;
+
+ mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
+ mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
+ * mtd->numeraseregions, GFP_KERNEL);
+ if (!mtd->eraseregions) {
+ kfree(cfi->cmdset_priv);
+ kfree(mtd);
+ return NULL;
+ }
+
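+	/*
+	 * CFI encodes each erase region descriptor as: bits 0-15 hold
+	 * the number of blocks minus one, bits 16-31 hold the block
+	 * size in units of 256 bytes.
+	 */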
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ unsigned long ernum, ersize;
+ ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
+ ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
+
+ if (mtd->erasesize < ersize) {
+ mtd->erasesize = ersize;
+ }
+ for (j=0; j<cfi->numchips; j++) {
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
+ }
+ offset += (ersize * ernum);
+ }
+
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ kfree(mtd->eraseregions);
+ kfree(cfi->cmdset_priv);
+ kfree(mtd);
+ return NULL;
+ }
+
+ for (i=0; i<mtd->numeraseregions;i++){
+ printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
+ i, (unsigned long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+ }
+
+ /* Also select the correct geometry setup too */
+ mtd->_erase = cfi_staa_erase_varsize;
+ mtd->_read = cfi_staa_read;
+ mtd->_write = cfi_staa_write_buffers;
+ mtd->_writev = cfi_staa_writev;
+ mtd->_sync = cfi_staa_sync;
+ mtd->_lock = cfi_staa_lock;
+ mtd->_unlock = cfi_staa_unlock;
+ mtd->_suspend = cfi_staa_suspend;
+ mtd->_resume = cfi_staa_resume;
+ mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
+ mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
+ mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ map->fldrv = &cfi_staa_chipdrv;
+ __module_get(THIS_MODULE);
+ mtd->name = map->name;
+ return mtd;
+}
+
+
+static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+{
+ map_word status, status_OK;
+ unsigned long timeo;
+ DECLARE_WAITQUEUE(wait, current);
+ int suspended = 0;
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+ retry:
+ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us.
+ * If it's in FL_ERASING state, suspend it and make it talk now.
+ */
+ switch (chip->state) {
+ case FL_ERASING:
+ if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
+ goto sleep; /* We don't support erase suspend */
+
+ map_write (map, CMD(0xb0), cmd_addr);
+ /* If the flash has finished erasing, then 'erase suspend'
+ * appears to make some (28F320) flash devices switch to
+ * 'read' mode. Make sure that we switch to 'read status'
+ * mode so we get the right data. --rmk
+ */
+ map_write(map, CMD(0x70), cmd_addr);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ // printk("Erase suspending at 0x%lx\n", cmd_addr);
+ for (;;) {
+ status = map_read(map, cmd_addr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ /* Urgh */
+ map_write(map, CMD(0xd0), cmd_addr);
+ /* make sure we're in 'read status' mode */
+ map_write(map, CMD(0x70), cmd_addr);
+ chip->state = FL_ERASING;
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "Chip not ready after erase "
+ "suspended: status = 0x%lx\n", status.x[0]);
+ return -EIO;
+ }
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ }
+
+ suspended = 1;
+ map_write(map, CMD(0xff), cmd_addr);
+ chip->state = FL_READY;
+ break;
+
+#if 0
+ case FL_WRITING:
+ /* Not quite yet */
+#endif
+
+ case FL_READY:
+ break;
+
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ map_write(map, CMD(0x70), cmd_addr);
+ chip->state = FL_STATUS;
+
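+		/* fall through */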
+ case FL_STATUS:
+ status = map_read(map, cmd_addr);
+ if (map_word_andequal(map, status, status_OK, status_OK)) {
+ map_write(map, CMD(0xff), cmd_addr);
+ chip->state = FL_READY;
+ break;
+ }
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ sleep:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ map_copy_from(map, buf, adr, len);
+
+ if (suspended) {
+ chip->state = chip->oldstate;
+ /* What if one interleaved chip has finished and the
+ other hasn't? The old code would leave the finished
+ one in READY mode. That's bad, and caused -EROFS
+ errors to be returned from do_erase_oneblock because
+ that's the only bit it checked for at the time.
+ As the state machine appears to explicitly allow
+ sending the 0x70 (Read Status) command to an erasing
+ chip and expecting it to be ignored, that's what we
+ do. */
+ map_write(map, CMD(0xd0), cmd_addr);
+ map_write(map, CMD(0x70), cmd_addr);
+ }
+
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+ /* ofs: offset within the first chip at which the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+ buf += thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return ret;
+}
+
+static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const u_char *buf, int len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long cmd_adr, timeo;
+ DECLARE_WAITQUEUE(wait, current);
+ int wbufsize, z;
+
+ /* M58LW064A requires bus alignment for buffer writes -- saw */
+ if (adr & (map_bankwidth(map)-1))
+ return -EINVAL;
+
+ wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ adr += chip->start;
+ cmd_adr = adr & ~(wbufsize-1);
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+ retry:
+
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: chip->state[%d]\n", __func__, chip->state);
+#endif
+ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us.
+ * Later, we can actually think about interrupting it
+ * if it's in FL_ERASING state.
+ * Not just yet, though.
+ */
+ switch (chip->state) {
+ case FL_READY:
+ break;
+
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
+#endif
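+ /* Fall through to the FL_STATUS case */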
+
+ case FL_STATUS:
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
+ status.x[0], map_read(map, cmd_adr).x[0]);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
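+ /* 0xE8 = Write to Buffer setup; the loop below polls the status
+ register until the chip reports that a buffer is available */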
+ map_write(map, CMD(0xe8), cmd_adr);
+ chip->state = FL_WRITING_TO_BUFFER;
+
+ z = 0;
+ for (;;) {
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+
+ if (++z > 100) {
+ /* Argh. Not ready for write to buffer */
+ DISABLE_VPP(map);
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
+ return -EIO;
+ }
+ }
+
+ /* Write the word count minus one, as the 0xE8 buffer-write command expects */
+ map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);
+
+ /* Write data */
+ for (z = 0; z < len;
+ z += map_bankwidth(map), buf += map_bankwidth(map)) {
+ map_word d;
+ d = map_word_load(map, buf);
+ map_write(map, d, adr+z);
+ }
+ /* GO GO GO */
+ map_write(map, CMD(0xd0), cmd_adr);
+ chip->state = FL_WRITING;
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(chip->buffer_write_time);
+ mutex_lock(&chip->mutex);
+
+ timeo = jiffies + (HZ/2);
+ z = 0;
+ for (;;) {
+ if (chip->state != FL_WRITING) {
+ /* Someone's suspended the write. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ /* clear status */
+ map_write(map, CMD(0x50), cmd_adr);
+ /* put back into read status register mode */
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ z++;
+ mutex_lock(&chip->mutex);
+ }
+ if (!z) {
+ chip->buffer_write_time--;
+ if (!chip->buffer_write_time)
+ chip->buffer_write_time++;
+ }
+ if (z > 1)
+ chip->buffer_write_time++;
+
+ /* Done and happy. */
+ DISABLE_VPP(map);
+ chip->state = FL_STATUS;
+
+ /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
+ if (map_word_bitsset(map, status, CMD(0x3a))) {
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
+#endif
+ /* clear status */
+ map_write(map, CMD(0x50), cmd_adr);
+ /* put back into read status register mode */
+ map_write(map, CMD(0x70), adr);
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
+ }
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+
+ return 0;
+}
+
+static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ int ret = 0;
+ int chipnum;
+ unsigned long ofs;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
+ printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
+ printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len);
+#endif
+
+ /* Write buffer is worth it only if more than one word to write... */
+ while (len > 0) {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
+
+ if (size > len)
+ size = len;
+
+ ret = do_write_buffer(map, &cfi->chips[chipnum],
+ ofs, buf, size);
+ if (ret)
+ return ret;
+
+ ofs += size;
+ buf += size;
+ (*retlen) += size;
+ len -= size;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Writev for ECC-Flashes is a little more complicated. We need to maintain
+ * a small buffer for this.
+ * XXX: If the buffer size is not a power of 2, this will break
+ */
+#define ECCBUF_SIZE (mtd->writesize)
+#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
+#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
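+/* Illustrative example, assuming mtd->writesize == 8: ECCBUF_DIV(13) == 8
+ * and ECCBUF_MOD(13) == 5, so a 13-byte vector is written as one aligned
+ * 8-byte chunk while the 5-byte tail is carried over in the bounce buffer. */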
+static int
+cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ unsigned long i;
+ size_t totlen = 0, thislen = 0;
+ int ret = 0;
+ size_t buflen = 0;
+ static char *buffer;
+
+ if (!ECCBUF_SIZE) {
+ /* We should fall back to a general writev implementation.
+ * Until that is written, just fail.
+ */
+ return -EIO;
+ }
+ buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ for (i=0; i<count; i++) {
+ size_t elem_len = vecs[i].iov_len;
+ void *elem_base = vecs[i].iov_base;
+ if (!elem_len) /* FIXME: Might be unnecessary. Check that */
+ continue;
+ if (buflen) { /* cut off head */
+ if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
+ memcpy(buffer+buflen, elem_base, elem_len);
+ buflen += elem_len;
+ continue;
+ }
+ memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
+ ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
+ buffer);
+ totlen += thislen;
+ if (ret || thislen != ECCBUF_SIZE)
+ goto write_error;
+ elem_len -= thislen-buflen;
+ elem_base += thislen-buflen;
+ to += ECCBUF_SIZE;
+ }
+ if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
+ ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
+ &thislen, elem_base);
+ totlen += thislen;
+ if (ret || thislen != ECCBUF_DIV(elem_len))
+ goto write_error;
+ to += thislen;
+ }
+ buflen = ECCBUF_MOD(elem_len); /* cut off tail */
+ if (buflen) {
+ memset(buffer, 0xff, ECCBUF_SIZE);
+ memcpy(buffer, elem_base + thislen, buflen);
+ }
+ }
+ if (buflen) { /* flush last page, even if not full */
+ /* This is sometimes intended behaviour, really */
+ ret = mtd_write(mtd, to, buflen, &thislen, buffer);
+ totlen += thislen;
+ if (ret || thislen != buflen)
+ goto write_error;
+ }
+write_error:
+ if (retlen)
+ *retlen = totlen;
+ kfree(buffer);
+ return ret;
+}
+
+
+static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long timeo;
+ int retries = 3;
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+retry:
+ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ case FL_READY:
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
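+ /* Fall through to the FL_STATUS case */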
+
+ case FL_STATUS:
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
+ /* Clear the status register first */
+ map_write(map, CMD(0x50), adr);
+
+ /* Now erase: 0x20 = Block Erase setup, 0xD0 = confirm */
+ map_write(map, CMD(0x20), adr);
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_ERASING;
+
+ mutex_unlock(&chip->mutex);
+ msleep(1000);
+ mutex_lock(&chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+
+ timeo = jiffies + (HZ*20);
+ for (;;) {
+ if (chip->state != FL_ERASING) {
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ*20); /* FIXME */
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+ mutex_unlock(&chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ }
+
+ DISABLE_VPP(map);
+ ret = 0;
+
+ /* We've broken this before. It doesn't hurt to be safe */
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ status = map_read(map, adr);
+
+ /* check for lock bit */
+ if (map_word_bitsset(map, status, CMD(0x3a))) {
+ unsigned char chipstatus = status.x[0];
+ if (!map_word_equal(map, status, CMD(chipstatus))) {
+ int i, w;
+ for (w=0; w<map_words(map); w++) {
+ for (i = 0; i<cfi_interleave(cfi); i++) {
+ chipstatus |= status.x[w] >> (cfi->device_type * 8);
+ }
+ }
+ printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
+ status.x[0], chipstatus);
+ }
+ /* Reset the error bits */
+ map_write(map, CMD(0x50), adr);
+ map_write(map, CMD(0x70), adr);
+
+ if ((chipstatus & 0x30) == 0x30) {
+ printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
+ ret = -EIO;
+ } else if (chipstatus & 0x02) {
+ /* Protection bit set */
+ ret = -EROFS;
+ } else if (chipstatus & 0x8) {
+ /* Voltage */
+ printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
+ ret = -EIO;
+ } else if (chipstatus & 0x20) {
+ if (retries--) {
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
+ timeo = jiffies + HZ;
+ chip->state = FL_STATUS;
+ mutex_unlock(&chip->mutex);
+ goto retry;
+ }
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
+ ret = -EIO;
+ }
+ }
+
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int cfi_staa_erase_varsize(struct mtd_info *mtd,
+ struct erase_info *instr)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr, len;
+ int chipnum, ret = 0;
+ int i, first;
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+
+ /* Check that both start and end of the requested erase are
+ * aligned with the erasesize at the appropriate addresses.
+ */
+
+ i = 0;
+
+ /* Skip all erase regions which end before the start of
+ the requested erase. Actually, to save on the calculations,
+ we skip to the first erase region which starts after the
+ start of the requested erase, and then go back one.
+ */
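+ /* Illustrative example: with region 0 = 8 x 8KiB blocks at offset 0
+ and region 1 = 63 x 64KiB blocks at 0x10000, a request starting at
+ 0x10000 leaves i == 1, so the 64KiB alignment check below applies. */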
+
+ while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
+ i++;
+ i--;
+
+ /* OK, now i is pointing at the erase region in which this
+ erase request starts. Check the start of the requested
+ erase range is aligned with the erase size which is in
+ effect here.
+ */
+
+ if (instr->addr & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ /* Remember the erase region we start on */
+ first = i;
+
+ /* Next, check that the end of the requested erase is aligned
+ * with the erase region at that address.
+ */
+
+ while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
+ i++;
+
+ /* As before, drop back one to point at the region in which
+ the address actually falls
+ */
+ i--;
+
+ if ((instr->addr + instr->len) & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ chipnum = instr->addr >> cfi->chipshift;
+ adr = instr->addr - (chipnum << cfi->chipshift);
+ len = instr->len;
+
+ i=first;
+
+ while(len) {
+ ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
+
+ if (ret)
+ return ret;
+
+ adr += regions[i].erasesize;
+ len -= regions[i].erasesize;
+
+ if (adr % (1 << cfi->chipshift) ==
+ (((unsigned long)regions[i].offset +
+ (regions[i].erasesize * regions[i].numblocks)) %
+ (1 << cfi->chipshift)))
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+static void cfi_staa_sync (struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ retry:
+ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ chip->oldstate = chip->state;
+ chip->state = FL_SYNCING;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
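+ /* Fall through to the FL_SYNCING case */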
+ case FL_SYNCING:
+ mutex_unlock(&chip->mutex);
+ break;
+
+ default:
+ /* Not an idle state */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+
+ goto retry;
+ }
+ }
+
+ /* Unlock the chips again */
+
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_SYNCING) {
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+}
+
+static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+retry:
+ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ case FL_READY:
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
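+ /* Fall through to the FL_STATUS case */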
+
+ case FL_STATUS:
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
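+ /* 0x60 = Block Lock setup, 0x01 = Set Block Lock Bit confirm */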
+ map_write(map, CMD(0x60), adr);
+ map_write(map, CMD(0x01), adr);
+ chip->state = FL_LOCKING;
+
+ mutex_unlock(&chip->mutex);
+ msleep(1000);
+ mutex_lock(&chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+
+ timeo = jiffies + (HZ*2);
+ for (;;) {
+
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+ mutex_unlock(&chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ }
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr;
+ int chipnum, ret = 0;
+#ifdef DEBUG_LOCK_BITS
+ int ofs_factor = cfi->interleave * cfi->device_type;
+#endif
+
+ if (ofs & (mtd->erasesize - 1))
+ return -EINVAL;
+
+ if (len & (mtd->erasesize -1))
+ return -EINVAL;
+
+ chipnum = ofs >> cfi->chipshift;
+ adr = ofs - (chipnum << cfi->chipshift);
+
+ while(len) {
+
+#ifdef DEBUG_LOCK_BITS
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+#endif
+
+ ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
+
+#ifdef DEBUG_LOCK_BITS
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+#endif
+
+ if (ret)
+ return ret;
+
+ adr += mtd->erasesize;
+ len -= mtd->erasesize;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+ }
+ return 0;
+}
+static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+retry:
+ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ case FL_READY:
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
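+ /* Fall through to the FL_STATUS case */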
+
+ case FL_STATUS:
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
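+ /* 0x60 = Block Lock setup, 0xD0 = Clear Block Lock Bits confirm */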
+ map_write(map, CMD(0x60), adr);
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_UNLOCKING;
+
+ mutex_unlock(&chip->mutex);
+ msleep(1000);
+ mutex_lock(&chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+
+ timeo = jiffies + (HZ*2);
+ for (;;) {
+
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+ mutex_unlock(&chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ }
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr;
+ int chipnum, ret = 0;
+#ifdef DEBUG_LOCK_BITS
+ int ofs_factor = cfi->interleave * cfi->device_type;
+#endif
+
+ chipnum = ofs >> cfi->chipshift;
+ adr = ofs - (chipnum << cfi->chipshift);
+
+#ifdef DEBUG_LOCK_BITS
+ {
+ unsigned long temp_adr = adr;
+ unsigned long temp_len = len;
+
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ while (temp_len) {
+ printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
+ temp_adr += mtd->erasesize;
+ temp_len -= mtd->erasesize;
+ }
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ }
+#endif
+
+ ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
+
+#ifdef DEBUG_LOCK_BITS
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+#endif
+
+ return ret;
+}
+
+static int cfi_staa_suspend(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ chip->oldstate = chip->state;
+ chip->state = FL_PM_SUSPENDED;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
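+ /* Fall through to the FL_PM_SUSPENDED case */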
+ case FL_PM_SUSPENDED:
+ break;
+
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+
+ if (ret) {
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ /* No need to force it into a known state here,
+ because we're returning failure, and it didn't
+ get power cycled */
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+ }
+
+ return ret;
+}
+
+static void cfi_staa_resume(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+
+ for (i=0; i<cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ /* Go to known state. Chip may have been power cycled */
+ if (chip->state == FL_PM_SUSPENDED) {
+ map_write(map, CMD(0xFF), 0);
+ chip->state = FL_READY;
+ wake_up(&chip->wq);
+ }
+
+ mutex_unlock(&chip->mutex);
+ }
+}
+
+static void cfi_staa_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ kfree(cfi->cmdset_priv);
+ kfree(cfi);
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
new file mode 100644
index 000000000..e8d016449
--- /dev/null
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -0,0 +1,420 @@
+/*
+ Common Flash Interface probe code.
+ (C) 2000 Red Hat. GPL'd.
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/xip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/gen_probe.h>
+
+//#define DEBUG_CFI
+
+#ifdef DEBUG_CFI
+static void print_cfi_ident(struct cfi_ident *);
+#endif
+
+static int cfi_probe_chip(struct map_info *map, __u32 base,
+ unsigned long *chip_map, struct cfi_private *cfi);
+static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi);
+
+struct mtd_info *cfi_probe(struct map_info *map);
+
+#ifdef CONFIG_MTD_XIP
+
+/* only needed for short periods, so this is rather simple */
+#define xip_disable() local_irq_disable()
+
+#define xip_allowed(base, map) \
+do { \
+ (void) map_read(map, base); \
+ xip_iprefetch(); \
+ local_irq_enable(); \
+} while (0)
+
+#define xip_enable(base, map, cfi) \
+do { \
+ cfi_qry_mode_off(base, map, cfi); \
+ xip_allowed(base, map); \
+} while (0)
+
+#define xip_disable_qry(base, map, cfi) \
+do { \
+ xip_disable(); \
+ cfi_qry_mode_on(base, map, cfi); \
+} while (0)
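+
+/* While a chip is in query mode the flash array - and with it any XIP
+ kernel text - is unreadable, so interrupts must stay disabled until
+ xip_enable() puts the device back into read mode. */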
+
+#else
+
+#define xip_disable() do { } while (0)
+#define xip_allowed(base, map) do { } while (0)
+#define xip_enable(base, map, cfi) do { } while (0)
+#define xip_disable_qry(base, map, cfi) do { } while (0)
+
+#endif
+
+/* Check for QRY at this base address.
+ Returns 1 if a new chip was detected and registered in the chip map,
+ 0 otherwise (past end of map, no QRY, or an alias of an earlier chip).
+ */
+
+static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
+ unsigned long *chip_map, struct cfi_private *cfi)
+{
+ int i;
+
+ if ((base + 0) >= map->size) {
+ printk(KERN_NOTICE
+ "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n",
+ (unsigned long)base, map->size -1);
+ return 0;
+ }
+ if ((base + 0xff) >= map->size) {
+ printk(KERN_NOTICE
+ "Probe at base[0x55](0x%08lx) past the end of the map(0x%08lx)\n",
+ (unsigned long)base + 0x55, map->size -1);
+ return 0;
+ }
+
+ xip_disable();
+ if (!cfi_qry_mode_on(base, map, cfi)) {
+ xip_enable(base, map, cfi);
+ return 0;
+ }
+
+ if (!cfi->numchips) {
+ /* This is the first time we're called. Set up the CFI
+ stuff accordingly and return */
+ return cfi_chip_setup(map, cfi);
+ }
+
+ /* Check each previous chip to see if it's an alias */
+ for (i=0; i < (base >> cfi->chipshift); i++) {
+ unsigned long start;
+ if(!test_bit(i, chip_map)) {
+ /* Skip location; no valid chip at this address */
+ continue;
+ }
+ start = i << cfi->chipshift;
+ /* This chip should be in read mode if it's one
+ we've already touched. */
+ if (cfi_qry_present(map, start, cfi)) {
+ /* Eep. This chip also had the QRY marker.
+ * Is it an alias for the new one? */
+ cfi_qry_mode_off(start, map, cfi);
+
+ /* If the QRY marker goes away, it's an alias */
+ if (!cfi_qry_present(map, start, cfi)) {
+ xip_allowed(base, map);
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, start);
+ return 0;
+ }
+ /* Yes, it's actually got QRY for data. Most
+ * unfortunate. Stick the new chip in read mode
+ * too and if it's the same, assume it's an alias. */
+ /* FIXME: Use other modes to do a proper check */
+ cfi_qry_mode_off(base, map, cfi);
+
+ if (cfi_qry_present(map, base, cfi)) {
+ xip_allowed(base, map);
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, start);
+ return 0;
+ }
+ }
+ }
+
+ /* OK, if we got to here, then none of the previous chips appear to
+ be aliases for the current one. */
+ set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
+ cfi->numchips++;
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
+ map->name, cfi->interleave, cfi->device_type*8, base,
+ map->bankwidth*8);
+
+ return 1;
+}
+
+static int __xipram cfi_chip_setup(struct map_info *map,
+ struct cfi_private *cfi)
+{
+ int ofs_factor = cfi->interleave*cfi->device_type;
+ __u32 base = 0;
+ int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
+ int i;
+ int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
+
+ xip_enable(base, map, cfi);
+#ifdef DEBUG_CFI
+ printk("Number of erase regions: %d\n", num_erase_regions);
+#endif
+ if (!num_erase_regions)
+ return 0;
+
+ cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ if (!cfi->cfiq)
+ return 0;
+
+ memset(cfi->cfiq,0,sizeof(struct cfi_ident));
+
+ cfi->cfi_mode = CFI_MODE_CFI;
+
+ cfi->sector_erase_cmd = CMD(0x30);
+
+ /* Read the CFI info structure */
+ xip_disable_qry(base, map, cfi);
+ for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
+ ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
+
+ /* Do any necessary byteswapping */
+ cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
+
+ cfi->cfiq->P_ADR = le16_to_cpu(cfi->cfiq->P_ADR);
+ cfi->cfiq->A_ID = le16_to_cpu(cfi->cfiq->A_ID);
+ cfi->cfiq->A_ADR = le16_to_cpu(cfi->cfiq->A_ADR);
+ cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc);
+ cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize);
+
+#ifdef DEBUG_CFI
+ /* Dump the information therein */
+ print_cfi_ident(cfi->cfiq);
+#endif
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]);
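+ /* Each EraseRegionInfo word encodes bits 0-15 = number of
+ blocks - 1 and bits 16-31 = block size / 256 bytes */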
+
+#ifdef DEBUG_CFI
+ printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n",
+ i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
+ (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1);
+#endif
+ }
+
+ if (cfi->cfiq->P_ID == P_ID_SST_OLD) {
+ addr_unlock1 = 0x5555;
+ addr_unlock2 = 0x2AAA;
+ }
+
+ /*
+ * Note we put the device back into Read Mode BEFORE going into Auto
+ * Select Mode, as some devices support nesting of modes, others
+ * don't. This way should always work.
+ * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
+ * so should be treated as nops or illegal (and so put the device
+ * back into Read Mode, which is a nop in this case).
+ */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query16(map, base);
+ cfi->id = cfi_read_query16(map, base + ofs_factor);
+
+ /* Get AMD/Spansion extended JEDEC ID */
+ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
+ cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
+ cfi_read_query(map, base + 0xf * ofs_factor);
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank. Manufacturer ID %#08x Chip ID %#08x\n",
+ map->name, cfi->interleave, cfi->device_type*8, base,
+ map->bankwidth*8, cfi->mfr, cfi->id);
+
+ return 1;
+}
+
+#ifdef DEBUG_CFI
+static char *vendorname(__u16 vendor)
+{
+ switch (vendor) {
+ case P_ID_NONE:
+ return "None";
+
+ case P_ID_INTEL_EXT:
+ return "Intel/Sharp Extended";
+
+ case P_ID_AMD_STD:
+ return "AMD/Fujitsu Standard";
+
+ case P_ID_INTEL_STD:
+ return "Intel/Sharp Standard";
+
+ case P_ID_AMD_EXT:
+ return "AMD/Fujitsu Extended";
+
+ case P_ID_WINBOND:
+ return "Winbond Standard";
+
+ case P_ID_ST_ADV:
+ return "ST Advanced";
+
+ case P_ID_MITSUBISHI_STD:
+ return "Mitsubishi Standard";
+
+ case P_ID_MITSUBISHI_EXT:
+ return "Mitsubishi Extended";
+
+ case P_ID_SST_PAGE:
+ return "SST Page Write";
+
+ case P_ID_SST_OLD:
+ return "SST 39VF160x/39VF320x";
+
+ case P_ID_INTEL_PERFORMANCE:
+ return "Intel Performance Code";
+
+ case P_ID_INTEL_DATA:
+ return "Intel Data";
+
+ case P_ID_RESERVED:
+ return "Not Allowed / Reserved for Future Use";
+
+ default:
+ return "Unknown";
+ }
+}
+
+
+static void print_cfi_ident(struct cfi_ident *cfip)
+{
+#if 0
+ if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') {
+ printk("Invalid CFI ident structure.\n");
+ return;
+ }
+#endif
+ printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID));
+ if (cfip->P_ADR)
+ printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR);
+ else
+ printk("No Primary Algorithm Table\n");
+
+ printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID));
+ if (cfip->A_ADR)
+ printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR);
+ else
+ printk("No Alternate Algorithm Table\n");
+
+
+ printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
+ printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
+ if (cfip->VppMin) {
+ printk("Vpp Minimum: %2d.%d V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf);
+ printk("Vpp Maximum: %2d.%d V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf);
+ }
+ else
+ printk("No Vpp line\n");
+
+ printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp);
+ printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp));
+
+ if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) {
+ printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp);
+ printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp));
+ }
+ else
+ printk("Full buffer write not supported\n");
+
+ printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp);
+ printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp));
+ if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) {
+ printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp);
+ printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp));
+ }
+ else
+ printk("Chip erase not supported\n");
+
+ printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
+ printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
+ switch(cfip->InterfaceDesc) {
+ case CFI_INTERFACE_X8_ASYNC:
+ printk(" - x8-only asynchronous interface\n");
+ break;
+
+ case CFI_INTERFACE_X16_ASYNC:
+ printk(" - x16-only asynchronous interface\n");
+ break;
+
+ case CFI_INTERFACE_X8_BY_X16_ASYNC:
+ printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
+ break;
+
+ case CFI_INTERFACE_X32_ASYNC:
+ printk(" - x32-only asynchronous interface\n");
+ break;
+
+ case CFI_INTERFACE_X16_BY_X32_ASYNC:
+ printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
+ break;
+
+ case CFI_INTERFACE_NOT_ALLOWED:
+ printk(" - Not Allowed / Reserved\n");
+ break;
+
+ default:
+ printk(" - Unknown\n");
+ break;
+ }
+
+ printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize);
+ printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions);
+
+}
+#endif /* DEBUG_CFI */
+
+static struct chip_probe cfi_chip_probe = {
+ .name = "CFI",
+ .probe_chip = cfi_probe_chip
+};
+
+struct mtd_info *cfi_probe(struct map_info *map)
+{
+ /*
+ * Just use the generic probe stuff to call our CFI-specific
+ * chip_probe routine in all the possible permutations, etc.
+ */
+ return mtd_do_chip_probe(map, &cfi_chip_probe);
+}
+
+static struct mtd_chip_driver cfi_chipdrv = {
+ .probe = cfi_probe,
+ .name = "cfi_probe",
+ .module = THIS_MODULE
+};
+
+static int __init cfi_probe_init(void)
+{
+ register_mtd_chip_driver(&cfi_chipdrv);
+ return 0;
+}
+
+static void __exit cfi_probe_exit(void)
+{
+ unregister_mtd_chip_driver(&cfi_chipdrv);
+}
+
+module_init(cfi_probe_init);
+module_exit(cfi_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
+MODULE_DESCRIPTION("Probe code for CFI-compliant flash chips");
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
new file mode 100644
index 000000000..09c79bd0b
--- /dev/null
+++ b/drivers/mtd/chips/cfi_util.c
@@ -0,0 +1,251 @@
+/*
+ * Common Flash Interface support:
+ * Generic utility functions not dependent on command set
+ *
+ * Copyright (C) 2002 Red Hat
+ * Copyright (C) 2003 STMicroelectronics Limited
+ *
+ * This code is covered by the GPL.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/xip.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+
+int __xipram cfi_qry_present(struct map_info *map, __u32 base,
+ struct cfi_private *cfi)
+{
+ int osf = cfi->interleave * cfi->device_type; /* scale factor */
+ map_word val[3];
+ map_word qry[3];
+
+ qry[0] = cfi_build_cmd('Q', map, cfi);
+ qry[1] = cfi_build_cmd('R', map, cfi);
+ qry[2] = cfi_build_cmd('Y', map, cfi);
+
+ val[0] = map_read(map, base + osf*0x10);
+ val[1] = map_read(map, base + osf*0x11);
+ val[2] = map_read(map, base + osf*0x12);
+
+ if (!map_word_equal(map, qry[0], val[0]))
+ return 0;
+
+ if (!map_word_equal(map, qry[1], val[1]))
+ return 0;
+
+ if (!map_word_equal(map, qry[2], val[2]))
+ return 0;
+
+ return 1; /* "QRY" found */
+}
+EXPORT_SYMBOL_GPL(cfi_qry_present);
+
+int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
+ struct cfi_private *cfi)
+{
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
+ /* QRY not found: probably we are dealing with some odd CFI chips */
+ /* Some revisions of some old Intel chips? */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
+ /* ST M29DW chips */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
+ /* some old SST chips, e.g. 39VF160x/39VF320x */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
+ /* SST 39VF640xB */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
+ /* QRY not found */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cfi_qry_mode_on);
+
+void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
+ struct cfi_private *cfi)
+{
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+ /* M29W128G flashes require an additional reset command
+ when exiting QRY mode */
+ if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+}
+EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
+
+struct cfi_extquery *
+__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ __u32 base = 0; // cfi->chips[0].start;
+ int ofs_factor = cfi->interleave * cfi->device_type;
+ int i;
+ struct cfi_extquery *extp = NULL;
+
+ if (!adr)
+ goto out;
+
+ printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);
+
+ extp = kmalloc(size, GFP_KERNEL);
+ if (!extp)
+ goto out;
+
+#ifdef CONFIG_MTD_XIP
+ local_irq_disable();
+#endif
+
+ /* Switch it into Query Mode */
+ cfi_qry_mode_on(base, map, cfi);
+ /* Read in the Extended Query Table */
+ for (i=0; i<size; i++) {
+ ((unsigned char *)extp)[i] =
+ cfi_read_query(map, base+((adr+i)*ofs_factor));
+ }
+
+ /* Make sure it returns to read mode */
+ cfi_qry_mode_off(base, map, cfi);
+
+#ifdef CONFIG_MTD_XIP
+ (void) map_read(map, base);
+ xip_iprefetch();
+ local_irq_enable();
+#endif
+
+ out: return extp;
+}
+
+EXPORT_SYMBOL(cfi_read_pri);
+
+void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_fixup *f;
+
+ for (f=fixups; f->fixup; f++) {
+ if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
+ ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
+ f->fixup(mtd);
+ }
+ }
+}
+
+EXPORT_SYMBOL(cfi_fixup);
+
+int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
+ loff_t ofs, size_t len, void *thunk)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr;
+ int chipnum, ret = 0;
+ int i, first;
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+
+ /* Check that both start and end of the requested erase are
+ * aligned with the erasesize at the appropriate addresses.
+ */
+
+ i = 0;
+
+ /* Skip all erase regions which end before the start of
+ the requested erase. Actually, to save on the calculations,
+ we skip to the first erase region which starts after the
+ start of the requested erase, and then go back one.
+ */
+
+ while (i < mtd->numeraseregions && ofs >= regions[i].offset)
+ i++;
+ i--;
+
+ /* OK, now i is pointing at the erase region in which this
+ erase request starts. Check the start of the requested
+ erase range is aligned with the erase size which is in
+ effect here.
+ */
+
+ if (ofs & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ /* Remember the erase region we start on */
+ first = i;
+
+ /* Next, check that the end of the requested erase is aligned
+ * with the erase region at that address.
+ */
+
+ while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
+ i++;
+
+ /* As before, drop back one to point at the region in which
+ the address actually falls
+ */
+ i--;
+
+ if ((ofs + len) & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ chipnum = ofs >> cfi->chipshift;
+ adr = ofs - (chipnum << cfi->chipshift);
+
+ i=first;
+
+ while(len) {
+ int size = regions[i].erasesize;
+
+ ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
+
+ if (ret)
+ return ret;
+
+ adr += size;
+ ofs += size;
+ len -= size;
+
+ if (ofs == regions[i].offset + size * regions[i].numblocks)
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(cfi_varsize_frob);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
new file mode 100644
index 000000000..0bbc61ba9
--- /dev/null
+++ b/drivers/mtd/chips/chipreg.c
@@ -0,0 +1,104 @@
+/*
+ * Registration for chip drivers
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+
+static DEFINE_SPINLOCK(chip_drvs_lock);
+static LIST_HEAD(chip_drvs_list);
+
+void register_mtd_chip_driver(struct mtd_chip_driver *drv)
+{
+ spin_lock(&chip_drvs_lock);
+ list_add(&drv->list, &chip_drvs_list);
+ spin_unlock(&chip_drvs_lock);
+}
+
+void unregister_mtd_chip_driver(struct mtd_chip_driver *drv)
+{
+ spin_lock(&chip_drvs_lock);
+ list_del(&drv->list);
+ spin_unlock(&chip_drvs_lock);
+}
+
+static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
+{
+ struct list_head *pos;
+ struct mtd_chip_driver *ret = NULL, *this;
+
+ spin_lock(&chip_drvs_lock);
+
+ list_for_each(pos, &chip_drvs_list) {
+ this = list_entry(pos, typeof(*this), list);
+
+ if (!strcmp(this->name, name)) {
+ ret = this;
+ break;
+ }
+ }
+ if (ret && !try_module_get(ret->module))
+ ret = NULL;
+
+ spin_unlock(&chip_drvs_lock);
+
+ return ret;
+}
+
+ /* Hide all the horrid details, like some silly person taking
+ get_module_symbol() away from us, from the caller. */
+
+struct mtd_info *do_map_probe(const char *name, struct map_info *map)
+{
+ struct mtd_chip_driver *drv;
+ struct mtd_info *ret;
+
+ drv = get_mtd_chip_driver(name);
+
+ if (!drv && !request_module("%s", name))
+ drv = get_mtd_chip_driver(name);
+
+ if (!drv)
+ return NULL;
+
+ ret = drv->probe(map);
+
+ /* We decrease the use count here. It may have been a
+ probe-only module, which is no longer required from this
+ point, having given us a handle on (and increased the use
+ count of) the actual driver code.
+ */
+ module_put(drv->module);
+
+ return ret;
+}
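+
+/*
+ * Typical use from a map driver (illustrative sketch; "my_map" is a
+ * hypothetical, already-initialised struct map_info):
+ *
+ * mtd = do_map_probe("cfi_probe", &my_map);
+ * if (!mtd)
+ * mtd = do_map_probe("jedec_probe", &my_map);
+ */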
+/*
+ * Destroy an MTD device which was created for a map device.
+ * Make sure the MTD device is already unregistered before calling this
+ */
+void map_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+
+ if (map->fldrv->destroy)
+ map->fldrv->destroy(mtd);
+
+ module_put(map->fldrv->module);
+
+ kfree(mtd);
+}
+
+EXPORT_SYMBOL(register_mtd_chip_driver);
+EXPORT_SYMBOL(unregister_mtd_chip_driver);
+EXPORT_SYMBOL(do_map_probe);
+EXPORT_SYMBOL(map_destroy);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Core routines for registering and invoking MTD chip drivers");
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
new file mode 100644
index 000000000..800b0e853
--- /dev/null
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -0,0 +1,107 @@
+#ifndef FWH_LOCK_H
+#define FWH_LOCK_H
+
+
+enum fwh_lock_state {
+ FWH_UNLOCKED = 0,
+ FWH_DENY_WRITE = 1,
+ FWH_IMMUTABLE = 2,
+ FWH_DENY_READ = 4,
+};
+
+struct fwh_xxlock_thunk {
+ enum fwh_lock_state val;
+ flstate_t state;
+};
+
+
+#define FWH_XXLOCK_ONEBLOCK_LOCK ((struct fwh_xxlock_thunk){ FWH_DENY_WRITE, FL_LOCKING})
+#define FWH_XXLOCK_ONEBLOCK_UNLOCK ((struct fwh_xxlock_thunk){ FWH_UNLOCKED, FL_UNLOCKING})
+
+/*
+ * This locking/unlock is specific to firmware hub parts. Only one
+ * is known that supports the Intel command set. Firmware
+ * hub parts cannot be interleaved as they are on the LPC bus
+ * so this code has not been tested with interleaved chips,
+ * and will likely fail in that context.
+ */
+static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct fwh_xxlock_thunk *xxlt = (struct fwh_xxlock_thunk *)thunk;
+ int ret;
+
+ /* Refuse the operation if we cannot look behind the chip */
+ if (chip->start < 0x400000) {
+ pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
+ __func__, chip->start );
+ return -EIO;
+ }
+ /*
+ * lock block registers:
+ * - on 64k boundaries, and
+ * - bit 1 set high
+ * - block lock registers are 4MiB lower - overflow subtract (danger)
+ *
+ * The address manipulation is first done on the logical address
+ * which is 0 at the start of the chip, and then the offset of
+ * the individual chip is added to it. In any other order a weird
+ * map offset could cause problems.
+ */
+ adr = (adr & ~0xffffUL) | 0x2;
+ adr += chip->start - 0x400000;
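+ /*
+ * Illustrative example: for adr 0x12345 on a chip mapped at
+ * 0xffc00000, this yields (0x12345 & ~0xffff) | 0x2 = 0x10002,
+ * plus 0xffc00000 - 0x400000, i.e. register address 0xff810002.
+ */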
+
+ /*
+ * This is easy because these are writes to registers and not writes
+ * to flash memory - that means that we don't have to check status
+ * and timeout.
+ */
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ chip->oldstate = chip->state;
+ chip->state = xxlt->state;
+ map_write(map, CMD(xxlt->val), adr);
+
+ /* Done and happy. */
+ chip->state = chip->oldstate;
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+
+static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
+ (void *)&FWH_XXLOCK_ONEBLOCK_LOCK);
+
+ return ret;
+}
+
+
+static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
+ (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK);
+
+ return ret;
+}
+
+static void fixup_use_fwh_lock(struct mtd_info *mtd)
+{
+ printk(KERN_NOTICE "using fwh lock/unlock method\n");
+ /* Setup for the chips with the fwh lock method */
+ mtd->_lock = fwh_lock_varsize;
+ mtd->_unlock = fwh_unlock_varsize;
+}
+#endif /* FWH_LOCK_H */
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
new file mode 100644
index 000000000..b57ceea21
--- /dev/null
+++ b/drivers/mtd/chips/gen_probe.c
@@ -0,0 +1,264 @@
+/*
+ * Routines common to all CFI-type probes.
+ * (C) 2001-2003 Red Hat, Inc.
+ * GPL'd
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/gen_probe.h>
+
+static struct mtd_info *check_cmd_set(struct map_info *, int);
+static struct cfi_private *genprobe_ident_chips(struct map_info *map,
+ struct chip_probe *cp);
+static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
+ struct cfi_private *cfi);
+
+struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
+{
+ struct mtd_info *mtd = NULL;
+ struct cfi_private *cfi;
+
+ /* First probe the map to see if we have CFI stuff there. */
+ cfi = genprobe_ident_chips(map, cp);
+
+ if (!cfi)
+ return NULL;
+
+ map->fldrv_priv = cfi;
+ /* OK we liked it. Now find a driver for the command set it talks */
+
+ mtd = check_cmd_set(map, 1); /* First the primary cmdset */
+ if (!mtd)
+ mtd = check_cmd_set(map, 0); /* Then the secondary */
+
+ if (mtd) {
+ if (mtd->size > map->size) {
+ printk(KERN_WARNING "Reducing visibility of %ldKiB chip to %ldKiB\n",
+ (unsigned long)mtd->size >> 10,
+ (unsigned long)map->size >> 10);
+ mtd->size = map->size;
+ }
+ return mtd;
+ }
+
+ printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n");
+
+ kfree(cfi->cfiq);
+ kfree(cfi);
+ map->fldrv_priv = NULL;
+ return NULL;
+}
+EXPORT_SYMBOL(mtd_do_chip_probe);
+
+
+static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
+{
+ struct cfi_private cfi;
+ struct cfi_private *retcfi;
+ unsigned long *chip_map;
+ int i, j, mapsize;
+ int max_chips;
+
+ memset(&cfi, 0, sizeof(cfi));
+
+ /* Call the probetype-specific code with all permutations of
+ interleave and device type, etc. */
+ if (!genprobe_new_chip(map, cp, &cfi)) {
+ /* The probe didn't like it */
+ pr_debug("%s: Found no %s device at location zero\n",
+ cp->name, map->name);
+ return NULL;
+ }
+
+#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD
+ probe routines won't ever return a broken CFI structure anyway,
+ because they make them up themselves.
+ */
+ if (cfi.cfiq->NumEraseRegions == 0) {
+ printk(KERN_WARNING "Number of erase regions is zero\n");
+ kfree(cfi.cfiq);
+ return NULL;
+ }
+#endif
+ cfi.chipshift = cfi.cfiq->DevSize;
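+ /* DevSize is the log2 of the device size in bytes; each doubling of
+ the interleave doubles the address span of one chip position, hence
+ the chipshift adjustments below */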
+
+ if (cfi_interleave_is_1(&cfi)) {
+ ;
+ } else if (cfi_interleave_is_2(&cfi)) {
+ cfi.chipshift++;
+ } else if (cfi_interleave_is_4((&cfi))) {
+ cfi.chipshift += 2;
+ } else if (cfi_interleave_is_8(&cfi)) {
+ cfi.chipshift += 3;
+ } else {
+ BUG();
+ }
+
+ cfi.numchips = 1;
+
+ /*
+ * Allocate memory for bitmap of valid chips.
+ * Align bitmap storage size to full byte.
+ */
+ max_chips = map->size >> cfi.chipshift;
+ if (!max_chips) {
+ printk(KERN_WARNING "NOR chip too large to fit in mapping. Attempting to cope...\n");
+ max_chips = 1;
+ }
+
+ mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);
+ chip_map = kzalloc(mapsize, GFP_KERNEL);
+ if (!chip_map) {
+ kfree(cfi.cfiq);
+ return NULL;
+ }
+
+ set_bit(0, chip_map); /* Mark first chip valid */
+
+ /*
+ * Now probe for other chips, checking sensibly for aliases while
+ * we're at it. The new_chip probe above should have left the first
+ * chip in read mode.
+ */
+
+ for (i = 1; i < max_chips; i++) {
+ cp->probe_chip(map, i << cfi.chipshift, chip_map, &cfi);
+ }
+
+ /*
+ * Now allocate the space for the structures we need to return to
+ * our caller, and copy the appropriate data into them.
+ */
+
+ retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL);
+
+ if (!retcfi) {
+ kfree(cfi.cfiq);
+ kfree(chip_map);
+ return NULL;
+ }
+
+ memcpy(retcfi, &cfi, sizeof(cfi));
+ memset(&retcfi->chips[0], 0, sizeof(struct flchip) * cfi.numchips);
+
+ for (i = 0, j = 0; (j < cfi.numchips) && (i < max_chips); i++) {
+ if(test_bit(i, chip_map)) {
+ struct flchip *pchip = &retcfi->chips[j++];
+
+ pchip->start = (i << cfi.chipshift);
+ pchip->state = FL_READY;
+ init_waitqueue_head(&pchip->wq);
+ mutex_init(&pchip->mutex);
+ }
+ }
+
+ kfree(chip_map);
+ return retcfi;
+}
+
+
+static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
+ struct cfi_private *cfi)
+{
+ int min_chips = (map_bankwidth(map)/4?:1); /* At least bankwidth/4 chips, since devices are at most 32 bits wide */
+ int max_chips = map_bankwidth(map); /* At most one chip per bus byte, since devices are at least 8 bits wide */
+ int nr_chips, type;
+
+ for (nr_chips = max_chips; nr_chips >= min_chips; nr_chips >>= 1) {
+
+ if (!cfi_interleave_supported(nr_chips))
+ continue;
+
+ cfi->interleave = nr_chips;
+
+ /* Minimum device size. Don't look for one 8-bit device
+ in a 16-bit bus, etc. */
+ type = map_bankwidth(map) / nr_chips;
+
+ for (; type <= CFI_DEVICETYPE_X32; type<<=1) {
+ cfi->device_type = type;
+
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+ }
+ }
+ return 0;
+}
+
+typedef struct mtd_info *cfi_cmdset_fn_t(struct map_info *, int);
+
+extern cfi_cmdset_fn_t cfi_cmdset_0001;
+extern cfi_cmdset_fn_t cfi_cmdset_0002;
+extern cfi_cmdset_fn_t cfi_cmdset_0020;
+
+static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
+ int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ __u16 type = primary ? cfi->cfiq->P_ID : cfi->cfiq->A_ID;
+#ifdef CONFIG_MODULES
+ char probename[sizeof(VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X))];
+ cfi_cmdset_fn_t *probe_function;
+
+ sprintf(probename, VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X), type);
+
+ probe_function = __symbol_get(probename);
+ if (!probe_function) {
+ request_module("cfi_cmdset_%4.4X", type);
+ probe_function = __symbol_get(probename);
+ }
+
+ if (probe_function) {
+ struct mtd_info *mtd;
+
+ mtd = (*probe_function)(map, primary);
+ /* If it was happy, it'll have increased its own use count */
+ symbol_put_addr(probe_function);
+ return mtd;
+ }
+#endif
+ printk(KERN_NOTICE "Support for command set %04X not present\n", type);
+
+ return NULL;
+}
+
+static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ __u16 type = primary ? cfi->cfiq->P_ID : cfi->cfiq->A_ID;
+
+ if (type == P_ID_NONE || type == P_ID_RESERVED)
+ return NULL;
+
+ switch (type) {
+ /* We need these for the !CONFIG_MODULES case,
+ because symbol_get() doesn't work there */
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ case P_ID_INTEL_EXT:
+ case P_ID_INTEL_STD:
+ case P_ID_INTEL_PERFORMANCE:
+ return cfi_cmdset_0001(map, primary);
+#endif
+#ifdef CONFIG_MTD_CFI_AMDSTD
+ case P_ID_AMD_STD:
+ case P_ID_SST_OLD:
+ case P_ID_WINBOND:
+ return cfi_cmdset_0002(map, primary);
+#endif
+#ifdef CONFIG_MTD_CFI_STAA
+ case P_ID_ST_ADV:
+ return cfi_cmdset_0020(map, primary);
+#endif
+ default:
+ return cfi_cmdset_unknown(map, primary);
+ }
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Helper routines for flash chip probe code");
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
new file mode 100644
index 000000000..7c0b27d13
--- /dev/null
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -0,0 +1,2281 @@
+/*
+ Common Flash Interface probe code.
+ (C) 2000 Red Hat. GPL'd.
+ See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
+ for the standard this probe goes back to.
+
+ Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/gen_probe.h>
+
+/* AMD */
+#define AM29DL800BB 0x22CB
+#define AM29DL800BT 0x224A
+
+#define AM29F800BB 0x2258
+#define AM29F800BT 0x22D6
+#define AM29LV400BB 0x22BA
+#define AM29LV400BT 0x22B9
+#define AM29LV800BB 0x225B
+#define AM29LV800BT 0x22DA
+#define AM29LV160DT 0x22C4
+#define AM29LV160DB 0x2249
+#define AM29F017D 0x003D
+#define AM29F016D 0x00AD
+#define AM29F080 0x00D5
+#define AM29F040 0x00A4
+#define AM29LV040B 0x004F
+#define AM29F032B 0x0041
+#define AM29F002T 0x00B0
+#define AM29SL800DB 0x226B
+#define AM29SL800DT 0x22EA
+
+/* Atmel */
+#define AT49BV512 0x0003
+#define AT29LV512 0x003d
+#define AT49BV16X 0x00C0
+#define AT49BV16XT 0x00C2
+#define AT49BV32X 0x00C8
+#define AT49BV32XT 0x00C9
+
+/* Eon */
+#define EN29SL800BB 0x226B
+#define EN29SL800BT 0x22EA
+
+/* Fujitsu */
+#define MBM29F040C 0x00A4
+#define MBM29F800BA 0x2258
+#define MBM29LV650UE 0x22D7
+#define MBM29LV320TE 0x22F6
+#define MBM29LV320BE 0x22F9
+#define MBM29LV160TE 0x22C4
+#define MBM29LV160BE 0x2249
+#define MBM29LV800BA 0x225B
+#define MBM29LV800TA 0x22DA
+#define MBM29LV400TC 0x22B9
+#define MBM29LV400BC 0x22BA
+
+/* Hyundai */
+#define HY29F002T 0x00B0
+
+/* Intel */
+#define I28F004B3T 0x00d4
+#define I28F004B3B 0x00d5
+#define I28F400B3T 0x8894
+#define I28F400B3B 0x8895
+#define I28F008S5 0x00a6
+#define I28F016S5 0x00a0
+#define I28F008SA 0x00a2
+#define I28F008B3T 0x00d2
+#define I28F008B3B 0x00d3
+#define I28F800B3T 0x8892
+#define I28F800B3B 0x8893
+#define I28F016S3 0x00aa
+#define I28F016B3T 0x00d0
+#define I28F016B3B 0x00d1
+#define I28F160B3T 0x8890
+#define I28F160B3B 0x8891
+#define I28F320B3T 0x8896
+#define I28F320B3B 0x8897
+#define I28F640B3T 0x8898
+#define I28F640B3B 0x8899
+#define I28F640C3B 0x88CD
+#define I28F160F3T 0x88F3
+#define I28F160F3B 0x88F4
+#define I28F160C3T 0x88C2
+#define I28F160C3B 0x88C3
+#define I82802AB 0x00ad
+#define I82802AC 0x00ac
+
+/* Macronix */
+#define MX29LV040C 0x004F
+#define MX29LV160T 0x22C4
+#define MX29LV160B 0x2249
+#define MX29F040 0x00A4
+#define MX29F016 0x00AD
+#define MX29F002T 0x00B0
+#define MX29F004T 0x0045
+#define MX29F004B 0x0046
+
+/* NEC */
+#define UPD29F064115 0x221C
+
+/* PMC */
+#define PM49FL002 0x006D
+#define PM49FL004 0x006E
+#define PM49FL008 0x006A
+
+/* Sharp */
+#define LH28F640BF 0x00B0
+
+/* ST - www.st.com */
+#define M29F800AB 0x0058
+#define M29W800DT 0x22D7
+#define M29W800DB 0x225B
+#define M29W400DT 0x00EE
+#define M29W400DB 0x00EF
+#define M29W160DT 0x22C4
+#define M29W160DB 0x2249
+#define M29W040B 0x00E3
+#define M50FW040 0x002C
+#define M50FW080 0x002D
+#define M50FW016 0x002E
+#define M50LPW080 0x002F
+#define M50FLW080A 0x0080
+#define M50FLW080B 0x0081
+#define PSD4256G6V 0x00e9
+
+/* SST */
+#define SST29EE020 0x0010
+#define SST29LE020 0x0012
+#define SST29EE512 0x005d
+#define SST29LE512 0x003d
+#define SST39LF800 0x2781
+#define SST39LF160 0x2782
+#define SST39VF1601 0x234b
+#define SST39VF3201 0x235b
+#define SST39WF1601 0x274b
+#define SST39WF1602 0x274a
+#define SST39LF512 0x00D4
+#define SST39LF010 0x00D5
+#define SST39LF020 0x00D6
+#define SST39LF040 0x00D7
+#define SST39SF010A 0x00B5
+#define SST39SF020A 0x00B6
+#define SST39SF040 0x00B7
+#define SST49LF004B 0x0060
+#define SST49LF040B 0x0050
+#define SST49LF008A 0x005a
+#define SST49LF030A 0x001C
+#define SST49LF040A 0x0051
+#define SST49LF080A 0x005B
+#define SST36VF3203 0x7354
+
+/* Toshiba */
+#define TC58FVT160 0x00C2
+#define TC58FVB160 0x0043
+#define TC58FVT321 0x009A
+#define TC58FVB321 0x009C
+#define TC58FVT641 0x0093
+#define TC58FVB641 0x0095
+
+/* Winbond */
+#define W49V002A 0x00b0
+
+
+/*
+ * Unlock address sets for AMD command sets.
+ * Intel command sets use MTD_UADDR_UNNECESSARY.
+ * Each identifier, except MTD_UADDR_UNNECESSARY and
+ * MTD_UADDR_NOT_SUPPORTED, must be defined below in unlock_addrs[].
+ * MTD_UADDR_NOT_SUPPORTED must be 0 so that structure
+ * initialization need not require initializing all of the
+ * unlock addresses for all bit widths.
+ */
+enum uaddr {
+ MTD_UADDR_NOT_SUPPORTED = 0, /* data width not supported */
+ MTD_UADDR_0x0555_0x02AA,
+ MTD_UADDR_0x0555_0x0AAA,
+ MTD_UADDR_0x5555_0x2AAA,
+ MTD_UADDR_0x0AAA_0x0554,
+ MTD_UADDR_0x0AAA_0x0555,
+ MTD_UADDR_0xAAAA_0x5555,
+ MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
+ MTD_UADDR_UNNECESSARY, /* Does not require any address */
+};
+
+
+struct unlock_addr {
+ uint32_t addr1;
+ uint32_t addr2;
+};
+
+
+/*
+ * I don't like the fact that the first entry in unlock_addrs[]
+ * exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore,
+ * should not be used. The problem is that structures with
+ * initializers have extra fields initialized to 0. It is _very_
+ * desirable to have the unlock address entries for unsupported
+ * data widths automatically initialized - that means that
+ * MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here
+ * must go unused.
+ */
+static const struct unlock_addr unlock_addrs[] = {
+ [MTD_UADDR_NOT_SUPPORTED] = {
+ .addr1 = 0xffff,
+ .addr2 = 0xffff
+ },
+
+ [MTD_UADDR_0x0555_0x02AA] = {
+ .addr1 = 0x0555,
+ .addr2 = 0x02aa
+ },
+
+ [MTD_UADDR_0x0555_0x0AAA] = {
+ .addr1 = 0x0555,
+ .addr2 = 0x0aaa
+ },
+
+ [MTD_UADDR_0x5555_0x2AAA] = {
+ .addr1 = 0x5555,
+ .addr2 = 0x2aaa
+ },
+
+ [MTD_UADDR_0x0AAA_0x0554] = {
+ .addr1 = 0x0AAA,
+ .addr2 = 0x0554
+ },
+
+ [MTD_UADDR_0x0AAA_0x0555] = {
+ .addr1 = 0x0AAA,
+ .addr2 = 0x0555
+ },
+
+ [MTD_UADDR_0xAAAA_0x5555] = {
+ .addr1 = 0xaaaa,
+ .addr2 = 0x5555
+ },
+
+ [MTD_UADDR_DONT_CARE] = {
+ .addr1 = 0x0000, /* Doesn't matter which address */
+ .addr2 = 0x0000 /* is used - must be last entry */
+ },
+
+ [MTD_UADDR_UNNECESSARY] = {
+ .addr1 = 0x0000,
+ .addr2 = 0x0000
+ }
+};
+
+struct amd_flash_info {
+ const char *name;
+ const uint16_t mfr_id;
+ const uint16_t dev_id;
+ const uint8_t dev_size;
+ const uint8_t nr_regions;
+ const uint16_t cmd_set;
+ const uint32_t regions[6];
+ const uint8_t devtypes; /* Bitmask for x8, x16 etc. */
+ const uint8_t uaddr; /* unlock addrs for 8, 16, 32, 64 */
+};
+
+#define ERASEINFO(size,blocks) ((size) << 8 | ((blocks) - 1))
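+
+/*
+ * Editorial note: ERASEINFO() packs one region descriptor in the same
+ * layout as a CFI EraseRegionInfo word - the block count minus one in
+ * the low bits, and the block size in units of 256 bytes above them
+ * (size << 8 == (size / 256) << 16). For example, ERASEINFO(0x10000,64)
+ * describes 64 erase blocks of 64KiB each.
+ */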
+
+#define SIZE_64KiB 16
+#define SIZE_128KiB 17
+#define SIZE_256KiB 18
+#define SIZE_512KiB 19
+#define SIZE_1MiB 20
+#define SIZE_2MiB 21
+#define SIZE_4MiB 22
+#define SIZE_8MiB 23
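+
+/*
+ * These SIZE_* values are log2 of the device size in bytes, the same
+ * encoding as the CFI DevSize field (size == 1 << n). For example,
+ * SIZE_1MiB == 20 because 1 << 20 == 1MiB.
+ */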
+
+
+/*
+ * Please keep this list ordered by manufacturer!
+ * Fortunately, the list isn't searched often and so a
+ * slow, linear search isn't so bad.
+ */
+static const struct amd_flash_info jedec_table[] = {
+ {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F032B,
+ .name = "AMD AM29F032B",
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,64)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV160DT,
+ .name = "AMD AM29LV160DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV160DB,
+ .name = "AMD AM29LV160DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV400BB,
+ .name = "AMD AM29LV400BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV400BT,
+ .name = "AMD AM29LV400BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,7),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV800BB,
+ .name = "AMD AM29LV800BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+/* add DL */
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29DL800BB,
+ .name = "AMD AM29DL800BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 6,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,4),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x10000,14)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29DL800BT,
+ .name = "AMD AM29DL800BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 6,
+ .regions = {
+ ERASEINFO(0x10000,14),
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,4),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F800BB,
+ .name = "AMD AM29F800BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV800BT,
+ .name = "AMD AM29LV800BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F800BT,
+ .name = "AMD AM29F800BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F017D,
+ .name = "AMD AM29F017D",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_DONT_CARE,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F016D,
+ .name = "AMD AM29F016D",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F080,
+ .name = "AMD AM29F080",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F040,
+ .name = "AMD AM29F040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV040B,
+ .name = "AMD AM29LV040B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F002T,
+ .name = "AMD AM29F002T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,3),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29SL800DT,
+ .name = "AMD AM29SL800DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29SL800DB,
+ .name = "AMD AM29SL800DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT49BV512,
+ .name = "Atmel AT49BV512",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT29LV512,
+ .name = "Atmel AT29LV512",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x80,256),
+ ERASEINFO(0x80,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT49BV16X,
+ .name = "Atmel AT49BV16X",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT49BV16XT,
+ .name = "Atmel AT49BV16XT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT49BV32X,
+ .name = "Atmel AT49BV32X",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,63)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT49BV32XT,
+ .name = "Atmel AT49BV32XT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000,63),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_EON,
+ .dev_id = EN29SL800BT,
+ .name = "Eon EN29SL800BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_EON,
+ .dev_id = EN29SL800BB,
+ .name = "Eon EN29SL800BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29F040C,
+ .name = "Fujitsu MBM29F040C",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29F800BA,
+ .name = "Fujitsu MBM29F800BA",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV650UE,
+ .name = "Fujitsu MBM29LV650UE",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_DONT_CARE,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,128)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV320TE,
+ .name = "Fujitsu MBM29LV320TE",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000,63),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV320BE,
+ .name = "Fujitsu MBM29LV320BE",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,63)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV160TE,
+ .name = "Fujitsu MBM29LV160TE",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV160BE,
+ .name = "Fujitsu MBM29LV160BE",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV800BA,
+ .name = "Fujitsu MBM29LV800BA",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV800TA,
+ .name = "Fujitsu MBM29LV800TA",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV400BC,
+ .name = "Fujitsu MBM29LV400BC",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV400TC,
+ .name = "Fujitsu MBM29LV400TC",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,7),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_HYUNDAI,
+ .dev_id = HY29F002T,
+ .name = "Hyundai HY29F002T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,3),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F004B3B,
+ .name = "Intel 28F004B3B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 7),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F004B3T,
+ .name = "Intel 28F004B3T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 7),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F400B3B,
+ .name = "Intel 28F400B3B",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 7),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F400B3T,
+ .name = "Intel 28F400B3T",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 7),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F008B3B,
+ .name = "Intel 28F008B3B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F008B3T,
+ .name = "Intel 28F008B3T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 15),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F008S5,
+ .name = "Intel 28F008S5",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F016S5,
+ .name = "Intel 28F016S5",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F008SA,
+ .name = "Intel 28F008SA",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000, 16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F800B3B,
+ .name = "Intel 28F800B3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F800B3T,
+ .name = "Intel 28F800B3T",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 15),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F016B3B,
+ .name = "Intel 28F016B3B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 31),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F016S3,
+ .name = "Intel I28F016S3",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000, 32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F016B3T,
+ .name = "Intel 28F016B3T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 31),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F160B3B,
+ .name = "Intel 28F160B3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 31),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F160B3T,
+ .name = "Intel 28F160B3T",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 31),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F320B3B,
+ .name = "Intel 28F320B3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 63),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F320B3T,
+ .name = "Intel 28F320B3T",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 63),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F640B3B,
+ .name = "Intel 28F640B3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 127),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F640B3T,
+ .name = "Intel 28F640B3T",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 127),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F640C3B,
+ .name = "Intel 28F640C3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 127),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I82802AB,
+ .name = "Intel 82802AB",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I82802AC,
+ .name = "Intel 82802AC",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29LV040C,
+ .name = "Macronix MX29LV040C",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29LV160T,
+ .name = "MXIC MX29LV160T",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_NEC,
+ .dev_id = UPD29F064115,
+ .name = "NEC uPD29F064115",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 3,
+ .regions = {
+ ERASEINFO(0x2000,8),
+ ERASEINFO(0x10000,126),
+ ERASEINFO(0x2000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29LV160B,
+ .name = "MXIC MX29LV160B",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29F040,
+ .name = "Macronix MX29F040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29F016,
+ .name = "Macronix MX29F016",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29F004T,
+ .name = "Macronix MX29F004T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,7),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29F004B,
+ .name = "Macronix MX29F004B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29F002T,
+ .name = "Macronix MX29F002T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,3),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_PMC,
+ .dev_id = PM49FL002,
+ .name = "PMC Pm49FL002",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO( 0x01000, 64 )
+ }
+ }, {
+ .mfr_id = CFI_MFR_PMC,
+ .dev_id = PM49FL004,
+ .name = "PMC Pm49FL004",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO( 0x01000, 128 )
+ }
+ }, {
+ .mfr_id = CFI_MFR_PMC,
+ .dev_id = PM49FL008,
+ .name = "PMC Pm49FL008",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO( 0x01000, 256 )
+ }
+ }, {
+ .mfr_id = CFI_MFR_SHARP,
+ .dev_id = LH28F640BF,
+ .name = "LH28F640BF",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 127),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39LF512,
+ .name = "SST 39LF512",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39LF010,
+ .name = "SST 39LF010",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_128KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST29EE020,
+ .name = "SST 29EE020",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_SST_PAGE,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST29LE020,
+ .name = "SST 29LE020",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_SST_PAGE,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39LF020,
+ .name = "SST 39LF020",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39LF040,
+ .name = "SST 39LF040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39SF010A,
+ .name = "SST 39SF010A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_128KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39SF020A,
+ .name = "SST 39SF020A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39SF040,
+ .name = "SST 39SF040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF040B,
+ .name = "SST 49LF040B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF004B,
+ .name = "SST 49LF004B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF008A,
+ .name = "SST 49LF008A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,256),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF030A,
+ .name = "SST 49LF030A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,96),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF040A,
+ .name = "SST 49LF040A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF080A,
+ .name = "SST 49LF080A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,256),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
+ .dev_id = SST39LF160,
+ .name = "SST 39LF160",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
+ .dev_id = SST39VF1601,
+ .name = "SST 39VF1601",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ /* CFI is broken: reports AMD_STD, but needs custom uaddr */
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39WF1601,
+ .name = "SST 39WF1601",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ /* CFI is broken: reports AMD_STD, but needs custom uaddr */
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39WF1602,
+ .name = "SST 39WF1602",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
+ .dev_id = SST39VF3201,
+ .name = "SST 39VF3201",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST36VF3203,
+ .name = "SST 36VF3203",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,64),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M29F800AB,
+ .name = "ST M29F800AB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
+ .dev_id = M29W800DT,
+ .name = "ST M29W800DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
+ .dev_id = M29W800DB,
+ .name = "ST M29W800DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M29W400DT,
+ .name = "ST M29W400DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,7),
+ ERASEINFO(0x02000,1),
+ ERASEINFO(0x08000,2),
+ ERASEINFO(0x10000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M29W400DB,
+ .name = "ST M29W400DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
+ .dev_id = M29W160DT,
+ .name = "ST M29W160DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
+ .dev_id = M29W160DB,
+ .name = "ST M29W160DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M29W040B,
+ .name = "ST M29W040B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50FW040,
+ .name = "ST M50FW040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50FW080,
+ .name = "ST M50FW080",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50FW016,
+ .name = "ST M50FW016",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50LPW080,
+ .name = "ST M50LPW080",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ },
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50FLW080A,
+ .name = "ST M50FLW080A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x10000,13),
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x1000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50FLW080B,
+ .name = "ST M50FLW080B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x10000,13),
+ ERASEINFO(0x1000,16),
+ }
+ }, {
+ .mfr_id = 0xff00 | CFI_MFR_ST,
+ .dev_id = 0xff00 | PSD4256G6V,
+ .name = "ST PSD4256G6V",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0x0AAA_0x0554,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVT160,
+ .name = "Toshiba TC58FVT160",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVB160,
+ .name = "Toshiba TC58FVB160",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVB321,
+ .name = "Toshiba TC58FVB321",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,63)
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVT321,
+ .name = "Toshiba TC58FVT321",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000,63),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVB641,
+ .name = "Toshiba TC58FVB641",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,127)
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVT641,
+ .name = "Toshiba TC58FVT641",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000,127),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_WINBOND,
+ .dev_id = W49V002A,
+ .name = "Winbond W49V002A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000, 3),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x04000, 1),
+ }
+ }
+};
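+
+/*
+ * Editorial note on extending the table (a sketch, not something the
+ * code enforces): a new part needs its JEDEC manufacturer and device
+ * IDs, dev_size as log2 of the byte size, the devtypes mask and
+ * unlock-address set it responds to, and one ERASEINFO() entry per
+ * erase region, listed in address order from the bottom of the chip
+ * upward.
+ */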
+
+static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
+ struct cfi_private *cfi)
+{
+ map_word result;
+ unsigned long mask;
+ int bank = 0;
+
+ /* According to the JEDEC "Standard Manufacturer's Identification Code"
+ * (http://www.jedec.org/download/search/jep106W.pdf), the first
+ * several banks can contain 0x7f (the continuation code) instead of
+ * the actual ID.
+ */
+ do {
+ uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
+ mask = (1 << (cfi->device_type * 8)) - 1;
+ result = map_read(map, base + ofs);
+ bank++;
+ } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
+
+ return result.x[0] & mask;
+}
+
+static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
+ struct cfi_private *cfi)
+{
+ map_word result;
+ unsigned long mask;
+ u32 ofs = cfi_build_cmd_addr(1, map, cfi);
+ mask = (1 << (cfi->device_type * 8)) - 1;
+ result = map_read(map, base + ofs);
+ return result.x[0] & mask;
+}
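+
+/*
+ * In both readers above the mask keeps one device's worth of ID bits:
+ * cfi->device_type is the device width in bytes, so an x16 part
+ * (device_type == 2) reads back through mask 0xffff, for instance.
+ */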
+
+static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
+{
+ /* Reset */
+
+ /* After checking the datasheets for SST, MACRONIX and ATMEL
+ * (and incidentally the JEDEC spec - 3.5.3.3), the reset
+ * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
+ * 0x2aaa, then 0xF0 at 0x5555. This will not affect the AMD
+ * chips, as they will ignore the writes and don't care what
+ * address the F0 is written to. */
+ if (cfi->addr_unlock1) {
+ pr_debug( "reset unlock called %x %x \n",
+ cfi->addr_unlock1,cfi->addr_unlock2);
+ cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ }
+
+ cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ /* Some misdesigned Intel chips do not respond to 0xF0 as a reset,
+ * so ensure we're in read mode. Send both the Intel and the AMD command
+ * for this: Intel uses 0xFF to return to read mode, and AMD treats
+ * 0xFF as a NOP, so this should be safe.
+ */
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+ /* FIXME - should have reset delay before continuing */
+}
+
+
+static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index)
+{
+ int i, num_erase_regions;
+ uint8_t uaddr;
+
+ if (!(jedec_table[index].devtypes & cfi->device_type)) {
+ pr_debug("Rejecting potential %s with incompatible %d-bit device type\n",
+ jedec_table[index].name, cfi->device_type * 8);
+ return 0;
+ }
+
+ printk(KERN_INFO "Found: %s\n",jedec_table[index].name);
+
+ num_erase_regions = jedec_table[index].nr_regions;
+
+ cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ if (!cfi->cfiq) {
+ return 0;
+ }
+
+ memset(cfi->cfiq, 0, sizeof(struct cfi_ident));
+
+ cfi->cfiq->P_ID = jedec_table[index].cmd_set;
+ cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
+ cfi->cfiq->DevSize = jedec_table[index].dev_size;
+ cfi->cfi_mode = CFI_MODE_JEDEC;
+ cfi->sector_erase_cmd = CMD(0x30);
+
+ for (i = 0; i < num_erase_regions; i++) {
+ cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
+ }
+ cfi->cmdset_priv = NULL;
+
+ /* This may be redundant for some cases, but it doesn't hurt */
+ cfi->mfr = jedec_table[index].mfr_id;
+ cfi->id = jedec_table[index].dev_id;
+
+ uaddr = jedec_table[index].uaddr;
+
+ /* The table has unlock addresses in _bytes_, and we try not to let
+ our brains explode when we see the datasheets talking about address
+ lines numbered from A-1 to A18. The CFI table has unlock addresses
+ in device-words according to the mode the device is connected in */
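+ /*
+ * Worked example (illustrative): an x16 part listed with
+ * MTD_UADDR_0x0AAA_0x0555 has device_type == 2, so the divisions
+ * below yield addr_unlock1 == 0x555 and addr_unlock2 == 0x2AA,
+ * the word addresses the datasheets quote for 16-bit mode.
+ */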
+ cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type;
+ cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type;
+
+ return 1; /* ok */
+}
+
+
+/*
+ * There is a BIG problem properly ID'ing the JEDEC device and guaranteeing
+ * the mapped address, unlock addresses, and proper chip ID. This function
+ * attempts to minimize errors. It is doubtful that this probe will ever
+ * be perfect - consequently there should be some module parameters that
+ * could be manually specified to force the chip info.
+ */
+static inline int jedec_match( uint32_t base,
+ struct map_info *map,
+ struct cfi_private *cfi,
+ const struct amd_flash_info *finfo )
+{
+ int rc = 0; /* failure until all tests pass */
+ u32 mfr, id;
+ uint8_t uaddr;
+
+ /*
+ * The IDs must match. For X16 and X32 devices operating in
+ * a lower width (X8 or X16), the device IDs are usually just
+ * the lower byte(s) of the larger device ID for wider mode. If
+ * a part is found that doesn't fit this assumption (the device ID
+ * for the smaller width mode is completely unrelated to the
+ * full-width mode), then jedec_table[] will have to be augmented
+ * with the IDs for the different widths.
+ */
+ switch (cfi->device_type) {
+ case CFI_DEVICETYPE_X8:
+ mfr = (uint8_t)finfo->mfr_id;
+ id = (uint8_t)finfo->dev_id;
+
+ /* bjd: it seems that if we do this, we can end up
+ * detecting 16-bit flashes as an 8-bit device, even though
+ * they aren't.
+ */
+ if (finfo->dev_id > 0xff) {
+ pr_debug("%s(): ID is not 8bit\n",
+ __func__);
+ goto match_done;
+ }
+ break;
+ case CFI_DEVICETYPE_X16:
+ mfr = (uint16_t)finfo->mfr_id;
+ id = (uint16_t)finfo->dev_id;
+ break;
+ case CFI_DEVICETYPE_X32:
+ mfr = (uint16_t)finfo->mfr_id;
+ id = (uint32_t)finfo->dev_id;
+ break;
+ default:
+ printk(KERN_WARNING
+ "MTD %s(): Unsupported device type %d\n",
+ __func__, cfi->device_type);
+ goto match_done;
+ }
+ if ( cfi->mfr != mfr || cfi->id != id ) {
+ goto match_done;
+ }
+
+ /* the part size must fit in the memory window */
+ pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
+ __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
+ if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
+ pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
+ __func__, finfo->mfr_id, finfo->dev_id,
+ 1 << finfo->dev_size );
+ goto match_done;
+ }
+
+ if (! (finfo->devtypes & cfi->device_type))
+ goto match_done;
+
+ uaddr = finfo->uaddr;
+
+ pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
+ __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
+ if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
+ && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
+ unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
+ pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n",
+ __func__,
+ unlock_addrs[uaddr].addr1,
+ unlock_addrs[uaddr].addr2);
+ goto match_done;
+ }
+
+ /*
+ * Make sure the IDs disappear when the device is taken out of
+ * ID mode. The only time this check wrongly fails is when the
+ * IDs happen to be stored as data at those same addresses; in
+ * that rare and unfortunate case the chip cannot be probed
+ * correctly.
+ * FIXME - write a driver that takes all of the chip info as
+ * module parameters, doesn't probe but forces a load.
+ */
+ pr_debug("MTD %s(): check ID's disappear when not in ID mode\n",
+ __func__ );
+ jedec_reset( base, map, cfi );
+ mfr = jedec_read_mfr( map, base, cfi );
+ id = jedec_read_id( map, base, cfi );
+ if ( mfr == cfi->mfr && id == cfi->id ) {
+ pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
+ "You might need to manually specify JEDEC parameters.\n",
+ __func__, cfi->mfr, cfi->id );
+ goto match_done;
+ }
+
+ /* all tests passed - mark as success */
+ rc = 1;
+
+ /*
+ * Put the device back in ID mode - only need to do this if we
+ * were truly frobbing a real device.
+ */
+ pr_debug("MTD %s(): return to ID mode\n", __func__ );
+ if (cfi->addr_unlock1) {
+ cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ }
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ /* FIXME - should have a delay before continuing */
+
+ match_done:
+ return rc;
+}
+
+
+static int jedec_probe_chip(struct map_info *map, __u32 base,
+ unsigned long *chip_map, struct cfi_private *cfi)
+{
+ int i;
+ enum uaddr uaddr_idx = MTD_UADDR_NOT_SUPPORTED;
+ u32 probe_offset1, probe_offset2;
+
+ retry:
+ if (!cfi->numchips) {
+ uaddr_idx++;
+
+ if (MTD_UADDR_UNNECESSARY == uaddr_idx)
+ return 0;
+
+ cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type;
+ cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type;
+ }
+
+ /* Make certain we aren't probing past the end of map */
+ if (base >= map->size) {
+ printk(KERN_NOTICE
+ "Probe at base(0x%08x) past the end of the map(0x%08lx)\n",
+ base, map->size - 1);
+ return 0;
+ }
+ /* Ensure the unlock addresses we try stay inside the map */
+ probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi);
+ probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi);
+ if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
+ ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
+ goto retry;
+
+ /* Reset */
+ jedec_reset(base, map, cfi);
+
+ /* Autoselect Mode */
+ if (cfi->addr_unlock1) {
+ cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ }
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ /* FIXME - should have a delay before continuing */
+
+ if (!cfi->numchips) {
+ /* This is the first time we're called. Set up the CFI
+ stuff accordingly and return */
+
+ cfi->mfr = jedec_read_mfr(map, base, cfi);
+ cfi->id = jedec_read_id(map, base, cfi);
+ pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n",
+ cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
+ for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
+ if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
+ pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
+ __func__, cfi->mfr, cfi->id,
+ cfi->addr_unlock1, cfi->addr_unlock2 );
+ if (!cfi_jedec_setup(map, cfi, i))
+ return 0;
+ goto ok_out;
+ }
+ }
+ goto retry;
+ } else {
+ uint16_t mfr;
+ uint16_t id;
+
+ /* Make sure it is a chip of the same manufacturer and id */
+ mfr = jedec_read_mfr(map, base, cfi);
+ id = jedec_read_id(map, base, cfi);
+
+ if ((mfr != cfi->mfr) || (id != cfi->id)) {
+ printk(KERN_DEBUG "%s: Found different chip or no chip at all (mfr 0x%x, id 0x%x) at 0x%x\n",
+ map->name, mfr, id, base);
+ jedec_reset(base, map, cfi);
+ return 0;
+ }
+ }
+
+ /* Check each previous chip location to see if it's an alias */
+ for (i = 0; i < (base >> cfi->chipshift); i++) {
+ unsigned long start;
+ if (!test_bit(i, chip_map)) {
+ continue; /* Skip location; no valid chip at this address */
+ }
+ start = i << cfi->chipshift;
+ if (jedec_read_mfr(map, start, cfi) == cfi->mfr &&
+ jedec_read_id(map, start, cfi) == cfi->id) {
+ /* Eep. This chip also looks like it's in autoselect mode.
+ Is it an alias for the new one? */
+ jedec_reset(start, map, cfi);
+
+ /* If the device IDs go away, it's an alias */
+ if (jedec_read_mfr(map, base, cfi) != cfi->mfr ||
+ jedec_read_id(map, base, cfi) != cfi->id) {
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, start);
+ return 0;
+ }
+
+ /* Yes, it's actually got the device IDs as data. Most
+ * unfortunate. Stick the new chip in read mode
+ * too and if it's the same, assume it's an alias. */
+ /* FIXME: Use other modes to do a proper check */
+ jedec_reset(base, map, cfi);
+ if (jedec_read_mfr(map, base, cfi) == cfi->mfr &&
+ jedec_read_id(map, base, cfi) == cfi->id) {
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, start);
+ return 0;
+ }
+ }
+ }
+
+ /* OK, if we got to here, then none of the previous chips appear to
+ be aliases for the current one. */
+ set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
+ cfi->numchips++;
+
+ok_out:
+ /* Put it back into Read Mode */
+ jedec_reset(base, map, cfi);
+
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
+ map->name, cfi_interleave(cfi), cfi->device_type*8, base,
+ map->bankwidth*8);
+
+ return 1;
+}
+
+static struct chip_probe jedec_chip_probe = {
+ .name = "JEDEC",
+ .probe_chip = jedec_probe_chip
+};
+
+static struct mtd_info *jedec_probe(struct map_info *map)
+{
+ /*
+ * Just use the generic probe stuff to call our CFI-specific
+ * chip_probe routine in all the possible permutations, etc.
+ */
+ return mtd_do_chip_probe(map, &jedec_chip_probe);
+}
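+
+/*
+ * Usage sketch (illustrative): map drivers normally reach this probe
+ * indirectly through the chip-driver registry, e.g.
+ *
+ *   mtd = do_map_probe("cfi_probe", &my_map);
+ *   if (!mtd)
+ *           mtd = do_map_probe("jedec_probe", &my_map);
+ *
+ * falling back to the JEDEC probe when the CFI query fails.
+ */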
+
+static struct mtd_chip_driver jedec_chipdrv = {
+ .probe = jedec_probe,
+ .name = "jedec_probe",
+ .module = THIS_MODULE
+};
+
+static int __init jedec_probe_init(void)
+{
+ register_mtd_chip_driver(&jedec_chipdrv);
+ return 0;
+}
+
+static void __exit jedec_probe_exit(void)
+{
+ unregister_mtd_chip_driver(&jedec_chipdrv);
+}
+
+module_init(jedec_probe_init);
+module_exit(jedec_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al.");
+MODULE_DESCRIPTION("Probe code for JEDEC-compliant flash chips");
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
new file mode 100644
index 000000000..f7a5bca92
--- /dev/null
+++ b/drivers/mtd/chips/map_absent.c
@@ -0,0 +1,112 @@
+/*
+ * Common code to handle absent "placeholder" devices
+ * Copyright 2001 Resilience Corporation <ebrower@resilience.com>
+ *
+ * This map driver is used to allocate "placeholder" MTD
+ * devices on systems that have socketed/removable media.
+ * Use of this driver as a fallback preserves the expected
+ * registration of MTD device nodes regardless of probe outcome.
+ * A usage example is as follows:
+ *
+ * my_dev[i] = do_map_probe("cfi", &my_map[i]);
+ * if(NULL == my_dev[i]) {
+ * my_dev[i] = do_map_probe("map_absent", &my_map[i]);
+ * }
+ *
+ * Any device 'probed' with this driver will return -ENODEV
+ * upon open.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int map_absent_erase (struct mtd_info *, struct erase_info *);
+static void map_absent_sync (struct mtd_info *);
+static struct mtd_info *map_absent_probe(struct map_info *map);
+static void map_absent_destroy (struct mtd_info *);
+
+
+static struct mtd_chip_driver map_absent_chipdrv = {
+ .probe = map_absent_probe,
+ .destroy = map_absent_destroy,
+ .name = "map_absent",
+ .module = THIS_MODULE
+};
+
+static struct mtd_info *map_absent_probe(struct map_info *map)
+{
+ struct mtd_info *mtd;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd) {
+ return NULL;
+ }
+
+ map->fldrv = &map_absent_chipdrv;
+ mtd->priv = map;
+ mtd->name = map->name;
+ mtd->type = MTD_ABSENT;
+ mtd->size = map->size;
+ mtd->_erase = map_absent_erase;
+ mtd->_read = map_absent_read;
+ mtd->_write = map_absent_write;
+ mtd->_sync = map_absent_sync;
+ mtd->flags = 0;
+ mtd->erasesize = PAGE_SIZE;
+ mtd->writesize = 1;
+
+ __module_get(THIS_MODULE);
+ return mtd;
+}
+
+
+static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ return -ENODEV;
+}
+
+static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+{
+ return -ENODEV;
+}
+
+static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return -ENODEV;
+}
+
+static void map_absent_sync(struct mtd_info *mtd)
+{
+ /* nop */
+}
+
+static void map_absent_destroy(struct mtd_info *mtd)
+{
+ /* nop */
+}
+
+static int __init map_absent_init(void)
+{
+ register_mtd_chip_driver(&map_absent_chipdrv);
+ return 0;
+}
+
+static void __exit map_absent_exit(void)
+{
+ unregister_mtd_chip_driver(&map_absent_chipdrv);
+}
+
+module_init(map_absent_init);
+module_exit(map_absent_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Resilience Corporation - Eric Brower <ebrower@resilience.com>");
+MODULE_DESCRIPTION("Placeholder MTD chip driver for 'absent' chips");
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
new file mode 100644
index 000000000..afb43d5e1
--- /dev/null
+++ b/drivers/mtd/chips/map_ram.c
@@ -0,0 +1,154 @@
+/*
+ * Common code to handle map devices which are simple RAM
+ * (C) 2000 Red Hat. GPL'd.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+
+static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int mapram_erase (struct mtd_info *, struct erase_info *);
+static void mapram_nop (struct mtd_info *);
+static struct mtd_info *map_ram_probe(struct map_info *map);
+static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long,
+ unsigned long, unsigned long);
+
+
+static struct mtd_chip_driver mapram_chipdrv = {
+ .probe = map_ram_probe,
+ .name = "map_ram",
+ .module = THIS_MODULE
+};
+
+static struct mtd_info *map_ram_probe(struct map_info *map)
+{
+ struct mtd_info *mtd;
+
+ /* Check the first byte is RAM */
+#if 0
+ map_write8(map, 0x55, 0);
+ if (map_read8(map, 0) != 0x55)
+ return NULL;
+
+ map_write8(map, 0xAA, 0);
+ if (map_read8(map, 0) != 0xAA)
+ return NULL;
+
+ /* Check the last byte is RAM */
+ map_write8(map, 0x55, map->size-1);
+ if (map_read8(map, map->size-1) != 0x55)
+ return NULL;
+
+ map_write8(map, 0xAA, map->size-1);
+ if (map_read8(map, map->size-1) != 0xAA)
+ return NULL;
+#endif
+ /* OK. It seems to be RAM. */
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return NULL;
+
+ map->fldrv = &mapram_chipdrv;
+ mtd->priv = map;
+ mtd->name = map->name;
+ mtd->type = MTD_RAM;
+ mtd->size = map->size;
+ mtd->_erase = mapram_erase;
+ mtd->_get_unmapped_area = mapram_unmapped_area;
+ mtd->_read = mapram_read;
+ mtd->_write = mapram_write;
+ mtd->_panic_write = mapram_write;
+ mtd->_sync = mapram_nop;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->writesize = 1;
+
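+ /* Shrink the erase size from PAGE_SIZE down to the largest
+ power of two that evenly divides the map size. */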
+ mtd->erasesize = PAGE_SIZE;
+ while (mtd->size & (mtd->erasesize - 1))
+ mtd->erasesize >>= 1;
+
+ __module_get(THIS_MODULE);
+ return mtd;
+}
+
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+static unsigned long mapram_unmapped_area(struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags)
+{
+ struct map_info *map = mtd->priv;
+ return (unsigned long) map->virt + offset;
+}
+
+static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+
+ map_copy_from(map, buf, from, len);
+ *retlen = len;
+ return 0;
+}
+
+static int mapram_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+
+ map_copy_to(map, to, buf, len);
+ *retlen = len;
+ return 0;
+}
+
+static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
+{
+ /* Yeah, it's inefficient. Who cares? It's faster than a _real_
+ flash erase. */
+ struct map_info *map = mtd->priv;
+ map_word allff;
+ unsigned long i;
+
+ allff = map_word_ff(map);
+ for (i = 0; i < instr->len; i += map_bankwidth(map))
+ map_write(map, allff, instr->addr + i);
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+}
+
+static void mapram_nop(struct mtd_info *mtd)
+{
+ /* Nothing to see here */
+}
+
+static int __init map_ram_init(void)
+{
+ register_mtd_chip_driver(&mapram_chipdrv);
+ return 0;
+}
+
+static void __exit map_ram_exit(void)
+{
+ unregister_mtd_chip_driver(&mapram_chipdrv);
+}
+
+module_init(map_ram_init);
+module_exit(map_ram_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD chip driver for RAM chips");
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
new file mode 100644
index 000000000..e67f73ab4
--- /dev/null
+++ b/drivers/mtd/chips/map_rom.c
@@ -0,0 +1,124 @@
+/*
+ * Common code to handle map devices which are simple ROM
+ * (C) 2000 Red Hat. GPL'd.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static void maprom_nop (struct mtd_info *);
+static struct mtd_info *map_rom_probe(struct map_info *map);
+static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
+static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long,
+ unsigned long, unsigned long);
+
+static struct mtd_chip_driver maprom_chipdrv = {
+ .probe = map_rom_probe,
+ .name = "map_rom",
+ .module = THIS_MODULE
+};
+
+static unsigned int default_erasesize(struct map_info *map)
+{
+ const __be32 *erase_size = NULL;
+
+ erase_size = of_get_property(map->device_node, "erase-size", NULL);
+
+ return !erase_size ? map->size : be32_to_cpu(*erase_size);
+}
+
+static struct mtd_info *map_rom_probe(struct map_info *map)
+{
+ struct mtd_info *mtd;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return NULL;
+
+ map->fldrv = &maprom_chipdrv;
+ mtd->priv = map;
+ mtd->name = map->name;
+ mtd->type = MTD_ROM;
+ mtd->size = map->size;
+ mtd->_get_unmapped_area = maprom_unmapped_area;
+ mtd->_read = maprom_read;
+ mtd->_write = maprom_write;
+ mtd->_sync = maprom_nop;
+ mtd->_erase = maprom_erase;
+ mtd->flags = MTD_CAP_ROM;
+ mtd->erasesize = default_erasesize(map);
+ mtd->writesize = 1;
+ mtd->writebufsize = 1;
+
+ __module_get(THIS_MODULE);
+ return mtd;
+}
+
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+static unsigned long maprom_unmapped_area(struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags)
+{
+ struct map_info *map = mtd->priv;
+ return (unsigned long) map->virt + offset;
+}
+
+static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+
+ map_copy_from(map, buf, from, len);
+ *retlen = len;
+ return 0;
+}
+
+static void maprom_nop(struct mtd_info *mtd)
+{
+ /* Nothing to see here */
+}
+
+static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+{
+ return -EROFS;
+}
+
+static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
+{
+ /* We do our best 8) */
+ return -EROFS;
+}
+
+static int __init map_rom_init(void)
+{
+ register_mtd_chip_driver(&maprom_chipdrv);
+ return 0;
+}
+
+static void __exit map_rom_exit(void)
+{
+ unregister_mtd_chip_driver(&maprom_chipdrv);
+}
+
+module_init(map_rom_init);
+module_exit(map_rom_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD chip driver for ROM chips");
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
new file mode 100644
index 000000000..c8503006f
--- /dev/null
+++ b/drivers/mtd/cmdlinepart.c
@@ -0,0 +1,415 @@
+/*
+ * Read flash partition table from command line
+ *
+ * Copyright © 2002 SYSGO Real-Time Solutions GmbH
+ * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * The format for the command line is as follows:
+ *
+ * mtdparts=<mtddef>[;<mtddef>]
+ * <mtddef> := <mtd-id>:<partdef>[,<partdef>]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk]
+ * <mtd-id> := unique name used in mapping driver/device (mtd->name)
+ * <size> := standard linux memsize OR "-" to denote all remaining space
+ * size is automatically truncated at end of device
+ * if specified or truncated size is 0 the part is skipped
+ * <offset> := standard linux memsize
+ * if omitted the part will immediately follow the previous part
+ * or 0 if the first part
+ * <name> := '(' NAME ')'
+ * NAME will appear in /proc/mtd
+ *
+ * <size> and <offset> can be specified such that the parts are out of order
+ * in physical memory and may even overlap.
+ *
+ * The parts are assigned MTD numbers in the order they are specified in the
+ * command line regardless of their order in physical memory.
+ *
+ * Examples:
+ *
+ * 1 NOR Flash, with 1 single writable partition:
+ * edb7312-nor:-
+ *
+ * 1 NOR Flash with 2 partitions, 1 NAND flash with one partition:
+ * edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
+ */
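+
+/*
+ * A further illustrative spec (hypothetical mtd-id "mydev") combining an
+ * explicit offset with the "ro" and "lk" flags described above:
+ *
+ * mtdparts=mydev:512k(boot)ro,64k@0x80000(env)lk,-(rootfs)
+ */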
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+/* error message prefix */
+#define ERRP "mtd: "
+
+/* debug macro */
+#if 0
+#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
+#else
+#define dbg(x)
+#endif
+
+
+/* special size referring to all the remaining space in a partition */
+#define SIZE_REMAINING ULLONG_MAX
+#define OFFSET_CONTINUOUS ULLONG_MAX
+
+struct cmdline_mtd_partition {
+ struct cmdline_mtd_partition *next;
+ char *mtd_id;
+ int num_parts;
+ struct mtd_partition *parts;
+};
+
+/* mtdpart_setup() parses into here */
+static struct cmdline_mtd_partition *partitions;
+
+/* the command line passed to mtdpart_setup() */
+static char *mtdparts;
+static char *cmdline;
+static int cmdline_parsed;
+
+/*
+ * Parse one partition definition for an MTD. Since there can be many
+ * comma separated partition definitions, this function calls itself
+ * recursively until no more partition definitions are found. Nice side
+ * effect: the memory to keep the mtd_partition structs and the names
+ * is allocated upon the last definition being found. At that point the
+ * syntax has been verified ok.
+ */
+static struct mtd_partition * newpart(char *s,
+ char **retptr,
+ int *num_parts,
+ int this_part,
+ unsigned char **extra_mem_ptr,
+ int extra_mem_size)
+{
+ struct mtd_partition *parts;
+ unsigned long long size, offset = OFFSET_CONTINUOUS;
+ char *name;
+ int name_len;
+ unsigned char *extra_mem;
+ char delim;
+ unsigned int mask_flags;
+
+ /* fetch the partition size */
+ if (*s == '-') {
+ /* assign all remaining space to this partition */
+ size = SIZE_REMAINING;
+ s++;
+ } else {
+ size = memparse(s, &s);
+ if (size < PAGE_SIZE) {
+ printk(KERN_ERR ERRP "partition size too small (%llx)\n",
+ size);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* fetch partition name and flags */
+ mask_flags = 0; /* this is going to be a regular partition */
+ delim = 0;
+
+ /* check for offset */
+ if (*s == '@') {
+ s++;
+ offset = memparse(s, &s);
+ }
+
+ /* now look for name */
+ if (*s == '(')
+ delim = ')';
+
+ if (delim) {
+ char *p;
+
+ name = ++s;
+ p = strchr(name, delim);
+ if (!p) {
+ printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
+ return ERR_PTR(-EINVAL);
+ }
+ name_len = p - name;
+ s = p + 1;
+ } else {
+ name = NULL;
+ name_len = 13; /* Partition_000 */
+ }
+
+ /* record name length for memory allocation later */
+ extra_mem_size += name_len + 1;
+
+ /* test for options */
+ if (strncmp(s, "ro", 2) == 0) {
+ mask_flags |= MTD_WRITEABLE;
+ s += 2;
+ }
+
+ /* if lk is found do NOT unlock the MTD partition*/
+ if (strncmp(s, "lk", 2) == 0) {
+ mask_flags |= MTD_POWERUP_LOCK;
+ s += 2;
+ }
+
+ /* test if more partitions are following */
+ if (*s == ',') {
+ if (size == SIZE_REMAINING) {
+ printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n");
+ return ERR_PTR(-EINVAL);
+ }
+ /* more partitions follow, parse them */
+ parts = newpart(s + 1, &s, num_parts, this_part + 1,
+ &extra_mem, extra_mem_size);
+ if (IS_ERR(parts))
+ return parts;
+ } else {
+ /* this is the last partition: allocate space for all */
+ int alloc_size;
+
+ *num_parts = this_part + 1;
+ alloc_size = *num_parts * sizeof(struct mtd_partition) +
+ extra_mem_size;
+
+ parts = kzalloc(alloc_size, GFP_KERNEL);
+ if (!parts)
+ return ERR_PTR(-ENOMEM);
+ extra_mem = (unsigned char *)(parts + *num_parts);
+ }
+
+ /* enter this partition (offset will be calculated later if it is zero at this point) */
+ parts[this_part].size = size;
+ parts[this_part].offset = offset;
+ parts[this_part].mask_flags = mask_flags;
+ if (name)
+ strlcpy(extra_mem, name, name_len + 1);
+ else
+ sprintf(extra_mem, "Partition_%03d", this_part);
+ parts[this_part].name = extra_mem;
+ extra_mem += name_len + 1;
+
+ dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
+ this_part, parts[this_part].name, parts[this_part].offset,
+ parts[this_part].size, parts[this_part].mask_flags));
+
+ /* return (updated) pointer to extra_mem memory */
+ if (extra_mem_ptr)
+ *extra_mem_ptr = extra_mem;
+
+ /* return (updated) pointer command line string */
+ *retptr = s;
+
+ /* return partition table */
+ return parts;
+}
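+
+/*
+ * Illustrative walk-through (hypothetical spec): for "256k(a),-(b)" the
+ * call parsing "256k(a)" recurses into "-(b)"; that innermost call, being
+ * the last definition, allocates the whole mtd_partition array plus the
+ * name storage, and each invocation then fills in its own slot as the
+ * recursion unwinds.
+ */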
+
+/*
+ * Parse the command line.
+ */
+static int mtdpart_setup_real(char *s)
+{
+ cmdline_parsed = 1;
+
+ while (s != NULL) {
+ struct cmdline_mtd_partition *this_mtd;
+ struct mtd_partition *parts;
+ int mtd_id_len, num_parts;
+ char *p, *mtd_id;
+
+ mtd_id = s;
+
+ /* fetch <mtd-id> */
+ p = strchr(s, ':');
+ if (!p) {
+ printk(KERN_ERR ERRP "no mtd-id\n");
+ return -EINVAL;
+ }
+ mtd_id_len = p - mtd_id;
+
+ dbg(("parsing <%s>\n", p+1));
+
+ /*
+ * parse one mtd. have it reserve memory for the
+ * struct cmdline_mtd_partition and the mtd-id string.
+ */
+ parts = newpart(p + 1, /* cmdline */
+ &s, /* out: updated cmdline ptr */
+ &num_parts, /* out: number of parts */
+ 0, /* first partition */
+ (unsigned char**)&this_mtd, /* out: extra mem */
+ mtd_id_len + 1 + sizeof(*this_mtd) +
+ sizeof(void*)-1 /*alignment*/);
+ if (IS_ERR(parts)) {
+ /*
+ * An error occurred. We're either:
+ * a) out of memory, or
+ * b) in the middle of the partition spec
+ * Either way, this mtd is hosed and we're
+ * unlikely to succeed in parsing any more
+ */
+ return PTR_ERR(parts);
+ }
+
+ /* align this_mtd */
+ this_mtd = (struct cmdline_mtd_partition *)
+ ALIGN((unsigned long)this_mtd, sizeof(void *));
+ /* enter results */
+ this_mtd->parts = parts;
+ this_mtd->num_parts = num_parts;
+ this_mtd->mtd_id = (char*)(this_mtd + 1);
+ strlcpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);
+
+ /* link into chain */
+ this_mtd->next = partitions;
+ partitions = this_mtd;
+
+ dbg(("mtdid=<%s> num_parts=<%d>\n",
+ this_mtd->mtd_id, this_mtd->num_parts));
+
+
+ /* EOS - we're done */
+ if (*s == 0)
+ break;
+
+ /* does another spec follow? */
+ if (*s != ';') {
+ printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s);
+ return -EINVAL;
+ }
+ s++;
+ }
+
+ return 0;
+}
+
+/*
+ * Main function to be called from the MTD mapping driver/device to
+ * obtain the partitioning information. At this point the command line
+ * arguments will actually be parsed and turned to struct mtd_partition
+ * information. It returns partitions for the requested mtd device, or
+ * the first one in the chain if a NULL mtd_id is passed in.
+ */
+static int parse_cmdline_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ unsigned long long offset;
+ int i, err;
+ struct cmdline_mtd_partition *part;
+ const char *mtd_id = master->name;
+
+ /* parse command line */
+ if (!cmdline_parsed) {
+ err = mtdpart_setup_real(cmdline);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Search for the partition definition matching master->name.
+ * If master->name is not set, stop at first partition definition.
+ */
+ for (part = partitions; part; part = part->next) {
+ if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
+ break;
+ }
+
+ if (!part)
+ return 0;
+
+ for (i = 0, offset = 0; i < part->num_parts; i++) {
+ if (part->parts[i].offset == OFFSET_CONTINUOUS)
+ part->parts[i].offset = offset;
+ else
+ offset = part->parts[i].offset;
+
+ if (part->parts[i].size == SIZE_REMAINING)
+ part->parts[i].size = master->size - offset;
+
+ if (offset + part->parts[i].size > master->size) {
+ printk(KERN_WARNING ERRP
+ "%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
+ part->parts[i].size = master->size - offset;
+ }
+ offset += part->parts[i].size;
+
+ if (part->parts[i].size == 0) {
+ printk(KERN_WARNING ERRP
+ "%s: skipping zero sized partition\n",
+ part->mtd_id);
+ part->num_parts--;
+ memmove(&part->parts[i], &part->parts[i + 1],
+ sizeof(*part->parts) * (part->num_parts - i));
+ i--;
+ }
+ }
+
+ *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
+ GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ return part->num_parts;
+}
+
+
+/*
+ * This is the handler for our kernel parameter, called from
+ * main.c::checksetup(). Note that we can not yet kmalloc() anything,
+ * so we only save the commandline for later processing.
+ *
+ * This function needs to be visible for bootloaders.
+ */
+static int __init mtdpart_setup(char *s)
+{
+ cmdline = s;
+ return 1;
+}
+
+__setup("mtdparts=", mtdpart_setup);
+
+static struct mtd_part_parser cmdline_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_cmdline_partitions,
+ .name = "cmdlinepart",
+};
+
+static int __init cmdline_parser_init(void)
+{
+ if (mtdparts)
+ mtdpart_setup(mtdparts);
+ register_mtd_parser(&cmdline_parser);
+ return 0;
+}
+
+static void __exit cmdline_parser_exit(void)
+{
+ deregister_mtd_parser(&cmdline_parser);
+}
+
+module_init(cmdline_parser_init);
+module_exit(cmdline_parser_exit);
+
+MODULE_PARM_DESC(mtdparts, "Partitioning specification");
+module_param(mtdparts, charp, 0);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
+MODULE_DESCRIPTION("Command line configuration of MTD partitions");
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
new file mode 100644
index 000000000..c49d0b127
--- /dev/null
+++ b/drivers/mtd/devices/Kconfig
@@ -0,0 +1,228 @@
+menu "Self-contained MTD device drivers"
+ depends on MTD!=n
+ depends on HAS_IOMEM
+
+config MTD_PMC551
+ tristate "Ramix PMC551 PCI Mezzanine RAM card support"
+ depends on PCI
+ ---help---
+ This provides an MTD device driver for the Ramix PMC551 RAM PCI card
+ from Ramix Inc. <http://www.ramix.com/products/memory/pmc551.html>.
+ These devices come in memory configurations from 32M - 1G. If you
+ have one, you probably want to enable this.
+
+ If this driver is compiled as a module you get the ability to select
+ the size of the aperture window pointing into the device's memory.
+ What this means is that if you have a 1G card, normally the kernel
+ will use a 1G memory map as its view of the device. As a module,
+ you can select a 1M window into the memory and the driver will
+ "slide" the window around the PMC551's memory. This was
+ particularly useful on the 2.2 kernels on PPC architectures as there
+ was limited kernel space to deal with.
+
+config MTD_PMC551_BUGFIX
+ bool "PMC551 256M DRAM Bugfix"
+ depends on MTD_PMC551
+ help
+ Some of Ramix's PMC551 boards with 256M configurations have invalid
+ column and row mux values. This option will fix them, but will
+ break other memory configurations. If unsure say N.
+
+config MTD_PMC551_DEBUG
+ bool "PMC551 Debugging"
+ depends on MTD_PMC551
+ help
+ This option makes the PMC551 more verbose during its operation and
+ is only really useful if you are developing on this driver or
+ suspect a possible hardware or driver bug. If unsure say N.
+
+config MTD_MS02NV
+ tristate "DEC MS02-NV NVRAM module support"
+ depends on MACH_DECSTATION
+ help
+ This is an MTD driver for DEC's MS02-NV (54-20948-01) battery
+ backed-up NVRAM module. The module was originally meant as an NFS
+ accelerator. Say Y here if you have a DECstation 5000/2x0 or a
+ DECsystem 5900 equipped with such a module.
+
+ If you want to compile this driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/kbuild/modules.txt>.
+ The module will be called ms02-nv.
+
+config MTD_DATAFLASH
+ tristate "Support for AT45xxx DataFlash"
+ depends on SPI_MASTER
+ help
+ This enables access to AT45xxx DataFlash chips, using SPI.
+ Sometimes DataFlash chips are packaged inside MMC-format
+ cards; at this writing, the MMC stack won't handle those.
+
+config MTD_DATAFLASH_WRITE_VERIFY
+ bool "Verify DataFlash page writes"
+ depends on MTD_DATAFLASH
+ help
+ This adds an extra check when data is written to the flash.
+ It may help if you are verifying chip setup (timings etc) on
+ your board. There is a rare possibility that even though the
+ device thinks the write was successful, a bit could have been
+ flipped accidentally due to device wear or something else.
+
+config MTD_DATAFLASH_OTP
+ bool "DataFlash OTP support (Security Register)"
+ depends on MTD_DATAFLASH
+ help
+ Newer DataFlash chips (revisions C and D) support 128 bytes of
+ one-time-programmable (OTP) data. The first half may be written
+ (once) with up to 64 bytes of data, such as a serial number or
+ other key product data. The second half is programmed with a
+ unique-to-each-chip bit pattern at the factory.
+
+config MTD_M25P80
+ tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
+ depends on SPI_MASTER && MTD_SPI_NOR
+ help
+ This enables access to most modern SPI flash chips, used for
+ program and data storage. Series supported include Atmel AT26DF,
+ Spansion S25SL, SST 25VF, ST M25P, and Winbond W25X. Other chips
+ are supported as well. See the driver source for the current list,
+ or to add other chips.
+
+ Note that the original DataFlash chips (AT45 series, not AT26DF),
+ need an entirely different driver.
+
+ Set up your spi devices with the right board-specific platform data,
+ if you want to specify device partitioning or to use a device which
+ doesn't support the JEDEC ID instruction.
+
+config MTD_SPEAR_SMI
+ tristate "SPEAR MTD NOR Support through SMI controller"
+ depends on PLAT_SPEAR
+ default y
+ help
+ This enables SNOR support on SPEAR platforms using the SMI controller.
+
+config MTD_SST25L
+ tristate "Support SST25L (non JEDEC) SPI Flash chips"
+ depends on SPI_MASTER
+ help
+ This enables access to the non JEDEC SST25L SPI flash chips, used
+ for program and data storage.
+
+ Set up your spi devices with the right board-specific platform data,
+ if you want to specify device partitioning.
+
+config MTD_BCM47XXSFLASH
+ tristate "R/O support for serial flash on BCMA bus"
+ depends on BCMA_SFLASH
+ help
+ BCMA bus can have various flash memories attached; they are
+ registered by bcma as platform devices. This enables the driver for
+ serial flash memories (only read-only mode is implemented).
+
+config MTD_SLRAM
+ tristate "Uncached system RAM"
+ help
+ If your CPU cannot cache all of the physical memory in your machine,
+ you can still use it for storage or swap by using this driver to
+ present it to the system as a Memory Technology Device.
+
+config MTD_PHRAM
+ tristate "Physical system RAM"
+ help
+ This is a re-implementation of the slram driver above.
+
+ Use this driver to access physical memory that the kernel proper
+ doesn't have access to, memory beyond the mem=xxx limit, nvram,
+ memory on the video card, etc...
+
+config MTD_LART
+ tristate "28F160xx flash driver for LART"
+ depends on SA1100_LART
+ help
+ This enables the flash driver for LART. Please note that you do
+ not need any mapping/chip driver for LART. This one does it all
+ for you, so go disable all of those if you enabled some of them (:
+
+config MTD_MTDRAM
+ tristate "Test driver using RAM"
+ help
+ This enables a test MTD device driver which uses vmalloc() to
+ provide storage. You probably want to say 'N' unless you're
+ testing stuff.
+
+config MTDRAM_TOTAL_SIZE
+ int "MTDRAM device size in KiB"
+ depends on MTD_MTDRAM
+ default "4096"
+ help
+ This allows you to configure the total size of the MTD device
+ emulated by the MTDRAM driver. If the MTDRAM driver is built
+ as a module, it is also possible to specify this as a parameter when
+ loading the module.
+
+config MTDRAM_ERASE_SIZE
+ int "MTDRAM erase block size in KiB"
+ depends on MTD_MTDRAM
+ default "128"
+ help
+ This allows you to configure the size of the erase blocks in the
+ device emulated by the MTDRAM driver. If the MTDRAM driver is built
+ as a module, it is also possible to specify this as a parameter when
+ loading the module.
+
+#If not a module (I don't want to test it as a module)
+config MTDRAM_ABS_POS
+ hex "SRAM Hexadecimal Absolute position or 0"
+ depends on MTD_MTDRAM=y
+ default "0"
+ help
+ If you have system RAM accessible by the CPU but not used by Linux
+ in normal operation, you can give the physical address at which the
+ available RAM starts, and the MTDRAM driver will use it instead of
+ allocating space from Linux's available memory. Otherwise, leave
+ this set to zero. Most people will want to leave this as zero.
+
+config MTD_BLOCK2MTD
+ tristate "MTD using block device"
+ depends on BLOCK
+ help
+ This driver allows a block device to appear as an MTD. It would
+ generally be used in the following cases:
+
+ Using Compact Flash as an MTD, these usually present themselves to
+ the system as an ATA drive.
+ Testing MTD users (eg JFFS2) on large media and media that might
+ be removed during a write (using the floppy drive).
+
+comment "Disk-On-Chip Device Drivers"
+
+config MTD_DOCG3
+ tristate "M-Systems Disk-On-Chip G3"
+ select BCH
+ select BCH_CONST_PARAMS
+ select BITREVERSE
+ ---help---
+ This provides an MTD device driver for the M-Systems DiskOnChip
+ G3 devices.
+
+ The driver provides access to G3 DiskOnChip, distributed by
+ M-Systems and now Sandisk. The support is very experimental,
+ and doesn't give access to any write operations.
+
+config MTD_ST_SPI_FSM
+ tristate "ST Microelectronics SPI FSM Serial Flash Controller"
+ depends on ARCH_STI
+ help
+ This provides an MTD device driver for the ST Microelectronics
+ SPI Fast Sequence Mode (FSM) Serial Flash Controller and support
+ for a subset of connected Serial Flash devices.
+
+if MTD_DOCG3
+config BCH_CONST_M
+ default 14
+config BCH_CONST_T
+ default 4
+endif
+
+endmenu
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
new file mode 100644
index 000000000..f0b0e611d
--- /dev/null
+++ b/drivers/mtd/devices/Makefile
@@ -0,0 +1,21 @@
+#
+# linux/drivers/mtd/devices/Makefile
+#
+
+obj-$(CONFIG_MTD_DOCG3) += docg3.o
+obj-$(CONFIG_MTD_SLRAM) += slram.o
+obj-$(CONFIG_MTD_PHRAM) += phram.o
+obj-$(CONFIG_MTD_PMC551) += pmc551.o
+obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o
+obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
+obj-$(CONFIG_MTD_LART) += lart.o
+obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
+obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
+obj-$(CONFIG_MTD_M25P80) += m25p80.o
+obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
+obj-$(CONFIG_MTD_SST25L) += sst25l.o
+obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
+obj-$(CONFIG_MTD_ST_SPI_FSM) += st_spi_fsm.o
+
+
+CFLAGS_docg3.o += -I$(src)
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
new file mode 100644
index 000000000..3d008a941
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -0,0 +1,339 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcm47xxsflash.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
+
+static const char * const probes[] = { "bcm47xxpart", NULL };
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static void bcm47xxsflash_cmd(struct bcm47xxsflash *b47s, u32 opcode)
+{
+ int i;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHCTL, BCMA_CC_FLASHCTL_START | opcode);
+ for (i = 0; i < 1000; i++) {
+ if (!(b47s->cc_read(b47s, BCMA_CC_FLASHCTL) &
+ BCMA_CC_FLASHCTL_BUSY))
+ return;
+ cpu_relax();
+ }
+ pr_err("Control command failed (timeout)!\n");
+}
+
+static int bcm47xxsflash_poll(struct bcm47xxsflash *b47s, int timeout)
+{
+ unsigned long deadline = jiffies + timeout;
+
+ do {
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_RDSR);
+ if (!(b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
+ SR_ST_WIP))
+ return 0;
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_STATUS);
+ if (b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
+ SR_AT_READY)
+ return 0;
+ break;
+ }
+
+ cpu_relax();
+ udelay(1);
+ } while (!time_after_eq(jiffies, deadline));
+
+ pr_err("Timeout waiting for flash to be ready!\n");
+
+ return -EBUSY;
+}
+
+/**************************************************
+ * MTD ops
+ **************************************************/
+
+static int bcm47xxsflash_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int err;
+
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_WREN);
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, erase->addr);
+ /* Newer flashes have "sub-sectors" which can be erased
+ * independently with a new command: ST_SSE. The ST_SE command
+ * erases 64KB just as before.
+ */
+ if (b47s->blocksize < (64 * 1024))
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_SSE);
+ else
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_SE);
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, erase->addr << 1);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_PAGE_ERASE);
+ break;
+ }
+
+ err = bcm47xxsflash_poll(b47s, HZ);
+ if (err)
+ erase->state = MTD_ERASE_FAILED;
+ else
+ erase->state = MTD_ERASE_DONE;
+
+ if (erase->callback)
+ erase->callback(erase);
+
+ return err;
+}
+
+static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+
+ /* Check address range */
+ if ((from + len) > mtd->size)
+ return -EINVAL;
+
+ memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from),
+ len);
+ *retlen = len;
+
+ return len;
+}
+
+static int bcm47xxsflash_write_st(struct mtd_info *mtd, u32 offset, size_t len,
+ const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int written = 0;
+
+ /* Enable writes */
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_WREN);
+
+ /* Write first byte */
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, offset);
+ b47s->cc_write(b47s, BCMA_CC_FLASHDATA, *buf++);
+
+ /* Program page */
+ if (b47s->bcma_cc->core->id.rev < 20) {
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_PP);
+ return 1; /* 1B written */
+ }
+
+ /* Program page and set CSA (on newer chips we can continue writing) */
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_CSA | OPCODE_ST_PP);
+ offset++;
+ len--;
+ written++;
+
+ while (len > 0) {
+ /* Page boundary, another function call is needed */
+ if ((offset & 0xFF) == 0)
+ break;
+
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_CSA | *buf++);
+ offset++;
+ len--;
+ written++;
+ }
+
+ /* All done, drop CSA & poll */
+ b47s->cc_write(b47s, BCMA_CC_FLASHCTL, 0);
+ udelay(1);
+ if (bcm47xxsflash_poll(b47s, HZ / 10))
+ pr_err("Flash rejected dropping CSA\n");
+
+ return written;
+}
+
+static int bcm47xxsflash_write_at(struct mtd_info *mtd, u32 offset, size_t len,
+ const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ u32 mask = b47s->blocksize - 1;
+ u32 page = (offset & ~mask) << 1;
+ u32 byte = offset & mask;
+ int written = 0;
+
+ /* If we don't overwrite the whole page, read it into the buffer first */
+ if (byte || (len < b47s->blocksize)) {
+ int err;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_LOAD);
+ /* 250 us for AT45DB321B */
+ err = bcm47xxsflash_poll(b47s, HZ / 1000);
+ if (err) {
+ pr_err("Timeout reading page 0x%X into buffer\n", page);
+ return err;
+ }
+ }
+
+ /* Change buffer content with our data */
+ while (len > 0) {
+ /* Page boundary, another function call is needed */
+ if (byte == b47s->blocksize)
+ break;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, byte++);
+ b47s->cc_write(b47s, BCMA_CC_FLASHDATA, *buf++);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_WRITE);
+ len--;
+ written++;
+ }
+
+ /* Program page with the buffer content */
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_PROGRAM);
+
+ return written;
+}
+
+static int bcm47xxsflash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int written;
+
+ /* Writing functions can return without writing all passed data, for
+ * example when the hardware is too old or when we hit a page boundary.
+ */
+ while (len > 0) {
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ written = bcm47xxsflash_write_st(mtd, to, len, buf);
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ written = bcm47xxsflash_write_at(mtd, to, len, buf);
+ break;
+ default:
+ BUG_ON(1);
+ }
+ if (written < 0) {
+ pr_err("Error writing at offset 0x%llX\n", to);
+ return written;
+ }
+ to += (loff_t)written;
+ len -= written;
+ *retlen += written;
+ buf += written;
+ }
+
+ return 0;
+}
+
+static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
+{
+ struct mtd_info *mtd = &b47s->mtd;
+
+ mtd->priv = b47s;
+ mtd->name = "bcm47xxsflash";
+ mtd->owner = THIS_MODULE;
+
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = b47s->size;
+ mtd->erasesize = b47s->blocksize;
+ mtd->writesize = 1;
+ mtd->writebufsize = 1;
+
+ mtd->_erase = bcm47xxsflash_erase;
+ mtd->_read = bcm47xxsflash_read;
+ mtd->_write = bcm47xxsflash_write;
+}
+
+/**************************************************
+ * BCMA
+ **************************************************/
+
+static int bcm47xxsflash_bcma_cc_read(struct bcm47xxsflash *b47s, u16 offset)
+{
+ return bcma_cc_read32(b47s->bcma_cc, offset);
+}
+
+static void bcm47xxsflash_bcma_cc_write(struct bcm47xxsflash *b47s, u16 offset,
+ u32 value)
+{
+ bcma_cc_write32(b47s->bcma_cc, offset, value);
+}
+
+static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
+{
+ struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxsflash *b47s;
+ int err;
+
+ b47s = devm_kzalloc(&pdev->dev, sizeof(*b47s), GFP_KERNEL);
+ if (!b47s)
+ return -ENOMEM;
+ sflash->priv = b47s;
+
+ b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
+ b47s->cc_read = bcm47xxsflash_bcma_cc_read;
+ b47s->cc_write = bcm47xxsflash_bcma_cc_write;
+
+ switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
+ case BCMA_CC_FLASHT_STSER:
+ b47s->type = BCM47XXSFLASH_TYPE_ST;
+ break;
+ case BCMA_CC_FLASHT_ATSER:
+ b47s->type = BCM47XXSFLASH_TYPE_ATMEL;
+ break;
+ }
+
+ b47s->window = sflash->window;
+ b47s->blocksize = sflash->blocksize;
+ b47s->numblocks = sflash->numblocks;
+ b47s->size = sflash->size;
+ bcm47xxsflash_fill_mtd(b47s);
+
+ err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ return err;
+ }
+
+ if (bcm47xxsflash_poll(b47s, HZ / 10))
+ pr_warn("Serial flash busy\n");
+
+ return 0;
+}
+
+static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
+{
+ struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxsflash *b47s = sflash->priv;
+
+ mtd_device_unregister(&b47s->mtd);
+
+ return 0;
+}
+
+static struct platform_driver bcma_sflash_driver = {
+ .probe = bcm47xxsflash_bcma_probe,
+ .remove = bcm47xxsflash_bcma_remove,
+ .driver = {
+ .name = "bcma_sflash",
+ },
+};
+
+/**************************************************
+ * Init
+ **************************************************/
+
+module_platform_driver(bcma_sflash_driver);
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
new file mode 100644
index 000000000..fe93daf4f
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -0,0 +1,76 @@
+#ifndef __BCM47XXSFLASH_H
+#define __BCM47XXSFLASH_H
+
+#include <linux/mtd/mtd.h>
+
+/* Used for ST flashes only. */
+#define OPCODE_ST_WREN 0x0006 /* Write Enable */
+#define OPCODE_ST_WRDIS 0x0004 /* Write Disable */
+#define OPCODE_ST_RDSR 0x0105 /* Read Status Register */
+#define OPCODE_ST_WRSR 0x0101 /* Write Status Register */
+#define OPCODE_ST_READ 0x0303 /* Read Data Bytes */
+#define OPCODE_ST_PP 0x0302 /* Page Program */
+#define OPCODE_ST_SE 0x02d8 /* Sector Erase */
+#define OPCODE_ST_BE 0x00c7 /* Bulk Erase */
+#define OPCODE_ST_DP 0x00b9 /* Deep Power-down */
+#define OPCODE_ST_RES 0x03ab /* Read Electronic Signature */
+#define OPCODE_ST_CSA 0x1000 /* Keep chip select asserted */
+#define OPCODE_ST_SSE 0x0220 /* Sub-sector Erase */
+
+/* Used for Atmel flashes only. */
+#define OPCODE_AT_READ 0x07e8
+#define OPCODE_AT_PAGE_READ 0x07d2
+#define OPCODE_AT_STATUS 0x01d7
+#define OPCODE_AT_BUF1_WRITE 0x0384
+#define OPCODE_AT_BUF2_WRITE 0x0387
+#define OPCODE_AT_BUF1_ERASE_PROGRAM 0x0283
+#define OPCODE_AT_BUF2_ERASE_PROGRAM 0x0286
+#define OPCODE_AT_BUF1_PROGRAM 0x0288
+#define OPCODE_AT_BUF2_PROGRAM 0x0289
+#define OPCODE_AT_PAGE_ERASE 0x0281
+#define OPCODE_AT_BLOCK_ERASE 0x0250
+#define OPCODE_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define OPCODE_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define OPCODE_AT_BUF1_LOAD 0x0253
+#define OPCODE_AT_BUF2_LOAD 0x0255
+#define OPCODE_AT_BUF1_COMPARE 0x0260
+#define OPCODE_AT_BUF2_COMPARE 0x0261
+#define OPCODE_AT_BUF1_REPROGRAM 0x0258
+#define OPCODE_AT_BUF2_REPROGRAM 0x0259
+
+/* Status register bits for ST flashes */
+#define SR_ST_WIP 0x01 /* Write In Progress */
+#define SR_ST_WEL 0x02 /* Write Enable Latch */
+#define SR_ST_BP_MASK 0x1c /* Block Protect */
+#define SR_ST_BP_SHIFT 2
+#define SR_ST_SRWD 0x80 /* Status Register Write Disable */
+
+/* Status register bits for Atmel flashes */
+#define SR_AT_READY 0x80
+#define SR_AT_MISMATCH 0x40
+#define SR_AT_ID_MASK 0x38
+#define SR_AT_ID_SHIFT 3
+
+struct bcma_drv_cc;
+
+enum bcm47xxsflash_type {
+ BCM47XXSFLASH_TYPE_ATMEL,
+ BCM47XXSFLASH_TYPE_ST,
+};
+
+struct bcm47xxsflash {
+ struct bcma_drv_cc *bcma_cc;
+ int (*cc_read)(struct bcm47xxsflash *b47s, u16 offset);
+ void (*cc_write)(struct bcm47xxsflash *b47s, u16 offset, u32 value);
+
+ enum bcm47xxsflash_type type;
+
+ u32 window;
+ u32 blocksize;
+ u16 numblocks;
+ u32 size;
+
+ struct mtd_info mtd;
+};
+
+#endif /* __BCM47XXSFLASH_H */
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
new file mode 100644
index 000000000..b16f3cda9
--- /dev/null
+++ b/drivers/mtd/devices/block2mtd.c
@@ -0,0 +1,500 @@
+/*
+ * block2mtd.c - create an mtd from a block device
+ *
+ * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
+ * Copyright (C) 2004-2006 Joern Engel <joern@wh.fh-wedel.de>
+ *
+ * Licence: GPL
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/*
+ * When the first attempt at device initialization fails, we may need to
+ * wait a little bit and retry. This timeout, by default 3 seconds, gives
+ * device time to start up. Required on BCM2708 and a few other chipsets.
+ */
+#define MTD_DEFAULT_TIMEOUT 3
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/pagemap.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mutex.h>
+#include <linux/mount.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+
+/* Info for the block device */
+struct block2mtd_dev {
+ struct list_head list;
+ struct block_device *blkdev;
+ struct mtd_info mtd;
+ struct mutex write_mutex;
+};
+
+
+/* Static info about the MTD, used in cleanup_module */
+static LIST_HEAD(blkmtd_device_list);
+
+
+static struct page *page_read(struct address_space *mapping, int index)
+{
+ return read_mapping_page(mapping, index, NULL);
+}
+
+/* erase a specified part of the device */
+static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
+{
+ struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ struct page *page;
+ int index = to >> PAGE_SHIFT; // page index
+ int pages = len >> PAGE_SHIFT;
+ u_long *p;
+ u_long *max;
+
+ while (pages) {
+ page = page_read(mapping, index);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
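+ /* Only dirty the page if it is not already all 0xff; scanning a
+ word at a time avoids needless writeback. */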
+ max = page_address(page) + PAGE_SIZE;
+ for (p = page_address(page); p < max; p++)
+ if (*p != -1UL) {
+ lock_page(page);
+ memset(page_address(page), 0xff, PAGE_SIZE);
+ set_page_dirty(page);
+ unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
+ break;
+ }
+
+ page_cache_release(page);
+ pages--;
+ index++;
+ }
+ return 0;
+}
+static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ size_t from = instr->addr;
+ size_t len = instr->len;
+ int err;
+
+ instr->state = MTD_ERASING;
+ mutex_lock(&dev->write_mutex);
+ err = _block2mtd_erase(dev, from, len);
+ mutex_unlock(&dev->write_mutex);
+ if (err) {
+ pr_err("erase failed err = %d\n", err);
+ instr->state = MTD_ERASE_FAILED;
+ } else
+ instr->state = MTD_ERASE_DONE;
+
+ mtd_erase_callback(instr);
+ return err;
+}
+
+
+static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ struct page *page;
+ int index = from >> PAGE_SHIFT;
+ int offset = from & (PAGE_SIZE-1);
+ int cpylen;
+
+ while (len) {
+ if ((offset + len) > PAGE_SIZE)
+ cpylen = PAGE_SIZE - offset; // multiple pages
+ else
+ cpylen = len; // this page
+ len = len - cpylen;
+
+ page = page_read(dev->blkdev->bd_inode->i_mapping, index);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ memcpy(buf, page_address(page) + offset, cpylen);
+ page_cache_release(page);
+
+ if (retlen)
+ *retlen += cpylen;
+ buf += cpylen;
+ offset = 0;
+ index++;
+ }
+ return 0;
+}
+
+
+/* write data to the underlying device */
+static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
+ loff_t to, size_t len, size_t *retlen)
+{
+ struct page *page;
+ struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ int index = to >> PAGE_SHIFT; // page index
+ int offset = to & ~PAGE_MASK; // page offset
+ int cpylen;
+
+ while (len) {
+ if ((offset+len) > PAGE_SIZE)
+ cpylen = PAGE_SIZE - offset; // multiple pages
+ else
+ cpylen = len; // this page
+ len = len - cpylen;
+
+ page = page_read(mapping, index);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ if (memcmp(page_address(page)+offset, buf, cpylen)) {
+ lock_page(page);
+ memcpy(page_address(page) + offset, buf, cpylen);
+ set_page_dirty(page);
+ unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
+ }
+ page_cache_release(page);
+
+ if (retlen)
+ *retlen += cpylen;
+
+ buf += cpylen;
+ offset = 0;
+ index++;
+ }
+ return 0;
+}
+
+
+static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ int err;
+
+ mutex_lock(&dev->write_mutex);
+ err = _block2mtd_write(dev, buf, to, len, retlen);
+ mutex_unlock(&dev->write_mutex);
+ if (err > 0)
+ err = 0;
+ return err;
+}
+
+
+/* sync the device - wait until the write queue is empty */
+static void block2mtd_sync(struct mtd_info *mtd)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ sync_blockdev(dev->blkdev);
+ return;
+}
+
+
+static void block2mtd_free_device(struct block2mtd_dev *dev)
+{
+ if (!dev)
+ return;
+
+ kfree(dev->mtd.name);
+
+ if (dev->blkdev) {
+ invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
+ 0, -1);
+ blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ }
+
+ kfree(dev);
+}
+
+
+static struct block2mtd_dev *add_device(char *devname, int erase_size,
+ int timeout)
+{
+#ifndef MODULE
+ int i;
+#endif
+ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
+ struct block_device *bdev = ERR_PTR(-ENODEV);
+ struct block2mtd_dev *dev;
+ char *name;
+
+ if (!devname)
+ return NULL;
+
+ dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ /* Get a handle on the device */
+ bdev = blkdev_get_by_path(devname, mode, dev);
+
+#ifndef MODULE
+ /*
+ * We might not have the root device mounted at this point.
+ * Try to resolve the device name by other means.
+ */
+ for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
+ dev_t devt;
+
+ if (i)
+ /*
+ * Calling wait_for_device_probe in the first loop
+ * was not enough, sleep for a bit in subsequent
+ * go-arounds.
+ */
+ msleep(1000);
+ wait_for_device_probe();
+
+ devt = name_to_dev_t(devname);
+ if (!devt)
+ continue;
+ bdev = blkdev_get_by_dev(devt, mode, dev);
+ }
+#endif
+
+ if (IS_ERR(bdev)) {
+ pr_err("error: cannot open device %s\n", devname);
+ goto err_free_block2mtd;
+ }
+ dev->blkdev = bdev;
+
+ if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
+ pr_err("attempting to use an MTD device as a block device\n");
+ goto err_free_block2mtd;
+ }
+
+ if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
+ pr_err("erasesize must be a divisor of device size\n");
+ goto err_free_block2mtd;
+ }
+
+ mutex_init(&dev->write_mutex);
+
+ /* Setup the MTD structure */
+ /* make the name contain the block device in */
+ name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
+ if (!name)
+ goto err_destroy_mutex;
+
+ dev->mtd.name = name;
+
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.erasesize = erase_size;
+ dev->mtd.writesize = 1;
+ dev->mtd.writebufsize = PAGE_SIZE;
+ dev->mtd.type = MTD_RAM;
+ dev->mtd.flags = MTD_CAP_RAM;
+ dev->mtd._erase = block2mtd_erase;
+ dev->mtd._write = block2mtd_write;
+ dev->mtd._sync = block2mtd_sync;
+ dev->mtd._read = block2mtd_read;
+ dev->mtd.priv = dev;
+ dev->mtd.owner = THIS_MODULE;
+
+ if (mtd_device_register(&dev->mtd, NULL, 0)) {
+ /* Device didn't get added, so free the entry */
+ goto err_destroy_mutex;
+ }
+
+ list_add(&dev->list, &blkmtd_device_list);
+ pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
+ dev->mtd.index,
+ dev->mtd.name + strlen("block2mtd: "),
+ dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ return dev;
+
+err_destroy_mutex:
+ mutex_destroy(&dev->write_mutex);
+err_free_block2mtd:
+ block2mtd_free_device(dev);
+ return NULL;
+}
+
+
+/* This function works similarly to regular strtoul. In addition, it
+ * allows some suffixes for a more human-readable number format:
+ * ki, Ki, kiB, KiB - multiply result with 1024
+ * Mi, MiB - multiply result with 1024^2
+ * Gi, GiB - multiply result with 1024^3
+ */
+static int ustrtoul(const char *cp, char **endp, unsigned int base)
+{
+ unsigned long result = simple_strtoul(cp, endp, base);
+ switch (**endp) {
+ case 'G' :
+ result *= 1024;
+ case 'M':
+ result *= 1024;
+ case 'K':
+ case 'k':
+ result *= 1024;
+ /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
+ if ((*endp)[1] == 'i') {
+ if ((*endp)[2] == 'B')
+ (*endp) += 3;
+ else
+ (*endp) += 2;
+ }
+ }
+ return result;
+}
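+
+/*
+ * For example (illustrative): ustrtoul("64ki", &p, 0) and
+ * ustrtoul("64KiB", &p, 0) both yield 65536, while a plain "64" yields 64.
+ */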
+
+
+static int parse_num(size_t *num, const char *token)
+{
+ char *endp;
+ size_t n;
+
+ n = (size_t) ustrtoul(token, &endp, 0);
+ if (*endp)
+ return -EINVAL;
+
+ *num = n;
+ return 0;
+}
+
+
+static inline void kill_final_newline(char *str)
+{
+ char *newline = strrchr(str, '\n');
+ if (newline && !newline[1])
+ *newline = 0;
+}
+
+
+#ifndef MODULE
+static int block2mtd_init_called = 0;
+/* 80 for device, 12 for erase size */
+static char block2mtd_paramline[80 + 12];
+#endif
+
+static int block2mtd_setup2(const char *val)
+{
+ /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
+ char buf[80 + 12 + 80 + 8];
+ char *str = buf;
+ char *token[2];
+ char *name;
+ size_t erase_size = PAGE_SIZE;
+ unsigned long timeout = MTD_DEFAULT_TIMEOUT;
+ int i, ret;
+
+ if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
+ pr_err("parameter too long\n");
+ return 0;
+ }
+
+ strcpy(str, val);
+ kill_final_newline(str);
+
+ for (i = 0; i < 2; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str) {
+ pr_err("too many arguments\n");
+ return 0;
+ }
+
+ if (!token[0]) {
+ pr_err("no argument\n");
+ return 0;
+ }
+
+ name = token[0];
+ if (strlen(name) + 1 > 80) {
+ pr_err("device name too long\n");
+ return 0;
+ }
+
+ if (token[1]) {
+ ret = parse_num(&erase_size, token[1]);
+ if (ret) {
+ pr_err("illegal erase size\n");
+ return 0;
+ }
+ }
+
+ add_device(name, erase_size, timeout);
+
+ return 0;
+}
+
+
+static int block2mtd_setup(const char *val, struct kernel_param *kp)
+{
+#ifdef MODULE
+ return block2mtd_setup2(val);
+#else
+ /* If more parameters are later passed in via
+ /sys/module/block2mtd/parameters/block2mtd
+ and block2mtd_init() has already been called,
+ we can parse the argument now. */
+
+ if (block2mtd_init_called)
+ return block2mtd_setup2(val);
+
+ /* During early boot stage, we only save the parameters
+ here. We must parse them later: if the param is passed
+ from the kernel boot command line, block2mtd_setup() is
+ called so early that it is not possible to resolve
+ the device (even kmalloc() fails). Defer that work to
+ block2mtd_setup2(). */
+
+ strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));
+
+ return 0;
+#endif
+}
+
+
+module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
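+
+/*
+ * Example (illustrative device path): block2mtd=/dev/loop0,64KiB creates an
+ * MTD over /dev/loop0 with a 64 KiB erase size; when the erase size is
+ * omitted it defaults to PAGE_SIZE. The device size must be a multiple of
+ * the erase size.
+ */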
+
+static int __init block2mtd_init(void)
+{
+ int ret = 0;
+
+#ifndef MODULE
+ if (strlen(block2mtd_paramline))
+ ret = block2mtd_setup2(block2mtd_paramline);
+ block2mtd_init_called = 1;
+#endif
+
+ return ret;
+}
+
+
+static void block2mtd_exit(void)
+{
+ struct list_head *pos, *next;
+
+ /* Remove the MTD devices */
+ list_for_each_safe(pos, next, &blkmtd_device_list) {
+ struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
+ block2mtd_sync(&dev->mtd);
+ mtd_device_unregister(&dev->mtd);
+ mutex_destroy(&dev->write_mutex);
+ pr_info("mtd%d: [%s] removed\n",
+ dev->mtd.index,
+ dev->mtd.name + strlen("block2mtd: "));
+ list_del(&dev->list);
+ block2mtd_free_device(dev);
+ }
+}
+
+late_initcall(block2mtd_init);
+module_exit(block2mtd_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joern Engel <joern@lazybastard.org>");
+MODULE_DESCRIPTION("Emulate an MTD using a block device");
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
new file mode 100644
index 000000000..866d31904
--- /dev/null
+++ b/drivers/mtd/devices/docg3.c
@@ -0,0 +1,2141 @@
+/*
+ * Handles the M-Systems DiskOnChip G3 chip
+ *
+ * Copyright (C) 2011 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/bitmap.h>
+#include <linux/bitrev.h>
+#include <linux/bch.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define CREATE_TRACE_POINTS
+#include "docg3.h"
+
+/*
+ * This driver handles the DiskOnChip G3 flash memory.
+ *
+ * As no specification is available from M-Systems/Sandisk, this driver lacks
+ * several functions available on the chip, such as:
+ * - IPL write
+ *
+ * The bus data width (8 bits versus 16 bits) is not handled (if_cfg flag), and
+ * the driver assumes a 16-bit data bus.
+ *
+ * DocG3 relies on 2 ECC algorithms, which are handled in hardware:
+ * - a 1-byte Hamming code stored in the OOB for each page
+ * - a 7-byte BCH code stored in the OOB for each page
+ * The BCH ECC is:
+ * - BCH is in GF(2^14)
+ * - BCH is over data of 520 bytes (512 page bytes + 7 page_info bytes
+ * + 1 hamming byte)
+ * - BCH can correct up to 4 bits (t = 4)
+ * - BCH syndromes are calculated in hardware, and checked in hardware as well
+ *
+ */
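+/*
+ * A minimal sketch of how these BCH parameters map onto the kernel BCH
+ * library, as done later in docg3_probe() (the constants live in docg3.h;
+ * m = 14 and t = 4 follow from the description above):
+ *
+ *   struct bch_control *bch;
+ *
+ *   bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, DOC_ECC_BCH_PRIMPOLY);
+ *   // decode_bch() then reports up to t = 4 correctable bit errors over
+ *   // the 520 covered bytes, or a negative value when uncorrectable.
+ */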
+
+static unsigned int reliable_mode;
+module_param(reliable_mode, uint, 0);
+MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
+ "2=reliable) : MLC normal operations are in normal mode");
+
+/**
+ * struct docg3_oobinfo - DiskOnChip G3 OOB layout
+ * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
+ * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
+ * @oobfree: free pageinfo bytes (bytes 0 to 6, and byte 15)
+ * @oobavail: 8 available bytes remaining after ECC toll
+ */
+static struct nand_ecclayout docg3_oobinfo = {
+ .eccbytes = 8,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
+ .oobfree = {{0, 7}, {15, 1} },
+ .oobavail = 8,
+};
+
+static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
+{
+ u8 val = readb(docg3->cascade->base + reg);
+
+ trace_docg3_io(0, 8, reg, (int)val);
+ return val;
+}
+
+static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
+{
+ u16 val = readw(docg3->cascade->base + reg);
+
+ trace_docg3_io(0, 16, reg, (int)val);
+ return val;
+}
+
+static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
+{
+ writeb(val, docg3->cascade->base + reg);
+ trace_docg3_io(1, 8, reg, val);
+}
+
+static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
+{
+ writew(val, docg3->cascade->base + reg);
+ trace_docg3_io(1, 16, reg, val);
+}
+
+static inline void doc_flash_command(struct docg3 *docg3, u8 cmd)
+{
+ doc_writeb(docg3, cmd, DOC_FLASHCOMMAND);
+}
+
+static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq)
+{
+ doc_writeb(docg3, seq, DOC_FLASHSEQUENCE);
+}
+
+static inline void doc_flash_address(struct docg3 *docg3, u8 addr)
+{
+ doc_writeb(docg3, addr, DOC_FLASHADDRESS);
+}
+
+static char const * const part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int doc_register_readb(struct docg3 *docg3, int reg)
+{
+ u8 val;
+
+ doc_writew(docg3, reg, DOC_READADDRESS);
+ val = doc_readb(docg3, reg);
+ doc_vdbg("Read register %04x : %02x\n", reg, val);
+ return val;
+}
+
+static int doc_register_readw(struct docg3 *docg3, int reg)
+{
+ u16 val;
+
+ doc_writew(docg3, reg, DOC_READADDRESS);
+ val = doc_readw(docg3, reg);
+ doc_vdbg("Read register %04x : %04x\n", reg, val);
+ return val;
+}
+
+/**
+ * doc_delay - delay docg3 operations
+ * @docg3: the device
+ * @nbNOPs: the number of NOPs to issue
+ *
+ * As no specification is available, the right timings between chip commands are
+ * unknown. The only available piece of information is the observed NOPs on a
+ * working docg3 chip.
+ * Therefore, doc_delay relies on a busy loop of NOPs, instead of the
+ * scheduler-friendlier msleep() or the blocking mdelay().
+ */
+static void doc_delay(struct docg3 *docg3, int nbNOPs)
+{
+ int i;
+
+ doc_vdbg("NOP x %d\n", nbNOPs);
+ for (i = 0; i < nbNOPs; i++)
+ doc_writeb(docg3, 0, DOC_NOP);
+}
+
+static int is_prot_seq_error(struct docg3 *docg3)
+{
+ int ctrl;
+
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR);
+}
+
+static int doc_is_ready(struct docg3 *docg3)
+{
+ int ctrl;
+
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return ctrl & DOC_CTRL_FLASHREADY;
+}
+
+static int doc_wait_ready(struct docg3 *docg3)
+{
+ int maxWaitCycles = 100;
+
+ do {
+ doc_delay(docg3, 4);
+ cpu_relax();
+ } while (!doc_is_ready(docg3) && maxWaitCycles--);
+ doc_delay(docg3, 2);
+ if (maxWaitCycles > 0)
+ return 0;
+ else
+ return -EIO;
+}
+
+static int doc_reset_seq(struct docg3 *docg3)
+{
+ int ret;
+
+ doc_writeb(docg3, 0x10, DOC_FLASHCONTROL);
+ doc_flash_sequence(docg3, DOC_SEQ_RESET);
+ doc_flash_command(docg3, DOC_CMD_RESET);
+ doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+
+ doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? "false" : "true");
+ return ret;
+}
+
+/**
+ * doc_read_data_area - Read data from data area
+ * @docg3: the device
+ * @buf: the buffer to fill in (might be NULL for dummy reads)
+ * @len: the length to read
+ * @first: first time read, DOC_READADDRESS should be set
+ *
+ * Reads bytes from flash data. Handles the single byte / even bytes reads.
+ */
+static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
+ int first)
+{
+ int i, cdr, len4;
+ u16 data16, *dst16;
+ u8 data8, *dst8;
+
+ doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x1;
+ len4 = len - cdr;
+
+ if (first)
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ dst16 = buf;
+ for (i = 0; i < len4; i += 2) {
+ data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
+ if (dst16) {
+ *dst16 = data16;
+ dst16++;
+ }
+ }
+
+ if (cdr) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_delay(docg3, 1);
+ dst8 = (u8 *)dst16;
+ for (i = 0; i < cdr; i++) {
+ data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
+ if (dst8) {
+ *dst8 = data8;
+ dst8++;
+ }
+ }
+ }
+}
+
+/**
+ * doc_write_data_area - Write data into data area
+ * @docg3: the device
+ * @buf: the buffer to get input bytes from
+ * @len: the length to write
+ *
+ * Writes bytes into flash data. Handles the single byte / even bytes writes.
+ */
+static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
+{
+ int i, cdr, len4;
+ u16 *src16;
+ u8 *src8;
+
+ doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x3;
+ len4 = len - cdr;
+
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ src16 = (u16 *)buf;
+ for (i = 0; i < len4; i += 2) {
+ doc_writew(docg3, *src16, DOC_IOSPACE_DATA);
+ src16++;
+ }
+
+ src8 = (u8 *)src16;
+ for (i = 0; i < cdr; i++) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_writeb(docg3, *src8, DOC_IOSPACE_DATA);
+ src8++;
+ }
+}
+
+/**
+ * doc_set_reliable_mode - Sets the flash to normal or reliable data mode
+ * @docg3: the device
+ *
+ * The reliable data mode is a bit slower than the fast mode, but less errors
+ * occur. Entering the reliable mode cannot be done without entering the fast
+ * mode first.
+ *
+ * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of blocks
+ * (4,5) makes the hardware also write to page 1 of blocks (4,5). Reading
+ * from page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the same
+ * result, which is a logical AND between the bytes of page 0 and page 1 (which
+ * is consistent with the fact that writing to a page _clears_ bits of that
+ * page).
+ */
+static void doc_set_reliable_mode(struct docg3 *docg3)
+{
+ static char *strmode[] = { "normal", "fast", "reliable", "invalid" };
+
+ doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]);
+ switch (docg3->reliable) {
+ case 0:
+ break;
+ case 1:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ break;
+ case 2:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ break;
+ default:
+ doc_err("doc_set_reliable_mode(): invalid mode\n");
+ break;
+ }
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_set_asic_mode - Set the ASIC mode
+ * @docg3: the device
+ * @mode: the mode
+ *
+ * The ASIC can work in 3 modes:
+ * - RESET: all registers are zeroed
+ * - NORMAL: receives and handles commands
+ * - POWERDOWN: minimal power use, flash parts shut off
+ */
+static void doc_set_asic_mode(struct docg3 *docg3, u8 mode)
+{
+ int i;
+
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+
+ mode |= DOC_ASICMODE_MDWREN;
+ doc_dbg("doc_set_asic_mode(%02x)\n", mode);
+ doc_writeb(docg3, mode, DOC_ASICMODE);
+ doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_set_device_id - Sets the device id for cascaded G3 chips
+ * @docg3: the device
+ * @id: the chip to select (amongst 0, 1, 2, 3)
+ *
+ * There can be 4 cascaded G3 chips. This function selects the one that
+ * should be the active one.
+ */
+static void doc_set_device_id(struct docg3 *docg3, int id)
+{
+ u8 ctrl;
+
+ doc_dbg("doc_set_device_id(%d)\n", id);
+ doc_writeb(docg3, id, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+
+ ctrl &= ~DOC_CTRL_VIOLATION;
+ ctrl |= DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+}
+
+/**
+ * doc_set_extra_page_mode - Change flash page layout
+ * @docg3: the device
+ *
+ * Normally, the flash page is split into the data (512 bytes) and the out of
+ * band data (16 bytes). For each, 4 more bytes can be accessed, where the wear
+ * leveling counters are stored. To access this last area of 4 bytes, a special
+ * mode must be selected in the flash ASIC.
+ *
+ * Returns 0 if no error occurred, -EIO otherwise.
+ */
+static int doc_set_extra_page_mode(struct docg3 *docg3)
+{
+ int fctrl;
+
+ doc_dbg("doc_set_extra_page_mode()\n");
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532);
+ doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532);
+ doc_delay(docg3, 2);
+
+ fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR))
+ return -EIO;
+ else
+ return 0;
+}
+
+/**
+ * doc_setup_addr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ */
+static void doc_setup_addr_sector(struct docg3 *docg3, int sector)
+{
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ * @ofs: the offset in the page, between 0 and (512 + 16 + 512)
+ */
+static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
+{
+ ofs = ofs >> 2;
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, ofs & 0xff);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_read_seek - Set both flash planes to the specified block, page for reading
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @wear: if true, read will occur on the 4 extra bytes of the wear area
+ * @ofs: offset in page to read
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int wear, int ofs)
+{
+ int sector, ret = 0;
+
+ doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n",
+ block0, block1, page, ofs, wear);
+
+ if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_set_reliable_mode(docg3);
+ if (wear)
+ ret = doc_set_extra_page_mode(docg3);
+ if (ret)
+ goto out;
+
+ doc_flash_sequence(docg3, DOC_SEQ_READ);
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ doc_delay(docg3, 1);
+
+out:
+ return ret;
+}
+
+/**
+ * doc_write_seek - Set both flash planes to the specified block, page for writing
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @ofs: offset in page to write
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int ofs)
+{
+ int ret = 0, sector;
+
+ doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n",
+ block0, block1, page, ofs);
+
+ doc_set_reliable_mode(docg3);
+
+ if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3);
+ doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+ if (ret)
+ goto out;
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+ doc_delay(docg3, 1);
+
+out:
+ return ret;
+}
+
+
+/**
+ * doc_read_page_ecc_init - Initialize hardware ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * The function initializes the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_write_page_ecc_init - Initialize hardware BCH ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * The function initializes the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_write_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator
+ * @docg3: the device
+ *
+ * Disables the hardware ECC generator and checker, for unchecked reads (such as
+ * when reading only the OOB or the write status byte).
+ */
+static void doc_ecc_disable(struct docg3 *docg3)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+}
+
+/**
+ * doc_hamming_ecc_init - Initialize hardware Hamming ECC engine
+ * @docg3: the device
+ * @nb_bytes: the number of bytes covered by the ECC (Hamming covered)
+ *
+ * This function programs the ECC hardware to compute the Hamming code of the
+ * last N bytes provided to the hardware generator.
+ */
+static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes)
+{
+ u8 ecc_conf1;
+
+ ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+ ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK;
+ ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK);
+ doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1);
+}
+
+/**
+ * doc_ecc_bch_fix_data - Fix read data from flash, if need be
+ * @docg3: the device
+ * @buf: the buffer of read data (512 + 7 + 1 bytes)
+ * @hwecc: the hardware calculated ECC.
+ * It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB
+ * area data, and calc_ecc the ECC calculated by the hardware generator.
+ *
+ * Checks if the received data matches the ECC, and if an error is detected,
+ * tries to fix the bit flips (at most 4) in the buffer buf. As the docg3
+ * understands the (data, ecc, syndromes) in an inverted order in comparison to
+ * the BCH library, the function reverses the order of bits (i.e. swaps bit 7
+ * and bit 0, bit 6 and bit 1, ...) for all ECC data.
+ *
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However, the hardware operates on page
+ * data in a bit order that is the reverse of that of the BCH algorithm,
+ * requiring that the bits be reversed in the result. Thanks to Ivan
+ * Djelic for his analysis.
+ *
+ * Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit
+ * errors were detected and cannot be fixed.
+ */
+static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
+{
+ u8 ecc[DOC_ECC_BCH_SIZE];
+ int errorpos[DOC_ECC_BCH_T], i, numerrs;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ ecc[i] = bitrev8(hwecc[i]);
+ numerrs = decode_bch(docg3->cascade->bch, NULL,
+ DOC_ECC_BCH_COVERED_BYTES,
+ NULL, ecc, NULL, errorpos);
+ BUG_ON(numerrs == -EINVAL);
+ if (numerrs < 0)
+ goto out;
+
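+ /*
+ * Mirror each error position within its byte (bit 0 <-> bit 7, bit 1 <->
+ * bit 6, ...): e.g. a reported position 9 (byte 1, bit 1) becomes
+ * position 14 (byte 1, bit 6), matching the chip's reversed bit order.
+ */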
+ for (i = 0; i < numerrs; i++)
+ errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7));
+ for (i = 0; i < numerrs; i++)
+ if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8)
+ /* error is located in data, correct it */
+ change_bit(errorpos[i], buf);
+out:
+ doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs);
+ return numerrs;
+}
+
+
+/**
+ * doc_read_page_prepare - Prepares reading data from a flash page
+ * @docg3: the device
+ * @block0: the first plane block index on flash memory
+ * @block1: the second plane block index on flash memory
+ * @page: the page index in the block
+ * @offset: the offset in the page (must be a multiple of 4)
+ *
+ * Prepares the page to be read in the flash memory:
+ * - tell ASIC to map the flash pages
+ * - tell ASIC to be in read mode
+ *
+ * After a call to this method, a call to doc_read_page_finish is mandatory,
+ * to end the read cycle of the flash.
+ *
+ * Read data from a flash page. The length to be read must be between 0 and
+ * (page_size + oob_size + wear_size), i.e. 532, and a multiple of 4 (because
+ * reading the extra bytes is not implemented).
+ *
+ * As pages are grouped by 2 (in 2 planes), reading from a page must be done
+ * in two steps:
+ * - one read of 512 bytes at offset 0
+ * - one read of 512 bytes at offset 512 + 16
+ *
+ * Returns 0 if successful, -EIO if a read error occurred.
+ */
+static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
+ int page, int offset)
+{
+ int wear_area = 0, ret = 0;
+
+ doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n",
+ block0, block1, page, offset);
+ if (offset >= DOC_LAYOUT_WEAR_OFFSET)
+ wear_area = 1;
+ if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2))
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset);
+ if (ret)
+ goto err;
+
+ doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES);
+ doc_delay(docg3, 2);
+ doc_wait_ready(docg3);
+
+ doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ);
+ doc_delay(docg3, 1);
+ if (offset >= DOC_LAYOUT_PAGE_SIZE * 2)
+ offset -= 2 * DOC_LAYOUT_PAGE_SIZE;
+ doc_flash_address(docg3, offset >> 2);
+ doc_delay(docg3, 1);
+ doc_wait_ready(docg3);
+
+ doc_flash_command(docg3, DOC_CMD_READ_FLASH);
+
+ return 0;
+err:
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
+ return -EIO;
+}
+
+/**
+ * doc_read_page_getbytes - Reads bytes from a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be read (must be a multiple of 4)
+ * @buf: the buffer to be filled in (or NULL to discard the bytes)
+ * @first: 1 if first time read, DOC_READADDRESS should be set
+ * @last_odd: 1 if last read ended up on an odd byte
+ *
+ * Reads bytes from a prepared page. There is a subtlety here: if the last read
+ * ended up on an odd offset in the 1024-byte double page, i.e. between the 2
+ * planes, the first byte must be read separately. If a word (16-bit) read were
+ * used, it would return the byte of plane 2 in both the low *and* the high
+ * half, which would corrupt the read.
+ *
+ */
+static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
+ int first, int last_odd)
+{
+ if (last_odd && len > 0) {
+ doc_read_data_area(docg3, buf, 1, first);
+ doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
+ } else {
+ doc_read_data_area(docg3, buf, len, first);
+ }
+ doc_delay(docg3, 2);
+ return len;
+}
+
+/**
+ * doc_write_page_putbytes - Writes bytes into a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be written
+ * @buf: the buffer of input bytes
+ *
+ */
+static void doc_write_page_putbytes(struct docg3 *docg3, int len,
+ const u_char *buf)
+{
+ doc_write_data_area(docg3, buf, len);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_get_bch_hw_ecc - Get hardware calculated BCH ECC
+ * @docg3: the device
+ * @hwecc: the array of 7 integers where the hardware ecc will be stored
+ */
+static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc)
+{
+ int i;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i));
+}
+
+/**
+ * doc_page_finish - Ends reading/writing of a flash page
+ * @docg3: the device
+ */
+static void doc_page_finish(struct docg3 *docg3)
+{
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_read_page_finish - Ends reading of a flash page
+ * @docg3: the device
+ *
+ * As a side effect, resets the chip selector to 0. This ensures that after each
+ * read operation, the floor 0 is selected. Therefore, if the systems halts, the
+ * reboot will boot on floor 0, where the IPL is.
+ */
+static void doc_read_page_finish(struct docg3 *docg3)
+{
+ doc_page_finish(docg3);
+ doc_set_device_id(docg3, 0);
+}
+
+/**
+ * calc_block_sector - Calculate blocks, pages and ofs.
+ *
+ * @from: offset in flash
+ * @block0: first plane block index calculated
+ * @block1: second plane block index calculated
+ * @page: page calculated
+ * @ofs: offset in page
+ * @reliable: 0 if docg3 in normal mode, 1 if docg3 in fast mode, 2 if docg3 in
+ * reliable mode.
+ *
+ * The calculation is based on the reliable/normal mode. In normal mode, the 64
+ * pages of a block are available. In reliable mode, as pages 2*n and 2*n+1 are
+ * clones, only 32 pages per block are available.
+ */
+static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
+ int *ofs, int reliable)
+{
+ uint sector, pages_biblock;
+
+ pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ pages_biblock /= 2;
+
+ sector = from / DOC_LAYOUT_PAGE_SIZE;
+ *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES;
+ *block1 = *block0 + 1;
+ *page = sector % pages_biblock;
+ *page /= DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ *page *= 2;
+ if (sector % 2)
+ *ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
+ else
+ *ofs = 0;
+}
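+
+/*
+ * A worked example of the calculation above, assuming the layout constants
+ * from docg3.h (512-byte pages, 64 pages per block, 2 planes) and normal
+ * mode: from = 65536 gives sector = 128 and pages_biblock = 128, hence
+ * block0 = 2, block1 = 3, page = 0 and ofs = 0.
+ */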
+
+/**
+ * doc_read_oob - Read out of band bytes from flash
+ * @mtd: the device
+ * @from: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Reads flash memory OOB area of pages.
+ *
+ * Returns 0 if read successful, or -EIO / -EINVAL if an error occurred
+ */
+static int doc_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ret, skip, ofs = 0;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen, nbdata, nboob;
+ u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+ int max_bitflips = 0;
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ from, ops->mode, buf, len, oobbuf, ooblen);
+ if (ooblen % DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+
+ if (from + len > mtd->size)
+ return -EINVAL;
+
+ ops->oobretlen = 0;
+ ops->retlen = 0;
+ ret = 0;
+ skip = from % DOC_LAYOUT_PAGE_SIZE;
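+ /* 'skip' is the unaligned head of the first page: those bytes are read
+ * and discarded so that later pages start page-aligned. */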
+ mutex_lock(&docg3->cascade->lock);
+ while (ret >= 0 && (len > 0 || ooblen > 0)) {
+ calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
+ nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
+ ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
+ if (ret < 0)
+ goto out;
+ ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+ if (ret < 0)
+ goto err_in_read;
+ ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
+ if (ret < skip)
+ goto err_in_read;
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
+ if (ret < nbdata)
+ goto err_in_read;
+ doc_read_page_getbytes(docg3,
+ DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
+ NULL, 0, (skip + nbdata) % 2);
+ ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
+ if (ret < nboob)
+ goto err_in_read;
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
+ NULL, 0, nboob % 2);
+
+ doc_get_bch_hw_ecc(docg3, hwecc);
+ eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+
+ if (nboob >= DOC_LAYOUT_OOB_SIZE) {
+ doc_dbg("OOB - INFO: %*phC\n", 7, oobbuf);
+ doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
+ doc_dbg("OOB - BCH_ECC: %*phC\n", 7, oobbuf + 8);
+ doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
+ }
+ doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
+ doc_dbg("ECC HW_ECC: %*phC\n", 7, hwecc);
+
+ ret = -EIO;
+ if (is_prot_seq_error(docg3))
+ goto err_in_read;
+ ret = 0;
+ if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
+ (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
+ (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
+ (ops->mode != MTD_OPS_RAW) &&
+ (nbdata == DOC_LAYOUT_PAGE_SIZE)) {
+ ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ ret = -EBADMSG;
+ }
+ if (ret > 0) {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max(max_bitflips, ret);
+ ret = max_bitflips;
+ }
+ }
+
+ doc_read_page_finish(docg3);
+ ops->retlen += nbdata;
+ ops->oobretlen += nboob;
+ buf += nbdata;
+ oobbuf += nboob;
+ len -= nbdata;
+ ooblen -= nboob;
+ from += DOC_LAYOUT_PAGE_SIZE;
+ skip = 0;
+ }
+
+out:
+ mutex_unlock(&docg3->cascade->lock);
+ return ret;
+err_in_read:
+ doc_read_page_finish(docg3);
+ goto out;
+}
+
+/**
+ * doc_read - Read bytes from flash
+ * @mtd: the device
+ * @from: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @len: the number of bytes to read (must be a multiple of 4)
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
+ *
+ * Reads flash memory pages. This function does not read the OOB chunk, but only
+ * the page data.
+ *
+ * Returns 0 if read successful, or -EIO / -EINVAL if an error occurred
+ */
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_oob_ops ops;
+ size_t ret;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.datbuf = buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_AUTO_OOB;
+
+ ret = doc_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static int doc_reload_bbt(struct docg3 *docg3)
+{
+ int block = DOC_LAYOUT_BLOCK_BBT;
+ int ret = 0, nbpages, page;
+ u_char *buf = docg3->bbt;
+
+ nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE);
+ for (page = 0; !ret && (page < nbpages); page++) {
+ ret = doc_read_page_prepare(docg3, block, block + 1,
+ page + DOC_LAYOUT_PAGE_BBT, 0);
+ if (!ret)
+ ret = doc_read_page_ecc_init(docg3,
+ DOC_LAYOUT_PAGE_SIZE);
+ if (!ret)
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
+ buf, 1, 0);
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ }
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_block_isbad - Checks whether a block is good or not
+ * @mtd: the device
+ * @from: the offset to find the correct block
+ *
+ * Returns 1 if the block is bad, 0 if it is good, -EINVAL if out of range
+ */
+static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ofs, is_good;
+
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
+ from, block0, block1, page, ofs);
+
+ if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA)
+ return 0;
+ if (block1 > docg3->max_block)
+ return -EINVAL;
+
+ is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7));
+ return !is_good;
+}
+
+#if 0
+/**
+ * doc_get_erase_count - Get block erase count
+ * @docg3: the device
+ * @from: the offset in which the block is.
+ *
+ * Get the number of times a block was erased. The number is the maximum of
+ * erase times between first and second plane (which should be equal normally).
+ *
+ * Returns the number of erases, or -EINVAL or -EIO on error.
+ */
+static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
+{
+ u8 buf[DOC_LAYOUT_WEAR_SIZE];
+ int ret, plane1_erase_count, plane2_erase_count;
+ int block0, block1, page, ofs;
+
+ doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
+ if (from % DOC_LAYOUT_PAGE_SIZE)
+ return -EINVAL;
+ calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
+ if (block1 > docg3->max_block)
+ return -EINVAL;
+
+ ret = doc_reset_seq(docg3);
+ if (!ret)
+ ret = doc_read_page_prepare(docg3, block0, block1, page,
+ ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
+ if (!ret)
+ ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
+ buf, 1, 0);
+ doc_read_page_finish(docg3);
+
+ if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
+ return -EIO;
+ plane1_erase_count = (u8)(~buf[1]) | ((u8)(~buf[4]) << 8)
+ | ((u8)(~buf[5]) << 16);
+ plane2_erase_count = (u8)(~buf[3]) | ((u8)(~buf[6]) << 8)
+ | ((u8)(~buf[7]) << 16);
+
+ return max(plane1_erase_count, plane2_erase_count);
+}
+#endif
+
+/**
+ * doc_get_op_status - get erase/write operation status
+ * @docg3: the device
+ *
+ * Queries the status from the chip, and returns it
+ *
+ * Returns the status (bits DOC_PLANES_STATUS_*)
+ */
+static int doc_get_op_status(struct docg3 *docg3)
+{
+ u8 status;
+
+ doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS);
+ doc_flash_command(docg3, DOC_CMD_PLANES_STATUS);
+ doc_delay(docg3, 5);
+
+ doc_ecc_disable(docg3);
+ doc_read_data_area(docg3, &status, 1, 1);
+ return status;
+}
+
+/**
+ * doc_write_erase_wait_status - wait for write or erase completion
+ * @docg3: the device
+ *
+ * Wait for the chip to be ready again after an erase or a write operation, and
+ * check the erase/write status.
+ *
+ * Returns 0 if erase successful, -EIO on an erase/write issue, -EAGAIN if the
+ * chip did not become ready in time
+ */
+static int doc_write_erase_wait_status(struct docg3 *docg3)
+{
+ int i, status, ret = 0;
+
+ for (i = 0; !doc_is_ready(docg3) && i < 5; i++)
+ msleep(20);
+ if (!doc_is_ready(docg3)) {
+ doc_dbg("Timeout reached and the chip is still not ready\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ status = doc_get_op_status(docg3);
+ if (status & DOC_PLANES_STATUS_FAIL) {
+ doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n",
+ status);
+ ret = -EIO;
+ }
+
+out:
+ doc_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_erase_block - Erase a couple of blocks
+ * @docg3: the device
+ * @block0: the first block to erase (leftmost plane)
+ * @block1: the second block to erase (rightmost plane)
+ *
+ * Erase both blocks, and return operation status
+ *
+ * Returns 0 if erase successful, -EIO on an erase issue, -EAGAIN if the chip
+ * stayed not ready for too long
+ */
+static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
+{
+ int ret, sector;
+
+ doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ return -EIO;
+
+ doc_set_reliable_mode(docg3);
+ doc_flash_sequence(docg3, DOC_SEQ_ERASE);
+
+ sector = block0 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ sector = block1 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ doc_delay(docg3, 1);
+
+ doc_flash_command(docg3, DOC_CMD_ERASECYCLE2);
+ doc_delay(docg3, 2);
+
+ if (is_prot_seq_error(docg3)) {
+ doc_err("Erase blocks %d,%d error\n", block0, block1);
+ return -EIO;
+ }
+
+ return doc_write_erase_wait_status(docg3);
+}
+
+/**
+ * doc_erase - Erase a portion of the chip
+ * @mtd: the device
+ * @info: the erase info
+ *
+ * Erase a bunch of contiguous blocks, by pairs, as an "mtd" page of 1024 bytes
+ * is split into 2 pages of 512 bytes on 2 contiguous blocks.
+ *
+ * Returns 0 if erase successful, -EINVAL if addressing error, -EIO if erase
+ * issue
+ */
+static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
+{
+ struct docg3 *docg3 = mtd->priv;
+ uint64_t len;
+ int block0, block1, page, ret, ofs = 0;
+
+ doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
+
+ info->state = MTD_ERASE_PENDING;
+ calc_block_sector(info->addr + info->len, &block0, &block1, &page,
+ &ofs, docg3->reliable);
+ ret = -EINVAL;
+ if (info->addr + info->len > mtd->size || page || ofs)
+ goto reset_err;
+
+ ret = 0;
+ calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ doc_set_reliable_mode(docg3);
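+ /* One mtd erase block covers one flash block on each plane, hence the
+ * pairwise walk in steps of two blocks per erasesize. */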
+ for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
+ info->state = MTD_ERASING;
+ ret = doc_erase_block(docg3, block0, block1);
+ block0 += 2;
+ block1 += 2;
+ }
+ mutex_unlock(&docg3->cascade->lock);
+
+ if (ret)
+ goto reset_err;
+
+ info->state = MTD_ERASE_DONE;
+ return 0;
+
+reset_err:
+ info->state = MTD_ERASE_FAILED;
+ return ret;
+}
+
+/**
+ * doc_write_page - Write a single page to the chip
+ * @docg3: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @buf: buffer to get bytes from
+ * @oob: buffer to get out of band bytes from (can be NULL if no OOB should be
+ * written)
+ * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or
+ * BCH computations. If 1, only bytes 0-7 and byte 15 are taken, the
+ * remaining ones are filled with hardware Hamming and BCH
+ * computations. Its value is not meaningful if oob == NULL.
+ *
+ * Write one full page (i.e. 1 page split across the two planes), of 512 bytes,
+ * with the OOB data. The OOB ECC is automatically computed by the hardware
+ * Hamming and BCH generators if autoecc is non-zero.
+ *
+ * Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout
+ */
+static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
+ const u_char *oob, int autoecc)
+{
+ int block0, block1, page, ret, ofs = 0;
+ u8 hwecc[DOC_ECC_BCH_SIZE], hamming;
+
+ doc_dbg("doc_write_page(to=%lld)\n", to);
+ calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_write_seek(docg3, block0, block1, page, ofs);
+ if (ret)
+ goto err;
+
+ doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf);
+
+ if (oob && autoecc) {
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob);
+ doc_delay(docg3, 2);
+ oob += DOC_LAYOUT_OOB_UNUSED_OFS;
+
+ hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ,
+ &hamming);
+ doc_delay(docg3, 2);
+
+ doc_get_bch_hw_ecc(docg3, hwecc);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc);
+ doc_delay(docg3, 2);
+
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob);
+ }
+ if (oob && !autoecc)
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob);
+
+ doc_delay(docg3, 2);
+ doc_page_finish(docg3);
+ doc_delay(docg3, 2);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2);
+ doc_delay(docg3, 2);
+
+ /*
+ * The wait status will perform another doc_page_finish() call, but that
+ * seems to please the docg3, so leave it.
+ */
+ ret = doc_write_erase_wait_status(docg3);
+ return ret;
+err:
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_guess_autoecc - Guess autoecc mode from mtd_oob_ops
+ * @ops: the oob operations
+ *
+ * Returns 0 or 1 if success, -EINVAL if invalid oob mode
+ */
+static int doc_guess_autoecc(struct mtd_oob_ops *ops)
+{
+ int autoecc;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ autoecc = 1;
+ break;
+ case MTD_OPS_RAW:
+ autoecc = 0;
+ break;
+ default:
+ autoecc = -EINVAL;
+ }
+ return autoecc;
+}
+
+/**
+ * doc_fill_autooob - Fill a 16-byte OOB from 8 non-ECC bytes
+ * @dst: the target 16 bytes OOB buffer
+ * @oobsrc: the source 8 bytes non-ECC OOB buffer
+ *
+ */
+static void doc_fill_autooob(u8 *dst, u8 *oobsrc)
+{
+ memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+ dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ];
+}
+
+/**
+ * doc_backup_oob - Backup OOB into docg3 structure
+ * @docg3: the device
+ * @to: the page offset in the chip
+ * @ops: the OOB size and buffer
+ *
+ * As the docg3 should write a page with its OOB in one pass, and some userland
+ * applications do write_oob() to set up the OOB and then write(), store the OOB
+ * in a temporary buffer. This is very dangerous, as 2 concurrent
+ * applications could store an OOB, and then write their pages (which will
+ * result in one of them having its OOB corrupted).
+ *
+ * The only reliable way would be for userland to call doc_write_oob() with both
+ * the page data _and_ the OOB area.
+ *
+ * Returns 0 if success, -EINVAL if ops content invalid
+ */
+static int doc_backup_oob(struct docg3 *docg3, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ooblen = ops->ooblen, autoecc;
+
+ if (ooblen != DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ docg3->oob_write_ofs = to;
+ docg3->oob_autoecc = autoecc;
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
+ doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
+ ops->oobretlen = 8;
+ } else {
+ memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ops->oobretlen = DOC_LAYOUT_OOB_SIZE;
+ }
+ return 0;
+}
+
+/**
+ * doc_write_oob - Write out of band bytes to flash
+ * @mtd: the device
+ * @ofs: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Either write OOB data into a temporary buffer, for the subsequent page
+ * write. The provided OOB should be 16 bytes long. If a data buffer is provided
+ * as well, issue the page write.
+ * Or provide data without OOB, and then an all-zeroed OOB will be used (ECC
+ * will still be filled in if asked for).
+ *
+ * Returns 0 if successful, -EINVAL if the lengths or the offset are invalid
+ */
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int ret, autoecc, oobdelta;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen;
+ u8 oob[DOC_LAYOUT_OOB_SIZE];
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ ofs, ops->mode, buf, len, oobbuf, ooblen);
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ oobdelta = mtd->oobsize;
+ break;
+ case MTD_OPS_AUTO_OOB:
+ oobdelta = mtd->ecclayout->oobavail;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
+ (ofs % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
+ if (len && ooblen &&
+ (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
+ return -EINVAL;
+ if (ofs + len > mtd->size)
+ return -EINVAL;
+
+ ops->oobretlen = 0;
+ ops->retlen = 0;
+ ret = 0;
+ if (len == 0 && ooblen == 0)
+ return -EINVAL;
+ if (len == 0 && ooblen > 0)
+ return doc_backup_oob(docg3, ofs, ops);
+
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ mutex_lock(&docg3->cascade->lock);
+ while (!ret && len > 0) {
+ memset(oob, 0, sizeof(oob));
+ if (ofs == docg3->oob_write_ofs)
+ memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
+ else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
+ doc_fill_autooob(oob, oobbuf);
+ else if (ooblen > 0)
+ memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ret = doc_write_page(docg3, ofs, buf, oob, autoecc);
+
+ ofs += DOC_LAYOUT_PAGE_SIZE;
+ len -= DOC_LAYOUT_PAGE_SIZE;
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ if (ooblen) {
+ oobbuf += oobdelta;
+ ooblen -= oobdelta;
+ ops->oobretlen += oobdelta;
+ }
+ ops->retlen += DOC_LAYOUT_PAGE_SIZE;
+ }
+
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+ return ret;
+}
+
+/**
+ * doc_write - Write a buffer to the chip
+ * @mtd: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @len: the number of bytes to write (must be a full page size, ie. 512)
+ * @retlen: the number of bytes actually written (0 or 512)
+ * @buf: the buffer to get bytes from
+ *
+ * Writes data to the chip.
+ *
+ * Returns 0 if write successful, -EIO if write error
+ */
+static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int ret;
+ struct mtd_oob_ops ops;
+
+ doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
+ ops.datbuf = (char *)buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.oobbuf = NULL;
+ ops.ooblen = 0;
+ ops.ooboffs = 0;
+
+ ret = doc_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static struct docg3 *sysfs_dev2docg3(struct device *dev,
+ struct device_attribute *attr)
+{
+ int floor;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+
+ floor = attr->attr.name[1] - '0';
+ if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
+ return NULL;
+ else
+ return docg3_floors[floor]->priv;
+}
+
+static ssize_t dps0_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps0;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+
+ return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps1_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps1;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+
+ return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps0_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+ return count;
+}
+
+static ssize_t dps1_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+ return count;
+}
+
+#define FLOOR_SYSFS(id) { \
+ __ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
+ __ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
+ __ATTR(f##id##_dps0_protection_key, S_IWUSR|S_IWGRP, NULL, dps0_insert_key), \
+ __ATTR(f##id##_dps1_protection_key, S_IWUSR|S_IWGRP, NULL, dps1_insert_key), \
+}
+
+static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
+ FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3)
+};
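+
+/*
+ * For floor 0, the FLOOR_SYSFS() expansion above yields the sysfs attributes
+ * f0_dps0_is_keylocked, f0_dps1_is_keylocked, f0_dps0_protection_key and
+ * f0_dps1_protection_key; sysfs_dev2docg3() recovers the floor number from
+ * the second character of the attribute name.
+ */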
+
+static int doc_register_sysfs(struct platform_device *pdev,
+ struct docg3_cascade *cascade)
+{
+ int ret = 0, floor, i = 0;
+ struct device *dev = &pdev->dev;
+
+ for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS &&
+ cascade->floors[floor]; floor++)
+ for (i = 0; !ret && i < 4; i++)
+ ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
+ if (!ret)
+ return 0;
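+ /* Unwind the partial registration: remove the attributes already created
+ * for the failing floor, then all four for each previously registered
+ * floor. */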
+ do {
+ while (--i >= 0)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+ i = 4;
+ } while (--floor >= 0);
+ return ret;
+}
+
+static void doc_unregister_sysfs(struct platform_device *pdev,
+ struct docg3_cascade *cascade)
+{
+ struct device *dev = &pdev->dev;
+ int floor, i;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
+ floor++)
+ for (i = 0; i < 4; i++)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+}
+
+/*
+ * Debug sysfs entries
+ */
+static int dbg_flashctrl_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+
+ u8 fctrl;
+
+ mutex_lock(&docg3->cascade->lock);
+ fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ mutex_unlock(&docg3->cascade->lock);
+
+ seq_printf(s, "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
+ fctrl,
+ fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
+ fctrl & DOC_CTRL_CE ? "active" : "inactive",
+ fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
+ fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
+ fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
+
+ return 0;
+}
+DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
+
+static int dbg_asicmode_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+
+ int pctrl, mode;
+
+ mutex_lock(&docg3->cascade->lock);
+ pctrl = doc_register_readb(docg3, DOC_ASICMODE);
+ mode = pctrl & 0x03;
+ mutex_unlock(&docg3->cascade->lock);
+
+ seq_printf(s,
+ "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
+ pctrl,
+ pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
+ pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
+ pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
+ mode >> 1, mode & 0x1);
+
+ switch (mode) {
+ case DOC_ASICMODE_RESET:
+ seq_puts(s, "reset");
+ break;
+ case DOC_ASICMODE_NORMAL:
+ seq_puts(s, "normal");
+ break;
+ case DOC_ASICMODE_POWERDOWN:
+ seq_puts(s, "powerdown");
+ break;
+ }
+ seq_puts(s, ")\n");
+ return 0;
+}
+DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show);
+
+static int dbg_device_id_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+ int id;
+
+ mutex_lock(&docg3->cascade->lock);
+ id = doc_register_readb(docg3, DOC_DEVICESELECT);
+ mutex_unlock(&docg3->cascade->lock);
+
+ seq_printf(s, "DeviceId = %d\n", id);
+ return 0;
+}
+DEBUGFS_RO_ATTR(device_id, dbg_device_id_show);
+
+static int dbg_protection_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+ int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
+
+ mutex_lock(&docg3->cascade->lock);
+ protect = doc_register_readb(docg3, DOC_PROTECTION);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
+ dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
+ dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
+ mutex_unlock(&docg3->cascade->lock);
+
+ seq_printf(s, "Protection = 0x%02x (", protect);
+ if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
+ seq_puts(s, "FOUNDRY_OTP_LOCK,");
+ if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
+ seq_puts(s, "CUSTOMER_OTP_LOCK,");
+ if (protect & DOC_PROTECT_LOCK_INPUT)
+ seq_puts(s, "LOCK_INPUT,");
+ if (protect & DOC_PROTECT_STICKY_LOCK)
+ seq_puts(s, "STICKY_LOCK,");
+ if (protect & DOC_PROTECT_PROTECTION_ENABLED)
+ seq_puts(s, "PROTECTION ON,");
+ if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
+ seq_puts(s, "IPL_DOWNLOAD_LOCK,");
+ if (protect & DOC_PROTECT_PROTECTION_ERROR)
+ seq_puts(s, "PROTECT_ERR,");
+ else
+ seq_puts(s, "NO_PROTECT_ERR");
+ seq_puts(s, ")\n");
+
+ seq_printf(s, "DPS0 = 0x%02x : Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps0, dps0_low, dps0_high,
+ !!(dps0 & DOC_DPS_OTP_PROTECTED),
+ !!(dps0 & DOC_DPS_READ_PROTECTED),
+ !!(dps0 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps0 & DOC_DPS_KEY_OK));
+ seq_printf(s, "DPS1 = 0x%02x : Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps1, dps1_low, dps1_high,
+ !!(dps1 & DOC_DPS_OTP_PROTECTED),
+ !!(dps1 & DOC_DPS_READ_PROTECTED),
+ !!(dps1 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps1 & DOC_DPS_KEY_OK));
+ return 0;
+}
+DEBUGFS_RO_ATTR(protection, dbg_protection_show);
+
+static int __init doc_dbg_register(struct docg3 *docg3)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("docg3", NULL);
+ if (!root)
+ return -ENOMEM;
+
+ entry = debugfs_create_file("flashcontrol", S_IRUSR, root, docg3,
+ &flashcontrol_fops);
+ if (entry)
+ entry = debugfs_create_file("asic_mode", S_IRUSR, root,
+ docg3, &asic_mode_fops);
+ if (entry)
+ entry = debugfs_create_file("device_id", S_IRUSR, root,
+ docg3, &device_id_fops);
+ if (entry)
+ entry = debugfs_create_file("protection", S_IRUSR, root,
+ docg3, &protection_fops);
+ if (entry) {
+ docg3->debugfs_root = root;
+ return 0;
+ } else {
+ debugfs_remove_recursive(root);
+ return -ENOMEM;
+ }
+}
+
+static void doc_dbg_unregister(struct docg3 *docg3)
+{
+ debugfs_remove_recursive(docg3->debugfs_root);
+}
+
+/**
+ * doc_set_driver_info - Fill the mtd_info structure and docg3 structure
+ * @chip_id: The chip ID of the supported chip
+ * @mtd: The structure to fill
+ */
+static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int cfg;
+
+ cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
+ docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
+ docg3->reliable = reliable_mode;
+
+ switch (chip_id) {
+ case DOC_CHIPID_G3:
+ mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
+ docg3->device_id);
+ docg3->max_block = 2047;
+ break;
+ }
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
+ if (docg3->reliable == 2)
+ mtd->size /= 2;
+ mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
+ if (docg3->reliable == 2)
+ mtd->erasesize /= 2;
+ mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
+ mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
+ mtd->owner = THIS_MODULE;
+ mtd->_erase = doc_erase;
+ mtd->_read = doc_read;
+ mtd->_write = doc_write;
+ mtd->_read_oob = doc_read_oob;
+ mtd->_write_oob = doc_write_oob;
+ mtd->_block_isbad = doc_block_isbad;
+ mtd->ecclayout = &docg3_oobinfo;
+ mtd->ecc_strength = DOC_ECC_BCH_T;
+}
+
+/**
+ * doc_probe_device - Check if a device is available
+ * @base: the io space where the device is probed
+ * @floor: the floor of the probed device
+ * @dev: the device
+ * @cascade: the cascade of chips this devices will belong to
+ *
+ * Checks whether a device is available at the specified IO range and floor.
+ *
+ * Returns an mtd_info struct if there is a device, NULL if none was found, or
+ * ERR_PTR(-ENOMEM) if a memory allocation failed. If floor 0 is checked, a
+ * reset of the ASIC is launched.
+ */
+static struct mtd_info * __init
+doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
+{
+ int ret, bbt_nbpages;
+ u16 chip_id, chip_id_inv;
+ struct docg3 *docg3;
+ struct mtd_info *mtd;
+
+ ret = -ENOMEM;
+ docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
+ if (!docg3)
+ goto nomem1;
+ mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd)
+ goto nomem2;
+ mtd->priv = docg3;
+ bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
+ 8 * DOC_LAYOUT_PAGE_SIZE);
+ docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
+ if (!docg3->bbt)
+ goto nomem3;
+
+ docg3->dev = dev;
+ docg3->device_id = floor;
+ docg3->cascade = cascade;
+ doc_set_device_id(docg3, docg3->device_id);
+ if (!floor)
+ doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
+ doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
+
+ chip_id = doc_register_readw(docg3, DOC_CHIPID);
+ chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
+
+ ret = 0;
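+ /* A genuine chip mirrors its chip ID as the bitwise complement in
+ * DOC_CHIPID_INV; a mismatch means no (or an unsupported) chip. */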
+ if (chip_id != (u16)(~chip_id_inv)) {
+ goto nomem3;
+ }
+
+ switch (chip_id) {
+ case DOC_CHIPID_G3:
+ doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
+ docg3->cascade->base, floor);
+ break;
+ default:
+ doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
+ goto nomem3;
+ }
+
+ doc_set_driver_info(chip_id, mtd);
+
+ doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+ doc_reload_bbt(docg3);
+ return mtd;
+
+nomem3:
+ kfree(mtd);
+nomem2:
+ kfree(docg3);
+nomem1:
+ return ERR_PTR(ret);
+}
+
+/**
+ * doc_release_device - Release a docg3 floor
+ * @mtd: the device
+ */
+static void doc_release_device(struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
+
+ mtd_device_unregister(mtd);
+ kfree(docg3->bbt);
+ kfree(docg3);
+ kfree(mtd->name);
+ kfree(mtd);
+}
+
+/**
+ * docg3_resume - Awakens the docg3 floors
+ * @pdev: platform device
+ *
+ * Returns 0 (always successful)
+ */
+static int docg3_resume(struct platform_device *pdev)
+{
+ int i;
+ struct docg3_cascade *cascade;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+
+ cascade = platform_get_drvdata(pdev);
+ docg3_floors = cascade->floors;
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+
+ doc_dbg("docg3_resume()\n");
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+ return 0;
+}
+
+/**
+ * docg3_suspend - Put the docg3 floors in low power mode
+ * @pdev: platform device
+ * @state: power state
+ *
+ * Shuts off most of the docg3 circuitry to lower power consumption.
+ *
+ * Returns 0 if suspend succeeded, -EIO if chip refused suspend
+ */
+static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int floor, i;
+ struct docg3_cascade *cascade;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+ u8 ctrl, pwr_down;
+
+ cascade = platform_get_drvdata(pdev);
+ docg3_floors = cascade->floors;
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = docg3_floors[floor];
+ if (!mtd)
+ continue;
+ docg3 = mtd->priv;
+
+ doc_writeb(docg3, floor, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+
+ for (i = 0; i < 10; i++) {
+ usleep_range(3000, 4000);
+ pwr_down = doc_register_readb(docg3, DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ }
+ if (pwr_down & DOC_POWERDOWN_READY) {
+ doc_dbg("docg3_suspend(): floor %d powerdown ok\n",
+ floor);
+ } else {
+ doc_err("docg3_suspend(): floor %d powerdown failed\n",
+ floor);
+ return -EIO;
+ }
+ }
+
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+ doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN);
+ return 0;
+}
+
+/**
+ * doc_probe - Probe the IO space for a DiskOnChip G3 chip
+ * @pdev: platform device
+ *
+ * Probes for a G3 chip at the IO space specified in the platform data
+ * resources. Floor 0 must be available.
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int __init docg3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtd_info *mtd;
+ struct resource *ress;
+ void __iomem *base;
+ int ret, floor;
+ struct docg3_cascade *cascade;
+
+ ret = -ENXIO;
+ ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ress) {
+ dev_err(dev, "No I/O memory resource defined\n");
+ return ret;
+ }
+ base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
+
+ ret = -ENOMEM;
+ cascade = devm_kzalloc(dev, sizeof(*cascade) * DOC_MAX_NBFLOORS,
+ GFP_KERNEL);
+ if (!cascade)
+ return ret;
+ cascade->base = base;
+ mutex_init(&cascade->lock);
+ cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+ DOC_ECC_BCH_PRIMPOLY);
+ if (!cascade->bch)
+ return ret;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = doc_probe_device(cascade, floor, dev);
+ if (IS_ERR(mtd)) {
+ ret = PTR_ERR(mtd);
+ goto err_probe;
+ }
+ if (!mtd) {
+ if (floor == 0)
+ goto notfound;
+ else
+ continue;
+ }
+ cascade->floors[floor] = mtd;
+ ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
+ 0);
+ if (ret)
+ goto err_probe;
+ }
+
+ ret = doc_register_sysfs(pdev, cascade);
+ if (ret)
+ goto err_probe;
+
+ platform_set_drvdata(pdev, cascade);
+ doc_dbg_register(cascade->floors[0]->priv);
+ return 0;
+
+notfound:
+ ret = -ENODEV;
+ dev_info(dev, "No supported DiskOnChip found\n");
+err_probe:
+ free_bch(cascade->bch);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (cascade->floors[floor])
+ doc_release_device(cascade->floors[floor]);
+ return ret;
+}
+
+/**
+ * docg3_release - Release the driver
+ * @pdev: the platform device
+ *
+ * Returns 0
+ */
+static int docg3_release(struct platform_device *pdev)
+{
+ struct docg3_cascade *cascade = platform_get_drvdata(pdev);
+ struct docg3 *docg3 = cascade->floors[0]->priv;
+ int floor;
+
+ doc_unregister_sysfs(pdev, cascade);
+ doc_dbg_unregister(docg3);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (cascade->floors[floor])
+ doc_release_device(cascade->floors[floor]);
+
+ free_bch(docg3->cascade->bch);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id docg3_dt_ids[] = {
+ { .compatible = "m-systems,diskonchip-g3" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, docg3_dt_ids);
+#endif
+
+static struct platform_driver g3_driver = {
+ .driver = {
+ .name = "docg3",
+ .of_match_table = of_match_ptr(docg3_dt_ids),
+ },
+ .suspend = docg3_suspend,
+ .resume = docg3_resume,
+ .remove = docg3_release,
+};
+
+module_platform_driver_probe(g3_driver, docg3_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_DESCRIPTION("MTD driver for DiskOnChip G3");
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
new file mode 100644
index 000000000..19fb93f96
--- /dev/null
+++ b/drivers/mtd/devices/docg3.h
@@ -0,0 +1,370 @@
+/*
+ * Handles the M-Systems DiskOnChip G3 chip
+ *
+ * Copyright (C) 2011 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _MTD_DOCG3_H
+#define _MTD_DOCG3_H
+
+#include <linux/mtd/mtd.h>
+
+/*
+ * Flash memory areas:
+ * - 0x0000 .. 0x07ff : IPL
+ * - 0x0800 .. 0x0fff : Data area
+ * - 0x1000 .. 0x17ff : Registers
+ * - 0x1800 .. 0x1fff : Unknown
+ */
+#define DOC_IOSPACE_IPL 0x0000
+#define DOC_IOSPACE_DATA 0x0800
+#define DOC_IOSPACE_SIZE 0x2000
+
+/*
+ * DOC G3 layout and addressing scheme
+ * A page address for the block "b", plane "P" and page "p":
+ * address = [bbbb bPpp pppp]
+ */
+
+#define DOC_ADDR_PAGE_MASK 0x3f
+#define DOC_ADDR_BLOCK_SHIFT 6
+#define DOC_LAYOUT_NBPLANES 2
+#define DOC_LAYOUT_PAGES_PER_BLOCK 64
+#define DOC_LAYOUT_PAGE_SIZE 512
+#define DOC_LAYOUT_OOB_SIZE 16
+#define DOC_LAYOUT_WEAR_SIZE 8
+#define DOC_LAYOUT_PAGE_OOB_SIZE \
+ (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_SIZE)
+#define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2)
+#define DOC_LAYOUT_BLOCK_SIZE \
+ (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
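+
+/*
+ * Editor's note: an illustrative sketch (not part of the driver) of how a
+ * page address is composed under the scheme above; the plane is assumed to
+ * be selected by the least significant bit of the block number:
+ *
+ *	static inline u32 doc_compose_addr(u32 block, u32 page)
+ *	{
+ *		return (block << DOC_ADDR_BLOCK_SHIFT) |
+ *		       (page & DOC_ADDR_PAGE_MASK);
+ *	}
+ */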
+
+/*
+ * ECC related constants
+ */
+#define DOC_ECC_BCH_M 14
+#define DOC_ECC_BCH_T 4
+#define DOC_ECC_BCH_PRIMPOLY 0x4443
+#define DOC_ECC_BCH_SIZE 7
+#define DOC_ECC_BCH_COVERED_BYTES \
+ (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \
+ DOC_LAYOUT_OOB_HAMMING_SZ)
+#define DOC_ECC_BCH_TOTAL_BYTES \
+ (DOC_ECC_BCH_COVERED_BYTES + DOC_LAYOUT_OOB_BCH_SZ)
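+
+/*
+ * Editor's note: a quick consistency check of the constants above. A BCH
+ * code over GF(2^m) correcting t errors needs at most m * t parity bits;
+ * here 14 * 4 = 56 bits = 7 bytes, matching DOC_ECC_BCH_SIZE. A sketch of
+ * how these parameters feed the kernel BCH library from <linux/bch.h>
+ * (illustrative only):
+ *
+ *	struct bch_control *bch;
+ *	u8 ecc[DOC_ECC_BCH_SIZE];
+ *
+ *	bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, DOC_ECC_BCH_PRIMPOLY);
+ *	memset(ecc, 0, sizeof(ecc));
+ *	encode_bch(bch, buf, DOC_ECC_BCH_COVERED_BYTES, ecc);
+ *	free_bch(bch);
+ */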
+
+/*
+ * Blocks distribution
+ */
+#define DOC_LAYOUT_BLOCK_BBT 0
+#define DOC_LAYOUT_BLOCK_OTP 0
+#define DOC_LAYOUT_BLOCK_FIRST_DATA 6
+
+#define DOC_LAYOUT_PAGE_BBT 4
+
+/*
+ * Extra page OOB (16 bytes wide) layout
+ */
+#define DOC_LAYOUT_OOB_PAGEINFO_OFS 0
+#define DOC_LAYOUT_OOB_HAMMING_OFS 7
+#define DOC_LAYOUT_OOB_BCH_OFS 8
+#define DOC_LAYOUT_OOB_UNUSED_OFS 15
+#define DOC_LAYOUT_OOB_PAGEINFO_SZ 7
+#define DOC_LAYOUT_OOB_HAMMING_SZ 1
+#define DOC_LAYOUT_OOB_BCH_SZ 7
+#define DOC_LAYOUT_OOB_UNUSED_SZ 1
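+
+/*
+ * Editor's note: taken together, the offsets and sizes above describe the
+ * 16-byte OOB area as: bytes 0-6 page info, byte 7 Hamming parity,
+ * bytes 8-14 BCH parity, byte 15 unused.
+ */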
+
+
+#define DOC_CHIPID_G3 0x200
+#define DOC_ERASE_MARK 0xaa
+#define DOC_MAX_NBFLOORS 4
+/*
+ * Flash registers
+ */
+#define DOC_CHIPID 0x1000
+#define DOC_TEST 0x1004
+#define DOC_BUSLOCK 0x1006
+#define DOC_ENDIANCONTROL 0x1008
+#define DOC_DEVICESELECT 0x100a
+#define DOC_ASICMODE 0x100c
+#define DOC_CONFIGURATION 0x100e
+#define DOC_INTERRUPTCONTROL 0x1010
+#define DOC_READADDRESS 0x101a
+#define DOC_DATAEND 0x101e
+#define DOC_INTERRUPTSTATUS 0x1020
+
+#define DOC_FLASHSEQUENCE 0x1032
+#define DOC_FLASHCOMMAND 0x1034
+#define DOC_FLASHADDRESS 0x1036
+#define DOC_FLASHCONTROL 0x1038
+#define DOC_NOP 0x103e
+
+#define DOC_ECCCONF0 0x1040
+#define DOC_ECCCONF1 0x1042
+#define DOC_ECCPRESET 0x1044
+#define DOC_HAMMINGPARITY 0x1046
+#define DOC_BCH_HW_ECC(idx) (0x1048 + idx)
+
+#define DOC_PROTECTION 0x1056
+#define DOC_DPS0_KEY 0x105c
+#define DOC_DPS1_KEY 0x105e
+#define DOC_DPS0_ADDRLOW 0x1060
+#define DOC_DPS0_ADDRHIGH 0x1062
+#define DOC_DPS1_ADDRLOW 0x1064
+#define DOC_DPS1_ADDRHIGH 0x1066
+#define DOC_DPS0_STATUS 0x106c
+#define DOC_DPS1_STATUS 0x106e
+
+#define DOC_ASICMODECONFIRM 0x1072
+#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
+
+/*
+ * Flash sequences
+ * A sequence is preset before one or more commands are input to the chip.
+ */
+#define DOC_SEQ_RESET 0x00
+#define DOC_SEQ_PAGE_SIZE_532 0x03
+#define DOC_SEQ_SET_FASTMODE 0x05
+#define DOC_SEQ_SET_RELIABLEMODE 0x09
+#define DOC_SEQ_READ 0x12
+#define DOC_SEQ_SET_PLANE1 0x0e
+#define DOC_SEQ_SET_PLANE2 0x10
+#define DOC_SEQ_PAGE_SETUP 0x1d
+#define DOC_SEQ_ERASE 0x27
+#define DOC_SEQ_PLANES_STATUS 0x31
+
+/*
+ * Flash commands
+ */
+#define DOC_CMD_READ_PLANE1 0x00
+#define DOC_CMD_SET_ADDR_READ 0x05
+#define DOC_CMD_READ_ALL_PLANES 0x30
+#define DOC_CMD_READ_PLANE2 0x50
+#define DOC_CMD_READ_FLASH 0xe0
+#define DOC_CMD_PAGE_SIZE_532 0x3c
+
+#define DOC_CMD_PROG_BLOCK_ADDR 0x60
+#define DOC_CMD_PROG_CYCLE1 0x80
+#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOC_CMD_PROG_CYCLE3 0x11
+#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOC_CMD_READ_STATUS 0x70
+#define DOC_CMD_PLANES_STATUS 0x71
+
+#define DOC_CMD_RELIABLE_MODE 0x22
+#define DOC_CMD_FAST_MODE 0xa2
+
+#define DOC_CMD_RESET 0xff
+
+/*
+ * Flash register : DOC_FLASHCONTROL
+ */
+#define DOC_CTRL_VIOLATION 0x20
+#define DOC_CTRL_CE 0x10
+#define DOC_CTRL_UNKNOWN_BITS 0x08
+#define DOC_CTRL_PROTECTION_ERROR 0x04
+#define DOC_CTRL_SEQUENCE_ERROR 0x02
+#define DOC_CTRL_FLASHREADY 0x01
+
+/*
+ * Flash register : DOC_ASICMODE
+ */
+#define DOC_ASICMODE_RESET 0x00
+#define DOC_ASICMODE_NORMAL 0x01
+#define DOC_ASICMODE_POWERDOWN 0x02
+#define DOC_ASICMODE_MDWREN 0x04
+#define DOC_ASICMODE_BDETCT_RESET 0x08
+#define DOC_ASICMODE_RSTIN_RESET 0x10
+#define DOC_ASICMODE_RAM_WE 0x20
+
+/*
+ * Flash register : DOC_ECCCONF0
+ */
+#define DOC_ECCCONF0_WRITE_MODE 0x0000
+#define DOC_ECCCONF0_READ_MODE 0x8000
+#define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000
+#define DOC_ECCCONF0_HAMMING_ENABLE 0x1000
+#define DOC_ECCCONF0_BCH_ENABLE 0x0800
+#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
+
+/*
+ * Flash register : DOC_ECCCONF1
+ */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
+#define DOC_ECCCONF1_UNKOWN1 0x40
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
+#define DOC_ECCCONF1_UNKOWN3 0x10
+#define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f
+
+/*
+ * Flash register : DOC_PROTECTION
+ */
+#define DOC_PROTECT_FOUNDRY_OTP_LOCK 0x01
+#define DOC_PROTECT_CUSTOMER_OTP_LOCK 0x02
+#define DOC_PROTECT_LOCK_INPUT 0x04
+#define DOC_PROTECT_STICKY_LOCK 0x08
+#define DOC_PROTECT_PROTECTION_ENABLED 0x10
+#define DOC_PROTECT_IPL_DOWNLOAD_LOCK 0x20
+#define DOC_PROTECT_PROTECTION_ERROR 0x80
+
+/*
+ * Flash register : DOC_DPS0_STATUS and DOC_DPS1_STATUS
+ */
+#define DOC_DPS_OTP_PROTECTED 0x01
+#define DOC_DPS_READ_PROTECTED 0x02
+#define DOC_DPS_WRITE_PROTECTED 0x04
+#define DOC_DPS_HW_LOCK_ENABLED 0x08
+#define DOC_DPS_KEY_OK 0x80
+
+/*
+ * Flash register : DOC_CONFIGURATION
+ */
+#define DOC_CONF_IF_CFG 0x80
+#define DOC_CONF_MAX_ID_MASK 0x30
+#define DOC_CONF_VCCQ_3V 0x01
+
+/*
+ * Flash register : DOC_READADDRESS
+ */
+#define DOC_READADDR_INC 0x8000
+#define DOC_READADDR_ONE_BYTE 0x4000
+#define DOC_READADDR_ADDR_MASK 0x1fff
+
+/*
+ * Flash register : DOC_POWERMODE
+ */
+#define DOC_POWERDOWN_READY 0x80
+
+/*
+ * Status of erase and write operation
+ */
+#define DOC_PLANES_STATUS_FAIL 0x01
+#define DOC_PLANES_STATUS_PLANE0_KO 0x02
+#define DOC_PLANES_STATUS_PLANE1_KO 0x04
+
+/*
+ * DPS key management
+ *
+ * Each floor of docg3 has 2 protection areas: DPS0 and DPS1. These areas span
+ * across block boundaries, and define whether these blocks can be read or
+ * written.
+ * The definition is dynamically stored in page 0 of blocks (2,3) for DPS0, and
+ * page 0 of blocks (4,5) for DPS1.
+ */
+#define DOC_LAYOUT_DPS_KEY_LENGTH 8
+
+/**
+ * struct docg3_cascade - Cascade of 1 to 4 docg3 chips
+ * @floors: floors (i.e. one physical docg3 chip is one floor)
+ * @base: IO space to access all chips in the cascade
+ * @bch: the BCH correcting control structure
+ * @lock: lock to protect docg3 IO space from concurrent accesses
+ */
+struct docg3_cascade {
+ struct mtd_info *floors[DOC_MAX_NBFLOORS];
+ void __iomem *base;
+ struct bch_control *bch;
+ struct mutex lock;
+};
+
+/**
+ * struct docg3 - DiskOnChip driver private data
+ * @dev: the device currently under control
+ * @cascade: the cascade this device belongs to
+ * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
+ * @if_cfg: if true, reads are on 16bits, else reads are on 8bits
+ *
+ * @reliable: if 0, docg3 in normal mode, if 1 docg3 in fast mode, if 2 in
+ * reliable mode
+ * Fast mode implies more errors than normal mode.
+ * Reliable mode implies that page 2*n and 2*n+1 are clones.
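+ * @max_block: last valid block number on this floor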
+ * @bbt: bad block table cache
+ * @oob_write_ofs: offset of the MTD where this OOB should belong (i.e. in the
+ * next page_write)
+ * @oob_autoecc: if 1, use only bytes 0-7 and 15, and fill the others with
+ * hardware ECC; if 0, use all 16 bytes.
+ * @oob_write_buf: prepared OOB for next page_write
+ * @debugfs_root: debugfs root node
+ */
+struct docg3 {
+ struct device *dev;
+ struct docg3_cascade *cascade;
+ unsigned int device_id:4;
+ unsigned int if_cfg:1;
+ unsigned int reliable:2;
+ int max_block;
+ u8 *bbt;
+ loff_t oob_write_ofs;
+ int oob_autoecc;
+ u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE];
+ struct dentry *debugfs_root;
+};
+
+#define doc_err(fmt, arg...) dev_err(docg3->dev, (fmt), ## arg)
+#define doc_info(fmt, arg...) dev_info(docg3->dev, (fmt), ## arg)
+#define doc_dbg(fmt, arg...) dev_dbg(docg3->dev, (fmt), ## arg)
+#define doc_vdbg(fmt, arg...) dev_vdbg(docg3->dev, (fmt), ## arg)
+
+#define DEBUGFS_RO_ATTR(name, show_fct) \
+ static int name##_open(struct inode *inode, struct file *file) \
+ { return single_open(file, show_fct, inode->i_private); } \
+ static const struct file_operations name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = name##_open, \
+ .llseek = seq_lseek, \
+ .read = seq_read, \
+ .release = single_release \
+ };
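+
+/*
+ * Editor's note: typical usage (illustrative; the show routine named here is
+ * hypothetical):
+ *
+ *	DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
+ *
+ * expands to a flashcontrol_open() helper and a flashcontrol_fops table
+ * suitable for debugfs_create_file().
+ */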
+#endif
+
+/*
+ * Trace events part
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM docg3
+
+#if !defined(_MTD_DOCG3_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define _MTD_DOCG3_TRACE
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(docg3_io,
+ TP_PROTO(int op, int width, u16 reg, int val),
+ TP_ARGS(op, width, reg, val),
+ TP_STRUCT__entry(
+ __field(int, op)
+ __field(unsigned char, width)
+ __field(u16, reg)
+ __field(int, val)),
+ TP_fast_assign(
+ __entry->op = op;
+ __entry->width = width;
+ __entry->reg = reg;
+ __entry->val = val;),
+ TP_printk("docg3: %s%02d reg=%04x, val=%04x",
+ __entry->op ? "write" : "read", __entry->width,
+ __entry->reg, __entry->val)
+ );
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE docg3
+#include <trace/define_trace.h>
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
new file mode 100644
index 000000000..82bd00af5
--- /dev/null
+++ b/drivers/mtd/devices/lart.c
@@ -0,0 +1,685 @@
+
+/*
+ * MTD driver for the 28F160F3 Flash Memory (non-CFI) on LART.
+ *
+ * Author: Abraham vd Merwe <abraham@2d3d.co.za>
+ *
+ * Copyright (c) 2001, 2d3D, Inc.
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * References:
+ *
+ * [1] "3 Volt Fast Boot Block Flash Memory", Intel Datasheet
+ * - Order Number: 290644-005
+ * - January 2000
+ *
+ * [2] MTD internal API documentation
+ * - http://www.linux-mtd.infradead.org/
+ *
+ * Limitations:
+ *
+ * Even though this driver is written for 3 Volt Fast Boot
+ * Block Flash Memory, it is rather specific to LART. With
+ * minor modifications, notably removing the data/address line
+ * mangling and adjusting the bus settings, it should be
+ * trivial to adapt to other platforms.
+ *
+ * If somebody would sponsor me a different board, I'll
+ * adapt the driver (:
+ */
+
+/* debugging */
+//#define LART_DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#ifndef CONFIG_SA1100_LART
+#error This is for LART architecture only
+#endif
+
+static char module_name[] = "lart";
+
+/*
+ * These values are specific to 28Fxxxx3 flash memory.
+ * See section 2.3.1 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
+ */
+#define FLASH_BLOCKSIZE_PARAM (4096 * BUSWIDTH)
+#define FLASH_NUMBLOCKS_16m_PARAM 8
+#define FLASH_NUMBLOCKS_8m_PARAM 8
+
+/*
+ * These values are specific to 28Fxxxx3 flash memory.
+ * See section 2.3.2 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
+ */
+#define FLASH_BLOCKSIZE_MAIN (32768 * BUSWIDTH)
+#define FLASH_NUMBLOCKS_16m_MAIN 31
+#define FLASH_NUMBLOCKS_8m_MAIN 15
+
+/*
+ * These values are specific to LART
+ */
+
+/* general */
+#define BUSWIDTH 4 /* don't change this - a lot of the code _will_ break if you change this */
+#define FLASH_OFFSET 0xe8000000 /* see linux/arch/arm/mach-sa1100/lart.c */
+
+/* blob */
+#define NUM_BLOB_BLOCKS FLASH_NUMBLOCKS_16m_PARAM
+#define BLOB_START 0x00000000
+#define BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM)
+
+/* kernel */
+#define NUM_KERNEL_BLOCKS 7
+#define KERNEL_START (BLOB_START + BLOB_LEN)
+#define KERNEL_LEN (NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN)
+
+/* initial ramdisk */
+#define NUM_INITRD_BLOCKS 24
+#define INITRD_START (KERNEL_START + KERNEL_LEN)
+#define INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN)
+
+/*
+ * See section 4.0 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
+ */
+#define READ_ARRAY 0x00FF00FF /* Read Array/Reset */
+#define READ_ID_CODES 0x00900090 /* Read Identifier Codes */
+#define ERASE_SETUP 0x00200020 /* Block Erase */
+#define ERASE_CONFIRM 0x00D000D0 /* Block Erase and Program Resume */
+#define PGM_SETUP 0x00400040 /* Program */
+#define STATUS_READ 0x00700070 /* Read Status Register */
+#define STATUS_CLEAR 0x00500050 /* Clear Status Register */
+#define STATUS_BUSY 0x00800080 /* Write State Machine Status (WSMS) */
+#define STATUS_ERASE_ERR 0x00200020 /* Erase Status (ES) */
+#define STATUS_PGM_ERR 0x00100010 /* Program Status (PS) */
+
+/*
+ * See section 4.2 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
+ */
+#define FLASH_MANUFACTURER 0x00890089
+#define FLASH_DEVICE_8mbit_TOP 0x88f188f1
+#define FLASH_DEVICE_8mbit_BOTTOM 0x88f288f2
+#define FLASH_DEVICE_16mbit_TOP 0x88f388f3
+#define FLASH_DEVICE_16mbit_BOTTOM 0x88f488f4
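+
+/*
+ * Editor's note: each identifier is doubled (e.g. Intel's 0x0089 becomes
+ * 0x00890089) because two 16-bit chips (U2 and U3) sit side by side on the
+ * 32-bit bus, so both halves of a dword return the same code.
+ */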
+
+/***************************************************************************************************/
+
+/*
+ * The data line mapping on LART is as follows:
+ *
+ * U2 CPU | U3 CPU
+ * -------------------
+ * 0 20 | 0 12
+ * 1 22 | 1 14
+ * 2 19 | 2 11
+ * 3 17 | 3 9
+ * 4 24 | 4 0
+ * 5 26 | 5 2
+ * 6 31 | 6 7
+ * 7 29 | 7 5
+ * 8 21 | 8 13
+ * 9 23 | 9 15
+ * 10 18 | 10 10
+ * 11 16 | 11 8
+ * 12 25 | 12 1
+ * 13 27 | 13 3
+ * 14 30 | 14 6
+ * 15 28 | 15 4
+ */
+
+/* Mangle data (x) */
+#define DATA_TO_FLASH(x) \
+ ( \
+ (((x) & 0x08009000) >> 11) + \
+ (((x) & 0x00002000) >> 10) + \
+ (((x) & 0x04004000) >> 8) + \
+ (((x) & 0x00000010) >> 4) + \
+ (((x) & 0x91000820) >> 3) + \
+ (((x) & 0x22080080) >> 2) + \
+ ((x) & 0x40000400) + \
+ (((x) & 0x00040040) << 1) + \
+ (((x) & 0x00110000) << 4) + \
+ (((x) & 0x00220100) << 5) + \
+ (((x) & 0x00800208) << 6) + \
+ (((x) & 0x00400004) << 9) + \
+ (((x) & 0x00000001) << 12) + \
+ (((x) & 0x00000002) << 13) \
+ )
+
+/* Unmangle data (x) */
+#define FLASH_TO_DATA(x) \
+ ( \
+ (((x) & 0x00010012) << 11) + \
+ (((x) & 0x00000008) << 10) + \
+ (((x) & 0x00040040) << 8) + \
+ (((x) & 0x00000001) << 4) + \
+ (((x) & 0x12200104) << 3) + \
+ (((x) & 0x08820020) << 2) + \
+ ((x) & 0x40000400) + \
+ (((x) & 0x00080080) >> 1) + \
+ (((x) & 0x01100000) >> 4) + \
+ (((x) & 0x04402000) >> 5) + \
+ (((x) & 0x20008200) >> 6) + \
+ (((x) & 0x80000800) >> 9) + \
+ (((x) & 0x00001000) >> 12) + \
+ (((x) & 0x00004000) >> 13) \
+ )
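+
+/*
+ * Editor's note: DATA_TO_FLASH() and FLASH_TO_DATA() are inverse 32-bit
+ * permutations, which can be spot-checked at runtime (illustrative only):
+ *
+ *	u32 x = 0x12345678;
+ *	WARN_ON(FLASH_TO_DATA(DATA_TO_FLASH(x)) != x);
+ *
+ * The same round-trip property holds (within the decoded address bits) for
+ * the ADDR_TO_FLASH_U2()/FLASH_U2_TO_ADDR() and ADDR_TO_FLASH_U3()/
+ * FLASH_U3_TO_ADDR() pairs defined below.
+ */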
+
+/*
+ * The address line mapping on LART is as follows:
+ *
+ * U3 CPU | U2 CPU
+ * -------------------
+ * 0 2 | 0 2
+ * 1 3 | 1 3
+ * 2 9 | 2 9
+ * 3 13 | 3 8
+ * 4 8 | 4 7
+ * 5 12 | 5 6
+ * 6 11 | 6 5
+ * 7 10 | 7 4
+ * 8 4 | 8 10
+ * 9 5 | 9 11
+ * 10 6 | 10 12
+ * 11 7 | 11 13
+ *
+ * BOOT BLOCK BOUNDARY
+ *
+ * 12 15 | 12 15
+ * 13 14 | 13 14
+ * 14 16 | 14 16
+ *
+ * MAIN BLOCK BOUNDARY
+ *
+ * 15 17 | 15 18
+ * 16 18 | 16 17
+ * 17 20 | 17 20
+ * 18 19 | 18 19
+ * 19 21 | 19 21
+ *
+ * As we can see from above, the addresses aren't mangled across
+ * block boundaries, so we don't need to worry about address
+ * translations except for sending/reading commands during
+ * initialization
+ */
+
+/* Mangle address (x) on chip U2 */
+#define ADDR_TO_FLASH_U2(x) \
+ ( \
+ (((x) & 0x00000f00) >> 4) + \
+ (((x) & 0x00042000) << 1) + \
+ (((x) & 0x0009c003) << 2) + \
+ (((x) & 0x00021080) << 3) + \
+ (((x) & 0x00000010) << 4) + \
+ (((x) & 0x00000040) << 5) + \
+ (((x) & 0x00000024) << 7) + \
+ (((x) & 0x00000008) << 10) \
+ )
+
+/* Unmangle address (x) on chip U2 */
+#define FLASH_U2_TO_ADDR(x) \
+ ( \
+ (((x) << 4) & 0x00000f00) + \
+ (((x) >> 1) & 0x00042000) + \
+ (((x) >> 2) & 0x0009c003) + \
+ (((x) >> 3) & 0x00021080) + \
+ (((x) >> 4) & 0x00000010) + \
+ (((x) >> 5) & 0x00000040) + \
+ (((x) >> 7) & 0x00000024) + \
+ (((x) >> 10) & 0x00000008) \
+ )
+
+/* Mangle address (x) on chip U3 */
+#define ADDR_TO_FLASH_U3(x) \
+ ( \
+ (((x) & 0x00000080) >> 3) + \
+ (((x) & 0x00000040) >> 1) + \
+ (((x) & 0x00052020) << 1) + \
+ (((x) & 0x00084f03) << 2) + \
+ (((x) & 0x00029010) << 3) + \
+ (((x) & 0x00000008) << 5) + \
+ (((x) & 0x00000004) << 7) \
+ )
+
+/* Unmangle address (x) on chip U3 */
+#define FLASH_U3_TO_ADDR(x) \
+ ( \
+ (((x) << 3) & 0x00000080) + \
+ (((x) << 1) & 0x00000040) + \
+ (((x) >> 1) & 0x00052020) + \
+ (((x) >> 2) & 0x00084f03) + \
+ (((x) >> 3) & 0x00029010) + \
+ (((x) >> 5) & 0x00000008) + \
+ (((x) >> 7) & 0x00000004) \
+ )
+
+/***************************************************************************************************/
+
+static __u8 read8 (__u32 offset)
+{
+ volatile __u8 *data = (__u8 *) (FLASH_OFFSET + offset);
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.2x\n", __func__, offset, *data);
+#endif
+ return (*data);
+}
+
+static __u32 read32 (__u32 offset)
+{
+ volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset);
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.8x\n", __func__, offset, *data);
+#endif
+ return (*data);
+}
+
+static void write32 (__u32 x,__u32 offset)
+{
+ volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset);
+ *data = x;
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n", __func__, offset, *data);
+#endif
+}
+
+/***************************************************************************************************/
+
+/*
+ * Probe for 16mbit flash memory on a LART board without doing
+ * too much damage. Since we need to write 1 dword to memory,
+ * we're f**cked if this happens to be DRAM since we can't
+ * restore the memory (otherwise we might exit Read Array mode).
+ *
+ * Returns 1 if we found 16mbit flash memory on LART, 0 otherwise.
+ */
+static int flash_probe (void)
+{
+ __u32 manufacturer,devtype;
+
+ /* setup "Read Identifier Codes" mode */
+ write32 (DATA_TO_FLASH (READ_ID_CODES),0x00000000);
+
+ /* probe U2. U2 and U3 return the same data since the first 3
+ * address lines are mangled in the same way */
+ manufacturer = FLASH_TO_DATA (read32 (ADDR_TO_FLASH_U2 (0x00000000)));
+ devtype = FLASH_TO_DATA (read32 (ADDR_TO_FLASH_U2 (0x00000001)));
+
+ /* put the flash back into command mode */
+ write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000);
+
+ return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM));
+}
+
+/*
+ * Erase one block of flash memory at offset ``offset'', which may be any
+ * address within the block that should be erased.
+ *
+ * Returns 1 if successful, 0 otherwise.
+ */
+static inline int erase_block (__u32 offset)
+{
+ __u32 status;
+
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(): 0x%.8x\n", __func__, offset);
+#endif
+
+ /* erase and confirm */
+ write32 (DATA_TO_FLASH (ERASE_SETUP),offset);
+ write32 (DATA_TO_FLASH (ERASE_CONFIRM),offset);
+
+ /* wait for block erase to finish */
+ do
+ {
+ write32 (DATA_TO_FLASH (STATUS_READ),offset);
+ status = FLASH_TO_DATA (read32 (offset));
+ }
+ while ((~status & STATUS_BUSY) != 0);
+
+ /* put the flash back into command mode */
+ write32 (DATA_TO_FLASH (READ_ARRAY),offset);
+
+ /* was the erase successful? */
+ if ((status & STATUS_ERASE_ERR))
+ {
+ printk (KERN_WARNING "%s: erase error at address 0x%.8x.\n",module_name,offset);
+ return (0);
+ }
+
+ return (1);
+}
+
+static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
+{
+ __u32 addr,len;
+ int i,first;
+
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len);
+#endif
+
+ /*
+ * check that both start and end of the requested erase are
+ * aligned with the erasesize at the appropriate addresses.
+ *
+ * skip all erase regions which are ended before the start of
+ * the requested erase. Actually, to save on the calculations,
+ * we skip to the first erase region which starts after the
+ * start of the requested erase, and then go back one.
+ */
+ for (i = 0; i < mtd->numeraseregions && instr->addr >= mtd->eraseregions[i].offset; i++) ;
+ i--;
+
+ /*
+ * ok, now i is pointing at the erase region in which this
+ * erase request starts. Check the start of the requested
+ * erase range is aligned with the erase size which is in
+ * effect here.
+ */
+ if (i < 0 || (instr->addr & (mtd->eraseregions[i].erasesize - 1)))
+ return -EINVAL;
+
+ /* Remember the erase region we start on */
+ first = i;
+
+ /*
+ * next, check that the end of the requested erase is aligned
+ * with the erase region at that address.
+ *
+ * as before, drop back one to point at the region in which
+ * the address actually falls
+ */
+ for (; i < mtd->numeraseregions && instr->addr + instr->len >= mtd->eraseregions[i].offset; i++) ;
+ i--;
+
+ /* is the end aligned on a block boundary? */
+ if (i < 0 || ((instr->addr + instr->len) & (mtd->eraseregions[i].erasesize - 1)))
+ return -EINVAL;
+
+ addr = instr->addr;
+ len = instr->len;
+
+ i = first;
+
+ /* now erase those blocks */
+ while (len)
+ {
+ if (!erase_block (addr))
+ {
+ instr->state = MTD_ERASE_FAILED;
+ return (-EIO);
+ }
+
+ addr += mtd->eraseregions[i].erasesize;
+ len -= mtd->eraseregions[i].erasesize;
+
+ if (addr == mtd->eraseregions[i].offset + (mtd->eraseregions[i].erasesize * mtd->eraseregions[i].numblocks)) i++;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return (0);
+}
+
+static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retlen,u_char *buf)
+{
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len);
+#endif
+
+ /* we always read len bytes */
+ *retlen = len;
+
+ /* first, we read bytes until we reach a dword boundary */
+ if (from & (BUSWIDTH - 1))
+ {
+ int gap = BUSWIDTH - (from & (BUSWIDTH - 1));
+
+ while (len && gap--) *buf++ = read8 (from++), len--;
+ }
+
+ /* now we read dwords until we reach a non-dword boundary */
+ while (len >= BUSWIDTH)
+ {
+ *((__u32 *) buf) = read32 (from);
+
+ buf += BUSWIDTH;
+ from += BUSWIDTH;
+ len -= BUSWIDTH;
+ }
+
+ /* top up the last unaligned bytes */
+ if (len & (BUSWIDTH - 1))
+ while (len--) *buf++ = read8 (from++);
+
+ return (0);
+}
+
+/*
+ * Write one dword ``x'' to flash memory at offset ``offset''. ``offset''
+ * must be 32-bit aligned, i.e. it must lie on a dword boundary.
+ *
+ * Returns 1 if successful, 0 otherwise.
+ */
+static inline int write_dword (__u32 offset,__u32 x)
+{
+ __u32 status;
+
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n", __func__, offset, x);
+#endif
+
+ /* setup writing */
+ write32 (DATA_TO_FLASH (PGM_SETUP),offset);
+
+ /* write the data */
+ write32 (x,offset);
+
+ /* wait for the write to finish */
+ do
+ {
+ write32 (DATA_TO_FLASH (STATUS_READ),offset);
+ status = FLASH_TO_DATA (read32 (offset));
+ }
+ while ((~status & STATUS_BUSY) != 0);
+
+ /* put the flash back into command mode */
+ write32 (DATA_TO_FLASH (READ_ARRAY),offset);
+
+ /* was the write successful? */
+ if ((status & STATUS_PGM_ERR) || read32 (offset) != x)
+ {
+ printk (KERN_WARNING "%s: write error at address 0x%.8x.\n",module_name,offset);
+ return (0);
+ }
+
+ return (1);
+}
+
+static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen,const u_char *buf)
+{
+ __u8 tmp[4];
+ int i,n;
+
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len);
+#endif
+
+ /* sanity checks */
+ if (!len) return (0);
+
+ /* first, we write a 0xFF-padded dword until we reach a dword boundary
+ * (programming can only clear bits, so the 0xFF padding leaves the
+ * neighbouring bytes untouched) */
+ if (to & (BUSWIDTH - 1))
+ {
+ __u32 aligned = to & ~(BUSWIDTH - 1);
+ int gap = to - aligned;
+
+ i = n = 0;
+
+ while (gap--) tmp[i++] = 0xFF;
+ while (len && i < BUSWIDTH) tmp[i++] = buf[n++], len--;
+ while (i < BUSWIDTH) tmp[i++] = 0xFF;
+
+ if (!write_dword (aligned,*((__u32 *) tmp))) return (-EIO);
+
+ to += n;
+ buf += n;
+ *retlen += n;
+ }
+
+ /* now we write dwords until we reach a non-dword boundary */
+ while (len >= BUSWIDTH)
+ {
+ if (!write_dword (to,*((__u32 *) buf))) return (-EIO);
+
+ to += BUSWIDTH;
+ buf += BUSWIDTH;
+ *retlen += BUSWIDTH;
+ len -= BUSWIDTH;
+ }
+
+ /* top up the last unaligned bytes, padded with 0xFF.... */
+ if (len & (BUSWIDTH - 1))
+ {
+ i = n = 0;
+
+ while (len--) tmp[i++] = buf[n++];
+ while (i < BUSWIDTH) tmp[i++] = 0xFF;
+
+ if (!write_dword (to,*((__u32 *) tmp))) return (-EIO);
+
+ *retlen += n;
+ }
+
+ return (0);
+}
+
+/***************************************************************************************************/
+
+static struct mtd_info mtd;
+
+static struct mtd_erase_region_info erase_regions[] = {
+ /* parameter blocks */
+ {
+ .offset = 0x00000000,
+ .erasesize = FLASH_BLOCKSIZE_PARAM,
+ .numblocks = FLASH_NUMBLOCKS_16m_PARAM,
+ },
+ /* main blocks */
+ {
+ .offset = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM,
+ .erasesize = FLASH_BLOCKSIZE_MAIN,
+ .numblocks = FLASH_NUMBLOCKS_16m_MAIN,
+ }
+};
+
+static struct mtd_partition lart_partitions[] = {
+ /* blob */
+ {
+ .name = "blob",
+ .offset = BLOB_START,
+ .size = BLOB_LEN,
+ },
+ /* kernel */
+ {
+ .name = "kernel",
+ .offset = KERNEL_START, /* MTDPART_OFS_APPEND */
+ .size = KERNEL_LEN,
+ },
+ /* initial ramdisk / file system */
+ {
+ .name = "file system",
+ .offset = INITRD_START, /* MTDPART_OFS_APPEND */
+ .size = INITRD_LEN, /* MTDPART_SIZ_FULL */
+ }
+};
+#define NUM_PARTITIONS ARRAY_SIZE(lart_partitions)
+
+static int __init lart_flash_init (void)
+{
+ int result;
+ memset (&mtd,0,sizeof (mtd));
+ printk ("MTD driver for LART. Written by Abraham vd Merwe <abraham@2d3d.co.za>\n");
+ printk ("%s: Probing for 28F160x3 flash on LART...\n",module_name);
+ if (!flash_probe ())
+ {
+ printk (KERN_WARNING "%s: Found no LART compatible flash device\n",module_name);
+ return (-ENXIO);
+ }
+ printk ("%s: This looks like a LART board to me.\n",module_name);
+ mtd.name = module_name;
+ mtd.type = MTD_NORFLASH;
+ mtd.writesize = 1;
+ mtd.writebufsize = 4;
+ mtd.flags = MTD_CAP_NORFLASH;
+ mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
+ mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
+ mtd.numeraseregions = ARRAY_SIZE(erase_regions);
+ mtd.eraseregions = erase_regions;
+ mtd._erase = flash_erase;
+ mtd._read = flash_read;
+ mtd._write = flash_write;
+ mtd.owner = THIS_MODULE;
+
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG
+ "mtd.name = %s\n"
+ "mtd.size = 0x%.8x (%uM)\n"
+ "mtd.erasesize = 0x%.8x (%uK)\n"
+ "mtd.numeraseregions = %d\n",
+ mtd.name,
+ mtd.size,mtd.size / (1024*1024),
+ mtd.erasesize,mtd.erasesize / 1024,
+ mtd.numeraseregions);
+
+ if (mtd.numeraseregions)
+ for (result = 0; result < mtd.numeraseregions; result++)
+ printk (KERN_DEBUG
+ "\n\n"
+ "mtd.eraseregions[%d].offset = 0x%.8x\n"
+ "mtd.eraseregions[%d].erasesize = 0x%.8x (%uK)\n"
+ "mtd.eraseregions[%d].numblocks = %d\n",
+ result,mtd.eraseregions[result].offset,
+ result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024,
+ result,mtd.eraseregions[result].numblocks);
+
+ printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions));
+
+ for (result = 0; result < ARRAY_SIZE(lart_partitions); result++)
+ printk (KERN_DEBUG
+ "\n\n"
+ "lart_partitions[%d].name = %s\n"
+ "lart_partitions[%d].offset = 0x%.8x\n"
+ "lart_partitions[%d].size = 0x%.8x (%uK)\n",
+ result,lart_partitions[result].name,
+ result,lart_partitions[result].offset,
+ result,lart_partitions[result].size,lart_partitions[result].size / 1024);
+#endif
+
+ result = mtd_device_register(&mtd, lart_partitions,
+ ARRAY_SIZE(lart_partitions));
+
+ return (result);
+}
+
+static void __exit lart_flash_exit (void)
+{
+ mtd_device_unregister(&mtd);
+}
+
+module_init (lart_flash_init);
+module_exit (lart_flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Abraham vd Merwe <abraham@2d3d.co.za>");
+MODULE_DESCRIPTION("MTD driver for Intel 28F160F3 on LART board");
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
new file mode 100644
index 000000000..3af137f49
--- /dev/null
+++ b/drivers/mtd/devices/m25p80.c
@@ -0,0 +1,332 @@
+/*
+ * MTD SPI driver for ST M25Pxx (and similar) serial flash chips
+ *
+ * Author: Mike Lavender, mike@steroidmicros.com
+ *
+ * Copyright (c) 2005, Intec Automation Inc.
+ *
+ * Some parts are based on lart.c by Abraham Van Der Merwe
+ *
+ * Cleaned up and generalized based on mtd_dataflash.c
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
+
+#define MAX_CMD_SIZE 6
+struct m25p {
+ struct spi_device *spi;
+ struct spi_nor spi_nor;
+ struct mtd_info mtd;
+ u8 command[MAX_CMD_SIZE];
+};
+
+static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
+{
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
+ int ret;
+
+ ret = spi_write_then_read(spi, &code, 1, val, len);
+ if (ret < 0)
+ dev_err(&spi->dev, "error %d reading %x\n", ret, code);
+
+ return ret;
+}
+
+static void m25p_addr2cmd(struct spi_nor *nor, unsigned int addr, u8 *cmd)
+{
+ /* opcode is in cmd[0] */
+ cmd[1] = addr >> (nor->addr_width * 8 - 8);
+ cmd[2] = addr >> (nor->addr_width * 8 - 16);
+ cmd[3] = addr >> (nor->addr_width * 8 - 24);
+ cmd[4] = addr >> (nor->addr_width * 8 - 32);
+}
+
+static int m25p_cmdsz(struct spi_nor *nor)
+{
+ return 1 + nor->addr_width;
+}
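+
+/*
+ * Editor's note, a worked example: with addr_width == 3 and addr == 0x123456,
+ * m25p_addr2cmd() yields cmd[1..3] == { 0x12, 0x34, 0x56 } and m25p_cmdsz()
+ * returns 4 (one opcode byte plus three address bytes). cmd[4] is only
+ * meaningful for addr_width == 4; with 3-byte addressing it is computed but
+ * never transmitted, since m25p_cmdsz() limits the transfer length.
+ */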
+
+static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
+ int wr_en)
+{
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
+
+ flash->command[0] = opcode;
+ if (buf)
+ memcpy(&flash->command[1], buf, len);
+
+ return spi_write(spi, flash->command, len + 1);
+}
+
+static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
+ struct spi_transfer t[2] = {};
+ struct spi_message m;
+ int cmd_sz = m25p_cmdsz(nor);
+
+ spi_message_init(&m);
+
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ cmd_sz = 1;
+
+ flash->command[0] = nor->program_opcode;
+ m25p_addr2cmd(nor, to, flash->command);
+
+ t[0].tx_buf = flash->command;
+ t[0].len = cmd_sz;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = len;
+ spi_message_add_tail(&t[1], &m);
+
+ spi_sync(spi, &m);
+
+ *retlen += m.actual_length - cmd_sz;
+}
+
+static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
+{
+ switch (nor->flash_read) {
+ case SPI_NOR_DUAL:
+ return 2;
+ case SPI_NOR_QUAD:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Read an address range from the nor chip. The address range
+ * may be any size provided it is within the physical boundaries.
+ */
+static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct m25p *flash = nor->priv;
+ struct spi_device *spi = flash->spi;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ unsigned int dummy = nor->read_dummy;
+
+ /* convert the dummy cycles to the number of bytes */
+ dummy /= 8;
+
+ spi_message_init(&m);
+ memset(t, 0, (sizeof t));
+
+ flash->command[0] = nor->read_opcode;
+ m25p_addr2cmd(nor, from, flash->command);
+
+ t[0].tx_buf = flash->command;
+ t[0].len = m25p_cmdsz(nor) + dummy;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].rx_nbits = m25p80_rx_nbits(nor);
+ t[1].len = len;
+ spi_message_add_tail(&t[1], &m);
+
+ spi_sync(spi, &m);
+
+ *retlen = m.actual_length - m25p_cmdsz(nor) - dummy;
+ return 0;
+}
+
+static int m25p80_erase(struct spi_nor *nor, loff_t offset)
+{
+ struct m25p *flash = nor->priv;
+
+ dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
+ flash->mtd.erasesize / 1024, (u32)offset);
+
+ /* Set up command buffer. */
+ flash->command[0] = nor->erase_opcode;
+ m25p_addr2cmd(nor, offset, flash->command);
+
+ spi_write(flash->spi, flash->command, m25p_cmdsz(nor));
+
+ return 0;
+}
+
+/*
+ * board specific setup should have ensured the SPI clock used here
+ * matches what the READ command supports, at least until this driver
+ * understands FAST_READ (for clocks over 25 MHz).
+ */
+static int m25p_probe(struct spi_device *spi)
+{
+ struct mtd_part_parser_data ppdata;
+ struct flash_platform_data *data;
+ struct m25p *flash;
+ struct spi_nor *nor;
+ enum read_mode mode = SPI_NOR_NORMAL;
+ char *flash_name = NULL;
+ int ret;
+
+ data = dev_get_platdata(&spi->dev);
+
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+
+ nor = &flash->spi_nor;
+
+ /* install the hooks */
+ nor->read = m25p80_read;
+ nor->write = m25p80_write;
+ nor->erase = m25p80_erase;
+ nor->write_reg = m25p80_write_reg;
+ nor->read_reg = m25p80_read_reg;
+
+ nor->dev = &spi->dev;
+ nor->mtd = &flash->mtd;
+ nor->priv = flash;
+
+ spi_set_drvdata(spi, flash);
+ flash->mtd.priv = nor;
+ flash->spi = spi;
+
+ if (spi->mode & SPI_RX_QUAD)
+ mode = SPI_NOR_QUAD;
+ else if (spi->mode & SPI_RX_DUAL)
+ mode = SPI_NOR_DUAL;
+
+ if (data && data->name)
+ flash->mtd.name = data->name;
+
+ /* For some (historical?) reason many platforms provide two different
+ * names in flash_platform_data: "name" and "type". Quite often name is
+ * set to "m25p80" and then "type" provides a real chip name.
+ * If that's the case, respect "type" and ignore a "name".
+ */
+ if (data && data->type)
+ flash_name = data->type;
+ else if (!strcmp(spi->modalias, "spi-nor"))
+ flash_name = NULL; /* auto-detect */
+ else
+ flash_name = spi->modalias;
+
+ ret = spi_nor_scan(nor, flash_name, mode);
+ if (ret)
+ return ret;
+
+ ppdata.of_node = spi->dev.of_node;
+
+ return mtd_device_parse_register(&flash->mtd, NULL, &ppdata,
+ data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+}
+
+
+static int m25p_remove(struct spi_device *spi)
+{
+ struct m25p *flash = spi_get_drvdata(spi);
+
+ /* Clean up MTD stuff. */
+ return mtd_device_unregister(&flash->mtd);
+}
+
+/*
+ * Do NOT add to this array without reading the following:
+ *
+ * Historically, many flash devices are bound to this driver by their name. But
+ * since most of these flash chips are compatible to some extent, and their
+ * differences can often be differentiated by the JEDEC read-ID command, we
+ * encourage new users to add support to the spi-nor library, and simply bind
+ * against a generic string here (e.g., "jedec,spi-nor").
+ *
+ * Many flash names are kept here in this list (as well as in spi-nor.c) to
+ * keep them available as module aliases for existing platforms.
+ */
+static const struct spi_device_id m25p_ids[] = {
+ {"at25fs010"}, {"at25fs040"}, {"at25df041a"}, {"at25df321a"},
+ {"at25df641"}, {"at26f004"}, {"at26df081a"}, {"at26df161a"},
+ {"at26df321"}, {"at45db081d"},
+ {"en25f32"}, {"en25p32"}, {"en25q32b"}, {"en25p64"},
+ {"en25q64"}, {"en25qh128"}, {"en25qh256"},
+ {"f25l32pa"},
+ {"mr25h256"}, {"mr25h10"},
+ {"gd25q32"}, {"gd25q64"},
+ {"160s33b"}, {"320s33b"}, {"640s33b"},
+ {"mx25l2005a"}, {"mx25l4005a"}, {"mx25l8005"}, {"mx25l1606e"},
+ {"mx25l3205d"}, {"mx25l3255e"}, {"mx25l6405d"}, {"mx25l12805d"},
+ {"mx25l12855e"},{"mx25l25635e"},{"mx25l25655e"},{"mx66l51235l"},
+ {"mx66l1g55g"},
+ {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q256a"},
+ {"n25q512a"}, {"n25q512ax3"}, {"n25q00"},
+ {"pm25lv512"}, {"pm25lv010"}, {"pm25lq032"},
+ {"s25sl032p"}, {"s25sl064p"}, {"s25fl256s0"}, {"s25fl256s1"},
+ {"s25fl512s"}, {"s70fl01gs"}, {"s25sl12800"}, {"s25sl12801"},
+ {"s25fl129p0"}, {"s25fl129p1"}, {"s25sl004a"}, {"s25sl008a"},
+ {"s25sl016a"}, {"s25sl032a"}, {"s25sl064a"}, {"s25fl008k"},
+ {"s25fl016k"}, {"s25fl064k"}, {"s25fl132k"},
+ {"sst25vf040b"},{"sst25vf080b"},{"sst25vf016b"},{"sst25vf032b"},
+ {"sst25vf064c"},{"sst25wf512"}, {"sst25wf010"}, {"sst25wf020"},
+ {"sst25wf040"},
+ {"m25p05"}, {"m25p10"}, {"m25p20"}, {"m25p40"},
+ {"m25p80"}, {"m25p16"}, {"m25p32"}, {"m25p64"},
+ {"m25p128"}, {"n25q032"},
+ {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
+ {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
+ {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
+ {"m45pe10"}, {"m45pe80"}, {"m45pe16"},
+ {"m25pe20"}, {"m25pe80"}, {"m25pe16"},
+ {"m25px16"}, {"m25px32"}, {"m25px32-s0"}, {"m25px32-s1"},
+ {"m25px64"}, {"m25px80"},
+ {"w25x10"}, {"w25x20"}, {"w25x40"}, {"w25x80"},
+ {"w25x16"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
+ {"w25x64"}, {"w25q64"}, {"w25q80"}, {"w25q80bl"},
+ {"w25q128"}, {"w25q256"}, {"cat25c11"},
+ {"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"},
+
+ /*
+ * Generic support for SPI NOR that can be identified by the JEDEC READ
+ * ID opcode (0x9F). Use this, if possible.
+ */
+ {"spi-nor"},
+ { },
+};
+MODULE_DEVICE_TABLE(spi, m25p_ids);
+
+static struct spi_driver m25p80_driver = {
+ .driver = {
+ .name = "m25p80",
+ .owner = THIS_MODULE,
+ },
+ .id_table = m25p_ids,
+ .probe = m25p_probe,
+ .remove = m25p_remove,
+
+ /* REVISIT: many of these chips have deep power-down modes, which
+ * should clearly be entered on suspend() to minimize power use.
+ * And also when they're otherwise idle...
+ */
+};
+
+module_spi_driver(m25p80_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("MTD SPI driver for ST M25Pxx flash chips");
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
new file mode 100644
index 000000000..5c8b322ba
--- /dev/null
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2001 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/kn02.h>
+#include <asm/dec/kn03.h>
+#include <asm/io.h>
+#include <asm/paccess.h>
+
+#include "ms02-nv.h"
+
+
+static char version[] __initdata =
+ "ms02-nv.c: v.1.0.0 13 Aug 2001 Maciej W. Rozycki.\n";
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("DEC MS02-NV NVRAM module driver");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Addresses we probe for an MS02-NV at. Modules may be located
+ * at any 8MiB boundary within a 0MiB up to 112MiB range or at any 32MiB
+ * boundary within a 0MiB up to 448MiB range. We don't support a module
+ * at 0MiB, though.
+ */
+static ulong ms02nv_addrs[] __initdata = {
+ 0x07000000, 0x06800000, 0x06000000, 0x05800000, 0x05000000,
+ 0x04800000, 0x04000000, 0x03800000, 0x03000000, 0x02800000,
+ 0x02000000, 0x01800000, 0x01000000, 0x00800000
+};
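+
+/*
+ * Editor's note: these are the 8MiB-spaced base addresses; the 32MiB spacing
+ * is obtained in ms02nv_init() by shifting them left by a "stride" of 2,
+ * e.g. 0x00800000 << 2 == 0x02000000.
+ */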
+
+static const char ms02nv_name[] = "DEC MS02-NV NVRAM";
+static const char ms02nv_res_diag_ram[] = "Diagnostic RAM";
+static const char ms02nv_res_user_ram[] = "General-purpose RAM";
+static const char ms02nv_res_csr[] = "Control and status register";
+
+static struct mtd_info *root_ms02nv_mtd;
+
+
+static int ms02nv_read(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct ms02nv_private *mp = mtd->priv;
+
+ memcpy(buf, mp->uaddr + from, len);
+ *retlen = len;
+ return 0;
+}
+
+static int ms02nv_write(struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct ms02nv_private *mp = mtd->priv;
+
+ memcpy(mp->uaddr + to, buf, len);
+ *retlen = len;
+ return 0;
+}
+
+
+static inline uint ms02nv_probe_one(ulong addr)
+{
+ ms02nv_uint *ms02nv_diagp;
+ ms02nv_uint *ms02nv_magicp;
+ uint ms02nv_diag;
+ uint ms02nv_magic;
+ size_t size;
+
+ int err;
+
+ /*
+ * The firmware writes MS02NV_ID at MS02NV_MAGIC and also
+ * a diagnostic status at MS02NV_DIAG.
+ */
+ ms02nv_diagp = (ms02nv_uint *)(CKSEG1ADDR(addr + MS02NV_DIAG));
+ ms02nv_magicp = (ms02nv_uint *)(CKSEG1ADDR(addr + MS02NV_MAGIC));
+ err = get_dbe(ms02nv_magic, ms02nv_magicp);
+ if (err)
+ return 0;
+ if (ms02nv_magic != MS02NV_ID)
+ return 0;
+
+ ms02nv_diag = *ms02nv_diagp;
+ size = (ms02nv_diag & MS02NV_DIAG_SIZE_MASK) << MS02NV_DIAG_SIZE_SHIFT;
+ if (size > MS02NV_CSR)
+ size = MS02NV_CSR;
+
+ return size;
+}
+
+static int __init ms02nv_init_one(ulong addr)
+{
+ struct mtd_info *mtd;
+ struct ms02nv_private *mp;
+ struct resource *mod_res;
+ struct resource *diag_res;
+ struct resource *user_res;
+ struct resource *csr_res;
+ ulong fixaddr;
+ size_t size, fixsize;
+
+ static int version_printed;
+
+ int ret = -ENODEV;
+
+ /* The module decodes 8MiB of address space. */
+ mod_res = kzalloc(sizeof(*mod_res), GFP_KERNEL);
+ if (!mod_res)
+ return -ENOMEM;
+
+ mod_res->name = ms02nv_name;
+ mod_res->start = addr;
+ mod_res->end = addr + MS02NV_SLOT_SIZE - 1;
+ mod_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, mod_res) < 0)
+ goto err_out_mod_res;
+
+ size = ms02nv_probe_one(addr);
+ if (!size)
+ goto err_out_mod_res_rel;
+
+ if (!version_printed) {
+ printk(KERN_INFO "%s", version);
+ version_printed = 1;
+ }
+
+ ret = -ENOMEM;
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ goto err_out_mod_res_rel;
+ mp = kzalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ goto err_out_mtd;
+
+ mtd->priv = mp;
+ mp->resource.module = mod_res;
+
+ /* Firmware's diagnostic NVRAM area. */
+ diag_res = kzalloc(sizeof(*diag_res), GFP_KERNEL);
+ if (!diag_res)
+ goto err_out_mp;
+
+ diag_res->name = ms02nv_res_diag_ram;
+ diag_res->start = addr;
+ diag_res->end = addr + MS02NV_RAM - 1;
+ diag_res->flags = IORESOURCE_BUSY;
+ request_resource(mod_res, diag_res);
+
+ mp->resource.diag_ram = diag_res;
+
+ /* User-available general-purpose NVRAM area. */
+ user_res = kzalloc(sizeof(*user_res), GFP_KERNEL);
+ if (!user_res)
+ goto err_out_diag_res;
+
+ user_res->name = ms02nv_res_user_ram;
+ user_res->start = addr + MS02NV_RAM;
+ user_res->end = addr + size - 1;
+ user_res->flags = IORESOURCE_BUSY;
+ request_resource(mod_res, user_res);
+
+ mp->resource.user_ram = user_res;
+
+ /* Control and status register. */
+ csr_res = kzalloc(sizeof(*csr_res), GFP_KERNEL);
+ if (!csr_res)
+ goto err_out_user_res;
+
+ csr_res->name = ms02nv_res_csr;
+ csr_res->start = addr + MS02NV_CSR;
+ csr_res->end = addr + MS02NV_CSR + 3;
+ csr_res->flags = IORESOURCE_BUSY;
+ request_resource(mod_res, csr_res);
+
+ mp->resource.csr = csr_res;
+
+ mp->addr = phys_to_virt(addr);
+ mp->size = size;
+
+ /*
+ * Hide the firmware's diagnostic area. It may get destroyed
+ * upon a reboot. Take paging into account for mapping support.
+ */
+ fixaddr = (addr + MS02NV_RAM + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+ fixsize = (size - (fixaddr - addr)) & ~(PAGE_SIZE - 1);
+ mp->uaddr = phys_to_virt(fixaddr);
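+ /* Editor's note (worked example, assuming 4KiB pages): modules sit on
+ * 8MiB boundaries and MS02NV_RAM is 0x1000, so fixaddr comes out as
+ * addr + 0x1000 and fixsize is the user area rounded down to whole
+ * pages. */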
+
+ mtd->type = MTD_RAM;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->size = fixsize;
+ mtd->name = ms02nv_name;
+ mtd->owner = THIS_MODULE;
+ mtd->_read = ms02nv_read;
+ mtd->_write = ms02nv_write;
+ mtd->writesize = 1;
+
+ ret = -EIO;
+ if (mtd_device_register(mtd, NULL, 0)) {
+ printk(KERN_ERR
+ "ms02-nv: Unable to register MTD device, aborting!\n");
+ goto err_out_csr_res;
+ }
+
+ printk(KERN_INFO "mtd%d: %s at 0x%08lx, size %zuMiB.\n",
+ mtd->index, ms02nv_name, addr, size >> 20);
+
+ mp->next = root_ms02nv_mtd;
+ root_ms02nv_mtd = mtd;
+
+ return 0;
+
+
+err_out_csr_res:
+ release_resource(csr_res);
+ kfree(csr_res);
+err_out_user_res:
+ release_resource(user_res);
+ kfree(user_res);
+err_out_diag_res:
+ release_resource(diag_res);
+ kfree(diag_res);
+err_out_mp:
+ kfree(mp);
+err_out_mtd:
+ kfree(mtd);
+err_out_mod_res_rel:
+ release_resource(mod_res);
+err_out_mod_res:
+ kfree(mod_res);
+ return ret;
+}
+
+static void __exit ms02nv_remove_one(void)
+{
+ struct mtd_info *mtd = root_ms02nv_mtd;
+ struct ms02nv_private *mp = mtd->priv;
+
+ root_ms02nv_mtd = mp->next;
+
+ mtd_device_unregister(mtd);
+
+ release_resource(mp->resource.csr);
+ kfree(mp->resource.csr);
+ release_resource(mp->resource.user_ram);
+ kfree(mp->resource.user_ram);
+ release_resource(mp->resource.diag_ram);
+ kfree(mp->resource.diag_ram);
+ release_resource(mp->resource.module);
+ kfree(mp->resource.module);
+ kfree(mp);
+ kfree(mtd);
+}
+
+
+static int __init ms02nv_init(void)
+{
+ volatile u32 *csr;
+ uint stride = 0;
+ int count = 0;
+ int i;
+
+ switch (mips_machtype) {
+ case MACH_DS5000_200:
+ csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
+ if (*csr & KN02_CSR_BNK32M)
+ stride = 2;
+ break;
+ case MACH_DS5000_2X0:
+ case MACH_DS5900:
+ csr = (volatile u32 *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_MCR);
+ if (*csr & KN03_MCR_BNK32M)
+ stride = 2;
+ break;
+ default:
+ return -ENODEV;
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ms02nv_addrs); i++)
+ if (!ms02nv_init_one(ms02nv_addrs[i] << stride))
+ count++;
+
+ return (count > 0) ? 0 : -ENODEV;
+}
+
+static void __exit ms02nv_cleanup(void)
+{
+ while (root_ms02nv_mtd)
+ ms02nv_remove_one();
+}
+
+
+module_init(ms02nv_init);
+module_exit(ms02nv_cleanup);
diff --git a/drivers/mtd/devices/ms02-nv.h b/drivers/mtd/devices/ms02-nv.h
new file mode 100644
index 000000000..04deafd3a
--- /dev/null
+++ b/drivers/mtd/devices/ms02-nv.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2001, 2003 Maciej W. Rozycki
+ *
+ * DEC MS02-NV (54-20948-01) battery backed-up NVRAM module for
+ * DECstation/DECsystem 5000/2x0 and DECsystem 5900 and 5900/260
+ * systems.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/ioport.h>
+#include <linux/mtd/mtd.h>
+
+/*
+ * Addresses are decoded as follows:
+ *
+ * 0x000000 - 0x3fffff SRAM
+ * 0x400000 - 0x7fffff CSR
+ *
+ * Within the SRAM area the following ranges are forced by the system
+ * firmware:
+ *
+ * 0x000000 - 0x0003ff diagnostic area, destroyed upon a reboot
+ * 0x000400 - ENDofRAM storage area, available to operating systems
+ *
+ * but we can't really use the available area right from 0x000400 as
+ * the first word is used by the firmware as a status flag passed
+ * from an operating system. If anything but the valid data magic
+ * ID value is found, the firmware considers the SRAM clean, i.e.
+ * containing no valid data, and disables the battery resulting in
+ * data being erased as soon as power is switched off. So the chosen
+ * start address of the user-available area is 0x001000, which is
+ * nicely page aligned. The area between 0x000404 and 0x000fff may
+ * be used by the driver for its own needs.
+ *
+ * The diagnostic area defines two status words to be read by an
+ * operating system: a magic ID to distinguish an MS02-NV board from
+ * anything else, and status information providing the results of tests
+ * as well as the size of the SRAM available, which can be 1MiB or 2MiB
+ * (that's what the firmware handles; no idea if 2MiB modules ever
+ * existed).
+ *
+ * The firmware only handles the MS02-NV board if installed in the
+ * last (15th) slot, so for any other location the status information
+ * stored in the SRAM cannot be relied upon. But from the hardware
+ * point of view there is no problem using up to 14 such boards in a
+ * system -- only the 1st slot needs to be filled with a DRAM module.
+ * The MS02-NV board is ECC-protected, like other MS02 memory boards.
+ *
+ * The state of the battery as provided by the CSR is reflected on
+ * the two onboard LEDs. When facing the battery side of the board,
+ * with the LEDs at the top left and the battery at the bottom right
+ * (i.e. looking from the back side of the system box), their meaning
+ * is as follows (the system has to be powered on):
+ *
+ * left LED battery disable status: lit = enabled
+ * right LED battery condition status: lit = OK
+ */
+
+/* MS02-NV iomem register offsets. */
+#define MS02NV_CSR 0x400000 /* control & status register */
+
+/* MS02-NV CSR status bits. */
+#define MS02NV_CSR_BATT_OK 0x01 /* battery OK */
+#define MS02NV_CSR_BATT_OFF 0x02 /* battery disabled */
+
+
+/* MS02-NV memory offsets. */
+#define MS02NV_DIAG 0x0003f8 /* diagnostic status */
+#define MS02NV_MAGIC 0x0003fc /* MS02-NV magic ID */
+#define MS02NV_VALID 0x000400 /* valid data magic ID */
+#define MS02NV_RAM 0x001000 /* user-exposed RAM start */
+
+/* MS02-NV diagnostic status bits. */
+#define MS02NV_DIAG_TEST 0x01 /* SRAM test done (?) */
+#define MS02NV_DIAG_RO 0x02 /* SRAM r/o test done */
+#define MS02NV_DIAG_RW 0x04 /* SRAM r/w test done */
+#define MS02NV_DIAG_FAIL 0x08 /* SRAM test failed */
+#define MS02NV_DIAG_SIZE_MASK 0xf0 /* SRAM size mask */
+#define MS02NV_DIAG_SIZE_SHIFT 0x10 /* SRAM size shift (left) */
+
+/* MS02-NV general constants. */
+#define MS02NV_ID 0x03021966 /* MS02-NV magic ID value */
+#define MS02NV_VALID_ID 0xbd100248 /* valid data magic ID value */
+#define MS02NV_SLOT_SIZE 0x800000 /* size of the address space
+ decoded by the module */
+
+
+typedef volatile u32 ms02nv_uint;
+
+struct ms02nv_private {
+ struct mtd_info *next;
+ struct {
+ struct resource *module;
+ struct resource *diag_ram;
+ struct resource *user_ram;
+ struct resource *csr;
+ } resource;
+ u_char *addr;
+ size_t size;
+ u_char *uaddr;
+};
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
new file mode 100644
index 000000000..0099aba72
--- /dev/null
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -0,0 +1,928 @@
+/*
+ * Atmel AT45xxx DataFlash MTD driver for lightweight SPI framework
+ *
+ * Largely derived from at91_dataflash.c:
+ * Copyright (C) 2003-2005 SAN People (Pty) Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+/*
+ * DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in
+ * each chip, which may be used for double buffered I/O; but this driver
+ * doesn't (yet) use these for any kind of i/o overlap or prefetching.
+ *
+ * Sometimes DataFlash is packaged in MMC-format cards, although the
+ * MMC stack can't (yet?) distinguish between MMC and DataFlash
+ * protocols during enumeration.
+ */
+
+/* reads can bypass the buffers */
+#define OP_READ_CONTINUOUS 0xE8
+#define OP_READ_PAGE 0xD2
+
+/* group B requests can run even while status reports "busy" */
+#define OP_READ_STATUS 0xD7 /* group B */
+
+/* move data between host and buffer */
+#define OP_READ_BUFFER1 0xD4 /* group B */
+#define OP_READ_BUFFER2 0xD6 /* group B */
+#define OP_WRITE_BUFFER1 0x84 /* group B */
+#define OP_WRITE_BUFFER2 0x87 /* group B */
+
+/* erasing flash */
+#define OP_ERASE_PAGE 0x81
+#define OP_ERASE_BLOCK 0x50
+
+/* move data between buffer and flash */
+#define OP_TRANSFER_BUF1 0x53
+#define OP_TRANSFER_BUF2 0x55
+#define OP_MREAD_BUFFER1 0xD4
+#define OP_MREAD_BUFFER2 0xD6
+#define OP_MWERASE_BUFFER1 0x83
+#define OP_MWERASE_BUFFER2 0x86
+#define OP_MWRITE_BUFFER1 0x88 /* sector must be pre-erased */
+#define OP_MWRITE_BUFFER2 0x89 /* sector must be pre-erased */
+
+/* write to buffer, then write-erase to flash */
+#define OP_PROGRAM_VIA_BUF1 0x82
+#define OP_PROGRAM_VIA_BUF2 0x85
+
+/* compare buffer to flash */
+#define OP_COMPARE_BUF1 0x60
+#define OP_COMPARE_BUF2 0x61
+
+/* read flash to buffer, then write-erase to flash */
+#define OP_REWRITE_VIA_BUF1 0x58
+#define OP_REWRITE_VIA_BUF2 0x59
+
+/* newer chips report JEDEC manufacturer and device IDs; chip
+ * serial number and OTP bits; and per-sector writeprotect.
+ */
+#define OP_READ_ID 0x9F
+#define OP_READ_SECURITY 0x77
+#define OP_WRITE_SECURITY_REVC 0x9A
+#define OP_WRITE_SECURITY 0x9B /* revision D */
+
+
+struct dataflash {
+ uint8_t command[4];
+ char name[24];
+
+	unsigned short page_offset; /* page-number shift in flash address */
+	unsigned int page_size; /* number of bytes per page */
+
+ struct mutex lock;
+ struct spi_device *spi;
+
+ struct mtd_info mtd;
+};
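+
+/*
+ * Illustrative sketch (not used by the driver): how a linear byte
+ * offset maps onto the "page number, byte-in-page" address format
+ * expected by most of the opcodes above.  For a 264-byte-per-page
+ * chip page_offset is 9, so the page number sits just above the
+ * in-page byte offset.  dataflash_read() and dataflash_write()
+ * below perform this same arithmetic inline.
+ */
+static inline uint32_t dataflash_linear_to_cmd_addr(uint32_t byte_off,
+		unsigned int page_size, unsigned int page_offset)
+{
+	return ((byte_off / page_size) << page_offset)
+		+ (byte_off % page_size);
+}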
+
+#ifdef CONFIG_OF
+static const struct of_device_id dataflash_dt_ids[] = {
+ { .compatible = "atmel,at45", },
+ { .compatible = "atmel,dataflash", },
+ { /* sentinel */ }
+};
+#endif
+
+/* ......................................................................... */
+
+/*
+ * Return the status of the DataFlash device.
+ */
+static inline int dataflash_status(struct spi_device *spi)
+{
+ /* NOTE: at45db321c over 25 MHz wants to write
+ * a dummy byte after the opcode...
+ */
+ return spi_w8r8(spi, OP_READ_STATUS);
+}
+
+/*
+ * Poll the DataFlash device until it is READY.
+ * This usually takes 5-20 msec or so; more for sector erase.
+ */
+static int dataflash_waitready(struct spi_device *spi)
+{
+ int status;
+
+ for (;;) {
+ status = dataflash_status(spi);
+ if (status < 0) {
+ pr_debug("%s: status %d?\n",
+ dev_name(&spi->dev), status);
+ status = 0;
+ }
+
+ if (status & (1 << 7)) /* RDY/nBSY */
+ return status;
+
+ msleep(3);
+ }
+}
+
+/* ......................................................................... */
+
+/*
+ * Erase pages of flash.
+ */
+static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct dataflash *priv = mtd->priv;
+ struct spi_device *spi = priv->spi;
+ struct spi_transfer x = { };
+ struct spi_message msg;
+ unsigned blocksize = priv->page_size << 3;
+ uint8_t *command;
+ uint32_t rem;
+
+ pr_debug("%s: erase addr=0x%llx len 0x%llx\n",
+ dev_name(&spi->dev), (long long)instr->addr,
+ (long long)instr->len);
+
+ div_u64_rem(instr->len, priv->page_size, &rem);
+ if (rem)
+ return -EINVAL;
+ div_u64_rem(instr->addr, priv->page_size, &rem);
+ if (rem)
+ return -EINVAL;
+
+ spi_message_init(&msg);
+
+ x.tx_buf = command = priv->command;
+ x.len = 4;
+ spi_message_add_tail(&x, &msg);
+
+ mutex_lock(&priv->lock);
+ while (instr->len > 0) {
+ unsigned int pageaddr;
+ int status;
+ int do_block;
+
+ /* Calculate flash page address; use block erase (for speed) if
+ * we're at a block boundary and need to erase the whole block.
+ */
+ pageaddr = div_u64(instr->addr, priv->page_size);
+ do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
+ pageaddr = pageaddr << priv->page_offset;
+
+ command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE;
+ command[1] = (uint8_t)(pageaddr >> 16);
+ command[2] = (uint8_t)(pageaddr >> 8);
+ command[3] = 0;
+
+ pr_debug("ERASE %s: (%x) %x %x %x [%i]\n",
+ do_block ? "block" : "page",
+ command[0], command[1], command[2], command[3],
+ pageaddr);
+
+ status = spi_sync(spi, &msg);
+ (void) dataflash_waitready(spi);
+
+		if (status < 0) {
+			printk(KERN_ERR "%s: erase %x, err %d\n",
+				dev_name(&spi->dev), pageaddr, status);
+			/* REVISIT: could retry instr->retries times
+			 * before giving up; as written we fail the
+			 * request rather than loop forever on a page
+			 * that never erases.
+			 */
+			instr->state = MTD_ERASE_FAILED;
+			instr->fail_addr = instr->addr;
+			mutex_unlock(&priv->lock);
+			return status;
+		}
+
+ if (do_block) {
+ instr->addr += blocksize;
+ instr->len -= blocksize;
+ } else {
+ instr->addr += priv->page_size;
+ instr->len -= priv->page_size;
+ }
+ }
+ mutex_unlock(&priv->lock);
+
+ /* Inform MTD subsystem that erase is complete */
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+/*
+ * Read from the DataFlash device.
+ * from : Start offset in flash device
+ * len : Amount to read
+ * retlen : Amount of data actually read
+ * buf : Buffer to fill with the data read
+ */
+static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct dataflash *priv = mtd->priv;
+ struct spi_transfer x[2] = { };
+ struct spi_message msg;
+ unsigned int addr;
+ uint8_t *command;
+ int status;
+
+ pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
+ (unsigned)from, (unsigned)(from + len));
+
+ /* Calculate flash page/byte address */
+ addr = (((unsigned)from / priv->page_size) << priv->page_offset)
+ + ((unsigned)from % priv->page_size);
+
+	command = priv->command;
+
+ spi_message_init(&msg);
+
+ x[0].tx_buf = command;
+ x[0].len = 8;
+ spi_message_add_tail(&x[0], &msg);
+
+ x[1].rx_buf = buf;
+ x[1].len = len;
+ spi_message_add_tail(&x[1], &msg);
+
+ mutex_lock(&priv->lock);
+
+ /* Continuous read, max clock = f(car) which may be less than
+ * the peak rate available. Some chips support commands with
+ * fewer "don't care" bytes. Both buffers stay unchanged.
+ */
+ command[0] = OP_READ_CONTINUOUS;
+ command[1] = (uint8_t)(addr >> 16);
+ command[2] = (uint8_t)(addr >> 8);
+ command[3] = (uint8_t)(addr >> 0);
+ /* plus 4 "don't care" bytes */
+
+	pr_debug("READ: (%x) %x %x %x\n",
+		command[0], command[1], command[2], command[3]);
+
+ status = spi_sync(priv->spi, &msg);
+ mutex_unlock(&priv->lock);
+
+ if (status >= 0) {
+ *retlen = msg.actual_length - 8;
+ status = 0;
+ } else
+ pr_debug("%s: read %x..%x --> %d\n",
+ dev_name(&priv->spi->dev),
+ (unsigned)from, (unsigned)(from + len),
+ status);
+ return status;
+}
+
+/*
+ * Write to the DataFlash device.
+ * to : Start offset in flash device
+ * len : Amount to write
+ * retlen : Amount of data actually written
+ * buf : Buffer containing the data
+ */
+static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct dataflash *priv = mtd->priv;
+ struct spi_device *spi = priv->spi;
+ struct spi_transfer x[2] = { };
+ struct spi_message msg;
+ unsigned int pageaddr, addr, offset, writelen;
+ size_t remaining = len;
+ u_char *writebuf = (u_char *) buf;
+ int status = -EINVAL;
+ uint8_t *command;
+
+ pr_debug("%s: write 0x%x..0x%x\n",
+ dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
+
+ spi_message_init(&msg);
+
+ x[0].tx_buf = command = priv->command;
+ x[0].len = 4;
+ spi_message_add_tail(&x[0], &msg);
+
+ pageaddr = ((unsigned)to / priv->page_size);
+ offset = ((unsigned)to % priv->page_size);
+ if (offset + len > priv->page_size)
+ writelen = priv->page_size - offset;
+ else
+ writelen = len;
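+
+	/*
+	 * Worked example (illustrative): a 300-byte write at offset 200
+	 * of a 264-byte-per-page chip first writes the 64-byte tail of
+	 * page 0 (264 - 200), then the remaining 236 bytes at the start
+	 * of page 1.
+	 */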
+
+ mutex_lock(&priv->lock);
+ while (remaining > 0) {
+ pr_debug("write @ %i:%i len=%i\n",
+ pageaddr, offset, writelen);
+
+ /* REVISIT:
+ * (a) each page in a sector must be rewritten at least
+ * once every 10K sibling erase/program operations.
+ * (b) for pages that are already erased, we could
+ * use WRITE+MWRITE not PROGRAM for ~30% speedup.
+ * (c) WRITE to buffer could be done while waiting for
+ * a previous MWRITE/MWERASE to complete ...
+ * (d) error handling here seems to be mostly missing.
+ *
+ * Two persistent bits per page, plus a per-sector counter,
+ * could support (a) and (b) ... we might consider using
+ * the second half of sector zero, which is just one block,
+ * to track that state. (On AT91, that sector should also
+ * support boot-from-DataFlash.)
+ */
+
+ addr = pageaddr << priv->page_offset;
+
+ /* (1) Maybe transfer partial page to Buffer1 */
+ if (writelen != priv->page_size) {
+ command[0] = OP_TRANSFER_BUF1;
+ command[1] = (addr & 0x00FF0000) >> 16;
+ command[2] = (addr & 0x0000FF00) >> 8;
+ command[3] = 0;
+
+ pr_debug("TRANSFER: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ status = spi_sync(spi, &msg);
+ if (status < 0)
+ pr_debug("%s: xfer %u -> %d\n",
+ dev_name(&spi->dev), addr, status);
+
+ (void) dataflash_waitready(priv->spi);
+ }
+
+ /* (2) Program full page via Buffer1 */
+ addr += offset;
+ command[0] = OP_PROGRAM_VIA_BUF1;
+ command[1] = (addr & 0x00FF0000) >> 16;
+ command[2] = (addr & 0x0000FF00) >> 8;
+ command[3] = (addr & 0x000000FF);
+
+ pr_debug("PROGRAM: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ x[1].tx_buf = writebuf;
+ x[1].len = writelen;
+ spi_message_add_tail(x + 1, &msg);
+ status = spi_sync(spi, &msg);
+ spi_transfer_del(x + 1);
+ if (status < 0)
+ pr_debug("%s: pgm %u/%u -> %d\n",
+ dev_name(&spi->dev), addr, writelen, status);
+
+ (void) dataflash_waitready(priv->spi);
+
+
+#ifdef CONFIG_MTD_DATAFLASH_WRITE_VERIFY
+
+ /* (3) Compare to Buffer1 */
+ addr = pageaddr << priv->page_offset;
+ command[0] = OP_COMPARE_BUF1;
+ command[1] = (addr & 0x00FF0000) >> 16;
+ command[2] = (addr & 0x0000FF00) >> 8;
+ command[3] = 0;
+
+ pr_debug("COMPARE: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ status = spi_sync(spi, &msg);
+ if (status < 0)
+ pr_debug("%s: compare %u -> %d\n",
+ dev_name(&spi->dev), addr, status);
+
+ status = dataflash_waitready(priv->spi);
+
+ /* Check result of the compare operation */
+ if (status & (1 << 6)) {
+ printk(KERN_ERR "%s: compare page %u, err %d\n",
+ dev_name(&spi->dev), pageaddr, status);
+ remaining = 0;
+ status = -EIO;
+ break;
+ } else
+ status = 0;
+
+#endif /* CONFIG_MTD_DATAFLASH_WRITE_VERIFY */
+
+ remaining = remaining - writelen;
+ pageaddr++;
+ offset = 0;
+ writebuf += writelen;
+ *retlen += writelen;
+
+ if (remaining > priv->page_size)
+ writelen = priv->page_size;
+ else
+ writelen = remaining;
+ }
+ mutex_unlock(&priv->lock);
+
+ return status;
+}
+
+/* ......................................................................... */
+
+#ifdef CONFIG_MTD_DATAFLASH_OTP
+
+static int dataflash_get_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *info)
+{
+	/* Report both blocks as identical: 64 bytes each, at offsets
+	 * 0..63, locked.  Unless the user block changed from all-ones,
+	 * we can't tell whether it's still writable; so we assume it
+	 * isn't.
+	 */
+ info->start = 0;
+ info->length = 64;
+ info->locked = 1;
+ *retlen = sizeof(*info);
+ return 0;
+}
+
+static ssize_t otp_read(struct spi_device *spi, unsigned base,
+ uint8_t *buf, loff_t off, size_t len)
+{
+ struct spi_message m;
+ size_t l;
+ uint8_t *scratch;
+ struct spi_transfer t;
+ int status;
+
+ if (off > 64)
+ return -EINVAL;
+
+ if ((off + len) > 64)
+ len = 64 - off;
+
+ spi_message_init(&m);
+
+ l = 4 + base + off + len;
+ scratch = kzalloc(l, GFP_KERNEL);
+ if (!scratch)
+ return -ENOMEM;
+
+ /* OUT: OP_READ_SECURITY, 3 don't-care bytes, zeroes
+ * IN: ignore 4 bytes, data bytes 0..N (max 127)
+ */
+ scratch[0] = OP_READ_SECURITY;
+
+ memset(&t, 0, sizeof t);
+ t.tx_buf = scratch;
+ t.rx_buf = scratch;
+ t.len = l;
+ spi_message_add_tail(&t, &m);
+
+ dataflash_waitready(spi);
+
+ status = spi_sync(spi, &m);
+ if (status >= 0) {
+ memcpy(buf, scratch + 4 + base + off, len);
+ status = len;
+ }
+
+ kfree(scratch);
+ return status;
+}
+
+static int dataflash_read_fact_otp(struct mtd_info *mtd,
+ loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct dataflash *priv = mtd->priv;
+ int status;
+
+ /* 64 bytes, from 0..63 ... start at 64 on-chip */
+ mutex_lock(&priv->lock);
+ status = otp_read(priv->spi, 64, buf, from, len);
+ mutex_unlock(&priv->lock);
+
+ if (status < 0)
+ return status;
+ *retlen = status;
+ return 0;
+}
+
+static int dataflash_read_user_otp(struct mtd_info *mtd,
+ loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct dataflash *priv = mtd->priv;
+ int status;
+
+ /* 64 bytes, from 0..63 ... start at 0 on-chip */
+ mutex_lock(&priv->lock);
+ status = otp_read(priv->spi, 0, buf, from, len);
+ mutex_unlock(&priv->lock);
+
+ if (status < 0)
+ return status;
+ *retlen = status;
+ return 0;
+}
+
+static int dataflash_write_user_otp(struct mtd_info *mtd,
+ loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct spi_message m;
+ const size_t l = 4 + 64;
+ uint8_t *scratch;
+ struct spi_transfer t;
+ struct dataflash *priv = mtd->priv;
+ int status;
+
+ if (from >= 64) {
+ /*
+ * Attempting to write beyond the end of OTP memory,
+ * no data can be written.
+ */
+ *retlen = 0;
+ return 0;
+ }
+
+ /* Truncate the write to fit into OTP memory. */
+ if ((from + len) > 64)
+ len = 64 - from;
+
+ /* OUT: OP_WRITE_SECURITY, 3 zeroes, 64 data-or-zero bytes
+ * IN: ignore all
+ */
+ scratch = kzalloc(l, GFP_KERNEL);
+ if (!scratch)
+ return -ENOMEM;
+ scratch[0] = OP_WRITE_SECURITY;
+ memcpy(scratch + 4 + from, buf, len);
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof t);
+ t.tx_buf = scratch;
+ t.len = l;
+ spi_message_add_tail(&t, &m);
+
+ /* Write the OTP bits, if they've not yet been written.
+ * This modifies SRAM buffer1.
+ */
+ mutex_lock(&priv->lock);
+ dataflash_waitready(priv->spi);
+ status = spi_sync(priv->spi, &m);
+ mutex_unlock(&priv->lock);
+
+ kfree(scratch);
+
+ if (status >= 0) {
+ status = 0;
+ *retlen = len;
+ }
+ return status;
+}
+
+static char *otp_setup(struct mtd_info *device, char revision)
+{
+ device->_get_fact_prot_info = dataflash_get_otp_info;
+ device->_read_fact_prot_reg = dataflash_read_fact_otp;
+ device->_get_user_prot_info = dataflash_get_otp_info;
+ device->_read_user_prot_reg = dataflash_read_user_otp;
+
+ /* rev c parts (at45db321c and at45db1281 only!) use a
+ * different write procedure; not (yet?) implemented.
+ */
+ if (revision > 'c')
+ device->_write_user_prot_reg = dataflash_write_user_otp;
+
+ return ", OTP";
+}
+
+#else
+
+static char *otp_setup(struct mtd_info *device, char revision)
+{
+ return " (OTP)";
+}
+
+#endif
+
+/* ......................................................................... */
+
+/*
+ * Register DataFlash device with MTD subsystem.
+ */
+static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
+ int pagesize, int pageoffset, char revision)
+{
+ struct dataflash *priv;
+ struct mtd_info *device;
+ struct mtd_part_parser_data ppdata;
+ struct flash_platform_data *pdata = dev_get_platdata(&spi->dev);
+ char *otp_tag = "";
+ int err = 0;
+
+ priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ priv->spi = spi;
+ priv->page_size = pagesize;
+ priv->page_offset = pageoffset;
+
+ /* name must be usable with cmdlinepart */
+ sprintf(priv->name, "spi%d.%d-%s",
+ spi->master->bus_num, spi->chip_select,
+ name);
+
+ device = &priv->mtd;
+ device->name = (pdata && pdata->name) ? pdata->name : priv->name;
+ device->size = nr_pages * pagesize;
+ device->erasesize = pagesize;
+ device->writesize = pagesize;
+ device->owner = THIS_MODULE;
+ device->type = MTD_DATAFLASH;
+ device->flags = MTD_WRITEABLE;
+ device->_erase = dataflash_erase;
+ device->_read = dataflash_read;
+ device->_write = dataflash_write;
+ device->priv = priv;
+
+ device->dev.parent = &spi->dev;
+
+ if (revision >= 'c')
+ otp_tag = otp_setup(device, revision);
+
+ dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n",
+ name, (long long)((device->size + 1023) >> 10),
+ pagesize, otp_tag);
+ spi_set_drvdata(spi, priv);
+
+ ppdata.of_node = spi->dev.of_node;
+ err = mtd_device_parse_register(device, NULL, &ppdata,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
+
+ if (!err)
+ return 0;
+
+ kfree(priv);
+ return err;
+}
+
+static inline int add_dataflash(struct spi_device *spi, char *name,
+ int nr_pages, int pagesize, int pageoffset)
+{
+ return add_dataflash_otp(spi, name, nr_pages, pagesize,
+ pageoffset, 0);
+}
+
+struct flash_info {
+ char *name;
+
+ /* JEDEC id has a high byte of zero plus three data bytes:
+ * the manufacturer id, then a two byte device id.
+ */
+ uint32_t jedec_id;
+
+ /* The size listed here is what works with OP_ERASE_PAGE. */
+ unsigned nr_pages;
+ uint16_t pagesize;
+ uint16_t pageoffset;
+
+ uint16_t flags;
+#define SUP_POW2PS 0x0002 /* supports 2^N byte pages */
+#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
+};
+
+static struct flash_info dataflash_data[] = {
+
+ /*
+ * NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
+ * one with IS_POW2PS and the other without. The entry with the
+ * non-2^N byte page size can't name exact chip revisions without
+ * losing backwards compatibility for cmdlinepart.
+ *
+ * These newer chips also support 128-byte security registers (with
+ * 64 bytes one-time-programmable) and software write-protection.
+ */
+ { "AT45DB011B", 0x1f2200, 512, 264, 9, SUP_POW2PS},
+ { "at45db011d", 0x1f2200, 512, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB021B", 0x1f2300, 1024, 264, 9, SUP_POW2PS},
+ { "at45db021d", 0x1f2300, 1024, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB041x", 0x1f2400, 2048, 264, 9, SUP_POW2PS},
+ { "at45db041d", 0x1f2400, 2048, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB081B", 0x1f2500, 4096, 264, 9, SUP_POW2PS},
+ { "at45db081d", 0x1f2500, 4096, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB161x", 0x1f2600, 4096, 528, 10, SUP_POW2PS},
+ { "at45db161d", 0x1f2600, 4096, 512, 9, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB321x", 0x1f2700, 8192, 528, 10, 0}, /* rev C */
+
+ { "AT45DB321x", 0x1f2701, 8192, 528, 10, SUP_POW2PS},
+ { "at45db321d", 0x1f2701, 8192, 512, 9, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
+ { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
+};
+
+static struct flash_info *jedec_probe(struct spi_device *spi)
+{
+ int tmp;
+ uint8_t code = OP_READ_ID;
+ uint8_t id[3];
+ uint32_t jedec;
+ struct flash_info *info;
+ int status;
+
+ /* JEDEC also defines an optional "extended device information"
+ * string for after vendor-specific data, after the three bytes
+ * we use here. Supporting some chips might require using it.
+ *
+ * If the vendor ID isn't Atmel's (0x1f), assume this call failed.
+ * That's not an error; only rev C and newer chips handle it, and
+ * only Atmel sells these chips.
+ */
+ tmp = spi_write_then_read(spi, &code, 1, id, 3);
+ if (tmp < 0) {
+ pr_debug("%s: error %d reading JEDEC ID\n",
+ dev_name(&spi->dev), tmp);
+ return ERR_PTR(tmp);
+ }
+ if (id[0] != 0x1f)
+ return NULL;
+
+	jedec = ((uint32_t)id[0] << 16) | ((uint32_t)id[1] << 8) | id[2];
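+
+	/*
+	 * Example (illustrative): an AT45DB321D answers with
+	 * id[] = { 0x1f, 0x27, 0x01 }, giving jedec = 0x1f2701, which
+	 * matches the at45db321d entries in dataflash_data[] below.
+	 */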
+
+ for (tmp = 0, info = dataflash_data;
+ tmp < ARRAY_SIZE(dataflash_data);
+ tmp++, info++) {
+ if (info->jedec_id == jedec) {
+ pr_debug("%s: OTP, sector protect%s\n",
+ dev_name(&spi->dev),
+ (info->flags & SUP_POW2PS)
+ ? ", binary pagesize" : ""
+ );
+ if (info->flags & SUP_POW2PS) {
+ status = dataflash_status(spi);
+ if (status < 0) {
+ pr_debug("%s: status error %d\n",
+ dev_name(&spi->dev), status);
+ return ERR_PTR(status);
+ }
+ if (status & 0x1) {
+ if (info->flags & IS_POW2PS)
+ return info;
+ } else {
+ if (!(info->flags & IS_POW2PS))
+ return info;
+ }
+ } else
+ return info;
+ }
+ }
+
+ /*
+ * Treat other chips as errors ... we won't know the right page
+ * size (it might be binary) even when we can tell which density
+ * class is involved (legacy chip id scheme).
+ */
+ dev_warn(&spi->dev, "JEDEC id %06x not handled\n", jedec);
+ return ERR_PTR(-ENODEV);
+}
+
+/*
+ * Detect and initialize DataFlash device, using JEDEC IDs on newer chips
+ * or else the ID code embedded in the status bits:
+ *
+ * Device Density ID code #Pages PageSize Offset
+ * AT45DB011B 1Mbit (128K) xx0011xx (0x0c) 512 264 9
+ * AT45DB021B 2Mbit (256K) xx0101xx (0x14) 1024 264 9
+ * AT45DB041B 4Mbit (512K) xx0111xx (0x1c) 2048 264 9
+ * AT45DB081B 8Mbit (1M) xx1001xx (0x24) 4096 264 9
+ * AT45DB0161B 16Mbit (2M) xx1011xx (0x2c) 4096 528 10
+ * AT45DB0321B 32Mbit (4M) xx1101xx (0x34) 8192 528 10
+ * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
+ * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
+ */
+static int dataflash_probe(struct spi_device *spi)
+{
+ int status;
+ struct flash_info *info;
+
+ /*
+ * Try to detect dataflash by JEDEC ID.
+ * If it succeeds we know we have either a C or D part.
+ * D will support power of 2 pagesize option.
+ * Both support the security register, though with different
+ * write procedures.
+ */
+ info = jedec_probe(spi);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+ if (info != NULL)
+ return add_dataflash_otp(spi, info->name, info->nr_pages,
+ info->pagesize, info->pageoffset,
+ (info->flags & SUP_POW2PS) ? 'd' : 'c');
+
+	/*
+	 * Older chips support only legacy commands, identifying
+	 * capacity using bits in the status byte.
+	 */
+ status = dataflash_status(spi);
+ if (status <= 0 || status == 0xff) {
+ pr_debug("%s: status error %d\n",
+ dev_name(&spi->dev), status);
+ if (status == 0 || status == 0xff)
+ status = -ENODEV;
+ return status;
+ }
+
+	/* if there's a device there, assume it's dataflash.
+	 * board setup should have set spi->max_speed_hz to
+	 * match f(car) for continuous reads, mode 0 or 3.
+	 */
+ switch (status & 0x3c) {
+ case 0x0c: /* 0 0 1 1 x x */
+ status = add_dataflash(spi, "AT45DB011B", 512, 264, 9);
+ break;
+ case 0x14: /* 0 1 0 1 x x */
+ status = add_dataflash(spi, "AT45DB021B", 1024, 264, 9);
+ break;
+ case 0x1c: /* 0 1 1 1 x x */
+ status = add_dataflash(spi, "AT45DB041x", 2048, 264, 9);
+ break;
+ case 0x24: /* 1 0 0 1 x x */
+ status = add_dataflash(spi, "AT45DB081B", 4096, 264, 9);
+ break;
+ case 0x2c: /* 1 0 1 1 x x */
+ status = add_dataflash(spi, "AT45DB161x", 4096, 528, 10);
+ break;
+ case 0x34: /* 1 1 0 1 x x */
+ status = add_dataflash(spi, "AT45DB321x", 8192, 528, 10);
+ break;
+ case 0x38: /* 1 1 1 x x x */
+ case 0x3c:
+ status = add_dataflash(spi, "AT45DB642x", 8192, 1056, 11);
+ break;
+ /* obsolete AT45DB1282 not (yet?) supported */
+ default:
+ dev_info(&spi->dev, "unsupported device (%x)\n",
+ status & 0x3c);
+ status = -ENODEV;
+ }
+
+ if (status < 0)
+ pr_debug("%s: add_dataflash --> %d\n", dev_name(&spi->dev),
+ status);
+
+ return status;
+}
+
+static int dataflash_remove(struct spi_device *spi)
+{
+ struct dataflash *flash = spi_get_drvdata(spi);
+ int status;
+
+ pr_debug("%s: remove\n", dev_name(&spi->dev));
+
+ status = mtd_device_unregister(&flash->mtd);
+ if (status == 0)
+ kfree(flash);
+ return status;
+}
+
+static struct spi_driver dataflash_driver = {
+ .driver = {
+ .name = "mtd_dataflash",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dataflash_dt_ids),
+ },
+
+ .probe = dataflash_probe,
+ .remove = dataflash_remove,
+
+ /* FIXME: investigate suspend and resume... */
+};
+
+module_spi_driver(dataflash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew Victor, David Brownell");
+MODULE_DESCRIPTION("MTD DataFlash driver");
+MODULE_ALIAS("spi:mtd_dataflash");
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
new file mode 100644
index 000000000..8e2850892
--- /dev/null
+++ b/drivers/mtd/devices/mtdram.c
@@ -0,0 +1,158 @@
+/*
+ * mtdram - a test mtd device
+ * Author: Alexander Larsson <alex@cendio.se>
+ *
+ * Copyright (c) 1999 Alexander Larsson <alex@cendio.se>
+ * Copyright (c) 2005 Joern Engel <joern@wh.fh-wedel.de>
+ *
+ * This code is GPL
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/mtdram.h>
+
+static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE;
+static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE;
+#define MTDRAM_TOTAL_SIZE (total_size * 1024)
+#define MTDRAM_ERASE_SIZE (erase_size * 1024)
+
+#ifdef MODULE
+module_param(total_size, ulong, 0);
+MODULE_PARM_DESC(total_size, "Total device size in KiB");
+module_param(erase_size, ulong, 0);
+MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
+#endif
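+
+/*
+ * Example (illustrative): when built as a module,
+ * "modprobe mtdram total_size=2048 erase_size=128" creates a 2 MiB
+ * test device with 128 KiB erase blocks; both parameters are in KiB.
+ */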
+
+/* We could store these in the mtd structure, but we only support one device. */
+static struct mtd_info *mtd_info;
+
+static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+}
+
+static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ *virt = mtd->priv + from;
+ *retlen = len;
+ return 0;
+}
+
+static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
+}
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags)
+{
+ return (unsigned long) mtd->priv + offset;
+}
+
+static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ memcpy(buf, mtd->priv + from, len);
+ *retlen = len;
+ return 0;
+}
+
+static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ memcpy((char *)mtd->priv + to, buf, len);
+ *retlen = len;
+ return 0;
+}
+
+static void __exit cleanup_mtdram(void)
+{
+ if (mtd_info) {
+ mtd_device_unregister(mtd_info);
+ vfree(mtd_info->priv);
+ kfree(mtd_info);
+ }
+}
+
+int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
+ unsigned long size, const char *name)
+{
+ memset(mtd, 0, sizeof(*mtd));
+
+ /* Setup the MTD structure */
+ mtd->name = name;
+ mtd->type = MTD_RAM;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->size = size;
+ mtd->writesize = 1;
+ mtd->writebufsize = 64; /* Mimic CFI NOR flashes */
+ mtd->erasesize = MTDRAM_ERASE_SIZE;
+ mtd->priv = mapped_address;
+
+ mtd->owner = THIS_MODULE;
+ mtd->_erase = ram_erase;
+ mtd->_point = ram_point;
+ mtd->_unpoint = ram_unpoint;
+ mtd->_get_unmapped_area = ram_get_unmapped_area;
+ mtd->_read = ram_read;
+ mtd->_write = ram_write;
+
+ if (mtd_device_register(mtd, NULL, 0))
+ return -EIO;
+
+ return 0;
+}
+
+static int __init init_mtdram(void)
+{
+ void *addr;
+ int err;
+
+ if (!total_size)
+ return -EINVAL;
+
+ /* Allocate some memory */
+ mtd_info = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd_info)
+ return -ENOMEM;
+
+ addr = vmalloc(MTDRAM_TOTAL_SIZE);
+ if (!addr) {
+ kfree(mtd_info);
+ mtd_info = NULL;
+ return -ENOMEM;
+ }
+ err = mtdram_init_device(mtd_info, addr, MTDRAM_TOTAL_SIZE, "mtdram test device");
+ if (err) {
+ vfree(addr);
+ kfree(mtd_info);
+ mtd_info = NULL;
+ return err;
+ }
+ memset(mtd_info->priv, 0xff, MTDRAM_TOTAL_SIZE);
+ return err;
+}
+
+module_init(init_mtdram);
+module_exit(cleanup_mtdram);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Larsson <alexl@redhat.com>");
+MODULE_DESCRIPTION("Simulated MTD driver for testing");
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
new file mode 100644
index 000000000..8b66e52ca
--- /dev/null
+++ b/drivers/mtd/devices/phram.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
+ * Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de>
+ *
+ * Usage:
+ *
+ * one command line parameter per device, each in the form:
+ * phram=<name>,<start>,<len>
+ * <name> may be up to 63 characters.
+ * <start> and <len> can be octal, decimal or hexadecimal. If followed
+ * by "ki", "Mi" or "Gi", the numbers will be interpreted as binary
+ * multiples (kibi-, mebi- or gibibytes).
+ *
+ * Example:
+ * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+
+struct phram_mtd_list {
+ struct mtd_info mtd;
+ struct list_head list;
+};
+
+static LIST_HEAD(phram_list);
+
+static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ u_char *start = mtd->priv;
+
+ memset(start + instr->addr, 0xff, instr->len);
+
+ /*
+ * This'll catch a few races. Free the thing before returning :)
+ * I don't feel at all ashamed. This kind of thing is possible anyway
+ * with flash, but unlikely.
+ */
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+}
+
+static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ *virt = mtd->priv + from;
+ *retlen = len;
+ return 0;
+}
+
+static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
+}
+
+static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ u_char *start = mtd->priv;
+
+ memcpy(buf, start + from, len);
+ *retlen = len;
+ return 0;
+}
+
+static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ u_char *start = mtd->priv;
+
+ memcpy(start + to, buf, len);
+ *retlen = len;
+ return 0;
+}
+
+static void unregister_devices(void)
+{
+ struct phram_mtd_list *this, *safe;
+
+ list_for_each_entry_safe(this, safe, &phram_list, list) {
+ mtd_device_unregister(&this->mtd);
+ iounmap(this->mtd.priv);
+ kfree(this->mtd.name);
+ kfree(this);
+ }
+}
+
+static int register_device(char *name, phys_addr_t start, size_t len)
+{
+ struct phram_mtd_list *new;
+ int ret = -ENOMEM;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ goto out0;
+
+ ret = -EIO;
+ new->mtd.priv = ioremap(start, len);
+ if (!new->mtd.priv) {
+ pr_err("ioremap failed\n");
+ goto out1;
+ }
+
+
+ new->mtd.name = name;
+ new->mtd.size = len;
+ new->mtd.flags = MTD_CAP_RAM;
+ new->mtd._erase = phram_erase;
+ new->mtd._point = phram_point;
+ new->mtd._unpoint = phram_unpoint;
+ new->mtd._read = phram_read;
+ new->mtd._write = phram_write;
+ new->mtd.owner = THIS_MODULE;
+ new->mtd.type = MTD_RAM;
+ new->mtd.erasesize = PAGE_SIZE;
+ new->mtd.writesize = 1;
+
+ ret = -EAGAIN;
+ if (mtd_device_register(&new->mtd, NULL, 0)) {
+ pr_err("Failed to register new device\n");
+ goto out2;
+ }
+
+ list_add_tail(&new->list, &phram_list);
+ return 0;
+
+out2:
+ iounmap(new->mtd.priv);
+out1:
+ kfree(new);
+out0:
+ return ret;
+}
+
+static int parse_num64(uint64_t *num64, char *token)
+{
+ size_t len;
+ int shift = 0;
+ int ret;
+
+ len = strlen(token);
+ /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
+ if (len > 2) {
+ if (token[len - 1] == 'i') {
+			switch (token[len - 2]) {
+			case 'G':
+				shift += 10;
+				/* fall through */
+			case 'M':
+				shift += 10;
+				/* fall through */
+			case 'k':
+				shift += 10;
+				token[len - 2] = 0;
+				break;
+			default:
+				return -EINVAL;
+			}
+ }
+ }
+
+ ret = kstrtou64(token, 0, num64);
+ *num64 <<= shift;
+
+ return ret;
+}
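+
+/*
+ * Example (illustrative): parse_num64(&n, "128Mi") strips the "Mi"
+ * suffix, converts the remaining "128" with kstrtou64(), then applies
+ * the accumulated 20-bit shift, yielding n = 134217728 (128 MiB).
+ */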
+
+static int parse_name(char **pname, const char *token)
+{
+ size_t len;
+ char *name;
+
+ len = strlen(token) + 1;
+ if (len > 64)
+ return -ENOSPC;
+
+ name = kstrdup(token, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ *pname = name;
+ return 0;
+}
+
+
+static inline void kill_final_newline(char *str)
+{
+ char *newline = strrchr(str, '\n');
+
+ if (newline && !newline[1])
+ *newline = 0;
+}
+
+
+#define parse_err(fmt, args...) do { \
+ pr_err(fmt , ## args); \
+ return 1; \
+} while (0)
+
+#ifndef MODULE
+static int phram_init_called;
+/*
+ * This shall contain the module parameter if any. It is of the form:
+ * - phram=<device>,<address>,<size> for module case
+ * - phram.phram=<device>,<address>,<size> for built-in case
+ * We leave 64 bytes for the device name, 20 for the address and 20 for the
+ * size.
+ * Example: phram.phram=rootfs,0xa0000000,512Mi
+ */
+static char phram_paramline[64 + 20 + 20];
+#endif
+
+static int phram_setup(const char *val)
+{
+ char buf[64 + 20 + 20], *str = buf;
+ char *token[3];
+ char *name;
+ uint64_t start;
+ uint64_t len;
+ int i, ret;
+
+ if (strnlen(val, sizeof(buf)) >= sizeof(buf))
+ parse_err("parameter too long\n");
+
+ strcpy(str, val);
+ kill_final_newline(str);
+
+ for (i = 0; i < 3; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str)
+ parse_err("too many arguments\n");
+
+ if (!token[2])
+ parse_err("not enough arguments\n");
+
+ ret = parse_name(&name, token[0]);
+ if (ret)
+ return ret;
+
+ ret = parse_num64(&start, token[1]);
+ if (ret) {
+ kfree(name);
+ parse_err("illegal start address\n");
+ }
+
+ ret = parse_num64(&len, token[2]);
+ if (ret) {
+ kfree(name);
+ parse_err("illegal device length\n");
+ }
+
+ ret = register_device(name, start, len);
+ if (!ret)
+ pr_info("%s device: %#llx at %#llx\n", name, len, start);
+ else
+ kfree(name);
+
+ return ret;
+}
+
+static int phram_param_call(const char *val, struct kernel_param *kp)
+{
+#ifdef MODULE
+ return phram_setup(val);
+#else
+ /*
+ * If more parameters are later passed in via
+ * /sys/module/phram/parameters/phram
+ * and init_phram() has already been called,
+ * we can parse the argument now.
+ */
+
+ if (phram_init_called)
+ return phram_setup(val);
+
+	/*
+	 * During the early boot stage, we only save the parameters
+	 * here.  We must parse them later: if the param was passed
+	 * on the kernel boot command line, phram_param_call() is
+	 * called so early that it is not possible to resolve the
+	 * device (even kmalloc() fails).  Defer that work to
+	 * init_phram(), which hands the saved string to phram_setup().
+	 */
+
+ if (strlen(val) >= sizeof(phram_paramline))
+ return -ENOSPC;
+ strcpy(phram_paramline, val);
+
+ return 0;
+#endif
+}
+
+module_param_call(phram, phram_param_call, NULL, NULL, 000);
+MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
+
+
+static int __init init_phram(void)
+{
+ int ret = 0;
+
+#ifndef MODULE
+ if (phram_paramline[0])
+ ret = phram_setup(phram_paramline);
+ phram_init_called = 1;
+#endif
+
+ return ret;
+}
+
+static void __exit cleanup_phram(void)
+{
+ unregister_devices();
+}
+
+module_init(init_phram);
+module_exit(cleanup_phram);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joern Engel <joern@wh.fh-wedel.de>");
+MODULE_DESCRIPTION("MTD driver for physical RAM");
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
new file mode 100644
index 000000000..708b7e8c8
--- /dev/null
+++ b/drivers/mtd/devices/pmc551.c
@@ -0,0 +1,856 @@
+/*
+ * PMC551 PCI Mezzanine Ram Device
+ *
+ * Author:
+ * Mark Ferrell <mferrell@mvista.com>
+ * Copyright 1999,2000 Nortel Networks
+ *
+ * License:
+ * As part of this driver was derived from the slram.c driver it
+ * falls under the same license, which is GNU General Public
+ * License v2
+ *
+ * Description:
+ * This driver is intended to support the PMC551 PCI Ram device
+ * from Ramix Inc. The PMC551 is a PMC Mezzanine module for
+ * cPCI embedded systems. The device contains a single SROM
+ * that initially programs the V370PDC chipset onboard the
+ * device, and various banks of DRAM/SDRAM onboard. This driver
+ * implements this PCI Ram device as an MTD (Memory Technology
+ * Device) so that it can be used to hold a file system, or for
+ * added swap space in embedded systems. Since the memory on
+ * this board isn't as fast as main memory we do not try to hook
+ * it into main memory as that would simply reduce performance
+ * on the system. Using it as a block device allows us to use
+ * it as high speed swap or for a high speed disk device of some
+ * sort. Which becomes very useful on diskless systems in the
+ * embedded market I might add.
+ *
+ * Notes:
+ * Due to what I assume is a buggy SROM, the 64M PMC551 I
+ * have available claims that all 4 of its DRAM banks have 64MiB
+ * of ram configured (making a grand total of 256MiB onboard).
+ * This is slightly annoying since the BAR0 size reflects the
+ * aperture size, not the dram size, and the V370PDC supplies no
+ * other method for memory size discovery. This problem is
+ * mostly only relevant when compiled as a module, as the
+ * unloading of the module with an aperture size smaller than
+ * the ram will cause the driver to detect the onboard memory
+ * size to be equal to the aperture size when the module is
+ * reloaded. Soooo, to help, the module supports an msize
+ * option to allow the specification of the onboard memory, and
+ * an asize option, to allow the specification of the aperture
+ * size. The aperture must be less than or equal to the memory
+ * size; the driver will correct this if you screw it up. This
+ * problem is not relevant for compiled-in drivers, as compiled-in
+ * drivers only init once.
+ * Credits:
+ * Saeed Karamooz <saeed@ramix.com> of Ramix INC. for the
+ * initial example code of how to initialize this device and for
+ * help with questions I had concerning operation of the device.
+ *
+ * Most of the MTD code for this driver was originally written
+ * for the slram.o module in the MTD drivers package which
+ * allows the mapping of system memory into an MTD device.
+ * Since the PMC551 memory module is accessed in the same
+ * fashion as system memory, the slram.c code became a very nice
+ * fit to the needs of this driver. All we added was PCI
+ * detection/initialization and automatic size discovery via the
+ * PCI probe; later changes by Corey Minyard set up the card to
+ * utilize a 1M sliding aperture.
+ *
+ * Corey Minyard <minyard@nortelnetworks.com>
+ * * Modified driver to utilize a sliding aperture instead of
+ * mapping all memory into kernel space which turned out to
+ * be very wasteful.
+ * * Located a bug in the SROM's initialization sequence that
+ * made the memory unusable, added a fix to code to touch up
+ * the DRAM some.
+ *
+ * Bugs/FIXMEs:
+ * * MUST fix the init function to not spin on a register
+ * waiting for it to set .. this does not safely handle busted
+ * devices that never reset the register correctly, which will
+ * cause the system to hang with a reboot being the only chance
+ * at recovery. [sort of fixed, could be better]
+ * * Add I2C handling of the SROM so we can read the SROM's information
+ * about the aperture size. This should always accurately reflect the
+ * onboard memory size.
+ * * Comb the init routine. It's still a bit kludgy on a few things.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <asm/io.h>
+#include <linux/pci.h>
+#include <linux/mtd/mtd.h>
+
+#define PMC551_VERSION \
+ "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
+
+#define PCI_VENDOR_ID_V3_SEMI 0x11b0
+#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
+
+#define PMC551_PCI_MEM_MAP0 0x50
+#define PMC551_PCI_MEM_MAP1 0x54
+#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
+#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
+#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
+#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
+
+#define PMC551_SDRAM_MA 0x60
+#define PMC551_SDRAM_CMD 0x62
+#define PMC551_DRAM_CFG 0x64
+#define PMC551_SYS_CTRL_REG 0x78
+
+#define PMC551_DRAM_BLK0 0x68
+#define PMC551_DRAM_BLK1 0x6c
+#define PMC551_DRAM_BLK2 0x70
+#define PMC551_DRAM_BLK3 0x74
+#define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f))
+#define PMC551_DRAM_BLK_SET_COL_MUX(x, v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
+#define PMC551_DRAM_BLK_SET_ROW_MUX(x, v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
+
+struct mypriv {
+ struct pci_dev *dev;
+ u_char *start;
+ u32 base_map0;
+ u32 curr_map0;
+ u32 asize;
+ struct mtd_info *nextpmc551;
+};
+
+static struct mtd_info *pmc551list;
+
+static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+
+static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi, soff_lo; /* start address offset hi/lo */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
+ unsigned long end;
+ u_char *ptr;
+ size_t retlen;
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr,
+ (long)instr->len);
+#endif
+
+ end = instr->addr + instr->len - 1;
+ eoff_hi = end & ~(priv->asize - 1);
+ soff_hi = instr->addr & ~(priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+ soff_lo = instr->addr & (priv->asize - 1);
+
+ pmc551_point(mtd, instr->addr, instr->len, &retlen,
+ (void **)&ptr, NULL);
+
+ if (soff_hi == eoff_hi || mtd->size == priv->asize) {
+ /* The whole thing fits within one access, so just one shot
+ will do it. */
+ memset(ptr, 0xff, instr->len);
+ } else {
+		/* We have to do multiple accesses to cover the whole
+		   range. */
+ while (soff_hi != eoff_hi) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_erase() soff_hi: %ld, "
+ "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+#endif
+ memset(ptr, 0xff, priv->asize);
+ if (soff_hi + priv->asize >= mtd->size) {
+ goto out;
+ }
+ soff_hi += priv->asize;
+ pmc551_point(mtd, (priv->base_map0 | soff_hi),
+ priv->asize, &retlen,
+ (void **)&ptr, NULL);
+ }
+ memset(ptr, 0xff, eoff_lo);
+ }
+
+ out:
+ instr->state = MTD_ERASE_DONE;
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_erase() done\n");
+#endif
+
+ mtd_erase_callback(instr);
+ return 0;
+}
+
+static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi;
+ u32 soff_lo;
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
+#endif
+
+ soff_hi = from & ~(priv->asize - 1);
+ soff_lo = from & (priv->asize - 1);
+
+	/* Cheap hack optimization: only remap when the window actually
+	 * moves.  curr_map0 caches the last soff_hi programmed, so
+	 * compare against that rather than the raw offset.
+	 */
+	if (priv->curr_map0 != soff_hi) {
+		pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0,
+			(priv->base_map0 | soff_hi));
+		priv->curr_map0 = soff_hi;
+	}
+
+ *virt = priv->start + soff_lo;
+ *retlen = len;
+ return 0;
+}
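+
+/*
+ * Worked example (illustrative): with a 1 MiB aperture (asize =
+ * 0x100000), a request at offset 0x123456 splits into soff_hi =
+ * 0x100000 (the window base programmed into MEM_MAP0) and soff_lo =
+ * 0x23456 (the offset within the mapped window).
+ */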
+
+static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_unpoint()\n");
+#endif
+ return 0;
+}
+
+static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
+{
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi, soff_lo; /* start address offset hi/lo */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
+ unsigned long end;
+ u_char *ptr;
+ u_char *copyto = buf;
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n",
+ (long)from, (long)len, (long)priv->asize);
+#endif
+
+ end = from + len - 1;
+ soff_hi = from & ~(priv->asize - 1);
+ eoff_hi = end & ~(priv->asize - 1);
+ soff_lo = from & (priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+
+ pmc551_point(mtd, from, len, retlen, (void **)&ptr, NULL);
+
+ if (soff_hi == eoff_hi) {
+ /* The whole thing fits within one access, so just one shot
+ will do it. */
+ memcpy(copyto, ptr, len);
+ copyto += len;
+ } else {
+		/* We have to do multiple accesses to read back the
+		   whole range. */
+ while (soff_hi != eoff_hi) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_read() soff_hi: %ld, "
+ "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+#endif
+ memcpy(copyto, ptr, priv->asize);
+ copyto += priv->asize;
+ if (soff_hi + priv->asize >= mtd->size) {
+ goto out;
+ }
+ soff_hi += priv->asize;
+ pmc551_point(mtd, soff_hi, priv->asize, retlen,
+ (void **)&ptr, NULL);
+ }
+ memcpy(copyto, ptr, eoff_lo);
+ copyto += eoff_lo;
+ }
+
+ out:
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_read() done\n");
+#endif
+ *retlen = copyto - buf;
+ return 0;
+}
+
+static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi, soff_lo; /* start address offset hi/lo */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
+ unsigned long end;
+ u_char *ptr;
+ const u_char *copyfrom = buf;
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n",
+ (long)to, (long)len, (long)priv->asize);
+#endif
+
+ end = to + len - 1;
+ soff_hi = to & ~(priv->asize - 1);
+ eoff_hi = end & ~(priv->asize - 1);
+ soff_lo = to & (priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+
+ pmc551_point(mtd, to, len, retlen, (void **)&ptr, NULL);
+
+ if (soff_hi == eoff_hi) {
+ /* The whole thing fits within one access, so just one shot
+ will do it. */
+ memcpy(ptr, copyfrom, len);
+ copyfrom += len;
+ } else {
+ /* We have to do multiple writes to get all the data
+ written. */
+ while (soff_hi != eoff_hi) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_write() soff_hi: %ld, "
+ "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+#endif
+ memcpy(ptr, copyfrom, priv->asize);
+ copyfrom += priv->asize;
+ if (soff_hi >= mtd->size) {
+ goto out;
+ }
+ soff_hi += priv->asize;
+ pmc551_point(mtd, soff_hi, priv->asize, retlen,
+ (void **)&ptr, NULL);
+ }
+ memcpy(ptr, copyfrom, eoff_lo);
+ copyfrom += eoff_lo;
+ }
+
+ out:
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_write() done\n");
+#endif
+ *retlen = copyfrom - buf;
+ return 0;
+}
+
+/*
+ * Fixup routines for the V370PDC
+ * PCI device ID 0x020011b0
+ *
+ * This function basically kick-starts the DRAM onboard the card and gets it
+ * ready to be used. Before this is done the device reads VERY erratically,
+ * so much so that it can crash the Linux 2.2.x series kernels when a user
+ * cats /proc/pci .. though that is mainly a kernel bug in handling the PCI
+ * DEVSEL register. FIXME: stop spinning on registers .. must implement a
+ * timeout mechanism.
+ *
+ * Returns the size of the memory region found.
+ */
+static int fixup_pmc551(struct pci_dev *dev)
+{
+#ifdef CONFIG_MTD_PMC551_BUGFIX
+ u32 dram_data;
+#endif
+ u32 size, dcmd, cfg, dtmp;
+ u16 cmd, tmp, i;
+ u8 bcmd, counter;
+
+ /* Sanity Check */
+ if (!dev) {
+ return -ENODEV;
+ }
+
+ /*
+ * Attempt to reset the card
+ * FIXME: Stop Spinning registers
+ */
+ counter = 0;
+ /* unlock registers */
+ pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5);
+ /* read in old data */
+ pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd);
+ /* bang the reset line up and down for a few */
+ for (i = 0; i < 10; i++) {
+ counter = 0;
+ bcmd &= ~0x80;
+ while (counter++ < 100) {
+ pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
+ }
+ counter = 0;
+ bcmd |= 0x80;
+ while (counter++ < 100) {
+ pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
+ }
+ }
+ bcmd |= (0x40 | 0x20);
+ pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
+
+ /*
+ * Take care and turn off the memory on the device while we
+ * tweak the configurations
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ tmp = cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(dev, PCI_COMMAND, tmp);
+
+ /*
+ * Disable existing aperture before probing memory size
+ */
+ pci_read_config_dword(dev, PMC551_PCI_MEM_MAP0, &dcmd);
+ dtmp = (dcmd | PMC551_PCI_MEM_MAP_ENABLE | PMC551_PCI_MEM_MAP_REG_EN);
+ pci_write_config_dword(dev, PMC551_PCI_MEM_MAP0, dtmp);
+ /*
+ * Grab old BAR0 config so that we can figure out memory size
+ * This is another bit of kludge going on. The reason for the
+ * redundancy is I am hoping to retain the original configuration
+ * previously assigned to the card by the BIOS or some previous
+ * fixup routine in the kernel. So we read the old config into cfg,
+ * then write all 1's to the memory space, read back the result into
+ * "size", and then write back all the old config.
+ */
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &cfg);
+#ifndef CONFIG_MTD_PMC551_BUGFIX
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &size);
+ size = (size & PCI_BASE_ADDRESS_MEM_MASK);
+ size &= ~(size - 1);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, cfg);
+#else
+ /*
+ * Get the size of the memory by reading all the DRAM size values
+ * and adding them up.
+ *
+ * KLUDGE ALERT: the boards we are using have invalid column and
+ * row mux values. We fix them here, but this will break other
+ * memory configurations.
+ */
+ pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data);
+ size = PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data);
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data);
+ size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data);
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data);
+ size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data);
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data);
+ size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data);
+
+ /*
+ * Oops .. something went wrong
+ */
+ if ((size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) {
+ return -ENODEV;
+ }
+#endif /* CONFIG_MTD_PMC551_BUGFIX */
+
+ if ((cfg & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
+ return -ENODEV;
+ }
+
+ /*
+ * Precharge Dram
+ */
+ pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0400);
+ pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x00bf);
+
+ /*
+ * Wait until command has gone through
+ * FIXME: register spinning issue
+ */
+	counter = 0;
+	do {
+		pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
+		if (counter++ > 100)
+			break;
+	} while ((PCI_COMMAND_IO) & cmd);
+
+ /*
+ * Turn on auto refresh
+ * The loop is taken directly from Ramix's example code. I assume that
+ * this must be held high for some duration of time, but I can find no
+	 * documentation referencing the reasons why.
+ */
+ for (i = 1; i <= 8; i++) {
+ pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0df);
+
+ /*
+ * Make certain command has gone through
+ * FIXME: register spinning issue
+ */
+ counter = 0;
+ do {
+ pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
+ if (counter++ > 100)
+ break;
+ } while ((PCI_COMMAND_IO) & cmd);
+ }
+
+ pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0020);
+ pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0ff);
+
+ /*
+ * Wait until command completes
+ * FIXME: register spinning issue
+ */
+ counter = 0;
+ do {
+ pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
+ if (counter++ > 100)
+ break;
+ } while ((PCI_COMMAND_IO) & cmd);
+
+ pci_read_config_dword(dev, PMC551_DRAM_CFG, &dcmd);
+ dcmd |= 0x02000000;
+ pci_write_config_dword(dev, PMC551_DRAM_CFG, dcmd);
+
+ /*
+ * Check to make certain fast back-to-back, if not
+ * then set it so
+ */
+ pci_read_config_word(dev, PCI_STATUS, &cmd);
+ if ((cmd & PCI_COMMAND_FAST_BACK) == 0) {
+ cmd |= PCI_COMMAND_FAST_BACK;
+ pci_write_config_word(dev, PCI_STATUS, cmd);
+ }
+
+ /*
+ * Check to make certain the DEVSEL is set correctly, this device
+ * has a tendency to assert DEVSEL and TRDY when a write is performed
+ * to the memory when memory is read-only
+ */
+ if ((cmd & PCI_STATUS_DEVSEL_MASK) != 0x0) {
+ cmd &= ~PCI_STATUS_DEVSEL_MASK;
+ pci_write_config_word(dev, PCI_STATUS, cmd);
+ }
+ /*
+ * Set to be prefetchable and put everything back based on old cfg.
+ * it's possible that the reset of the V370PDC nuked the original
+ * setup
+ */
+ /*
+ cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+ pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
+ */
+
+ /*
+ * Turn PCI memory and I/O bus access back on
+ */
+ pci_write_config_word(dev, PCI_COMMAND,
+ PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ /*
+ * Some screen fun
+ */
+ printk(KERN_DEBUG "pmc551: %d%sB (0x%x) of %sprefetchable memory at "
+ "0x%llx\n", (size < 1024) ? size : (size < 1048576) ?
+ size >> 10 : size >> 20,
+ (size < 1024) ? "" : (size < 1048576) ? "Ki" : "Mi", size,
+ ((dcmd & (0x1 << 3)) == 0) ? "non-" : "",
+ (unsigned long long)pci_resource_start(dev, 0));
+
+ /*
+ * Check to see the state of the memory
+ */
+ pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK0 Size: %d at %d\n"
+ "pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK1 Size: %d at %d\n"
+ "pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK2 Size: %d at %d\n"
+ "pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK3 Size: %d at %d\n"
+ "pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ printk(KERN_DEBUG "pmc551: Memory Access %s\n",
+ (((0x1 << 1) & cmd) == 0) ? "off" : "on");
+ printk(KERN_DEBUG "pmc551: I/O Access %s\n",
+ (((0x1 << 0) & cmd) == 0) ? "off" : "on");
+
+ pci_read_config_word(dev, PCI_STATUS, &cmd);
+ printk(KERN_DEBUG "pmc551: Devsel %s\n",
+ ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x000) ? "Fast" :
+ ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x200) ? "Medium" :
+ ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x400) ? "Slow" : "Invalid");
+
+ printk(KERN_DEBUG "pmc551: %sFast Back-to-Back\n",
+ ((PCI_COMMAND_FAST_BACK & cmd) == 0) ? "Not " : "");
+
+ pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd);
+ printk(KERN_DEBUG "pmc551: EEPROM is under %s control\n"
+ "pmc551: System Control Register is %slocked to PCI access\n"
+ "pmc551: System Control Register is %slocked to EEPROM access\n",
+ (bcmd & 0x1) ? "software" : "hardware",
+ (bcmd & 0x20) ? "" : "un", (bcmd & 0x40) ? "" : "un");
+#endif
+ return size;
+}
+
+/*
+ * Kernel version specific module stuffages
+ */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Ferrell <mferrell@mvista.com>");
+MODULE_DESCRIPTION(PMC551_VERSION);
+
+/*
+ * Stuff these outside the ifdef so as to not bust compiled-in driver support
+ */
+static int msize = 0;
+static int asize = 0;
+
+module_param(msize, int, 0);
+MODULE_PARM_DESC(msize, "memory size in MiB [1 - 1024]");
+module_param(asize, int, 0);
+MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]");
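+
+/*
+ * Example (illustrative): "modprobe pmc551 msize=64 asize=1" declares
+ * 64 MiB of onboard memory accessed through a 1 MiB sliding aperture;
+ * init_pmc551() reduces each value to a power of two (its lowest set
+ * bit) before scaling to MiB.
+ */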
+
+/*
+ * PMC551 Card Initialization
+ */
+static int __init init_pmc551(void)
+{
+ struct pci_dev *PCI_Device = NULL;
+ struct mypriv *priv;
+ int found = 0;
+ struct mtd_info *mtd;
+ int length = 0;
+
+ if (msize) {
+ msize = (1 << (ffs(msize) - 1)) << 20;
+ if (msize > (1 << 30)) {
+ printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n",
+ msize);
+ return -EINVAL;
+ }
+ }
+
+ if (asize) {
+ asize = (1 << (ffs(asize) - 1)) << 20;
+ if (asize > (1 << 30)) {
+ printk(KERN_NOTICE "pmc551: Invalid aperture size "
+ "[%d]\n", asize);
+ return -EINVAL;
+ }
+ }
+
+ printk(KERN_INFO PMC551_VERSION);
+
+ /*
+	 * PCI-bus chipset probe.
+ */
+ for (;;) {
+
+ if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
+ PCI_DEVICE_ID_V3_SEMI_V370PDC,
+ PCI_Device)) == NULL) {
+ break;
+ }
+
+ printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n",
+ (unsigned long long)pci_resource_start(PCI_Device, 0));
+
+		/*
+		 * The PMC551 device acts VERY weird if you don't init it
+		 * first; i.e. it will not correctly report devsel. If for
+		 * some reason the sdram is in a write-protected state, the
+		 * device will DEVSEL when it is written to, causing problems
+		 * with the oldproc.c driver in some kernels (2.2.*).
+		 */
+ if ((length = fixup_pmc551(PCI_Device)) <= 0) {
+ printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n");
+ break;
+ }
+
+ /*
+ * This is needed until the driver is capable of reading the
+ * onboard I2C SROM to discover the "real" memory size.
+ */
+ if (msize) {
+ length = msize;
+ printk(KERN_NOTICE "pmc551: Using specified memory "
+ "size 0x%x\n", length);
+ } else {
+ msize = length;
+ }
+
+ mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd)
+ break;
+
+ priv = kzalloc(sizeof(struct mypriv), GFP_KERNEL);
+ if (!priv) {
+ kfree(mtd);
+ break;
+ }
+ mtd->priv = priv;
+ priv->dev = PCI_Device;
+
+ if (asize > length) {
+ printk(KERN_NOTICE "pmc551: reducing aperture size to "
+ "fit %dM\n", length >> 20);
+ priv->asize = asize = length;
+ } else if (asize == 0 || asize == length) {
+ printk(KERN_NOTICE "pmc551: Using existing aperture "
+ "size %dM\n", length >> 20);
+ priv->asize = asize = length;
+ } else {
+ printk(KERN_NOTICE "pmc551: Using specified aperture "
+ "size %dM\n", asize >> 20);
+ priv->asize = asize;
+ }
+ priv->start = pci_iomap(PCI_Device, 0, priv->asize);
+
+ if (!priv->start) {
+ printk(KERN_NOTICE "pmc551: Unable to map IO space\n");
+ kfree(mtd->priv);
+ kfree(mtd);
+ break;
+ }
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551: setting aperture to %d\n",
+ ffs(priv->asize >> 20) - 1);
+#endif
+
+ priv->base_map0 = (PMC551_PCI_MEM_MAP_REG_EN
+ | PMC551_PCI_MEM_MAP_ENABLE
+ | (ffs(priv->asize >> 20) - 1) << 4);
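+	/*
+	 * For example, a 32MiB aperture encodes as ffs(32) - 1 = 5 in
+	 * bits 7:4 of MEM_MAP0, alongside the enable bits above.
+	 */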
+ priv->curr_map0 = priv->base_map0;
+ pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0,
+ priv->curr_map0);
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551: aperture set to %d\n",
+ (priv->base_map0 & 0xF0) >> 4);
+#endif
+
+ mtd->size = msize;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->_erase = pmc551_erase;
+ mtd->_read = pmc551_read;
+ mtd->_write = pmc551_write;
+ mtd->_point = pmc551_point;
+ mtd->_unpoint = pmc551_unpoint;
+ mtd->type = MTD_RAM;
+ mtd->name = "PMC551 RAM board";
+ mtd->erasesize = 0x10000;
+ mtd->writesize = 1;
+ mtd->owner = THIS_MODULE;
+
+ if (mtd_device_register(mtd, NULL, 0)) {
+ printk(KERN_NOTICE "pmc551: Failed to register new device\n");
+ pci_iounmap(PCI_Device, priv->start);
+ kfree(mtd->priv);
+ kfree(mtd);
+ break;
+ }
+
+ /* Keep a reference as the mtd_device_register worked */
+ pci_dev_get(PCI_Device);
+
+ printk(KERN_NOTICE "Registered pmc551 memory device.\n");
+ printk(KERN_NOTICE "Mapped %dMiB of memory from 0x%p to 0x%p\n",
+ priv->asize >> 20,
+ priv->start, priv->start + priv->asize);
+ printk(KERN_NOTICE "Total memory is %d%sB\n",
+ (length < 1024) ? length :
+ (length < 1048576) ? length >> 10 : length >> 20,
+ (length < 1024) ? "" : (length < 1048576) ? "Ki" : "Mi");
+ priv->nextpmc551 = pmc551list;
+ pmc551list = mtd;
+ found++;
+ }
+
+ /* Exited early, reference left over */
+ pci_dev_put(PCI_Device);
+
+ if (!pmc551list) {
+ printk(KERN_NOTICE "pmc551: not detected\n");
+ return -ENODEV;
+ } else {
+ printk(KERN_NOTICE "pmc551: %d pmc551 devices loaded\n", found);
+ return 0;
+ }
+}
+
+/*
+ * PMC551 Card Cleanup
+ */
+static void __exit cleanup_pmc551(void)
+{
+ int found = 0;
+ struct mtd_info *mtd;
+ struct mypriv *priv;
+
+ while ((mtd = pmc551list)) {
+ priv = mtd->priv;
+ pmc551list = priv->nextpmc551;
+
+ if (priv->start) {
+ printk(KERN_DEBUG "pmc551: unmapping %dMiB starting at "
+ "0x%p\n", priv->asize >> 20, priv->start);
+ pci_iounmap(priv->dev, priv->start);
+ }
+ pci_dev_put(priv->dev);
+
+ kfree(mtd->priv);
+ mtd_device_unregister(mtd);
+ kfree(mtd);
+ found++;
+ }
+
+ printk(KERN_NOTICE "pmc551: %d pmc551 devices unloaded\n", found);
+}
+
+module_init(init_pmc551);
+module_exit(cleanup_pmc551);
diff --git a/drivers/mtd/devices/serial_flash_cmds.h b/drivers/mtd/devices/serial_flash_cmds.h
new file mode 100644
index 000000000..f59a12529
--- /dev/null
+++ b/drivers/mtd/devices/serial_flash_cmds.h
@@ -0,0 +1,61 @@
+/*
+ * Generic/SFDP Flash Commands and Device Capabilities
+ *
+ * Copyright (C) 2013 Lee Jones <lee.jones@linaro.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _MTD_SERIAL_FLASH_CMDS_H
+#define _MTD_SERIAL_FLASH_CMDS_H
+
+/* Generic Flash Commands/OPCODEs */
+#define SPINOR_OP_RDSR2 0x35
+#define SPINOR_OP_WRVCR 0x81
+#define SPINOR_OP_RDVCR 0x85
+
+/* JEDEC Standard - Serial Flash Discoverable Parameters (SFDP) Commands */
+#define SPINOR_OP_READ_1_2_2 0xbb /* DUAL I/O READ */
+#define SPINOR_OP_READ_1_4_4 0xeb /* QUAD I/O READ */
+
+#define SPINOR_OP_WRITE 0x02 /* PAGE PROGRAM */
+#define SPINOR_OP_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
+#define SPINOR_OP_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
+#define SPINOR_OP_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
+#define SPINOR_OP_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
+
+/* READ commands with 32-bit addressing */
+#define SPINOR_OP_READ4_1_2_2 0xbc
+#define SPINOR_OP_READ4_1_4_4 0xec
+
+/* Configuration flags */
+#define FLASH_FLAG_SINGLE 0x000000ff
+#define FLASH_FLAG_READ_WRITE 0x00000001
+#define FLASH_FLAG_READ_FAST 0x00000002
+#define FLASH_FLAG_SE_4K 0x00000004
+#define FLASH_FLAG_SE_32K 0x00000008
+#define FLASH_FLAG_CE 0x00000010
+#define FLASH_FLAG_32BIT_ADDR 0x00000020
+#define FLASH_FLAG_RESET 0x00000040
+#define FLASH_FLAG_DYB_LOCKING 0x00000080
+
+#define FLASH_FLAG_DUAL 0x0000ff00
+#define FLASH_FLAG_READ_1_1_2 0x00000100
+#define FLASH_FLAG_READ_1_2_2 0x00000200
+#define FLASH_FLAG_READ_2_2_2 0x00000400
+#define FLASH_FLAG_WRITE_1_1_2 0x00001000
+#define FLASH_FLAG_WRITE_1_2_2 0x00002000
+#define FLASH_FLAG_WRITE_2_2_2 0x00004000
+
+#define FLASH_FLAG_QUAD 0x00ff0000
+#define FLASH_FLAG_READ_1_1_4 0x00010000
+#define FLASH_FLAG_READ_1_4_4 0x00020000
+#define FLASH_FLAG_READ_4_4_4 0x00040000
+#define FLASH_FLAG_WRITE_1_1_4 0x00100000
+#define FLASH_FLAG_WRITE_1_4_4 0x00200000
+#define FLASH_FLAG_WRITE_4_4_4 0x00400000
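+
+/*
+ * A minimal usage sketch: a driver would test the capability bits above
+ * before choosing an opcode, along the lines of
+ *
+ *	if (info->flags & FLASH_FLAG_READ_1_4_4)
+ *		cmd = SPINOR_OP_READ_1_4_4;
+ *
+ * where "info" and "cmd" are hypothetical driver-local names.
+ */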
+
+#endif /* _MTD_SERIAL_FLASH_CMDS_H */
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
new file mode 100644
index 000000000..2fc4957cb
--- /dev/null
+++ b/drivers/mtd/devices/slram.c
@@ -0,0 +1,347 @@
+/*======================================================================
+
+ This driver provides a method to access memory not used by the kernel
+ itself (i.e. if the kernel commandline mem=xxx is used). To actually
+ use slram at least mtdblock or mtdchar is required (for block or
+ character device access).
+
+ Usage:
+
+   If compiled as a loadable module:
+     modprobe slram map=<name>,<start>,<end/offset>
+   If statically linked into the kernel, use the following kernel command
+   line:
+     slram=<name>,<start>,<end/offset>
+
+ <name>: name of the device that will be listed in /proc/mtd
+ <start>: start of the memory region, decimal or hex (0xabcdef)
+ <end/offset>: end of the memory region. It's possible to use +0x1234
+ to specify the offset instead of the absolute address
+
+ NOTE:
+ With slram it's only possible to map a contiguous memory region. Therefore
+ if there's a device mapped somewhere in the region specified slram will
+ fail to load (see kernel log if modprobe fails).
+
+ -
+
+ Jochen Schaeuble <psionic@psionic.de>
+
+======================================================================*/
+
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/init.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+
+#define SLRAM_MAX_DEVICES_PARAMS 6 /* 3 parameters / device */
+#define SLRAM_BLK_SZ 0x4000
+
+#define T(fmt, args...) printk(KERN_DEBUG fmt, ## args)
+#define E(fmt, args...) printk(KERN_NOTICE fmt, ## args)
+
+typedef struct slram_priv {
+ u_char *start;
+ u_char *end;
+} slram_priv_t;
+
+typedef struct slram_mtd_list {
+ struct mtd_info *mtdinfo;
+ struct slram_mtd_list *next;
+} slram_mtd_list_t;
+
+#ifdef MODULE
+static char *map[SLRAM_MAX_DEVICES_PARAMS];
+
+module_param_array(map, charp, NULL, 0);
+MODULE_PARM_DESC(map, "List of memory regions to map. \"map=<name>, <start>, <length / end>\"");
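+
+/*
+ * For example (addresses are illustrative only):
+ *   modprobe slram map=sl0,0x4000000,+0x1000000
+ * maps 16MiB starting at 64MiB as device "sl0".
+ */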
+#else
+static char *map;
+#endif
+
+static slram_mtd_list_t *slram_mtdlist = NULL;
+
+static int slram_erase(struct mtd_info *, struct erase_info *);
+static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **,
+ resource_size_t *);
+static int slram_unpoint(struct mtd_info *, loff_t, size_t);
+static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+
+static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ slram_priv_t *priv = mtd->priv;
+
+ memset(priv->start + instr->addr, 0xff, instr->len);
+ /* This'll catch a few races. Free the thing before returning :)
+ * I don't feel at all ashamed. This kind of thing is possible anyway
+ * with flash, but unlikely.
+ */
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return(0);
+}
+
+static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ slram_priv_t *priv = mtd->priv;
+
+ *virt = priv->start + from;
+ *retlen = len;
+ return(0);
+}
+
+static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
+}
+
+static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ slram_priv_t *priv = mtd->priv;
+
+ memcpy(buf, priv->start + from, len);
+ *retlen = len;
+ return(0);
+}
+
+static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ slram_priv_t *priv = mtd->priv;
+
+ memcpy(priv->start + to, buf, len);
+ *retlen = len;
+ return(0);
+}
+
+/*====================================================================*/
+
+static int register_device(char *name, unsigned long start, unsigned long length)
+{
+ slram_mtd_list_t **curmtd;
+
+ curmtd = &slram_mtdlist;
+ while (*curmtd) {
+ curmtd = &(*curmtd)->next;
+ }
+
+ *curmtd = kmalloc(sizeof(slram_mtd_list_t), GFP_KERNEL);
+ if (!(*curmtd)) {
+ E("slram: Cannot allocate new MTD device.\n");
+ return(-ENOMEM);
+ }
+ (*curmtd)->mtdinfo = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ (*curmtd)->next = NULL;
+
+ if ((*curmtd)->mtdinfo) {
+ (*curmtd)->mtdinfo->priv =
+ kzalloc(sizeof(slram_priv_t), GFP_KERNEL);
+
+ if (!(*curmtd)->mtdinfo->priv) {
+ kfree((*curmtd)->mtdinfo);
+ (*curmtd)->mtdinfo = NULL;
+ }
+ }
+
+ if (!(*curmtd)->mtdinfo) {
+ E("slram: Cannot allocate new MTD device.\n");
+ return(-ENOMEM);
+ }
+
+ if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
+ ioremap(start, length))) {
+ E("slram: ioremap failed\n");
+ return -EIO;
+ }
+ ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end =
+ ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start + length;
+
+
+ (*curmtd)->mtdinfo->name = name;
+ (*curmtd)->mtdinfo->size = length;
+ (*curmtd)->mtdinfo->flags = MTD_CAP_RAM;
+ (*curmtd)->mtdinfo->_erase = slram_erase;
+ (*curmtd)->mtdinfo->_point = slram_point;
+ (*curmtd)->mtdinfo->_unpoint = slram_unpoint;
+ (*curmtd)->mtdinfo->_read = slram_read;
+ (*curmtd)->mtdinfo->_write = slram_write;
+ (*curmtd)->mtdinfo->owner = THIS_MODULE;
+ (*curmtd)->mtdinfo->type = MTD_RAM;
+ (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
+ (*curmtd)->mtdinfo->writesize = 1;
+
+ if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
+ E("slram: Failed to register new device\n");
+ iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
+ kfree((*curmtd)->mtdinfo->priv);
+ kfree((*curmtd)->mtdinfo);
+ return(-EAGAIN);
+ }
+ T("slram: Registered device %s from %luKiB to %luKiB\n", name,
+ (start / 1024), ((start + length) / 1024));
+ T("slram: Mapped from 0x%p to 0x%p\n",
+ ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start,
+ ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end);
+ return(0);
+}
+
+static void unregister_devices(void)
+{
+ slram_mtd_list_t *nextitem;
+
+ while (slram_mtdlist) {
+ nextitem = slram_mtdlist->next;
+ mtd_device_unregister(slram_mtdlist->mtdinfo);
+ iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
+ kfree(slram_mtdlist->mtdinfo->priv);
+ kfree(slram_mtdlist->mtdinfo);
+ kfree(slram_mtdlist);
+ slram_mtdlist = nextitem;
+ }
+}
+
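+/*
+ * Scale a parsed value by an optional K/M suffix: e.g. "16M" parses to
+ * value 16 with unit "M" and yields 16 * 1024 * 1024; a bare number is
+ * returned unchanged.
+ */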
+static unsigned long handle_unit(unsigned long value, char *unit)
+{
+ if ((*unit == 'M') || (*unit == 'm')) {
+ return(value * 1024 * 1024);
+ } else if ((*unit == 'K') || (*unit == 'k')) {
+ return(value * 1024);
+ }
+ return(value);
+}
+
+static int parse_cmdline(char *devname, char *szstart, char *szlength)
+{
+ char *buffer;
+ unsigned long devstart;
+ unsigned long devlength;
+
+ if ((!devname) || (!szstart) || (!szlength)) {
+ unregister_devices();
+ return(-EINVAL);
+ }
+
+ devstart = simple_strtoul(szstart, &buffer, 0);
+ devstart = handle_unit(devstart, buffer);
+
+ if (*(szlength) != '+') {
+ devlength = simple_strtoul(szlength, &buffer, 0);
+ devlength = handle_unit(devlength, buffer);
+ if (devlength < devstart)
+ goto err_out;
+
+ devlength -= devstart;
+ } else {
+ devlength = simple_strtoul(szlength + 1, &buffer, 0);
+ devlength = handle_unit(devlength, buffer);
+ }
+ T("slram: devname=%s, devstart=0x%lx, devlength=0x%lx\n",
+ devname, devstart, devlength);
+ if (devlength % SLRAM_BLK_SZ != 0)
+ goto err_out;
+
+ if ((devstart = register_device(devname, devstart, devlength))){
+ unregister_devices();
+ return((int)devstart);
+ }
+ return(0);
+
+err_out:
+ E("slram: Illegal length parameter.\n");
+ return(-EINVAL);
+}
+
+#ifndef MODULE
+
+static int __init mtd_slram_setup(char *str)
+{
+ map = str;
+ return(1);
+}
+
+__setup("slram=", mtd_slram_setup);
+
+#endif
+
+static int __init init_slram(void)
+{
+ char *devname;
+
+#ifndef MODULE
+ char *devstart;
+ char *devlength;
+
+ if (!map) {
+ E("slram: not enough parameters.\n");
+ return(-EINVAL);
+ }
+ while (map) {
+ devname = devstart = devlength = NULL;
+
+ if (!(devname = strsep(&map, ","))) {
+ E("slram: No devicename specified.\n");
+ break;
+ }
+ T("slram: devname = %s\n", devname);
+ if ((!map) || (!(devstart = strsep(&map, ",")))) {
+ E("slram: No devicestart specified.\n");
+ }
+ T("slram: devstart = %s\n", devstart);
+ if ((!map) || (!(devlength = strsep(&map, ",")))) {
+ E("slram: No devicelength / -end specified.\n");
+ }
+ T("slram: devlength = %s\n", devlength);
+ if (parse_cmdline(devname, devstart, devlength) != 0) {
+ return(-EINVAL);
+ }
+ }
+#else
+ int count;
+ int i;
+
+ for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count];
+ count++) {
+ }
+
+ if ((count % 3 != 0) || (count == 0)) {
+ E("slram: not enough parameters.\n");
+ return(-EINVAL);
+ }
+ for (i = 0; i < (count / 3); i++) {
+ devname = map[i * 3];
+
+ if (parse_cmdline(devname, map[i * 3 + 1], map[i * 3 + 2])!=0) {
+ return(-EINVAL);
+ }
+
+ }
+#endif /* !MODULE */
+
+ return(0);
+}
+
+static void __exit cleanup_slram(void)
+{
+ unregister_devices();
+}
+
+module_init(init_slram);
+module_exit(cleanup_slram);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jochen Schaeuble <psionic@psionic.de>");
+MODULE_DESCRIPTION("MTD driver for uncached system RAM");
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
new file mode 100644
index 000000000..508bab3bd
--- /dev/null
+++ b/drivers/mtd/devices/spear_smi.c
@@ -0,0 +1,1092 @@
+/*
+ * SMI (Serial Memory Controller) device driver for Serial NOR Flash on
+ * SPEAr platform
+ * The serial nor interface is largely based on drivers/mtd/m25p80.c,
+ * however the SPI interface has been replaced by SMI.
+ *
+ * Copyright © 2010 STMicroelectronics.
+ * Ashish Priyadarshi
+ * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spear_smi.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+/* SMI clock rate */
+#define SMI_MAX_CLOCK_FREQ 50000000 /* 50 MHz */
+
+/* Max timeout to safely come out of erase or write busy conditions */
+#define SMI_PROBE_TIMEOUT (HZ / 10)
+#define SMI_MAX_TIME_OUT (3 * HZ)
+
+/* timeout for command completion */
+#define SMI_CMD_TIMEOUT (HZ / 10)
+
+/* registers of smi */
+#define SMI_CR1 0x0 /* SMI control register 1 */
+#define SMI_CR2 0x4 /* SMI control register 2 */
+#define SMI_SR 0x8 /* SMI status register */
+#define SMI_TR 0xC /* SMI transmit register */
+#define SMI_RR 0x10 /* SMI receive register */
+
+/* defines for control_reg 1 */
+#define BANK_EN (0xF << 0) /* enables all banks */
+#define DSEL_TIME (0x6 << 4) /* Deselect time 6 + 1 SMI_CK periods */
+#define SW_MODE (0x1 << 28) /* enables SW Mode */
+#define WB_MODE (0x1 << 29) /* Write Burst Mode */
+#define FAST_MODE (0x1 << 15) /* Fast Mode */
+#define HOLD1 (0x1 << 16) /* Clock Hold period selection */
+
+/* defines for control_reg 2 */
+#define SEND (0x1 << 7) /* Send data */
+#define TFIE (0x1 << 8) /* Transmission Flag Interrupt Enable */
+#define WCIE (0x1 << 9) /* Write Complete Interrupt Enable */
+#define RD_STATUS_REG (0x1 << 10) /* reads status reg */
+#define WE (0x1 << 11) /* Write Enable */
+
+#define TX_LEN_SHIFT 0
+#define RX_LEN_SHIFT 4
+#define BANK_SHIFT 12
+
+/* defines for status register */
+#define SR_WIP 0x1 /* Write in progress */
+#define SR_WEL 0x2 /* Write enable latch */
+#define SR_BP0 0x4 /* Block protect 0 */
+#define SR_BP1 0x8 /* Block protect 1 */
+#define SR_BP2 0x10 /* Block protect 2 */
+#define SR_SRWD 0x80 /* SR write protect */
+#define TFF 0x100 /* Transfer Finished Flag */
+#define WCF			0x200	/* Write Complete Flag */
+#define ERF1 0x400 /* Forbidden Write Request */
+#define ERF2 0x800 /* Forbidden Access */
+
+#define WM_SHIFT 12
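+/*
+ * After a write enable, bank n reports its write-mode status in bit
+ * (WM_SHIFT + n) of the status register; e.g. bank 0 is tested as
+ * status & 0x1000.
+ */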
+
+/* flash opcodes */
+#define OPCODE_RDID 0x9f /* Read JEDEC ID */
+
+/* Flash Device Ids maintenance section */
+
+/* data structure to maintain flash ids from different vendors */
+struct flash_device {
+ char *name;
+ u8 erase_cmd;
+ u32 device_id;
+ u32 pagesize;
+ unsigned long sectorsize;
+ unsigned long size_in_bytes;
+};
+
+#define FLASH_ID(n, es, id, psize, ssize, size) \
+{ \
+ .name = n, \
+ .erase_cmd = es, \
+ .device_id = id, \
+ .pagesize = psize, \
+ .sectorsize = ssize, \
+ .size_in_bytes = size \
+}
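+
+/*
+ * For example, the "st m25p16" entry below expands to a 2MiB part
+ * (size_in_bytes 0x200000) with 256-byte pages and 64KiB sectors,
+ * erased with opcode 0xd8.
+ */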
+
+static struct flash_device flash_devices[] = {
+ FLASH_ID("st m25p16" , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000),
+ FLASH_ID("st m25p32" , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000),
+ FLASH_ID("st m25p64" , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000),
+ FLASH_ID("st m25p128" , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000),
+ FLASH_ID("st m25p05" , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000),
+ FLASH_ID("st m25p10" , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000),
+ FLASH_ID("st m25p20" , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000),
+ FLASH_ID("st m25p40" , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000),
+ FLASH_ID("st m25p80" , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000),
+ FLASH_ID("st m45pe10" , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000),
+ FLASH_ID("st m45pe20" , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000),
+ FLASH_ID("st m45pe40" , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000),
+ FLASH_ID("st m45pe80" , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000),
+ FLASH_ID("sp s25fl004" , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000),
+ FLASH_ID("sp s25fl008" , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000),
+ FLASH_ID("sp s25fl016" , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000),
+ FLASH_ID("sp s25fl032" , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000),
+ FLASH_ID("sp s25fl064" , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000),
+ FLASH_ID("atmel 25f512" , 0x52, 0x0065001F, 0x80 , 0x8000 , 0x10000),
+ FLASH_ID("atmel 25f1024" , 0x52, 0x0060001F, 0x100, 0x8000 , 0x20000),
+ FLASH_ID("atmel 25f2048" , 0x52, 0x0063001F, 0x100, 0x10000, 0x40000),
+ FLASH_ID("atmel 25f4096" , 0x52, 0x0064001F, 0x100, 0x10000, 0x80000),
+ FLASH_ID("atmel 25fs040" , 0xd7, 0x0004661F, 0x100, 0x10000, 0x80000),
+ FLASH_ID("mac 25l512" , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000),
+ FLASH_ID("mac 25l1005" , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000),
+ FLASH_ID("mac 25l2005" , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000),
+ FLASH_ID("mac 25l4005" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+ FLASH_ID("mac 25l4005a" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+ FLASH_ID("mac 25l8005" , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000),
+ FLASH_ID("mac 25l1605" , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000),
+ FLASH_ID("mac 25l1605a" , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000),
+ FLASH_ID("mac 25l3205" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+ FLASH_ID("mac 25l3205a" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+ FLASH_ID("mac 25l6405" , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000),
+};
+
+/* Define spear specific structures */
+
+struct spear_snor_flash;
+
+/**
+ * struct spear_smi - Structure for SMI Device
+ *
+ * @clk: functional clock
+ * @status: current status register of SMI.
+ * @clk_rate: functional clock rate of SMI (default: SMI_MAX_CLOCK_FREQ)
+ * @lock: lock to prevent parallel access of SMI.
+ * @io_base: base address for registers of SMI.
+ * @pdev: platform device
+ * @cmd_complete: queue to wait for command completion of NOR-flash.
+ * @num_flashes: number of flashes actually present on board.
+ * @flash: separate structure for each Serial NOR-flash attached to SMI.
+ */
+struct spear_smi {
+ struct clk *clk;
+ u32 status;
+ unsigned long clk_rate;
+ struct mutex lock;
+ void __iomem *io_base;
+ struct platform_device *pdev;
+ wait_queue_head_t cmd_complete;
+ u32 num_flashes;
+ struct spear_snor_flash *flash[MAX_NUM_FLASH_CHIP];
+};
+
+/**
+ * struct spear_snor_flash - Structure for Serial NOR Flash
+ *
+ * @bank: Bank number (0, 1, 2, 3) for each NOR-flash.
+ * @dev_id: Device ID of NOR-flash.
+ * @lock: lock to manage flash read, write and erase operations
+ * @mtd: MTD info for each NOR-flash.
+ * @num_parts: Total number of partitions in each bank of NOR-flash.
+ * @parts: Partition info for each bank of NOR-flash.
+ * @page_size: Page size of NOR-flash.
+ * @base_addr: Base address of NOR-flash.
+ * @erase_cmd: erase command may vary on different flash types
+ * @fast_mode: flash supports read in fast mode
+ */
+struct spear_snor_flash {
+ u32 bank;
+ u32 dev_id;
+ struct mutex lock;
+ struct mtd_info mtd;
+ u32 num_parts;
+ struct mtd_partition *parts;
+ u32 page_size;
+ void __iomem *base_addr;
+ u8 erase_cmd;
+ u8 fast_mode;
+};
+
+static inline struct spear_snor_flash *get_flash_data(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct spear_snor_flash, mtd);
+}
+
+/**
+ * spear_smi_read_sr - Read status register of flash through SMI
+ * @dev: structure of SMI information.
+ * @bank: bank to which flash is connected
+ *
+ * This routine will return the status register of the flash chip present at the
+ * given bank.
+ */
+static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ mutex_lock(&dev->lock);
+ dev->status = 0; /* Will be set in interrupt handler */
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ /* program smi in hw mode */
+ writel(ctrlreg1 & ~(SW_MODE | WB_MODE), dev->io_base + SMI_CR1);
+
+	/* perform an RDSR (read status register) instruction in hw mode */
+ writel((bank << BANK_SHIFT) | RD_STATUS_REG | TFIE,
+ dev->io_base + SMI_CR2);
+
+ /* wait for tff */
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ /* copy dev->status (lower 16 bits) in order to release lock */
+ if (ret > 0)
+ ret = dev->status & 0xffff;
+ else if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ /* restore the ctrl regs state */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+/**
+ * spear_smi_wait_till_ready - wait till flash is ready
+ * @dev: structure of SMI information.
+ * @bank: flash corresponding to this bank
+ * @timeout: timeout for busy wait condition
+ *
+ * This routine checks the WIP (write in progress) bit in the status
+ * register.  It returns 0 once the flash is ready, or -EBUSY on timeout.
+ */
+static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
+ unsigned long timeout)
+{
+ unsigned long finish;
+ int status;
+
+ finish = jiffies + timeout;
+ do {
+ status = spear_smi_read_sr(dev, bank);
+ if (status < 0) {
+ if (status == -ETIMEDOUT)
+ continue; /* try till finish */
+ return status;
+ } else if (!(status & SR_WIP)) {
+ return 0;
+ }
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, finish));
+
+ dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
+ return -EBUSY;
+}
+
+/**
+ * spear_smi_int_handler - SMI Interrupt Handler.
+ * @irq: irq number
+ * @dev_id: structure of SMI device, embedded in dev_id.
+ *
+ * The handler clears all interrupt conditions and records the status in
+ * dev->status which is used by the driver later.
+ */
+static irqreturn_t spear_smi_int_handler(int irq, void *dev_id)
+{
+ u32 status = 0;
+ struct spear_smi *dev = dev_id;
+
+ status = readl(dev->io_base + SMI_SR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ /* clear all interrupt conditions */
+ writel(0, dev->io_base + SMI_SR);
+
+ /* copy the status register in dev->status */
+ dev->status |= status;
+
+ /* send the completion */
+ wake_up_interruptible(&dev->cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * spear_smi_hw_init - initializes the smi controller.
+ * @dev: structure of smi device
+ *
+ * This routine initializes the smi controller with the default values.
+ */
+static void spear_smi_hw_init(struct spear_smi *dev)
+{
+ unsigned long rate = 0;
+ u32 prescale = 0;
+ u32 val;
+
+ rate = clk_get_rate(dev->clk);
+
+ /* functional clock of smi */
+ prescale = DIV_ROUND_UP(rate, dev->clk_rate);
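+	/*
+	 * e.g. an illustrative 166MHz input clock with the default 50MHz
+	 * limit gives prescale = 4, i.e. an SMI clock of ~41.5MHz (the
+	 * divider rounds up so the limit is never exceeded).
+	 */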
+
+ /*
+ * setting the standard values, fast mode, prescaler for
+ * SMI_MAX_CLOCK_FREQ (50MHz) operation and bank enable
+ */
+ val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
+
+ mutex_lock(&dev->lock);
+ /* clear all interrupt conditions */
+ writel(0, dev->io_base + SMI_SR);
+
+ writel(val, dev->io_base + SMI_CR1);
+ mutex_unlock(&dev->lock);
+}
+
+/**
+ * get_flash_index - match chip id from a flash list.
+ * @flash_id: a valid nor flash chip id obtained from board.
+ *
+ * Try to validate the chip id by matching it against a list of known
+ * devices; if it is not found, return a negative error code.  On success,
+ * return the index into the flash_devices array.
+ */
+static int get_flash_index(u32 flash_id)
+{
+ int index;
+
+ /* Matches chip-id to entire list of 'serial-nor flash' ids */
+ for (index = 0; index < ARRAY_SIZE(flash_devices); index++) {
+ if (flash_devices[index].device_id == flash_id)
+ return index;
+ }
+
+ /* Memory chip is not listed and not supported */
+ return -ENODEV;
+}
+
+/**
+ * spear_smi_write_enable - Enable the flash to do write operation
+ * @dev: structure of SMI device
+ * @bank: enable write for flash connected to this bank
+ *
+ * Set write enable latch with Write Enable command.
+ * Returns 0 on success.
+ */
+static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ mutex_lock(&dev->lock);
+ dev->status = 0; /* Will be set in interrupt handler */
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ /* program smi in h/w mode */
+ writel(ctrlreg1 & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ /* give the flash, write enable command */
+ writel((bank << BANK_SHIFT) | WE | TFIE, dev->io_base + SMI_CR2);
+
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ /* restore the ctrl regs state */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+
+ if (ret == 0) {
+ ret = -EIO;
+ dev_err(&dev->pdev->dev,
+ "smi controller failed on write enable\n");
+ } else if (ret > 0) {
+ /* check whether write mode status is set for required bank */
+ if (dev->status & (1 << (bank + WM_SHIFT)))
+ ret = 0;
+ else {
+ dev_err(&dev->pdev->dev, "couldn't enable write\n");
+ ret = -EIO;
+ }
+ }
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
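+/*
+ * Build the erase command in memory as the opcode followed by a 24-bit
+ * big-endian sector address; e.g. erase_cmd 0xd8 at offset 0x10000
+ * produces the byte sequence d8 01 00 00.
+ */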
+static inline u32
+get_sector_erase_cmd(struct spear_snor_flash *flash, u32 offset)
+{
+ u32 cmd;
+ u8 *x = (u8 *)&cmd;
+
+ x[0] = flash->erase_cmd;
+ x[1] = offset >> 16;
+ x[2] = offset >> 8;
+ x[3] = offset;
+
+ return cmd;
+}
+
+/**
+ * spear_smi_erase_sector - erase one sector of flash
+ * @dev: structure of SMI information
+ * @command: erase command to be sent
+ * @bank: bank to which this command needs to be sent
+ * @bytes: size of command
+ *
+ * Erase one sector of flash memory; the offset encoded in @command may be
+ * any address within the sector to be erased.
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int spear_smi_erase_sector(struct spear_smi *dev,
+ u32 bank, u32 command, u32 bytes)
+{
+ u32 ctrlreg1 = 0;
+ int ret;
+
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
+ if (ret)
+ return ret;
+
+ ret = spear_smi_write_enable(dev, bank);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ writel((ctrlreg1 | SW_MODE) & ~WB_MODE, dev->io_base + SMI_CR1);
+
+ /* send command in sw mode */
+ writel(command, dev->io_base + SMI_TR);
+
+ writel((bank << BANK_SHIFT) | SEND | TFIE | (bytes << TX_LEN_SHIFT),
+ dev->io_base + SMI_CR2);
+
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ if (ret == 0) {
+ ret = -EIO;
+ dev_err(&dev->pdev->dev, "sector erase failed\n");
+ } else if (ret > 0)
+ ret = 0; /* success */
+
+ /* restore ctrl regs */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+/**
+ * spear_mtd_erase - perform flash erase operation as requested by user
+ * @mtd: Provides the memory characteristics
+ * @e_info: Provides the erase information
+ *
+ * Erase an address range on the flash chip.  The address range may span
+ * one or more erase sectors.  Returns an error if there is a problem erasing.
+ */
+static int spear_mtd_erase(struct mtd_info *mtd, struct erase_info *e_info)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ u32 addr, command, bank;
+ int len, ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ bank = flash->bank;
+ if (bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ addr = e_info->addr;
+ len = e_info->len;
+
+ mutex_lock(&flash->lock);
+
+ /* now erase sectors in loop */
+ while (len) {
+ command = get_sector_erase_cmd(flash, addr);
+ /* preparing the command for flash */
+ ret = spear_smi_erase_sector(dev, bank, command, 4);
+ if (ret) {
+ e_info->state = MTD_ERASE_FAILED;
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+
+ mutex_unlock(&flash->lock);
+ e_info->state = MTD_ERASE_DONE;
+ mtd_erase_callback(e_info);
+
+ return 0;
+}
+
+/**
+ * spear_mtd_read - performs flash read operation as requested by the user
+ * @mtd: MTD information of the memory bank
+ * @from: Address from which to start read
+ * @len: Number of bytes to be read
+ * @retlen: Fills the Number of bytes actually read
+ * @buf: Fills this after reading
+ *
+ * Read an address range from the flash chip. The address range
+ * may be any size provided it is within the physical boundaries.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u8 *buf)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ void __iomem *src;
+ u32 ctrlreg1, val;
+ int ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ if (flash->bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ /* select address as per bank number */
+ src = flash->base_addr + from;
+
+ mutex_lock(&flash->lock);
+
+ /* wait till previous write/erase is done. */
+ ret = spear_smi_wait_till_ready(dev, flash->bank, SMI_MAX_TIME_OUT);
+ if (ret) {
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+
+ mutex_lock(&dev->lock);
+	/* put smi in hw mode, not write-burst mode */
+ ctrlreg1 = val = readl(dev->io_base + SMI_CR1);
+ val &= ~(SW_MODE | WB_MODE);
+ if (flash->fast_mode)
+ val |= FAST_MODE;
+
+ writel(val, dev->io_base + SMI_CR1);
+
+ memcpy_fromio(buf, src, len);
+
+ /* restore ctrl reg1 */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ mutex_unlock(&dev->lock);
+
+ *retlen = len;
+ mutex_unlock(&flash->lock);
+
+ return 0;
+}
+
+static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
+ void __iomem *dest, const void *src, size_t len)
+{
+ int ret;
+ u32 ctrlreg1;
+
+	/* wait until the previous write command has finished */
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
+ if (ret)
+ return ret;
+
+ /* put smi in write enable */
+ ret = spear_smi_write_enable(dev, bank);
+ if (ret)
+ return ret;
+
+ /* put smi in hw, write burst mode */
+ mutex_lock(&dev->lock);
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ memcpy_toio(dest, src, len);
+
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
+/**
+ * spear_mtd_write - performs write operation as requested by the user.
+ * @mtd: MTD information of the memory bank.
+ * @to: Address to write.
+ * @len: Number of bytes to be written.
+ * @retlen: Number of bytes actually written.
+ * @buf: Buffer from which the data is taken.
+ *
+ * Write an address range to the flash chip. Data must be written in
+ * flash_page_size chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u8 *buf)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ void __iomem *dest;
+ u32 page_offset, page_size;
+ int ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ if (flash->bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ /* select address as per bank number */
+ dest = flash->base_addr + to;
+ mutex_lock(&flash->lock);
+
+ page_offset = (u32)to % flash->page_size;
+
+ /* do if all the bytes fit onto one page */
+ if (page_offset + len <= flash->page_size) {
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, len);
+ if (!ret)
+ *retlen += len;
+ } else {
+ u32 i;
+
+ /* the size of data remaining on the first page */
+ page_size = flash->page_size - page_offset;
+
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf,
+ page_size);
+ if (ret)
+ goto err_write;
+ else
+ *retlen += page_size;
+
+ /* write everything in pagesize chunks */
+ for (i = page_size; i < len; i += page_size) {
+ page_size = len - i;
+ if (page_size > flash->page_size)
+ page_size = flash->page_size;
+
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest + i,
+ buf + i, page_size);
+ if (ret)
+ break;
+ else
+ *retlen += page_size;
+ }
+ }
+
+err_write:
+ mutex_unlock(&flash->lock);
+
+ return ret;
+}
+
+/**
+ * spear_smi_probe_flash - Detects the NOR Flash chip.
+ * @dev: structure of SMI information.
+ * @bank: bank on which flash must be probed
+ *
+ * This routine checks whether a flash chip is present on the given memory
+ * bank.
+ * Returns the index of the probed flash in the flash_devices array.
+ */
+static int spear_smi_probe_flash(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 val = 0;
+
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_PROBE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ dev->status = 0; /* Will be set in interrupt handler */
+ /* put smi in sw mode */
+ val = readl(dev->io_base + SMI_CR1);
+ writel(val | SW_MODE, dev->io_base + SMI_CR1);
+
+ /* send readid command in sw mode */
+ writel(OPCODE_RDID, dev->io_base + SMI_TR);
+
+ val = (bank << BANK_SHIFT) | SEND | (1 << TX_LEN_SHIFT) |
+ (3 << RX_LEN_SHIFT) | TFIE;
+ writel(val, dev->io_base + SMI_CR2);
+
+ /* wait for TFF */
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+ if (ret <= 0) {
+ ret = -ENODEV;
+ goto err_probe;
+ }
+
+ /* get memory chip id */
+ val = readl(dev->io_base + SMI_RR);
+ val &= 0x00ffffff;
+ ret = get_flash_index(val);
+
+err_probe:
+ /* clear sw mode */
+ val = readl(dev->io_base + SMI_CR1);
+ writel(val & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+
+#ifdef CONFIG_OF
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *pp = NULL;
+ const __be32 *addr;
+ u32 val;
+ int len;
+ int i = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ of_property_read_u32(np, "clock-rate", &val);
+ pdata->clk_rate = val;
+
+ pdata->board_flash_info = devm_kzalloc(&pdev->dev,
+ sizeof(*pdata->board_flash_info),
+ GFP_KERNEL);
+
+ /* Fill structs for each subnode (flash device) */
+ while ((pp = of_get_next_child(np, pp))) {
+ struct spear_smi_flash_info *flash_info;
+
+ flash_info = &pdata->board_flash_info[i];
+ pdata->np[i] = pp;
+
+ /* Read base-addr and size from DT */
+ addr = of_get_property(pp, "reg", &len);
+ pdata->board_flash_info->mem_base = be32_to_cpup(&addr[0]);
+ pdata->board_flash_info->size = be32_to_cpup(&addr[1]);
+
+ if (of_get_property(pp, "st,smi-fast-mode", NULL))
+ pdata->board_flash_info->fast_mode = 1;
+
+ i++;
+ }
+
+ pdata->num_flashes = i;
+
+ return 0;
+}
+#else
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
+
+static int spear_smi_setup_banks(struct platform_device *pdev,
+ u32 bank, struct device_node *np)
+{
+ struct spear_smi *dev = platform_get_drvdata(pdev);
+ struct mtd_part_parser_data ppdata = {};
+ struct spear_smi_flash_info *flash_info;
+ struct spear_smi_plat_data *pdata;
+ struct spear_snor_flash *flash;
+ struct mtd_partition *parts = NULL;
+ int count = 0;
+ int flash_index;
+ int ret = 0;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (bank > pdata->num_flashes - 1)
+ return -EINVAL;
+
+ flash_info = &pdata->board_flash_info[bank];
+ if (!flash_info)
+ return -ENODEV;
+
+ flash = devm_kzalloc(&pdev->dev, sizeof(*flash), GFP_ATOMIC);
+ if (!flash)
+ return -ENOMEM;
+ flash->bank = bank;
+ flash->fast_mode = flash_info->fast_mode ? 1 : 0;
+ mutex_init(&flash->lock);
+
+ /* verify whether nor flash is really present on board */
+ flash_index = spear_smi_probe_flash(dev, bank);
+ if (flash_index < 0) {
+ dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
+ return flash_index;
+ }
+ /* map the memory for nor flash chip */
+ flash->base_addr = devm_ioremap(&pdev->dev, flash_info->mem_base,
+ flash_info->size);
+ if (!flash->base_addr)
+ return -EIO;
+
+ dev->flash[bank] = flash;
+ flash->mtd.priv = dev;
+
+ if (flash_info->name)
+ flash->mtd.name = flash_info->name;
+ else
+ flash->mtd.name = flash_devices[flash_index].name;
+
+ flash->mtd.type = MTD_NORFLASH;
+ flash->mtd.writesize = 1;
+ flash->mtd.flags = MTD_CAP_NORFLASH;
+ flash->mtd.size = flash_info->size;
+ flash->mtd.erasesize = flash_devices[flash_index].sectorsize;
+ flash->page_size = flash_devices[flash_index].pagesize;
+ flash->mtd.writebufsize = flash->page_size;
+ flash->erase_cmd = flash_devices[flash_index].erase_cmd;
+ flash->mtd._erase = spear_mtd_erase;
+ flash->mtd._read = spear_mtd_read;
+ flash->mtd._write = spear_mtd_write;
+ flash->dev_id = flash_devices[flash_index].device_id;
+
+ dev_info(&dev->pdev->dev, "mtd .name=%s .size=%llx(%lluM)\n",
+ flash->mtd.name, flash->mtd.size,
+ flash->mtd.size / (1024 * 1024));
+
+ dev_info(&dev->pdev->dev, ".erasesize = 0x%x(%uK)\n",
+ flash->mtd.erasesize, flash->mtd.erasesize / 1024);
+
+#ifndef CONFIG_OF
+ if (flash_info->partitions) {
+ parts = flash_info->partitions;
+ count = flash_info->nr_partitions;
+ }
+#endif
+ ppdata.of_node = np;
+
+ ret = mtd_device_parse_register(&flash->mtd, NULL, &ppdata, parts,
+ count);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * spear_smi_probe - Entry routine
+ * @pdev: platform device structure
+ *
+ * This is the first routine which gets invoked during booting and does all
+ * initialization/allocation work.  The routine looks for available memory
+ * banks and initializes each one it finds.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_smi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spear_smi_plat_data *pdata = NULL;
+ struct spear_smi *dev;
+ struct resource *smi_base;
+ int irq, ret = 0;
+ int i;
+
+ if (np) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ pdev->dev.platform_data = pdata;
+ ret = spear_smi_probe_config_dt(pdev, np);
+ if (ret) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no platform data\n");
+ goto err;
+ }
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no platform data\n");
+ goto err;
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "invalid smi irq\n");
+ goto err;
+ }
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_ATOMIC);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ dev->io_base = devm_ioremap_resource(&pdev->dev, smi_base);
+ if (IS_ERR(dev->io_base)) {
+ ret = PTR_ERR(dev->io_base);
+ goto err;
+ }
+
+ dev->pdev = pdev;
+ dev->clk_rate = pdata->clk_rate;
+
+ if (dev->clk_rate > SMI_MAX_CLOCK_FREQ)
+ dev->clk_rate = SMI_MAX_CLOCK_FREQ;
+
+ dev->num_flashes = pdata->num_flashes;
+
+ if (dev->num_flashes > MAX_NUM_FLASH_CHIP) {
+ dev_err(&pdev->dev, "exceeding max number of flashes\n");
+ dev->num_flashes = MAX_NUM_FLASH_CHIP;
+ }
+
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ ret = PTR_ERR(dev->clk);
+ goto err;
+ }
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret)
+ goto err;
+
+ ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
+ pdev->name, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
+ goto err_irq;
+ }
+
+ mutex_init(&dev->lock);
+ init_waitqueue_head(&dev->cmd_complete);
+ spear_smi_hw_init(dev);
+ platform_set_drvdata(pdev, dev);
+
+ /* loop for each serial nor-flash which is connected to smi */
+ for (i = 0; i < dev->num_flashes; i++) {
+ ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "bank setup failed\n");
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ clk_disable_unprepare(dev->clk);
+err:
+ return ret;
+}
+
+/**
+ * spear_smi_remove - Exit routine
+ * @pdev: platform device structure
+ *
+ * Free all allocations and delete the partitions.
+ */
+static int spear_smi_remove(struct platform_device *pdev)
+{
+ struct spear_smi *dev;
+ struct spear_snor_flash *flash;
+ int ret, i;
+
+ dev = platform_get_drvdata(pdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "dev is null\n");
+ return -ENODEV;
+ }
+
+ /* clean up for all nor flash */
+ for (i = 0; i < dev->num_flashes; i++) {
+ flash = dev->flash[i];
+ if (!flash)
+ continue;
+
+ /* clean up mtd stuff */
+ ret = mtd_device_unregister(&flash->mtd);
+ if (ret)
+ dev_err(&pdev->dev, "error removing mtd\n");
+ }
+
+ clk_disable_unprepare(dev->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int spear_smi_suspend(struct device *dev)
+{
+ struct spear_smi *sdev = dev_get_drvdata(dev);
+
+ if (sdev && sdev->clk)
+ clk_disable_unprepare(sdev->clk);
+
+ return 0;
+}
+
+static int spear_smi_resume(struct device *dev)
+{
+ struct spear_smi *sdev = dev_get_drvdata(dev);
+ int ret = -EPERM;
+
+ if (sdev && sdev->clk)
+ ret = clk_prepare_enable(sdev->clk);
+
+ if (!ret)
+ spear_smi_hw_init(sdev);
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id spear_smi_id_table[] = {
+ { .compatible = "st,spear600-smi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_smi_id_table);
+#endif
+
+static struct platform_driver spear_smi_driver = {
+ .driver = {
+ .name = "smi",
+ .bus = &platform_bus_type,
+ .of_match_table = of_match_ptr(spear_smi_id_table),
+ .pm = &spear_smi_pm_ops,
+ },
+ .probe = spear_smi_probe,
+ .remove = spear_smi_remove,
+};
+module_platform_driver(spear_smi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
+MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
new file mode 100644
index 000000000..c63ecbcad
--- /dev/null
+++ b/drivers/mtd/devices/sst25l.c
@@ -0,0 +1,431 @@
+/*
+ * sst25l.c
+ *
+ * Driver for SST25L SPI Flash chips
+ *
+ * Copyright © 2009 Bluewater Systems Ltd
+ * Author: Andre Renaud <andre@bluewatersys.com>
+ * Author: Ryan Mallon
+ *
+ * Based on m25p80.c
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+/* Erases can take up to 3 seconds! */
+#define MAX_READY_WAIT_JIFFIES msecs_to_jiffies(3000)
+
+#define SST25L_CMD_WRSR 0x01 /* Write status register */
+#define SST25L_CMD_WRDI 0x04 /* Write disable */
+#define SST25L_CMD_RDSR 0x05 /* Read status register */
+#define SST25L_CMD_WREN 0x06 /* Write enable */
+#define SST25L_CMD_READ 0x03 /* High speed read */
+
+#define SST25L_CMD_EWSR 0x50 /* Enable write status register */
+#define SST25L_CMD_SECTOR_ERASE 0x20 /* Erase sector */
+#define SST25L_CMD_READ_ID 0x90 /* Read device ID */
+#define SST25L_CMD_AAI_PROGRAM 0xaf /* Auto address increment */
+
+#define SST25L_STATUS_BUSY (1 << 0) /* Chip is busy */
+#define SST25L_STATUS_WREN (1 << 1) /* Write enabled */
+#define SST25L_STATUS_BP0 (1 << 2) /* Block protection 0 */
+#define SST25L_STATUS_BP1 (1 << 3) /* Block protection 1 */
+
+struct sst25l_flash {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct mtd_info mtd;
+};
+
+struct flash_info {
+ const char *name;
+ uint16_t device_id;
+ unsigned page_size;
+ unsigned nr_pages;
+ unsigned erase_size;
+};
+
+#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
+
+static struct flash_info sst25l_flash_info[] = {
+ {"sst25lf020a", 0xbf43, 256, 1024, 4096},
+ {"sst25lf040a", 0xbf44, 256, 2048, 4096},
+};
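+
+/*
+ * e.g. a JEDEC id of 0xbf43 matches "sst25lf020a": 1024 pages of
+ * 256 bytes (256KiB total) with 4KiB erase sectors.
+ */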
+
+static int sst25l_status(struct sst25l_flash *flash, int *status)
+{
+ struct spi_message m;
+ struct spi_transfer t;
+ unsigned char cmd_resp[2];
+ int err;
+
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(struct spi_transfer));
+
+ cmd_resp[0] = SST25L_CMD_RDSR;
+ cmd_resp[1] = 0xff;
+ t.tx_buf = cmd_resp;
+ t.rx_buf = cmd_resp;
+ t.len = sizeof(cmd_resp);
+ spi_message_add_tail(&t, &m);
+ err = spi_sync(flash->spi, &m);
+ if (err < 0)
+ return err;
+
+ *status = cmd_resp[1];
+ return 0;
+}
+
+static int sst25l_write_enable(struct sst25l_flash *flash, int enable)
+{
+ unsigned char command[2];
+ int status, err;
+
+ command[0] = enable ? SST25L_CMD_WREN : SST25L_CMD_WRDI;
+ err = spi_write(flash->spi, command, 1);
+ if (err)
+ return err;
+
+ command[0] = SST25L_CMD_EWSR;
+ err = spi_write(flash->spi, command, 1);
+ if (err)
+ return err;
+
+ command[0] = SST25L_CMD_WRSR;
+ command[1] = enable ? 0 : SST25L_STATUS_BP0 | SST25L_STATUS_BP1;
+ err = spi_write(flash->spi, command, 2);
+ if (err)
+ return err;
+
+ if (enable) {
+ err = sst25l_status(flash, &status);
+ if (err)
+ return err;
+ if (!(status & SST25L_STATUS_WREN))
+ return -EROFS;
+ }
+
+ return 0;
+}
+
+static int sst25l_wait_till_ready(struct sst25l_flash *flash)
+{
+ unsigned long deadline;
+ int status, err;
+
+ deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+ do {
+ err = sst25l_status(flash, &status);
+ if (err)
+ return err;
+ if (!(status & SST25L_STATUS_BUSY))
+ return 0;
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, deadline));
+
+ return -ETIMEDOUT;
+}
+
+static int sst25l_erase_sector(struct sst25l_flash *flash, uint32_t offset)
+{
+ unsigned char command[4];
+ int err;
+
+ err = sst25l_write_enable(flash, 1);
+ if (err)
+ return err;
+
+ command[0] = SST25L_CMD_SECTOR_ERASE;
+ command[1] = offset >> 16;
+ command[2] = offset >> 8;
+ command[3] = offset;
+ err = spi_write(flash->spi, command, 4);
+ if (err)
+ return err;
+
+ err = sst25l_wait_till_ready(flash);
+ if (err)
+ return err;
+
+ return sst25l_write_enable(flash, 0);
+}
+
+static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct sst25l_flash *flash = to_sst25l_flash(mtd);
+ uint32_t addr, end;
+ int err;
+
+ /* Sanity checks */
+ if ((uint32_t)instr->len % mtd->erasesize)
+ return -EINVAL;
+
+ if ((uint32_t)instr->addr % mtd->erasesize)
+ return -EINVAL;
+
+ addr = instr->addr;
+ end = addr + instr->len;
+
+ mutex_lock(&flash->lock);
+
+ err = sst25l_wait_till_ready(flash);
+ if (err) {
+ mutex_unlock(&flash->lock);
+ return err;
+ }
+
+ while (addr < end) {
+ err = sst25l_erase_sector(flash, addr);
+ if (err) {
+ mutex_unlock(&flash->lock);
+ instr->state = MTD_ERASE_FAILED;
+ dev_err(&flash->spi->dev, "Erase failed\n");
+ return err;
+ }
+
+ addr += mtd->erasesize;
+ }
+
+ mutex_unlock(&flash->lock);
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+}
+
+static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ struct sst25l_flash *flash = to_sst25l_flash(mtd);
+ struct spi_transfer transfer[2];
+ struct spi_message message;
+ unsigned char command[4];
+ int ret;
+
+ spi_message_init(&message);
+ memset(&transfer, 0, sizeof(transfer));
+
+ command[0] = SST25L_CMD_READ;
+ command[1] = from >> 16;
+ command[2] = from >> 8;
+ command[3] = from;
+
+ transfer[0].tx_buf = command;
+ transfer[0].len = sizeof(command);
+ spi_message_add_tail(&transfer[0], &message);
+
+ transfer[1].rx_buf = buf;
+ transfer[1].len = len;
+ spi_message_add_tail(&transfer[1], &message);
+
+ mutex_lock(&flash->lock);
+
+ /* Wait for previous write/erase to complete */
+ ret = sst25l_wait_till_ready(flash);
+ if (ret) {
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+
+ spi_sync(flash->spi, &message);
+
+ if (retlen && message.actual_length > sizeof(command))
+ *retlen += message.actual_length - sizeof(command);
+
+ mutex_unlock(&flash->lock);
+ return 0;
+}
+
+static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const unsigned char *buf)
+{
+ struct sst25l_flash *flash = to_sst25l_flash(mtd);
+ int i, j, ret, bytes, copied = 0;
+ unsigned char command[5];
+
+ if ((uint32_t)to % mtd->writesize)
+ return -EINVAL;
+
+ mutex_lock(&flash->lock);
+
+ ret = sst25l_write_enable(flash, 1);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < len; i += mtd->writesize) {
+ ret = sst25l_wait_till_ready(flash);
+ if (ret)
+ goto out;
+
+ /* Write the first byte of the page */
+ command[0] = SST25L_CMD_AAI_PROGRAM;
+ command[1] = (to + i) >> 16;
+ command[2] = (to + i) >> 8;
+ command[3] = (to + i);
+ command[4] = buf[i];
+ ret = spi_write(flash->spi, command, 5);
+ if (ret < 0)
+ goto out;
+ copied++;
+
+ /*
+ * Write the remaining bytes using auto address
+ * increment mode
+ */
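+		/*
+		 * Each subsequent two-byte transfer re-sends only the AAI
+		 * opcode plus one data byte; the chip's internal address
+		 * counter advances on its own.
+		 */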
+ bytes = min_t(uint32_t, mtd->writesize, len - i);
+ for (j = 1; j < bytes; j++, copied++) {
+ ret = sst25l_wait_till_ready(flash);
+ if (ret)
+ goto out;
+
+ command[1] = buf[i + j];
+ ret = spi_write(flash->spi, command, 2);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ ret = sst25l_write_enable(flash, 0);
+
+ if (retlen)
+ *retlen = copied;
+
+ mutex_unlock(&flash->lock);
+ return ret;
+}
+
+static struct flash_info *sst25l_match_device(struct spi_device *spi)
+{
+ struct flash_info *flash_info = NULL;
+ struct spi_message m;
+ struct spi_transfer t;
+ unsigned char cmd_resp[6];
+ int i, err;
+ uint16_t id;
+
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(struct spi_transfer));
+
+ cmd_resp[0] = SST25L_CMD_READ_ID;
+ cmd_resp[1] = 0;
+ cmd_resp[2] = 0;
+ cmd_resp[3] = 0;
+ cmd_resp[4] = 0xff;
+ cmd_resp[5] = 0xff;
+ t.tx_buf = cmd_resp;
+ t.rx_buf = cmd_resp;
+ t.len = sizeof(cmd_resp);
+ spi_message_add_tail(&t, &m);
+ err = spi_sync(spi, &m);
+ if (err < 0) {
+ dev_err(&spi->dev, "error reading device id\n");
+ return NULL;
+ }
+
+ id = (cmd_resp[4] << 8) | cmd_resp[5];
+
+ for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++)
+ if (sst25l_flash_info[i].device_id == id)
+ flash_info = &sst25l_flash_info[i];
+
+ if (!flash_info)
+ dev_err(&spi->dev, "unknown id %.4x\n", id);
+
+ return flash_info;
+}
+
+static int sst25l_probe(struct spi_device *spi)
+{
+ struct flash_info *flash_info;
+ struct sst25l_flash *flash;
+ struct flash_platform_data *data;
+ int ret;
+
+ flash_info = sst25l_match_device(spi);
+ if (!flash_info)
+ return -ENODEV;
+
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+
+ flash->spi = spi;
+ mutex_init(&flash->lock);
+ spi_set_drvdata(spi, flash);
+
+ data = dev_get_platdata(&spi->dev);
+ if (data && data->name)
+ flash->mtd.name = data->name;
+ else
+ flash->mtd.name = dev_name(&spi->dev);
+
+ flash->mtd.type = MTD_NORFLASH;
+ flash->mtd.flags = MTD_CAP_NORFLASH;
+ flash->mtd.erasesize = flash_info->erase_size;
+ flash->mtd.writesize = flash_info->page_size;
+ flash->mtd.writebufsize = flash_info->page_size;
+ flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
+ flash->mtd._erase = sst25l_erase;
+ flash->mtd._read = sst25l_read;
+ flash->mtd._write = sst25l_write;
+
+ dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
+ (long long)flash->mtd.size >> 10);
+
+ pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ flash->mtd.name,
+ (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
+ flash->mtd.erasesize, flash->mtd.erasesize / 1024,
+ flash->mtd.numeraseregions);
+
+
+ ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
+ data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+ if (ret)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int sst25l_remove(struct spi_device *spi)
+{
+ struct sst25l_flash *flash = spi_get_drvdata(spi);
+
+ return mtd_device_unregister(&flash->mtd);
+}
+
+static struct spi_driver sst25l_driver = {
+ .driver = {
+ .name = "sst25l",
+ .owner = THIS_MODULE,
+ },
+ .probe = sst25l_probe,
+ .remove = sst25l_remove,
+};
+
+module_spi_driver(sst25l_driver);
+
+MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
+MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
+ "Ryan Mallon");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
new file mode 100644
index 000000000..3060025c8
--- /dev/null
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -0,0 +1,2178 @@
+/*
+ * st_spi_fsm.c - ST Fast Sequence Mode (FSM) Serial Flash Controller
+ *
+ * Author: Angus Clark <angus.clark@st.com>
+ *
+ * Copyright (C) 2010-2014 STMicroelectronics Limited
+ *
+ * JEDEC probe based on drivers/mtd/devices/m25p80.c
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+
+#include "serial_flash_cmds.h"
+
+/*
+ * FSM SPI Controller Registers
+ */
+#define SPI_CLOCKDIV 0x0010
+#define SPI_MODESELECT 0x0018
+#define SPI_CONFIGDATA 0x0020
+#define SPI_STA_MODE_CHANGE 0x0028
+#define SPI_FAST_SEQ_TRANSFER_SIZE 0x0100
+#define SPI_FAST_SEQ_ADD1 0x0104
+#define SPI_FAST_SEQ_ADD2 0x0108
+#define SPI_FAST_SEQ_ADD_CFG 0x010c
+#define SPI_FAST_SEQ_OPC1 0x0110
+#define SPI_FAST_SEQ_OPC2 0x0114
+#define SPI_FAST_SEQ_OPC3 0x0118
+#define SPI_FAST_SEQ_OPC4 0x011c
+#define SPI_FAST_SEQ_OPC5 0x0120
+#define SPI_MODE_BITS 0x0124
+#define SPI_DUMMY_BITS 0x0128
+#define SPI_FAST_SEQ_FLASH_STA_DATA 0x012c
+#define SPI_FAST_SEQ_1 0x0130
+#define SPI_FAST_SEQ_2 0x0134
+#define SPI_FAST_SEQ_3 0x0138
+#define SPI_FAST_SEQ_4 0x013c
+#define SPI_FAST_SEQ_CFG 0x0140
+#define SPI_FAST_SEQ_STA 0x0144
+#define SPI_QUAD_BOOT_SEQ_INIT_1 0x0148
+#define SPI_QUAD_BOOT_SEQ_INIT_2 0x014c
+#define SPI_QUAD_BOOT_READ_SEQ_1 0x0150
+#define SPI_QUAD_BOOT_READ_SEQ_2 0x0154
+#define SPI_PROGRAM_ERASE_TIME 0x0158
+#define SPI_MULT_PAGE_REPEAT_SEQ_1 0x015c
+#define SPI_MULT_PAGE_REPEAT_SEQ_2 0x0160
+#define SPI_STATUS_WR_TIME_REG 0x0164
+#define SPI_FAST_SEQ_DATA_REG 0x0300
+
+/*
+ * Register: SPI_MODESELECT
+ */
+#define SPI_MODESELECT_CONTIG 0x01
+#define SPI_MODESELECT_FASTREAD 0x02
+#define SPI_MODESELECT_DUALIO 0x04
+#define SPI_MODESELECT_FSM 0x08
+#define SPI_MODESELECT_QUADBOOT 0x10
+
+/*
+ * Register: SPI_CONFIGDATA
+ */
+#define SPI_CFG_DEVICE_ST 0x1
+#define SPI_CFG_DEVICE_ATMEL 0x4
+#define SPI_CFG_MIN_CS_HIGH(x) (((x) & 0xfff) << 4)
+#define SPI_CFG_CS_SETUPHOLD(x) (((x) & 0xff) << 16)
+#define SPI_CFG_DATA_HOLD(x) (((x) & 0xff) << 24)
+
+#define SPI_CFG_DEFAULT_MIN_CS_HIGH SPI_CFG_MIN_CS_HIGH(0x0AA)
+#define SPI_CFG_DEFAULT_CS_SETUPHOLD SPI_CFG_CS_SETUPHOLD(0xA0)
+#define SPI_CFG_DEFAULT_DATA_HOLD SPI_CFG_DATA_HOLD(0x00)
+
+/*
+ * Register: SPI_FAST_SEQ_TRANSFER_SIZE
+ */
+#define TRANSFER_SIZE(x) ((x) * 8)
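+/* Callers pass a byte count; the transfer size register is programmed in bits */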
+
+/*
+ * Register: SPI_FAST_SEQ_ADD_CFG
+ */
+#define ADR_CFG_CYCLES_ADD1(x) ((x) << 0)
+#define ADR_CFG_PADS_1_ADD1 (0x0 << 6)
+#define ADR_CFG_PADS_2_ADD1 (0x1 << 6)
+#define ADR_CFG_PADS_4_ADD1 (0x3 << 6)
+#define ADR_CFG_CSDEASSERT_ADD1 (1 << 8)
+#define ADR_CFG_CYCLES_ADD2(x) ((x) << (0+16))
+#define ADR_CFG_PADS_1_ADD2 (0x0 << (6+16))
+#define ADR_CFG_PADS_2_ADD2 (0x1 << (6+16))
+#define ADR_CFG_PADS_4_ADD2 (0x3 << (6+16))
+#define ADR_CFG_CSDEASSERT_ADD2 (1 << (8+16))
+
+/*
+ * Register: SPI_FAST_SEQ_n
+ */
+#define SEQ_OPC_OPCODE(x) ((x) << 0)
+#define SEQ_OPC_CYCLES(x) ((x) << 8)
+#define SEQ_OPC_PADS_1 (0x0 << 14)
+#define SEQ_OPC_PADS_2 (0x1 << 14)
+#define SEQ_OPC_PADS_4 (0x3 << 14)
+#define SEQ_OPC_CSDEASSERT (1 << 16)
+
+/*
+ * Register: SPI_FAST_SEQ_CFG
+ */
+#define SEQ_CFG_STARTSEQ (1 << 0)
+#define SEQ_CFG_SWRESET (1 << 5)
+#define SEQ_CFG_CSDEASSERT (1 << 6)
+#define SEQ_CFG_READNOTWRITE (1 << 7)
+#define SEQ_CFG_ERASE (1 << 8)
+#define SEQ_CFG_PADS_1 (0x0 << 16)
+#define SEQ_CFG_PADS_2 (0x1 << 16)
+#define SEQ_CFG_PADS_4 (0x3 << 16)
+
+/*
+ * Register: SPI_MODE_BITS
+ */
+#define MODE_DATA(x)		((x) & 0xff)
+#define MODE_CYCLES(x)		(((x) & 0x3f) << 16)
+#define MODE_PADS_1 (0x0 << 22)
+#define MODE_PADS_2 (0x1 << 22)
+#define MODE_PADS_4 (0x3 << 22)
+#define DUMMY_CSDEASSERT (1 << 24)
+
+/*
+ * Register: SPI_DUMMY_BITS
+ */
+#define DUMMY_CYCLES(x)		(((x) & 0x3f) << 16)
+#define DUMMY_PADS_1 (0x0 << 22)
+#define DUMMY_PADS_2 (0x1 << 22)
+#define DUMMY_PADS_4 (0x3 << 22)
+#define DUMMY_CSDEASSERT (1 << 24)
+
+/*
+ * Register: SPI_FAST_SEQ_FLASH_STA_DATA
+ */
+#define STA_DATA_BYTE1(x)	(((x) & 0xff) << 0)
+#define STA_DATA_BYTE2(x)	(((x) & 0xff) << 8)
+#define STA_PADS_1 (0x0 << 16)
+#define STA_PADS_2 (0x1 << 16)
+#define STA_PADS_4 (0x3 << 16)
+#define STA_CSDEASSERT (0x1 << 20)
+#define STA_RDNOTWR (0x1 << 21)
+
+/*
+ * FSM SPI Instruction Opcodes
+ */
+#define STFSM_OPC_CMD 0x1
+#define STFSM_OPC_ADD 0x2
+#define STFSM_OPC_STA 0x3
+#define STFSM_OPC_MODE 0x4
+#define STFSM_OPC_DUMMY 0x5
+#define STFSM_OPC_DATA 0x6
+#define STFSM_OPC_WAIT 0x7
+#define STFSM_OPC_JUMP 0x8
+#define STFSM_OPC_GOTO 0x9
+#define STFSM_OPC_STOP 0xF
+
+/*
+ * FSM SPI Instructions (== opcode + operand).
+ */
+#define STFSM_INSTR(cmd, op) ((cmd) | ((op) << 4))
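+/* e.g. STFSM_INST_CMD1 = STFSM_OPC_CMD | (1 << 4) = 0x11 */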
+
+#define STFSM_INST_CMD1 STFSM_INSTR(STFSM_OPC_CMD, 1)
+#define STFSM_INST_CMD2 STFSM_INSTR(STFSM_OPC_CMD, 2)
+#define STFSM_INST_CMD3 STFSM_INSTR(STFSM_OPC_CMD, 3)
+#define STFSM_INST_CMD4 STFSM_INSTR(STFSM_OPC_CMD, 4)
+#define STFSM_INST_CMD5 STFSM_INSTR(STFSM_OPC_CMD, 5)
+#define STFSM_INST_ADD1 STFSM_INSTR(STFSM_OPC_ADD, 1)
+#define STFSM_INST_ADD2 STFSM_INSTR(STFSM_OPC_ADD, 2)
+
+#define STFSM_INST_DATA_WRITE STFSM_INSTR(STFSM_OPC_DATA, 1)
+#define STFSM_INST_DATA_READ STFSM_INSTR(STFSM_OPC_DATA, 2)
+
+#define STFSM_INST_STA_RD1 STFSM_INSTR(STFSM_OPC_STA, 0x1)
+#define STFSM_INST_STA_WR1 STFSM_INSTR(STFSM_OPC_STA, 0x1)
+#define STFSM_INST_STA_RD2 STFSM_INSTR(STFSM_OPC_STA, 0x2)
+#define STFSM_INST_STA_WR1_2 STFSM_INSTR(STFSM_OPC_STA, 0x3)
+
+#define STFSM_INST_MODE STFSM_INSTR(STFSM_OPC_MODE, 0)
+#define STFSM_INST_DUMMY STFSM_INSTR(STFSM_OPC_DUMMY, 0)
+#define STFSM_INST_WAIT STFSM_INSTR(STFSM_OPC_WAIT, 0)
+#define STFSM_INST_STOP STFSM_INSTR(STFSM_OPC_STOP, 0)
+
+#define STFSM_DEFAULT_EMI_FREQ 100000000UL /* 100 MHz */
+#define STFSM_DEFAULT_WR_TIME	((STFSM_DEFAULT_EMI_FREQ * 15) / 1000)	/* 15ms, in EMI cycles */
+
+#define STFSM_FLASH_SAFE_FREQ 10000000UL /* 10 MHz */
+
+#define STFSM_MAX_WAIT_SEQ_MS 1000 /* FSM execution time */
+
+/* S25FLxxxS commands */
+#define S25FL_CMD_WRITE4_1_1_4 0x34
+#define S25FL_CMD_SE4 0xdc
+#define S25FL_CMD_CLSR 0x30
+#define S25FL_CMD_DYBWR 0xe1
+#define S25FL_CMD_DYBRD 0xe0
+#define S25FL_CMD_WRITE4 0x12 /* Note, opcode clashes with
+ * 'SPINOR_OP_WRITE_1_4_4'
+ * as found on N25Qxxx devices! */
+
+/* Status register */
+#define FLASH_STATUS_BUSY 0x01
+#define FLASH_STATUS_WEL 0x02
+#define FLASH_STATUS_BP0 0x04
+#define FLASH_STATUS_BP1 0x08
+#define FLASH_STATUS_BP2 0x10
+#define FLASH_STATUS_SRWP0 0x80
+#define FLASH_STATUS_TIMEOUT 0xff
+/* S25FL Error Flags */
+#define S25FL_STATUS_E_ERR 0x20
+#define S25FL_STATUS_P_ERR 0x40
+
+#define N25Q_CMD_WRVCR 0x81
+#define N25Q_CMD_RDVCR 0x85
+#define N25Q_CMD_RDVECR 0x65
+#define N25Q_CMD_RDNVCR 0xb5
+#define N25Q_CMD_WRNVCR 0xb1
+
+#define FLASH_PAGESIZE 256 /* In Bytes */
+#define FLASH_PAGESIZE_32 (FLASH_PAGESIZE / 4) /* In uint32_t */
+#define FLASH_MAX_BUSY_WAIT (300 * HZ) /* Maximum 'CHIPERASE' time */
+
+/*
+ * Flags to tweak operation of default read/write/erase routines
+ */
+#define CFG_READ_TOGGLE_32BIT_ADDR 0x00000001
+#define CFG_WRITE_TOGGLE_32BIT_ADDR 0x00000002
+#define CFG_ERASESEC_TOGGLE_32BIT_ADDR 0x00000008
+#define CFG_S25FL_CHECK_ERROR_FLAGS 0x00000010
+
+struct stfsm_seq {
+ uint32_t data_size;
+ uint32_t addr1;
+ uint32_t addr2;
+ uint32_t addr_cfg;
+ uint32_t seq_opc[5];
+ uint32_t mode;
+ uint32_t dummy;
+ uint32_t status;
+ uint8_t seq[16];
+ uint32_t seq_cfg;
+} __packed __aligned(4);
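+/*
+ * Note: the field order above mirrors the SPI_FAST_SEQ_* register block,
+ * starting at SPI_FAST_SEQ_TRANSFER_SIZE (0x100) and ending at
+ * SPI_FAST_SEQ_CFG (0x140), so a sequence can be programmed with a single
+ * word-by-word copy (see stfsm_load_seq()).
+ */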
+
+struct stfsm {
+ struct device *dev;
+ void __iomem *base;
+ struct resource *region;
+ struct mtd_info mtd;
+ struct mutex lock;
+ struct flash_info *info;
+ struct clk *clk;
+
+ uint32_t configuration;
+ uint32_t fifo_dir_delay;
+ bool booted_from_spi;
+ bool reset_signal;
+ bool reset_por;
+
+ struct stfsm_seq stfsm_seq_read;
+ struct stfsm_seq stfsm_seq_write;
+ struct stfsm_seq stfsm_seq_en_32bit_addr;
+};
+
+/* Parameters to configure a READ or WRITE FSM sequence */
+struct seq_rw_config {
+ uint32_t flags; /* flags to support config */
+ uint8_t cmd; /* FLASH command */
+ int write; /* Write Sequence */
+ uint8_t addr_pads; /* No. of addr pads (MODE & DUMMY) */
+ uint8_t data_pads; /* No. of data pads */
+ uint8_t mode_data; /* MODE data */
+ uint8_t mode_cycles; /* No. of MODE cycles */
+ uint8_t dummy_cycles; /* No. of DUMMY cycles */
+};
+
+/* SPI Flash Device Table */
+struct flash_info {
+ char *name;
+ /*
+ * JEDEC id zero means "no ID" (most older chips); otherwise it has
+ * a high byte of zero plus three data bytes: the manufacturer id,
+ * then a two byte device id.
+ */
+ u32 jedec_id;
+ u16 ext_id;
+ /*
+ * The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+ u32 flags;
+ /*
+ * Note, where FAST_READ is supported, freq_max specifies the
+ * FAST_READ frequency, not the READ frequency.
+ */
+ u32 max_freq;
+ int (*config)(struct stfsm *);
+};
+
+static int stfsm_n25q_config(struct stfsm *fsm);
+static int stfsm_mx25_config(struct stfsm *fsm);
+static int stfsm_s25fl_config(struct stfsm *fsm);
+static int stfsm_w25q_config(struct stfsm *fsm);
+
+static struct flash_info flash_types[] = {
+ /*
+ * ST Microelectronics/Numonyx --
+	 * (newer production versions may have feature updates,
+	 * e.g. a faster operating frequency)
+ */
+#define M25P_FLAG (FLASH_FLAG_READ_WRITE | FLASH_FLAG_READ_FAST)
+ { "m25p40", 0x202013, 0, 64 * 1024, 8, M25P_FLAG, 25, NULL },
+ { "m25p80", 0x202014, 0, 64 * 1024, 16, M25P_FLAG, 25, NULL },
+ { "m25p16", 0x202015, 0, 64 * 1024, 32, M25P_FLAG, 25, NULL },
+ { "m25p32", 0x202016, 0, 64 * 1024, 64, M25P_FLAG, 50, NULL },
+ { "m25p64", 0x202017, 0, 64 * 1024, 128, M25P_FLAG, 50, NULL },
+ { "m25p128", 0x202018, 0, 256 * 1024, 64, M25P_FLAG, 50, NULL },
+
+#define M25PX_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_WRITE_1_1_2)
+ { "m25px32", 0x207116, 0, 64 * 1024, 64, M25PX_FLAG, 75, NULL },
+ { "m25px64", 0x207117, 0, 64 * 1024, 128, M25PX_FLAG, 75, NULL },
+
+ /* Macronix MX25xxx
+ * - Support for 'FLASH_FLAG_WRITE_1_4_4' is omitted for devices
+ * where operating frequency must be reduced.
+ */
+#define MX25_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_SE_4K | \
+ FLASH_FLAG_SE_32K)
+	{ "mx25l3255e",  0xc29e16, 0, 64 * 1024,  64,
+	  (MX25_FLAG | FLASH_FLAG_WRITE_1_4_4), 86,
+	  stfsm_mx25_config },
+	{ "mx25l25635e", 0xc22019, 0, 64 * 1024, 512,
+	  (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
+	  stfsm_mx25_config },
+	{ "mx25l25655e", 0xc22619, 0, 64 * 1024, 512,
+	  (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
+	  stfsm_mx25_config },
+
+#define N25Q_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_2 | \
+ FLASH_FLAG_WRITE_1_2_2 | \
+ FLASH_FLAG_WRITE_1_1_4 | \
+ FLASH_FLAG_WRITE_1_4_4)
+ { "n25q128", 0x20ba18, 0, 64 * 1024, 256, N25Q_FLAG, 108,
+ stfsm_n25q_config },
+ { "n25q256", 0x20ba19, 0, 64 * 1024, 512,
+ N25Q_FLAG | FLASH_FLAG_32BIT_ADDR, 108, stfsm_n25q_config },
+
+ /*
+ * Spansion S25FLxxxP
+ * - 256KiB and 64KiB sector variants (identified by ext. JEDEC)
+ */
+#define S25FLXXXP_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_4 | \
+ FLASH_FLAG_READ_FAST)
+ { "s25fl032p", 0x010215, 0x4d00, 64 * 1024, 64, S25FLXXXP_FLAG, 80,
+	  stfsm_s25fl_config },
+ { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config },
+
+ /*
+ * Spansion S25FLxxxS
+ * - 256KiB and 64KiB sector variants (identified by ext. JEDEC)
+	 * - RESET# signal supported by the die but not brought out on all
+ * package types. The package type is a function of board design,
+ * so this information is captured in the board's flags.
+ * - Supports 'DYB' sector protection. Depending on variant, sectors
+ * may default to locked state on power-on.
+ */
+#define S25FLXXXS_FLAG (S25FLXXXP_FLAG | \
+ FLASH_FLAG_RESET | \
+ FLASH_FLAG_DYB_LOCKING)
+ { "s25fl128s0", 0x012018, 0x0300, 256 * 1024, 64, S25FLXXXS_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl128s1", 0x012018, 0x0301, 64 * 1024, 256, S25FLXXXS_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl256s0", 0x010219, 0x4d00, 256 * 1024, 128,
+ S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config },
+ { "s25fl256s1", 0x010219, 0x4d01, 64 * 1024, 512,
+ S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config },
+
+ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+#define W25X_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_WRITE_1_1_2)
+ { "w25x40", 0xef3013, 0, 64 * 1024, 8, W25X_FLAG, 75, NULL },
+ { "w25x80", 0xef3014, 0, 64 * 1024, 16, W25X_FLAG, 75, NULL },
+ { "w25x16", 0xef3015, 0, 64 * 1024, 32, W25X_FLAG, 75, NULL },
+ { "w25x32", 0xef3016, 0, 64 * 1024, 64, W25X_FLAG, 75, NULL },
+ { "w25x64", 0xef3017, 0, 64 * 1024, 128, W25X_FLAG, 75, NULL },
+
+ /* Winbond -- w25q "blocks" are 64K, "sectors" are 4KiB */
+#define W25Q_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_4)
+ { "w25q80", 0xef4014, 0, 64 * 1024, 16, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q16", 0xef4015, 0, 64 * 1024, 32, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q32", 0xef4016, 0, 64 * 1024, 64, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q64", 0xef4017, 0, 64 * 1024, 128, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+
+ /* Sentinel */
+ { NULL, 0x000000, 0, 0, 0, 0, 0, NULL },
+};
+
+/*
+ * FSM message sequence configurations:
+ *
+ * All configs are presented in order of preference
+ */
+
+/* Default READ configurations, in order of preference */
+static struct seq_rw_config default_read_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/* Default WRITE configurations */
+static struct seq_rw_config default_write_configs[] = {
+ {FLASH_FLAG_WRITE_1_4_4, SPINOR_OP_WRITE_1_4_4, 1, 4, 4, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_1_4, SPINOR_OP_WRITE_1_1_4, 1, 1, 4, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_2_2, SPINOR_OP_WRITE_1_2_2, 1, 2, 2, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_1_2, SPINOR_OP_WRITE_1_1_2, 1, 1, 2, 0x00, 0, 0},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_WRITE, 1, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [N25Qxxx] Configuration
+ */
+#define N25Q_VCR_DUMMY_CYCLES(x) (((x) & 0xf) << 4)
+#define N25Q_VCR_XIP_DISABLED ((uint8_t)0x1 << 3)
+#define N25Q_VCR_WRAP_CONT 0x3
+
+/* N25Q 3-byte Address READ configurations
+ * - 'FAST' variants configured for 8 dummy cycles.
+ *
+ * Note, the number of dummy cycles used for 'FAST' READ operations is
+ * configurable and would normally be tuned according to the READ command and
+ * operating frequency. However, this applies universally to all 'FAST' READ
+ * commands, including those used by the SPIBoot controller, and remains in
+ * force until the device is power-cycled. Since the SPIBoot controller is
+ * hard-wired to use 8 dummy cycles, we must configure the device to also use 8
+ * cycles.
+ */
+static struct seq_rw_config n25q_read3_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/* N25Q 4-byte Address READ configurations
+ * - use special 4-byte address READ commands (reduces overheads, and
+ * reduces risk of hitting watchdog reset issues).
+ * - 'FAST' variants configured for 8 dummy cycles (see note above.)
+ */
+static struct seq_rw_config n25q_read4_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [MX25xxx] Configuration
+ */
+#define MX25_STATUS_QE (0x1 << 6)
+
+static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
+{
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_EN4B) |
+ SEQ_OPC_CSDEASSERT);
+
+ seq->seq[0] = STFSM_INST_CMD1;
+ seq->seq[1] = STFSM_INST_WAIT;
+ seq->seq[2] = STFSM_INST_STOP;
+
+ seq->seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ);
+
+ return 0;
+}
+
+/*
+ * [S25FLxxx] Configuration
+ */
+#define STFSM_S25FL_CONFIG_QE (0x1 << 1)
+
+/*
+ * S25FLxxxS devices provide three ways of supporting 32-bit addressing: Bank
+ * Register, Extended Address Modes, and a 32-bit address command set. The
+ * 32-bit address command set is used here, since it avoids any problems with
+ * entering a state that is incompatible with the SPIBoot Controller.
+ */
+static struct seq_rw_config stfsm_s25fl_read4_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ4_1_4_4, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ4_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ4_1_2_2, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ4_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ4_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ4, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+static struct seq_rw_config stfsm_s25fl_write4_configs[] = {
+ {FLASH_FLAG_WRITE_1_1_4, S25FL_CMD_WRITE4_1_1_4, 1, 1, 4, 0x00, 0, 0},
+ {FLASH_FLAG_READ_WRITE, S25FL_CMD_WRITE4, 1, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [W25Qxxx] Configuration
+ */
+#define W25Q_STATUS_QE (0x1 << 1)
+
+static struct stfsm_seq stfsm_seq_read_jedec = {
+ .data_size = TRANSFER_SIZE(8),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_read_status_fifo = {
+ .data_size = TRANSFER_SIZE(4),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDSR)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_erase_sector = {
+ /* 'addr_cfg' configured during initialisation */
+ .seq_opc = {
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
+
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_SE)),
+ },
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_erase_chip = {
+ .seq_opc = {
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
+
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_CHIP_ERASE) | SEQ_OPC_CSDEASSERT),
+ },
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_WAIT,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_write_status = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WRSR)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_STA_WR1,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+/* Dummy sequence to read one byte of data from flash into the FIFO */
+static const struct stfsm_seq stfsm_seq_load_fifo_byte = {
+ .data_size = TRANSFER_SIZE(1),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq)
+{
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_EN4B));
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
+ SEQ_OPC_CSDEASSERT);
+
+ seq->seq[0] = STFSM_INST_CMD2;
+ seq->seq[1] = STFSM_INST_CMD1;
+ seq->seq[2] = STFSM_INST_WAIT;
+ seq->seq[3] = STFSM_INST_STOP;
+
+ seq->seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ);
+
+ return 0;
+}
+
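+/*
+ * SPI_FAST_SEQ_STA, as decoded by the helpers below: bit 4 flags the
+ * sequencer as idle; bits [11:5] report the number of complete 32-bit
+ * words available in the FIFO.
+ */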
+static inline int stfsm_is_idle(struct stfsm *fsm)
+{
+ return readl(fsm->base + SPI_FAST_SEQ_STA) & 0x10;
+}
+
+static inline uint32_t stfsm_fifo_available(struct stfsm *fsm)
+{
+ return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f;
+}
+
+static inline void stfsm_load_seq(struct stfsm *fsm,
+ const struct stfsm_seq *seq)
+{
+ void __iomem *dst = fsm->base + SPI_FAST_SEQ_TRANSFER_SIZE;
+ const uint32_t *src = (const uint32_t *)seq;
+ int words = sizeof(*seq) / sizeof(*src);
+
+ BUG_ON(!stfsm_is_idle(fsm));
+
+ while (words--) {
+ writel(*src, dst);
+ src++;
+ dst += 4;
+ }
+}
+
+static void stfsm_wait_seq(struct stfsm *fsm)
+{
+ unsigned long deadline;
+ int timeout = 0;
+
+ deadline = jiffies + msecs_to_jiffies(STFSM_MAX_WAIT_SEQ_MS);
+
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ if (stfsm_is_idle(fsm))
+ return;
+
+ cond_resched();
+ }
+
+ dev_err(fsm->dev, "timeout on sequence completion\n");
+}
+
+static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size)
+{
+ uint32_t remaining = size >> 2;
+ uint32_t avail;
+ uint32_t words;
+
+ dev_dbg(fsm->dev, "Reading %d bytes from FIFO\n", size);
+
+ BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
+
+ while (remaining) {
+ for (;;) {
+ avail = stfsm_fifo_available(fsm);
+ if (avail)
+ break;
+ udelay(1);
+ }
+ words = min(avail, remaining);
+ remaining -= words;
+
+ readsl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
+ buf += words;
+ }
+}
+
+/*
+ * Clear the data FIFO
+ *
+ * Typically, this is only required during driver initialisation, where no
+ * assumptions can be made regarding the state of the FIFO.
+ *
+ * The process of clearing the FIFO is complicated by the fact that while it is
+ * possible for the FIFO to contain an arbitrary number of bytes [1], the
+ * SPI_FAST_SEQ_STA register only reports the number of complete 32-bit words
+ * present. Furthermore, data can only be drained from the FIFO by reading
+ * complete 32-bit words.
+ *
+ * With this in mind, a two-stage process is used to clear the FIFO:
+ *
+ * 1. Read any complete 32-bit words from the FIFO, as reported by the
+ * SPI_FAST_SEQ_STA register.
+ *
+ * 2. Mop up any remaining bytes. At this point, it is not known if there
+ * are 0, 1, 2, or 3 bytes in the FIFO. To handle all cases, a dummy FSM
+ * sequence is used to load one byte at a time, until a complete 32-bit
+ * word is formed; at most, 4 bytes will need to be loaded.
+ *
+ * [1] It is theoretically possible for the FIFO to contain an arbitrary number
+ * of bits. However, since there are no known use-cases that leave
+ * incomplete bytes in the FIFO, only words and bytes are considered here.
+ */
+static void stfsm_clear_fifo(struct stfsm *fsm)
+{
+ const struct stfsm_seq *seq = &stfsm_seq_load_fifo_byte;
+ uint32_t words, i;
+
+ /* 1. Clear any 32-bit words */
+ words = stfsm_fifo_available(fsm);
+ if (words) {
+ for (i = 0; i < words; i++)
+ readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
+ dev_dbg(fsm->dev, "cleared %d words from FIFO\n", words);
+ }
+
+ /*
+ * 2. Clear any remaining bytes
+ * - Load the FIFO, one byte at a time, until a complete 32-bit word
+ * is available.
+ */
+ for (i = 0, words = 0; i < 4 && !words; i++) {
+ stfsm_load_seq(fsm, seq);
+ stfsm_wait_seq(fsm);
+ words = stfsm_fifo_available(fsm);
+ }
+
+ /* - A single word must be available now */
+ if (words != 1) {
+ dev_err(fsm->dev, "failed to clear bytes from the data FIFO\n");
+ return;
+ }
+
+ /* - Read the 32-bit word */
+ readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
+
+ dev_dbg(fsm->dev, "cleared %d byte(s) from the data FIFO\n", 4 - i);
+}
+
+static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
+ uint32_t size)
+{
+ uint32_t words = size >> 2;
+
+ dev_dbg(fsm->dev, "writing %d bytes to FIFO\n", size);
+
+ BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
+
+ writesl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
+
+ return size;
+}
+
+static int stfsm_enter_32bit_addr(struct stfsm *fsm, int enter)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_en_32bit_addr;
+ uint32_t cmd = enter ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
+
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd) |
+ SEQ_OPC_CSDEASSERT);
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static uint8_t stfsm_wait_busy(struct stfsm *fsm)
+{
+ struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
+ unsigned long deadline;
+ uint32_t status;
+ int timeout = 0;
+
+	/* Use RDSR1 */
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDSR));
+
+ /* Load read_status sequence */
+ stfsm_load_seq(fsm, seq);
+
+ /*
+ * Repeat until busy bit is deasserted, or timeout, or error (S25FLxxxS)
+ */
+ deadline = jiffies + FLASH_MAX_BUSY_WAIT;
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ stfsm_wait_seq(fsm);
+
+ stfsm_read_fifo(fsm, &status, 4);
+
+ if ((status & FLASH_STATUS_BUSY) == 0)
+ return 0;
+
+ if ((fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS) &&
+ ((status & S25FL_STATUS_P_ERR) ||
+ (status & S25FL_STATUS_E_ERR)))
+ return (uint8_t)(status & 0xff);
+
+ if (!timeout)
+ /* Restart */
+ writel(seq->seq_cfg, fsm->base + SPI_FAST_SEQ_CFG);
+
+ cond_resched();
+ }
+
+ dev_err(fsm->dev, "timeout on wait_busy\n");
+
+ return FLASH_STATUS_TIMEOUT;
+}
+
+static int stfsm_read_status(struct stfsm *fsm, uint8_t cmd,
+ uint8_t *data, int bytes)
+{
+ struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
+ uint32_t tmp;
+ uint8_t *t = (uint8_t *)&tmp;
+ int i;
+
+ dev_dbg(fsm->dev, "read 'status' register [0x%02x], %d byte(s)\n",
+ cmd, bytes);
+
+ BUG_ON(bytes != 1 && bytes != 2);
+
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd)),
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_read_fifo(fsm, &tmp, 4);
+
+ for (i = 0; i < bytes; i++)
+ data[i] = t[i];
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static int stfsm_write_status(struct stfsm *fsm, uint8_t cmd,
+ uint16_t data, int bytes, int wait_busy)
+{
+ struct stfsm_seq *seq = &stfsm_seq_write_status;
+
+ dev_dbg(fsm->dev,
+ "write 'status' register [0x%02x], %d byte(s), 0x%04x\n"
+ " %s wait-busy\n", cmd, bytes, data, wait_busy ? "with" : "no");
+
+ BUG_ON(bytes != 1 && bytes != 2);
+
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd));
+
+ seq->status = (uint32_t)data | STA_PADS_1 | STA_CSDEASSERT;
+ seq->seq[2] = (bytes == 1) ? STFSM_INST_STA_WR1 : STFSM_INST_STA_WR1_2;
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ if (wait_busy)
+ stfsm_wait_busy(fsm);
+
+ return 0;
+}
+
+/*
+ * SoC reset on 'boot-from-spi' systems
+ *
+ * Certain modes of operation cause the Flash device to enter a particular state
+ * for a period of time (e.g. 'Erase Sector', 'Quad Enable', and 'Enter 32-bit
+ * Addr' commands). On boot-from-spi systems, it is important to consider what
+ * happens if a warm reset occurs during this period. The SPIBoot controller
+ * assumes that the Flash device is in its default reset state, in 24-bit address
+ * mode, and ready to accept commands. This can be achieved using some form of
+ * on-board logic/controller to force a device POR in response to a SoC-level
+ * reset or by making use of the device reset signal if available (limited
+ * number of devices only).
+ *
+ * Failure to take such precautions can cause problems following a warm reset.
+ * For some operations (e.g. ERASE), there is little that can be done. For
+ * other modes of operation (e.g. 32-bit addressing), options are often
+ * available that can help minimise the window in which a reset could cause a
+ * problem.
+ */
+static bool stfsm_can_handle_soc_reset(struct stfsm *fsm)
+{
+ /* Reset signal is available on the board and supported by the device */
+ if (fsm->reset_signal && fsm->info->flags & FLASH_FLAG_RESET)
+ return true;
+
+ /* Board-level logic forces a power-on-reset */
+ if (fsm->reset_por)
+ return true;
+
+ /* Reset is not properly handled and may result in failure to reboot */
+ return false;
+}
+
+/* Configure 'addr_cfg' according to addressing mode */
+static void stfsm_prepare_erasesec_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq)
+{
+ int addr1_cycles = fsm->info->flags & FLASH_FLAG_32BIT_ADDR ? 16 : 8;
+
+ seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(addr1_cycles) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2 |
+ ADR_CFG_CSDEASSERT_ADD2);
+}
+
+/* Search for preferred configuration based on available flags */
+static struct seq_rw_config *
+stfsm_search_seq_rw_configs(struct stfsm *fsm,
+ struct seq_rw_config cfgs[])
+{
+ struct seq_rw_config *config;
+ int flags = fsm->info->flags;
+
+ for (config = cfgs; config->cmd != 0; config++)
+ if ((config->flags & flags) == config->flags)
+ return config;
+
+ return NULL;
+}
+
+/* Prepare a READ/WRITE sequence according to configuration parameters */
+static void stfsm_prepare_rw_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq,
+ struct seq_rw_config *cfg)
+{
+ int addr1_cycles, addr2_cycles;
+ int i = 0;
+
+ memset(seq, 0, sizeof(*seq));
+
+ /* Add READ/WRITE OPC */
+ seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cfg->cmd));
+
+ /* Add WREN OPC for a WRITE sequence */
+ if (cfg->write)
+ seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
+ SEQ_OPC_CSDEASSERT);
+
+ /* Address configuration (24 or 32-bit addresses) */
+ addr1_cycles = (fsm->info->flags & FLASH_FLAG_32BIT_ADDR) ? 16 : 8;
+ addr1_cycles /= cfg->addr_pads;
+ addr2_cycles = 16 / cfg->addr_pads;
+ seq->addr_cfg = ((addr1_cycles & 0x3f) << 0 | /* ADD1 cycles */
+ (cfg->addr_pads - 1) << 6 | /* ADD1 pads */
+ (addr2_cycles & 0x3f) << 16 | /* ADD2 cycles */
+ ((cfg->addr_pads - 1) << 22)); /* ADD2 pads */
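+	/*
+	 * e.g. a 24-bit address on 4 pads: ADD1 = 8/4 = 2 cycles for the top
+	 * byte, ADD2 = 16/4 = 4 cycles for the low 16 bits.
+	 */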
+
+ /* Data/Sequence configuration */
+ seq->seq_cfg = ((cfg->data_pads - 1) << 16 |
+ SEQ_CFG_STARTSEQ |
+ SEQ_CFG_CSDEASSERT);
+ if (!cfg->write)
+ seq->seq_cfg |= SEQ_CFG_READNOTWRITE;
+
+ /* Mode configuration (no. of pads taken from addr cfg) */
+ seq->mode = ((cfg->mode_data & 0xff) << 0 | /* data */
+ (cfg->mode_cycles & 0x3f) << 16 | /* cycles */
+ (cfg->addr_pads - 1) << 22); /* pads */
+
+ /* Dummy configuration (no. of pads taken from addr cfg) */
+ seq->dummy = ((cfg->dummy_cycles & 0x3f) << 16 | /* cycles */
+ (cfg->addr_pads - 1) << 22); /* pads */
+
+ /* Instruction sequence */
+ i = 0;
+ if (cfg->write)
+ seq->seq[i++] = STFSM_INST_CMD2;
+
+ seq->seq[i++] = STFSM_INST_CMD1;
+
+ seq->seq[i++] = STFSM_INST_ADD1;
+ seq->seq[i++] = STFSM_INST_ADD2;
+
+ if (cfg->mode_cycles)
+ seq->seq[i++] = STFSM_INST_MODE;
+
+ if (cfg->dummy_cycles)
+ seq->seq[i++] = STFSM_INST_DUMMY;
+
+ seq->seq[i++] =
+ cfg->write ? STFSM_INST_DATA_WRITE : STFSM_INST_DATA_READ;
+ seq->seq[i++] = STFSM_INST_STOP;
+}
+
+static int stfsm_search_prepare_rw_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq,
+ struct seq_rw_config *cfgs)
+{
+ struct seq_rw_config *config;
+
+ config = stfsm_search_seq_rw_configs(fsm, cfgs);
+ if (!config) {
+ dev_err(fsm->dev, "failed to find suitable config\n");
+ return -EINVAL;
+ }
+
+ stfsm_prepare_rw_seq(fsm, seq, config);
+
+ return 0;
+}
+
+/* Prepare the default READ/WRITE/ERASE sequences */
+static int stfsm_prepare_rwe_seqs_default(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ int ret;
+
+ /* Configure 'READ' sequence */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ default_read_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prep READ sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'WRITE' sequence */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ default_write_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prep WRITE sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'ERASE_SECTOR' sequence */
+ stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector);
+
+ return 0;
+}
+
+static int stfsm_mx25_config(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ uint32_t data_pads;
+ uint8_t sta;
+ int ret;
+ bool soc_reset;
+
+ /*
+ * Use default READ/WRITE sequences
+ */
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure 32-bit Address Support
+ */
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ /* Configure 'enter_32bitaddr' FSM sequence */
+ stfsm_mx25_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
+
+ soc_reset = stfsm_can_handle_soc_reset(fsm);
+		if (soc_reset || !fsm->booted_from_spi) {
+			/* If we can handle SoC resets, we enable 32-bit
+			 * address mode pervasively */
+			stfsm_enter_32bit_addr(fsm, 1);
+		} else {
+			/* Else, enable/disable 32-bit addressing before/after
+			 * each operation */
+			fsm->configuration = (CFG_READ_TOGGLE_32BIT_ADDR |
+					      CFG_WRITE_TOGGLE_32BIT_ADDR |
+					      CFG_ERASESEC_TOGGLE_32BIT_ADDR);
+		}
+ }
+
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sta, 1);
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ if (!(sta & MX25_STATUS_QE)) {
+ /* Set 'QE' */
+ sta |= MX25_STATUS_QE;
+
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
+ }
+ } else {
+ if (sta & MX25_STATUS_QE) {
+ /* Clear 'QE' */
+ sta &= ~MX25_STATUS_QE;
+
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int stfsm_n25q_config(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ uint8_t vcr;
+ int ret = 0;
+ bool soc_reset;
+
+ /* Configure 'READ' sequence */
+ if (flags & FLASH_FLAG_32BIT_ADDR)
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ n25q_read4_configs);
+ else
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ n25q_read3_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prepare READ sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'WRITE' sequence (default configs) */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ default_write_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "preparing WRITE sequence using flags [0x%08x] failed\n",
+ flags);
+ return ret;
+ }
+
+	/* Configure 'ERASE_SECTOR' sequence */
+ stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector);
+
+ /* Configure 32-bit address support */
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ stfsm_n25q_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
+
+ soc_reset = stfsm_can_handle_soc_reset(fsm);
+ if (soc_reset || !fsm->booted_from_spi) {
+ /*
+ * If we can handle SoC resets, we enable 32-bit
+ * address mode pervasively
+ */
+ stfsm_enter_32bit_addr(fsm, 1);
+ } else {
+ /*
+ * If not, enable/disable for WRITE and ERASE
+ * operations (READ uses special commands)
+ */
+ fsm->configuration = (CFG_WRITE_TOGGLE_32BIT_ADDR |
+ CFG_ERASESEC_TOGGLE_32BIT_ADDR);
+ }
+ }
+
+ /*
+ * Configure device to use 8 dummy cycles
+ */
+ vcr = (N25Q_VCR_DUMMY_CYCLES(8) | N25Q_VCR_XIP_DISABLED |
+ N25Q_VCR_WRAP_CONT);
+ stfsm_write_status(fsm, N25Q_CMD_WRVCR, vcr, 1, 0);
+
+ return 0;
+}
+
+static void stfsm_s25fl_prepare_erasesec_seq_32(struct stfsm_seq *seq)
+{
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_SE4));
+
+ seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2 |
+ ADR_CFG_CSDEASSERT_ADD2);
+}
+
+static void stfsm_s25fl_read_dyb(struct stfsm *fsm, uint32_t offs, uint8_t *dby)
+{
+ uint32_t tmp;
+ struct stfsm_seq seq = {
+ .data_size = TRANSFER_SIZE(4),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_DYBRD)),
+ .addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2),
+ .addr1 = (offs >> 16) & 0xffff,
+ .addr2 = offs & 0xffff,
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+
+ stfsm_read_fifo(fsm, &tmp, 4);
+
+ *dby = (uint8_t)(tmp >> 24);
+
+ stfsm_wait_seq(fsm);
+}
+
+static void stfsm_s25fl_write_dyb(struct stfsm *fsm, uint32_t offs, uint8_t dby)
+{
+ struct stfsm_seq seq = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
+ SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_DYBWR)),
+ .addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2),
+ .status = (uint32_t)dby | STA_PADS_1 | STA_CSDEASSERT,
+ .addr1 = (offs >> 16) & 0xffff,
+ .addr2 = offs & 0xffff,
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_STA_WR1,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+ stfsm_wait_seq(fsm);
+
+ stfsm_wait_busy(fsm);
+}
+
+static int stfsm_s25fl_clear_status_reg(struct stfsm *fsm)
+{
+ struct stfsm_seq seq = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_CLSR) |
+ SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WRDI) |
+ SEQ_OPC_CSDEASSERT),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_WAIT,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static int stfsm_s25fl_config(struct stfsm *fsm)
+{
+ struct flash_info *info = fsm->info;
+ uint32_t flags = info->flags;
+ uint32_t data_pads;
+ uint32_t offs;
+ uint16_t sta_wr;
+ uint8_t sr1, cr1, dyb;
+ int update_sr = 0;
+ int ret;
+
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ /*
+ * Prepare Read/Write/Erase sequences according to S25FLxxx
+ * 32-bit address command set
+ */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ stfsm_s25fl_read4_configs);
+ if (ret)
+ return ret;
+
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ stfsm_s25fl_write4_configs);
+ if (ret)
+ return ret;
+
+ stfsm_s25fl_prepare_erasesec_seq_32(&stfsm_seq_erase_sector);
+
+ } else {
+ /* Use default configurations for 24-bit addressing */
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * For devices that support 'DYB' sector locking, check lock status and
+ * unlock sectors if necessary (some variants power-on with sectors
+ * locked by default)
+ */
+ if (flags & FLASH_FLAG_DYB_LOCKING) {
+		for (offs = 0; offs < info->sector_size * info->n_sectors;) {
+ stfsm_s25fl_read_dyb(fsm, offs, &dyb);
+ if (dyb == 0x00)
+ stfsm_s25fl_write_dyb(fsm, offs, 0xff);
+
+ /* Handle bottom/top 4KiB parameter sectors */
+ if ((offs < info->sector_size * 2) ||
+ (offs >= (info->sector_size - info->n_sectors * 4)))
+ offs += 0x1000;
+ else
+ offs += 0x10000;
+ }
+ }
+
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR2, &cr1, 1);
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ if (!(cr1 & STFSM_S25FL_CONFIG_QE)) {
+ /* Set 'QE' */
+ cr1 |= STFSM_S25FL_CONFIG_QE;
+
+ update_sr = 1;
+ }
+ } else {
+ if (cr1 & STFSM_S25FL_CONFIG_QE) {
+ /* Clear 'QE' */
+ cr1 &= ~STFSM_S25FL_CONFIG_QE;
+
+ update_sr = 1;
+ }
+ }
+ if (update_sr) {
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
+ sta_wr = ((uint16_t)cr1 << 8) | sr1;
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta_wr, 2, 1);
+ }
+
+ /*
+	 * S25FLxxx devices support Program and Erase error flags.
+	 * Configure the driver to check the flags and clear them if necessary.
+ */
+ fsm->configuration |= CFG_S25FL_CHECK_ERROR_FLAGS;
+
+ return 0;
+}
+
+static int stfsm_w25q_config(struct stfsm *fsm)
+{
+ uint32_t data_pads;
+ uint8_t sr1, sr2;
+ uint16_t sr_wr;
+ int update_sr = 0;
+ int ret;
+
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR2, &sr2, 1);
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ if (!(sr2 & W25Q_STATUS_QE)) {
+ /* Set 'QE' */
+ sr2 |= W25Q_STATUS_QE;
+ update_sr = 1;
+ }
+ } else {
+ if (sr2 & W25Q_STATUS_QE) {
+ /* Clear 'QE' */
+ sr2 &= ~W25Q_STATUS_QE;
+ update_sr = 1;
+ }
+ }
+ if (update_sr) {
+ /* Write status register */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
+ sr_wr = ((uint16_t)sr2 << 8) | sr1;
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sr_wr, 2, 1);
+ }
+
+ return 0;
+}
+
+static int stfsm_read(struct stfsm *fsm, uint8_t *buf, uint32_t size,
+ uint32_t offset)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_read;
+ uint32_t data_pads;
+ uint32_t read_mask;
+ uint32_t size_ub;
+ uint32_t size_lb;
+ uint32_t size_mop;
+ uint32_t tmp[4];
+ uint32_t page_buf[FLASH_PAGESIZE_32];
+ uint8_t *p;
+
+ dev_dbg(fsm->dev, "reading %d bytes from 0x%08x\n", size, offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ /* Must read in multiples of 32 cycles (or 32*pads/8 Bytes) */
+ data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1;
+ read_mask = (data_pads << 2) - 1;
+
+ /* Handle non-aligned buf */
+ p = ((uintptr_t)buf & 0x3) ? (uint8_t *)page_buf : buf;
+
+ /* Handle non-aligned size */
+ size_ub = (size + read_mask) & ~read_mask;
+ size_lb = size & ~read_mask;
+ size_mop = size & read_mask;
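+	/*
+	 * e.g. a 10-byte read on 1 data pad (read_mask = 3): size_ub = 12,
+	 * size_lb = 8, size_mop = 2; the final 2 bytes are copied out of 'tmp'.
+	 */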
+
+ seq->data_size = TRANSFER_SIZE(size_ub);
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ stfsm_load_seq(fsm, seq);
+
+ if (size_lb)
+ stfsm_read_fifo(fsm, (uint32_t *)p, size_lb);
+
+ if (size_mop) {
+ stfsm_read_fifo(fsm, tmp, read_mask + 1);
+ memcpy(p + size_lb, &tmp, size_mop);
+ }
+
+ /* Handle non-aligned buf */
+ if ((uintptr_t)buf & 0x3)
+ memcpy(buf, page_buf, size);
+
+ /* Wait for sequence to finish */
+ stfsm_wait_seq(fsm);
+
+ stfsm_clear_fifo(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return 0;
+}
+
+static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
+ uint32_t size, uint32_t offset)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_write;
+ uint32_t data_pads;
+ uint32_t write_mask;
+ uint32_t size_ub;
+ uint32_t size_lb;
+ uint32_t size_mop;
+ uint32_t tmp[4];
+ uint32_t i;
+ uint32_t page_buf[FLASH_PAGESIZE_32];
+ uint8_t *t = (uint8_t *)&tmp;
+ const uint8_t *p;
+ int ret;
+
+ dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ /* Must write in multiples of 32 cycles (or 32*pads/8 bytes) */
+ data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1;
+ write_mask = (data_pads << 2) - 1;
+
+ /* Handle non-aligned buf */
+ if ((uintptr_t)buf & 0x3) {
+ memcpy(page_buf, buf, size);
+ p = (uint8_t *)page_buf;
+ } else {
+ p = buf;
+ }
+
+ /* Handle non-aligned size */
+ size_ub = (size + write_mask) & ~write_mask;
+ size_lb = size & ~write_mask;
+ size_mop = size & write_mask;
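+	/* Same split as in stfsm_read(): e.g. size = 10, write_mask = 3 gives
+	 * size_lb = 8 and size_mop = 2, padded to 12 bytes with 0xff */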
+
+ seq->data_size = TRANSFER_SIZE(size_ub);
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ /* Need to set FIFO to write mode, before writing data to FIFO (see
+ * GNBvb79594)
+ */
+ writel(0x00040000, fsm->base + SPI_FAST_SEQ_CFG);
+
+ /*
+ * Before writing data to the FIFO, apply a small delay to allow a
+ * potential change of FIFO direction to complete.
+ */
+ if (fsm->fifo_dir_delay == 0)
+ readl(fsm->base + SPI_FAST_SEQ_CFG);
+ else
+ udelay(fsm->fifo_dir_delay);
+
+ /* Write data to FIFO, before starting sequence (see GNBvd79593) */
+ if (size_lb) {
+ stfsm_write_fifo(fsm, (uint32_t *)p, size_lb);
+ p += size_lb;
+ }
+
+ /* Handle non-aligned size */
+ if (size_mop) {
+ memset(t, 0xff, write_mask + 1); /* fill with 0xff's */
+ for (i = 0; i < size_mop; i++)
+ t[i] = *p++;
+
+ stfsm_write_fifo(fsm, tmp, write_mask + 1);
+ }
+
+ /* Start sequence */
+ stfsm_load_seq(fsm, seq);
+
+ /* Wait for sequence to finish */
+ stfsm_wait_seq(fsm);
+
+ /* Wait for completion */
+ ret = stfsm_wait_busy(fsm);
+ if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS)
+ stfsm_s25fl_clear_status_reg(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return 0;
+}
+
+/*
+ * Read an address range from the flash chip. The address range
+ * may be any size provided it is within the physical boundaries.
+ */
+static int stfsm_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+ uint32_t bytes;
+
+ dev_dbg(fsm->dev, "%s from 0x%08x, len %zd\n",
+ __func__, (u32)from, len);
+
+ mutex_lock(&fsm->lock);
+
+ while (len > 0) {
+ bytes = min_t(size_t, len, FLASH_PAGESIZE);
+
+ stfsm_read(fsm, buf, bytes, from);
+
+ buf += bytes;
+ from += bytes;
+ len -= bytes;
+
+ *retlen += bytes;
+ }
+
+ mutex_unlock(&fsm->lock);
+
+ return 0;
+}
+
+static int stfsm_erase_sector(struct stfsm *fsm, uint32_t offset)
+{
+ struct stfsm_seq *seq = &stfsm_seq_erase_sector;
+ int ret;
+
+ dev_dbg(fsm->dev, "erasing sector at 0x%08x\n", offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ /* Wait for completion */
+ ret = stfsm_wait_busy(fsm);
+ if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS)
+ stfsm_s25fl_clear_status_reg(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return ret;
+}
+
+static int stfsm_erase_chip(struct stfsm *fsm)
+{
+ const struct stfsm_seq *seq = &stfsm_seq_erase_chip;
+
+ dev_dbg(fsm->dev, "erasing chip\n");
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ return stfsm_wait_busy(fsm);
+}
+
+/*
+ * Write an address range to the flash chip. Data must be written in
+ * FLASH_PAGESIZE chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int stfsm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+
+ u32 page_offs;
+ u32 bytes;
+	const uint8_t *b = buf;
+ int ret = 0;
+
+ dev_dbg(fsm->dev, "%s to 0x%08x, len %zd\n", __func__, (u32)to, len);
+
+ /* Offset within page */
+ page_offs = to % FLASH_PAGESIZE;
+
+ mutex_lock(&fsm->lock);
+
+ while (len) {
+ /* Write up to page boundary */
+ bytes = min_t(size_t, FLASH_PAGESIZE - page_offs, len);
+
+ ret = stfsm_write(fsm, b, bytes, to);
+ if (ret)
+ goto out1;
+
+ b += bytes;
+ len -= bytes;
+ to += bytes;
+
+ /* We are now page-aligned */
+ page_offs = 0;
+
+ *retlen += bytes;
+
+ }
+
+out1:
+ mutex_unlock(&fsm->lock);
+
+ return ret;
+}
+
+/*
+ * Erase an address range on the flash chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int stfsm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+ u32 addr, len;
+ int ret;
+
+ dev_dbg(fsm->dev, "%s at 0x%llx, len %lld\n", __func__,
+ (long long)instr->addr, (long long)instr->len);
+
+ addr = instr->addr;
+ len = instr->len;
+
+ mutex_lock(&fsm->lock);
+
+ /* Whole-chip erase? */
+ if (len == mtd->size) {
+ ret = stfsm_erase_chip(fsm);
+ if (ret)
+ goto out1;
+ } else {
+ while (len) {
+ ret = stfsm_erase_sector(fsm, addr);
+ if (ret)
+ goto out1;
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+ }
+
+ mutex_unlock(&fsm->lock);
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+
+out1:
+ instr->state = MTD_ERASE_FAILED;
+ mutex_unlock(&fsm->lock);
+
+ return ret;
+}
+
+static void stfsm_read_jedec(struct stfsm *fsm, uint8_t *jedec)
+{
+ const struct stfsm_seq *seq = &stfsm_seq_read_jedec;
+ uint32_t tmp[2];
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_read_fifo(fsm, tmp, 8);
+
+ memcpy(jedec, tmp, 5);
+
+ stfsm_wait_seq(fsm);
+}
+
+static struct flash_info *stfsm_jedec_probe(struct stfsm *fsm)
+{
+ struct flash_info *info;
+ u16 ext_jedec;
+ u32 jedec;
+ u8 id[5];
+
+ stfsm_read_jedec(fsm, id);
+
+ jedec = id[0] << 16 | id[1] << 8 | id[2];
+ /*
+ * JEDEC also defines an optional "extended device information"
+ * string for after vendor-specific data, after the three bytes
+ * we use here. Supporting some chips might require using it.
+ */
+ ext_jedec = id[3] << 8 | id[4];
+
+ dev_dbg(fsm->dev, "JEDEC = 0x%08x [%02x %02x %02x %02x %02x]\n",
+ jedec, id[0], id[1], id[2], id[3], id[4]);
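+	/*
+	 * e.g. id[] = { 0x20, 0xba, 0x19, ... } gives jedec = 0x20ba19, which
+	 * matches the "n25q256" entry in flash_types[].
+	 */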
+
+ for (info = flash_types; info->name; info++) {
+ if (info->jedec_id == jedec) {
+ if (info->ext_id && info->ext_id != ext_jedec)
+ continue;
+ return info;
+ }
+ }
+ dev_err(fsm->dev, "Unrecognized JEDEC id %06x\n", jedec);
+
+ return NULL;
+}
+
+static int stfsm_set_mode(struct stfsm *fsm, uint32_t mode)
+{
+ int ret, timeout = 10;
+
+ /* Wait for controller to accept mode change */
+ while (--timeout) {
+ ret = readl(fsm->base + SPI_STA_MODE_CHANGE);
+ if (ret & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (!timeout)
+ return -EBUSY;
+
+ writel(mode, fsm->base + SPI_MODESELECT);
+
+ return 0;
+}
+
+static void stfsm_set_freq(struct stfsm *fsm, uint32_t spi_freq)
+{
+ uint32_t emi_freq;
+ uint32_t clk_div;
+
+ emi_freq = clk_get_rate(fsm->clk);
+
+ /*
+ * Calculate clk_div - values between 2 and 128
+ * Multiple of 2, rounded up
+ */
+ clk_div = 2 * DIV_ROUND_UP(emi_freq, 2 * spi_freq);
+ if (clk_div < 2)
+ clk_div = 2;
+ else if (clk_div > 128)
+ clk_div = 128;
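+	/* e.g. emi_freq = 100 MHz, spi_freq = 10 MHz: clk_div = 2 * 5 = 10 */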
+
+ /*
+ * Determine a suitable delay for the IP to complete a change of
+ * direction of the FIFO. The required delay is related to the clock
+ * divider used. The following heuristics are based on empirical tests,
+ * using a 100MHz EMI clock.
+ */
+ if (clk_div <= 4)
+ fsm->fifo_dir_delay = 0;
+ else if (clk_div <= 10)
+ fsm->fifo_dir_delay = 1;
+ else
+ fsm->fifo_dir_delay = DIV_ROUND_UP(clk_div, 10);
+
+ dev_dbg(fsm->dev, "emi_clk = %uHZ, spi_freq = %uHZ, clk_div = %u\n",
+ emi_freq, spi_freq, clk_div);
+
+ writel(clk_div, fsm->base + SPI_CLOCKDIV);
+}
+
+static int stfsm_init(struct stfsm *fsm)
+{
+ int ret;
+
+ /* Perform a soft reset of the FSM controller */
+ writel(SEQ_CFG_SWRESET, fsm->base + SPI_FAST_SEQ_CFG);
+ udelay(1);
+ writel(0, fsm->base + SPI_FAST_SEQ_CFG);
+
+ /* Set clock to 'safe' frequency initially */
+ stfsm_set_freq(fsm, STFSM_FLASH_SAFE_FREQ);
+
+ /* Switch to FSM */
+ ret = stfsm_set_mode(fsm, SPI_MODESELECT_FSM);
+ if (ret)
+ return ret;
+
+ /* Set timing parameters */
+ writel(SPI_CFG_DEVICE_ST |
+ SPI_CFG_DEFAULT_MIN_CS_HIGH |
+ SPI_CFG_DEFAULT_CS_SETUPHOLD |
+ SPI_CFG_DEFAULT_DATA_HOLD,
+ fsm->base + SPI_CONFIGDATA);
+ writel(STFSM_DEFAULT_WR_TIME, fsm->base + SPI_STATUS_WR_TIME_REG);
+
+ /*
+ * Set the FSM 'WAIT' delay to the minimum workable value. Note, for
+ * our purposes, the WAIT instruction is used purely to achieve
+ * "sequence validity" rather than actually implement a delay.
+ */
+ writel(0x00000001, fsm->base + SPI_PROGRAM_ERASE_TIME);
+
+ /* Clear FIFO, just in case */
+ stfsm_clear_fifo(fsm);
+
+ return 0;
+}
+
+static void stfsm_fetch_platform_configs(struct platform_device *pdev)
+{
+ struct stfsm *fsm = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct regmap *regmap;
+ uint32_t boot_device_reg;
+ uint32_t boot_device_spi;
+ uint32_t boot_device; /* Value we read from *boot_device_reg */
+ int ret;
+
+ /* Booting from SPI NOR Flash is the default */
+ fsm->booted_from_spi = true;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(regmap))
+ goto boot_device_fail;
+
+ fsm->reset_signal = of_property_read_bool(np, "st,reset-signal");
+
+ fsm->reset_por = of_property_read_bool(np, "st,reset-por");
+
+ /* Where in the syscon the boot device information lives */
+ ret = of_property_read_u32(np, "st,boot-device-reg", &boot_device_reg);
+ if (ret)
+ goto boot_device_fail;
+
+ /* Boot device value when booted from SPI NOR */
+ ret = of_property_read_u32(np, "st,boot-device-spi", &boot_device_spi);
+ if (ret)
+ goto boot_device_fail;
+
+ ret = regmap_read(regmap, boot_device_reg, &boot_device);
+ if (ret)
+ goto boot_device_fail;
+
+ if (boot_device != boot_device_spi)
+ fsm->booted_from_spi = false;
+
+ return;
+
+boot_device_fail:
+ dev_warn(&pdev->dev,
+ "failed to fetch boot device, assuming boot from SPI\n");
+}
+
+static int stfsm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+ struct flash_info *info;
+ struct resource *res;
+ struct stfsm *fsm;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "No DT found\n");
+ return -EINVAL;
+ }
+ ppdata.of_node = np;
+
+ fsm = devm_kzalloc(&pdev->dev, sizeof(*fsm), GFP_KERNEL);
+ if (!fsm)
+ return -ENOMEM;
+
+ fsm->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, fsm);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Resource not found\n");
+ return -ENODEV;
+ }
+
+ fsm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsm->base)) {
+ dev_err(&pdev->dev,
+ "Failed to reserve memory region %pR\n", res);
+ return PTR_ERR(fsm->base);
+ }
+
+ fsm->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(fsm->clk)) {
+ dev_err(fsm->dev, "Couldn't find EMI clock.\n");
+ return PTR_ERR(fsm->clk);
+ }
+
+ ret = clk_prepare_enable(fsm->clk);
+ if (ret) {
+ dev_err(fsm->dev, "Failed to enable EMI clock.\n");
+ return ret;
+ }
+
+ mutex_init(&fsm->lock);
+
+ ret = stfsm_init(fsm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialise FSM Controller\n");
+		goto err_clk_unprepare;
+ }
+
+ stfsm_fetch_platform_configs(pdev);
+
+ /* Detect SPI FLASH device */
+ info = stfsm_jedec_probe(fsm);
+	if (!info) {
+		ret = -ENODEV;
+		goto err_clk_unprepare;
+	}
+ fsm->info = info;
+
+ /* Use device size to determine address width */
+ if (info->sector_size * info->n_sectors > 0x1000000)
+ info->flags |= FLASH_FLAG_32BIT_ADDR;
+
+ /*
+ * Configure READ/WRITE/ERASE sequences according to platform and
+ * device flags.
+ */
+	if (info->config) {
+		ret = info->config(fsm);
+		if (ret)
+			goto err_clk_unprepare;
+	} else {
+		ret = stfsm_prepare_rwe_seqs_default(fsm);
+		if (ret)
+			goto err_clk_unprepare;
+	}
+
+ fsm->mtd.name = info->name;
+ fsm->mtd.dev.parent = &pdev->dev;
+ fsm->mtd.type = MTD_NORFLASH;
+ fsm->mtd.writesize = 4;
+ fsm->mtd.writebufsize = fsm->mtd.writesize;
+ fsm->mtd.flags = MTD_CAP_NORFLASH;
+ fsm->mtd.size = info->sector_size * info->n_sectors;
+ fsm->mtd.erasesize = info->sector_size;
+
+ fsm->mtd._read = stfsm_mtd_read;
+ fsm->mtd._write = stfsm_mtd_write;
+ fsm->mtd._erase = stfsm_mtd_erase;
+
+ dev_info(&pdev->dev,
+ "Found serial flash device: %s\n"
+ " size = %llx (%lldMiB) erasesize = 0x%08x (%uKiB)\n",
+ info->name,
+ (long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
+ fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
+
+	ret = mtd_device_parse_register(&fsm->mtd, NULL, &ppdata, NULL, 0);
+	if (ret)
+		goto err_clk_unprepare;
+
+	return 0;
+
+err_clk_unprepare:
+	clk_disable_unprepare(fsm->clk);
+
+	return ret;
+}
+
+static int stfsm_remove(struct platform_device *pdev)
+{
+	struct stfsm *fsm = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = mtd_device_unregister(&fsm->mtd);
+	if (ret)
+		return ret;
+	clk_disable_unprepare(fsm->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stfsm_suspend(struct device *dev)
+{
+ struct stfsm *fsm = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(fsm->clk);
+
+ return 0;
+}
+
+static int stfsm_resume(struct device *dev)
+{
+ struct stfsm *fsm = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(fsm->clk);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsm_suspend, stfsm_resume);
+
+static const struct of_device_id stfsm_match[] = {
+ { .compatible = "st,spi-fsm", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stfsm_match);
+
+static struct platform_driver stfsm_driver = {
+ .probe = stfsm_probe,
+ .remove = stfsm_remove,
+ .driver = {
+ .name = "st-spi-fsm",
+ .of_match_table = stfsm_match,
+ .pm = &stfsm_pm_ops,
+ },
+};
+module_platform_driver(stfsm_driver);
+
+MODULE_AUTHOR("Angus Clark <angus.clark@st.com>");
+MODULE_DESCRIPTION("ST SPI FSM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
new file mode 100644
index 000000000..dabf08450
--- /dev/null
+++ b/drivers/mtd/ftl.c
@@ -0,0 +1,1114 @@
+/* This version ported to the Linux-MTD system by dwmw2@infradead.org
+ *
+ * Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ * - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
+ *
+ * Based on:
+ */
+/*======================================================================
+
+ A Flash Translation Layer memory card driver
+
+ This driver implements a disk-like block device driver with an
+ apparent block size of 512 bytes for flash memory cards.
+
+ ftl_cs.c 1.62 2000/02/01 00:59:04
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright © 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+ LEGAL NOTE: The FTL format is patented by M-Systems. They have
+ granted a license for its use with PCMCIA devices:
+
+ "M-Systems grants a royalty-free, non-exclusive license under
+ any presently existing M-Systems intellectual property rights
+ necessary for the design and development of FTL-compatible
+ drivers, file systems and utilities using the data formats with
+ PCMCIA PC Cards as described in the PCMCIA Flash Translation
+ Layer (FTL) Specification."
+
+ Use of the FTL format for non-PCMCIA applications may be an
+ infringement of these patents. For additional information,
+ contact M-Systems directly. M-Systems has since been acquired by SanDisk.
+
+======================================================================*/
+#include <linux/mtd/blktrans.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+/*#define PSYCHO_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/hdreg.h>
+#include <linux/vmalloc.h>
+#include <linux/blkpg.h>
+#include <asm/uaccess.h>
+
+#include <linux/mtd/ftl.h>
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+static int shuffle_freq = 50;
+module_param(shuffle_freq, int, 0);
+
+/*====================================================================*/
+
+/* Major device # for FTL device */
+#ifndef FTL_MAJOR
+#define FTL_MAJOR 44
+#endif
+
+
+/*====================================================================*/
+
+/* Maximum number of separate memory devices we'll allow */
+#define MAX_DEV 4
+
+/* Maximum number of regions per device */
+#define MAX_REGION 4
+
+/* Maximum number of partitions in an FTL region */
+#define PART_BITS 4
+
+/* Maximum number of outstanding erase requests per socket */
+#define MAX_ERASE 8
+
+/* Sector size -- shouldn't need to change */
+#define SECTOR_SIZE 512
+
+
+/* Each memory region corresponds to a minor device */
+typedef struct partition_t {
+ struct mtd_blktrans_dev mbd;
+ uint32_t state;
+ uint32_t *VirtualBlockMap;
+ uint32_t FreeTotal;
+ struct eun_info_t {
+ uint32_t Offset;
+ uint32_t EraseCount;
+ uint32_t Free;
+ uint32_t Deleted;
+ } *EUNInfo;
+ struct xfer_info_t {
+ uint32_t Offset;
+ uint32_t EraseCount;
+ uint16_t state;
+ } *XferInfo;
+ uint16_t bam_index;
+ uint32_t *bam_cache;
+ uint16_t DataUnits;
+ uint32_t BlocksPerUnit;
+ erase_unit_header_t header;
+} partition_t;
+
+/* Partition state flags */
+#define FTL_FORMATTED 0x01
+
+/* Transfer unit states */
+#define XFER_UNKNOWN 0x00
+#define XFER_ERASING 0x01
+#define XFER_ERASED 0x02
+#define XFER_PREPARED 0x03
+#define XFER_FAILED 0x04
+
+/*====================================================================*/
+
+static void ftl_erase_callback(struct erase_info *done);
+
+/*======================================================================
+
+ Scan_header() checks to see if a memory region contains an FTL
+ partition. build_maps() reads all the erase unit headers, builds
+ the erase unit map, and then builds the virtual page map.
+
+======================================================================*/
+
+static int scan_header(partition_t *part)
+{
+ erase_unit_header_t header;
+ loff_t offset, max_offset;
+ size_t ret;
+ int err;
+
+ part->header.FormattedSize = 0;
+ max_offset = (0x100000 < part->mbd.mtd->size) ? 0x100000 : part->mbd.mtd->size;
+ /* Search first megabyte for a valid FTL header */
+ for (offset = 0;
+ (offset + sizeof(header)) < max_offset;
+ offset += part->mbd.mtd->erasesize ? : 0x2000) {
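+ /*
+ * The GNU "?:" operator steps by the device's erase size, falling
+ * back to 8 KiB (0x2000) when the MTD reports an erase size of zero.
+ */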
+
+ err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
+ (unsigned char *)&header);
+
+ if (err)
+ return err;
+
+ if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
+ }
+
+ if (offset == max_offset) {
+ printk(KERN_NOTICE "ftl_cs: FTL header not found.\n");
+ return -ENOENT;
+ }
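+ /*
+ * The header stores sizes as log2 values: BlockSize must be 9
+ * (512-byte blocks) and EraseUnitSize must fall between 10 (1 KiB)
+ * and 31 (2 GiB).
+ */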
+ if (header.BlockSize != 9 ||
+ (header.EraseUnitSize < 10) || (header.EraseUnitSize > 31) ||
+ (header.NumTransferUnits >= le16_to_cpu(header.NumEraseUnits))) {
+ printk(KERN_NOTICE "ftl_cs: FTL header corrupt!\n");
+ return -1;
+ }
+ if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) {
+ printk(KERN_NOTICE "ftl: FTL EraseUnitSize %x != MTD erasesize %x\n",
+ 1 << header.EraseUnitSize,part->mbd.mtd->erasesize);
+ return -1;
+ }
+ part->header = header;
+ return 0;
+}
+
+static int build_maps(partition_t *part)
+{
+ erase_unit_header_t header;
+ uint16_t xvalid, xtrans, i;
+ unsigned blocks, j;
+ int hdr_ok, ret = -1;
+ size_t retval;
+ loff_t offset;
+
+ /* Set up erase unit maps */
+ part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) -
+ part->header.NumTransferUnits;
+ part->EUNInfo = kmalloc(part->DataUnits * sizeof(struct eun_info_t),
+ GFP_KERNEL);
+ if (!part->EUNInfo)
+ goto out;
+ for (i = 0; i < part->DataUnits; i++)
+ part->EUNInfo[i].Offset = 0xffffffff;
+ part->XferInfo =
+ kmalloc(part->header.NumTransferUnits * sizeof(struct xfer_info_t),
+ GFP_KERNEL);
+ if (!part->XferInfo)
+ goto out_EUNInfo;
+
+ xvalid = xtrans = 0;
+ for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
+ offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
+ << part->header.EraseUnitSize);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
+ (unsigned char *)&header);
+
+ if (ret)
+ goto out_XferInfo;
+
+ ret = -1;
+ /* Is this a transfer partition? */
+ hdr_ok = (strcmp(header.DataOrgTuple+3, "FTL100") == 0);
+ if (hdr_ok && (le16_to_cpu(header.LogicalEUN) < part->DataUnits) &&
+ (part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset == 0xffffffff)) {
+ part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset = offset;
+ part->EUNInfo[le16_to_cpu(header.LogicalEUN)].EraseCount =
+ le32_to_cpu(header.EraseCount);
+ xvalid++;
+ } else {
+ if (xtrans == part->header.NumTransferUnits) {
+ printk(KERN_NOTICE "ftl_cs: format error: too many "
+ "transfer units!\n");
+ goto out_XferInfo;
+ }
+ if (hdr_ok && (le16_to_cpu(header.LogicalEUN) == 0xffff)) {
+ part->XferInfo[xtrans].state = XFER_PREPARED;
+ part->XferInfo[xtrans].EraseCount = le32_to_cpu(header.EraseCount);
+ } else {
+ part->XferInfo[xtrans].state = XFER_UNKNOWN;
+ /* Pick anything reasonable for the erase count */
+ part->XferInfo[xtrans].EraseCount =
+ le32_to_cpu(part->header.EraseCount);
+ }
+ part->XferInfo[xtrans].Offset = offset;
+ xtrans++;
+ }
+ }
+ /* Check for format trouble */
+ header = part->header;
+ if ((xtrans != header.NumTransferUnits) ||
+ (xvalid+xtrans != le16_to_cpu(header.NumEraseUnits))) {
+ printk(KERN_NOTICE "ftl_cs: format error: erase units "
+ "don't add up!\n");
+ goto out_XferInfo;
+ }
+
+ /* Set up virtual page map */
+ blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
+ part->VirtualBlockMap = vmalloc(blocks * sizeof(uint32_t));
+ if (!part->VirtualBlockMap)
+ goto out_XferInfo;
+
+ memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));
+ part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;
+
+ part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!part->bam_cache)
+ goto out_VirtualBlockMap;
+
+ part->bam_index = 0xffff;
+ part->FreeTotal = 0;
+
+ for (i = 0; i < part->DataUnits; i++) {
+ part->EUNInfo[i].Free = 0;
+ part->EUNInfo[i].Deleted = 0;
+ offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
+
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retval,
+ (unsigned char *)part->bam_cache);
+
+ if (ret)
+ goto out_bam_cache;
+
+ for (j = 0; j < part->BlocksPerUnit; j++) {
+ if (BLOCK_FREE(le32_to_cpu(part->bam_cache[j]))) {
+ part->EUNInfo[i].Free++;
+ part->FreeTotal++;
+ } else if ((BLOCK_TYPE(le32_to_cpu(part->bam_cache[j])) == BLOCK_DATA) &&
+ (BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j])) < blocks))
+ part->VirtualBlockMap[BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j]))] =
+ (i << header.EraseUnitSize) + (j << header.BlockSize);
+ else if (BLOCK_DELETED(le32_to_cpu(part->bam_cache[j])))
+ part->EUNInfo[i].Deleted++;
+ }
+ }
+
+ ret = 0;
+ goto out;
+
+out_bam_cache:
+ kfree(part->bam_cache);
+out_VirtualBlockMap:
+ vfree(part->VirtualBlockMap);
+out_XferInfo:
+ kfree(part->XferInfo);
+out_EUNInfo:
+ kfree(part->EUNInfo);
+out:
+ return ret;
+} /* build_maps */
+
+/*======================================================================
+
+ Erase_xfer() schedules an asynchronous erase operation for a
+ transfer unit.
+
+======================================================================*/
+
+static int erase_xfer(partition_t *part,
+ uint16_t xfernum)
+{
+ int ret;
+ struct xfer_info_t *xfer;
+ struct erase_info *erase;
+
+ xfer = &part->XferInfo[xfernum];
+ pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
+ xfer->state = XFER_ERASING;
+
+ /* Is there a free erase slot? Always in MTD. */
+ erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+ if (!erase)
+ return -ENOMEM;
+
+ erase->mtd = part->mbd.mtd;
+ erase->callback = ftl_erase_callback;
+ erase->addr = xfer->Offset;
+ erase->len = 1 << part->header.EraseUnitSize;
+ erase->priv = (u_long)part;
+
+ ret = mtd_erase(part->mbd.mtd, erase);
+
+ if (!ret)
+ xfer->EraseCount++;
+ else
+ kfree(erase);
+
+ return ret;
+} /* erase_xfer */
+
+/*======================================================================
+
+ Prepare_xfer() takes a freshly erased transfer unit and gives
+ it an appropriate header.
+
+======================================================================*/
+
+static void ftl_erase_callback(struct erase_info *erase)
+{
+ partition_t *part;
+ struct xfer_info_t *xfer;
+ int i;
+
+ /* Look up the transfer unit */
+ part = (partition_t *)(erase->priv);
+
+ for (i = 0; i < part->header.NumTransferUnits; i++)
+ if (part->XferInfo[i].Offset == erase->addr) break;
+
+ if (i == part->header.NumTransferUnits) {
+ printk(KERN_NOTICE "ftl_cs: internal error: "
+ "erase lookup failed!\n");
+ return;
+ }
+
+ xfer = &part->XferInfo[i];
+ if (erase->state == MTD_ERASE_DONE)
+ xfer->state = XFER_ERASED;
+ else {
+ xfer->state = XFER_FAILED;
+ printk(KERN_NOTICE "ftl_cs: erase failed: state = %d\n",
+ erase->state);
+ }
+
+ kfree(erase);
+
+} /* ftl_erase_callback */
+
+static int prepare_xfer(partition_t *part, int i)
+{
+ erase_unit_header_t header;
+ struct xfer_info_t *xfer;
+ int nbam, ret;
+ uint32_t ctl;
+ size_t retlen;
+ loff_t offset;
+
+ xfer = &part->XferInfo[i];
+ xfer->state = XFER_FAILED;
+
+ pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
+
+ /* Write the transfer unit header */
+ header = part->header;
+ header.LogicalEUN = cpu_to_le16(0xffff);
+ header.EraseCount = cpu_to_le32(xfer->EraseCount);
+
+ ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
+ (u_char *)&header);
+
+ if (ret) {
+ return ret;
+ }
+
+ /* Write the BAM stub */
+ nbam = (part->BlocksPerUnit * sizeof(uint32_t) +
+ le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE;
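+ /*
+ * nbam counts the 512-byte sectors holding the unit header and the
+ * BAM itself; the loop below stamps the matching BAM entries as
+ * BLOCK_CONTROL so they are never handed out as data blocks.
+ */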
+
+ offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
+ ctl = cpu_to_le32(BLOCK_CONTROL);
+
+ for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
+
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&ctl);
+
+ if (ret)
+ return ret;
+ }
+ xfer->state = XFER_PREPARED;
+ return 0;
+
+} /* prepare_xfer */
+
+/*======================================================================
+
+ Copy_erase_unit() takes a full erase block and a transfer unit,
+ copies everything to the transfer unit, then swaps the block
+ pointers.
+
+ All data blocks are copied to the corresponding blocks in the
+ target unit, so the virtual block map does not need to be
+ updated.
+
+======================================================================*/
+
+static int copy_erase_unit(partition_t *part, uint16_t srcunit,
+ uint16_t xferunit)
+{
+ u_char buf[SECTOR_SIZE];
+ struct eun_info_t *eun;
+ struct xfer_info_t *xfer;
+ uint32_t src, dest, free, i;
+ uint16_t unit;
+ int ret;
+ size_t retlen;
+ loff_t offset;
+ uint16_t srcunitswap = cpu_to_le16(srcunit);
+
+ eun = &part->EUNInfo[srcunit];
+ xfer = &part->XferInfo[xferunit];
+ pr_debug("ftl_cs: copying block 0x%x to 0x%x\n",
+ eun->Offset, xfer->Offset);
+
+ /* Read current BAM */
+ if (part->bam_index != srcunit) {
+
+ offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
+
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retlen,
+ (u_char *)(part->bam_cache));
+
+ /* mark the cache bad, in case we get an error later */
+ part->bam_index = 0xffff;
+
+ if (ret) {
+ printk(KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
+ return ret;
+ }
+ }
+
+ /* Write the LogicalEUN for the transfer unit */
+ xfer->state = XFER_UNKNOWN;
+ offset = xfer->Offset + 20; /* Bad! Hard-coded offset of LogicalEUN in the unit header */
+ unit = cpu_to_le16(0x7fff);
+
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
+ (u_char *)&unit);
+
+ if (ret) {
+ printk(KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
+ return ret;
+ }
+
+ /* Copy all data blocks from source unit to transfer unit */
+ src = eun->Offset; dest = xfer->Offset;
+
+ free = 0;
+ ret = 0;
+ for (i = 0; i < part->BlocksPerUnit; i++) {
+ switch (BLOCK_TYPE(le32_to_cpu(part->bam_cache[i]))) {
+ case BLOCK_CONTROL:
+ /* This gets updated later */
+ break;
+ case BLOCK_DATA:
+ case BLOCK_REPLACEMENT:
+ ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
+ if (ret) {
+ printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
+ return ret;
+ }
+
+ ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
+ if (ret) {
+ printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
+ return ret;
+ }
+
+ break;
+ default:
+ /* All other blocks must be free */
+ part->bam_cache[i] = cpu_to_le32(0xffffffff);
+ free++;
+ break;
+ }
+ src += SECTOR_SIZE;
+ dest += SECTOR_SIZE;
+ }
+
+ /* Write the BAM to the transfer unit */
+ ret = mtd_write(part->mbd.mtd,
+ xfer->Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(int32_t),
+ &retlen,
+ (u_char *)part->bam_cache);
+ if (ret) {
+ printk(KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
+ return ret;
+ }
+
+ /* All clear? Then update the LogicalEUN again */
+ ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
+ &retlen, (u_char *)&srcunitswap);
+
+ if (ret) {
+ printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
+ return ret;
+ }
+
+ /* Update the maps and usage stats*/
+ i = xfer->EraseCount;
+ xfer->EraseCount = eun->EraseCount;
+ eun->EraseCount = i;
+ i = xfer->Offset;
+ xfer->Offset = eun->Offset;
+ eun->Offset = i;
+ part->FreeTotal -= eun->Free;
+ part->FreeTotal += free;
+ eun->Free = free;
+ eun->Deleted = 0;
+
+ /* Now, the cache should be valid for the new block */
+ part->bam_index = srcunit;
+
+ return 0;
+} /* copy_erase_unit */
+
+/*======================================================================
+
+ reclaim_block() picks a full erase unit and a transfer unit and
+ then calls copy_erase_unit() to copy one to the other. Then, it
+ schedules an erase on the expired block.
+
+ What's a good way to decide which transfer unit and which erase
+ unit to use? Beats me. My way is to always pick the transfer
+ unit with the fewest erases, and usually pick the data unit with
+ the most deleted blocks. But with a small probability, pick the
+ oldest data unit instead. This means that we generally postpone
+ the next reclamation as long as possible, but shuffle static
+ stuff around a bit for wear leveling.
+
+======================================================================*/
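+/*
+ * With the default shuffle_freq of 50, roughly one reclaim in fifty
+ * (whenever jiffies happens to be a multiple of shuffle_freq) recycles
+ * the least-erased data unit instead of the one with the most deleted
+ * blocks, slowly migrating static data and evening out wear.
+ */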
+
+static int reclaim_block(partition_t *part)
+{
+ uint16_t i, eun, xfer;
+ uint32_t best;
+ int queued, ret;
+
+ pr_debug("ftl_cs: reclaiming space...\n");
+ pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits);
+ /* Pick the least erased transfer unit */
+ best = 0xffffffff; xfer = 0xffff;
+ do {
+ queued = 0;
+ for (i = 0; i < part->header.NumTransferUnits; i++) {
+ int n=0;
+ if (part->XferInfo[i].state == XFER_UNKNOWN) {
+ pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i);
+ n=1;
+ erase_xfer(part, i);
+ }
+ if (part->XferInfo[i].state == XFER_ERASING) {
+ pr_debug("XferInfo[%d].state == XFER_ERASING\n",i);
+ n=1;
+ queued = 1;
+ }
+ else if (part->XferInfo[i].state == XFER_ERASED) {
+ pr_debug("XferInfo[%d].state == XFER_ERASED\n",i);
+ n=1;
+ prepare_xfer(part, i);
+ }
+ if (part->XferInfo[i].state == XFER_PREPARED) {
+ pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i);
+ n=1;
+ if (part->XferInfo[i].EraseCount <= best) {
+ best = part->XferInfo[i].EraseCount;
+ xfer = i;
+ }
+ }
+ if (!n)
+ pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
+
+ }
+ if (xfer == 0xffff) {
+ if (queued) {
+ pr_debug("ftl_cs: waiting for transfer "
+ "unit to be prepared...\n");
+ mtd_sync(part->mbd.mtd);
+ } else {
+ static int ne = 0;
+ if (++ne < 5)
+ printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
+ "suitable transfer units!\n");
+ else
+ pr_debug("ftl_cs: reclaim failed: no "
+ "suitable transfer units!\n");
+
+ return -EIO;
+ }
+ }
+ } while (xfer == 0xffff);
+
+ eun = 0;
+ if ((jiffies % shuffle_freq) == 0) {
+ pr_debug("ftl_cs: recycling freshest block...\n");
+ best = 0xffffffff;
+ for (i = 0; i < part->DataUnits; i++)
+ if (part->EUNInfo[i].EraseCount <= best) {
+ best = part->EUNInfo[i].EraseCount;
+ eun = i;
+ }
+ } else {
+ best = 0;
+ for (i = 0; i < part->DataUnits; i++)
+ if (part->EUNInfo[i].Deleted >= best) {
+ best = part->EUNInfo[i].Deleted;
+ eun = i;
+ }
+ if (best == 0) {
+ static int ne = 0;
+ if (++ne < 5)
+ printk(KERN_NOTICE "ftl_cs: reclaim failed: "
+ "no free blocks!\n");
+ else
+ pr_debug("ftl_cs: reclaim failed: "
+ "no free blocks!\n");
+
+ return -EIO;
+ }
+ }
+ ret = copy_erase_unit(part, eun, xfer);
+ if (!ret)
+ erase_xfer(part, xfer);
+ else
+ printk(KERN_NOTICE "ftl_cs: copy_erase_unit failed!\n");
+ return ret;
+} /* reclaim_block */
+
+/*======================================================================
+
+ Find_free() searches for a free block. If necessary, it updates
+ the BAM cache for the erase unit containing the free block. It
+ returns the block index -- the erase unit is just the currently
+ cached unit. If there are no free blocks, it returns 0 -- this
+ is never a valid data block because it contains the header.
+
+======================================================================*/
+
+#ifdef PSYCHO_DEBUG
+static void dump_lists(partition_t *part)
+{
+ int i;
+ printk(KERN_DEBUG "ftl_cs: Free total = %d\n", part->FreeTotal);
+ for (i = 0; i < part->DataUnits; i++)
+ printk(KERN_DEBUG "ftl_cs: unit %d: %d phys, %d free, "
+ "%d deleted\n", i,
+ part->EUNInfo[i].Offset >> part->header.EraseUnitSize,
+ part->EUNInfo[i].Free, part->EUNInfo[i].Deleted);
+}
+#endif
+
+static uint32_t find_free(partition_t *part)
+{
+ uint16_t stop, eun;
+ uint32_t blk;
+ size_t retlen;
+ int ret;
+
+ /* Find an erase unit with some free space */
+ stop = (part->bam_index == 0xffff) ? 0 : part->bam_index;
+ eun = stop;
+ do {
+ if (part->EUNInfo[eun].Free != 0) break;
+ /* Wrap around at end of table */
+ if (++eun == part->DataUnits) eun = 0;
+ } while (eun != stop);
+
+ if (part->EUNInfo[eun].Free == 0)
+ return 0;
+
+ /* Is this unit's BAM cached? */
+ if (eun != part->bam_index) {
+ /* Invalidate cache */
+ part->bam_index = 0xffff;
+
+ ret = mtd_read(part->mbd.mtd,
+ part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(uint32_t),
+ &retlen,
+ (u_char *)(part->bam_cache));
+
+ if (ret) {
+ printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
+ return 0;
+ }
+ part->bam_index = eun;
+ }
+
+ /* Find a free block */
+ for (blk = 0; blk < part->BlocksPerUnit; blk++)
+ if (BLOCK_FREE(le32_to_cpu(part->bam_cache[blk]))) break;
+ if (blk == part->BlocksPerUnit) {
+#ifdef PSYCHO_DEBUG
+ static int ne = 0;
+ if (++ne == 1)
+ dump_lists(part);
+#endif
+ printk(KERN_NOTICE "ftl_cs: bad free list!\n");
+ return 0;
+ }
+ pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun);
+ return blk;
+
+} /* find_free */
+
+
+/*======================================================================
+
+ Read a series of sectors from an FTL partition.
+
+======================================================================*/
+
+static int ftl_read(partition_t *part, caddr_t buffer,
+ u_long sector, u_long nblocks)
+{
+ uint32_t log_addr, bsize;
+ u_long i;
+ int ret;
+ size_t offset, retlen;
+
+ pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
+ part, sector, nblocks);
+ if (!(part->state & FTL_FORMATTED)) {
+ printk(KERN_NOTICE "ftl_cs: bad partition\n");
+ return -EIO;
+ }
+ bsize = 1 << part->header.EraseUnitSize;
+
+ for (i = 0; i < nblocks; i++) {
+ if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {
+ printk(KERN_NOTICE "ftl_cs: bad read offset\n");
+ return -EIO;
+ }
+ log_addr = part->VirtualBlockMap[sector+i];
+ if (log_addr == 0xffffffff)
+ memset(buffer, 0, SECTOR_SIZE);
+ else {
+ offset = (part->EUNInfo[log_addr / bsize].Offset
+ + (log_addr % bsize));
+ ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
+ (u_char *)buffer);
+
+ if (ret) {
+ printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
+ return ret;
+ }
+ }
+ buffer += SECTOR_SIZE;
+ }
+ return 0;
+} /* ftl_read */
+
+/*======================================================================
+
+ Write a series of sectors to an FTL partition
+
+======================================================================*/
+
+static int set_bam_entry(partition_t *part, uint32_t log_addr,
+ uint32_t virt_addr)
+{
+ uint32_t bsize, blk, le_virt_addr;
+#ifdef PSYCHO_DEBUG
+ uint32_t old_addr;
+#endif
+ uint16_t eun;
+ int ret;
+ size_t retlen, offset;
+
+ pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
+ part, log_addr, virt_addr);
+ bsize = 1 << part->header.EraseUnitSize;
+ eun = log_addr / bsize;
+ blk = (log_addr % bsize) / SECTOR_SIZE;
+ offset = (part->EUNInfo[eun].Offset + blk * sizeof(uint32_t) +
+ le32_to_cpu(part->header.BAMOffset));
+
+#ifdef PSYCHO_DEBUG
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&old_addr);
+ if (ret) {
+ printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
+ return ret;
+ }
+ old_addr = le32_to_cpu(old_addr);
+
+ if (((virt_addr == 0xfffffffe) && !BLOCK_FREE(old_addr)) ||
+ ((virt_addr == 0) && (BLOCK_TYPE(old_addr) != BLOCK_DATA)) ||
+ (!BLOCK_DELETED(virt_addr) && (old_addr != 0xfffffffe))) {
+ static int ne = 0;
+ if (++ne < 5) {
+ printk(KERN_NOTICE "ftl_cs: set_bam_entry() inconsistency!\n");
+ printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, old = 0x%x"
+ ", new = 0x%x\n", log_addr, old_addr, virt_addr);
+ }
+ return -EIO;
+ }
+#endif
+ le_virt_addr = cpu_to_le32(virt_addr);
+ if (part->bam_index == eun) {
+#ifdef PSYCHO_DEBUG
+ if (le32_to_cpu(part->bam_cache[blk]) != old_addr) {
+ static int ne = 0;
+ if (++ne < 5) {
+ printk(KERN_NOTICE "ftl_cs: set_bam_entry() "
+ "inconsistency!\n");
+ printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, cache"
+ " = 0x%x\n",
+ le32_to_cpu(part->bam_cache[blk]), old_addr);
+ }
+ return -EIO;
+ }
+#endif
+ part->bam_cache[blk] = le_virt_addr;
+ }
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&le_virt_addr);
+
+ if (ret) {
+ printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
+ printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, new = 0x%x\n",
+ log_addr, virt_addr);
+ }
+ return ret;
+} /* set_bam_entry */
+
+static int ftl_write(partition_t *part, caddr_t buffer,
+ u_long sector, u_long nblocks)
+{
+ uint32_t bsize, log_addr, virt_addr, old_addr, blk;
+ u_long i;
+ int ret;
+ size_t retlen, offset;
+
+ pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
+ part, sector, nblocks);
+ if (!(part->state & FTL_FORMATTED)) {
+ printk(KERN_NOTICE "ftl_cs: bad partition\n");
+ return -EIO;
+ }
+ /* See if we need to reclaim space, before we start */
+ while (part->FreeTotal < nblocks) {
+ ret = reclaim_block(part);
+ if (ret)
+ return ret;
+ }
+
+ bsize = 1 << part->header.EraseUnitSize;
+
+ virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;
+ for (i = 0; i < nblocks; i++) {
+ if (virt_addr >= le32_to_cpu(part->header.FormattedSize)) {
+ printk(KERN_NOTICE "ftl_cs: bad write offset\n");
+ return -EIO;
+ }
+
+ /* Grab a free block */
+ blk = find_free(part);
+ if (blk == 0) {
+ static int ne = 0;
+ if (++ne < 5)
+ printk(KERN_NOTICE "ftl_cs: internal error: "
+ "no free blocks!\n");
+ return -ENOSPC;
+ }
+
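+ /*
+ * The update order below is what makes a crash recoverable: the BAM
+ * entry is first tagged 0xfffffffe (allocated, data not yet valid),
+ * then the sector data is written, and only once the new copy exists
+ * is the old mapping deleted and the entry finalised with the real
+ * virtual address.
+ */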
+ /* Tag the BAM entry, and write the new block */
+ log_addr = part->bam_index * bsize + blk * SECTOR_SIZE;
+ part->EUNInfo[part->bam_index].Free--;
+ part->FreeTotal--;
+ if (set_bam_entry(part, log_addr, 0xfffffffe))
+ return -EIO;
+ part->EUNInfo[part->bam_index].Deleted++;
+ offset = (part->EUNInfo[part->bam_index].Offset +
+ blk * SECTOR_SIZE);
+ ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer);
+
+ if (ret) {
+ printk(KERN_NOTICE "ftl_cs: block write failed!\n");
+ printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr"
+ " = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr,
+ offset);
+ return -EIO;
+ }
+
+ /* Only delete the old entry when the new entry is ready */
+ old_addr = part->VirtualBlockMap[sector+i];
+ if (old_addr != 0xffffffff) {
+ part->VirtualBlockMap[sector+i] = 0xffffffff;
+ part->EUNInfo[old_addr/bsize].Deleted++;
+ if (set_bam_entry(part, old_addr, 0))
+ return -EIO;
+ }
+
+ /* Finally, set up the new pointers */
+ if (set_bam_entry(part, log_addr, virt_addr))
+ return -EIO;
+ part->VirtualBlockMap[sector+i] = log_addr;
+ part->EUNInfo[part->bam_index].Deleted--;
+
+ buffer += SECTOR_SIZE;
+ virt_addr += SECTOR_SIZE;
+ }
+ return 0;
+} /* ftl_write */
+
+static int ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ partition_t *part = (void *)dev;
+ u_long sect;
+
+ /* Sort of arbitrary: round size down to 4KiB boundary */
+ sect = le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE;
+
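+ /* 1 head x 8 sectors x 512 bytes gives 4 KiB cylinders, hence ">> 3" */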
+ geo->heads = 1;
+ geo->sectors = 8;
+ geo->cylinders = sect >> 3;
+
+ return 0;
+}
+
+static int ftl_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ return ftl_read((void *)dev, buf, block, 1);
+}
+
+static int ftl_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ return ftl_write((void *)dev, buf, block, 1);
+}
+
+static int ftl_discardsect(struct mtd_blktrans_dev *dev,
+ unsigned long sector, unsigned nr_sects)
+{
+ partition_t *part = (void *)dev;
+ uint32_t bsize = 1 << part->header.EraseUnitSize;
+
+ pr_debug("FTL erase sector %ld for %d sectors\n",
+ sector, nr_sects);
+
+ while (nr_sects) {
+ uint32_t old_addr = part->VirtualBlockMap[sector];
+ if (old_addr != 0xffffffff) {
+ part->VirtualBlockMap[sector] = 0xffffffff;
+ part->EUNInfo[old_addr/bsize].Deleted++;
+ if (set_bam_entry(part, old_addr, 0))
+ return -EIO;
+ }
+ nr_sects--;
+ sector++;
+ }
+
+ return 0;
+}
+/*====================================================================*/
+
+static void ftl_freepart(partition_t *part)
+{
+ vfree(part->VirtualBlockMap);
+ part->VirtualBlockMap = NULL;
+ kfree(part->EUNInfo);
+ part->EUNInfo = NULL;
+ kfree(part->XferInfo);
+ part->XferInfo = NULL;
+ kfree(part->bam_cache);
+ part->bam_cache = NULL;
+} /* ftl_freepart */
+
+static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ partition_t *partition;
+
+ partition = kzalloc(sizeof(partition_t), GFP_KERNEL);
+
+ if (!partition) {
+ printk(KERN_WARNING "No memory to scan for FTL on %s\n",
+ mtd->name);
+ return;
+ }
+
+ partition->mbd.mtd = mtd;
+
+ if ((scan_header(partition) == 0) &&
+ (build_maps(partition) == 0)) {
+
+ partition->state = FTL_FORMATTED;
+#ifdef PCMCIA_DEBUG
+ printk(KERN_INFO "ftl_cs: opening %d KiB FTL partition\n",
+ le32_to_cpu(partition->header.FormattedSize) >> 10);
+#endif
+ partition->mbd.size = le32_to_cpu(partition->header.FormattedSize) >> 9;
+
+ partition->mbd.tr = tr;
+ partition->mbd.devnum = -1;
+ if (!add_mtd_blktrans_dev((void *)partition))
+ return;
+ }
+
+ kfree(partition);
+}
+
+static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ del_mtd_blktrans_dev(dev);
+ ftl_freepart((partition_t *)dev);
+}
+
+static struct mtd_blktrans_ops ftl_tr = {
+ .name = "ftl",
+ .major = FTL_MAJOR,
+ .part_bits = PART_BITS,
+ .blksize = SECTOR_SIZE,
+ .readsect = ftl_readsect,
+ .writesect = ftl_writesect,
+ .discard = ftl_discardsect,
+ .getgeo = ftl_getgeo,
+ .add_mtd = ftl_add_mtd,
+ .remove_dev = ftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_ftl(void)
+{
+ return register_mtd_blktrans(&ftl_tr);
+}
+
+static void __exit cleanup_ftl(void)
+{
+ deregister_mtd_blktrans(&ftl_tr);
+}
+
+module_init(init_ftl);
+module_exit(cleanup_ftl);
+
+
+MODULE_LICENSE("Dual MPL/GPL");
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Support code for Flash Translation Layer, used on PCMCIA devices");
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
new file mode 100644
index 000000000..b66b54187
--- /dev/null
+++ b/drivers/mtd/inftlcore.c
@@ -0,0 +1,968 @@
+/*
+ * inftlcore.c -- Linux driver for Inverse Flash Translation Layer (INFTL)
+ *
+ * Copyright © 2002, Greg Ungerer (gerg@snapgear.com)
+ *
+ * Based heavily on the nftlcore.c code which is:
+ * Copyright © 1999 Machine Vision Holdings, Inc.
+ * Copyright © 1999 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/hdreg.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nftl.h>
+#include <linux/mtd/inftl.h>
+#include <linux/mtd/nand.h>
+#include <asm/uaccess.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+/*
+ * Maximum number of loops while examining next block, to have a
+ * chance to detect consistency problems (they should never happen
+ * because of the checks done during mounting).
+ */
+#define MAX_LOOPS 10000
+
+static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct INFTLrecord *inftl;
+ unsigned long temp;
+
+ if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
+ return;
+ /* OK, this is moderately ugly. But probably safe. Alternatives? */
+ if (memcmp(mtd->name, "DiskOnChip", 10))
+ return;
+
+ if (!mtd->_block_isbad) {
+ printk(KERN_ERR
+"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
+"Please use the new diskonchip driver under the NAND subsystem.\n");
+ return;
+ }
+
+ pr_debug("INFTL: add_mtd for %s\n", mtd->name);
+
+ inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);
+
+ if (!inftl)
+ return;
+
+ inftl->mbd.mtd = mtd;
+ inftl->mbd.devnum = -1;
+
+ inftl->mbd.tr = tr;
+
+ if (INFTL_mount(inftl) < 0) {
+ printk(KERN_WARNING "INFTL: could not mount device\n");
+ kfree(inftl);
+ return;
+ }
+
+ /* OK, it's a new one. Set up all the data structures. */
+
+ /* Calculate geometry */
+ inftl->cylinders = 1024;
+ inftl->heads = 16;
+
+ temp = inftl->cylinders * inftl->heads;
+ inftl->sectors = inftl->mbd.size / temp;
+ if (inftl->mbd.size % temp) {
+ inftl->sectors++;
+ temp = inftl->cylinders * inftl->sectors;
+ inftl->heads = inftl->mbd.size / temp;
+
+ if (inftl->mbd.size % temp) {
+ inftl->heads++;
+ temp = inftl->heads * inftl->sectors;
+ inftl->cylinders = inftl->mbd.size / temp;
+ }
+ }
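+ /*
+ * Worked example (hypothetical size): for mbd.size = 16384 sectors
+ * the first pass gives sectors = 16384 / (1024 * 16) = 1 with no
+ * remainder, so C/H/S = 1024/16/1 stands. A size that does not
+ * divide evenly bumps sectors, then heads, and recomputes cylinders;
+ * if the product still misses, the warning below fires.
+ */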
+
+ if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) {
+ /*
+ * We don't have mbd.size == heads * cylinders * sectors
+ */
+ printk(KERN_WARNING "INFTL: cannot calculate a geometry to "
+ "match size of 0x%lx.\n", inftl->mbd.size);
+ printk(KERN_WARNING "INFTL: using C:%d H:%d S:%d "
+ "(== 0x%lx sects)\n",
+ inftl->cylinders, inftl->heads, inftl->sectors,
+ (long)inftl->cylinders * (long)inftl->heads *
+ (long)inftl->sectors);
+ }
+
+ if (add_mtd_blktrans_dev(&inftl->mbd)) {
+ kfree(inftl->PUtable);
+ kfree(inftl->VUtable);
+ kfree(inftl);
+ return;
+ }
+#ifdef PSYCHO_DEBUG
+ printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a');
+#endif
+ return;
+}
+
+static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct INFTLrecord *inftl = (void *)dev;
+
+ pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum);
+
+ del_mtd_blktrans_dev(dev);
+
+ kfree(inftl->PUtable);
+ kfree(inftl->VUtable);
+}
+
+/*
+ * Actual INFTL access routines.
+ */
+
+/*
+ * Read oob data from flash
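+ *
+ * offs is a raw byte offset into the device: the page-aligned part
+ * picks the page to read and the low bits index into that page's
+ * OOB area (the code assumes callers stay within a single page).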
+ */
+int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs & (mtd->writesize - 1);
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ *retlen = ops.oobretlen;
+ return res;
+}
+
+/*
+ * Write oob data to flash
+ */
+int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs & (mtd->writesize - 1);
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ *retlen = ops.oobretlen;
+ return res;
+}
+
+/*
+ * Write data and oob to flash
+ */
+static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf, uint8_t *oob)
+{
+ struct mtd_oob_ops ops;
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = oob;
+ ops.datbuf = buf;
+ ops.len = len;
+
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ *retlen = ops.retlen;
+ return res;
+}
+
+/*
+ * INFTL_findfreeblock: Find a free Erase Unit on the INFTL partition.
+ * It is called when a free Erase Unit is needed for a given Virtual Unit Chain.
+ */
+static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
+{
+ u16 pot = inftl->LastFreeEUN;
+ int silly = inftl->nb_blocks;
+
+ pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n",
+ inftl, desperate);
+
+ /*
+ * Normally, we force a fold to happen before we run out of free
+ * blocks completely.
+ */
+ if (!desperate && inftl->numfreeEUNs < 2) {
+ pr_debug("INFTL: there are too few free EUNs (%d)\n",
+ inftl->numfreeEUNs);
+ return BLOCK_NIL;
+ }
+
+ /* Scan for a free block */
+ do {
+ if (inftl->PUtable[pot] == BLOCK_FREE) {
+ inftl->LastFreeEUN = pot;
+ return pot;
+ }
+
+ if (++pot > inftl->lastEUN)
+ pot = 0;
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: no free blocks found! "
+ "EUN range = %d - %d\n", 0, inftl->LastFreeEUN);
+ return BLOCK_NIL;
+ }
+ } while (pot != inftl->LastFreeEUN);
+
+ return BLOCK_NIL;
+}
+
+static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned pendingblock)
+{
+ u16 BlockMap[MAX_SECTORS_PER_UNIT];
+ unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
+ unsigned int thisEUN, prevEUN, status;
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ int block, silly;
+ unsigned int targetEUN;
+ struct inftl_oob oob;
+ size_t retlen;
+
+ pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n",
+ inftl, thisVUC, pendingblock);
+
+ memset(BlockMap, 0xff, sizeof(BlockMap));
+ memset(BlockDeleted, 0, sizeof(BlockDeleted));
+
+ thisEUN = targetEUN = inftl->VUtable[thisVUC];
+
+ if (thisEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "INFTL: trying to fold non-existent "
+ "Virtual Unit Chain %d!\n", thisVUC);
+ return BLOCK_NIL;
+ }
+
+ /*
+ * Scan to find the Erase Unit which holds the actual data for each
+ * 512-byte block within the Chain.
+ */
+ silly = MAX_LOOPS;
+ while (thisEUN < inftl->nb_blocks) {
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
+ if ((BlockMap[block] != BLOCK_NIL) ||
+ BlockDeleted[block])
+ continue;
+
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
+ + (block * SECTORSIZE), 16, &retlen,
+ (char *)&oob) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = oob.b.Status | oob.b.Status1;
+
+ switch (status) {
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ case SECTOR_USED:
+ BlockMap[block] = thisEUN;
+ continue;
+ case SECTOR_DELETED:
+ BlockDeleted[block] = 1;
+ continue;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status "
+ "for block %d in EUN %d: %x\n",
+ block, thisEUN, status);
+ break;
+ }
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in Virtual "
+ "Unit Chain 0x%x\n", thisVUC);
+ return BLOCK_NIL;
+ }
+
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+ /*
+ * OK. We now know the location of every block in the Virtual Unit
+ * Chain, and the Erase Unit into which we are supposed to be copying.
+ * Go for it.
+ */
+ pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN);
+
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
+ unsigned char movebuf[SECTORSIZE];
+ int ret;
+
+ /*
+ * If it's in the target EUN already, or if it's pending write,
+ * do nothing.
+ */
+ if (BlockMap[block] == targetEUN || (pendingblock ==
+ (thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) {
+ continue;
+ }
+
+ /*
+ * Copy only in non free block (free blocks can only
+ * happen in case of media errors or deleted blocks).
+ */
+ if (BlockMap[block] == BLOCK_NIL)
+ continue;
+
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
+ if (ret != -EIO)
+ pr_debug("INFTL: error went away on retry?\n");
+ }
+ memset(&oob, 0xff, sizeof(struct inftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+ inftl_write(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) +
+ (block * SECTORSIZE), SECTORSIZE, &retlen,
+ movebuf, (char *)&oob);
+ }
+
+ /*
+ * Newest unit in chain now contains data from _all_ older units.
+ * So go through and erase each unit in the chain, oldest first. (This
+ * ordering is important: if we crash or reboot part-way through, it
+ * is relatively simple to clean up the mess.)
+ */
+ pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC);
+
+ for (;;) {
+ /* Find oldest unit in chain. */
+ thisEUN = inftl->VUtable[thisVUC];
+ prevEUN = BLOCK_NIL;
+ while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
+ prevEUN = thisEUN;
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+ /* Check if we are all done */
+ if (thisEUN == targetEUN)
+ break;
+
+ /* Unlink the last block from the chain. */
+ inftl->PUtable[prevEUN] = BLOCK_NIL;
+
+ /* Now try to erase it. */
+ if (INFTL_formatblock(inftl, thisEUN) < 0) {
+ /*
+ * Could not erase : mark block as reserved.
+ */
+ inftl->PUtable[thisEUN] = BLOCK_RESERVED;
+ } else {
+ /* Correctly erased : mark it as free */
+ inftl->PUtable[thisEUN] = BLOCK_FREE;
+ inftl->numfreeEUNs++;
+ }
+ }
+
+ return targetEUN;
+}
+
+static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock)
+{
+ /*
+ * This is the part that needs some cleverness applied.
+ * For now, I'm doing the minimum applicable to actually
+ * get the thing to work.
+ * Wear-levelling and other clever stuff needs to be implemented
+ * and we also need to do some assessment of the results when
+ * the system loses power half-way through the routine.
+ */
+ u16 LongestChain = 0;
+ u16 ChainLength = 0, thislen;
+ u16 chain, EUN;
+
+ pr_debug("INFTL: INFTL_makefreeblock(inftl=%p,"
+ "pending=%d)\n", inftl, pendingblock);
+
+ for (chain = 0; chain < inftl->nb_blocks; chain++) {
+ EUN = inftl->VUtable[chain];
+ thislen = 0;
+
+ while (EUN <= inftl->lastEUN) {
+ thislen++;
+ EUN = inftl->PUtable[EUN];
+ if (thislen > 0xff00) {
+ printk(KERN_WARNING "INFTL: endless loop in "
+ "Virtual Chain %d: Unit %x\n",
+ chain, EUN);
+ /*
+ * Actually, don't return failure.
+ * Just ignore this chain and get on with it.
+ */
+ thislen = 0;
+ break;
+ }
+ }
+
+ if (thislen > ChainLength) {
+ ChainLength = thislen;
+ LongestChain = chain;
+ }
+ }
+
+ if (ChainLength < 2) {
+ printk(KERN_WARNING "INFTL: no Virtual Unit Chains available "
+ "for folding. Failing request\n");
+ return BLOCK_NIL;
+ }
+
+ return INFTL_foldchain(inftl, LongestChain, pendingblock);
+}
+
+static int nrbits(unsigned int val, int bitcount)
+{
+ int i, total = 0;
+
+ for (i = 0; (i < bitcount); i++)
+ total += (((0x1 << i) & val) ? 1 : 0);
+ return total;
+}
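+
+/*
+ * nrbits() is a population count, e.g. nrbits(0xb, 4) = 3. The write
+ * path below uses its low bit to build parityPerField: one parity bit
+ * each for the VUC number, previous unit number, ANAC and NACs fields.
+ */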
+
+/*
+ * INFTL_findwriteunit: Return the unit number into which we can write
+ * for this block. Make it available if it isn't already.
+ */
+static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
+{
+ unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE);
+ unsigned int thisEUN, writeEUN, prev_block, status;
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1);
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ struct inftl_oob oob;
+ struct inftl_bci bci;
+ unsigned char anac, nacs, parity;
+ size_t retlen;
+ int silly, silly2 = 3;
+
+ pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n",
+ inftl, block);
+
+ do {
+ /*
+ * Scan the media to find a unit in the VUC which has
+ * a free space for the block in question.
+ */
+ writeEUN = BLOCK_NIL;
+ thisEUN = inftl->VUtable[thisVUC];
+ silly = MAX_LOOPS;
+
+ while (thisEUN <= inftl->lastEUN) {
+ inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci);
+
+ status = bci.Status | bci.Status1;
+ pr_debug("INFTL: status of block %d in EUN %d is %x\n",
+ block , writeEUN, status);
+
+ switch(status) {
+ case SECTOR_FREE:
+ writeEUN = thisEUN;
+ break;
+ case SECTOR_DELETED:
+ case SECTOR_USED:
+ /* Can't go any further */
+ goto hitused;
+ case SECTOR_IGNORE:
+ break;
+ default:
+ /*
+ * Invalid block. Don't use it any more.
+ * Must implement.
+ */
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in "
+ "Virtual Unit Chain 0x%x\n", thisVUC);
+ return BLOCK_NIL;
+ }
+
+ /* Skip to next block in chain */
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+hitused:
+ if (writeEUN != BLOCK_NIL)
+ return writeEUN;
+
+ /*
+ * OK. We didn't find one in the existing chain, or there
+ * is no existing chain. Allocate a new one.
+ */
+ writeEUN = INFTL_findfreeblock(inftl, 0);
+
+ if (writeEUN == BLOCK_NIL) {
+ /*
+ * That didn't work - there were no free blocks just
+ * waiting to be picked up. We're going to have to fold
+ * a chain to make room.
+ */
+ thisEUN = INFTL_makefreeblock(inftl, block);
+
+ /*
+ * Hopefully we freed something; let's try again.
+ * This time we are desperate...
+ */
+ pr_debug("INFTL: using desperate==1 to find free EUN "
+ "to accommodate write to VUC %d\n",
+ thisVUC);
+ writeEUN = INFTL_findfreeblock(inftl, 1);
+ if (writeEUN == BLOCK_NIL) {
+ /*
+ * Ouch. This should never happen - we should
+ * always be able to make some room somehow.
+ * If we get here, we've allocated more storage
+ * space than actual media, or our makefreeblock
+ * routine is missing something.
+ */
+ printk(KERN_WARNING "INFTL: cannot make free "
+ "space.\n");
+#ifdef DEBUG
+ INFTL_dumptables(inftl);
+ INFTL_dumpVUchains(inftl);
+#endif
+ return BLOCK_NIL;
+ }
+ }
+
+ /*
+ * Insert new block into virtual chain. Firstly update the
+ * block headers in flash...
+ */
+ anac = 0;
+ nacs = 0;
+ thisEUN = inftl->VUtable[thisVUC];
+ if (thisEUN != BLOCK_NIL) {
+ inftl_read_oob(mtd, thisEUN * inftl->EraseSize
+ + 8, 8, &retlen, (char *)&oob.u);
+ anac = oob.u.a.ANAC + 1;
+ nacs = oob.u.a.NACs + 1;
+ }
+
+ prev_block = inftl->VUtable[thisVUC];
+ if (prev_block < inftl->nb_blocks)
+ prev_block -= inftl->firstEUN;
+
+ parity = (nrbits(thisVUC, 16) & 0x1) ? 0x1 : 0;
+ parity |= (nrbits(prev_block, 16) & 0x1) ? 0x2 : 0;
+ parity |= (nrbits(anac, 8) & 0x1) ? 0x4 : 0;
+ parity |= (nrbits(nacs, 8) & 0x1) ? 0x8 : 0;
+
+ oob.u.a.virtualUnitNo = cpu_to_le16(thisVUC);
+ oob.u.a.prevUnitNo = cpu_to_le16(prev_block);
+ oob.u.a.ANAC = anac;
+ oob.u.a.NACs = nacs;
+ oob.u.a.parityPerField = parity;
+ oob.u.a.discarded = 0xaa;
+
+ inftl_write_oob(mtd, writeEUN * inftl->EraseSize + 8, 8,
+ &retlen, (char *)&oob.u);
+
+ /* Also back up header... */
+ oob.u.b.virtualUnitNo = cpu_to_le16(thisVUC);
+ oob.u.b.prevUnitNo = cpu_to_le16(prev_block);
+ oob.u.b.ANAC = anac;
+ oob.u.b.NACs = nacs;
+ oob.u.b.parityPerField = parity;
+ oob.u.b.discarded = 0xaa;
+
+ inftl_write_oob(mtd, writeEUN * inftl->EraseSize +
+ SECTORSIZE * 4 + 8, 8, &retlen, (char *)&oob.u);
+
+ inftl->PUtable[writeEUN] = inftl->VUtable[thisVUC];
+ inftl->VUtable[thisVUC] = writeEUN;
+
+ inftl->numfreeEUNs--;
+ return writeEUN;
+
+ } while (silly2--);
+
+ printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
+ "Unit Chain 0x%x\n", thisVUC);
+ return BLOCK_NIL;
+}
+
+/*
+ * Given a Virtual Unit Chain, see if it can be deleted, and if so do it.
+ */
+static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
+{
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ unsigned char BlockUsed[MAX_SECTORS_PER_UNIT];
+ unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
+ unsigned int thisEUN, status;
+ int block, silly;
+ struct inftl_bci bci;
+ size_t retlen;
+
+ pr_debug("INFTL: INFTL_trydeletechain(inftl=%p,"
+ "thisVUC=%d)\n", inftl, thisVUC);
+
+ memset(BlockUsed, 0, sizeof(BlockUsed));
+ memset(BlockDeleted, 0, sizeof(BlockDeleted));
+
+ thisEUN = inftl->VUtable[thisVUC];
+ if (thisEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "INFTL: trying to delete non-existent "
+ "Virtual Unit Chain %d!\n", thisVUC);
+ return;
+ }
+
+ /*
+ * Scan through the Erase Units to determine whether any data is in
+ * each of the 512-byte blocks within the Chain.
+ */
+ silly = MAX_LOOPS;
+ while (thisEUN < inftl->nb_blocks) {
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) {
+ if (BlockUsed[block] || BlockDeleted[block])
+ continue;
+
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
+ + (block * SECTORSIZE), 8 , &retlen,
+ (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch (status) {
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ case SECTOR_USED:
+ BlockUsed[block] = 1;
+ continue;
+ case SECTOR_DELETED:
+ BlockDeleted[block] = 1;
+ continue;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status "
+ "for block %d in EUN %d: 0x%x\n",
+ block, thisEUN, status);
+ }
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in Virtual "
+ "Unit Chain 0x%x\n", thisVUC);
+ return;
+ }
+
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++)
+ if (BlockUsed[block])
+ return;
+
+ /*
+ * For each block in the chain free it and make it available
+ * for future use. Erase from the oldest unit first.
+ */
+ pr_debug("INFTL: deleting empty VUC %d\n", thisVUC);
+
+ for (;;) {
+ u16 *prevEUN = &inftl->VUtable[thisVUC];
+ thisEUN = *prevEUN;
+
+ /* If the chain is all gone already, we're done */
+ if (thisEUN == BLOCK_NIL) {
+ pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
+ return;
+ }
+
+ /* Find oldest unit in chain. */
+ while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
+ BUG_ON(thisEUN >= inftl->nb_blocks);
+
+ prevEUN = &inftl->PUtable[thisEUN];
+ thisEUN = *prevEUN;
+ }
+
+ pr_debug("Deleting EUN %d from VUC %d\n",
+ thisEUN, thisVUC);
+
+ if (INFTL_formatblock(inftl, thisEUN) < 0) {
+ /*
+ * Could not erase : mark block as reserved.
+ */
+ inftl->PUtable[thisEUN] = BLOCK_RESERVED;
+ } else {
+ /* Correctly erased : mark it as free */
+ inftl->PUtable[thisEUN] = BLOCK_FREE;
+ inftl->numfreeEUNs++;
+ }
+
+ /* Now sort out whatever was pointing to it... */
+ *prevEUN = BLOCK_NIL;
+
+ /* Ideally we'd actually be responsive to new
+ requests while we're doing this -- if there's
+ free space why should others be made to wait? */
+ cond_resched();
+ }
+
+ inftl->VUtable[thisVUC] = BLOCK_NIL;
+}
+
+static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
+{
+ unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ unsigned int status;
+ int silly = MAX_LOOPS;
+ size_t retlen;
+ struct inftl_bci bci;
+
+ pr_debug("INFTL: INFTL_deleteblock(inftl=%p,"
+ "block=%d)\n", inftl, block);
+
+ while (thisEUN < inftl->nb_blocks) {
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch (status) {
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ case SECTOR_DELETED:
+ thisEUN = BLOCK_NIL;
+ goto foundit;
+ case SECTOR_USED:
+ goto foundit;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status for "
+ "block %d in EUN %d: 0x%x\n",
+ block, thisEUN, status);
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in Virtual "
+ "Unit Chain 0x%x\n",
+ block / (inftl->EraseSize / SECTORSIZE));
+ return 1;
+ }
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+foundit:
+ if (thisEUN != BLOCK_NIL) {
+ loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
+
+ if (inftl_read_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0)
+ return -EIO;
+ bci.Status = bci.Status1 = SECTOR_DELETED;
+ if (inftl_write_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0)
+ return -EIO;
+ INFTL_trydeletechain(inftl, block / (inftl->EraseSize / SECTORSIZE));
+ }
+ return 0;
+}
+
+static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
+{
+ struct INFTLrecord *inftl = (void *)mbd;
+ unsigned int writeEUN;
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
+ size_t retlen;
+ struct inftl_oob oob;
+ char *p, *pend;
+
+ pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld,"
+ "buffer=%p)\n", inftl, block, buffer);
+
+ /* Is block all zero? */
+ pend = buffer + SECTORSIZE;
+ for (p = buffer; p < pend && !*p; p++)
+ ;
+
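+ /*
+ * An all-zero sector is never written: its mapping is simply deleted,
+ * and inftl_readblock() returns zeroes for unmapped sectors, so the
+ * data round-trips without consuming a flash block.
+ */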
+ if (p < pend) {
+ writeEUN = INFTL_findwriteunit(inftl, block);
+
+ if (writeEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "inftl_writeblock(): cannot find "
+ "block to write to\n");
+ /*
+ * If we _still_ haven't got a block to use,
+ * we're screwed.
+ */
+ return 1;
+ }
+
+ memset(&oob, 0xff, sizeof(struct inftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+ inftl_write(inftl->mbd.mtd, (writeEUN * inftl->EraseSize) +
+ blockofs, SECTORSIZE, &retlen, (char *)buffer,
+ (char *)&oob);
+ /*
+ * need to write SECTOR_USED flags since they are not written
+ * in mtd_writeecc
+ */
+ } else {
+ INFTL_deleteblock(inftl, block);
+ }
+
+ return 0;
+}
+
+static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
+{
+ struct INFTLrecord *inftl = (void *)mbd;
+ unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ unsigned int status;
+ int silly = MAX_LOOPS;
+ struct inftl_bci bci;
+ size_t retlen;
+
+ pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld,"
+ "buffer=%p)\n", inftl, block, buffer);
+
+ while (thisEUN < inftl->nb_blocks) {
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch (status) {
+ case SECTOR_DELETED:
+ thisEUN = BLOCK_NIL;
+ goto foundit;
+ case SECTOR_USED:
+ goto foundit;
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status for "
+ "block %ld in EUN %d: 0x%04x\n",
+ block, thisEUN, status);
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in "
+ "Virtual Unit Chain 0x%lx\n",
+ block / (inftl->EraseSize / SECTORSIZE));
+ return 1;
+ }
+
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+foundit:
+ if (thisEUN == BLOCK_NIL) {
+ /* The requested block is not on the media, return all 0x00 */
+ memset(buffer, 0, SECTORSIZE);
+ } else {
+ size_t retlen;
+ loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
+ int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer);
+
+ /* Handle corrected bit flips gracefully */
+ if (ret < 0 && !mtd_is_bitflip(ret))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int inftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct INFTLrecord *inftl = (void *)dev;
+
+ geo->heads = inftl->heads;
+ geo->sectors = inftl->sectors;
+ geo->cylinders = inftl->cylinders;
+
+ return 0;
+}
+
+static struct mtd_blktrans_ops inftl_tr = {
+ .name = "inftl",
+ .major = INFTL_MAJOR,
+ .part_bits = INFTL_PARTN_BITS,
+ .blksize = 512,
+ .getgeo = inftl_getgeo,
+ .readsect = inftl_readblock,
+ .writesect = inftl_writeblock,
+ .add_mtd = inftl_add_mtd,
+ .remove_dev = inftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_inftl(void)
+{
+ return register_mtd_blktrans(&inftl_tr);
+}
+
+static void __exit cleanup_inftl(void)
+{
+ deregister_mtd_blktrans(&inftl_tr);
+}
+
+module_init(init_inftl);
+module_exit(cleanup_inftl);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>, David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al.");
+MODULE_DESCRIPTION("Support code for Inverse Flash Translation Layer, used on M-Systems DiskOnChip 2000, Millennium and Millennium Plus");
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
new file mode 100644
index 000000000..1388c8d7f
--- /dev/null
+++ b/drivers/mtd/inftlmount.c
@@ -0,0 +1,791 @@
+/*
+ * inftlmount.c -- INFTL mount code with extensive checks.
+ *
+ * Author: Greg Ungerer (gerg@snapgear.com)
+ * Copyright © 2002-2003, Greg Ungerer (gerg@snapgear.com)
+ *
+ * Based heavily on the nftlmount.c code which is:
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright © 2000 Netgem S.A.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nftl.h>
+#include <linux/mtd/inftl.h>
+
+/*
+ * find_boot_record: Find the INFTL Media Header and its Spare copy which
+ * contains the various device information of the INFTL partition and
+ * Bad Unit Table. Update the PUtable[] table according to the Bad
+ * Unit Table. PUtable[] is used for management of Erase Unit in
+ * other routines in inftlcore.c and inftlmount.c.
+ */
+static int find_boot_record(struct INFTLrecord *inftl)
+{
+ struct inftl_unittail h1;
+ //struct inftl_oob oob;
+ unsigned int i, block;
+ u8 buf[SECTORSIZE];
+ struct INFTLMediaHeader *mh = &inftl->MediaHdr;
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ struct INFTLPartition *ip;
+ size_t retlen;
+
+ pr_debug("INFTL: find_boot_record(inftl=%p)\n", inftl);
+
+ /*
+ * Assume logical EraseSize == physical erasesize for starting the
+ * scan. We'll sort it out later if we find a MediaHeader which says
+ * otherwise.
+ */
+ inftl->EraseSize = inftl->mbd.mtd->erasesize;
+ inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
+
+ inftl->MediaUnit = BLOCK_NIL;
+
+ /* Search for a valid boot record */
+ for (block = 0; block < inftl->nb_blocks; block++) {
+ int ret;
+
+ /*
+ * Check for BNAND header first. Then whinge if it's found
+ * but later checks fail.
+ */
+ ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
+ /* We ignore ret in case the ECC of the MediaHeader is invalid
+ (which is apparently acceptable) */
+ if (retlen != SECTORSIZE) {
+ static int warncount = 5;
+
+ if (warncount) {
+ printk(KERN_WARNING "INFTL: block read at 0x%x "
+ "of mtd%d failed: %d\n",
+ block * inftl->EraseSize,
+ inftl->mbd.mtd->index, ret);
+ if (!--warncount)
+ printk(KERN_WARNING "INFTL: further "
+ "failures for this block will "
+ "not be printed\n");
+ }
+ continue;
+ }
+
+ if (retlen < 6 || memcmp(buf, "BNAND", 6)) {
+ /* BNAND\0 not found. Continue */
+ continue;
+ }
+
+ /* To be safer with BIOS, also use erase mark as discriminant */
+ ret = inftl_read_oob(mtd,
+ block * inftl->EraseSize + SECTORSIZE + 8,
+ 8, &retlen,(char *)&h1);
+ if (ret < 0) {
+			printk(KERN_WARNING "INFTL: BNAND header found at "
+ "0x%x in mtd%d, but OOB data read failed "
+ "(err %d)\n", block * inftl->EraseSize,
+ inftl->mbd.mtd->index, ret);
+ continue;
+ }
+
+
+ /*
+ * This is the first we've seen.
+ * Copy the media header structure into place.
+ */
+ memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
+
+ /* Read the spare media header at offset 4096 */
+ mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
+ &retlen, buf);
+ if (retlen != SECTORSIZE) {
+ printk(KERN_WARNING "INFTL: Unable to read spare "
+ "Media Header\n");
+ return -1;
+ }
+ /* Check if this one is the same as the first one we found. */
+ if (memcmp(mh, buf, sizeof(struct INFTLMediaHeader))) {
+ printk(KERN_WARNING "INFTL: Primary and spare Media "
+ "Headers disagree.\n");
+ return -1;
+ }
+
+ mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
+ mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
+ mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions);
+ mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits);
+ mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
+ mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
+
+ pr_debug("INFTL: Media Header ->\n"
+ " bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+			 "    BlockMultiplierBits   = %d\n"
+			 "    FormatFlags           = %d\n"
+ " OsakVersion = 0x%x\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ mh->OsakVersion, mh->PercentUsed);
+
+ if (mh->NoOfBDTLPartitions == 0) {
+ printk(KERN_WARNING "INFTL: Media Header sanity check "
+ "failed: NoOfBDTLPartitions (%d) == 0, "
+ "must be at least 1\n", mh->NoOfBDTLPartitions);
+ return -1;
+ }
+
+ if ((mh->NoOfBDTLPartitions + mh->NoOfBinaryPartitions) > 4) {
+ printk(KERN_WARNING "INFTL: Media Header sanity check "
+ "failed: Total Partitions (%d) > 4, "
+ "BDTL=%d Binary=%d\n", mh->NoOfBDTLPartitions +
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->NoOfBinaryPartitions);
+ return -1;
+ }
+
+ if (mh->BlockMultiplierBits > 1) {
+ printk(KERN_WARNING "INFTL: sorry, we don't support "
+ "UnitSizeFactor 0x%02x\n",
+ mh->BlockMultiplierBits);
+ return -1;
+ } else if (mh->BlockMultiplierBits == 1) {
+ printk(KERN_WARNING "INFTL: support for INFTL with "
+ "UnitSizeFactor 0x%02x is experimental\n",
+ mh->BlockMultiplierBits);
+ inftl->EraseSize = inftl->mbd.mtd->erasesize <<
+ mh->BlockMultiplierBits;
+ inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
+ block >>= mh->BlockMultiplierBits;
+ }
+
+ /* Scan the partitions */
+ for (i = 0; (i < 4); i++) {
+ ip = &mh->Partitions[i];
+ ip->virtualUnits = le32_to_cpu(ip->virtualUnits);
+ ip->firstUnit = le32_to_cpu(ip->firstUnit);
+ ip->lastUnit = le32_to_cpu(ip->lastUnit);
+ ip->flags = le32_to_cpu(ip->flags);
+ ip->spareUnits = le32_to_cpu(ip->spareUnits);
+ ip->Reserved0 = le32_to_cpu(ip->Reserved0);
+
+ pr_debug(" PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
+
+ if (ip->Reserved0 != ip->firstUnit) {
+ struct erase_info *instr = &inftl->instr;
+
+ instr->mtd = inftl->mbd.mtd;
+
+ /*
+ * Most likely this is using the
+				 * undocumented quick mount feature.
+				 * We don't support that; we will need
+ * to erase the hidden block for full
+ * compatibility.
+ */
+ instr->addr = ip->Reserved0 * inftl->EraseSize;
+ instr->len = inftl->EraseSize;
+ mtd_erase(mtd, instr);
+ }
+ if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
+ printk(KERN_WARNING "INFTL: Media Header "
+ "Partition %d sanity check failed\n"
+					"    firstUnit %d : lastUnit %d < "
+					"virtualUnits %d\n", i, ip->firstUnit,
+					ip->lastUnit, ip->virtualUnits);
+ return -1;
+ }
+ if (ip->Reserved1 != 0) {
+ printk(KERN_WARNING "INFTL: Media Header "
+ "Partition %d sanity check failed: "
+ "Reserved1 %d != 0\n",
+ i, ip->Reserved1);
+ return -1;
+ }
+
+ if (ip->flags & INFTL_BDTL)
+ break;
+ }
+
+ if (i >= 4) {
+ printk(KERN_WARNING "INFTL: Media Header Partition "
+ "sanity check failed:\n No partition "
+ "marked as Disk Partition\n");
+ return -1;
+ }
+
+ inftl->nb_boot_blocks = ip->firstUnit;
+ inftl->numvunits = ip->virtualUnits;
+ if (inftl->numvunits > (inftl->nb_blocks -
+ inftl->nb_boot_blocks - 2)) {
+ printk(KERN_WARNING "INFTL: Media Header sanity check "
+ "failed:\n numvunits (%d) > nb_blocks "
+ "(%d) - nb_boot_blocks(%d) - 2\n",
+ inftl->numvunits, inftl->nb_blocks,
+ inftl->nb_boot_blocks);
+ return -1;
+ }
+
+ inftl->mbd.size = inftl->numvunits *
+ (inftl->EraseSize / SECTORSIZE);
+
+ /*
+ * Block count is set to last used EUN (we won't need to keep
+ * any meta-data past that point).
+ */
+ inftl->firstEUN = ip->firstUnit;
+ inftl->lastEUN = ip->lastUnit;
+ inftl->nb_blocks = ip->lastUnit + 1;
+
+ /* Memory alloc */
+ inftl->PUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ if (!inftl->PUtable) {
+ printk(KERN_WARNING "INFTL: allocation of PUtable "
+ "failed (%zd bytes)\n",
+ inftl->nb_blocks * sizeof(u16));
+ return -ENOMEM;
+ }
+
+ inftl->VUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ if (!inftl->VUtable) {
+ kfree(inftl->PUtable);
+ printk(KERN_WARNING "INFTL: allocation of VUtable "
+ "failed (%zd bytes)\n",
+ inftl->nb_blocks * sizeof(u16));
+ return -ENOMEM;
+ }
+
+ /* Mark the blocks before INFTL MediaHeader as reserved */
+ for (i = 0; i < inftl->nb_boot_blocks; i++)
+ inftl->PUtable[i] = BLOCK_RESERVED;
+ /* Mark all remaining blocks as potentially containing data */
+ for (; i < inftl->nb_blocks; i++)
+ inftl->PUtable[i] = BLOCK_NOTEXPLORED;
+
+ /* Mark this boot record (NFTL MediaHeader) block as reserved */
+ inftl->PUtable[block] = BLOCK_RESERVED;
+
+ /* Read Bad Erase Unit Table and modify PUtable[] accordingly */
+ for (i = 0; i < inftl->nb_blocks; i++) {
+ int physblock;
+ /* If any of the physical eraseblocks are bad, don't
+ use the unit. */
+ for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
+ if (mtd_block_isbad(inftl->mbd.mtd,
+ i * inftl->EraseSize + physblock))
+ inftl->PUtable[i] = BLOCK_RESERVED;
+ }
+ }
+
+ inftl->MediaUnit = block;
+ return 0;
+ }
+
+ /* Not found. */
+ return -1;
+}
+
+static int memcmpb(void *a, int c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ if (c != ((unsigned char *)a)[i])
+ return 1;
+ }
+ return 0;
+}
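+
+/*
+ * Note (illustrative): memcmpb() reports whether any byte in the buffer
+ * differs from 'c'. The kernel helper memchr_inv() from <linux/string.h>
+ * provides the same check, e.g. memcmpb(buf, 0xff, n) != 0 is equivalent
+ * to memchr_inv(buf, 0xff, n) != NULL.
+ */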
+
+/*
+ * check_free_sectors: check that a free sector is actually FREE,
+ *	i.e. all 0xff in both the data and OOB areas.
+ */
+static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
+ int len, int check_oob)
+{
+ u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize];
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ size_t retlen;
+ int i;
+
+ for (i = 0; i < len; i += SECTORSIZE) {
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
+ return -1;
+ if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
+ return -1;
+
+ if (check_oob) {
+ if(inftl_read_oob(mtd, address, mtd->oobsize,
+ &retlen, &buf[SECTORSIZE]) < 0)
+ return -1;
+ if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
+ return -1;
+ }
+ address += SECTORSIZE;
+ }
+
+ return 0;
+}
+
+/*
+ * INFTL_formatblock: format an Erase Unit by erasing ALL Erase Zones in the
+ *		 Erase Unit and update the INFTL metadata. Each erase
+ *		 operation is checked with check_free_sectors.
+ *
+ * Returns 0 on success, -1 on error.
+ *
+ * TODO: is it really necessary to call check_free_sectors() after erasing?
+ */
+int INFTL_formatblock(struct INFTLrecord *inftl, int block)
+{
+ size_t retlen;
+ struct inftl_unittail uci;
+ struct erase_info *instr = &inftl->instr;
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ int physblock;
+
+ pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block);
+
+ memset(instr, 0, sizeof(struct erase_info));
+
+ /* FIXME: Shouldn't we be setting the 'discarded' flag to zero
+ _first_? */
+
+ /* Use async erase interface, test return code */
+ instr->mtd = inftl->mbd.mtd;
+ instr->addr = block * inftl->EraseSize;
+ instr->len = inftl->mbd.mtd->erasesize;
+	/* Erase one physical eraseblock at a time, even though the NAND API
+	   allows us to group them. This way, if we have a failure, we can
+	   mark only the failed block in the bbt. */
+ for (physblock = 0; physblock < inftl->EraseSize;
+ physblock += instr->len, instr->addr += instr->len) {
+ mtd_erase(inftl->mbd.mtd, instr);
+
+ if (instr->state == MTD_ERASE_FAILED) {
+ printk(KERN_WARNING "INFTL: error while formatting block %d\n",
+ block);
+ goto fail;
+ }
+
+ /*
+		 * Check the "freeness" of the Erase Unit before updating
+		 * metadata. FIXME: is this check really necessary, since we
+		 * already check the return code after the erase operation?
+ */
+ if (check_free_sectors(inftl, instr->addr, instr->len, 1) != 0)
+ goto fail;
+ }
+
+ uci.EraseMark = cpu_to_le16(ERASE_MARK);
+ uci.EraseMark1 = cpu_to_le16(ERASE_MARK);
+ uci.Reserved[0] = 0;
+ uci.Reserved[1] = 0;
+ uci.Reserved[2] = 0;
+ uci.Reserved[3] = 0;
+ instr->addr = block * inftl->EraseSize + SECTORSIZE * 2;
+ if (inftl_write_oob(mtd, instr->addr + 8, 8, &retlen, (char *)&uci) < 0)
+ goto fail;
+ return 0;
+fail:
+ /* could not format, update the bad block table (caller is responsible
+ for setting the PUtable to BLOCK_RESERVED on failure) */
+ mtd_block_markbad(inftl->mbd.mtd, instr->addr);
+ return -1;
+}
+
+/*
+ * format_chain: Format an invalid Virtual Unit chain. It frees all the Erase
+ * Units in a Virtual Unit Chain, i.e. all the units are disconnected.
+ *
+ * Since the chain is invalid we have to erase it from its head
+ * (for INFTL that is normally the oldest unit). But if the chain
+ * contains a loop then there is no oldest...
+ */
+static void format_chain(struct INFTLrecord *inftl, unsigned int first_block)
+{
+ unsigned int block = first_block, block1;
+
+ printk(KERN_WARNING "INFTL: formatting chain at block %d\n",
+ first_block);
+
+ for (;;) {
+ block1 = inftl->PUtable[block];
+
+ printk(KERN_WARNING "INFTL: formatting block %d\n", block);
+ if (INFTL_formatblock(inftl, block) < 0) {
+ /*
+			 * Cannot format! Mark it as a Bad Unit.
+ */
+ inftl->PUtable[block] = BLOCK_RESERVED;
+ } else {
+ inftl->PUtable[block] = BLOCK_FREE;
+ }
+
+ /* Goto next block on the chain */
+ block = block1;
+
+ if (block == BLOCK_NIL || block >= inftl->lastEUN)
+ break;
+ }
+}
+
+void INFTL_dumptables(struct INFTLrecord *s)
+{
+ int i;
+
+ pr_debug("-------------------------------------------"
+ "----------------------------------\n");
+
+ pr_debug("VUtable[%d] ->", s->nb_blocks);
+ for (i = 0; i < s->nb_blocks; i++) {
+ if ((i % 8) == 0)
+ pr_debug("\n%04x: ", i);
+ pr_debug("%04x ", s->VUtable[i]);
+ }
+
+ pr_debug("\n-------------------------------------------"
+ "----------------------------------\n");
+
+ pr_debug("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
+ for (i = 0; i <= s->lastEUN; i++) {
+ if ((i % 8) == 0)
+ pr_debug("\n%04x: ", i);
+ pr_debug("%04x ", s->PUtable[i]);
+ }
+
+ pr_debug("\n-------------------------------------------"
+ "----------------------------------\n");
+
+ pr_debug("INFTL ->\n"
+ " EraseSize = %d\n"
+ " h/s/c = %d/%d/%d\n"
+ " numvunits = %d\n"
+ " firstEUN = %d\n"
+ " lastEUN = %d\n"
+ " numfreeEUNs = %d\n"
+ " LastFreeEUN = %d\n"
+ " nb_blocks = %d\n"
+ " nb_boot_blocks = %d",
+ s->EraseSize, s->heads, s->sectors, s->cylinders,
+ s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs,
+ s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks);
+
+ pr_debug("\n-------------------------------------------"
+ "----------------------------------\n");
+}
+
+void INFTL_dumpVUchains(struct INFTLrecord *s)
+{
+ int logical, block, i;
+
+ pr_debug("-------------------------------------------"
+ "----------------------------------\n");
+
+ pr_debug("INFTL Virtual Unit Chains:\n");
+ for (logical = 0; logical < s->nb_blocks; logical++) {
+ block = s->VUtable[logical];
+ if (block >= s->nb_blocks)
+ continue;
+ pr_debug(" LOGICAL %d --> %d ", logical, block);
+ for (i = 0; i < s->nb_blocks; i++) {
+ if (s->PUtable[block] == BLOCK_NIL)
+ break;
+ block = s->PUtable[block];
+ pr_debug("%d ", block);
+ }
+ pr_debug("\n");
+ }
+
+ pr_debug("-------------------------------------------"
+ "----------------------------------\n");
+}
+
+int INFTL_mount(struct INFTLrecord *s)
+{
+ struct mtd_info *mtd = s->mbd.mtd;
+ unsigned int block, first_block, prev_block, last_block;
+ unsigned int first_logical_block, logical_block, erase_mark;
+ int chain_length, do_format_chain;
+ struct inftl_unithead1 h0;
+ struct inftl_unittail h1;
+ size_t retlen;
+ int i;
+ u8 *ANACtable, ANAC;
+
+ pr_debug("INFTL: INFTL_mount(inftl=%p)\n", s);
+
+ /* Search for INFTL MediaHeader and Spare INFTL Media Header */
+ if (find_boot_record(s) < 0) {
+		printk(KERN_WARNING "INFTL: could not find a valid boot record\n");
+ return -ENXIO;
+ }
+
+ /* Init the logical to physical table */
+ for (i = 0; i < s->nb_blocks; i++)
+ s->VUtable[i] = BLOCK_NIL;
+
+ logical_block = block = BLOCK_NIL;
+
+ /* Temporary buffer to store ANAC numbers. */
+ ANACtable = kcalloc(s->nb_blocks, sizeof(u8), GFP_KERNEL);
+ if (!ANACtable) {
+ printk(KERN_WARNING "INFTL: allocation of ANACtable "
+ "failed (%zd bytes)\n",
+ s->nb_blocks * sizeof(u8));
+ return -ENOMEM;
+ }
+
+ /*
+ * First pass is to explore each physical unit, and construct the
+ * virtual chains that exist (newest physical unit goes into VUtable).
+ * Any block that is in any way invalid will be left in the
+ * NOTEXPLORED state. Then at the end we will try to format it and
+ * mark it as free.
+ */
+ pr_debug("INFTL: pass 1, explore each unit\n");
+ for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) {
+ if (s->PUtable[first_block] != BLOCK_NOTEXPLORED)
+ continue;
+
+ do_format_chain = 0;
+ first_logical_block = BLOCK_NIL;
+ last_block = BLOCK_NIL;
+ block = first_block;
+
+ for (chain_length = 0; ; chain_length++) {
+
+ if ((chain_length == 0) &&
+ (s->PUtable[block] != BLOCK_NOTEXPLORED)) {
+ /* Nothing to do here, onto next block */
+ break;
+ }
+
+ if (inftl_read_oob(mtd, block * s->EraseSize + 8,
+ 8, &retlen, (char *)&h0) < 0 ||
+ inftl_read_oob(mtd, block * s->EraseSize +
+ 2 * SECTORSIZE + 8, 8, &retlen,
+ (char *)&h1) < 0) {
+ /* Should never happen? */
+ do_format_chain++;
+ break;
+ }
+
+ logical_block = le16_to_cpu(h0.virtualUnitNo);
+ prev_block = le16_to_cpu(h0.prevUnitNo);
+ erase_mark = le16_to_cpu((h1.EraseMark | h1.EraseMark1));
+ ANACtable[block] = h0.ANAC;
+
+ /* Previous block is relative to start of Partition */
+ if (prev_block < s->nb_blocks)
+ prev_block += s->firstEUN;
+
+ /* Already explored partial chain? */
+ if (s->PUtable[block] != BLOCK_NOTEXPLORED) {
+				/* Is this the chain for this logical block? */
+ if (logical_block == first_logical_block) {
+ if (last_block != BLOCK_NIL)
+ s->PUtable[last_block] = block;
+ }
+ break;
+ }
+
+ /* Check for invalid block */
+ if (erase_mark != ERASE_MARK) {
+ printk(KERN_WARNING "INFTL: corrupt block %d "
+ "in chain %d, chain length %d, erase "
+ "mark 0x%x?\n", block, first_block,
+ chain_length, erase_mark);
+ /*
+ * Assume end of chain, probably incomplete
+ * fold/erase...
+ */
+ if (chain_length == 0)
+ do_format_chain++;
+ break;
+ }
+
+ /* Check for it being free already then... */
+ if ((logical_block == BLOCK_FREE) ||
+ (logical_block == BLOCK_NIL)) {
+ s->PUtable[block] = BLOCK_FREE;
+ break;
+ }
+
+ /* Sanity checks on block numbers */
+ if ((logical_block >= s->nb_blocks) ||
+ ((prev_block >= s->nb_blocks) &&
+ (prev_block != BLOCK_NIL))) {
+ if (chain_length > 0) {
+ printk(KERN_WARNING "INFTL: corrupt "
+ "block %d in chain %d?\n",
+ block, first_block);
+ do_format_chain++;
+ }
+ break;
+ }
+
+ if (first_logical_block == BLOCK_NIL) {
+ first_logical_block = logical_block;
+ } else {
+ if (first_logical_block != logical_block) {
+ /* Normal for folded chain... */
+ break;
+ }
+ }
+
+ /*
+ * Current block is valid, so if we followed a virtual
+ * chain to get here then we can set the previous
+ * block pointer in our PUtable now. Then move onto
+ * the previous block in the chain.
+ */
+ s->PUtable[block] = BLOCK_NIL;
+ if (last_block != BLOCK_NIL)
+ s->PUtable[last_block] = block;
+ last_block = block;
+ block = prev_block;
+
+ /* Check for end of chain */
+ if (block == BLOCK_NIL)
+ break;
+
+ /* Validate next block before following it... */
+ if (block > s->lastEUN) {
+ printk(KERN_WARNING "INFTL: invalid previous "
+ "block %d in chain %d?\n", block,
+ first_block);
+ do_format_chain++;
+ break;
+ }
+ }
+
+ if (do_format_chain) {
+ format_chain(s, first_block);
+ continue;
+ }
+
+ /*
+ * Looks like a valid chain then. It may not really be the
+ * newest block in the chain, but it is the newest we have
+ * found so far. We might update it in later iterations of
+ * this loop if we find something newer.
+ */
+ s->VUtable[first_logical_block] = first_block;
+ logical_block = BLOCK_NIL;
+ }
+
+ INFTL_dumptables(s);
+
+ /*
+ * Second pass, check for infinite loops in chains. These are
+ * possible because we don't update the previous pointers when
+ * we fold chains. No big deal, just fix them up in PUtable.
+ */
+ pr_debug("INFTL: pass 2, validate virtual chains\n");
+ for (logical_block = 0; logical_block < s->numvunits; logical_block++) {
+ block = s->VUtable[logical_block];
+ last_block = BLOCK_NIL;
+
+ /* Check for free/reserved/nil */
+ if (block >= BLOCK_RESERVED)
+ continue;
+
+ ANAC = ANACtable[block];
+ for (i = 0; i < s->numvunits; i++) {
+ if (s->PUtable[block] == BLOCK_NIL)
+ break;
+ if (s->PUtable[block] > s->lastEUN) {
+ printk(KERN_WARNING "INFTL: invalid prev %d, "
+ "in virtual chain %d\n",
+ s->PUtable[block], logical_block);
+ s->PUtable[block] = BLOCK_NIL;
+
+ }
+ if (ANACtable[block] != ANAC) {
+ /*
+				 * Chain must point back to itself. This is ok,
+				 * but we will need to adjust the tables with
+				 * this newest block and oldest block.
+ */
+ s->VUtable[logical_block] = block;
+ s->PUtable[last_block] = BLOCK_NIL;
+ break;
+ }
+
+ ANAC--;
+ last_block = block;
+ block = s->PUtable[block];
+ }
+
+ if (i >= s->nb_blocks) {
+ /*
+			 * Uh oh, an infinite chain with valid ANACs!
+ * Format whole chain...
+ */
+ format_chain(s, first_block);
+ }
+ }
+
+ INFTL_dumptables(s);
+ INFTL_dumpVUchains(s);
+
+ /*
+ * Third pass, format unreferenced blocks and init free block count.
+ */
+ s->numfreeEUNs = 0;
+ s->LastFreeEUN = BLOCK_NIL;
+
+ pr_debug("INFTL: pass 3, format unused blocks\n");
+ for (block = s->firstEUN; block <= s->lastEUN; block++) {
+ if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
+ printk("INFTL: unreferenced block %d, formatting it\n",
+ block);
+ if (INFTL_formatblock(s, block) < 0)
+ s->PUtable[block] = BLOCK_RESERVED;
+ else
+ s->PUtable[block] = BLOCK_FREE;
+ }
+ if (s->PUtable[block] == BLOCK_FREE) {
+ s->numfreeEUNs++;
+ if (s->LastFreeEUN == BLOCK_NIL)
+ s->LastFreeEUN = block;
+ }
+ }
+
+ kfree(ANACtable);
+ return 0;
+}
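+
+/*
+ * Worked example of the pass-2 ANAC check above (illustrative, using
+ * made-up numbers): for a chain V -> P1 -> P2 with ANACtable values
+ * 5, 4, 3 the walk starts from ANAC 5 and decrements as it follows
+ * PUtable, so every unit matches. A stale unit left behind by an
+ * interrupted fold breaks the decrementing sequence, and the chain is
+ * cut at that point by resetting VUtable/PUtable.
+ */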
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
new file mode 100644
index 000000000..3a19cbee2
--- /dev/null
+++ b/drivers/mtd/lpddr/Kconfig
@@ -0,0 +1,29 @@
+menu "LPDDR & LPDDR2 PCM memory drivers"
+ depends on MTD
+
+config MTD_LPDDR
+ tristate "Support for LPDDR flash chips"
+ select MTD_QINFO_PROBE
+ help
+	  This option enables support for LPDDR (Low Power Double Data Rate)
+	  flash chips, also known as Mobile-DDR. It is a standard for DDR
+	  memories intended for battery-operated systems.
+
+config MTD_QINFO_PROBE
+ depends on MTD_LPDDR
+ tristate "Detect flash chips by QINFO probe"
+ help
+	  Device information for LPDDR chips is offered through the Overlay
+	  Window QINFO interface, which permits the same software to be used
+	  for entire families of devices. This serves a similar purpose to
+	  CFI on legacy flash products.
+
+config MTD_LPDDR2_NVM
+ # ARM dependency is only for writel_relaxed()
+ depends on MTD && ARM
+ tristate "Support for LPDDR2-NVM flash chips"
+ help
+	  This option enables support for PCM memories with an LPDDR2-NVM
+	  (Low Power Double Data Rate 2) interface.
+
+endmenu
diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile
new file mode 100644
index 000000000..881d440d4
--- /dev/null
+++ b/drivers/mtd/lpddr/Makefile
@@ -0,0 +1,7 @@
+#
+# linux/drivers/mtd/lpddr/Makefile
+#
+
+obj-$(CONFIG_MTD_QINFO_PROBE) += qinfo_probe.o
+obj-$(CONFIG_MTD_LPDDR) += lpddr_cmds.o
+obj-$(CONFIG_MTD_LPDDR2_NVM) += lpddr2_nvm.o
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
new file mode 100644
index 000000000..063cec40d
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -0,0 +1,507 @@
+/*
+ * LPDDR2-NVM MTD driver. This module provides read, write, erase, lock/unlock
+ * support for LPDDR2-NVM PCM memories
+ *
+ * Copyright © 2012 Micron Technology, Inc.
+ *
+ * Vincenzo Aliberti <vincenzo.aliberti@gmail.com>
+ * Domenico Manna <domenico.manna@gmail.com>
+ * Many thanks to Andrea Vigilante for initial enabling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/err.h>
+
+/* Parameters */
+#define ERASE_BLOCKSIZE (0x00020000/2) /* in Word */
+#define WRITE_BUFFSIZE (0x00000400/2) /* in Word */
+#define OW_BASE_ADDRESS 0x00000000 /* OW offset */
+#define BUS_WIDTH 0x00000020 /* x32 devices */
+
+/* PFOW symbols address offset */
+#define PFOW_QUERY_STRING_P (0x0000/2) /* in Word */
+#define PFOW_QUERY_STRING_F (0x0002/2) /* in Word */
+#define PFOW_QUERY_STRING_O (0x0004/2) /* in Word */
+#define PFOW_QUERY_STRING_W (0x0006/2) /* in Word */
+
+/* OW registers address */
+#define CMD_CODE_OFS (0x0080/2) /* in Word */
+#define CMD_DATA_OFS (0x0084/2) /* in Word */
+#define CMD_ADD_L_OFS (0x0088/2) /* in Word */
+#define CMD_ADD_H_OFS (0x008A/2) /* in Word */
+#define MPR_L_OFS (0x0090/2) /* in Word */
+#define MPR_H_OFS (0x0092/2) /* in Word */
+#define CMD_EXEC_OFS (0x00C0/2) /* in Word */
+#define STATUS_REG_OFS (0x00CC/2) /* in Word */
+#define PRG_BUFFER_OFS (0x0010/2) /* in Word */
+
+/* Datamask */
+#define MR_CFGMASK 0x8000
+#define SR_OK_DATAMASK 0x0080
+
+/* LPDDR2-NVM Commands */
+#define LPDDR2_NVM_LOCK 0x0061
+#define LPDDR2_NVM_UNLOCK 0x0062
+#define LPDDR2_NVM_SW_PROGRAM 0x0041
+#define LPDDR2_NVM_SW_OVERWRITE 0x0042
+#define LPDDR2_NVM_BUF_PROGRAM 0x00E9
+#define LPDDR2_NVM_BUF_OVERWRITE 0x00EA
+#define LPDDR2_NVM_ERASE 0x0020
+
+/* LPDDR2-NVM Registers offset */
+#define LPDDR2_MODE_REG_DATA 0x0040
+#define LPDDR2_MODE_REG_CFG 0x0050
+
+/*
+ * Internal Type Definitions
+ * pcm_int_data contains memory controller details:
+ * @ctl_regs : control register block (LPDDR2_MODE_REG_DATA and
+ *             LPDDR2_MODE_REG_CFG) after remapping
+ * @bus_width: memory bus-width (e.g. x16: 2 bytes, x32: 4 bytes)
+ */
+struct pcm_int_data {
+ void __iomem *ctl_regs;
+ int bus_width;
+};
+
+static DEFINE_MUTEX(lpdd2_nvm_mutex);
+
+/*
+ * Build a map_word from a u_long
+ */
+static inline map_word build_map_word(u_long myword)
+{
+ map_word val = { {0} };
+ val.x[0] = myword;
+ return val;
+}
+
+/*
+ * Build Mode Register Configuration DataMask based on device bus-width
+ */
+static inline u_int build_mr_cfgmask(u_int bus_width)
+{
+ u_int val = MR_CFGMASK;
+
+ if (bus_width == 0x0004) /* x32 device */
+ val = val << 16;
+
+ return val;
+}
+
+/*
+ * Build Status Register OK DataMask based on device bus-width
+ */
+static inline u_int build_sr_ok_datamask(u_int bus_width)
+{
+ u_int val = SR_OK_DATAMASK;
+
+ if (bus_width == 0x0004) /* x32 device */
+ val = (val << 16)+val;
+
+ return val;
+}
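+
+/*
+ * Example (illustrative): when bus_width == 0x0004 (the x32 / two
+ * stacked x16 case) the OK mask covers both halves of the bus:
+ *
+ *	build_sr_ok_datamask(0x0004) == (0x0080 << 16) + 0x0080
+ *				     == 0x00800080
+ */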
+
+/*
+ * Evaluates Overlay Window Control Registers address
+ */
+static inline u_long ow_reg_add(struct map_info *map, u_long offset)
+{
+ u_long val = 0;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ val = map->pfow_base + offset*pcm_data->bus_width;
+
+ return val;
+}
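+
+/*
+ * Worked example (illustrative): the register offsets above are given
+ * in 16-bit words, so ow_reg_add() scales them by the per-word stride
+ * bus_width. Reading the Status Register then becomes:
+ *
+ *	sr = map_read(map, ow_reg_add(map, STATUS_REG_OFS));
+ */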
+
+/*
+ * Enable lpddr2-nvm Overlay Window
+ * Overlay Window is a memory mapped area containing all LPDDR2-NVM registers
+ * used by device commands as well as user-visible resources like the Device
+ * Status Register, Device ID, etc.
+ */
+static inline void ow_enable(struct map_info *map)
+{
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
+ pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
+ writel_relaxed(0x01, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
+}
+
+/*
+ * Disable lpddr2-nvm Overlay Window
+ * Overlay Window is a memory mapped area containing all LPDDR2-NVM registers
+ * used by device commands as well as user-visible resources like the Device
+ * Status Register, Device ID, etc.
+ */
+static inline void ow_disable(struct map_info *map)
+{
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
+ pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
+ writel_relaxed(0x02, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
+}
+
+/*
+ * Execute lpddr2-nvm operations
+ */
+static int lpddr2_nvm_do_op(struct map_info *map, u_long cmd_code,
+ u_long cmd_data, u_long cmd_add, u_long cmd_mpr, u_char *buf)
+{
+ map_word add_l = { {0} }, add_h = { {0} }, mpr_l = { {0} },
+ mpr_h = { {0} }, data_l = { {0} }, cmd = { {0} },
+ exec_cmd = { {0} }, sr;
+ map_word data_h = { {0} }; /* only for 2x x16 devices stacked */
+ u_long i, status_reg, prg_buff_ofs;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+ u_int sr_ok_datamask = build_sr_ok_datamask(pcm_data->bus_width);
+
+ /* Builds low and high words for OW Control Registers */
+ add_l.x[0] = cmd_add & 0x0000FFFF;
+ add_h.x[0] = (cmd_add >> 16) & 0x0000FFFF;
+ mpr_l.x[0] = cmd_mpr & 0x0000FFFF;
+ mpr_h.x[0] = (cmd_mpr >> 16) & 0x0000FFFF;
+ cmd.x[0] = cmd_code & 0x0000FFFF;
+ exec_cmd.x[0] = 0x0001;
+ data_l.x[0] = cmd_data & 0x0000FFFF;
+ data_h.x[0] = (cmd_data >> 16) & 0x0000FFFF; /* only for 2x x16 */
+
+ /* Set Overlay Window Control Registers */
+ map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS));
+ map_write(map, data_l, ow_reg_add(map, CMD_DATA_OFS));
+ map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS));
+ map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS));
+ map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS));
+ map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS));
+ if (pcm_data->bus_width == 0x0004) { /* 2x16 devices stacked */
+ map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS) + 2);
+ map_write(map, data_h, ow_reg_add(map, CMD_DATA_OFS) + 2);
+ map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS) + 2);
+ map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS) + 2);
+ map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS) + 2);
+ map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS) + 2);
+ }
+
+ /* Fill Program Buffer */
+ if ((cmd_code == LPDDR2_NVM_BUF_PROGRAM) ||
+ (cmd_code == LPDDR2_NVM_BUF_OVERWRITE)) {
+ prg_buff_ofs = (map_read(map,
+ ow_reg_add(map, PRG_BUFFER_OFS))).x[0];
+ for (i = 0; i < cmd_mpr; i++) {
+ map_write(map, build_map_word(buf[i]), map->pfow_base +
+ prg_buff_ofs + i);
+ }
+ }
+
+ /* Command Execute */
+ map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS));
+ if (pcm_data->bus_width == 0x0004) /* 2x16 devices stacked */
+ map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS) + 2);
+
+ /* Status Register Check */
+ do {
+ sr = map_read(map, ow_reg_add(map, STATUS_REG_OFS));
+ status_reg = sr.x[0];
+ if (pcm_data->bus_width == 0x0004) {/* 2x16 devices stacked */
+ sr = map_read(map, ow_reg_add(map,
+ STATUS_REG_OFS) + 2);
+ status_reg += sr.x[0] << 16;
+ }
+ } while ((status_reg & sr_ok_datamask) != sr_ok_datamask);
+
+ return (((status_reg & sr_ok_datamask) == sr_ok_datamask) ? 0 : -EIO);
+}
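+
+/*
+ * Usage sketch (illustrative, not a call made by the original code):
+ * overwriting a single word of value 0xdeadbeef at device address 0x100
+ * without the program buffer reduces to:
+ *
+ *	ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_SW_OVERWRITE,
+ *			       0xdeadbeef, 0x100, 0x00, NULL);
+ */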
+
+/*
+ * Execute lpddr2-nvm operations @ block level
+ */
+static int lpddr2_nvm_do_block_op(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len, u_char block_op)
+{
+ struct map_info *map = mtd->priv;
+ u_long add, end_add;
+ int ret = 0;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ add = start_add;
+ end_add = add + len;
+
+ do {
+ ret = lpddr2_nvm_do_op(map, block_op, 0x00, add, add, NULL);
+ if (ret)
+ goto out;
+ add += mtd->erasesize;
+ } while (add < end_add);
+
+out:
+ ow_disable(map);
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return ret;
+}
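+
+/*
+ * Usage sketch (illustrative): erasing two consecutive eraseblocks
+ * starting at offset 0 reduces to:
+ *
+ *	ret = lpddr2_nvm_do_block_op(mtd, 0, 2 * mtd->erasesize,
+ *				     LPDDR2_NVM_ERASE);
+ */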
+
+/*
+ * verify presence of PFOW string
+ */
+static int lpddr2_nvm_pfow_present(struct map_info *map)
+{
+ map_word pfow_val[4];
+ unsigned int found = 1;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ /* Load string from array */
+ pfow_val[0] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_P));
+ pfow_val[1] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_F));
+ pfow_val[2] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_O));
+ pfow_val[3] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_W));
+
+ /* Verify the string loaded vs expected */
+ if (!map_word_equal(map, build_map_word('P'), pfow_val[0]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('F'), pfow_val[1]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('O'), pfow_val[2]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('W'), pfow_val[3]))
+ found = 0;
+
+ ow_disable(map);
+
+ mutex_unlock(&lpdd2_nvm_mutex);
+
+ return found;
+}
+
+/*
+ * lpddr2_nvm driver read method
+ */
+static int lpddr2_nvm_read(struct mtd_info *mtd, loff_t start_add,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ *retlen = len;
+
+ map_copy_from(map, buf, start_add, *retlen);
+
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return 0;
+}
+
+/*
+ * lpddr2_nvm driver write method
+ */
+static int lpddr2_nvm_write(struct mtd_info *mtd, loff_t start_add,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+ u_long add, current_len, tot_len, target_len, my_data;
+ u_char *write_buf = (u_char *)buf;
+ int ret = 0;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ /* Set start value for the variables */
+ add = start_add;
+ target_len = len;
+ tot_len = 0;
+
+ while (tot_len < target_len) {
+ if (!(IS_ALIGNED(add, mtd->writesize))) { /* do sw program */
+ my_data = write_buf[tot_len];
+ my_data += (write_buf[tot_len+1]) << 8;
+ if (pcm_data->bus_width == 0x0004) {/* 2x16 devices */
+ my_data += (write_buf[tot_len+2]) << 16;
+ my_data += (write_buf[tot_len+3]) << 24;
+ }
+ ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_SW_OVERWRITE,
+ my_data, add, 0x00, NULL);
+ if (ret)
+ goto out;
+
+ add += pcm_data->bus_width;
+ tot_len += pcm_data->bus_width;
+ } else { /* do buffer program */
+ current_len = min(target_len - tot_len,
+ (u_long) mtd->writesize);
+ ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_BUF_OVERWRITE,
+ 0x00, add, current_len, write_buf + tot_len);
+ if (ret)
+ goto out;
+
+ add += current_len;
+ tot_len += current_len;
+ }
+ }
+
+out:
+ *retlen = tot_len;
+ ow_disable(map);
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return ret;
+}
+
+/*
+ * lpddr2_nvm driver erase method
+ */
+static int lpddr2_nvm_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int ret = lpddr2_nvm_do_block_op(mtd, instr->addr, instr->len,
+ LPDDR2_NVM_ERASE);
+ if (!ret) {
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ }
+
+ return ret;
+}
+
+/*
+ * lpddr2_nvm driver unlock method
+ */
+static int lpddr2_nvm_unlock(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len)
+{
+ return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_UNLOCK);
+}
+
+/*
+ * lpddr2_nvm driver lock method
+ */
+static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len)
+{
+ return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
+}
+
+/*
+ * lpddr2_nvm driver probe method
+ */
+static int lpddr2_nvm_probe(struct platform_device *pdev)
+{
+ struct map_info *map;
+ struct mtd_info *mtd;
+ struct resource *add_range;
+ struct resource *control_regs;
+ struct pcm_int_data *pcm_data;
+
+ /* Allocate memory control_regs data structures */
+ pcm_data = devm_kzalloc(&pdev->dev, sizeof(*pcm_data), GFP_KERNEL);
+ if (!pcm_data)
+ return -ENOMEM;
+
+ pcm_data->bus_width = BUS_WIDTH;
+
+ /* Allocate memory for map_info & mtd_info data structures */
+ map = devm_kzalloc(&pdev->dev, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ mtd = devm_kzalloc(&pdev->dev, sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return -ENOMEM;
+
+ /* lpddr2_nvm address range */
+ add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* Populate map_info data structure */
+ *map = (struct map_info) {
+ .virt = devm_ioremap_resource(&pdev->dev, add_range),
+ .name = pdev->dev.init_name,
+ .phys = add_range->start,
+ .size = resource_size(add_range),
+ .bankwidth = pcm_data->bus_width / 2,
+ .pfow_base = OW_BASE_ADDRESS,
+ .fldrv_priv = pcm_data,
+ };
+ if (IS_ERR(map->virt))
+ return PTR_ERR(map->virt);
+
+ simple_map_init(map); /* fill with default methods */
+
+ control_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pcm_data->ctl_regs = devm_ioremap_resource(&pdev->dev, control_regs);
+ if (IS_ERR(pcm_data->ctl_regs))
+ return PTR_ERR(pcm_data->ctl_regs);
+
+ /* Populate mtd_info data structure */
+ *mtd = (struct mtd_info) {
+ .name = pdev->dev.init_name,
+ .type = MTD_RAM,
+ .priv = map,
+ .size = resource_size(add_range),
+ .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
+ .writesize = 1,
+ .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
+ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
+ ._read = lpddr2_nvm_read,
+ ._write = lpddr2_nvm_write,
+ ._erase = lpddr2_nvm_erase,
+ ._unlock = lpddr2_nvm_unlock,
+ ._lock = lpddr2_nvm_lock,
+ };
+
+ /* Verify the presence of the device looking for PFOW string */
+ if (!lpddr2_nvm_pfow_present(map)) {
+ pr_err("device not recognized\n");
+ return -EINVAL;
+ }
+ /* Parse partitions and register the MTD device */
+ return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+}
+
+/*
+ * lpddr2_nvm driver remove method
+ */
+static int lpddr2_nvm_remove(struct platform_device *pdev)
+{
+ return mtd_device_unregister(dev_get_drvdata(&pdev->dev));
+}
+
+/* Initialize platform_driver data structure for lpddr2_nvm */
+static struct platform_driver lpddr2_nvm_drv = {
+ .driver = {
+ .name = "lpddr2_nvm",
+ },
+ .probe = lpddr2_nvm_probe,
+ .remove = lpddr2_nvm_remove,
+};
+
+module_platform_driver(lpddr2_nvm_drv);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vincenzo Aliberti <vincenzo.aliberti@gmail.com>");
+MODULE_DESCRIPTION("MTD driver for LPDDR2-NVM PCM memories");
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
new file mode 100644
index 000000000..018c75faa
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -0,0 +1,751 @@
+/*
+ * LPDDR flash memory device operations. This module provides read, write,
+ * erase, lock/unlock support for LPDDR flash memories
+ * (C) 2008 Korolev Alexey <akorolev@infradead.org>
+ * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
+ * Many thanks to Roman Borisov for initial enabling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ * TODO:
+ * Implement VPP management
+ * Implement XIP support
+ * Implement OTP support
+ */
+#include <linux/mtd/pfow.h>
+#include <linux/mtd/qinfo.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, u_char *buf);
+static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf);
+static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
+static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, void **mtdbuf, resource_size_t *phys);
+static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
+static int get_chip(struct map_info *map, struct flchip *chip, int mode);
+static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
+static void put_chip(struct map_info *map, struct flchip *chip);
+
+struct mtd_info *lpddr_cmdset(struct map_info *map)
+{
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ struct flchip_shared *shared;
+ struct flchip *chip;
+ struct mtd_info *mtd;
+ int numchips;
+ int i, j;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return NULL;
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+
+ /* Fill in the default mtd operations */
+ mtd->_read = lpddr_read;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->flags &= ~MTD_BIT_WRITEABLE;
+ mtd->_erase = lpddr_erase;
+ mtd->_write = lpddr_write_buffers;
+ mtd->_writev = lpddr_writev;
+ mtd->_lock = lpddr_lock;
+ mtd->_unlock = lpddr_unlock;
+ if (map_is_linear(map)) {
+ mtd->_point = lpddr_point;
+ mtd->_unpoint = lpddr_unpoint;
+ }
+ mtd->size = 1 << lpddr->qinfo->DevSizeShift;
+ mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
+ mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
+
+ shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
+ GFP_KERNEL);
+ if (!shared) {
+ kfree(lpddr);
+ kfree(mtd);
+ return NULL;
+ }
+
+ chip = &lpddr->chips[0];
+ numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
+ for (i = 0; i < numchips; i++) {
+ shared[i].writing = shared[i].erasing = NULL;
+ mutex_init(&shared[i].lock);
+ for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
+ *chip = lpddr->chips[i];
+ chip->start += j << lpddr->chipshift;
+ chip->oldstate = chip->state = FL_READY;
+ chip->priv = &shared[i];
+ /* those should be reset too since
+ they create memory references. */
+ init_waitqueue_head(&chip->wq);
+ mutex_init(&chip->mutex);
+ chip++;
+ }
+ }
+
+ return mtd;
+}
+EXPORT_SYMBOL(lpddr_cmdset);
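+
+/*
+ * Layout example (illustrative): with lpddr->numchips == 4 and
+ * qinfo->HWPartsNum == 2 the loop above creates 2 flchip_shared
+ * structures, one per physical chip, and points each pair of
+ * partition flchips at the shared structure of its parent chip.
+ */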
+
+static int wait_for_ready(struct map_info *map, struct flchip *chip,
+ unsigned int chip_op_time)
+{
+ unsigned int timeo, reset_timeo, sleep_time;
+ unsigned int dsr;
+ flstate_t chip_state = chip->state;
+ int ret = 0;
+
+ /* set our timeout to 8 times the expected delay */
+ timeo = chip_op_time * 8;
+ if (!timeo)
+ timeo = 500000;
+ reset_timeo = timeo;
+ sleep_time = chip_op_time / 2;
+
+ for (;;) {
+ dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
+ if (dsr & DSR_READY_STATUS)
+ break;
+ if (!timeo) {
+			printk(KERN_ERR "%s: Flash timeout error, state %d\n",
+ map->name, chip_state);
+ ret = -ETIME;
+ break;
+ }
+
+		/* OK, still waiting. Drop the lock, wait a while and retry. */
+ mutex_unlock(&chip->mutex);
+ if (sleep_time >= 1000000/HZ) {
+ /*
+ * Half of the normal delay still remaining
+ * can be performed with a sleeping delay instead
+ * of busy waiting.
+ */
+ msleep(sleep_time/1000);
+ timeo -= sleep_time;
+ sleep_time = 1000000/HZ;
+ } else {
+ udelay(1);
+ cond_resched();
+ timeo--;
+ }
+ mutex_lock(&chip->mutex);
+
+ while (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ }
+ if (chip->erase_suspended || chip->write_suspended) {
+			/* A suspend occurred while we slept: reset timeout */
+ timeo = reset_timeo;
+ chip->erase_suspended = chip->write_suspended = 0;
+ }
+ }
+ /* check status for errors */
+ if (dsr & DSR_ERR) {
+ /* Clear DSR*/
+ map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
+		printk(KERN_WARNING "%s: Bad status on wait: 0x%x\n",
+ map->name, dsr);
+ print_drs_error(dsr);
+ ret = -EIO;
+ }
+ chip->state = FL_READY;
+ return ret;
+}
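+
+/*
+ * Timing example (illustrative): for an expected chip_op_time of
+ * 100000us the loop above budgets 8 * 100000 = 800000us before giving
+ * up with -ETIME. It first sleeps half the expected time, then one
+ * scheduler tick per iteration; operations expected to finish in less
+ * than a tick are busy-waited in 1us steps instead.
+ */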
+
+static int get_chip(struct map_info *map, struct flchip *chip, int mode)
+{
+ int ret;
+ DECLARE_WAITQUEUE(wait, current);
+
+ retry:
+ if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
+ && chip->state != FL_SYNCING) {
+ /*
+		 * OK. There is a possibility of contention on the write/erase
+		 * operations which are global to the real chip and not per
+		 * partition, so let's fight it out in the partition which
+		 * currently has authority on the operation.
+ *
+ * The rules are as follows:
+ *
+ * - any write operation must own shared->writing.
+ *
+ * - any erase operation must own _both_ shared->writing and
+ * shared->erasing.
+ *
+		 * - contention arbitration is handled in the owner's context.
+ *
+ * The 'shared' struct can be read and/or written only when
+ * its lock is taken.
+ */
+ struct flchip_shared *shared = chip->priv;
+ struct flchip *contender;
+ mutex_lock(&shared->lock);
+ contender = shared->writing;
+ if (contender && contender != chip) {
+ /*
+ * The engine to perform desired operation on this
+ * partition is already in use by someone else.
+ * Let's fight over it in the context of the chip
+ * currently using it. If it is possible to suspend,
+ * that other partition will do just that, otherwise
+ * it'll happily send us to sleep. In any case, when
+ * get_chip returns success we're clear to go ahead.
+ */
+ ret = mutex_trylock(&contender->mutex);
+ mutex_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+ mutex_unlock(&chip->mutex);
+ ret = chip_ready(map, contender, mode);
+ mutex_lock(&chip->mutex);
+
+ if (ret == -EAGAIN) {
+ mutex_unlock(&contender->mutex);
+ goto retry;
+ }
+ if (ret) {
+ mutex_unlock(&contender->mutex);
+ return ret;
+ }
+ mutex_lock(&shared->lock);
+
+			/* We should not own the chip if it is already in
+			 * FL_SYNCING state. Put the contender and retry. */
+ if (chip->state == FL_SYNCING) {
+ put_chip(map, contender);
+ mutex_unlock(&contender->mutex);
+ goto retry;
+ }
+ mutex_unlock(&contender->mutex);
+ }
+
+ /* Check if we have suspended erase on this chip.
+ Must sleep in such a case. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+ mutex_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ goto retry;
+ }
+
+ /* We now own it */
+ shared->writing = chip;
+ if (mode == FL_ERASING)
+ shared->erasing = chip;
+ mutex_unlock(&shared->lock);
+ }
+
+ ret = chip_ready(map, chip, mode);
+ if (ret == -EAGAIN)
+ goto retry;
+
+ return ret;
+}
+
+static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
+{
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int ret = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /* Prevent setting state FL_SYNCING for chip in suspended state. */
+ if (FL_SYNCING == mode && FL_READY != chip->oldstate)
+ goto sleep;
+
+ switch (chip->state) {
+ case FL_READY:
+ case FL_JEDEC_QUERY:
+ return 0;
+
+ case FL_ERASING:
+ if (!lpddr->qinfo->SuspEraseSupp ||
+ !(mode == FL_READY || mode == FL_POINT))
+ goto sleep;
+
+ map_write(map, CMD(LPDDR_SUSPEND),
+ map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ ret = wait_for_ready(map, chip, 0);
+ if (ret) {
+			/* Oops, something went wrong. */
+			/* Resume and pretend we weren't here. */
+ put_chip(map, chip);
+			printk(KERN_ERR "%s: suspend operation failed. "
+					"State may be wrong\n", map->name);
+ return -EIO;
+ }
+ chip->erase_suspended = 1;
+ chip->state = FL_READY;
+ return 0;
+ /* Erase suspend */
+ case FL_POINT:
+ /* Only if there's no operation suspended... */
+ if (mode == FL_READY && chip->oldstate == FL_READY)
+ return 0;
+
+ default:
+sleep:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ return -EAGAIN;
+ }
+}
+
+static void put_chip(struct map_info *map, struct flchip *chip)
+{
+ if (chip->priv) {
+ struct flchip_shared *shared = chip->priv;
+ mutex_lock(&shared->lock);
+ if (shared->writing == chip && chip->oldstate == FL_READY) {
+ /* We own the ability to write, but we're done */
+ shared->writing = shared->erasing;
+ if (shared->writing && shared->writing != chip) {
+ /* give back the ownership */
+ struct flchip *loaner = shared->writing;
+ mutex_lock(&loaner->mutex);
+ mutex_unlock(&shared->lock);
+ mutex_unlock(&chip->mutex);
+ put_chip(map, loaner);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
+ wake_up(&chip->wq);
+ return;
+ }
+ shared->erasing = NULL;
+ shared->writing = NULL;
+ } else if (shared->erasing == chip && shared->writing != chip) {
+ /*
+ * We own the ability to erase without the ability
+ * to write, which means the erase was suspended
+ * and some other partition is currently writing.
+ * Don't let the switch below mess things up since
+ * we don't have ownership to resume anything.
+ */
+ mutex_unlock(&shared->lock);
+ wake_up(&chip->wq);
+ return;
+ }
+ mutex_unlock(&shared->lock);
+ }
+
+ switch (chip->oldstate) {
+ case FL_ERASING:
+ map_write(map, CMD(LPDDR_RESUME),
+ map->pfow_base + PFOW_COMMAND_CODE);
+ map_write(map, CMD(LPDDR_START_EXECUTION),
+ map->pfow_base + PFOW_COMMAND_EXECUTE);
+ chip->oldstate = FL_READY;
+ chip->state = FL_ERASING;
+ break;
+ case FL_READY:
+ break;
+ default:
+ printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
+ map->name, chip->oldstate);
+ }
+ wake_up(&chip->wq);
+}
+
+static int do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const struct kvec **pvec,
+ unsigned long *pvec_seek, int len)
+{
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ map_word datum;
+ int ret, wbufsize, word_gap, words;
+ const struct kvec *vec;
+ unsigned long vec_seek;
+ unsigned long prog_buf_ofs;
+
+ wbufsize = 1 << lpddr->qinfo->BufSizeShift;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, FL_WRITING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+ /* Figure out the number of words to write */
+ word_gap = (-adr & (map_bankwidth(map)-1));
+ words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
+ if (!word_gap) {
+ words--;
+ } else {
+ word_gap = map_bankwidth(map) - word_gap;
+ adr -= word_gap;
+ datum = map_word_ff(map);
+ }
+ /* Write data */
+	/* Get the program buffer offset from PFOW register data first */
+ prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
+ map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
+ vec = *pvec;
+ vec_seek = *pvec_seek;
+ do {
+ int n = map_bankwidth(map) - word_gap;
+
+ if (n > vec->iov_len - vec_seek)
+ n = vec->iov_len - vec_seek;
+ if (n > len)
+ n = len;
+
+ if (!word_gap && (len < map_bankwidth(map)))
+ datum = map_word_ff(map);
+
+ datum = map_word_load_partial(map, datum,
+ vec->iov_base + vec_seek, word_gap, n);
+
+ len -= n;
+ word_gap += n;
+ if (!len || word_gap == map_bankwidth(map)) {
+ map_write(map, datum, prog_buf_ofs);
+ prog_buf_ofs += map_bankwidth(map);
+ word_gap = 0;
+ }
+
+ vec_seek += n;
+ if (vec_seek == vec->iov_len) {
+ vec++;
+ vec_seek = 0;
+ }
+ } while (len);
+ *pvec = vec;
+ *pvec_seek = vec_seek;
+
+ /* GO GO GO */
+ send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
+ chip->state = FL_WRITING;
+ ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
+ if (ret) {
+		printk(KERN_WARNING "%s: Buffer program error: %d at 0x%lx\n",
+ map->name, ret, adr);
+ goto out;
+ }
+
+ out: put_chip(map, chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
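+
+/*
+ * Alignment example (illustrative): for a write to adr == 0x102 on a
+ * 4-byte-wide map, word_gap == 2, so adr is pulled back to 0x100 and
+ * the first map_word is pre-filled with 0xff so the two leading pad
+ * bytes do not disturb existing data.
+ */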
+
+static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ struct flchip *chip = &lpddr->chips[chipnum];
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, FL_ERASING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+ send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
+ chip->state = FL_ERASING;
+ ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
+ if (ret) {
+		printk(KERN_WARNING "%s: Erase block error %d at: %llx\n",
+ map->name, ret, adr);
+ goto out;
+ }
+ out: put_chip(map, chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ struct flchip *chip = &lpddr->chips[chipnum];
+ int ret = 0;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, FL_READY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ map_copy_from(map, buf, adr, len);
+ *retlen = len;
+
+ put_chip(map, chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, void **mtdbuf, resource_size_t *phys)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ unsigned long ofs, last_end = 0;
+ struct flchip *chip = &lpddr->chips[chipnum];
+ int ret = 0;
+
+ if (!map->virt)
+ return -EINVAL;
+
+ /* ofs: offset within the first chip that the first read should start */
+ ofs = adr - (chipnum << lpddr->chipshift);
+ *mtdbuf = (void *)map->virt + chip->start + ofs;
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= lpddr->numchips)
+ break;
+
+ /* We cannot point across chips that are virtually disjoint */
+ if (!last_end)
+ last_end = chip->start;
+ else if (chip->start != last_end)
+ break;
+
+ if ((len + ofs - 1) >> lpddr->chipshift)
+ thislen = (1<<lpddr->chipshift) - ofs;
+ else
+ thislen = len;
+ /* get the chip */
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, FL_POINT);
+ mutex_unlock(&chip->mutex);
+ if (ret)
+ break;
+
+ chip->state = FL_POINT;
+ chip->ref_point_counter++;
+ *retlen += thislen;
+ len -= thislen;
+
+ ofs = 0;
+ last_end += 1 << lpddr->chipshift;
+ chipnum++;
+ chip = &lpddr->chips[chipnum];
+ }
+ return 0;
+}
+
+static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift, err = 0;
+ unsigned long ofs;
+
+ /* ofs: offset within the first chip that the first read should start */
+ ofs = adr - (chipnum << lpddr->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+ struct flchip *chip;
+
+ chip = &lpddr->chips[chipnum];
+ if (chipnum >= lpddr->numchips)
+ break;
+
+ if ((len + ofs - 1) >> lpddr->chipshift)
+ thislen = (1<<lpddr->chipshift) - ofs;
+ else
+ thislen = len;
+
+ mutex_lock(&chip->mutex);
+ if (chip->state == FL_POINT) {
+ chip->ref_point_counter--;
+ if (chip->ref_point_counter == 0)
+ chip->state = FL_READY;
+ } else {
+			printk(KERN_WARNING "%s: Warning: unpoint called on non-"
+					"pointed region\n", map->name);
+ err = -EINVAL;
+ }
+
+ put_chip(map, chip);
+ mutex_unlock(&chip->mutex);
+
+ len -= thislen;
+ ofs = 0;
+ chipnum++;
+ }
+
+ return err;
+}
+
+static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct kvec vec;
+
+ vec.iov_base = (void *) buf;
+ vec.iov_len = len;
+
+ return lpddr_writev(mtd, &vec, 1, to, retlen);
+}
+
+
+static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int ret = 0;
+ int chipnum;
+ unsigned long ofs, vec_seek, i;
+ int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
+ size_t len = 0;
+
+ for (i = 0; i < count; i++)
+ len += vecs[i].iov_len;
+
+ if (!len)
+ return 0;
+
+ chipnum = to >> lpddr->chipshift;
+
+ ofs = to;
+ vec_seek = 0;
+
+ do {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
+
+ if (size > len)
+ size = len;
+
+ ret = do_write_buffer(map, &lpddr->chips[chipnum],
+ ofs, &vecs, &vec_seek, size);
+ if (ret)
+ return ret;
+
+ ofs += size;
+ (*retlen) += size;
+ len -= size;
+
+ /* Be nice and reschedule with the chip in a usable
+ * state for other processes */
+ cond_resched();
+
+ } while (len);
+
+ return 0;
+}
+
+static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ unsigned long ofs, len;
+ int ret;
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
+
+ ofs = instr->addr;
+ len = instr->len;
+
+ while (len > 0) {
+ ret = do_erase_oneblock(mtd, ofs);
+ if (ret)
+ return ret;
+ ofs += size;
+ len -= size;
+ }
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+#define DO_XXLOCK_LOCK 1
+#define DO_XXLOCK_UNLOCK 2
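+/* One helper serves both paths: 'thunk' selects the PFOW command to issue
+ * and the transient chip state while the operation is in flight. */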
+static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
+{
+ int ret = 0;
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ struct flchip *chip = &lpddr->chips[chipnum];
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ if (thunk == DO_XXLOCK_LOCK) {
+ send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
+ chip->state = FL_LOCKING;
+ } else if (thunk == DO_XXLOCK_UNLOCK) {
+ send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
+ chip->state = FL_UNLOCKING;
+ } else
+ BUG();
+
+	ret = wait_for_ready(map, chip, 1);
+	if (ret)
+		printk(KERN_ERR "%s: block lock/unlock error status %d\n",
+				map->name, ret);
+
+	put_chip(map, chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
+}
+
+static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
+MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
new file mode 100644
index 000000000..69f211234
--- /dev/null
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -0,0 +1,249 @@
+/*
+ * Probing flash chips with QINFO records.
+ * (C) 2008 Korolev Alexey <akorolev@infradead.org>
+ * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/xip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/pfow.h>
+#include <linux/mtd/qinfo.h>
+
+static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr);
+struct mtd_info *lpddr_probe(struct map_info *map);
+static struct lpddr_private *lpddr_probe_chip(struct map_info *map);
+static int lpddr_pfow_present(struct map_info *map,
+ struct lpddr_private *lpddr);
+
+static struct qinfo_query_info qinfo_array[] = {
+ /* General device info */
+ {0, 0, "DevSizeShift", "Device size 2^n bytes"},
+ {0, 3, "BufSizeShift", "Program buffer size 2^n bytes"},
+ /* Erase block information */
+ {1, 1, "TotalBlocksNum", "Total number of blocks"},
+ {1, 2, "UniformBlockSizeShift", "Uniform block size 2^n bytes"},
+ /* Partition information */
+ {2, 1, "HWPartsNum", "Number of hardware partitions"},
+ /* Optional features */
+ {5, 1, "SuspEraseSupp", "Suspend erase supported"},
+ /* Operation typical time */
+ {10, 0, "SingleWordProgTime", "Single word program 2^n u-sec"},
+ {10, 1, "ProgBufferTime", "Program buffer write 2^n u-sec"},
+ {10, 2, "BlockEraseTime", "Block erase 2^n m-sec"},
+ {10, 3, "FullChipEraseTime", "Full chip erase 2^n m-sec"},
+};
+
+static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
+{
+ int qinfo_lines = ARRAY_SIZE(qinfo_array);
+ int i;
+ int bankwidth = map_bankwidth(map) * 8;
+ int major, minor;
+
+ for (i = 0; i < qinfo_lines; i++) {
+ if (strcmp(id_str, qinfo_array[i].id_str) == 0) {
+ major = qinfo_array[i].major & ((1 << bankwidth) - 1);
+ minor = qinfo_array[i].minor & ((1 << bankwidth) - 1);
+ return minor | (major << bankwidth);
+ }
+ }
+	printk(KERN_ERR "%s: qinfo id string is wrong!\n", map->name);
+ BUG();
+ return -1;
+}
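+
+/*
+ * Example: on a 16-bit-wide map (bankwidth of 16 bits), "BlockEraseTime"
+ * (major 10, minor 2 in qinfo_array) is returned as (10 << 16) | 2;
+ * lpddr_info_query() then splits this back into the PFOW command address
+ * low and high words.
+ */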
+
+static uint16_t lpddr_info_query(struct map_info *map, char *id_str)
+{
+ unsigned int dsr, val;
+ int bits_per_chip = map_bankwidth(map) * 8;
+ unsigned long adr = lpddr_get_qinforec_pos(map, id_str);
+ int attempts = 20;
+
+ /* Write a request for the PFOW record */
+ map_write(map, CMD(LPDDR_INFO_QUERY),
+ map->pfow_base + PFOW_COMMAND_CODE);
+ map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)),
+ map->pfow_base + PFOW_COMMAND_ADDRESS_L);
+ map_write(map, CMD(adr >> bits_per_chip),
+ map->pfow_base + PFOW_COMMAND_ADDRESS_H);
+ map_write(map, CMD(LPDDR_START_EXECUTION),
+ map->pfow_base + PFOW_COMMAND_EXECUTE);
+
+ while ((attempts--) > 0) {
+ dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
+ if (dsr & DSR_READY_STATUS)
+ break;
+ udelay(10);
+ }
+
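+	/* Fall through after ~200us (20 attempts x 10us) even if DSR never
+	 * reported ready; whatever the command data register holds is used. */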
+ val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA));
+ return val;
+}
+
+static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
+{
+ map_word pfow_val[4];
+
+ /* Check identification string */
+ pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
+ pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
+ pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O);
+ pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W);
+
+ if (!map_word_equal(map, CMD('P'), pfow_val[0]))
+ goto out;
+
+ if (!map_word_equal(map, CMD('F'), pfow_val[1]))
+ goto out;
+
+ if (!map_word_equal(map, CMD('O'), pfow_val[2]))
+ goto out;
+
+ if (!map_word_equal(map, CMD('W'), pfow_val[3]))
+ goto out;
+
+ return 1; /* "PFOW" is found */
+out:
+	printk(KERN_WARNING "%s: PFOW string at 0x%lx not found\n",
+ map->name, map->pfow_base);
+ return 0;
+}
+
+static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
+{
+
+ lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
+ if (!lpddr->qinfo)
+ return 0;
+
+ /* Get the ManuID */
+ lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
+ /* Get the DeviceID */
+ lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID));
+ /* read parameters from chip qinfo table */
+ lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift");
+ lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum");
+ lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift");
+ lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum");
+ lpddr->qinfo->UniformBlockSizeShift =
+ lpddr_info_query(map, "UniformBlockSizeShift");
+ lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp");
+ lpddr->qinfo->SingleWordProgTime =
+ lpddr_info_query(map, "SingleWordProgTime");
+ lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime");
+ lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime");
+ return 1;
+}
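+
+/*
+ * Each hardware partition is exposed as its own virtual chip: numchips is
+ * scaled by HWPartsNum and chipshift is shrunk to the per-partition size.
+ */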
+static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
+{
+ struct lpddr_private lpddr;
+ struct lpddr_private *retlpddr;
+ int numvirtchips;
+
+
+ if ((map->pfow_base + 0x1000) >= map->size) {
+		printk(KERN_NOTICE "%s: probe at base (0x%08lx) past the end of "
+			"the map (0x%08lx)\n", map->name,
+ (unsigned long)map->pfow_base, map->size - 1);
+ return NULL;
+ }
+ memset(&lpddr, 0, sizeof(struct lpddr_private));
+ if (!lpddr_pfow_present(map, &lpddr))
+ return NULL;
+
+ if (!lpddr_chip_setup(map, &lpddr))
+ return NULL;
+
+ /* Ok so we found a chip */
+ lpddr.chipshift = lpddr.qinfo->DevSizeShift;
+ lpddr.numchips = 1;
+
+ numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
+ retlpddr = kzalloc(sizeof(struct lpddr_private) +
+ numvirtchips * sizeof(struct flchip), GFP_KERNEL);
+ if (!retlpddr)
+ return NULL;
+
+ memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
+
+ retlpddr->numchips = numvirtchips;
+ retlpddr->chipshift = retlpddr->qinfo->DevSizeShift -
+ __ffs(retlpddr->qinfo->HWPartsNum);
+
+ return retlpddr;
+}
+
+struct mtd_info *lpddr_probe(struct map_info *map)
+{
+ struct mtd_info *mtd = NULL;
+ struct lpddr_private *lpddr;
+
+	/* First probe the map to see if we can find a PFOW window here */
+ lpddr = lpddr_probe_chip(map);
+ if (!lpddr)
+ return NULL;
+
+ map->fldrv_priv = lpddr;
+ mtd = lpddr_cmdset(map);
+ if (mtd) {
+ if (mtd->size > map->size) {
+			printk(KERN_WARNING "Reducing visibility of %ldKiB chip "
+				"to %ldKiB\n", (unsigned long)mtd->size >> 10,
+ (unsigned long)map->size >> 10);
+ mtd->size = map->size;
+ }
+ return mtd;
+ }
+
+ kfree(lpddr->qinfo);
+ kfree(lpddr);
+ map->fldrv_priv = NULL;
+ return NULL;
+}
+
+static struct mtd_chip_driver lpddr_chipdrv = {
+ .probe = lpddr_probe,
+ .name = "qinfo_probe",
+ .module = THIS_MODULE
+};
+
+static int __init lpddr_probe_init(void)
+{
+ register_mtd_chip_driver(&lpddr_chipdrv);
+ return 0;
+}
+
+static void __exit lpddr_probe_exit(void)
+{
+ unregister_mtd_chip_driver(&lpddr_chipdrv);
+}
+
+module_init(lpddr_probe_init);
+module_exit(lpddr_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vasiliy Leonenko <vasiliy.leonenko@gmail.com>");
+MODULE_DESCRIPTION("Driver to probe qinfo flash chips");
+
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
new file mode 100644
index 000000000..e715ae906
--- /dev/null
+++ b/drivers/mtd/maps/Kconfig
@@ -0,0 +1,402 @@
+menu "Mapping drivers for chip access"
+ depends on MTD!=n
+ depends on HAS_IOMEM
+
+config MTD_COMPLEX_MAPPINGS
+ bool "Support non-linear mappings of flash chips"
+ help
+ This causes the chip drivers to allow for complicated
+ paged mappings of flash chips.
+
+config MTD_PHYSMAP
+ tristate "Flash device in physical memory map"
+ depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_LPDDR
+ help
+ This provides a 'mapping' driver which allows the NOR Flash and
+ ROM driver code to communicate with chips which are mapped
+ physically into the CPU's memory. You will need to configure
+ the physical address and size of the flash chips on your
+ particular board as well as the bus width, either statically
+ with config options or at run-time.
+
+ To compile this driver as a module, choose M here: the
+ module will be called physmap.
+
+config MTD_PHYSMAP_COMPAT
+ bool "Physmap compat support"
+ depends on MTD_PHYSMAP
+ default n
+ help
+ Setup a simple mapping via the Kconfig options. Normally the
+ physmap configuration options are done via your board's
+ resource file.
+
+ If unsure, say N here.
+
+config MTD_PHYSMAP_START
+ hex "Physical start address of flash mapping"
+ depends on MTD_PHYSMAP_COMPAT
+ default "0x8000000"
+ help
+ This is the physical memory location at which the flash chips
+ are mapped on your particular target board. Refer to the
+ memory map which should hopefully be in the documentation for
+ your board.
+
+config MTD_PHYSMAP_LEN
+ hex "Physical length of flash mapping"
+ depends on MTD_PHYSMAP_COMPAT
+ default "0"
+ help
+ This is the total length of the mapping of the flash chips on
+ your particular board. If there is space, or aliases, in the
+ physical memory map between the chips, this could be larger
+ than the total amount of flash present. Refer to the memory
+ map which should hopefully be in the documentation for your
+ board.
+
+config MTD_PHYSMAP_BANKWIDTH
+ int "Bank width in octets"
+ depends on MTD_PHYSMAP_COMPAT
+ default "2"
+ help
+ This is the total width of the data bus of the flash devices
+ in octets. For example, if you have a data bus width of 32
+ bits, you would set the bus width octet value to 4. This is
+ used internally by the CFI drivers.
+
+config MTD_PHYSMAP_OF
+ tristate "Memory device in physical memory map based on OF description"
+ depends on OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_RAM)
+ help
+ This provides a 'mapping' driver which allows the NOR Flash, ROM
+ and RAM driver code to communicate with chips which are mapped
+ physically into the CPU's memory. The mapping description here is
+ taken from OF device tree.
+
+config MTD_PMC_MSP_EVM
+ tristate "CFI Flash device mapped on PMC-Sierra MSP"
+ depends on PMC_MSP && MTD_CFI
+ help
+ This provides a 'mapping' driver which supports the way
+ in which user-programmable flash chips are connected on the
+ PMC-Sierra MSP eval/demo boards.
+
+choice
+ prompt "Maximum mappable memory available for flash IO"
+ depends on MTD_PMC_MSP_EVM
+ default MSP_FLASH_MAP_LIMIT_32M
+
+config MSP_FLASH_MAP_LIMIT_32M
+ bool "32M"
+
+endchoice
+
+config MSP_FLASH_MAP_LIMIT
+ hex
+ default "0x02000000"
+ depends on MSP_FLASH_MAP_LIMIT_32M
+
+config MTD_SUN_UFLASH
+ tristate "Sun Microsystems userflash support"
+ depends on SPARC && MTD_CFI && PCI
+ help
+ This provides a 'mapping' driver which supports the way in
+ which user-programmable flash chips are connected on various
+ Sun Microsystems boardsets. This driver will require CFI support
+ in the kernel, so if you did not enable CFI previously, do that now.
+
+config MTD_SC520CDP
+ tristate "CFI Flash device mapped on AMD SC520 CDP"
+ depends on (MELAN || COMPILE_TEST) && MTD_CFI
+ help
+ The SC520 CDP board has two banks of CFI-compliant chips and one
+ Dual-in-line JEDEC chip. This 'mapping' driver supports that
+ arrangement, implementing three MTD devices.
+
+config MTD_NETSC520
+ tristate "CFI Flash device mapped on AMD NetSc520"
+ depends on (MELAN || COMPILE_TEST) && MTD_CFI
+ help
+ This enables access routines for the flash chips on the AMD NetSc520
+ demonstration board. If you have one of these boards and would like
+ to use the flash chips on it, say 'Y'.
+
+config MTD_TS5500
+ tristate "JEDEC Flash device mapped on Technologic Systems TS-5500"
+ depends on TS5500 || COMPILE_TEST
+ select MTD_JEDECPROBE
+ select MTD_CFI_AMDSTD
+ help
+ This provides a driver for the on-board flash of the Technologic
+ System's TS-5500 board. The 2MB flash is split into 3 partitions
+ which are accessed as separate MTD devices.
+
+ mtd0 and mtd2 are the two BIOS drives, which use the resident
+ flash disk (RFD) flash translation layer.
+
+ mtd1 allows you to reprogram your BIOS. BE VERY CAREFUL.
+
+ Note that jumper 3 ("Write Enable Drive A") must be set
+ otherwise detection won't succeed.
+
+config MTD_SBC_GXX
+ tristate "CFI Flash device mapped on Arcom SBC-GXx boards"
+ depends on X86 && MTD_CFI_INTELEXT && MTD_COMPLEX_MAPPINGS
+ help
+ This provides a driver for the on-board flash of Arcom Control
+ Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX.
+ By default the flash is split into 3 partitions which are accessed
+ as separate MTD devices. This board utilizes Intel StrataFlash.
+ More info at
+ <http://www.arcomcontrols.com/products/icp/pc104/processors/SBC_GX1.htm>.
+
+config MTD_PXA2XX
+ tristate "CFI Flash device mapped on Intel XScale PXA2xx based boards"
+ depends on (PXA25x || PXA27x) && MTD_CFI_INTELEXT
+ help
+ This provides a driver for the NOR flash attached to a PXA2xx chip.
+
+config MTD_SCx200_DOCFLASH
+ tristate "Flash device mapped with DOCCS on NatSemi SCx200"
+ depends on SCx200 && MTD_CFI
+ help
+ Enable support for a flash chip mapped using the DOCCS signal on a
+ National Semiconductor SCx200 processor.
+
+ If you don't know what to do here, say N.
+
+ If compiled as a module, it will be called scx200_docflash.
+
+config MTD_AMD76XROM
+ tristate "BIOS flash chip on AMD76x southbridge"
+ depends on X86 && MTD_JEDECPROBE
+ help
+ Support for treating the BIOS flash chip on AMD76x motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_ICHXROM
+ tristate "BIOS flash chip on Intel Controller Hub 2/3/4/5"
+ depends on X86 && MTD_JEDECPROBE
+ help
+ Support for treating the BIOS flash chip on ICHX motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_ESB2ROM
+ tristate "BIOS flash chip on Intel ESB Controller Hub 2"
+ depends on X86 && MTD_JEDECPROBE && PCI
+ help
+ Support for treating the BIOS flash chip on ESB2 motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_CK804XROM
+ tristate "BIOS flash chip on Nvidia CK804"
+ depends on X86 && MTD_JEDECPROBE && PCI
+ help
+ Support for treating the BIOS flash chip on nvidia motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_SCB2_FLASH
+ tristate "BIOS flash chip on Intel SCB2 boards"
+ depends on X86 && MTD_JEDECPROBE && PCI
+ help
+ Support for treating the BIOS flash chip on Intel SCB2 boards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_TSUNAMI
+ tristate "Flash chips on Tsunami TIG bus"
+ depends on ALPHA_TSUNAMI && MTD_COMPLEX_MAPPINGS
+ help
+ Support for the flash chip on Tsunami TIG bus.
+
+config MTD_NETtel
+ tristate "CFI flash device on SnapGear/SecureEdge"
+ depends on X86 && MTD_JEDECPROBE
+ help
+ Support for flash chips on NETtel/SecureEdge/SnapGear boards.
+
+config MTD_LANTIQ
+ tristate "Lantiq SoC NOR support"
+ depends on LANTIQ
+ help
+ Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
+
+config MTD_L440GX
+ tristate "BIOS flash chip on Intel L440GX boards"
+ depends on X86 && MTD_JEDECPROBE
+ help
+ Support for treating the BIOS flash chip on Intel L440GX motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_CFI_FLAGADM
+ tristate "CFI Flash device mapping on FlagaDM"
+ depends on PPC_8xx && MTD_CFI
+ help
+ Mapping for the Flaga digital module. If you don't have one, ignore
+ this setting.
+
+config MTD_SOLUTIONENGINE
+ tristate "CFI Flash device mapped on Hitachi SolutionEngine"
+ depends on SOLUTION_ENGINE && MTD_CFI && MTD_REDBOOT_PARTS
+ help
+ This enables access to the flash chips on the Hitachi SolutionEngine and
+ similar boards. Say 'Y' if you are building a kernel for such a board.
+
+config MTD_SA1100
+ tristate "CFI Flash device mapped on StrongARM SA11x0"
+ depends on MTD_CFI && ARCH_SA1100
+ help
+ This enables access to the flash chips on most platforms based on
+ the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
+ If you have such a board, say 'Y'.
+
+config MTD_DC21285
+ tristate "CFI Flash device mapped on DC21285 Footbridge"
+ depends on MTD_CFI && ARCH_FOOTBRIDGE && MTD_COMPLEX_MAPPINGS
+ help
+ This provides a driver for the flash accessed using Intel's
+ 21285 bridge used with Intel's StrongARM processors. More info at
+ <http://www.intel.com/design/bridge/docs/21285_documentation.htm>.
+
+config MTD_IXP4XX
+ tristate "CFI Flash device mapped on Intel IXP4xx based systems"
+ depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX
+ help
+ This enables MTD access to flash devices on platforms based
+ on Intel's IXP4xx family of network processors such as the
+ IXDP425 and Coyote. If you have an IXP4xx based board and
+ would like to use the flash chips on it, say 'Y'.
+
+config MTD_IMPA7
+ tristate "JEDEC Flash device mapped on impA7"
+ depends on ARM && MTD_JEDECPROBE
+ help
+ This enables access to the NOR Flash on the impA7 board of
+ implementa GmbH. If you have such a board, say 'Y' here.
+
+# This needs CFI or JEDEC, depending on the cards found.
+config MTD_PCI
+ tristate "PCI MTD driver"
+ depends on PCI && MTD_COMPLEX_MAPPINGS
+ help
+ Mapping for accessing flash devices on add-in cards like the Intel XScale
+ IQ80310 card, and the Intel EBSA285 card in blank ROM programming mode
+ (please see the manual for the link settings).
+
+ If you are not sure, say N.
+
+config MTD_PCMCIA
+ tristate "PCMCIA MTD driver"
+ depends on PCMCIA && MTD_COMPLEX_MAPPINGS
+ help
+ Map driver for accessing PCMCIA linear flash memory cards. These
+ cards are usually around 4-16MiB in size. This does not include
+ Compact Flash cards which are treated as IDE devices.
+
+config MTD_PCMCIA_ANONYMOUS
+ bool "Use PCMCIA MTD drivers for anonymous PCMCIA cards"
+ depends on MTD_PCMCIA
+ help
+ If this option is enabled, PCMCIA cards which do not report
+ anything about themselves are assumed to be MTD cards.
+
+ If unsure, say N.
+
+config MTD_BFIN_ASYNC
+ tristate "Blackfin BF533-STAMP Flash Chip Support"
+ depends on BFIN533_STAMP && MTD_CFI && MTD_COMPLEX_MAPPINGS
+ default y
+ help
+ Map driver which allows for simultaneous utilization of
+ ethernet and CFI parallel flash.
+
+ If compiled as a module, it will be called bfin-async-flash.
+
+config MTD_GPIO_ADDR
+ tristate "GPIO-assisted Flash Chip Support"
+ depends on GPIOLIB
+ depends on MTD_COMPLEX_MAPPINGS
+ help
+ Map driver which allows flashes to be partially physically addressed
+ and assisted by GPIOs.
+
+ If compiled as a module, it will be called gpio-addr-flash.
+
+config MTD_UCLINUX
+ bool "Generic uClinux RAM/ROM filesystem support"
+ depends on (MTD_RAM=y || MTD_ROM=y) && (!MMU || COLDFIRE)
+ help
+ Map driver to support image based filesystems for uClinux.
+
+config MTD_INTEL_VR_NOR
+ tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0"
+ depends on PCI
+ help
+ Map driver for a NOR flash bank located on the Expansion Bus of the
+ Intel Vermilion Range chipset.
+
+config MTD_RBTX4939
+ tristate "Map driver for RBTX4939 board"
+ depends on TOSHIBA_RBTX4939 && MTD_CFI && MTD_COMPLEX_MAPPINGS
+ help
+ Map driver for NOR flash chips on RBTX4939 board.
+
+config MTD_PLATRAM
+ tristate "Map driver for platform device RAM (mtd-ram)"
+ select MTD_RAM
+ help
+ Map driver for RAM areas described via the platform device
+ system.
+
+ This selection automatically selects the map_ram driver.
+
+config MTD_VMU
+ tristate "Map driver for Dreamcast VMU"
+ depends on MAPLE
+ help
+ This driver enables access to the Dreamcast Visual Memory Unit (VMU).
+
+ Most Dreamcast users will want to say Y here.
+
+ To build this as a module select M here, the module will be called
+ vmu-flash.
+
+config MTD_PISMO
+ tristate "MTD discovery driver for PISMO modules"
+ depends on I2C
+ depends on ARCH_VERSATILE
+ help
+ This driver allows for discovery of PISMO modules - see
+ <http://www.pismoworld.org/>. These are small modules containing
+ up to five memory devices (eg, SRAM, flash, DOC) described by an
+ I2C EEPROM.
+
+ This driver does not create any MTD maps itself; instead it
+ creates MTD physmap and MTD SRAM platform devices. If you
+ enable this option, you should consider enabling MTD_PHYSMAP
+ and/or MTD_PLATRAM according to the devices on your module.
+
+ When built as a module, it will be called pismo.ko
+
+config MTD_LATCH_ADDR
+ tristate "Latch-assisted Flash Chip Support"
+ depends on MTD_COMPLEX_MAPPINGS
+ help
+ Map driver which allows flashes to be partially physically addressed
+ and have the upper address lines set by a board specific code.
+
+ If compiled as a module, it will be called latch-addr-flash.
+
+endmenu
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
new file mode 100644
index 000000000..141c91a5b
--- /dev/null
+++ b/drivers/mtd/maps/Makefile
@@ -0,0 +1,45 @@
+#
+# linux/drivers/maps/Makefile
+#
+
+ifeq ($(CONFIG_MTD_COMPLEX_MAPPINGS),y)
+obj-$(CONFIG_MTD) += map_funcs.o
+endif
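+# map_funcs.o provides the out-of-line simple_map_* helpers that chip
+# drivers fall back on when complex mappings are enabled.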
+
+# Chip mappings
+obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
+obj-$(CONFIG_MTD_DC21285) += dc21285.o
+obj-$(CONFIG_MTD_L440GX) += l440gx.o
+obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
+obj-$(CONFIG_MTD_ESB2ROM) += esb2rom.o
+obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
+obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
+obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
+obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
+obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
+obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
+obj-$(CONFIG_MTD_PISMO) += pismo.o
+obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
+obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
+obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
+obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
+obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
+obj-$(CONFIG_MTD_NETSC520) += netsc520.o
+obj-$(CONFIG_MTD_TS5500) += ts5500_flash.o
+obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
+obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
+obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
+obj-$(CONFIG_MTD_PCI) += pci.o
+obj-$(CONFIG_MTD_IMPA7) += impa7.o
+obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
+obj-$(CONFIG_MTD_NETtel) += nettel.o
+obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
+obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
+obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
+obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
+obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
+obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
+obj-$(CONFIG_MTD_VMU) += vmu-flash.o
+obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
+obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
+obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
new file mode 100644
index 000000000..f7207b0a7
--- /dev/null
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -0,0 +1,349 @@
+/*
+ * amd76xrom.c
+ *
+ * Normal mappings of chips in physical memory
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/list.h>
+
+
+#define xstr(s) str(s)
+#define str(s) #s
+#define MOD_NAME xstr(KBUILD_BASENAME)
+
+#define ADDRESS_NAME_LEN 18
+
+#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
+
+struct amd76xrom_window {
+ void __iomem *virt;
+ unsigned long phys;
+ unsigned long size;
+ struct list_head maps;
+ struct resource rsrc;
+ struct pci_dev *pdev;
+};
+
+struct amd76xrom_map_info {
+ struct list_head list;
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct resource rsrc;
+ char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
+};
+
+/* The 2 bits controlling the window size are often set to allow reading
+ * the BIOS, but too small to allow writing, since the lock registers are
+ * 4MiB lower in the address space than the data.
+ *
+ * This is intended to prevent flashing the bios, perhaps accidentally.
+ *
+ * This parameter allows the normal driver to over-ride the BIOS settings.
+ *
+ * The bits are 6 and 7. If both bits are set, it is a 5MiB window.
+ * If only the 7 Bit is set, it is a 4MiB window. Otherwise, a
+ * 64KiB window.
+ *
+ */
+static uint win_size_bits;
+module_param(win_size_bits, uint, 0);
+MODULE_PARM_DESC(win_size_bits, "ROM window size bits override for 0x43 byte, normally set by BIOS.");
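+/* For example, loading with win_size_bits=0xc0 sets bits 6 and 7 of config
+ * byte 0x43, requesting the full 5MiB window. */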
+
+static struct amd76xrom_window amd76xrom_window = {
+ .maps = LIST_HEAD_INIT(amd76xrom_window.maps),
+};
+
+static void amd76xrom_cleanup(struct amd76xrom_window *window)
+{
+ struct amd76xrom_map_info *map, *scratch;
+ u8 byte;
+
+ if (window->pdev) {
+ /* Disable writes through the rom window */
+ pci_read_config_byte(window->pdev, 0x40, &byte);
+ pci_write_config_byte(window->pdev, 0x40, byte & ~1);
+ pci_dev_put(window->pdev);
+ }
+
+ /* Free all of the mtd devices */
+ list_for_each_entry_safe(map, scratch, &window->maps, list) {
+ if (map->rsrc.parent) {
+ release_resource(&map->rsrc);
+ }
+ mtd_device_unregister(map->mtd);
+ map_destroy(map->mtd);
+ list_del(&map->list);
+ kfree(map);
+ }
+ if (window->rsrc.parent)
+ release_resource(&window->rsrc);
+
+ if (window->virt) {
+ iounmap(window->virt);
+ window->virt = NULL;
+ window->phys = 0;
+ window->size = 0;
+ window->pdev = NULL;
+ }
+}
+
+
+static int amd76xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ u8 byte;
+ struct amd76xrom_window *window = &amd76xrom_window;
+ struct amd76xrom_map_info *map = NULL;
+ unsigned long map_top;
+
+ /* Remember the pci dev I find the window in - already have a ref */
+ window->pdev = pdev;
+
+ /* Enable the selected rom window. This is often incorrectly
+ * set up by the BIOS, and the 4MiB offset for the lock registers
+ * requires the full 5MiB of window space.
+ *
+ * This 'write, then read' approach leaves the bits for
+ * other uses of the hardware info.
+ */
+ pci_read_config_byte(pdev, 0x43, &byte);
+	pci_write_config_byte(pdev, 0x43, byte | win_size_bits);
+
+	/* Assume the rom window is properly set up, and find its size */
+ pci_read_config_byte(pdev, 0x43, &byte);
+	if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6)))
+		window->phys = 0xffb00000; /* 5MiB */
+	else if ((byte & (1<<7)) == (1<<7))
+		window->phys = 0xffc00000; /* 4MiB */
+	else
+		window->phys = 0xffff0000; /* 64KiB */
+ window->size = 0xffffffffUL - window->phys + 1UL;
+
+ /*
+ * Try to reserve the window mem region. If this fails then
+ * it is likely due to a fragment of the window being
+	 * "reserved" by the BIOS. In the case that the
+ * request_mem_region() fails then once the rom size is
+ * discovered we will try to reserve the unreserved fragment.
+ */
+ window->rsrc.name = MOD_NAME;
+ window->rsrc.start = window->phys;
+ window->rsrc.end = window->phys + window->size - 1;
+ window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, &window->rsrc)) {
+ window->rsrc.parent = NULL;
+ printk(KERN_ERR MOD_NAME
+ " %s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
+ return -EBUSY;
+ }
+
+
+ /* Enable writes through the rom window */
+ pci_read_config_byte(pdev, 0x40, &byte);
+ pci_write_config_byte(pdev, 0x40, byte | 1);
+
+ /* FIXME handle registers 0x80 - 0x8C the bios region locks */
+
+ /* For write accesses caches are useless */
+ window->virt = ioremap_nocache(window->phys, window->size);
+ if (!window->virt) {
+ printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
+ window->phys, window->size);
+ goto out;
+ }
+
+	/* Get the first address to look for a rom chip at */
+ map_top = window->phys;
+#if 1
+ /* The probe sequence run over the firmware hub lock
+ * registers sets them to 0x7 (no access).
+ * Probe at most the last 4M of the address space.
+ */
+ if (map_top < 0xffc00000) {
+ map_top = 0xffc00000;
+ }
+#endif
+ /* Loop through and look for rom chips */
+ while((map_top - 1) < 0xffffffffUL) {
+ struct cfi_private *cfi;
+ unsigned long offset;
+ int i;
+
+ if (!map) {
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ }
+ if (!map) {
+			printk(KERN_ERR MOD_NAME ": kmalloc failed\n");
+ goto out;
+ }
+ memset(map, 0, sizeof(*map));
+ INIT_LIST_HEAD(&map->list);
+ map->map.name = map->map_name;
+ map->map.phys = map_top;
+ offset = map_top - window->phys;
+ map->map.virt = (void __iomem *)
+ (((unsigned long)(window->virt)) + offset);
+ map->map.size = 0xffffffffUL - map_top + 1UL;
+ /* Set the name of the map to the address I am trying */
+ sprintf(map->map_name, "%s @%08Lx",
+ MOD_NAME, (unsigned long long)map->map.phys);
+
+ /* There is no generic VPP support */
+ for(map->map.bankwidth = 32; map->map.bankwidth;
+ map->map.bankwidth >>= 1)
+ {
+ char **probe_type;
+ /* Skip bankwidths that are not supported */
+ if (!map_bankwidth_supported(map->map.bankwidth))
+ continue;
+
+ /* Setup the map methods */
+ simple_map_init(&map->map);
+
+ /* Try all of the probe methods */
+ probe_type = rom_probe_types;
+ for(; *probe_type; probe_type++) {
+ map->mtd = do_map_probe(*probe_type, &map->map);
+ if (map->mtd)
+ goto found;
+ }
+ }
+ map_top += ROM_PROBE_STEP_SIZE;
+ continue;
+ found:
+ /* Trim the size if we are larger than the map */
+ if (map->mtd->size > map->map.size) {
+ printk(KERN_WARNING MOD_NAME
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
+ map->mtd->size = map->map.size;
+ }
+ if (window->rsrc.parent) {
+ /*
+ * Registering the MTD device in iomem may not be possible
+ * if there is a BIOS "reserved" and BUSY range. If this
+ * fails then continue anyway.
+ */
+ map->rsrc.name = map->map_name;
+ map->rsrc.start = map->map.phys;
+ map->rsrc.end = map->map.phys + map->mtd->size - 1;
+ map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&window->rsrc, &map->rsrc)) {
+ printk(KERN_ERR MOD_NAME
+ ": cannot reserve MTD resource\n");
+ map->rsrc.parent = NULL;
+ }
+ }
+
+ /* Make the whole region visible in the map */
+ map->map.virt = window->virt;
+ map->map.phys = window->phys;
+ cfi = map->map.fldrv_priv;
+ for(i = 0; i < cfi->numchips; i++) {
+ cfi->chips[i].start += offset;
+ }
+
+		/* Now that the mtd device is complete, claim and export it */
+ map->mtd->owner = THIS_MODULE;
+ if (mtd_device_register(map->mtd, NULL, 0)) {
+ map_destroy(map->mtd);
+ map->mtd = NULL;
+ goto out;
+ }
+
+
+ /* Calculate the new value of map_top */
+ map_top += map->mtd->size;
+
+ /* File away the map structure */
+ list_add(&map->list, &window->maps);
+ map = NULL;
+ }
+
+ out:
+ /* Free any left over map structures */
+ kfree(map);
+ /* See if I have any map structures */
+ if (list_empty(&window->maps)) {
+ amd76xrom_cleanup(window);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+static void amd76xrom_remove_one(struct pci_dev *pdev)
+{
+ struct amd76xrom_window *window = &amd76xrom_window;
+
+ amd76xrom_cleanup(window);
+}
+
+static struct pci_device_id amd76xrom_pci_tbl[] = {
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7440,
+ PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_AMD, 0x7468, PCI_ANY_ID, PCI_ANY_ID, }, /* amd8111 support */
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, amd76xrom_pci_tbl);
+
+#if 0
+static struct pci_driver amd76xrom_driver = {
+ .name = MOD_NAME,
+ .id_table = amd76xrom_pci_tbl,
+ .probe = amd76xrom_init_one,
+ .remove = amd76xrom_remove_one,
+};
+#endif
+
+static int __init init_amd76xrom(void)
+{
+ struct pci_dev *pdev;
+ struct pci_device_id *id;
+ pdev = NULL;
+ for(id = amd76xrom_pci_tbl; id->vendor; id++) {
+ pdev = pci_get_device(id->vendor, id->device, NULL);
+ if (pdev) {
+ break;
+ }
+ }
+ if (pdev) {
+ return amd76xrom_init_one(pdev, &amd76xrom_pci_tbl[0]);
+ }
+ return -ENXIO;
+#if 0
+ return pci_register_driver(&amd76xrom_driver);
+#endif
+}
+
+static void __exit cleanup_amd76xrom(void)
+{
+ amd76xrom_remove_one(amd76xrom_window.pdev);
+}
+
+module_init(init_amd76xrom);
+module_exit(cleanup_amd76xrom);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
new file mode 100644
index 000000000..41730feea
--- /dev/null
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -0,0 +1,196 @@
+/*
+ * drivers/mtd/maps/bfin-async-flash.c
+ *
+ * Handle the case where flash memory and ethernet mac/phy are
+ * mapped onto the same async bank. The BF533-STAMP does this
+ * for example. All board-specific configuration goes in your
+ * board resources file.
+ *
+ * Copyright 2000 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/blackfin.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <asm/unaligned.h>
+
+#define pr_devinit(fmt, args...) \
+ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
+
+#define DRIVER_NAME "bfin-async-flash"
+
+struct async_state {
+ struct mtd_info *mtd;
+ struct map_info map;
+ int enet_flash_pin;
+ uint32_t flash_ambctl0, flash_ambctl1;
+ uint32_t save_ambctl0, save_ambctl1;
+ unsigned long irq_flags;
+};
+
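+/*
+ * Every flash access below is bracketed by switch_to_flash()/switch_back():
+ * interrupts are held off, the shared ethernet/flash select GPIO is driven
+ * low, and the async bank timing registers are swapped to the flash
+ * settings, then everything is restored on the way out.
+ */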
+static void switch_to_flash(struct async_state *state)
+{
+ local_irq_save(state->irq_flags);
+
+ gpio_set_value(state->enet_flash_pin, 0);
+
+ state->save_ambctl0 = bfin_read_EBIU_AMBCTL0();
+ state->save_ambctl1 = bfin_read_EBIU_AMBCTL1();
+ bfin_write_EBIU_AMBCTL0(state->flash_ambctl0);
+ bfin_write_EBIU_AMBCTL1(state->flash_ambctl1);
+ SSYNC();
+}
+
+static void switch_back(struct async_state *state)
+{
+ bfin_write_EBIU_AMBCTL0(state->save_ambctl0);
+ bfin_write_EBIU_AMBCTL1(state->save_ambctl1);
+ SSYNC();
+
+ gpio_set_value(state->enet_flash_pin, 1);
+
+ local_irq_restore(state->irq_flags);
+}
+
+static map_word bfin_flash_read(struct map_info *map, unsigned long ofs)
+{
+ struct async_state *state = (struct async_state *)map->map_priv_1;
+ uint16_t word;
+ map_word test;
+
+ switch_to_flash(state);
+
+ word = readw(map->virt + ofs);
+
+ switch_back(state);
+
+ test.x[0] = word;
+ return test;
+}
+
+static void bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ struct async_state *state = (struct async_state *)map->map_priv_1;
+
+ switch_to_flash(state);
+
+ memcpy(to, map->virt + from, len);
+
+ switch_back(state);
+}
+
+static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs)
+{
+ struct async_state *state = (struct async_state *)map->map_priv_1;
+ uint16_t d;
+
+ d = d1.x[0];
+
+ switch_to_flash(state);
+
+ writew(d, map->virt + ofs);
+ SSYNC();
+
+ switch_back(state);
+}
+
+static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ struct async_state *state = (struct async_state *)map->map_priv_1;
+
+ switch_to_flash(state);
+
+ memcpy(map->virt + to, from, len);
+ SSYNC();
+
+ switch_back(state);
+}
+
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", NULL };
+
+static int bfin_flash_probe(struct platform_device *pdev)
+{
+ struct physmap_flash_data *pdata = dev_get_platdata(&pdev->dev);
+ struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ struct async_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->map.name = DRIVER_NAME;
+ state->map.read = bfin_flash_read;
+ state->map.copy_from = bfin_flash_copy_from;
+ state->map.write = bfin_flash_write;
+ state->map.copy_to = bfin_flash_copy_to;
+ state->map.bankwidth = pdata->width;
+ state->map.size = resource_size(memory);
+ state->map.virt = (void __iomem *)memory->start;
+ state->map.phys = memory->start;
+ state->map.map_priv_1 = (unsigned long)state;
+ state->enet_flash_pin = platform_get_irq(pdev, 0);
+ state->flash_ambctl0 = flash_ambctl->start;
+ state->flash_ambctl1 = flash_ambctl->end;
+
+ if (gpio_request(state->enet_flash_pin, DRIVER_NAME)) {
+ pr_devinit(KERN_ERR DRIVER_NAME ": Failed to request gpio %d\n", state->enet_flash_pin);
+ kfree(state);
+ return -EBUSY;
+ }
+ gpio_direction_output(state->enet_flash_pin, 1);
+
+ pr_devinit(KERN_NOTICE DRIVER_NAME ": probing %d-bit flash bus\n", state->map.bankwidth * 8);
+ state->mtd = do_map_probe(memory->name, &state->map);
+ if (!state->mtd) {
+ gpio_free(state->enet_flash_pin);
+ kfree(state);
+ return -ENXIO;
+ }
+
+ mtd_device_parse_register(state->mtd, part_probe_types, NULL,
+ pdata->parts, pdata->nr_parts);
+
+ platform_set_drvdata(pdev, state);
+
+ return 0;
+}
+
+static int bfin_flash_remove(struct platform_device *pdev)
+{
+ struct async_state *state = platform_get_drvdata(pdev);
+ gpio_free(state->enet_flash_pin);
+ mtd_device_unregister(state->mtd);
+ map_destroy(state->mtd);
+ kfree(state);
+ return 0;
+}
+
+static struct platform_driver bfin_flash_driver = {
+ .probe = bfin_flash_probe,
+ .remove = bfin_flash_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(bfin_flash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
new file mode 100644
index 000000000..d504b3d17
--- /dev/null
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+
+/* We split the flash chip up into four parts.
+ * 1: bootloader first 128k (0x00000000 - 0x0001FFFF) size 0x020000
+ * 2: kernel 640k (0x00020000 - 0x000BFFFF) size 0x0A0000
+ * 3: compressed 1536k root ramdisk (0x000C0000 - 0x0023FFFF) size 0x180000
+ * 4: writeable diskpartition (jffs)(0x00240000 - 0x003FFFFF) size 0x1C0000
+ */
+
+#define FLASH_PHYS_ADDR 0x40000000
+#define FLASH_SIZE 0x400000
+
+#define FLASH_PARTITION0_ADDR 0x00000000
+#define FLASH_PARTITION0_SIZE 0x00020000
+
+#define FLASH_PARTITION1_ADDR 0x00020000
+#define FLASH_PARTITION1_SIZE 0x000A0000
+
+#define FLASH_PARTITION2_ADDR 0x000C0000
+#define FLASH_PARTITION2_SIZE 0x00180000
+
+#define FLASH_PARTITION3_ADDR 0x00240000
+#define FLASH_PARTITION3_SIZE 0x001C0000
+
+
+static struct map_info flagadm_map = {
+ .name = "FlagaDM flash device",
+ .size = FLASH_SIZE,
+ .bankwidth = 2,
+};
+
+static struct mtd_partition flagadm_parts[] = {
+ {
+ .name = "Bootloader",
+ .offset = FLASH_PARTITION0_ADDR,
+ .size = FLASH_PARTITION0_SIZE
+ },
+ {
+ .name = "Kernel image",
+ .offset = FLASH_PARTITION1_ADDR,
+ .size = FLASH_PARTITION1_SIZE
+ },
+ {
+ .name = "Initial ramdisk image",
+ .offset = FLASH_PARTITION2_ADDR,
+ .size = FLASH_PARTITION2_SIZE
+ },
+ {
+ .name = "Persistent storage",
+ .offset = FLASH_PARTITION3_ADDR,
+ .size = FLASH_PARTITION3_SIZE
+ }
+};
+
+#define PARTITION_COUNT ARRAY_SIZE(flagadm_parts)
+
+static struct mtd_info *mymtd;
+
+static int __init init_flagadm(void)
+{
+ printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n",
+ FLASH_SIZE, FLASH_PHYS_ADDR);
+
+ flagadm_map.phys = FLASH_PHYS_ADDR;
+ flagadm_map.virt = ioremap(FLASH_PHYS_ADDR,
+ FLASH_SIZE);
+
+ if (!flagadm_map.virt) {
+		printk(KERN_ERR "Failed to ioremap\n");
+ return -EIO;
+ }
+
+ simple_map_init(&flagadm_map);
+
+ mymtd = do_map_probe("cfi_probe", &flagadm_map);
+ if (mymtd) {
+ mymtd->owner = THIS_MODULE;
+ mtd_device_register(mymtd, flagadm_parts, PARTITION_COUNT);
+ printk(KERN_NOTICE "FlagaDM flash device initialized\n");
+ return 0;
+ }
+
+ iounmap((void __iomem *)flagadm_map.virt);
+ return -ENXIO;
+}
+
+static void __exit cleanup_flagadm(void)
+{
+ if (mymtd) {
+ mtd_device_unregister(mymtd);
+ map_destroy(mymtd);
+ }
+ if (flagadm_map.virt) {
+ iounmap((void __iomem *)flagadm_map.virt);
+ flagadm_map.virt = NULL;
+ }
+}
+
+module_init(init_flagadm);
+module_exit(cleanup_flagadm);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>");
+MODULE_DESCRIPTION("MTD map driver for Flaga digital module");
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
new file mode 100644
index 000000000..0455166f0
--- /dev/null
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -0,0 +1,387 @@
+/*
+ * ck804xrom.c
+ *
+ * Normal mappings of chips in physical memory
+ *
+ * Dave Olsen <dolsen@lnxi.com>
+ * Ryan Jackson <rjackson@lnxi.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/list.h>
+
+
+#define MOD_NAME KBUILD_BASENAME
+
+#define ADDRESS_NAME_LEN 18
+
+#define ROM_PROBE_STEP_SIZE (64*1024)
+
+#define DEV_CK804 1
+#define DEV_MCP55 2
+
+struct ck804xrom_window {
+ void __iomem *virt;
+ unsigned long phys;
+ unsigned long size;
+ struct list_head maps;
+ struct resource rsrc;
+ struct pci_dev *pdev;
+};
+
+struct ck804xrom_map_info {
+ struct list_head list;
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct resource rsrc;
+ char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
+};
+
+/*
+ * The following applies to ck804 only:
+ * The 2 bits controlling the window size are often set to allow reading
+ * the BIOS, but too small to allow writing, since the lock registers are
+ * 4MiB lower in the address space than the data.
+ *
+ * This is intended to prevent flashing the bios, perhaps accidentally.
+ *
+ * This parameter allows the normal driver to override the BIOS settings.
+ *
+ * The bits are 6 and 7. If both bits are set, it is a 5MiB window.
+ * If only the 7 Bit is set, it is a 4MiB window. Otherwise, a
+ * 64KiB window.
+ *
+ * The following applies to mcp55 only:
+ * The 15 bits controlling the window size are distributed as follows:
+ * byte @0x88: bit 0..7
+ * byte @0x8c: bit 8..15
+ * word @0x90: bit 16..30
+ * If all bits are enabled, we have a 16 MiB window
+ * Please set win_size_bits to 0x7fffffff if you actually want to do something
+ */
+static uint win_size_bits = 0;
+module_param(win_size_bits, uint, 0);
+MODULE_PARM_DESC(win_size_bits, "ROM window size bits override, normally set by BIOS.");
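+/* For example, on CK804 the value is ORed straight into config byte 0x88
+ * (0xc0 sets bits 6 and 7 for the full 5MiB window); on MCP55 the low 31
+ * bits are spread across 0x88, 0x8c and 0x90 as described above. */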
+
+static struct ck804xrom_window ck804xrom_window = {
+ .maps = LIST_HEAD_INIT(ck804xrom_window.maps),
+};
+
+static void ck804xrom_cleanup(struct ck804xrom_window *window)
+{
+ struct ck804xrom_map_info *map, *scratch;
+ u8 byte;
+
+ if (window->pdev) {
+ /* Disable writes through the rom window */
+ pci_read_config_byte(window->pdev, 0x6d, &byte);
+ pci_write_config_byte(window->pdev, 0x6d, byte & ~1);
+ }
+
+ /* Free all of the mtd devices */
+ list_for_each_entry_safe(map, scratch, &window->maps, list) {
+ if (map->rsrc.parent)
+ release_resource(&map->rsrc);
+
+ mtd_device_unregister(map->mtd);
+ map_destroy(map->mtd);
+ list_del(&map->list);
+ kfree(map);
+ }
+ if (window->rsrc.parent)
+ release_resource(&window->rsrc);
+
+ if (window->virt) {
+ iounmap(window->virt);
+ window->virt = NULL;
+ window->phys = 0;
+ window->size = 0;
+ }
+ pci_dev_put(window->pdev);
+}
+
+
+static int ck804xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ u8 byte;
+ u16 word;
+ struct ck804xrom_window *window = &ck804xrom_window;
+ struct ck804xrom_map_info *map = NULL;
+ unsigned long map_top;
+
+ /* Remember the pci dev I find the window in */
+ window->pdev = pci_dev_get(pdev);
+
+ switch (ent->driver_data) {
+ case DEV_CK804:
+ /* Enable the selected rom window. This is often incorrectly
+ * set up by the BIOS, and the 4MiB offset for the lock registers
+ * requires the full 5MiB of window space.
+ *
+ * This 'write, then read' approach leaves the bits for
+ * other uses of the hardware info.
+ */
+ pci_read_config_byte(pdev, 0x88, &byte);
+		pci_write_config_byte(pdev, 0x88, byte | win_size_bits);
+
+		/* Assume the rom window is properly set up, and find its size */
+ pci_read_config_byte(pdev, 0x88, &byte);
+
+ if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6)))
+ window->phys = 0xffb00000; /* 5MiB */
+ else if ((byte & (1<<7)) == (1<<7))
+ window->phys = 0xffc00000; /* 4MiB */
+ else
+ window->phys = 0xffff0000; /* 64KiB */
+ break;
+
+ case DEV_MCP55:
+ pci_read_config_byte(pdev, 0x88, &byte);
+ pci_write_config_byte(pdev, 0x88, byte | (win_size_bits & 0xff));
+
+ pci_read_config_byte(pdev, 0x8c, &byte);
+ pci_write_config_byte(pdev, 0x8c, byte | ((win_size_bits & 0xff00) >> 8));
+
+ pci_read_config_word(pdev, 0x90, &word);
+ pci_write_config_word(pdev, 0x90, word | ((win_size_bits & 0x7fff0000) >> 16));
+
+ window->phys = 0xff000000; /* 16MiB, hardcoded for now */
+ break;
+ }
+
+ window->size = 0xffffffffUL - window->phys + 1UL;
+
+ /*
+ * Try to reserve the window mem region. If this fails then
+ * it is likely due to a fragment of the window being
+ * "reserved" by the BIOS. In the case that the
+ * request_mem_region() fails then once the rom size is
+ * discovered we will try to reserve the unreserved fragment.
+ */
+ window->rsrc.name = MOD_NAME;
+ window->rsrc.start = window->phys;
+ window->rsrc.end = window->phys + window->size - 1;
+ window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, &window->rsrc)) {
+ window->rsrc.parent = NULL;
+ printk(KERN_ERR MOD_NAME
+ " %s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
+ }
+
+
+ /* Enable writes through the rom window */
+ pci_read_config_byte(pdev, 0x6d, &byte);
+ pci_write_config_byte(pdev, 0x6d, byte | 1);
+
+ /* FIXME handle registers 0x80 - 0x8C the bios region locks */
+
+ /* For write accesses caches are useless */
+ window->virt = ioremap_nocache(window->phys, window->size);
+ if (!window->virt) {
+ printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
+ window->phys, window->size);
+ goto out;
+ }
+
+ /* Get the first address to look for a rom chip at */
+ map_top = window->phys;
+#if 1
+ /* The probe sequence run over the firmware hub lock
+ * registers sets them to 0x7 (no access).
+ * Probe at most the last 4MiB of the address space.
+ */
+ if (map_top < 0xffc00000)
+ map_top = 0xffc00000;
+#endif
+ /* Loop through and look for rom chips. Since we don't know the
+ * starting address for each chip, probe every ROM_PROBE_STEP_SIZE
+ * bytes from the starting address of the window.
+ */
+ while((map_top - 1) < 0xffffffffUL) {
+ struct cfi_private *cfi;
+ unsigned long offset;
+ int i;
+
+ if (!map)
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+
+ if (!map) {
+			printk(KERN_ERR MOD_NAME ": kmalloc failed\n");
+ goto out;
+ }
+ memset(map, 0, sizeof(*map));
+ INIT_LIST_HEAD(&map->list);
+ map->map.name = map->map_name;
+ map->map.phys = map_top;
+ offset = map_top - window->phys;
+ map->map.virt = (void __iomem *)
+ (((unsigned long)(window->virt)) + offset);
+ map->map.size = 0xffffffffUL - map_top + 1UL;
+ /* Set the name of the map to the address I am trying */
+ sprintf(map->map_name, "%s @%08Lx",
+ MOD_NAME, (unsigned long long)map->map.phys);
+
+ /* There is no generic VPP support */
+ for(map->map.bankwidth = 32; map->map.bankwidth;
+ map->map.bankwidth >>= 1)
+ {
+ char **probe_type;
+ /* Skip bankwidths that are not supported */
+ if (!map_bankwidth_supported(map->map.bankwidth))
+ continue;
+
+ /* Setup the map methods */
+ simple_map_init(&map->map);
+
+ /* Try all of the probe methods */
+ probe_type = rom_probe_types;
+ for(; *probe_type; probe_type++) {
+ map->mtd = do_map_probe(*probe_type, &map->map);
+ if (map->mtd)
+ goto found;
+ }
+ }
+ map_top += ROM_PROBE_STEP_SIZE;
+ continue;
+ found:
+ /* Trim the size if we are larger than the map */
+ if (map->mtd->size > map->map.size) {
+ printk(KERN_WARNING MOD_NAME
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
+ map->mtd->size = map->map.size;
+ }
+ if (window->rsrc.parent) {
+ /*
+ * Registering the MTD device in iomem may not be possible
+ * if there is a BIOS "reserved" and BUSY range. If this
+ * fails then continue anyway.
+ */
+ map->rsrc.name = map->map_name;
+ map->rsrc.start = map->map.phys;
+ map->rsrc.end = map->map.phys + map->mtd->size - 1;
+ map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&window->rsrc, &map->rsrc)) {
+ printk(KERN_ERR MOD_NAME
+ ": cannot reserve MTD resource\n");
+ map->rsrc.parent = NULL;
+ }
+ }
+
+ /* Make the whole region visible in the map */
+ map->map.virt = window->virt;
+ map->map.phys = window->phys;
+ cfi = map->map.fldrv_priv;
+ for(i = 0; i < cfi->numchips; i++)
+ cfi->chips[i].start += offset;
+
+		/* Now that the mtd device is complete, claim and export it */
+ map->mtd->owner = THIS_MODULE;
+ if (mtd_device_register(map->mtd, NULL, 0)) {
+ map_destroy(map->mtd);
+ map->mtd = NULL;
+ goto out;
+ }
+
+
+ /* Calculate the new value of map_top */
+ map_top += map->mtd->size;
+
+ /* File away the map structure */
+ list_add(&map->list, &window->maps);
+ map = NULL;
+ }
+
+ out:
+ /* Free any left over map structures */
+ kfree(map);
+
+ /* See if I have any map structures */
+ if (list_empty(&window->maps)) {
+ ck804xrom_cleanup(window);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+static void ck804xrom_remove_one(struct pci_dev *pdev)
+{
+ struct ck804xrom_window *window = &ck804xrom_window;
+
+ ck804xrom_cleanup(window);
+}
+
+static struct pci_device_id ck804xrom_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0051), .driver_data = DEV_CK804 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0360), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0361), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0362), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0363), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0364), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0365), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0366), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0367), .driver_data = DEV_MCP55 },
+ { 0, }
+};
+
+#if 0
+MODULE_DEVICE_TABLE(pci, ck804xrom_pci_tbl);
+
+static struct pci_driver ck804xrom_driver = {
+ .name = MOD_NAME,
+ .id_table = ck804xrom_pci_tbl,
+ .probe = ck804xrom_init_one,
+ .remove = ck804xrom_remove_one,
+};
+#endif
+
+static int __init init_ck804xrom(void)
+{
+ struct pci_dev *pdev;
+ struct pci_device_id *id;
+ int retVal;
+ pdev = NULL;
+
+ for(id = ck804xrom_pci_tbl; id->vendor; id++) {
+ pdev = pci_get_device(id->vendor, id->device, NULL);
+ if (pdev)
+ break;
+ }
+ if (pdev) {
+ retVal = ck804xrom_init_one(pdev, id);
+ pci_dev_put(pdev);
+ return retVal;
+ }
+ return -ENXIO;
+#if 0
+ return pci_register_driver(&ck804xrom_driver);
+#endif
+}
+
+static void __exit cleanup_ck804xrom(void)
+{
+ ck804xrom_remove_one(ck804xrom_window.pdev);
+}
+
+module_init(init_ck804xrom);
+module_exit(cleanup_ck804xrom);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>, Dave Olsen <dolsen@lnxi.com>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on the Nvidia ck804 southbridge");
+
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
new file mode 100644
index 000000000..70a3db3ab
--- /dev/null
+++ b/drivers/mtd/maps/dc21285.c
@@ -0,0 +1,231 @@
+/*
+ * MTD map driver for flash on the DC21285 (the StrongARM-110 companion chip)
+ *
+ * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
+ *
+ * This code is GPL
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/hardware/dec21285.h>
+#include <asm/mach-types.h>
+
+
+static struct mtd_info *dc21285_mtd;
+
+#ifdef CONFIG_ARCH_NETWINDER
+/*
+ * This is really ugly, but it seems to be the only
+ * reliable way to do it, as the CPLD state machine
+ * is unpredictable. So we pay a 25us penalty per
+ * write access.
+ */
+static void nw_en_write(void)
+{
+ unsigned long flags;
+
+ /*
+ * we want to write a bit pattern XXX1 to Xilinx to enable
+ * the write gate, which will be open for about the next 2ms.
+ */
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+
+ /*
+ * give the ISA bus time to catch up...
+ */
+ udelay(25);
+}
+#else
+#define nw_en_write() do { } while (0)
+#endif
+
+static map_word dc21285_read8(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+ val.x[0] = *(uint8_t*)(map->virt + ofs);
+ return val;
+}
+
+static map_word dc21285_read16(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+ val.x[0] = *(uint16_t*)(map->virt + ofs);
+ return val;
+}
+
+static map_word dc21285_read32(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+ val.x[0] = *(uint32_t*)(map->virt + ofs);
+ return val;
+}
+
+static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy(to, (void*)(map->virt + from), len);
+}
+
+static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr)
+{
+ if (machine_is_netwinder())
+ nw_en_write();
+ *CSR_ROMWRITEREG = adr & 3;
+ adr &= ~3;
+ *(uint8_t*)(map->virt + adr) = d.x[0];
+}
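+
+/*
+ * A note on the CSR_ROMWRITEREG dance above (inferred from this code, not
+ * from 21285 documentation): sub-word writes latch the byte lane of the
+ * target address in the ROM write register, then perform the access
+ * word-aligned. E.g. a byte write to offset 0x5 stores 0x5 & 3 = 1 in
+ * CSR_ROMWRITEREG and issues the write at the aligned offset 0x4.
+ */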
+
+static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr)
+{
+ if (machine_is_netwinder())
+ nw_en_write();
+ *CSR_ROMWRITEREG = adr & 3;
+ adr &= ~3;
+ *(uint16_t*)(map->virt + adr) = d.x[0];
+}
+
+static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr)
+{
+ if (machine_is_netwinder())
+ nw_en_write();
+ *(uint32_t*)(map->virt + adr) = d.x[0];
+}
+
+static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ while (len > 0) {
+ map_word d;
+ d.x[0] = *((uint32_t*)from);
+ dc21285_write32(map, d, to);
+ from += 4;
+ to += 4;
+ len -= 4;
+ }
+}
+
+static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ while (len > 0) {
+ map_word d;
+ d.x[0] = *((uint16_t*)from);
+ dc21285_write16(map, d, to);
+ from += 2;
+ to += 2;
+ len -= 2;
+ }
+}
+
+static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ /* copy one byte at a time; without the loop only the first byte is copied */
+ while (len > 0) {
+ map_word d;
+ d.x[0] = *((uint8_t*)from);
+ dc21285_write8(map, d, to);
+ from++;
+ to++;
+ len--;
+ }
+}
+
+static struct map_info dc21285_map = {
+ .name = "DC21285 flash",
+ .phys = NO_XIP,
+ .size = 16*1024*1024,
+ .copy_from = dc21285_copy_from,
+};
+
+/* Partition stuff */
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+static int __init init_dc21285(void)
+{
+ /* Determine bankwidth */
+ switch (*CSR_SA110_CNTL & (3<<14)) {
+ case SA110_CNTL_ROMWIDTH_8:
+ dc21285_map.bankwidth = 1;
+ dc21285_map.read = dc21285_read8;
+ dc21285_map.write = dc21285_write8;
+ dc21285_map.copy_to = dc21285_copy_to_8;
+ break;
+ case SA110_CNTL_ROMWIDTH_16:
+ dc21285_map.bankwidth = 2;
+ dc21285_map.read = dc21285_read16;
+ dc21285_map.write = dc21285_write16;
+ dc21285_map.copy_to = dc21285_copy_to_16;
+ break;
+ case SA110_CNTL_ROMWIDTH_32:
+ dc21285_map.bankwidth = 4;
+ dc21285_map.read = dc21285_read32;
+ dc21285_map.write = dc21285_write32;
+ dc21285_map.copy_to = dc21285_copy_to_32;
+ break;
+ default:
+ printk (KERN_ERR "DC21285 flash: undefined bankwidth\n");
+ return -ENXIO;
+ }
+ printk (KERN_NOTICE "DC21285 flash support (%d-bit bankwidth)\n",
+ dc21285_map.bankwidth*8);
+
+ /* Let's map the flash area */
+ dc21285_map.virt = ioremap(DC21285_FLASH, 16*1024*1024);
+ if (!dc21285_map.virt) {
+ printk("Failed to ioremap\n");
+ return -EIO;
+ }
+
+ if (machine_is_ebsa285()) {
+ dc21285_mtd = do_map_probe("cfi_probe", &dc21285_map);
+ } else {
+ dc21285_mtd = do_map_probe("jedec_probe", &dc21285_map);
+ }
+
+ if (!dc21285_mtd) {
+ iounmap(dc21285_map.virt);
+ return -ENXIO;
+ }
+
+ dc21285_mtd->owner = THIS_MODULE;
+
+ mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0);
+
+ if(machine_is_ebsa285()) {
+ /*
+ * Flash timing is determined with bits 19-16 of the
+ * CSR_SA110_CNTL. The value is the number of wait cycles, or
+ * 0 for 16 cycles (the default). Cycles are 20 ns.
+ * Here we use 7 for 140 ns flash chips.
+ */
+ /* access time */
+ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x000f0000) | (7 << 16));
+ /* burst time */
+ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20));
+ /* tristate time */
+ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24));
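+ /*
+ * Worked example: with 20 ns cycles, the value 7 programmed above
+ * gives 7 * 20 = 140 ns, matching the 140 ns parts mentioned
+ * above; a (hypothetical) 120 ns part would use 6 (6 * 20 = 120).
+ */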
+ }
+
+ return 0;
+}
+
+static void __exit cleanup_dc21285(void)
+{
+ mtd_device_unregister(dc21285_mtd);
+ map_destroy(dc21285_mtd);
+ iounmap(dc21285_map.virt);
+}
+
+module_init(init_dc21285);
+module_exit(cleanup_dc21285);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
+MODULE_DESCRIPTION("MTD map driver for DC21285 boards");
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
new file mode 100644
index 000000000..f784cf0ca
--- /dev/null
+++ b/drivers/mtd/maps/esb2rom.c
@@ -0,0 +1,452 @@
+/*
+ * esb2rom.c
+ *
+ * Normal mappings of flash chips in physical memory
+ * through the Intel ESB2 Southbridge.
+ *
+ * This was derived from ichxrom.c in May 2006 by
+ * Lew Glendenning <lglendenning@lnxi.com>
+ *
+ * Eric Biederman, of course, was a major help in this effort.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/list.h>
+
+#define MOD_NAME KBUILD_BASENAME
+
+#define ADDRESS_NAME_LEN 18
+
+#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
+
+#define BIOS_CNTL 0xDC
+#define BIOS_LOCK_ENABLE 0x02
+#define BIOS_WRITE_ENABLE 0x01
+
+/* This became a 16-bit register, and EN2 has disappeared */
+#define FWH_DEC_EN1 0xD8
+#define FWH_F8_EN 0x8000
+#define FWH_F0_EN 0x4000
+#define FWH_E8_EN 0x2000
+#define FWH_E0_EN 0x1000
+#define FWH_D8_EN 0x0800
+#define FWH_D0_EN 0x0400
+#define FWH_C8_EN 0x0200
+#define FWH_C0_EN 0x0100
+#define FWH_LEGACY_F_EN 0x0080
+#define FWH_LEGACY_E_EN 0x0040
+/* reserved 0x0020 and 0x0010 */
+#define FWH_70_EN 0x0008
+#define FWH_60_EN 0x0004
+#define FWH_50_EN 0x0002
+#define FWH_40_EN 0x0001
+
+/* these are 32-bit values */
+#define FWH_SEL1 0xD0
+#define FWH_SEL2 0xD4
+
+#define FWH_8MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
+ FWH_70_EN | FWH_60_EN | FWH_50_EN | FWH_40_EN)
+
+#define FWH_7MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
+ FWH_70_EN | FWH_60_EN | FWH_50_EN)
+
+#define FWH_6MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
+ FWH_70_EN | FWH_60_EN)
+
+#define FWH_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
+ FWH_70_EN)
+
+#define FWH_4MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN)
+
+#define FWH_3_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN)
+
+#define FWH_3MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN)
+
+#define FWH_2_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN)
+
+#define FWH_2MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN)
+
+#define FWH_1_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN)
+
+#define FWH_1MiB (FWH_F8_EN | FWH_F0_EN)
+
+#define FWH_0_5MiB (FWH_F8_EN)
+
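+
+/*
+ * Example decode, derived from the bit definitions above rather than a
+ * datasheet: FWH_1MiB = FWH_F8_EN | FWH_F0_EN enables the top two 512KiB
+ * segments, i.e. 0xfff00000 - 0xffffffff just below 4GiB, which is why
+ * the probe code below maps FWH_1MiB to a base of 0xfff00000.
+ */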
+
+struct esb2rom_window {
+ void __iomem* virt;
+ unsigned long phys;
+ unsigned long size;
+ struct list_head maps;
+ struct resource rsrc;
+ struct pci_dev *pdev;
+};
+
+struct esb2rom_map_info {
+ struct list_head list;
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct resource rsrc;
+ char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
+};
+
+static struct esb2rom_window esb2rom_window = {
+ .maps = LIST_HEAD_INIT(esb2rom_window.maps),
+};
+
+static void esb2rom_cleanup(struct esb2rom_window *window)
+{
+ struct esb2rom_map_info *map, *scratch;
+ u8 byte;
+
+ /* Disable writes through the rom window */
+ pci_read_config_byte(window->pdev, BIOS_CNTL, &byte);
+ pci_write_config_byte(window->pdev, BIOS_CNTL,
+ byte & ~BIOS_WRITE_ENABLE);
+
+ /* Free all of the mtd devices */
+ list_for_each_entry_safe(map, scratch, &window->maps, list) {
+ if (map->rsrc.parent)
+ release_resource(&map->rsrc);
+ mtd_device_unregister(map->mtd);
+ map_destroy(map->mtd);
+ list_del(&map->list);
+ kfree(map);
+ }
+ if (window->rsrc.parent)
+ release_resource(&window->rsrc);
+ if (window->virt) {
+ iounmap(window->virt);
+ window->virt = NULL;
+ window->phys = 0;
+ window->size = 0;
+ }
+ pci_dev_put(window->pdev);
+}
+
+static int esb2rom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ struct esb2rom_window *window = &esb2rom_window;
+ struct esb2rom_map_info *map = NULL;
+ unsigned long map_top;
+ u8 byte;
+ u16 word;
+
+ /* For now I just handle the esb2 and I assume there
+ * are not a lot of resources up at the top of the address
+ * space. It is possible to handle other devices in the
+ * top 16MiB but it is very painful. Also, since
+ * you can only really attach a FWH to an ICHX, there
+ * are a number of simplifications you can make.
+ *
+ * You can also page firmware hubs if an 8MiB window isn't enough,
+ * but we don't currently handle that case either.
+ */
+ window->pdev = pci_dev_get(pdev);
+
+ /* RLG: experiment 2. Force the window registers to the widest values */
+
+/*
+ pci_read_config_word(pdev, FWH_DEC_EN1, &word);
+ printk(KERN_DEBUG "Original FWH_DEC_EN1 : %x\n", word);
+ pci_write_config_byte(pdev, FWH_DEC_EN1, 0xff);
+ pci_read_config_byte(pdev, FWH_DEC_EN1, &byte);
+ printk(KERN_DEBUG "New FWH_DEC_EN1 : %x\n", byte);
+
+ pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
+ printk(KERN_DEBUG "Original FWH_DEC_EN2 : %x\n", byte);
+ pci_write_config_byte(pdev, FWH_DEC_EN2, 0x0f);
+ pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
+ printk(KERN_DEBUG "New FWH_DEC_EN2 : %x\n", byte);
+*/
+
+ /* Find a region contiguous with the end of the ROM window */
+ window->phys = 0;
+ pci_read_config_word(pdev, FWH_DEC_EN1, &word);
+ printk(KERN_DEBUG "pci_read_config_word : %x\n", word);
+
+ if ((word & FWH_8MiB) == FWH_8MiB)
+ window->phys = 0xff400000;
+ else if ((word & FWH_7MiB) == FWH_7MiB)
+ window->phys = 0xff500000;
+ else if ((word & FWH_6MiB) == FWH_6MiB)
+ window->phys = 0xff600000;
+ else if ((word & FWH_5MiB) == FWH_5MiB)
+ window->phys = 0xFF700000;
+ else if ((word & FWH_4MiB) == FWH_4MiB)
+ window->phys = 0xffc00000;
+ else if ((word & FWH_3_5MiB) == FWH_3_5MiB)
+ window->phys = 0xffc80000;
+ else if ((word & FWH_3MiB) == FWH_3MiB)
+ window->phys = 0xffd00000;
+ else if ((word & FWH_2_5MiB) == FWH_2_5MiB)
+ window->phys = 0xffd80000;
+ else if ((word & FWH_2MiB) == FWH_2MiB)
+ window->phys = 0xffe00000;
+ else if ((word & FWH_1_5MiB) == FWH_1_5MiB)
+ window->phys = 0xffe80000;
+ else if ((word & FWH_1MiB) == FWH_1MiB)
+ window->phys = 0xfff00000;
+ else if ((word & FWH_0_5MiB) == FWH_0_5MiB)
+ window->phys = 0xfff80000;
+
+ if (window->phys == 0) {
+ printk(KERN_ERR MOD_NAME ": Rom window is closed\n");
+ goto out;
+ }
+
+ /* Extend the window 4MiB below the decoded range */
+ window->phys -= 0x400000UL;
+ window->size = (0xffffffffUL - window->phys) + 1UL;
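+ /*
+ * Worked example: with FWH_1MiB decoded, phys becomes
+ * 0xfff00000 - 0x400000 = 0xffb00000 and size becomes
+ * (0xffffffff - 0xffb00000) + 1 = 5MiB: the decoded 1MiB window
+ * plus the 4MiB prepended above.
+ */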
+
+ /* Enable writes through the rom window */
+ pci_read_config_byte(pdev, BIOS_CNTL, &byte);
+ if (!(byte & BIOS_WRITE_ENABLE) && (byte & (BIOS_LOCK_ENABLE))) {
+ /* The BIOS will generate an error if I enable
+ * this device, so don't even try.
+ */
+ printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
+ goto out;
+ }
+ pci_write_config_byte(pdev, BIOS_CNTL, byte | BIOS_WRITE_ENABLE);
+
+ /*
+ * Try to reserve the window mem region. If this fails then
+ * it is likely due to the window being "reserved" by the BIOS.
+ */
+ window->rsrc.name = MOD_NAME;
+ window->rsrc.start = window->phys;
+ window->rsrc.end = window->phys + window->size - 1;
+ window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, &window->rsrc)) {
+ window->rsrc.parent = NULL;
+ printk(KERN_DEBUG MOD_NAME ": "
+ "%s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
+ }
+
+ /* Map the firmware hub into my address space. */
+ window->virt = ioremap_nocache(window->phys, window->size);
+ if (!window->virt) {
+ printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
+ window->phys, window->size);
+ goto out;
+ }
+
+ /* Get the first address to look for a ROM chip at */
+ map_top = window->phys;
+ if ((window->phys & 0x3fffff) != 0) {
+ /* if not 4MiB aligned, start probing 4MiB above the window base */
+ map_top = window->phys + 0x400000;
+ }
+#if 1
+ /* Running the probe sequence over the firmware hub lock
+ * registers sets them to 0x7 (no access).
+ * (Insane hardware design, but most vendors copied Intel's.)
+ * ==> Probe at most the last 4MiB of the address space.
+ */
+ if (map_top < 0xffc00000)
+ map_top = 0xffc00000;
+#endif
+ /* Loop through and look for rom chips */
+ while ((map_top - 1) < 0xffffffffUL) {
+ struct cfi_private *cfi;
+ unsigned long offset;
+ int i;
+
+ if (!map)
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ printk(KERN_ERR MOD_NAME ": kmalloc failed");
+ goto out;
+ }
+ memset(map, 0, sizeof(*map));
+ INIT_LIST_HEAD(&map->list);
+ map->map.name = map->map_name;
+ map->map.phys = map_top;
+ offset = map_top - window->phys;
+ map->map.virt = (void __iomem *)
+ (((unsigned long)(window->virt)) + offset);
+ map->map.size = 0xffffffffUL - map_top + 1UL;
+ /* Set the name of the map to the address I am trying */
+ sprintf(map->map_name, "%s @%08Lx",
+ MOD_NAME, (unsigned long long)map->map.phys);
+
+ /* Firmware hubs only use vpp when being programmed
+ * in a factory setting. So in-place programming
+ * needs to use a different method.
+ */
+ for(map->map.bankwidth = 32; map->map.bankwidth;
+ map->map.bankwidth >>= 1) {
+ char **probe_type;
+ /* Skip bankwidths that are not supported */
+ if (!map_bankwidth_supported(map->map.bankwidth))
+ continue;
+
+ /* Setup the map methods */
+ simple_map_init(&map->map);
+
+ /* Try all of the probe methods */
+ probe_type = rom_probe_types;
+ for(; *probe_type; probe_type++) {
+ map->mtd = do_map_probe(*probe_type, &map->map);
+ if (map->mtd)
+ goto found;
+ }
+ }
+ map_top += ROM_PROBE_STEP_SIZE;
+ continue;
+ found:
+ /* Trim the size if we are larger than the map */
+ if (map->mtd->size > map->map.size) {
+ printk(KERN_WARNING MOD_NAME
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
+ map->mtd->size = map->map.size;
+ }
+ if (window->rsrc.parent) {
+ /*
+ * Registering the MTD device in iomem may not be possible
+ * if there is a BIOS "reserved" and BUSY range. If this
+ * fails then continue anyway.
+ */
+ map->rsrc.name = map->map_name;
+ map->rsrc.start = map->map.phys;
+ map->rsrc.end = map->map.phys + map->mtd->size - 1;
+ map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&window->rsrc, &map->rsrc)) {
+ printk(KERN_ERR MOD_NAME
+ ": cannot reserve MTD resource\n");
+ map->rsrc.parent = NULL;
+ }
+ }
+
+ /* Make the whole region visible in the map */
+ map->map.virt = window->virt;
+ map->map.phys = window->phys;
+ cfi = map->map.fldrv_priv;
+ for(i = 0; i < cfi->numchips; i++)
+ cfi->chips[i].start += offset;
+
+ /* Now that the mtd device is complete, claim and export it */
+ map->mtd->owner = THIS_MODULE;
+ if (mtd_device_register(map->mtd, NULL, 0)) {
+ map_destroy(map->mtd);
+ map->mtd = NULL;
+ goto out;
+ }
+
+ /* Calculate the new value of map_top */
+ map_top += map->mtd->size;
+
+ /* File away the map structure */
+ list_add(&map->list, &window->maps);
+ map = NULL;
+ }
+
+ out:
+ /* Free any left over map structures */
+ kfree(map);
+
+ /* See if I have any map structures */
+ if (list_empty(&window->maps)) {
+ esb2rom_cleanup(window);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void esb2rom_remove_one(struct pci_dev *pdev)
+{
+ struct esb2rom_window *window = &esb2rom_window;
+ esb2rom_cleanup(window);
+}
+
+static struct pci_device_id esb2rom_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, },
+};
+
+#if 0
+MODULE_DEVICE_TABLE(pci, esb2rom_pci_tbl);
+
+static struct pci_driver esb2rom_driver = {
+ .name = MOD_NAME,
+ .id_table = esb2rom_pci_tbl,
+ .probe = esb2rom_init_one,
+ .remove = esb2rom_remove_one,
+};
+#endif
+
+static int __init init_esb2rom(void)
+{
+ struct pci_dev *pdev;
+ struct pci_device_id *id;
+ int retVal;
+
+ pdev = NULL;
+ for (id = esb2rom_pci_tbl; id->vendor; id++) {
+ printk(KERN_DEBUG "device id = %x\n", id->device);
+ pdev = pci_get_device(id->vendor, id->device, NULL);
+ if (pdev) {
+ printk(KERN_DEBUG "matched device = %x\n", id->device);
+ break;
+ }
+ }
+ if (pdev) {
+ printk(KERN_DEBUG "matched device id %x\n", id->device);
+ retVal = esb2rom_init_one(pdev, &esb2rom_pci_tbl[0]);
+ pci_dev_put(pdev);
+ printk(KERN_DEBUG "retVal = %d\n", retVal);
+ return retVal;
+ }
+ return -ENXIO;
+#if 0
+ return pci_register_driver(&esb2rom_driver);
+#endif
+}
+
+static void __exit cleanup_esb2rom(void)
+{
+ esb2rom_remove_one(esb2rom_window.pdev);
+}
+
+module_init(init_esb2rom);
+module_exit(cleanup_esb2rom);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lew Glendenning <lglendenning@lnxi.com>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ESB2 southbridge");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
new file mode 100644
index 000000000..2fb346091
--- /dev/null
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -0,0 +1,302 @@
+/*
+ * drivers/mtd/maps/gpio-addr-flash.c
+ *
+ * Handle the case where a flash device is mostly addressed using physical
+ * lines and supplemented by GPIOs. This way you can hook up, say, an 8MiB
+ * flash to a 2MiB memory range and use the GPIOs to select a particular
+ * range.
+ *
+ * Copyright © 2000 Nicolas Pitre <nico@cam.org>
+ * Copyright © 2005-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define pr_devinit(fmt, args...) \
+ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
+
+#define DRIVER_NAME "gpio-addr-flash"
+#define PFX DRIVER_NAME ": "
+
+/**
+ * struct async_state - keep GPIO flash state
+ * @mtd: MTD state for this mapping
+ * @map: MTD map state for this flash
+ * @gpio_count: number of GPIOs used to address
+ * @gpio_addrs: array of GPIOs to twiddle
+ * @gpio_values: cached GPIO values
+ * @win_size: dedicated memory size (if no GPIOs)
+ */
+struct async_state {
+ struct mtd_info *mtd;
+ struct map_info map;
+ size_t gpio_count;
+ unsigned *gpio_addrs;
+ int *gpio_values;
+ unsigned long win_size;
+};
+#define gf_map_info_to_state(mi) ((struct async_state *)(mi)->map_priv_1)
+
+/**
+ * gf_set_gpios() - set GPIO address lines to access specified flash offset
+ * @state: GPIO flash state
+ * @ofs: desired offset to access
+ *
+ * Rather than call the GPIO framework every time, cache the last-programmed
+ * value. This speeds up sequential accesses (which are by far the most common
+ * type). We rely on the GPIO framework to treat non-zero value as high so
+ * that we don't have to normalize the bits.
+ */
+static void gf_set_gpios(struct async_state *state, unsigned long ofs)
+{
+ size_t i = 0;
+ int value;
+ ofs /= state->win_size;
+ do {
+ value = ofs & (1 << i);
+ if (state->gpio_values[i] != value) {
+ gpio_set_value(state->gpio_addrs[i], value);
+ state->gpio_values[i] = value;
+ }
+ } while (++i < state->gpio_count);
+}
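+
+/*
+ * Worked example (hypothetical setup): with win_size = 2MiB and two address
+ * GPIOs, ofs = 5MiB gives ofs / win_size = 2 (binary 10), so gpio_addrs[0]
+ * is driven low and gpio_addrs[1] high, selecting the third 2MiB page of
+ * the flash.
+ */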
+
+/**
+ * gf_read() - read a word at the specified offset
+ * @map: MTD map state
+ * @ofs: desired offset to read
+ */
+static map_word gf_read(struct map_info *map, unsigned long ofs)
+{
+ struct async_state *state = gf_map_info_to_state(map);
+ uint16_t word;
+ map_word test;
+
+ gf_set_gpios(state, ofs);
+
+ word = readw(map->virt + (ofs % state->win_size));
+ test.x[0] = word;
+ return test;
+}
+
+/**
+ * gf_copy_from() - copy a chunk of data from the flash
+ * @map: MTD map state
+ * @to: memory to copy to
+ * @from: flash offset to copy from
+ * @len: how much to copy
+ *
+ * The "from" region may straddle more than one window, so toggle the GPIOs for
+ * each window region before reading its data.
+ */
+static void gf_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ struct async_state *state = gf_map_info_to_state(map);
+
+ int this_len;
+
+ while (len) {
+ if ((from % state->win_size) + len > state->win_size)
+ this_len = state->win_size - (from % state->win_size);
+ else
+ this_len = len;
+
+ gf_set_gpios(state, from);
+ memcpy_fromio(to, map->virt + (from % state->win_size),
+ this_len);
+ len -= this_len;
+ from += this_len;
+ to += this_len;
+ }
+}
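+
+/*
+ * For instance (hypothetical numbers): with win_size = 2MiB, a 3MiB read
+ * starting at flash offset 1MiB is split into a 1MiB chunk from the first
+ * window and a 2MiB chunk from the second, with the GPIOs re-toggled
+ * between the two memcpy_fromio() calls.
+ */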
+
+/**
+ * gf_write() - write a word at the specified offset
+ * @map: MTD map state
+ * @ofs: desired offset to write
+ */
+static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
+{
+ struct async_state *state = gf_map_info_to_state(map);
+ uint16_t d;
+
+ gf_set_gpios(state, ofs);
+
+ d = d1.x[0];
+ writew(d, map->virt + (ofs % state->win_size));
+}
+
+/**
+ * gf_copy_to() - copy a chunk of data to the flash
+ * @map: MTD map state
+ * @to: flash offset to copy to
+ * @from: memory to copy from
+ * @len: how much to copy
+ *
+ * See gf_copy_from() caveat.
+ */
+static void gf_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
+{
+ struct async_state *state = gf_map_info_to_state(map);
+
+ int this_len;
+
+ while (len) {
+ if ((to % state->win_size) + len > state->win_size)
+ this_len = state->win_size - (to % state->win_size);
+ else
+ this_len = len;
+
+ gf_set_gpios(state, to);
+ memcpy_toio(map->virt + (to % state->win_size), from, this_len);
+
+ len -= this_len;
+ to += this_len;
+ from += this_len;
+ }
+}
+
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", NULL };
+
+/**
+ * gpio_flash_probe() - setup a mapping for a GPIO assisted flash
+ * @pdev: platform device
+ *
+ * The platform resource layout expected looks something like:
+ * struct mtd_partition partitions[] = { ... };
+ * struct physmap_flash_data flash_data = { ... };
+ * unsigned flash_gpios[] = { GPIO_XX, GPIO_XX, ... };
+ * struct resource flash_resource[] = {
+ * {
+ * .name = "cfi_probe",
+ * .start = 0x20000000,
+ * .end = 0x201fffff,
+ * .flags = IORESOURCE_MEM,
+ * }, {
+ * .start = (unsigned long)flash_gpios,
+ * .end = ARRAY_SIZE(flash_gpios),
+ * .flags = IORESOURCE_IRQ,
+ * }
+ * };
+ * struct platform_device flash_device = {
+ * .name = "gpio-addr-flash",
+ * .dev = { .platform_data = &flash_data, },
+ * .num_resources = ARRAY_SIZE(flash_resource),
+ * .resource = flash_resource,
+ * ...
+ * };
+ */
+static int gpio_flash_probe(struct platform_device *pdev)
+{
+ size_t i, arr_size;
+ struct physmap_flash_data *pdata;
+ struct resource *memory;
+ struct resource *gpios;
+ struct async_state *state;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpios = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!memory || !gpios || !gpios->end)
+ return -EINVAL;
+
+ arr_size = sizeof(int) * gpios->end;
+ state = kzalloc(sizeof(*state) + arr_size, GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ /*
+ * We cast start/end to known types in the boards file, so cast
+ * away their pointer types here to the known types (gpios->xxx).
+ */
+ state->gpio_count = gpios->end;
+ state->gpio_addrs = (void *)(unsigned long)gpios->start;
+ state->gpio_values = (void *)(state + 1);
+ state->win_size = resource_size(memory);
+ memset(state->gpio_values, 0xff, arr_size);
+
+ state->map.name = DRIVER_NAME;
+ state->map.read = gf_read;
+ state->map.copy_from = gf_copy_from;
+ state->map.write = gf_write;
+ state->map.copy_to = gf_copy_to;
+ state->map.bankwidth = pdata->width;
+ state->map.size = state->win_size * (1 << state->gpio_count);
+ state->map.virt = ioremap_nocache(memory->start, state->map.size);
+ if (!state->map.virt) {
+ kfree(state);
+ return -ENOMEM;
+ }
+ state->map.phys = NO_XIP;
+ state->map.map_priv_1 = (unsigned long)state;
+
+ platform_set_drvdata(pdev, state);
+
+ i = 0;
+ do {
+ if (gpio_request(state->gpio_addrs[i], DRIVER_NAME)) {
+ pr_devinit(KERN_ERR PFX "failed to request gpio %d\n",
+ state->gpio_addrs[i]);
+ while (i--)
+ gpio_free(state->gpio_addrs[i]);
+ kfree(state);
+ return -EBUSY;
+ }
+ gpio_direction_output(state->gpio_addrs[i], 0);
+ } while (++i < state->gpio_count);
+
+ pr_devinit(KERN_NOTICE PFX "probing %d-bit flash bus\n",
+ state->map.bankwidth * 8);
+ state->mtd = do_map_probe(memory->name, &state->map);
+ if (!state->mtd) {
+ for (i = 0; i < state->gpio_count; ++i)
+ gpio_free(state->gpio_addrs[i]);
+ kfree(state);
+ return -ENXIO;
+ }
+
+
+ mtd_device_parse_register(state->mtd, part_probe_types, NULL,
+ pdata->parts, pdata->nr_parts);
+
+ return 0;
+}
+
+static int gpio_flash_remove(struct platform_device *pdev)
+{
+ struct async_state *state = platform_get_drvdata(pdev);
+ size_t i = 0;
+ do {
+ gpio_free(state->gpio_addrs[i]);
+ } while (++i < state->gpio_count);
+ mtd_device_unregister(state->mtd);
+ map_destroy(state->mtd);
+ kfree(state);
+ return 0;
+}
+
+static struct platform_driver gpio_flash_driver = {
+ .probe = gpio_flash_probe,
+ .remove = gpio_flash_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(gpio_flash_driver);
+
+MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
+MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
new file mode 100644
index 000000000..c7478e18f
--- /dev/null
+++ b/drivers/mtd/maps/ichxrom.c
@@ -0,0 +1,380 @@
+/*
+ * ichxrom.c
+ *
+ * Normal mappings of chips in physical memory
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/list.h>
+
+#define xstr(s) str(s)
+#define str(s) #s
+#define MOD_NAME xstr(KBUILD_BASENAME)
+
+#define ADDRESS_NAME_LEN 18
+
+#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
+
+#define BIOS_CNTL 0x4e
+#define FWH_DEC_EN1 0xE3
+#define FWH_DEC_EN2 0xF0
+#define FWH_SEL1 0xE8
+#define FWH_SEL2 0xEE
+
+struct ichxrom_window {
+ void __iomem* virt;
+ unsigned long phys;
+ unsigned long size;
+ struct list_head maps;
+ struct resource rsrc;
+ struct pci_dev *pdev;
+};
+
+struct ichxrom_map_info {
+ struct list_head list;
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct resource rsrc;
+ char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
+};
+
+static struct ichxrom_window ichxrom_window = {
+ .maps = LIST_HEAD_INIT(ichxrom_window.maps),
+};
+
+static void ichxrom_cleanup(struct ichxrom_window *window)
+{
+ struct ichxrom_map_info *map, *scratch;
+ u16 word;
+
+ /* Disable writes through the rom window */
+ pci_read_config_word(window->pdev, BIOS_CNTL, &word);
+ pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
+ pci_dev_put(window->pdev);
+
+ /* Free all of the mtd devices */
+ list_for_each_entry_safe(map, scratch, &window->maps, list) {
+ if (map->rsrc.parent)
+ release_resource(&map->rsrc);
+ mtd_device_unregister(map->mtd);
+ map_destroy(map->mtd);
+ list_del(&map->list);
+ kfree(map);
+ }
+ if (window->rsrc.parent)
+ release_resource(&window->rsrc);
+ if (window->virt) {
+ iounmap(window->virt);
+ window->virt = NULL;
+ window->phys = 0;
+ window->size = 0;
+ window->pdev = NULL;
+ }
+}
+
+
+static int ichxrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ struct ichxrom_window *window = &ichxrom_window;
+ struct ichxrom_map_info *map = NULL;
+ unsigned long map_top;
+ u8 byte;
+ u16 word;
+
+ /* For now I just handle the ichx and I assume there
+ * are not a lot of resources up at the top of the address
+ * space. It is possible to handle other devices in the
+ * top 16MiB but it is very painful. Also, since
+ * you can only really attach a FWH to an ICHX, there
+ * are a number of simplifications you can make.
+ *
+ * You can also page firmware hubs if an 8MiB window isn't enough,
+ * but we don't currently handle that case either.
+ */
+ window->pdev = pdev;
+
+ /* Find a region contiguous with the end of the ROM window */
+ window->phys = 0;
+ pci_read_config_byte(pdev, FWH_DEC_EN1, &byte);
+ if (byte == 0xff) {
+ window->phys = 0xffc00000;
+ pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
+ if ((byte & 0x0f) == 0x0f) {
+ window->phys = 0xff400000;
+ }
+ else if ((byte & 0x0e) == 0x0e) {
+ window->phys = 0xff500000;
+ }
+ else if ((byte & 0x0c) == 0x0c) {
+ window->phys = 0xff600000;
+ }
+ else if ((byte & 0x08) == 0x08) {
+ window->phys = 0xff700000;
+ }
+ }
+ else if ((byte & 0xfe) == 0xfe) {
+ window->phys = 0xffc80000;
+ }
+ else if ((byte & 0xfc) == 0xfc) {
+ window->phys = 0xffd00000;
+ }
+ else if ((byte & 0xf8) == 0xf8) {
+ window->phys = 0xffd80000;
+ }
+ else if ((byte & 0xf0) == 0xf0) {
+ window->phys = 0xffe00000;
+ }
+ else if ((byte & 0xe0) == 0xe0) {
+ window->phys = 0xffe80000;
+ }
+ else if ((byte & 0xc0) == 0xc0) {
+ window->phys = 0xfff00000;
+ }
+ else if ((byte & 0x80) == 0x80) {
+ window->phys = 0xfff80000;
+ }
+
+ if (window->phys == 0) {
+ printk(KERN_ERR MOD_NAME ": Rom window is closed\n");
+ goto out;
+ }
+ window->phys -= 0x400000UL;
+ window->size = (0xffffffffUL - window->phys) + 1UL;
+
+ /* Enable writes through the rom window */
+ pci_read_config_word(pdev, BIOS_CNTL, &word);
+ if (!(word & 1) && (word & (1<<1))) {
+ /* The BIOS will generate an error if I enable
+ * this device, so don't even try.
+ */
+ printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
+ goto out;
+ }
+ pci_write_config_word(pdev, BIOS_CNTL, word | 1);
+
+ /*
+ * Try to reserve the window mem region. If this fails then
+ * it is likely due to the window being "reserved" by the BIOS.
+ */
+ window->rsrc.name = MOD_NAME;
+ window->rsrc.start = window->phys;
+ window->rsrc.end = window->phys + window->size - 1;
+ window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, &window->rsrc)) {
+ window->rsrc.parent = NULL;
+ printk(KERN_DEBUG MOD_NAME ": "
+ "%s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
+ }
+
+ /* Map the firmware hub into my address space. */
+ window->virt = ioremap_nocache(window->phys, window->size);
+ if (!window->virt) {
+ printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
+ window->phys, window->size);
+ goto out;
+ }
+
+ /* Get the first address to look for a ROM chip at */
+ map_top = window->phys;
+ if ((window->phys & 0x3fffff) != 0) {
+ map_top = window->phys + 0x400000;
+ }
+#if 1
+ /* Running the probe sequence over the firmware hub lock
+ * registers sets them to 0x7 (no access).
+ * Probe at most the last 4MiB of the address space.
+ */
+ if (map_top < 0xffc00000) {
+ map_top = 0xffc00000;
+ }
+#endif
+ /* Loop through and look for rom chips */
+ while((map_top - 1) < 0xffffffffUL) {
+ struct cfi_private *cfi;
+ unsigned long offset;
+ int i;
+
+ if (!map) {
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ }
+ if (!map) {
+ printk(KERN_ERR MOD_NAME ": kmalloc failed");
+ goto out;
+ }
+ memset(map, 0, sizeof(*map));
+ INIT_LIST_HEAD(&map->list);
+ map->map.name = map->map_name;
+ map->map.phys = map_top;
+ offset = map_top - window->phys;
+ map->map.virt = (void __iomem *)
+ (((unsigned long)(window->virt)) + offset);
+ map->map.size = 0xffffffffUL - map_top + 1UL;
+ /* Set the name of the map to the address I am trying */
+ sprintf(map->map_name, "%s @%08Lx",
+ MOD_NAME, (unsigned long long)map->map.phys);
+
+ /* Firmware hubs only use vpp when being programmed
+ * in a factory setting. So in-place programming
+ * needs to use a different method.
+ */
+ for(map->map.bankwidth = 32; map->map.bankwidth;
+ map->map.bankwidth >>= 1)
+ {
+ char **probe_type;
+ /* Skip bankwidths that are not supported */
+ if (!map_bankwidth_supported(map->map.bankwidth))
+ continue;
+
+ /* Setup the map methods */
+ simple_map_init(&map->map);
+
+ /* Try all of the probe methods */
+ probe_type = rom_probe_types;
+ for(; *probe_type; probe_type++) {
+ map->mtd = do_map_probe(*probe_type, &map->map);
+ if (map->mtd)
+ goto found;
+ }
+ }
+ map_top += ROM_PROBE_STEP_SIZE;
+ continue;
+ found:
+ /* Trim the size if we are larger than the map */
+ if (map->mtd->size > map->map.size) {
+ printk(KERN_WARNING MOD_NAME
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
+ map->mtd->size = map->map.size;
+ }
+ if (window->rsrc.parent) {
+ /*
+ * Registering the MTD device in iomem may not be possible
+ * if there is a BIOS "reserved" and BUSY range. If this
+ * fails then continue anyway.
+ */
+ map->rsrc.name = map->map_name;
+ map->rsrc.start = map->map.phys;
+ map->rsrc.end = map->map.phys + map->mtd->size - 1;
+ map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&window->rsrc, &map->rsrc)) {
+ printk(KERN_ERR MOD_NAME
+ ": cannot reserve MTD resource\n");
+ map->rsrc.parent = NULL;
+ }
+ }
+
+ /* Make the whole region visible in the map */
+ map->map.virt = window->virt;
+ map->map.phys = window->phys;
+ cfi = map->map.fldrv_priv;
+ for(i = 0; i < cfi->numchips; i++) {
+ cfi->chips[i].start += offset;
+ }
+
+ /* Now that the mtd device is complete, claim and export it */
+ map->mtd->owner = THIS_MODULE;
+ if (mtd_device_register(map->mtd, NULL, 0)) {
+ map_destroy(map->mtd);
+ map->mtd = NULL;
+ goto out;
+ }
+
+
+ /* Calculate the new value of map_top */
+ map_top += map->mtd->size;
+
+ /* File away the map structure */
+ list_add(&map->list, &window->maps);
+ map = NULL;
+ }
+
+ out:
+ /* Free any left over map structures */
+ kfree(map);
+
+ /* See if I have any map structures */
+ if (list_empty(&window->maps)) {
+ ichxrom_cleanup(window);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+static void ichxrom_remove_one(struct pci_dev *pdev)
+{
+ struct ichxrom_window *window = &ichxrom_window;
+ ichxrom_cleanup(window);
+}
+
+static struct pci_device_id ichxrom_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, },
+};
+
+#if 0
+MODULE_DEVICE_TABLE(pci, ichxrom_pci_tbl);
+
+static struct pci_driver ichxrom_driver = {
+ .name = MOD_NAME,
+ .id_table = ichxrom_pci_tbl,
+ .probe = ichxrom_init_one,
+ .remove = ichxrom_remove_one,
+};
+#endif
+
+static int __init init_ichxrom(void)
+{
+ struct pci_dev *pdev;
+ struct pci_device_id *id;
+
+ pdev = NULL;
+ for (id = ichxrom_pci_tbl; id->vendor; id++) {
+ pdev = pci_get_device(id->vendor, id->device, NULL);
+ if (pdev) {
+ break;
+ }
+ }
+ if (pdev) {
+ return ichxrom_init_one(pdev, &ichxrom_pci_tbl[0]);
+ }
+ return -ENXIO;
+#if 0
+ return pci_register_driver(&ichxrom_driver);
+#endif
+}
+
+static void __exit cleanup_ichxrom(void)
+{
+ ichxrom_remove_one(ichxrom_window.pdev);
+}
+
+module_init(init_ichxrom);
+module_exit(cleanup_ichxrom);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ICHX southbridge");
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
new file mode 100644
index 000000000..15bbda03b
--- /dev/null
+++ b/drivers/mtd/maps/impa7.c
@@ -0,0 +1,119 @@
+/*
+ * Handle mapping of the NOR flash on implementa A7 boards
+ *
+ * Copyright 2002 SYSGO Real-Time Solutions GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
+#define WINDOW_SIZE0 0x00800000
+#define WINDOW_ADDR1 0x10000000 /* physical properties of flash */
+#define WINDOW_SIZE1 0x00800000
+#define NUM_FLASHBANKS 2
+#define BUSWIDTH 4
+
+#define MSG_PREFIX "impA7:" /* prefix for our printk()'s */
+#define MTDID "impa7-%d" /* for mtdparts= partitioning */
+
+static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];
+
+static const char * const rom_probe_types[] = { "jedec_probe", NULL };
+
+static struct map_info impa7_map[NUM_FLASHBANKS] = {
+ {
+ .name = "impA7 NOR Flash Bank #0",
+ .size = WINDOW_SIZE0,
+ .bankwidth = BUSWIDTH,
+ },
+ {
+ .name = "impA7 NOR Flash Bank #1",
+ .size = WINDOW_SIZE1,
+ .bankwidth = BUSWIDTH,
+ },
+};
+
+/*
+ * MTD partitioning stuff
+ */
+static struct mtd_partition partitions[] =
+{
+ {
+ .name = "FileSystem",
+ .size = 0x800000,
+ .offset = 0x00000000
+ },
+};
+
+static int __init init_impa7(void)
+{
+ const char * const *type;
+ int i;
+ static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
+ { WINDOW_ADDR0, WINDOW_SIZE0 },
+ { WINDOW_ADDR1, WINDOW_SIZE1 },
+ };
+ int devicesfound = 0;
+
+ for(i=0; i<NUM_FLASHBANKS; i++)
+ {
+ printk(KERN_NOTICE MSG_PREFIX "probing 0x%08lx at 0x%08lx\n",
+ pt[i].size, pt[i].addr);
+
+ impa7_map[i].phys = pt[i].addr;
+ impa7_map[i].virt = ioremap(pt[i].addr, pt[i].size);
+ if (!impa7_map[i].virt) {
+ printk(MSG_PREFIX "failed to ioremap\n");
+ return -EIO;
+ }
+ simple_map_init(&impa7_map[i]);
+
+ impa7_mtd[i] = NULL;
+ type = rom_probe_types;
+ for(; !impa7_mtd[i] && *type; type++) {
+ impa7_mtd[i] = do_map_probe(*type, &impa7_map[i]);
+ }
+
+ if (impa7_mtd[i]) {
+ impa7_mtd[i]->owner = THIS_MODULE;
+ devicesfound++;
+ mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
+ partitions,
+ ARRAY_SIZE(partitions));
+ } else {
+ iounmap((void __iomem *)impa7_map[i].virt);
+ }
+ }
+ return devicesfound == 0 ? -ENXIO : 0;
+}
+
+static void __exit cleanup_impa7(void)
+{
+ int i;
+ for (i=0; i<NUM_FLASHBANKS; i++) {
+ if (impa7_mtd[i]) {
+ mtd_device_unregister(impa7_mtd[i]);
+ map_destroy(impa7_mtd[i]);
+ iounmap((void __iomem *)impa7_map[i].virt);
+ impa7_map[i].virt = NULL;
+ }
+ }
+}
+
+module_init(init_impa7);
+module_exit(cleanup_impa7);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Bartusek <pba@sysgo.de>");
+MODULE_DESCRIPTION("MTD map driver for implementa impA7");
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
new file mode 100644
index 000000000..5ab71f0e1
--- /dev/null
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -0,0 +1,265 @@
+/*
+ * drivers/mtd/maps/intel_vr_nor.c
+ *
+ * An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
+ * Vermilion Range chipset.
+ *
+ * The Vermilion Range Expansion Bus supports four chip selects, each of which
+ * has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
+ * is a 256MiB memory region containing the address spaces for all four of the
+ * chip selects, with start addresses hardcoded on 64MiB boundaries.
+ *
+ * This map driver only supports NOR flash on chip select 0. The buswidth
+ * (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
+ * and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
+ * not modify the value in the EXP_TIMING_CS0 register except to enable writing
+ * and disable boot acceleration. The timing parameters in the register are
+ * assumed to have been properly initialized by the BIOS. The reset default
+ * timing parameters are maximally conservative (slow), so access to the flash
+ * will be slower than it should be if the BIOS has not initialized the timing
+ * parameters.
+ *
+ * Author: Andy Lowe <alowe@mvista.com>
+ *
+ * 2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+
+#define DRV_NAME "vr_nor"
+
+struct vr_nor_mtd {
+ void __iomem *csr_base;
+ struct map_info map;
+ struct mtd_info *info;
+ struct pci_dev *dev;
+};
+
+/* Expansion Bus Configuration and Status Registers are in BAR 0 */
+#define EXP_CSR_MBAR 0
+/* Expansion Bus Memory Window is BAR 1 */
+#define EXP_WIN_MBAR 1
+/* Maximum address space for Chip Select 0 is 64MiB */
+#define CS0_SIZE 0x04000000
+/* Chip Select 0 is at offset 0 in the Memory Window */
+#define CS0_START 0x0
+/* Chip Select 0 Timing Register is at offset 0 in CSR */
+#define EXP_TIMING_CS0 0x00
+#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
+#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
+#define TIMING_WR_EN (1 << 1) /* Write Enable */
+#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
+#define TIMING_MASK 0x3FFF0000
+
+static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
+{
+ mtd_device_unregister(p->info);
+}
+
+static int vr_nor_init_partitions(struct vr_nor_mtd *p)
+{
+ /* register the flash bank */
+ /* partition the flash bank */
+ return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
+}
+
+static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
+{
+ map_destroy(p->info);
+}
+
+static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
+{
+ static const char * const probe_types[] =
+ { "cfi_probe", "jedec_probe", NULL };
+ const char * const *type;
+
+ for (type = probe_types; !p->info && *type; type++)
+ p->info = do_map_probe(*type, &p->map);
+ if (!p->info)
+ return -ENODEV;
+
+ p->info->owner = THIS_MODULE;
+
+ return 0;
+}
+
+static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
+{
+ unsigned int exp_timing_cs0;
+
+ /* write-protect the flash bank */
+ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
+ exp_timing_cs0 &= ~TIMING_WR_EN;
+ writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
+
+ /* unmap the flash window */
+ iounmap(p->map.virt);
+
+ /* unmap the csr window */
+ iounmap(p->csr_base);
+}
+
+/*
+ * Initialize the map_info structure and map the flash.
+ * Returns 0 on success, nonzero otherwise.
+ */
+static int vr_nor_init_maps(struct vr_nor_mtd *p)
+{
+ unsigned long csr_phys, csr_len;
+ unsigned long win_phys, win_len;
+ unsigned int exp_timing_cs0;
+ int err;
+
+ csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
+ csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
+ win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
+ win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
+
+ if (!csr_phys || !csr_len || !win_phys || !win_len)
+ return -ENODEV;
+
+ if (win_len < (CS0_START + CS0_SIZE))
+ return -ENXIO;
+
+ p->csr_base = ioremap_nocache(csr_phys, csr_len);
+ if (!p->csr_base)
+ return -ENOMEM;
+
+ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
+ if (!(exp_timing_cs0 & TIMING_CS_EN)) {
+ dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
+ "is disabled.\n");
+ err = -ENODEV;
+ goto release;
+ }
+ if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
+ dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
+ "is configured for maximally slow access times.\n");
+ }
+ p->map.name = DRV_NAME;
+ p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
+ p->map.phys = win_phys + CS0_START;
+ p->map.size = CS0_SIZE;
+ p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
+ if (!p->map.virt) {
+ err = -ENOMEM;
+ goto release;
+ }
+ simple_map_init(&p->map);
+
+ /* Enable writes to flash bank */
+ exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
+ writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
+
+ return 0;
+
+ release:
+ iounmap(p->csr_base);
+ return err;
+}
+
+static struct pci_device_id vr_nor_pci_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
+ {0,}
+};
+
+static void vr_nor_pci_remove(struct pci_dev *dev)
+{
+ struct vr_nor_mtd *p = pci_get_drvdata(dev);
+
+ vr_nor_destroy_partitions(p);
+ vr_nor_destroy_mtd_setup(p);
+ vr_nor_destroy_maps(p);
+ kfree(p);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+}
+
+static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct vr_nor_mtd *p = NULL;
+ unsigned int exp_timing_cs0;
+ int err;
+
+ err = pci_enable_device(dev);
+ if (err)
+ goto out;
+
+ err = pci_request_regions(dev, DRV_NAME);
+ if (err)
+ goto disable_dev;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!p)
+ goto release;
+
+ p->dev = dev;
+
+ err = vr_nor_init_maps(p);
+ if (err)
+ goto release;
+
+ err = vr_nor_mtd_setup(p);
+ if (err)
+ goto destroy_maps;
+
+ err = vr_nor_init_partitions(p);
+ if (err)
+ goto destroy_mtd_setup;
+
+ pci_set_drvdata(dev, p);
+
+ return 0;
+
+ destroy_mtd_setup:
+ map_destroy(p->info);
+
+ destroy_maps:
+ /* write-protect the flash bank */
+ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
+ exp_timing_cs0 &= ~TIMING_WR_EN;
+ writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
+
+ /* unmap the flash window */
+ iounmap(p->map.virt);
+
+ /* unmap the csr window */
+ iounmap(p->csr_base);
+
+ release:
+ kfree(p);
+ pci_release_regions(dev);
+
+ disable_dev:
+ pci_disable_device(dev);
+
+ out:
+ return err;
+}
+
+static struct pci_driver vr_nor_pci_driver = {
+ .name = DRV_NAME,
+ .probe = vr_nor_pci_probe,
+ .remove = vr_nor_pci_remove,
+ .id_table = vr_nor_pci_ids,
+};
+
+module_pci_driver(vr_nor_pci_driver);
+
+MODULE_AUTHOR("Andy Lowe");
+MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
new file mode 100644
index 000000000..b44307410
--- /dev/null
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -0,0 +1,261 @@
+/*
+ * drivers/mtd/maps/ixp4xx.c
+ *
+ * MTD Map file for IXP4XX based systems. Please do not make per-board
+ * changes in here. If your board needs special setup, do it in your
+ * platform level code in arch/arm/mach-ixp4xx/board-setup.c
+ *
+ * Original Author: Intel Corporation
+ * Maintainer: Deepak Saxena <dsaxena@mvista.com>
+ *
+ * Copyright (C) 2002 Intel Corporation
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/mach/flash.h>
+
+#include <linux/reboot.h>
+
+/*
+ * Read/write a 16 bit word from flash address 'addr'.
+ *
+ * When the cpu is in little-endian mode it swizzles the address lines
+ * ('address coherency') so we need to undo the swizzling to ensure commands
+ * and the like end up on the correct flash address.
+ *
+ * To further complicate matters, due to the way the expansion bus controller
+ * handles 32 bit reads, the byte stream ABCD is stored on the flash as:
+ * D15 D0
+ * +---+---+
+ * | A | B | 0
+ * +---+---+
+ * | C | D | 2
+ * +---+---+
+ * This means that on LE systems each 16 bit word must be swapped. Note that
+ * this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI
+ * data and other flash commands which are always in D7-D0.
+ */
+#ifndef __ARMEB__
+#ifndef CONFIG_MTD_CFI_BE_BYTE_SWAP
+# error CONFIG_MTD_CFI_BE_BYTE_SWAP required
+#endif
+
+static inline u16 flash_read16(void __iomem *addr)
+{
+ return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
+}
+
+#define BYTE0(h) ((h) & 0xFF)
+#define BYTE1(h) (((h) >> 8) & 0xFF)
+
+#else
+
+static inline u16 flash_read16(const void __iomem *addr)
+{
+ return __raw_readw(addr);
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(d, addr);
+}
+
+#define BYTE0(h) (((h) >> 8) & 0xFF)
+#define BYTE1(h) ((h) & 0xFF)
+#endif
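+
+/*
+ * Sketch of what the little-endian helpers above do: a 16-bit read of
+ * flash offset 0 is issued at virt + 2 (the XOR undoes the CPU's address
+ * swizzling) and the value is byte-swapped by be16_to_cpu(); together with
+ * CONFIG_MTD_CFI_BE_BYTE_SWAP this puts command and CFI data, which the
+ * chip always drives on D7-D0, where the MTD core expects them.
+ */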
+
+static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+ val.x[0] = flash_read16(map->virt + ofs);
+ return val;
+}
+
+/*
+ * The IXP4xx expansion bus only allows 16-bit wide accesses
+ * when attached to a 16-bit wide device (such as the 28F128J3A),
+ * so we can't just memcpy_fromio().
+ */
+static void ixp4xx_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ u8 *dest = (u8 *) to;
+ void __iomem *src = map->virt + from;
+
+ if (len <= 0)
+ return;
+
+ if (from & 1) {
+ *dest++ = BYTE1(flash_read16(src-1));
+ src++;
+ --len;
+ }
+
+ while (len >= 2) {
+ u16 data = flash_read16(src);
+ *dest++ = BYTE0(data);
+ *dest++ = BYTE1(data);
+ src += 2;
+ len -= 2;
+ }
+
+ if (len > 0)
+ *dest++ = BYTE0(flash_read16(src));
+}
+
+/*
+ * Unaligned writes are ignored, causing the 8-bit
+ * probe to fail and proceed to the 16-bit probe (which succeeds).
+ */
+static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ if (!(adr & 1))
+ flash_write16(d.x[0], map->virt + adr);
+}
+
+/*
+ * Fast write16 function without the probing check above
+ */
+static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ flash_write16(d.x[0], map->virt + adr);
+}
+
+struct ixp4xx_flash_info {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct resource *res;
+};
+
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+static int ixp4xx_flash_remove(struct platform_device *dev)
+{
+ struct flash_platform_data *plat = dev_get_platdata(&dev->dev);
+ struct ixp4xx_flash_info *info = platform_get_drvdata(dev);
+
+ if(!info)
+ return 0;
+
+ if (info->mtd) {
+ mtd_device_unregister(info->mtd);
+ map_destroy(info->mtd);
+ }
+
+ if (plat->exit)
+ plat->exit();
+
+ return 0;
+}
+
+static int ixp4xx_flash_probe(struct platform_device *dev)
+{
+ struct flash_platform_data *plat = dev_get_platdata(&dev->dev);
+ struct ixp4xx_flash_info *info;
+ struct mtd_part_parser_data ppdata = {
+ .origin = dev->resource->start,
+ };
+ int err = -1;
+
+ if (!plat)
+ return -ENODEV;
+
+ if (plat->init) {
+ err = plat->init();
+ if (err)
+ return err;
+ }
+
+ info = devm_kzalloc(&dev->dev, sizeof(struct ixp4xx_flash_info),
+ GFP_KERNEL);
+ if(!info) {
+ err = -ENOMEM;
+ goto Error;
+ }
+
+ platform_set_drvdata(dev, info);
+
+ /*
+ * Tell the MTD layer we're not 1:1 mapped so that it does
+ * not attempt to do a direct access on us.
+ */
+ info->map.phys = NO_XIP;
+ info->map.size = resource_size(dev->resource);
+
+ /*
+ * We only support 16-bit accesses for now. If and when
+ * any board uses 8-bit access, we'll fix up the driver to
+ * handle that.
+ */
+ info->map.bankwidth = 2;
+ info->map.name = dev_name(&dev->dev);
+ info->map.read = ixp4xx_read16;
+ info->map.write = ixp4xx_probe_write16;
+ info->map.copy_from = ixp4xx_copy_from;
+
+ info->map.virt = devm_ioremap_resource(&dev->dev, dev->resource);
+ if (IS_ERR(info->map.virt)) {
+ err = PTR_ERR(info->map.virt);
+ goto Error;
+ }
+
+ info->mtd = do_map_probe(plat->map_name, &info->map);
+ if (!info->mtd) {
+ printk(KERN_ERR "IXP4XXFlash: map_probe failed\n");
+ err = -ENXIO;
+ goto Error;
+ }
+ info->mtd->owner = THIS_MODULE;
+
+ /* Use the fast version */
+ info->map.write = ixp4xx_write16;
+
+ err = mtd_device_parse_register(info->mtd, probes, &ppdata,
+ plat->parts, plat->nr_parts);
+ if (err) {
+ printk(KERN_ERR "Could not parse partitions\n");
+ goto Error;
+ }
+
+ return 0;
+
+Error:
+ ixp4xx_flash_remove(dev);
+ return err;
+}
+
+static struct platform_driver ixp4xx_flash_driver = {
+ .probe = ixp4xx_flash_probe,
+ .remove = ixp4xx_flash_remove,
+ .driver = {
+ .name = "IXP4XX-Flash",
+ },
+};
+
+module_platform_driver(ixp4xx_flash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
+MODULE_AUTHOR("Deepak Saxena");
+MODULE_ALIAS("platform:IXP4XX-Flash");
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
new file mode 100644
index 000000000..74bd98ee6
--- /dev/null
+++ b/drivers/mtd/maps/l440gx.c
@@ -0,0 +1,166 @@
+/*
+ * BIOS Flash chip on Intel 440GX board.
+ *
+ * Bug: this currently does not work under LinuxBIOS.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+#define PIIXE_IOBASE_RESOURCE 11
+
+#define WINDOW_ADDR 0xfff00000
+#define WINDOW_SIZE 0x00100000
+#define BUSWIDTH 1
+
+static u32 iobase;
+#define IOBASE iobase
+#define TRIBUF_PORT (IOBASE+0x37)
+#define VPP_PORT (IOBASE+0x28)
+
+static struct mtd_info *mymtd;
+
+
+/* Is this really the vpp port? */
+static DEFINE_SPINLOCK(l440gx_vpp_lock);
+static int l440gx_vpp_refcnt;
+static void l440gx_set_vpp(struct map_info *map, int vpp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&l440gx_vpp_lock, flags);
+ if (vpp) {
+ if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */
+ outl(inl(VPP_PORT) | 1, VPP_PORT);
+ } else {
+ if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */
+ outl(inl(VPP_PORT) & ~1, VPP_PORT);
+ }
+ spin_unlock_irqrestore(&l440gx_vpp_lock, flags);
+}
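+
+/*
+ * The refcount above makes nested enables safe: two l440gx_set_vpp(map, 1)
+ * calls followed by a single l440gx_set_vpp(map, 0) leave VPP asserted;
+ * only when the count drops back to zero is the supply switched off.
+ */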
+
+static struct map_info l440gx_map = {
+ .name = "L440GX BIOS",
+ .size = WINDOW_SIZE,
+ .bankwidth = BUSWIDTH,
+ .phys = WINDOW_ADDR,
+#if 0
+ /* FIXME verify that this is the
+ * appropriate code for vpp enable/disable
+ */
+ .set_vpp = l440gx_set_vpp
+#endif
+};
+
+static int __init init_l440gx(void)
+{
+ struct pci_dev *dev, *pm_dev;
+ struct resource *pm_iobase;
+ __u16 word;
+
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_0, NULL);
+
+ pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
+
+ pci_dev_put(dev);
+
+ if (!dev || !pm_dev) {
+ printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n");
+ pci_dev_put(pm_dev);
+ return -ENODEV;
+ }
+
+ l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+
+ if (!l440gx_map.virt) {
+ printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
+ pci_dev_put(pm_dev);
+ return -ENOMEM;
+ }
+ simple_map_init(&l440gx_map);
+ printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt);
+
+ /* Setup the pm iobase resource
+ * This code should move into some kind of generic bridge
+ * driver but for the moment I'm content with getting the
+ * allocation correct.
+ */
+ pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE];
+ if (!(pm_iobase->flags & IORESOURCE_IO)) {
+ pm_iobase->name = "pm iobase";
+ pm_iobase->start = 0;
+ pm_iobase->end = 63;
+ pm_iobase->flags = IORESOURCE_IO;
+
+ /* Put the current value in the resource */
+ pci_read_config_dword(pm_dev, 0x40, &iobase);
+ iobase &= ~1;
+ pm_iobase->start += iobase & ~1;
+ pm_iobase->end += iobase & ~1;
+
+ pci_dev_put(pm_dev);
+
+ /* Allocate the resource region */
+ if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) {
+ pci_dev_put(dev);
+ pci_dev_put(pm_dev);
+ printk(KERN_WARNING "Could not allocate pm iobase resource\n");
+ iounmap(l440gx_map.virt);
+ return -ENXIO;
+ }
+ }
+ /* Set the iobase */
+ iobase = pm_iobase->start;
+ pci_write_config_dword(pm_dev, 0x40, iobase | 1);
+
+
+ /* Set XBCS# */
+ pci_read_config_word(dev, 0x4e, &word);
+ word |= 0x4;
+ pci_write_config_word(dev, 0x4e, word);
+
+ /* Supply write voltage to the chip */
+ l440gx_set_vpp(&l440gx_map, 1);
+
+ /* Enable the gate on the WE line */
+ outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT);
+
+ printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n");
+
+ mymtd = do_map_probe("jedec_probe", &l440gx_map);
+ if (!mymtd) {
+ printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. Using ROM\n");
+ mymtd = do_map_probe("map_rom", &l440gx_map);
+ }
+ if (mymtd) {
+ mymtd->owner = THIS_MODULE;
+
+ mtd_device_register(mymtd, NULL, 0);
+ return 0;
+ }
+
+ iounmap(l440gx_map.virt);
+ return -ENXIO;
+}
+
+static void __exit cleanup_l440gx(void)
+{
+ mtd_device_unregister(mymtd);
+ map_destroy(mymtd);
+
+ iounmap(l440gx_map.virt);
+}
+
+module_init(init_l440gx);
+module_exit(cleanup_l440gx);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on Intel L440GX motherboards");
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
new file mode 100644
index 000000000..33d26f5be
--- /dev/null
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -0,0 +1,216 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/cfi.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/of.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * The NOR flash is connected to the same external bus unit (EBU) as PCI.
+ * To make PCI work we need to enable the endianness swapping for the address
+ * written to the EBU. This endianness swapping works for PCI correctly but
+ * fails for attached NOR devices. To work around this we need to use a complex
+ * map. The workaround involves swapping all addresses whilst probing the chip.
+ * Once probing is complete we stop swapping the addresses but swizzle the
+ * unlock addresses to ensure that access to the NOR device works correctly.
+ */
+
+enum {
+ LTQ_NOR_PROBING,
+ LTQ_NOR_NORMAL
+};
+
+struct ltq_mtd {
+ struct resource *res;
+ struct mtd_info *mtd;
+ struct map_info *map;
+};
+
+static const char ltq_map_name[] = "ltq_nor";
+static const char * const ltq_probe_types[] = { "cmdlinepart", "ofpart", NULL };
+
+static map_word
+ltq_read16(struct map_info *map, unsigned long adr)
+{
+ unsigned long flags;
+ map_word temp;
+
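+	/* while probing, the EBU swaps addresses; undo that swap here */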
+ if (map->map_priv_1 == LTQ_NOR_PROBING)
+ adr ^= 2;
+ spin_lock_irqsave(&ebu_lock, flags);
+ temp.x[0] = *(u16 *)(map->virt + adr);
+ spin_unlock_irqrestore(&ebu_lock, flags);
+ return temp;
+}
+
+static void
+ltq_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ unsigned long flags;
+
+ if (map->map_priv_1 == LTQ_NOR_PROBING)
+ adr ^= 2;
+ spin_lock_irqsave(&ebu_lock, flags);
+ *(u16 *)(map->virt + adr) = d.x[0];
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+/*
+ * The following 2 functions copy data between iomem and a cached memory
+ * section. As memcpy() makes use of pre-fetching we cannot use it here.
+ * The normal alternative of using memcpy_{to,from}io also makes use of
+ * memcpy() on MIPS so it is not applicable either. We are therefore stuck
+ * with having to use our own loop.
+ */
+static void
+ltq_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ unsigned char *f = (unsigned char *)map->virt + from;
+ unsigned char *t = (unsigned char *)to;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ while (len--)
+ *t++ = *f++;
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+static void
+ltq_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
+{
+ unsigned char *f = (unsigned char *)from;
+ unsigned char *t = (unsigned char *)map->virt + to;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ while (len--)
+ *t++ = *f++;
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+static int
+ltq_mtd_probe(struct platform_device *pdev)
+{
+ struct mtd_part_parser_data ppdata;
+ struct ltq_mtd *ltq_mtd;
+ struct cfi_private *cfi;
+ int err;
+
+ if (of_machine_is_compatible("lantiq,falcon") &&
+ (ltq_boot_select() != BS_FLASH)) {
+ dev_err(&pdev->dev, "invalid bootstrap options\n");
+ return -ENODEV;
+ }
+
+ ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL);
+ if (!ltq_mtd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ltq_mtd);
+
+ ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ltq_mtd->res) {
+ dev_err(&pdev->dev, "failed to get memory resource\n");
+ return -ENOENT;
+ }
+
+ ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
+ GFP_KERNEL);
+ if (!ltq_mtd->map)
+ return -ENOMEM;
+
+ ltq_mtd->map->phys = ltq_mtd->res->start;
+ ltq_mtd->map->size = resource_size(ltq_mtd->res);
+ ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
+ if (IS_ERR(ltq_mtd->map->virt))
+ return PTR_ERR(ltq_mtd->map->virt);
+
+ ltq_mtd->map->name = ltq_map_name;
+ ltq_mtd->map->bankwidth = 2;
+ ltq_mtd->map->read = ltq_read16;
+ ltq_mtd->map->write = ltq_write16;
+ ltq_mtd->map->copy_from = ltq_copy_from;
+ ltq_mtd->map->copy_to = ltq_copy_to;
+
+ ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING;
+ ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map);
+ ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL;
+
+ if (!ltq_mtd->mtd) {
+ dev_err(&pdev->dev, "probing failed\n");
+ return -ENXIO;
+ }
+
+ ltq_mtd->mtd->owner = THIS_MODULE;
+
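+	/*
+	 * Probing is finished and the address swapping is off again, so
+	 * swizzle the CFI unlock addresses as described at the top.
+	 */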
+ cfi = ltq_mtd->map->fldrv_priv;
+ cfi->addr_unlock1 ^= 1;
+ cfi->addr_unlock2 ^= 1;
+
+ ppdata.of_node = pdev->dev.of_node;
+ err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types,
+ &ppdata, NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add partitions\n");
+ goto err_destroy;
+ }
+
+ return 0;
+
+err_destroy:
+ map_destroy(ltq_mtd->mtd);
+ return err;
+}
+
+static int
+ltq_mtd_remove(struct platform_device *pdev)
+{
+ struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
+
+ if (ltq_mtd && ltq_mtd->mtd) {
+ mtd_device_unregister(ltq_mtd->mtd);
+ map_destroy(ltq_mtd->mtd);
+ }
+ return 0;
+}
+
+static const struct of_device_id ltq_mtd_match[] = {
+ { .compatible = "lantiq,nor" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mtd_match);
+
+static struct platform_driver ltq_mtd_driver = {
+ .probe = ltq_mtd_probe,
+ .remove = ltq_mtd_remove,
+ .driver = {
+ .name = "ltq-nor",
+ .of_match_table = ltq_mtd_match,
+ },
+};
+
+module_platform_driver(ltq_mtd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Lantiq SoC NOR");
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
new file mode 100644
index 000000000..cadfbe051
--- /dev/null
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -0,0 +1,230 @@
+/*
+ * Interface for NOR flash driver whose high address lines are latched
+ *
+ * Copyright © 2000 Nicolas Pitre <nico@cam.org>
+ * Copyright © 2005-2008 Analog Devices Inc.
+ * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/latch-addr-flash.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "latch-addr-flash"
+
+struct latch_addr_flash_info {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct resource *res;
+
+ void (*set_window)(unsigned long offset, void *data);
+ void *data;
+
+ /* cache; could be found out of res */
+ unsigned long win_mask;
+
+ spinlock_t lock;
+};
+
+static map_word lf_read(struct map_info *map, unsigned long ofs)
+{
+ struct latch_addr_flash_info *info;
+ map_word datum;
+
+ info = (struct latch_addr_flash_info *)map->map_priv_1;
+
+ spin_lock(&info->lock);
+
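+	/* latch the high address lines, then access within the window */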
+ info->set_window(ofs, info->data);
+ datum = inline_map_read(map, info->win_mask & ofs);
+
+ spin_unlock(&info->lock);
+
+ return datum;
+}
+
+static void lf_write(struct map_info *map, map_word datum, unsigned long ofs)
+{
+ struct latch_addr_flash_info *info;
+
+ info = (struct latch_addr_flash_info *)map->map_priv_1;
+
+ spin_lock(&info->lock);
+
+ info->set_window(ofs, info->data);
+ inline_map_write(map, datum, info->win_mask & ofs);
+
+ spin_unlock(&info->lock);
+}
+
+static void lf_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ struct latch_addr_flash_info *info =
+ (struct latch_addr_flash_info *) map->map_priv_1;
+ unsigned n;
+
+ while (len > 0) {
+ n = info->win_mask + 1 - (from & info->win_mask);
+ if (n > len)
+ n = len;
+
+ spin_lock(&info->lock);
+
+ info->set_window(from, info->data);
+ memcpy_fromio(to, map->virt + (from & info->win_mask), n);
+
+ spin_unlock(&info->lock);
+
+ to += n;
+ from += n;
+ len -= n;
+ }
+}
+
+static char *rom_probe_types[] = { "cfi_probe", NULL };
+
+static int latch_addr_flash_remove(struct platform_device *dev)
+{
+ struct latch_addr_flash_info *info;
+ struct latch_addr_flash_data *latch_addr_data;
+
+ info = platform_get_drvdata(dev);
+ if (info == NULL)
+ return 0;
+
+ latch_addr_data = dev_get_platdata(&dev->dev);
+
+ if (info->mtd != NULL) {
+ mtd_device_unregister(info->mtd);
+ map_destroy(info->mtd);
+ }
+
+ if (info->map.virt != NULL)
+ iounmap(info->map.virt);
+
+ if (info->res != NULL)
+ release_mem_region(info->res->start, resource_size(info->res));
+
+ kfree(info);
+
+ if (latch_addr_data->done)
+ latch_addr_data->done(latch_addr_data->data);
+
+ return 0;
+}
+
+static int latch_addr_flash_probe(struct platform_device *dev)
+{
+ struct latch_addr_flash_data *latch_addr_data;
+ struct latch_addr_flash_info *info;
+ resource_size_t win_base = dev->resource->start;
+ resource_size_t win_size = resource_size(dev->resource);
+ char **probe_type;
+ int chipsel;
+ int err;
+
+ latch_addr_data = dev_get_platdata(&dev->dev);
+ if (latch_addr_data == NULL)
+ return -ENODEV;
+
+ pr_notice("latch-addr platform flash device: %#llx byte "
+ "window at %#.8llx\n",
+ (unsigned long long)win_size, (unsigned long long)win_base);
+
+ chipsel = dev->id;
+
+ if (latch_addr_data->init) {
+ err = latch_addr_data->init(latch_addr_data->data, chipsel);
+ if (err != 0)
+ return err;
+ }
+
+ info = kzalloc(sizeof(struct latch_addr_flash_info), GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ platform_set_drvdata(dev, info);
+
+ info->res = request_mem_region(win_base, win_size, DRIVER_NAME);
+ if (info->res == NULL) {
+ dev_err(&dev->dev, "Could not reserve memory region\n");
+ err = -EBUSY;
+ goto free_info;
+ }
+
+ info->map.name = DRIVER_NAME;
+ info->map.size = latch_addr_data->size;
+ info->map.bankwidth = latch_addr_data->width;
+
+ info->map.phys = NO_XIP;
+ info->map.virt = ioremap(win_base, win_size);
+ if (!info->map.virt) {
+ err = -ENOMEM;
+ goto free_res;
+ }
+
+ info->map.map_priv_1 = (unsigned long)info;
+
+ info->map.read = lf_read;
+ info->map.copy_from = lf_copy_from;
+ info->map.write = lf_write;
+ info->set_window = latch_addr_data->set_window;
+ info->data = latch_addr_data->data;
+ info->win_mask = win_size - 1;
+
+ spin_lock_init(&info->lock);
+
+ for (probe_type = rom_probe_types; !info->mtd && *probe_type;
+ probe_type++)
+ info->mtd = do_map_probe(*probe_type, &info->map);
+
+ if (info->mtd == NULL) {
+ dev_err(&dev->dev, "map_probe failed\n");
+ err = -ENODEV;
+ goto iounmap;
+ }
+ info->mtd->owner = THIS_MODULE;
+
+ mtd_device_parse_register(info->mtd, NULL, NULL,
+ latch_addr_data->parts,
+ latch_addr_data->nr_parts);
+ return 0;
+
+iounmap:
+ iounmap(info->map.virt);
+free_res:
+ release_mem_region(info->res->start, resource_size(info->res));
+free_info:
+ kfree(info);
+done:
+ if (latch_addr_data->done)
+ latch_addr_data->done(latch_addr_data->data);
+ return err;
+}
+
+static struct platform_driver latch_addr_flash_driver = {
+ .probe = latch_addr_flash_probe,
+ .remove = latch_addr_flash_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(latch_addr_flash_driver);
+
+MODULE_AUTHOR("David Griego <dgriego@mvista.com>");
+MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
+ "address lines being set board specifically");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/maps/map_funcs.c b/drivers/mtd/maps/map_funcs.c
new file mode 100644
index 000000000..3f268370e
--- /dev/null
+++ b/drivers/mtd/maps/map_funcs.c
@@ -0,0 +1,43 @@
+/*
+ * Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS
+ * is enabled.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+
+static map_word __xipram simple_map_read(struct map_info *map, unsigned long ofs)
+{
+ return inline_map_read(map, ofs);
+}
+
+static void __xipram simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
+{
+ inline_map_write(map, datum, ofs);
+}
+
+static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ inline_map_copy_from(map, to, from, len);
+}
+
+static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ inline_map_copy_to(map, to, from, len);
+}
+
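+/*
+ * Hook a simple, directly memory-mapped chip up to the out-of-line
+ * accessors above.  The caller must set map->bankwidth (and map->virt,
+ * which the inline operations dereference) before calling this.
+ */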
+void simple_map_init(struct map_info *map)
+{
+ BUG_ON(!map_bankwidth_supported(map->bankwidth));
+
+ map->read = simple_map_read;
+ map->write = simple_map_write;
+ map->copy_from = simple_map_copy_from;
+ map->copy_to = simple_map_copy_to;
+}
+
+EXPORT_SYMBOL(simple_map_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
new file mode 100644
index 000000000..81dc2598b
--- /dev/null
+++ b/drivers/mtd/maps/netsc520.c
@@ -0,0 +1,140 @@
+/* netsc520.c -- MTD map driver for AMD NetSc520 Demonstration Board
+ *
+ * Copyright (C) 2001 Mark Langsdorf (mark.langsdorf@amd.com)
+ * based on sc520cdp.c by Sysgo Real-Time Solutions GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * The NetSc520 is a demonstration board for the Elan Sc520 processor available
+ * from AMD. It has a single bank of 16 megs of 32-bit Flash ROM and another
+ * 16 megs of SDRAM.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+
+/*
+** The single, 16 megabyte flash bank is divided into four virtual
+** partitions. The first partition is 768 KiB and is intended to
+** store the kernel image loaded by the bootstrap loader. The second
+** partition is 256 KiB and holds the BIOS image. The third
+** partition is 14.5 MiB and is intended for the flash file system
+** image. The last partition is 512 KiB and contains another copy
+** of the BIOS image and the reset vector.
+**
+** Only the third partition should be mounted. The first partition
+** should not be mounted, but it can be erased and written to using the
+** MTD character routines. The second and fourth partitions should
+** not be touched - it is possible to corrupt the BIOS image by
+** mounting these partitions, and potentially the board will not be
+** recoverable afterwards.
+*/
+
+/* partition_info gives details on the logical partitions that the single
+ * flash device is split into. If the size is zero we use up to the end of
+ * the device. */
+static struct mtd_partition partition_info[]={
+ {
+ .name = "NetSc520 boot kernel",
+ .offset = 0,
+ .size = 0xc0000
+ },
+ {
+ .name = "NetSc520 Low BIOS",
+ .offset = 0xc0000,
+ .size = 0x40000
+ },
+ {
+ .name = "NetSc520 file system",
+ .offset = 0x100000,
+ .size = 0xe80000
+ },
+ {
+ .name = "NetSc520 High BIOS",
+ .offset = 0xf80000,
+ .size = 0x80000
+ },
+};
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
+
+#define WINDOW_SIZE 0x00100000
+#define WINDOW_ADDR 0x00200000
+
+static struct map_info netsc520_map = {
+ .name = "netsc520 Flash Bank",
+ .size = WINDOW_SIZE,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR,
+};
+
+#define NUM_FLASH_BANKS ARRAY_SIZE(netsc520_map)
+
+static struct mtd_info *mymtd;
+
+static int __init init_netsc520(void)
+{
+ printk(KERN_NOTICE "NetSc520 flash device: 0x%Lx at 0x%Lx\n",
+ (unsigned long long)netsc520_map.size,
+ (unsigned long long)netsc520_map.phys);
+ netsc520_map.virt = ioremap_nocache(netsc520_map.phys, netsc520_map.size);
+
+ if (!netsc520_map.virt) {
+ printk("Failed to ioremap_nocache\n");
+ return -EIO;
+ }
+
+ simple_map_init(&netsc520_map);
+
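+	/* try CFI flash first, then fall back to plain RAM or ROM probes */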
+ mymtd = do_map_probe("cfi_probe", &netsc520_map);
+ if(!mymtd)
+ mymtd = do_map_probe("map_ram", &netsc520_map);
+ if(!mymtd)
+ mymtd = do_map_probe("map_rom", &netsc520_map);
+
+ if (!mymtd) {
+ iounmap(netsc520_map.virt);
+ return -ENXIO;
+ }
+
+ mymtd->owner = THIS_MODULE;
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
+ return 0;
+}
+
+static void __exit cleanup_netsc520(void)
+{
+ if (mymtd) {
+ mtd_device_unregister(mymtd);
+ map_destroy(mymtd);
+ }
+ if (netsc520_map.virt) {
+ iounmap(netsc520_map.virt);
+ netsc520_map.virt = NULL;
+ }
+}
+
+module_init(init_netsc520);
+module_exit(cleanup_netsc520);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>");
+MODULE_DESCRIPTION("MTD map driver for AMD NetSc520 Demonstration Board");
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
new file mode 100644
index 000000000..eadcfffc4
--- /dev/null
+++ b/drivers/mtd/maps/nettel.c
@@ -0,0 +1,454 @@
+/****************************************************************************/
+
+/*
+ * nettel.c -- mappings for NETtel/SecureEdge/SnapGear (x86) boards.
+ *
+ * (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2001-2002, SnapGear (www.snapgear.com)
+ */
+
+/****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/cfi.h>
+#include <linux/reboot.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include <linux/root_dev.h>
+#include <asm/io.h>
+
+/****************************************************************************/
+
+#define INTEL_BUSWIDTH 1
+#define AMD_WINDOW_MAXSIZE 0x00200000
+#define AMD_BUSWIDTH 1
+
+/*
+ * PAR masks and shifts, assuming 64K pages.
+ */
+#define SC520_PAR_ADDR_MASK 0x00003fff
+#define SC520_PAR_ADDR_SHIFT 16
+#define SC520_PAR_TO_ADDR(par) \
+ (((par)&SC520_PAR_ADDR_MASK) << SC520_PAR_ADDR_SHIFT)
+
+#define SC520_PAR_SIZE_MASK 0x01ffc000
+#define SC520_PAR_SIZE_SHIFT 2
+#define SC520_PAR_TO_SIZE(par) \
+ ((((par)&SC520_PAR_SIZE_MASK) << SC520_PAR_SIZE_SHIFT) + (64*1024))
+
+#define SC520_PAR(cs, addr, size) \
+ ((cs) | \
+ ((((size)-(64*1024)) >> SC520_PAR_SIZE_SHIFT) & SC520_PAR_SIZE_MASK) | \
+ (((addr) >> SC520_PAR_ADDR_SHIFT) & SC520_PAR_ADDR_MASK))
+
+#define SC520_PAR_BOOTCS 0x8a000000
+#define SC520_PAR_ROMCS1 0xaa000000
+#define SC520_PAR_ROMCS2 0xca000000 /* Cache disabled, 64K page */
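+
+/*
+ * Worked example with illustrative values: SC520_PAR(SC520_PAR_BOOTCS,
+ * 0x20000000, 0x00200000) packs the size field as
+ * ((0x00200000 - 0x10000) >> SC520_PAR_SIZE_SHIFT) & SC520_PAR_SIZE_MASK
+ * = 0x0007c000 and the address field as
+ * (0x20000000 >> SC520_PAR_ADDR_SHIFT) & SC520_PAR_ADDR_MASK = 0x2000,
+ * yielding 0x8a07e000.  SC520_PAR_TO_ADDR() and SC520_PAR_TO_SIZE()
+ * invert this packing.
+ */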
+
+static void *nettel_mmcrp = NULL;
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+static struct mtd_info *intel_mtd;
+#endif
+static struct mtd_info *amd_mtd;
+
+/****************************************************************************/
+
+/****************************************************************************/
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+static struct map_info nettel_intel_map = {
+ .name = "SnapGear Intel",
+ .size = 0,
+ .bankwidth = INTEL_BUSWIDTH,
+};
+
+static struct mtd_partition nettel_intel_partitions[] = {
+ {
+ .name = "SnapGear kernel",
+ .offset = 0,
+ .size = 0x000e0000
+ },
+ {
+ .name = "SnapGear filesystem",
+ .offset = 0x00100000,
+ },
+ {
+ .name = "SnapGear config",
+ .offset = 0x000e0000,
+ .size = 0x00020000
+ },
+ {
+ .name = "SnapGear Intel",
+ .offset = 0
+ },
+ {
+ .name = "SnapGear BIOS Config",
+ .offset = 0x007e0000,
+ .size = 0x00020000
+ },
+ {
+ .name = "SnapGear BIOS",
+ .offset = 0x007e0000,
+ .size = 0x00020000
+ },
+};
+#endif
+
+static struct map_info nettel_amd_map = {
+ .name = "SnapGear AMD",
+ .size = AMD_WINDOW_MAXSIZE,
+ .bankwidth = AMD_BUSWIDTH,
+};
+
+static struct mtd_partition nettel_amd_partitions[] = {
+ {
+ .name = "SnapGear BIOS config",
+ .offset = 0x000e0000,
+ .size = 0x00010000
+ },
+ {
+ .name = "SnapGear BIOS",
+ .offset = 0x000f0000,
+ .size = 0x00010000
+ },
+ {
+ .name = "SnapGear AMD",
+ .offset = 0
+ },
+ {
+ .name = "SnapGear high BIOS",
+ .offset = 0x001f0000,
+ .size = 0x00010000
+ }
+};
+
+#define NUM_AMD_PARTITIONS ARRAY_SIZE(nettel_amd_partitions)
+
+/****************************************************************************/
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+
+/*
+ * Set the Intel flash back to read mode since some old boot
+ * loaders don't.
+ */
+static int nettel_reboot_notifier(struct notifier_block *nb, unsigned long val, void *v)
+{
+ struct cfi_private *cfi = nettel_intel_map.fldrv_priv;
+ unsigned long b;
+
+ /* Make sure all FLASH chips are put back into read mode */
+ for (b = 0; (b < nettel_intel_partitions[3].size); b += 0x100000) {
+ cfi_send_gen_cmd(0xff, 0x55, b, &nettel_intel_map, cfi,
+ cfi->device_type, NULL);
+ }
+ return(NOTIFY_OK);
+}
+
+static struct notifier_block nettel_notifier_block = {
+ nettel_reboot_notifier, NULL, 0
+};
+
+#endif
+
+/****************************************************************************/
+
+static int __init nettel_init(void)
+{
+ volatile unsigned long *amdpar;
+ unsigned long amdaddr, maxsize;
+ int num_amd_partitions=0;
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ volatile unsigned long *intel0par, *intel1par;
+ unsigned long orig_bootcspar, orig_romcs1par;
+ unsigned long intel0addr, intel0size;
+ unsigned long intel1addr, intel1size;
+ int intelboot, intel0cs, intel1cs;
+ int num_intel_partitions;
+#endif
+ int rc = 0;
+
+ nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096);
+ if (nettel_mmcrp == NULL) {
+ printk("SNAPGEAR: failed to disable MMCR cache??\n");
+ return(-EIO);
+ }
+
+ /* Set CPU clock to be 33.000MHz */
+ *((unsigned char *) (nettel_mmcrp + 0xc64)) = 0x01;
+
+ amdpar = (volatile unsigned long *) (nettel_mmcrp + 0xc4);
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ intelboot = 0;
+ intel0cs = SC520_PAR_ROMCS1;
+ intel0par = (volatile unsigned long *) (nettel_mmcrp + 0xc0);
+ intel1cs = SC520_PAR_ROMCS2;
+ intel1par = (volatile unsigned long *) (nettel_mmcrp + 0xbc);
+
+ /*
+ * Save the CS settings then ensure ROMCS1 and ROMCS2 are off,
+ * otherwise they might clash with where we try to map BOOTCS.
+ */
+ orig_bootcspar = *amdpar;
+ orig_romcs1par = *intel0par;
+ *intel0par = 0;
+ *intel1par = 0;
+#endif
+
+ /*
+ * The first thing to do is determine if we have a separate
+ * boot FLASH device. Typically this is a small (1 to 2MB)
+ * AMD FLASH part. It seems that device size is about the
+ * only way to tell if this is the case...
+ */
+ amdaddr = 0x20000000;
+ maxsize = AMD_WINDOW_MAXSIZE;
+
+ *amdpar = SC520_PAR(SC520_PAR_BOOTCS, amdaddr, maxsize);
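+	/* write back and invalidate the caches so no stale cached data
+	 * masks the reprogrammed PAR window */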
+ __asm__ ("wbinvd");
+
+ nettel_amd_map.phys = amdaddr;
+ nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
+ if (!nettel_amd_map.virt) {
+ printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
+ iounmap(nettel_mmcrp);
+ return(-EIO);
+ }
+ simple_map_init(&nettel_amd_map);
+
+ if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
+ printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
+ (int)(amd_mtd->size>>10));
+
+ amd_mtd->owner = THIS_MODULE;
+
+ /* The high BIOS partition is only present for 2MB units */
+ num_amd_partitions = NUM_AMD_PARTITIONS;
+ if (amd_mtd->size < AMD_WINDOW_MAXSIZE)
+ num_amd_partitions--;
+		/* Don't add the partitions until after the primary Intel ones */
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ /*
+ * Map the Intel flash into memory after the AMD
+ * It has to start on a multiple of maxsize.
+ */
+ maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
+ if (maxsize < (32 * 1024 * 1024))
+ maxsize = (32 * 1024 * 1024);
+ intel0addr = amdaddr + maxsize;
+#endif
+ } else {
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ /* INTEL boot FLASH */
+ intelboot++;
+
+ if (!orig_romcs1par) {
+ intel0cs = SC520_PAR_BOOTCS;
+ intel0par = (volatile unsigned long *)
+ (nettel_mmcrp + 0xc4);
+ intel1cs = SC520_PAR_ROMCS1;
+ intel1par = (volatile unsigned long *)
+ (nettel_mmcrp + 0xc0);
+
+ intel0addr = SC520_PAR_TO_ADDR(orig_bootcspar);
+ maxsize = SC520_PAR_TO_SIZE(orig_bootcspar);
+ } else {
+ /* Kernel base is on ROMCS1, not BOOTCS */
+ intel0cs = SC520_PAR_ROMCS1;
+ intel0par = (volatile unsigned long *)
+ (nettel_mmcrp + 0xc0);
+ intel1cs = SC520_PAR_BOOTCS;
+ intel1par = (volatile unsigned long *)
+ (nettel_mmcrp + 0xc4);
+
+ intel0addr = SC520_PAR_TO_ADDR(orig_romcs1par);
+ maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
+ }
+
+ /* Destroy useless AMD MTD mapping */
+ amd_mtd = NULL;
+ iounmap(nettel_amd_map.virt);
+ nettel_amd_map.virt = NULL;
+#else
+ /* Only AMD flash supported */
+ rc = -ENXIO;
+ goto out_unmap2;
+#endif
+ }
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ /*
+	 * We have determined the INTEL FLASH configuration, so let's
+ * go ahead and probe for them now.
+ */
+
+ /* Set PAR to the maximum size */
+ if (maxsize < (32 * 1024 * 1024))
+ maxsize = (32 * 1024 * 1024);
+ *intel0par = SC520_PAR(intel0cs, intel0addr, maxsize);
+
+ /* Turn other PAR off so the first probe doesn't find it */
+ *intel1par = 0;
+
+ /* Probe for the size of the first Intel flash */
+ nettel_intel_map.size = maxsize;
+ nettel_intel_map.phys = intel0addr;
+ nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+ if (!nettel_intel_map.virt) {
+ printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
+ rc = -EIO;
+ goto out_unmap2;
+ }
+ simple_map_init(&nettel_intel_map);
+
+ intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
+ if (!intel_mtd) {
+ rc = -ENXIO;
+ goto out_unmap1;
+ }
+
+ /* Set PAR to the detected size */
+ intel0size = intel_mtd->size;
+ *intel0par = SC520_PAR(intel0cs, intel0addr, intel0size);
+
+ /*
+ * Map second Intel FLASH right after first. Set its size to the
+ * same maxsize used for the first Intel FLASH.
+ */
+ intel1addr = intel0addr + intel0size;
+ *intel1par = SC520_PAR(intel1cs, intel1addr, maxsize);
+ __asm__ ("wbinvd");
+
+ maxsize += intel0size;
+
+ /* Delete the old map and probe again to do both chips */
+ map_destroy(intel_mtd);
+ intel_mtd = NULL;
+ iounmap(nettel_intel_map.virt);
+
+ nettel_intel_map.size = maxsize;
+ nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+ if (!nettel_intel_map.virt) {
+ printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
+ rc = -EIO;
+ goto out_unmap2;
+ }
+
+ intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
+ if (! intel_mtd) {
+ rc = -ENXIO;
+ goto out_unmap1;
+ }
+
+ intel1size = intel_mtd->size - intel0size;
+ if (intel1size > 0) {
+ *intel1par = SC520_PAR(intel1cs, intel1addr, intel1size);
+ __asm__ ("wbinvd");
+ } else {
+ *intel1par = 0;
+ }
+
+ printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %lldKiB\n",
+ (unsigned long long)(intel_mtd->size >> 10));
+
+ intel_mtd->owner = THIS_MODULE;
+
+ num_intel_partitions = ARRAY_SIZE(nettel_intel_partitions);
+
+ if (intelboot) {
+ /*
+ * Adjust offset and size of last boot partition.
+ * Must allow for BIOS region at end of FLASH.
+ */
+ nettel_intel_partitions[1].size = (intel0size + intel1size) -
+ (1024*1024 + intel_mtd->erasesize);
+ nettel_intel_partitions[3].size = intel0size + intel1size;
+ nettel_intel_partitions[4].offset =
+ (intel0size + intel1size) - intel_mtd->erasesize;
+ nettel_intel_partitions[4].size = intel_mtd->erasesize;
+ nettel_intel_partitions[5].offset =
+ nettel_intel_partitions[4].offset;
+ nettel_intel_partitions[5].size =
+ nettel_intel_partitions[4].size;
+ } else {
+ /* No BIOS regions when AMD boot */
+ num_intel_partitions -= 2;
+ }
+ rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
+ num_intel_partitions);
+#endif
+
+ if (amd_mtd) {
+ rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
+ num_amd_partitions);
+ }
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ register_reboot_notifier(&nettel_notifier_block);
+#endif
+
+ return(rc);
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+out_unmap1:
+ iounmap(nettel_intel_map.virt);
+#endif
+
+out_unmap2:
+ iounmap(nettel_mmcrp);
+ iounmap(nettel_amd_map.virt);
+
+ return(rc);
+
+}
+
+/****************************************************************************/
+
+static void __exit nettel_cleanup(void)
+{
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ unregister_reboot_notifier(&nettel_notifier_block);
+#endif
+ if (amd_mtd) {
+ mtd_device_unregister(amd_mtd);
+ map_destroy(amd_mtd);
+ }
+ if (nettel_mmcrp) {
+ iounmap(nettel_mmcrp);
+ nettel_mmcrp = NULL;
+ }
+ if (nettel_amd_map.virt) {
+ iounmap(nettel_amd_map.virt);
+ nettel_amd_map.virt = NULL;
+ }
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ if (intel_mtd) {
+ mtd_device_unregister(intel_mtd);
+ map_destroy(intel_mtd);
+ }
+ if (nettel_intel_map.virt) {
+ iounmap(nettel_intel_map.virt);
+ nettel_intel_map.virt = NULL;
+ }
+#endif
+}
+
+/****************************************************************************/
+
+module_init(nettel_init);
+module_exit(nettel_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
+MODULE_DESCRIPTION("SnapGear/SecureEdge FLASH support");
+
+/****************************************************************************/
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
new file mode 100644
index 000000000..eb0242e0b
--- /dev/null
+++ b/drivers/mtd/maps/pci.c
@@ -0,0 +1,333 @@
+/*
+ * linux/drivers/mtd/maps/pci.c
+ *
+ * Copyright (C) 2001 Russell King, All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Generic PCI memory map driver. We support the following boards:
+ * - Intel IQ80310 ATU.
+ * - Intel EBSA285 (blank rom programming mode). Tested working 27/09/2001
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+struct map_pci_info;
+
+struct mtd_pci_info {
+ int (*init)(struct pci_dev *dev, struct map_pci_info *map);
+ void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
+ unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
+ const char *map_name;
+};
+
+struct map_pci_info {
+ struct map_info map;
+ void __iomem *base;
+ void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
+ unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
+ struct pci_dev *dev;
+};
+
+static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ map_word val;
+ val.x[0]= readb(map->base + map->translate(map, ofs));
+ return val;
+}
+
+static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ map_word val;
+ val.x[0] = readl(map->base + map->translate(map, ofs));
+ return val;
+}
+
+static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ memcpy_fromio(to, map->base + map->translate(map, from), len);
+}
+
+static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ writeb(val.x[0], map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ writel(val.x[0], map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ memcpy_toio(map->base + map->translate(map, to), from, len);
+}
+
+static const struct map_info mtd_pci_map = {
+ .phys = NO_XIP,
+ .copy_from = mtd_pci_copyfrom,
+ .copy_to = mtd_pci_copyto,
+};
+
+/*
+ * Intel IOP80310 Flash driver
+ */
+
+static int
+intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
+{
+ u32 win_base;
+
+ map->map.bankwidth = 1;
+	map->map.read = mtd_pci_read8;
+	map->map.write = mtd_pci_write8;
+
+ map->map.size = 0x00800000;
+ map->base = ioremap_nocache(pci_resource_start(dev, 0),
+ pci_resource_len(dev, 0));
+
+ if (!map->base)
+ return -ENOMEM;
+
+ /*
+ * We want to base the memory window at Xscale
+ * bus address 0, not 0x1000.
+ */
+ pci_read_config_dword(dev, 0x44, &win_base);
+ pci_write_config_dword(dev, 0x44, 0);
+
+ map->map.map_priv_2 = win_base;
+
+ return 0;
+}
+
+static void
+intel_iq80310_exit(struct pci_dev *dev, struct map_pci_info *map)
+{
+ if (map->base)
+ iounmap(map->base);
+ pci_write_config_dword(dev, 0x44, map->map.map_priv_2);
+}
+
+static unsigned long
+intel_iq80310_translate(struct map_pci_info *map, unsigned long ofs)
+{
+ unsigned long page_addr = ofs & 0x00400000;
+
+ /*
+	 * This munges the flash location so we avoid
+ * the first 80 bytes (they appear to read nonsense).
+ */
+ if (page_addr) {
+ writel(0x00000008, map->base + 0x1558);
+ writel(0x00000000, map->base + 0x1550);
+ } else {
+ writel(0x00000007, map->base + 0x1558);
+ writel(0x00800000, map->base + 0x1550);
+ ofs += 0x00800000;
+ }
+
+ return ofs;
+}
+
+static struct mtd_pci_info intel_iq80310_info = {
+ .init = intel_iq80310_init,
+ .exit = intel_iq80310_exit,
+ .translate = intel_iq80310_translate,
+ .map_name = "cfi_probe",
+};
+
+/*
+ * Intel DC21285 driver
+ */
+
+static int
+intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
+{
+ unsigned long base, len;
+
+ base = pci_resource_start(dev, PCI_ROM_RESOURCE);
+ len = pci_resource_len(dev, PCI_ROM_RESOURCE);
+
+ if (!len || !base) {
+ /*
+ * No ROM resource
+ */
+ base = pci_resource_start(dev, 2);
+ len = pci_resource_len(dev, 2);
+
+ /*
+ * We need to re-allocate PCI BAR2 address range to the
+ * PCI ROM BAR, and disable PCI BAR2.
+ */
+ } else {
+ /*
+ * Hmm, if an address was allocated to the ROM resource, but
+ * not enabled, should we be allocating a new resource for it
+ * or simply enabling it?
+ */
+ pci_enable_rom(dev);
+ printk("%s: enabling expansion ROM\n", pci_name(dev));
+ }
+
+ if (!len || !base)
+ return -ENXIO;
+
+ map->map.bankwidth = 4;
+	map->map.read = mtd_pci_read32;
+	map->map.write = mtd_pci_write32;
+ map->map.size = len;
+ map->base = ioremap_nocache(base, len);
+
+ if (!map->base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map)
+{
+ if (map->base)
+ iounmap(map->base);
+
+ /*
+ * We need to undo the PCI BAR2/PCI ROM BAR address alteration.
+ */
+ pci_disable_rom(dev);
+}
+
+static unsigned long
+intel_dc21285_translate(struct map_pci_info *map, unsigned long ofs)
+{
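+	/*
+	 * Accesses within the first 64 bytes have address bit 5
+	 * inverted; all other offsets pass through unchanged.
+	 */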
+ return ofs & 0x00ffffc0 ? ofs : (ofs ^ (1 << 5));
+}
+
+static struct mtd_pci_info intel_dc21285_info = {
+ .init = intel_dc21285_init,
+ .exit = intel_dc21285_exit,
+ .translate = intel_dc21285_translate,
+ .map_name = "jedec_probe",
+};
+
+/*
+ * PCI device ID table
+ */
+
+static struct pci_device_id mtd_pci_ids[] = {
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x530d,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = PCI_CLASS_MEMORY_OTHER << 8,
+ .class_mask = 0xffff00,
+ .driver_data = (unsigned long)&intel_iq80310_info,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_DEC,
+ .device = PCI_DEVICE_ID_DEC_21285,
+ .subvendor = 0, /* DC21285 defaults to 0 on reset */
+ .subdevice = 0, /* DC21285 defaults to 0 on reset */
+ .driver_data = (unsigned long)&intel_dc21285_info,
+ },
+ { 0, }
+};
+
+/*
+ * Generic code follows.
+ */
+
+static int mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
+ struct map_pci_info *map = NULL;
+ struct mtd_info *mtd = NULL;
+ int err;
+
+ err = pci_enable_device(dev);
+ if (err)
+ goto out;
+
+ err = pci_request_regions(dev, "pci mtd");
+ if (err)
+ goto out;
+
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!map)
+ goto release;
+
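+	/* start from the generic map template; the board-specific init
+	 * hook below fills in the bank width, accessors and window */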
+ map->map = mtd_pci_map;
+ map->map.name = pci_name(dev);
+ map->dev = dev;
+ map->exit = info->exit;
+ map->translate = info->translate;
+
+ err = info->init(dev, map);
+ if (err)
+ goto release;
+
+ mtd = do_map_probe(info->map_name, &map->map);
+ err = -ENODEV;
+ if (!mtd)
+ goto release;
+
+ mtd->owner = THIS_MODULE;
+ mtd_device_register(mtd, NULL, 0);
+
+ pci_set_drvdata(dev, mtd);
+
+ return 0;
+
+release:
+ if (map) {
+ map->exit(dev, map);
+ kfree(map);
+ }
+
+ pci_release_regions(dev);
+out:
+ return err;
+}
+
+static void mtd_pci_remove(struct pci_dev *dev)
+{
+ struct mtd_info *mtd = pci_get_drvdata(dev);
+ struct map_pci_info *map = mtd->priv;
+
+ mtd_device_unregister(mtd);
+ map_destroy(mtd);
+ map->exit(dev, map);
+ kfree(map);
+
+ pci_release_regions(dev);
+}
+
+static struct pci_driver mtd_pci_driver = {
+ .name = "MTD PCI",
+ .probe = mtd_pci_probe,
+ .remove = mtd_pci_remove,
+ .id_table = mtd_pci_ids,
+};
+
+module_pci_driver(mtd_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Generic PCI map driver");
+MODULE_DEVICE_TABLE(pci, mtd_pci_ids);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
new file mode 100644
index 000000000..af747af5e
--- /dev/null
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -0,0 +1,753 @@
+/*
+ * pcmciamtd.c - MTD driver for PCMCIA flash memory cards
+ *
+ * Author: Simon Evans <spse@secret.org.uk>
+ *
+ * Copyright (C) 2002 Simon Evans
+ *
+ * Licence: GPL
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <asm/io.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+
+#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
+
+#define DRIVER_DESC "PCMCIA Flash memory card driver"
+
+/* Size of the PCMCIA address space: 26 bits = 64 MB */
+#define MAX_PCMCIA_ADDR 0x4000000
+
+struct pcmciamtd_dev {
+ struct pcmcia_device *p_dev;
+ caddr_t win_base; /* ioremapped address of PCMCIA window */
+ unsigned int win_size; /* size of window */
+ unsigned int offset; /* offset into card the window currently points at */
+ struct map_info pcmcia_map;
+ struct mtd_info *mtd_info;
+ int vpp;
+ char mtd_name[sizeof(struct cistpl_vers_1_t)];
+};
+
+
+/* Module parameters */
+
+/* 2 = do 16-bit transfers, 1 = do 8-bit transfers */
+static int bankwidth = 2;
+
+/* Speed of memory accesses, in ns */
+static int mem_speed;
+
+/* Force the size of an SRAM card */
+static int force_size;
+
+/* Force Vpp */
+static int vpp;
+
+/* Set Vpp */
+static int setvpp;
+
+/* Force card to be treated as FLASH, ROM or RAM */
+static int mem_type;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+module_param(bankwidth, int, 0);
+MODULE_PARM_DESC(bankwidth, "Set bankwidth (1=8 bit, 2=16 bit, default=2)");
+module_param(mem_speed, int, 0);
+MODULE_PARM_DESC(mem_speed, "Set memory access speed in ns");
+module_param(force_size, int, 0);
+MODULE_PARM_DESC(force_size, "Force size of card in MiB (1-64)");
+module_param(setvpp, int, 0);
+MODULE_PARM_DESC(setvpp, "Set Vpp (0=Never, 1=On writes, 2=Always on, default=0)");
+module_param(vpp, int, 0);
+MODULE_PARM_DESC(vpp, "Vpp value in 1/10ths eg 33=3.3V 120=12V (Dangerous)");
+module_param(mem_type, int, 0);
+MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)");
+
+
+/* read/write{8,16} copy_{from,to} routines with window remapping
+ * to access whole card
+ */
+static caddr_t remap_window(struct map_info *map, unsigned long to)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ struct resource *win = (struct resource *) map->map_priv_2;
+ unsigned int offset;
+ int ret;
+
+ if (!pcmcia_dev_present(dev->p_dev)) {
+ pr_debug("device removed\n");
+ return NULL;
+ }
+
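+	/* round down to the start of the window that contains 'to' */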
+ offset = to & ~(dev->win_size-1);
+ if (offset != dev->offset) {
+ pr_debug("Remapping window from 0x%8.8x to 0x%8.8x\n",
+ dev->offset, offset);
+ ret = pcmcia_map_mem_page(dev->p_dev, win, offset);
+ if (ret != 0)
+ return NULL;
+ dev->offset = offset;
+ }
+ return dev->win_base + (to & (dev->win_size-1));
+}
+
+
+static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
+{
+ caddr_t addr;
+ map_word d = {{0}};
+
+ addr = remap_window(map, ofs);
+ if(!addr)
+ return d;
+
+ d.x[0] = readb(addr);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", ofs, addr, d.x[0]);
+ return d;
+}
+
+
+static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
+{
+ caddr_t addr;
+ map_word d = {{0}};
+
+ addr = remap_window(map, ofs);
+ if(!addr)
+ return d;
+
+ d.x[0] = readw(addr);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", ofs, addr, d.x[0]);
+ return d;
+}
+
+
+static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ unsigned long win_size = dev->win_size;
+
+ pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
+ while(len) {
+ int toread = win_size - (from & (win_size-1));
+ caddr_t addr;
+
+ if(toread > len)
+ toread = len;
+
+ addr = remap_window(map, from);
+ if(!addr)
+ return;
+
+ pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread);
+ memcpy_fromio(to, addr, toread);
+ len -= toread;
+ to += toread;
+ from += toread;
+ }
+}
+
+
+static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr)
+{
+ caddr_t addr = remap_window(map, adr);
+
+ if(!addr)
+ return;
+
+ pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", adr, addr, d.x[0]);
+ writeb(d.x[0], addr);
+}
+
+
+static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr)
+{
+ caddr_t addr = remap_window(map, adr);
+ if(!addr)
+ return;
+
+ pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", adr, addr, d.x[0]);
+ writew(d.x[0], addr);
+}
+
+
+static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ unsigned long win_size = dev->win_size;
+
+ pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
+ while(len) {
+ int towrite = win_size - (to & (win_size-1));
+ caddr_t addr;
+
+ if(towrite > len)
+ towrite = len;
+
+ addr = remap_window(map, to);
+ if(!addr)
+ return;
+
+ pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite);
+ memcpy_toio(addr, from, towrite);
+ len -= towrite;
+ to += towrite;
+ from += towrite;
+ }
+}
+
+
+/* read/write{8,16} copy_{from,to} routines with direct access */
+
+#define DEV_REMOVED(x) (!(pcmcia_dev_present(((struct pcmciamtd_dev *)map->map_priv_1)->p_dev)))
+
+static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+ map_word d = {{0}};
+
+ if(DEV_REMOVED(map))
+ return d;
+
+ d.x[0] = readb(win_base + ofs);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n",
+ ofs, win_base + ofs, d.x[0]);
+ return d;
+}
+
+
+static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+ map_word d = {{0}};
+
+ if(DEV_REMOVED(map))
+ return d;
+
+ d.x[0] = readw(win_base + ofs);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n",
+ ofs, win_base + ofs, d.x[0]);
+ return d;
+}
+
+
+static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
+ memcpy_fromio(to, win_base + from, len);
+}
+
+
+static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n",
+ adr, win_base + adr, d.x[0]);
+ writeb(d.x[0], win_base + adr);
+}
+
+
+static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n",
+ adr, win_base + adr, d.x[0]);
+ writew(d.x[0], win_base + adr);
+}
+
+
+static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
+ memcpy_toio(win_base + to, from, len);
+}
+
+
+static DEFINE_SPINLOCK(pcmcia_vpp_lock);
+static int pcmcia_vpp_refcnt;
+static void pcmciamtd_set_vpp(struct map_info *map, int on)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ struct pcmcia_device *link = dev->p_dev;
+ unsigned long flags;
+
+ pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
+ spin_lock_irqsave(&pcmcia_vpp_lock, flags);
+ if (on) {
+ if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */
+ pcmcia_fixup_vpp(link, dev->vpp);
+ } else {
+ if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */
+ pcmcia_fixup_vpp(link, 0);
+ }
+ spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
+}
+
+
+static void pcmciamtd_release(struct pcmcia_device *link)
+{
+ struct pcmciamtd_dev *dev = link->priv;
+
+ pr_debug("link = 0x%p\n", link);
+
+ if (link->resource[2]->end) {
+ if(dev->win_base) {
+ iounmap(dev->win_base);
+ dev->win_base = NULL;
+ }
+ }
+ pcmcia_disable_device(link);
+}
+
+
+static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data)
+{
+ cisparse_t parse;
+
+ if (!pcmcia_parse_tuple(tuple, &parse)) {
+ cistpl_format_t *t = &parse.format;
+ (void)t; /* Shut up, gcc */
+ pr_debug("Format type: %u, Error Detection: %u, offset = %u, length =%u\n",
+ t->type, t->edc, t->offset, t->length);
+ }
+ return -ENOSPC;
+}
+
+static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data)
+{
+ cisparse_t parse;
+ int i;
+
+ if (!pcmcia_parse_tuple(tuple, &parse)) {
+ cistpl_jedec_t *t = &parse.jedec;
+ for (i = 0; i < t->nid; i++)
+ pr_debug("JEDEC: 0x%02x 0x%02x\n",
+ t->id[i].mfr, t->id[i].info);
+ }
+ return -ENOSPC;
+}
+
+static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data)
+{
+ struct pcmciamtd_dev *dev = priv_data;
+ cisparse_t parse;
+ cistpl_device_t *t = &parse.device;
+ int i;
+
+ if (pcmcia_parse_tuple(tuple, &parse))
+ return -EINVAL;
+
+ pr_debug("Common memory:\n");
+ dev->pcmcia_map.size = t->dev[0].size;
+ /* from here on: DEBUG only */
+ for (i = 0; i < t->ndev; i++) {
+ pr_debug("Region %d, type = %u\n", i, t->dev[i].type);
+ pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp);
+ pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed);
+ pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size);
+ }
+ return 0;
+}
+
+static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data)
+{
+ struct pcmciamtd_dev *dev = priv_data;
+ cisparse_t parse;
+ cistpl_device_geo_t *t = &parse.device_geo;
+ int i;
+
+ if (pcmcia_parse_tuple(tuple, &parse))
+ return -EINVAL;
+
+ dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
+ /* from here on: DEBUG only */
+ for (i = 0; i < t->ngeo; i++) {
+ pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth);
+ pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block);
+ pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block);
+ pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block);
+ pr_debug("region: %d partition = %u\n", i, t->geo[i].partition);
+ pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave);
+ }
+ return 0;
+}
+
+
+static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev, int *new_name)
+{
+ int i;
+
+ if (p_dev->prod_id[0]) {
+ dev->mtd_name[0] = '\0';
+ for (i = 0; i < 4; i++) {
+ if (i)
+ strcat(dev->mtd_name, " ");
+ if (p_dev->prod_id[i])
+ strcat(dev->mtd_name, p_dev->prod_id[i]);
+ }
+ pr_debug("Found name: %s\n", dev->mtd_name);
+ }
+
+ pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL);
+ pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL);
+ pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev);
+ pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev);
+
+ if(!dev->pcmcia_map.size)
+ dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
+
+ if(!dev->pcmcia_map.bankwidth)
+ dev->pcmcia_map.bankwidth = 2;
+
+ if(force_size) {
+ dev->pcmcia_map.size = force_size << 20;
+ pr_debug("size forced to %dM\n", force_size);
+ }
+
+ if(bankwidth) {
+ dev->pcmcia_map.bankwidth = bankwidth;
+ pr_debug("bankwidth forced to %d\n", bankwidth);
+ }
+
+ dev->pcmcia_map.name = dev->mtd_name;
+ if(!dev->mtd_name[0]) {
+ strcpy(dev->mtd_name, "PCMCIA Memory card");
+ *new_name = 1;
+ }
+
+ pr_debug("Device: Size: %lu Width:%d Name: %s\n",
+ dev->pcmcia_map.size,
+ dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
+}
+
+
+static int pcmciamtd_config(struct pcmcia_device *link)
+{
+ struct pcmciamtd_dev *dev = link->priv;
+ struct mtd_info *mtd = NULL;
+ int ret;
+ int i, j = 0;
+ static char *probes[] = { "jedec_probe", "cfi_probe" };
+ int new_name = 0;
+
+ pr_debug("link=0x%p\n", link);
+
+ card_settings(dev, link, &new_name);
+
+ dev->pcmcia_map.phys = NO_XIP;
+ dev->pcmcia_map.copy_from = pcmcia_copy_from_remap;
+ dev->pcmcia_map.copy_to = pcmcia_copy_to_remap;
+ if (dev->pcmcia_map.bankwidth == 1) {
+ dev->pcmcia_map.read = pcmcia_read8_remap;
+ dev->pcmcia_map.write = pcmcia_write8_remap;
+ } else {
+ dev->pcmcia_map.read = pcmcia_read16_remap;
+ dev->pcmcia_map.write = pcmcia_write16_remap;
+ }
+ if(setvpp == 1)
+ dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
+
+	/* Request a memory window for PCMCIA. Some architectures can map windows
+	 * up to the maximum that PCMCIA can support (64MiB) - this is ideal and
+	 * we aim for a window the size of the whole card - otherwise we try
+	 * smaller windows until we succeed.
+ */
+
+ link->resource[2]->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+ link->resource[2]->flags |= (dev->pcmcia_map.bankwidth == 1) ?
+ WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
+ link->resource[2]->start = 0;
+ link->resource[2]->end = (force_size) ? force_size << 20 :
+ MAX_PCMCIA_ADDR;
+ dev->win_size = 0;
+
+ do {
+ int ret;
+ pr_debug("requesting window with size = %luKiB memspeed = %d\n",
+ (unsigned long) resource_size(link->resource[2]) >> 10,
+ mem_speed);
+ ret = pcmcia_request_window(link, link->resource[2], mem_speed);
+ pr_debug("ret = %d dev->win_size = %d\n", ret, dev->win_size);
+ if(ret) {
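+			/* request failed - halve the window size and retry */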
+ j++;
+ link->resource[2]->start = 0;
+ link->resource[2]->end = (force_size) ?
+ force_size << 20 : MAX_PCMCIA_ADDR;
+ link->resource[2]->end >>= j;
+ } else {
+ pr_debug("Got window of size %luKiB\n", (unsigned long)
+ resource_size(link->resource[2]) >> 10);
+ dev->win_size = resource_size(link->resource[2]);
+ break;
+ }
+ } while (link->resource[2]->end >= 0x1000);
+
+ pr_debug("dev->win_size = %d\n", dev->win_size);
+
+ if(!dev->win_size) {
+ dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
+ pcmciamtd_release(link);
+ return -ENODEV;
+ }
+ pr_debug("Allocated a window of %dKiB\n", dev->win_size >> 10);
+
+ /* Get write protect status */
+ dev->win_base = ioremap(link->resource[2]->start,
+ resource_size(link->resource[2]));
+ if(!dev->win_base) {
+ dev_err(&dev->p_dev->dev, "ioremap(%pR) failed\n",
+ link->resource[2]);
+ pcmciamtd_release(link);
+ return -ENODEV;
+ }
+ pr_debug("mapped window dev = %p @ %pR, base = %p\n",
+ dev, link->resource[2], dev->win_base);
+
+ dev->offset = 0;
+ dev->pcmcia_map.map_priv_1 = (unsigned long)dev;
+ dev->pcmcia_map.map_priv_2 = (unsigned long)link->resource[2];
+
+ dev->vpp = (vpp) ? vpp : link->socket->socket.Vpp;
+ if(setvpp == 2) {
+ link->vpp = dev->vpp;
+ } else {
+ link->vpp = 0;
+ }
+
+ link->config_index = 0;
+ pr_debug("Setting Configuration\n");
+ ret = pcmcia_enable_device(link);
+ if (ret != 0) {
+ if (dev->win_base) {
+ iounmap(dev->win_base);
+ dev->win_base = NULL;
+ }
+ return -ENODEV;
+ }
+
+ if(mem_type == 1) {
+ mtd = do_map_probe("map_ram", &dev->pcmcia_map);
+ } else if(mem_type == 2) {
+ mtd = do_map_probe("map_rom", &dev->pcmcia_map);
+ } else {
+ for(i = 0; i < ARRAY_SIZE(probes); i++) {
+ pr_debug("Trying %s\n", probes[i]);
+ mtd = do_map_probe(probes[i], &dev->pcmcia_map);
+ if(mtd)
+ break;
+
+ pr_debug("FAILED: %s\n", probes[i]);
+ }
+ }
+
+ if(!mtd) {
+ pr_debug("Can not find an MTD\n");
+ pcmciamtd_release(link);
+ return -ENODEV;
+ }
+
+ dev->mtd_info = mtd;
+ mtd->owner = THIS_MODULE;
+
+ if(new_name) {
+ int size = 0;
+ char unit = ' ';
+ /* Since we are using a default name, make it better by adding
+ * in the size
+ */
+ if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */
+ size = mtd->size >> 10;
+ unit = 'K';
+ } else {
+ size = mtd->size >> 20;
+ unit = 'M';
+ }
+ snprintf(dev->mtd_name, sizeof(dev->mtd_name), "%d%ciB %s", size, unit, "PCMCIA Memory card");
+ }
+
+	/* If the memory found fits completely into the mapped PCMCIA window,
+	   use the faster non-remapping read/write functions */
+ if(mtd->size <= dev->win_size) {
+ pr_debug("Using non remapping memory functions\n");
+ dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
+ if (dev->pcmcia_map.bankwidth == 1) {
+ dev->pcmcia_map.read = pcmcia_read8;
+ dev->pcmcia_map.write = pcmcia_write8;
+ } else {
+ dev->pcmcia_map.read = pcmcia_read16;
+ dev->pcmcia_map.write = pcmcia_write16;
+ }
+ dev->pcmcia_map.copy_from = pcmcia_copy_from;
+ dev->pcmcia_map.copy_to = pcmcia_copy_to;
+ }
+
+ if (mtd_device_register(mtd, NULL, 0)) {
+ map_destroy(mtd);
+ dev->mtd_info = NULL;
+ dev_err(&dev->p_dev->dev,
+ "Could not register the MTD device\n");
+ pcmciamtd_release(link);
+ return -ENODEV;
+ }
+ dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name);
+ return 0;
+}
+
+
+static int pcmciamtd_suspend(struct pcmcia_device *dev)
+{
+ pr_debug("EVENT_PM_RESUME\n");
+
+ /* get_lock(link); */
+
+ return 0;
+}
+
+static int pcmciamtd_resume(struct pcmcia_device *dev)
+{
+ pr_debug("EVENT_PM_SUSPEND\n");
+
+ /* free_lock(link); */
+
+ return 0;
+}
+
+
+static void pcmciamtd_detach(struct pcmcia_device *link)
+{
+ struct pcmciamtd_dev *dev = link->priv;
+
+ pr_debug("link=0x%p\n", link);
+
+ if(dev->mtd_info) {
+ mtd_device_unregister(dev->mtd_info);
+ dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
+ dev->mtd_info->index);
+ map_destroy(dev->mtd_info);
+ }
+
+ pcmciamtd_release(link);
+}
+
+
+static int pcmciamtd_probe(struct pcmcia_device *link)
+{
+ struct pcmciamtd_dev *dev;
+
+ /* Create new memory card device */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+ pr_debug("dev=0x%p\n", dev);
+
+ dev->p_dev = link;
+ link->priv = dev;
+
+ return pcmciamtd_config(link);
+}
+
+static const struct pcmcia_device_id pcmciamtd_ids[] = {
+ PCMCIA_DEVICE_FUNC_ID(1),
+ PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCS-2M", "2MB SRAM", 0x547e66dc, 0x1fed36cd, 0x36eadd21),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "2MB SRAM", 0xb569a6e5, 0x36eadd21),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "4MB FLASH", 0xb569a6e5, 0x8bc54d2a),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "8MB FLASH", 0xb569a6e5, 0x6df1be3e),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "S2E20SW", 0x816cc815, 0xd14c9dcf),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "S2E8 SW", 0x816cc815, 0xa2d7dedb),
+ PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-02 ", 0x40ade711, 0x145cea5c),
+ PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-04 ", 0x40ade711, 0x42064dda),
+ PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-20 ", 0x40ade711, 0x25ee5cb0),
+ PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8),
+ PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c),
+ PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0),
+ PCMCIA_DEVICE_PROD_ID123("M-Systems", "M-SYS Flash Memory Card", "(c) M-Systems", 0x7ed2ad87, 0x675dc3fb, 0x7aef3965),
+ PCMCIA_DEVICE_PROD_ID12("PRETEC", " 2MB SRAM CARD", 0xebf91155, 0x805360ca),
+ PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b),
+ PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad),
+ PCMCIA_DEVICE_PROD_ID12("SMART Modular Technologies", " 4MB FLASH Card", 0x96fd8277, 0x737a5b05),
+ PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca),
+ PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944),
+ /* the following was commented out in pcmcia-cs-3.2.7 */
+ /* PCMCIA_DEVICE_PROD_ID12("RATOC Systems,Inc.", "SmartMedia ADAPTER PC Card", 0xf4a2fefe, 0x5885b2ae), */
+#ifdef CONFIG_MTD_PCMCIA_ANONYMOUS
+ { .match_flags = PCMCIA_DEV_ID_MATCH_ANONYMOUS, },
+#endif
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, pcmciamtd_ids);
+
+static struct pcmcia_driver pcmciamtd_driver = {
+ .name = "pcmciamtd",
+ .probe = pcmciamtd_probe,
+ .remove = pcmciamtd_detach,
+ .owner = THIS_MODULE,
+ .id_table = pcmciamtd_ids,
+ .suspend = pcmciamtd_suspend,
+ .resume = pcmciamtd_resume,
+};
+
+
+static int __init init_pcmciamtd(void)
+{
+ if(bankwidth && bankwidth != 1 && bankwidth != 2) {
+ info("bad bankwidth (%d), using default", bankwidth);
+ bankwidth = 2;
+ }
+ if(force_size && (force_size < 1 || force_size > 64)) {
+ info("bad force_size (%d), using default", force_size);
+ force_size = 0;
+ }
+ if(mem_type && mem_type != 1 && mem_type != 2) {
+ info("bad mem_type (%d), using default", mem_type);
+ mem_type = 0;
+ }
+ return pcmcia_register_driver(&pcmciamtd_driver);
+}
+
+
+static void __exit exit_pcmciamtd(void)
+{
+ pr_debug(DRIVER_DESC " unloading");
+ pcmcia_unregister_driver(&pcmciamtd_driver);
+}
+
+module_init(init_pcmciamtd);
+module_exit(exit_pcmciamtd);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
new file mode 100644
index 000000000..4305fd607
--- /dev/null
+++ b/drivers/mtd/maps/physmap.c
@@ -0,0 +1,281 @@
+/*
+ * Normal mappings of chips in physical memory
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * 031022 - [jsun] add run-time configure and partition setup
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/concat.h>
+#include <linux/io.h>
+
+#define MAX_RESOURCES 4
+
+struct physmap_flash_info {
+ struct mtd_info *mtd[MAX_RESOURCES];
+ struct mtd_info *cmtd;
+ struct map_info map[MAX_RESOURCES];
+ spinlock_t vpp_lock;
+ int vpp_refcnt;
+};
+
+static int physmap_flash_remove(struct platform_device *dev)
+{
+ struct physmap_flash_info *info;
+ struct physmap_flash_data *physmap_data;
+ int i;
+
+ info = platform_get_drvdata(dev);
+ if (info == NULL)
+ return 0;
+
+ physmap_data = dev_get_platdata(&dev->dev);
+
+ if (info->cmtd) {
+ mtd_device_unregister(info->cmtd);
+ if (info->cmtd != info->mtd[0])
+ mtd_concat_destroy(info->cmtd);
+ }
+
+ for (i = 0; i < MAX_RESOURCES; i++) {
+ if (info->mtd[i] != NULL)
+ map_destroy(info->mtd[i]);
+ }
+
+ if (physmap_data->exit)
+ physmap_data->exit(dev);
+
+ return 0;
+}
+
+static void physmap_set_vpp(struct map_info *map, int state)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_data *physmap_data;
+ struct physmap_flash_info *info;
+ unsigned long flags;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ physmap_data = dev_get_platdata(&pdev->dev);
+
+ if (!physmap_data->set_vpp)
+ return;
+
+ info = platform_get_drvdata(pdev);
+
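+	/* Vpp is reference counted: several chips (or a concatenation of
+	 * them) may share one programming supply, so only the first "on"
+	 * and the last "off" actually toggle it. */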
+ spin_lock_irqsave(&info->vpp_lock, flags);
+ if (state) {
+ if (++info->vpp_refcnt == 1) /* first nested 'on' */
+ physmap_data->set_vpp(pdev, 1);
+ } else {
+ if (--info->vpp_refcnt == 0) /* last nested 'off' */
+ physmap_data->set_vpp(pdev, 0);
+ }
+ spin_unlock_irqrestore(&info->vpp_lock, flags);
+}
+
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "qinfo_probe", "map_rom", NULL };
+
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", "afs", NULL };
+
+static int physmap_flash_probe(struct platform_device *dev)
+{
+ struct physmap_flash_data *physmap_data;
+ struct physmap_flash_info *info;
+ const char * const *probe_type;
+ const char * const *part_types;
+ int err = 0;
+ int i;
+ int devices_found = 0;
+
+ physmap_data = dev_get_platdata(&dev->dev);
+ if (physmap_data == NULL)
+ return -ENODEV;
+
+ info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info),
+ GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ if (physmap_data->init) {
+ err = physmap_data->init(dev);
+ if (err)
+ goto err_out;
+ }
+
+ platform_set_drvdata(dev, info);
+
+ for (i = 0; i < dev->num_resources; i++) {
+ printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
+ (unsigned long long)resource_size(&dev->resource[i]),
+ (unsigned long long)dev->resource[i].start);
+
+ if (!devm_request_mem_region(&dev->dev,
+ dev->resource[i].start,
+ resource_size(&dev->resource[i]),
+ dev_name(&dev->dev))) {
+ dev_err(&dev->dev, "Could not reserve memory region\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ info->map[i].name = dev_name(&dev->dev);
+ info->map[i].phys = dev->resource[i].start;
+ info->map[i].size = resource_size(&dev->resource[i]);
+ info->map[i].bankwidth = physmap_data->width;
+ info->map[i].set_vpp = physmap_set_vpp;
+ info->map[i].pfow_base = physmap_data->pfow_base;
+ info->map[i].map_priv_1 = (unsigned long)dev;
+
+ info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
+ info->map[i].size);
+ if (info->map[i].virt == NULL) {
+ dev_err(&dev->dev, "Failed to ioremap flash region\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ simple_map_init(&info->map[i]);
+
+ probe_type = rom_probe_types;
+ if (physmap_data->probe_type == NULL) {
+ for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++)
+ info->mtd[i] = do_map_probe(*probe_type, &info->map[i]);
+ } else
+ info->mtd[i] = do_map_probe(physmap_data->probe_type, &info->map[i]);
+
+ if (info->mtd[i] == NULL) {
+ dev_err(&dev->dev, "map_probe failed\n");
+ err = -ENXIO;
+ goto err_out;
+ } else {
+ devices_found++;
+ }
+ info->mtd[i]->owner = THIS_MODULE;
+ info->mtd[i]->dev.parent = &dev->dev;
+ }
+
+ if (devices_found == 1) {
+ info->cmtd = info->mtd[0];
+ } else if (devices_found > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev));
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+ }
+ if (err)
+ goto err_out;
+
+ spin_lock_init(&info->vpp_lock);
+
+ part_types = physmap_data->part_probe_types ? : part_probe_types;
+
+ mtd_device_parse_register(info->cmtd, part_types, NULL,
+ physmap_data->parts, physmap_data->nr_parts);
+ return 0;
+
+err_out:
+ physmap_flash_remove(dev);
+ return err;
+}
+
+#ifdef CONFIG_PM
+static void physmap_flash_shutdown(struct platform_device *dev)
+{
+ struct physmap_flash_info *info = platform_get_drvdata(dev);
+ int i;
+
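+	/* A suspend/resume cycle puts each chip back into array (read) mode,
+	 * leaving the flash readable by firmware across the reboot. */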
+ for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
+ if (mtd_suspend(info->mtd[i]) == 0)
+ mtd_resume(info->mtd[i]);
+}
+#else
+#define physmap_flash_shutdown NULL
+#endif
+
+static struct platform_driver physmap_flash_driver = {
+ .probe = physmap_flash_probe,
+ .remove = physmap_flash_remove,
+ .shutdown = physmap_flash_shutdown,
+ .driver = {
+ .name = "physmap-flash",
+ },
+};
+
+
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
+static struct physmap_flash_data physmap_flash_data = {
+ .width = CONFIG_MTD_PHYSMAP_BANKWIDTH,
+};
+
+static struct resource physmap_flash_resource = {
+ .start = CONFIG_MTD_PHYSMAP_START,
+ .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device physmap_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &physmap_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &physmap_flash_resource,
+};
+#endif
+
+static int __init physmap_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&physmap_flash_driver);
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
+ if (err == 0) {
+ err = platform_device_register(&physmap_flash);
+ if (err)
+ platform_driver_unregister(&physmap_flash_driver);
+ }
+#endif
+
+ return err;
+}
+
+static void __exit physmap_exit(void)
+{
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
+ platform_device_unregister(&physmap_flash);
+#endif
+ platform_driver_unregister(&physmap_flash_driver);
+}
+
+module_init(physmap_init);
+module_exit(physmap_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Generic configurable MTD map driver");
+
+/* legacy platform drivers can't hotplug or coldplug */
+#ifndef CONFIG_MTD_PHYSMAP_COMPAT
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:physmap-flash");
+#endif
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
new file mode 100644
index 000000000..ff26e979b
--- /dev/null
+++ b/drivers/mtd/maps/physmap_of.c
@@ -0,0 +1,375 @@
+/*
+ * Flash mappings described by the OF (or flattened) device tree
+ *
+ * Copyright (C) 2006 MontaVista Software Inc.
+ * Author: Vitaly Wool <vwool@ru.mvista.com>
+ *
+ * Revised to handle newer style flash binding by:
+ * Copyright (C) 2007 David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+struct of_flash_list {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct resource *res;
+};
+
+struct of_flash {
+ struct mtd_info *cmtd;
+ int list_size; /* number of elements in of_flash_list */
+ struct of_flash_list list[0];
+};
+
+static int of_flash_remove(struct platform_device *dev)
+{
+ struct of_flash *info;
+ int i;
+
+ info = dev_get_drvdata(&dev->dev);
+ if (!info)
+ return 0;
+ dev_set_drvdata(&dev->dev, NULL);
+
+ if (info->cmtd) {
+ mtd_device_unregister(info->cmtd);
+ if (info->cmtd != info->list[0].mtd)
+ mtd_concat_destroy(info->cmtd);
+ }
+
+ for (i = 0; i < info->list_size; i++) {
+ if (info->list[i].mtd)
+ map_destroy(info->list[i].mtd);
+
+ if (info->list[i].map.virt)
+ iounmap(info->list[i].map.virt);
+
+ if (info->list[i].res) {
+ release_resource(info->list[i].res);
+ kfree(info->list[i].res);
+ }
+ }
+ return 0;
+}
+
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "map_rom" };
+
+/* Helper function to handle probing of the obsolete "direct-mapped"
+ * compatible binding, which has an extra "probe-type" property
+ * describing the type of flash probe necessary. */
+static struct mtd_info *obsolete_probe(struct platform_device *dev,
+ struct map_info *map)
+{
+ struct device_node *dp = dev->dev.of_node;
+ const char *of_probe;
+ struct mtd_info *mtd;
+ int i;
+
+ dev_warn(&dev->dev, "Device tree uses obsolete \"direct-mapped\" "
+ "flash binding\n");
+
+ of_probe = of_get_property(dp, "probe-type", NULL);
+ if (!of_probe) {
+ for (i = 0; i < ARRAY_SIZE(rom_probe_types); i++) {
+ mtd = do_map_probe(rom_probe_types[i], map);
+ if (mtd)
+ return mtd;
+ }
+ return NULL;
+ } else if (strcmp(of_probe, "CFI") == 0) {
+ return do_map_probe("cfi_probe", map);
+ } else if (strcmp(of_probe, "JEDEC") == 0) {
+ return do_map_probe("jedec_probe", map);
+ } else {
+ if (strcmp(of_probe, "ROM") != 0)
+ dev_warn(&dev->dev, "obsolete_probe: don't know probe "
+ "type '%s', mapping as rom\n", of_probe);
+ return do_map_probe("map_rom", map);
+ }
+}
+
+/* When partitions are set we look for a linux,part-probe property which
+   specifies the list of partition probers to use. If none is given then the
+   default is used. These take precedence over other device tree
+   information. */
+static const char * const part_probe_types_def[] = {
+ "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL };
+
+static const char * const *of_get_probes(struct device_node *dp)
+{
+ const char *cp;
+ int cplen;
+ unsigned int l;
+ unsigned int count;
+ const char **res;
+
+ cp = of_get_property(dp, "linux,part-probe", &cplen);
+ if (cp == NULL)
+ return part_probe_types_def;
+
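+	/* The property value is a list of NUL-terminated strings: count the
+	 * NULs, then build a NULL-terminated pointer array that points into
+	 * the property data itself (no copies of the strings are made). */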
+ count = 0;
+ for (l = 0; l != cplen; l++)
+ if (cp[l] == 0)
+ count++;
+
+	res = kzalloc((count + 1) * sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return part_probe_types_def;
+ count = 0;
+ while (cplen > 0) {
+ res[count] = cp;
+ l = strlen(cp) + 1;
+ cp += l;
+ cplen -= l;
+ count++;
+ }
+ return res;
+}
+
+static void of_free_probes(const char * const *probes)
+{
+ if (probes != part_probe_types_def)
+ kfree(probes);
+}
+
+static struct of_device_id of_flash_match[];
+static int of_flash_probe(struct platform_device *dev)
+{
+ const char * const *part_probe_types;
+ const struct of_device_id *match;
+ struct device_node *dp = dev->dev.of_node;
+ struct resource res;
+ struct of_flash *info;
+ const char *probe_type;
+ const __be32 *width;
+ int err;
+ int i;
+ int count;
+ const __be32 *p;
+ int reg_tuple_size;
+ struct mtd_info **mtd_list = NULL;
+ resource_size_t res_size;
+ struct mtd_part_parser_data ppdata;
+ bool map_indirect;
+ const char *mtd_name = NULL;
+
+ match = of_match_device(of_flash_match, &dev->dev);
+ if (!match)
+ return -EINVAL;
+ probe_type = match->data;
+
+ reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
+
+ of_property_read_string(dp, "linux,mtd-name", &mtd_name);
+
+ /*
+	 * Get the number of "reg" tuples. Scan for MTD devices on the areas
+	 * described by each "reg" region. This makes it possible (including
+	 * the concat support) to support the Intel P30 48F4400 chip, which
+	 * internally consists of 2 non-identical NOR chips on one die.
+ */
+ p = of_get_property(dp, "reg", &count);
+ if (count % reg_tuple_size != 0) {
+ dev_err(&dev->dev, "Malformed reg property on %s\n",
+ dev->dev.of_node->full_name);
+ err = -EINVAL;
+ goto err_flash_remove;
+ }
+ count /= reg_tuple_size;
+
+ map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
+
+ err = -ENOMEM;
+ info = devm_kzalloc(&dev->dev,
+ sizeof(struct of_flash) +
+ sizeof(struct of_flash_list) * count, GFP_KERNEL);
+ if (!info)
+ goto err_flash_remove;
+
+ dev_set_drvdata(&dev->dev, info);
+
+ mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
+ if (!mtd_list)
+ goto err_flash_remove;
+
+ for (i = 0; i < count; i++) {
+ err = -ENXIO;
+ if (of_address_to_resource(dp, i, &res)) {
+ /*
+ * Continue with next register tuple if this
+ * one is not mappable
+ */
+ continue;
+ }
+
+ dev_dbg(&dev->dev, "of_flash device: %pR\n", &res);
+
+ err = -EBUSY;
+ res_size = resource_size(&res);
+ info->list[i].res = request_mem_region(res.start, res_size,
+ dev_name(&dev->dev));
+ if (!info->list[i].res)
+ goto err_out;
+
+ err = -ENXIO;
+ width = of_get_property(dp, "bank-width", NULL);
+ if (!width) {
+ dev_err(&dev->dev, "Can't get bank width from device"
+ " tree\n");
+ goto err_out;
+ }
+
+ info->list[i].map.name = mtd_name ?: dev_name(&dev->dev);
+ info->list[i].map.phys = res.start;
+ info->list[i].map.size = res_size;
+ info->list[i].map.bankwidth = be32_to_cpup(width);
+ info->list[i].map.device_node = dp;
+
+ err = -ENOMEM;
+ info->list[i].map.virt = ioremap(info->list[i].map.phys,
+ info->list[i].map.size);
+ if (!info->list[i].map.virt) {
+ dev_err(&dev->dev, "Failed to ioremap() flash"
+ " region\n");
+ goto err_out;
+ }
+
+ simple_map_init(&info->list[i].map);
+
+ /*
+ * On some platforms (e.g. MPC5200) a direct 1:1 mapping
+ * may cause problems with JFFS2 usage, as the local bus (LPB)
+ * doesn't support unaligned accesses as implemented in the
+ * JFFS2 code via memcpy(). By setting NO_XIP, the
+ * flash will not be exposed directly to the MTD users
+ * (e.g. JFFS2) any more.
+ */
+ if (map_indirect)
+ info->list[i].map.phys = NO_XIP;
+
+ if (probe_type) {
+ info->list[i].mtd = do_map_probe(probe_type,
+ &info->list[i].map);
+ } else {
+ info->list[i].mtd = obsolete_probe(dev,
+ &info->list[i].map);
+ }
+
+ /* Fall back to mapping region as ROM */
+ if (!info->list[i].mtd) {
+ dev_warn(&dev->dev,
+ "do_map_probe() failed for type %s\n",
+ probe_type);
+
+ info->list[i].mtd = do_map_probe("map_rom",
+ &info->list[i].map);
+ }
+ mtd_list[i] = info->list[i].mtd;
+
+ err = -ENXIO;
+ if (!info->list[i].mtd) {
+ dev_err(&dev->dev, "do_map_probe() failed\n");
+ goto err_out;
+ } else {
+ info->list_size++;
+ }
+ info->list[i].mtd->owner = THIS_MODULE;
+ info->list[i].mtd->dev.parent = &dev->dev;
+ }
+
+ err = 0;
+ info->cmtd = NULL;
+ if (info->list_size == 1) {
+ info->cmtd = info->list[0].mtd;
+ } else if (info->list_size > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ info->cmtd = mtd_concat_create(mtd_list, info->list_size,
+ dev_name(&dev->dev));
+ }
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+
+ if (err)
+ goto err_out;
+
+ ppdata.of_node = dp;
+ part_probe_types = of_get_probes(dp);
+ mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata,
+ NULL, 0);
+ of_free_probes(part_probe_types);
+
+ kfree(mtd_list);
+
+ return 0;
+
+err_out:
+ kfree(mtd_list);
+err_flash_remove:
+ of_flash_remove(dev);
+
+ return err;
+}
+
+static struct of_device_id of_flash_match[] = {
+ {
+ .compatible = "cfi-flash",
+ .data = (void *)"cfi_probe",
+ },
+ {
+ /* FIXME: JEDEC chips can't be safely and reliably
+ * probed, although the mtd code gets it right in
+ * practice most of the time. We should use the
+ * vendor and device ids specified by the binding to
+ * bypass the heuristic probe code, but the mtd layer
+ * provides, at present, no interface for doing so
+ * :(. */
+ .compatible = "jedec-flash",
+ .data = (void *)"jedec_probe",
+ },
+ {
+ .compatible = "mtd-ram",
+ .data = (void *)"map_ram",
+ },
+ {
+ .compatible = "mtd-rom",
+ .data = (void *)"map_rom",
+ },
+ {
+ .type = "rom",
+ .compatible = "direct-mapped"
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_flash_match);
+
+static struct platform_driver of_flash_driver = {
+ .driver = {
+ .name = "of-flash",
+ .of_match_table = of_flash_match,
+ },
+ .probe = of_flash_probe,
+ .remove = of_flash_remove,
+};
+
+module_platform_driver(of_flash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
+MODULE_DESCRIPTION("Device tree based MTD map driver");
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
new file mode 100644
index 000000000..dc6df9abe
--- /dev/null
+++ b/drivers/mtd/maps/pismo.c
@@ -0,0 +1,292 @@
+/*
+ * PISMO memory driver - http://www.pismoworld.org/
+ *
+ * For ARM Realview and Versatile platforms
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/plat-ram.h>
+#include <linux/mtd/pismo.h>
+
+#define PISMO_NUM_CS 5
+
+struct pismo_cs_block {
+ u8 type;
+ u8 width;
+ __le16 access;
+ __le32 size;
+ u32 reserved[2];
+ char device[32];
+} __packed;
+
+struct pismo_eeprom {
+ struct pismo_cs_block cs[PISMO_NUM_CS];
+ char board[15];
+ u8 sum;
+} __packed;
+
+struct pismo_mem {
+ phys_addr_t base;
+ u32 size;
+ u16 access;
+ u8 width;
+ u8 type;
+};
+
+struct pismo_data {
+ struct i2c_client *client;
+ void (*vpp)(void *, int);
+ void *vpp_data;
+ struct platform_device *dev[PISMO_NUM_CS];
+};
+
+static void pismo_set_vpp(struct platform_device *pdev, int on)
+{
+ struct i2c_client *client = to_i2c_client(pdev->dev.parent);
+ struct pismo_data *pismo = i2c_get_clientdata(client);
+
+ pismo->vpp(pismo->vpp_data, on);
+}
+
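+/* The CS block's width field encodes log2 of the bus width in bytes
+ * (0 -> 1, 1 -> 2, 2 -> 4); anything wider is treated as invalid. */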
+static unsigned int pismo_width_to_bytes(unsigned int width)
+{
+ width &= 15;
+ if (width > 2)
+ return 0;
+ return 1 << width;
+}
+
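+/* EEPROM read: one write message sets the single-byte offset, then a read
+ * message fetches the payload - the usual small-EEPROM addressing scheme. */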
+static int pismo_eeprom_read(struct i2c_client *client, void *buf, u8 addr,
+ size_t size)
+{
+ int ret;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .len = sizeof(addr),
+ .buf = &addr,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = size,
+ .buf = buf,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+
+ return ret == ARRAY_SIZE(msg) ? size : -EIO;
+}
+
+static int pismo_add_device(struct pismo_data *pismo, int i,
+ struct pismo_mem *region, const char *name,
+ void *pdata, size_t psize)
+{
+ struct platform_device *dev;
+ struct resource res = { };
+ phys_addr_t base = region->base;
+ int ret;
+
+ if (base == ~0)
+ return -ENXIO;
+
+ res.start = base;
+ res.end = base + region->size - 1;
+ res.flags = IORESOURCE_MEM;
+
+ dev = platform_device_alloc(name, i);
+ if (!dev)
+ return -ENOMEM;
+ dev->dev.parent = &pismo->client->dev;
+
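+	/* The do { } while (0) below is a structured goto: any failure
+	 * breaks out to the common platform_device_put() error path. */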
+ do {
+ ret = platform_device_add_resources(dev, &res, 1);
+ if (ret)
+ break;
+
+ ret = platform_device_add_data(dev, pdata, psize);
+ if (ret)
+ break;
+
+ ret = platform_device_add(dev);
+ if (ret)
+ break;
+
+ pismo->dev[i] = dev;
+ return 0;
+ } while (0);
+
+ platform_device_put(dev);
+ return ret;
+}
+
+static int pismo_add_nor(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
+{
+ struct physmap_flash_data data = {
+ .width = region->width,
+ };
+
+ if (pismo->vpp)
+ data.set_vpp = pismo_set_vpp;
+
+ return pismo_add_device(pismo, i, region, "physmap-flash",
+ &data, sizeof(data));
+}
+
+static int pismo_add_sram(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
+{
+ struct platdata_mtd_ram data = {
+ .bankwidth = region->width,
+ };
+
+ return pismo_add_device(pismo, i, region, "mtd-ram",
+ &data, sizeof(data));
+}
+
+static void pismo_add_one(struct pismo_data *pismo, int i,
+ const struct pismo_cs_block *cs, phys_addr_t base)
+{
+ struct device *dev = &pismo->client->dev;
+ struct pismo_mem region;
+
+ region.base = base;
+ region.type = cs->type;
+ region.width = pismo_width_to_bytes(cs->width);
+ region.access = le16_to_cpu(cs->access);
+ region.size = le32_to_cpu(cs->size);
+
+ if (region.width == 0) {
+ dev_err(dev, "cs%u: bad width: %02x, ignoring\n", i, cs->width);
+ return;
+ }
+
+	/*
+	 * FIXME: may need to set up the platform's memory controller here,
+	 * but at the moment we assume that it has already been correctly
+	 * set up. The memory controller could also tell us the base address.
+	 */
+
+ dev_info(dev, "cs%u: %.32s: type %02x access %u00ps size %uK\n",
+ i, cs->device, region.type, region.access, region.size / 1024);
+
+ switch (region.type) {
+ case 0:
+ break;
+ case 1:
+ /* static DOC */
+ break;
+ case 2:
+ /* static NOR */
+ pismo_add_nor(pismo, i, &region);
+ break;
+ case 3:
+ /* static RAM */
+ pismo_add_sram(pismo, i, &region);
+ break;
+ }
+}
+
+static int pismo_remove(struct i2c_client *client)
+{
+ struct pismo_data *pismo = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pismo->dev); i++)
+ platform_device_unregister(pismo->dev[i]);
+
+ kfree(pismo);
+
+ return 0;
+}
+
+static int pismo_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct pismo_pdata *pdata = client->dev.platform_data;
+ struct pismo_eeprom eeprom;
+ struct pismo_data *pismo;
+ int ret, i;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "functionality mismatch\n");
+ return -EIO;
+ }
+
+ pismo = kzalloc(sizeof(*pismo), GFP_KERNEL);
+ if (!pismo)
+ return -ENOMEM;
+
+ pismo->client = client;
+ if (pdata) {
+ pismo->vpp = pdata->set_vpp;
+ pismo->vpp_data = pdata->vpp_data;
+ }
+ i2c_set_clientdata(client, pismo);
+
+ ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
+ goto exit_free;
+ }
+
+ dev_info(&client->dev, "%.15s board found\n", eeprom.board);
+
+ for (i = 0; i < ARRAY_SIZE(eeprom.cs); i++)
+ if (eeprom.cs[i].type != 0xff)
+ pismo_add_one(pismo, i, &eeprom.cs[i],
+ pdata->cs_addrs[i]);
+
+ return 0;
+
+ exit_free:
+ kfree(pismo);
+ return ret;
+}
+
+static const struct i2c_device_id pismo_id[] = {
+ { "pismo" },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, pismo_id);
+
+static struct i2c_driver pismo_driver = {
+ .driver = {
+ .name = "pismo",
+ .owner = THIS_MODULE,
+ },
+ .probe = pismo_probe,
+ .remove = pismo_remove,
+ .id_table = pismo_id,
+};
+
+static int __init pismo_init(void)
+{
+ BUILD_BUG_ON(sizeof(struct pismo_cs_block) != 48);
+ BUILD_BUG_ON(sizeof(struct pismo_eeprom) != 256);
+
+ return i2c_add_driver(&pismo_driver);
+}
+module_init(pismo_init);
+
+static void __exit pismo_exit(void)
+{
+ i2c_del_driver(&pismo_driver);
+}
+module_exit(pismo_exit);
+
+MODULE_AUTHOR("Russell King <linux@arm.linux.org.uk>");
+MODULE_DESCRIPTION("PISMO memory driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
new file mode 100644
index 000000000..4b65c08d1
--- /dev/null
+++ b/drivers/mtd/maps/plat-ram.c
@@ -0,0 +1,261 @@
+/* drivers/mtd/maps/plat-ram.c
+ *
+ * (c) 2004-2005 Simtec Electronics
+ * http://www.simtec.co.uk/products/SWLINUX/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Generic platform device based RAM map
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/plat-ram.h>
+
+#include <asm/io.h>
+
+/* private structure for each mtd platform ram device created */
+
+struct platram_info {
+ struct device *dev;
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct resource *area;
+ struct platdata_mtd_ram *pdata;
+};
+
+/* to_platram_info()
+ *
+ * device private data to struct platram_info conversion
+*/
+
+static inline struct platram_info *to_platram_info(struct platform_device *dev)
+{
+ return platform_get_drvdata(dev);
+}
+
+/* platram_setrw
+ *
+ * call the platform device's set rw/ro control
+ *
+ * to = 0 => read-only
+ * = 1 => read-write
+*/
+
+static inline void platram_setrw(struct platram_info *info, int to)
+{
+ if (info->pdata == NULL)
+ return;
+
+ if (info->pdata->set_rw != NULL)
+ (info->pdata->set_rw)(info->dev, to);
+}
+
+/* platram_remove
+ *
+ * called to remove the device from the driver's control
+*/
+
+static int platram_remove(struct platform_device *pdev)
+{
+ struct platram_info *info = to_platram_info(pdev);
+
+ dev_dbg(&pdev->dev, "removing device\n");
+
+ if (info == NULL)
+ return 0;
+
+ if (info->mtd) {
+ mtd_device_unregister(info->mtd);
+ map_destroy(info->mtd);
+ }
+
+ /* ensure ram is left read-only */
+
+ platram_setrw(info, PLATRAM_RO);
+
+ /* release resources */
+
+ if (info->area) {
+ release_resource(info->area);
+ kfree(info->area);
+ }
+
+ if (info->map.virt != NULL)
+ iounmap(info->map.virt);
+
+ kfree(info);
+
+ return 0;
+}
+
+/* platram_probe
+ *
+ * called from the driver core when a device matching our
+ * driver is found.
+*/
+
+static int platram_probe(struct platform_device *pdev)
+{
+ struct platdata_mtd_ram *pdata;
+ struct platram_info *info;
+ struct resource *res;
+ int err = 0;
+
+ dev_dbg(&pdev->dev, "probe entered\n");
+
+ if (dev_get_platdata(&pdev->dev) == NULL) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ info->dev = &pdev->dev;
+ info->pdata = pdata;
+
+ /* get the resource for the memory mapping */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource specified\n");
+ err = -ENOENT;
+ goto exit_free;
+ }
+
+ dev_dbg(&pdev->dev, "got platform resource %p (0x%llx)\n", res,
+ (unsigned long long)res->start);
+
+ /* setup map parameters */
+
+ info->map.phys = res->start;
+ info->map.size = resource_size(res);
+ info->map.name = pdata->mapname != NULL ?
+ (char *)pdata->mapname : (char *)pdev->name;
+ info->map.bankwidth = pdata->bankwidth;
+
+ /* register our usage of the memory area */
+
+ info->area = request_mem_region(res->start, info->map.size, pdev->name);
+ if (info->area == NULL) {
+ dev_err(&pdev->dev, "failed to request memory region\n");
+ err = -EIO;
+ goto exit_free;
+ }
+
+ /* remap the memory area */
+
+ info->map.virt = ioremap(res->start, info->map.size);
+ dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
+
+ if (info->map.virt == NULL) {
+ dev_err(&pdev->dev, "failed to ioremap() region\n");
+ err = -EIO;
+ goto exit_free;
+ }
+
+ simple_map_init(&info->map);
+
+ dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
+
+ /* probe for the right mtd map driver
+ * supplied by the platform_data struct */
+
+ if (pdata->map_probes) {
+ const char * const *map_probes = pdata->map_probes;
+
+ for ( ; !info->mtd && *map_probes; map_probes++)
+ info->mtd = do_map_probe(*map_probes , &info->map);
+ }
+ /* fallback to map_ram */
+ else
+ info->mtd = do_map_probe("map_ram", &info->map);
+
+ if (info->mtd == NULL) {
+ dev_err(&pdev->dev, "failed to probe for map_ram\n");
+ err = -ENOMEM;
+ goto exit_free;
+ }
+
+ info->mtd->owner = THIS_MODULE;
+ info->mtd->dev.parent = &pdev->dev;
+
+ platram_setrw(info, PLATRAM_RW);
+
+ /* check to see if there are any available partitions, or whether
+ * to add this device whole */
+
+ err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
+ pdata->partitions,
+ pdata->nr_partitions);
+ if (!err)
+ dev_info(&pdev->dev, "registered mtd device\n");
+
+ if (pdata->nr_partitions) {
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register the entire device\n");
+ }
+ }
+
+ return err;
+
+ exit_free:
+ platram_remove(pdev);
+ exit_error:
+ return err;
+}
+
+/* device driver info */
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:mtd-ram");
+
+static struct platform_driver platram_driver = {
+ .probe = platram_probe,
+ .remove = platram_remove,
+ .driver = {
+ .name = "mtd-ram",
+ },
+};
+
+module_platform_driver(platram_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("MTD platform RAM map driver");
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
new file mode 100644
index 000000000..744ca5cac
--- /dev/null
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -0,0 +1,229 @@
+/*
+ * Mapping of a custom board with both AMD CFI and JEDEC flash in partitions.
+ * Config with both CFI and JEDEC device support.
+ *
+ * Basically physmap.c with the addition of partitions and
+ * an array of mapping info to accommodate more than one flash type per board.
+ *
+ * Copyright 2005-2007 PMC-Sierra, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+
+#include <msp_prom.h>
+#include <msp_regs.h>
+
+
+static struct mtd_info **msp_flash;
+static struct mtd_partition **msp_parts;
+static struct map_info *msp_maps;
+static int fcnt;
+
+#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__)
+
+static int __init init_msp_flash(void)
+{
+ int i, j, ret = -ENOMEM;
+ int offset, coff;
+ char *env;
+ int pcnt;
+ char flash_name[] = "flash0";
+ char part_name[] = "flash0_0";
+ unsigned addr, size;
+
+ /* If ELB is disabled by "ful-mux" mode, we can't get at flash */
+ if ((*DEV_ID_REG & DEV_ID_SINGLE_PC) &&
+ (*ELB_1PC_EN_REG & SINGLE_PCCARD)) {
+ printk(KERN_NOTICE "Single PC Card mode: no flash access\n");
+ return -ENXIO;
+ }
+
+ /* examine the prom environment for flash devices */
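+	/* (flash0, flash1, ... - the name's digit is bumped until lookup fails) */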
+ for (fcnt = 0; (env = prom_getenv(flash_name)); fcnt++)
+ flash_name[5] = '0' + fcnt + 1;
+
+ if (fcnt < 1)
+ return -ENXIO;
+
+ printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
+
+	msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
+ if (!msp_flash)
+ return -ENOMEM;
+
+	msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
+ if (!msp_parts)
+ goto free_msp_flash;
+
+	msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
+ if (!msp_maps)
+ goto free_msp_parts;
+
+ /* loop over the flash devices, initializing each */
+ for (i = 0; i < fcnt; i++) {
+		/* examine the prom environment for flash partitions */
+ part_name[5] = '0' + i;
+ part_name[7] = '0';
+ for (pcnt = 0; (env = prom_getenv(part_name)); pcnt++)
+ part_name[7] = '0' + pcnt + 1;
+
+ if (pcnt == 0) {
+ printk(KERN_NOTICE "Skipping flash device %d "
+ "(no partitions defined)\n", i);
+ continue;
+ }
+
+ msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition),
+ GFP_KERNEL);
+ if (!msp_parts[i])
+ goto cleanup_loop;
+
+ /* now initialize the devices proper */
+ flash_name[5] = '0' + i;
+ env = prom_getenv(flash_name);
+
+ if (sscanf(env, "%x:%x", &addr, &size) < 2) {
+ ret = -ENXIO;
+ kfree(msp_parts[i]);
+ goto cleanup_loop;
+ }
+ addr = CPHYSADDR(addr);
+
+ printk(KERN_NOTICE
+ "MSP flash device \"%s\": 0x%08x at 0x%08x\n",
+ flash_name, size, addr);
+		/* This must match the actual size of the flash chip */
+ msp_maps[i].size = size;
+ msp_maps[i].phys = addr;
+
+ /*
+ * Platforms have a specific limit of the size of memory
+ * which may be mapped for flash:
+ */
+ if (size > CONFIG_MSP_FLASH_MAP_LIMIT)
+ size = CONFIG_MSP_FLASH_MAP_LIMIT;
+
+ msp_maps[i].virt = ioremap(addr, size);
+ if (msp_maps[i].virt == NULL) {
+ ret = -ENXIO;
+ kfree(msp_parts[i]);
+ goto cleanup_loop;
+ }
+
+ msp_maps[i].bankwidth = 1;
+ msp_maps[i].name = kmalloc(7, GFP_KERNEL);
+ if (!msp_maps[i].name) {
+ iounmap(msp_maps[i].virt);
+ kfree(msp_parts[i]);
+ goto cleanup_loop;
+ }
+
+ msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
+
+ for (j = 0; j < pcnt; j++) {
+ part_name[5] = '0' + i;
+ part_name[7] = '0' + j;
+
+ env = prom_getenv(part_name);
+
+ if (sscanf(env, "%x:%x:%n", &offset, &size,
+ &coff) < 2) {
+ ret = -ENXIO;
+ kfree(msp_maps[i].name);
+ iounmap(msp_maps[i].virt);
+ kfree(msp_parts[i]);
+ goto cleanup_loop;
+ }
+
+ msp_parts[i][j].size = size;
+ msp_parts[i][j].offset = offset;
+ msp_parts[i][j].name = env + coff;
+ }
+
+ /* now probe and add the device */
+ simple_map_init(&msp_maps[i]);
+ msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
+ if (msp_flash[i]) {
+ msp_flash[i]->owner = THIS_MODULE;
+ mtd_device_register(msp_flash[i], msp_parts[i], pcnt);
+ } else {
+ printk(KERN_ERR "map probe failed for flash\n");
+ ret = -ENXIO;
+ kfree(msp_maps[i].name);
+ iounmap(msp_maps[i].virt);
+ kfree(msp_parts[i]);
+ goto cleanup_loop;
+ }
+ }
+
+ return 0;
+
+cleanup_loop:
+	while (i--) {
+		if (!msp_flash[i])	/* skipped: no partitions defined */
+			continue;
+		mtd_device_unregister(msp_flash[i]);
+		map_destroy(msp_flash[i]);
+		kfree(msp_maps[i].name);
+		iounmap(msp_maps[i].virt);
+		kfree(msp_parts[i]);
+	}
+ kfree(msp_maps);
+free_msp_parts:
+ kfree(msp_parts);
+free_msp_flash:
+ kfree(msp_flash);
+ return ret;
+}
+
+static void __exit cleanup_msp_flash(void)
+{
+ int i;
+
+ for (i = 0; i < fcnt; i++) {
+ mtd_device_unregister(msp_flash[i]);
+ map_destroy(msp_flash[i]);
+ iounmap((void *)msp_maps[i].virt);
+
+ /* free the memory */
+ kfree(msp_maps[i].name);
+ kfree(msp_parts[i]);
+ }
+
+ kfree(msp_flash);
+ kfree(msp_parts);
+ kfree(msp_maps);
+}
+
+MODULE_AUTHOR("PMC-Sierra, Inc");
+MODULE_DESCRIPTION("MTD map driver for PMC-Sierra MSP boards");
+MODULE_LICENSE("GPL");
+
+module_init(init_msp_flash);
+module_exit(cleanup_msp_flash);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
new file mode 100644
index 000000000..12fa75df5
--- /dev/null
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -0,0 +1,144 @@
+/*
+ * Map driver for Intel XScale PXA2xx platforms.
+ *
+ * Author: Nicolas Pitre
+ * Copyright: (C) 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <mach/hardware.h>
+
+#include <asm/mach/flash.h>
+
+#define CACHELINESIZE 32
+
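+/* Invalidate the D-cache lines covering a just-programmed range: the map
+ * keeps a second, cacheable mapping for fast reads, which would otherwise
+ * return stale data after a write or erase. */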
+static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
+ ssize_t len)
+{
+ unsigned long start = (unsigned long)map->cached + from;
+ unsigned long end = start + len;
+
+ start &= ~(CACHELINESIZE - 1);
+ while (start < end) {
+ /* invalidate D cache line */
+ asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
+ start += CACHELINESIZE;
+ }
+}
+
+struct pxa2xx_flash_info {
+ struct mtd_info *mtd;
+ struct map_info map;
+};
+
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+static int pxa2xx_flash_probe(struct platform_device *pdev)
+{
+ struct flash_platform_data *flash = dev_get_platdata(&pdev->dev);
+ struct pxa2xx_flash_info *info;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->map.name = flash->name;
+ info->map.bankwidth = flash->width;
+ info->map.phys = res->start;
+ info->map.size = resource_size(res);
+
+ info->map.virt = ioremap(info->map.phys, info->map.size);
+ if (!info->map.virt) {
+ printk(KERN_WARNING "Failed to ioremap %s\n",
+ info->map.name);
+ return -ENOMEM;
+ }
+ info->map.cached =
+ ioremap_cache(info->map.phys, info->map.size);
+ if (!info->map.cached)
+ printk(KERN_WARNING "Failed to ioremap cached %s\n",
+ info->map.name);
+ info->map.inval_cache = pxa2xx_map_inval_cache;
+ simple_map_init(&info->map);
+
+ printk(KERN_NOTICE
+ "Probing %s at physical address 0x%08lx"
+ " (%d-bit bankwidth)\n",
+ info->map.name, (unsigned long)info->map.phys,
+ info->map.bankwidth * 8);
+
+ info->mtd = do_map_probe(flash->map_name, &info->map);
+
+ if (!info->mtd) {
+ iounmap((void *)info->map.virt);
+ if (info->map.cached)
+ iounmap(info->map.cached);
+ return -EIO;
+ }
+ info->mtd->owner = THIS_MODULE;
+
+ mtd_device_parse_register(info->mtd, probes, NULL, flash->parts,
+ flash->nr_parts);
+
+ platform_set_drvdata(pdev, info);
+ return 0;
+}
+
+static int pxa2xx_flash_remove(struct platform_device *dev)
+{
+ struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
+
+ mtd_device_unregister(info->mtd);
+
+ map_destroy(info->mtd);
+ iounmap(info->map.virt);
+ if (info->map.cached)
+ iounmap(info->map.cached);
+ kfree(info);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void pxa2xx_flash_shutdown(struct platform_device *dev)
+{
+ struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
+
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
+}
+#else
+#define pxa2xx_flash_shutdown NULL
+#endif
+
+static struct platform_driver pxa2xx_flash_driver = {
+ .driver = {
+ .name = "pxa2xx-flash",
+ },
+ .probe = pxa2xx_flash_probe,
+ .remove = pxa2xx_flash_remove,
+ .shutdown = pxa2xx_flash_shutdown,
+};
+
+module_platform_driver(pxa2xx_flash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
+MODULE_DESCRIPTION("MTD map driver for Intel XScale PXA2xx");
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
new file mode 100644
index 000000000..5a7551aa2
--- /dev/null
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -0,0 +1,137 @@
+/*
+ * rbtx4939-flash (based on physmap.c)
+ *
+ * This is a simplified physmap driver with a map_init callback function.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <asm/txx9/rbtx4939.h>
+
+struct rbtx4939_flash_info {
+ struct mtd_info *mtd;
+ struct map_info map;
+};
+
+static int rbtx4939_flash_remove(struct platform_device *dev)
+{
+ struct rbtx4939_flash_info *info;
+
+ info = platform_get_drvdata(dev);
+ if (!info)
+ return 0;
+
+ if (info->mtd) {
+ mtd_device_unregister(info->mtd);
+ map_destroy(info->mtd);
+ }
+ return 0;
+}
+
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", NULL };
+
+static int rbtx4939_flash_probe(struct platform_device *dev)
+{
+ struct rbtx4939_flash_data *pdata;
+ struct rbtx4939_flash_info *info;
+ struct resource *res;
+ const char * const *probe_type;
+ int err = 0;
+ unsigned long size;
+
+ pdata = dev_get_platdata(&dev->dev);
+ if (!pdata)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ info = devm_kzalloc(&dev->dev, sizeof(struct rbtx4939_flash_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(dev, info);
+
+ size = resource_size(res);
+ pr_notice("rbtx4939 platform flash device: %pR\n", res);
+
+ if (!devm_request_mem_region(&dev->dev, res->start, size,
+ dev_name(&dev->dev)))
+ return -EBUSY;
+
+ info->map.name = dev_name(&dev->dev);
+ info->map.phys = res->start;
+ info->map.size = size;
+ info->map.bankwidth = pdata->width;
+
+ info->map.virt = devm_ioremap(&dev->dev, info->map.phys, size);
+ if (!info->map.virt)
+ return -EBUSY;
+
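+	/* The board may supply its own map_init hook to install custom
+	 * accessors; otherwise fall back to the simple memcpy-based ones. */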
+ if (pdata->map_init)
+ (*pdata->map_init)(&info->map);
+ else
+ simple_map_init(&info->map);
+
+ probe_type = rom_probe_types;
+ for (; !info->mtd && *probe_type; probe_type++)
+ info->mtd = do_map_probe(*probe_type, &info->map);
+ if (!info->mtd) {
+ dev_err(&dev->dev, "map_probe failed\n");
+ err = -ENXIO;
+ goto err_out;
+ }
+ info->mtd->owner = THIS_MODULE;
+ err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
+ pdata->nr_parts);
+
+ if (err)
+ goto err_out;
+ return 0;
+
+err_out:
+ rbtx4939_flash_remove(dev);
+ return err;
+}
+
+#ifdef CONFIG_PM
+static void rbtx4939_flash_shutdown(struct platform_device *dev)
+{
+ struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
+
+ if (mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
+}
+#else
+#define rbtx4939_flash_shutdown NULL
+#endif
+
+static struct platform_driver rbtx4939_flash_driver = {
+ .probe = rbtx4939_flash_probe,
+ .remove = rbtx4939_flash_remove,
+ .shutdown = rbtx4939_flash_shutdown,
+ .driver = {
+ .name = "rbtx4939-flash",
+ },
+};
+
+module_platform_driver(rbtx4939_flash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RBTX4939 MTD map driver");
+MODULE_ALIAS("platform:rbtx4939-flash");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
new file mode 100644
index 000000000..892ad6ac6
--- /dev/null
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -0,0 +1,300 @@
+/*
+ * Flash memory access on SA11x0 based devices
+ *
+ * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
+
+#include <mach/hardware.h>
+#include <asm/sizes.h>
+#include <asm/mach/flash.h>
+
+struct sa_subdev_info {
+ char name[16];
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct flash_platform_data *plat;
+};
+
+struct sa_info {
+ struct mtd_info *mtd;
+ int num_subdev;
+ struct sa_subdev_info subdev[0];
+};
+
+static DEFINE_SPINLOCK(sa1100_vpp_lock);
+static int sa1100_vpp_refcnt;
+static void sa1100_set_vpp(struct map_info *map, int on)
+{
+ struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sa1100_vpp_lock, flags);
+ if (on) {
+ if (++sa1100_vpp_refcnt == 1) /* first nested 'on' */
+ subdev->plat->set_vpp(1);
+ } else {
+ if (--sa1100_vpp_refcnt == 0) /* last nested 'off' */
+ subdev->plat->set_vpp(0);
+ }
+ spin_unlock_irqrestore(&sa1100_vpp_lock, flags);
+}
+
+static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
+{
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ if (subdev->map.virt)
+ iounmap(subdev->map.virt);
+ release_mem_region(subdev->map.phys, subdev->map.size);
+}
+
+static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *res)
+{
+ unsigned long phys;
+ unsigned int size;
+ int ret;
+
+ phys = res->start;
+ size = res->end - phys + 1;
+
+ /*
+ * Retrieve the bankwidth from the MSC registers.
+ * We currently only implement CS0 and CS1 here.
+ */
+ switch (phys) {
+ default:
+ printk(KERN_WARNING "SA1100 flash: unknown base address "
+ "0x%08lx, assuming CS0\n", phys);
+
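+		/* fall through: treat an unknown base like CS0 */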
+ case SA1100_CS0_PHYS:
+ subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
+ break;
+
+ case SA1100_CS1_PHYS:
+ subdev->map.bankwidth = ((MSC0 >> 16) & MSC_RBW) ? 2 : 4;
+ break;
+ }
+
+ if (!request_mem_region(phys, size, subdev->name)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (subdev->plat->set_vpp)
+ subdev->map.set_vpp = sa1100_set_vpp;
+
+ subdev->map.phys = phys;
+ subdev->map.size = size;
+ subdev->map.virt = ioremap(phys, size);
+ if (!subdev->map.virt) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ simple_map_init(&subdev->map);
+
+ /*
+ * Now let's probe for the actual flash. Do it here since
+ * specific machine settings might have been set above.
+ */
+ subdev->mtd = do_map_probe(subdev->plat->map_name, &subdev->map);
+ if (subdev->mtd == NULL) {
+ ret = -ENXIO;
+ goto err;
+ }
+ subdev->mtd->owner = THIS_MODULE;
+
+ printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %uMiB, %d-bit\n",
+ phys, (unsigned)(subdev->mtd->size >> 20),
+ subdev->map.bankwidth * 8);
+
+ return 0;
+
+ err:
+ sa1100_destroy_subdev(subdev);
+ out:
+ return ret;
+}
+
+static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *plat)
+{
+ int i;
+
+ if (info->mtd) {
+ mtd_device_unregister(info->mtd);
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+ }
+
+ for (i = info->num_subdev - 1; i >= 0; i--)
+ sa1100_destroy_subdev(&info->subdev[i]);
+ kfree(info);
+
+ if (plat->exit)
+ plat->exit();
+}
+
+static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
+ struct flash_platform_data *plat)
+{
+ struct sa_info *info;
+ int nr, size, i, ret = 0;
+
+ /*
+ * Count number of devices.
+ */
+ for (nr = 0; ; nr++)
+ if (!platform_get_resource(pdev, IORESOURCE_MEM, nr))
+ break;
+
+ if (nr == 0) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ size = sizeof(struct sa_info) + sizeof(struct sa_subdev_info) * nr;
+
+ /*
+ * Allocate the map_info structs in one go.
+ */
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (plat->init) {
+ ret = plat->init();
+ if (ret)
+ goto err;
+ }
+
+ /*
+ * Claim and then map the memory regions.
+ */
+ for (i = 0; i < nr; i++) {
+ struct sa_subdev_info *subdev = &info->subdev[i];
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
+
+ subdev->map.name = subdev->name;
+ sprintf(subdev->name, "%s-%d", plat->name, i);
+ subdev->plat = plat;
+
+ ret = sa1100_probe_subdev(subdev, res);
+ if (ret)
+ break;
+ }
+
+ info->num_subdev = i;
+
+ /*
+ * ENXIO is special. It means we didn't find a chip when we probed.
+ */
+ if (ret != 0 && !(ret == -ENXIO && info->num_subdev > 0))
+ goto err;
+
+ /*
+ * If we found one device, don't bother with concat support. If
+ * we found multiple devices, use concat if we have it available,
+ * otherwise fail. Either way, it'll be called "sa1100".
+ */
+ if (info->num_subdev == 1) {
+ strcpy(info->subdev[0].name, plat->name);
+ info->mtd = info->subdev[0].mtd;
+ ret = 0;
+ } else if (info->num_subdev > 1) {
+ struct mtd_info *cdev[nr];
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ for (i = 0; i < info->num_subdev; i++)
+ cdev[i] = info->subdev[i].mtd;
+
+ info->mtd = mtd_concat_create(cdev, info->num_subdev,
+ plat->name);
+ if (info->mtd == NULL)
+ ret = -ENXIO;
+ }
+
+ if (ret == 0)
+ return info;
+
+ err:
+ sa1100_destroy(info, plat);
+ out:
+ return ERR_PTR(ret);
+}
+
+static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+
+static int sa1100_mtd_probe(struct platform_device *pdev)
+{
+ struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
+ struct sa_info *info;
+ int err;
+
+ if (!plat)
+ return -ENODEV;
+
+ info = sa1100_setup_mtd(pdev, plat);
+ if (IS_ERR(info)) {
+ err = PTR_ERR(info);
+ goto out;
+ }
+
+ /*
+ * Partition selection stuff.
+ */
+ mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts,
+ plat->nr_parts);
+
+ platform_set_drvdata(pdev, info);
+ err = 0;
+
+ out:
+ return err;
+}
+
+static int sa1100_mtd_remove(struct platform_device *pdev)
+{
+ struct sa_info *info = platform_get_drvdata(pdev);
+ struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
+
+ sa1100_destroy(info, plat);
+
+ return 0;
+}
+
+static struct platform_driver sa1100_mtd_driver = {
+ .probe = sa1100_mtd_probe,
+ .remove = sa1100_mtd_remove,
+ .driver = {
+ .name = "sa1100-mtd",
+ },
+};
+
+module_platform_driver(sa1100_mtd_driver);
+
+MODULE_AUTHOR("Nicolas Pitre");
+MODULE_DESCRIPTION("SA1100 CFI map driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sa1100-mtd");
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
new file mode 100644
index 000000000..556a2dfe9
--- /dev/null
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -0,0 +1,236 @@
+/* sbc_gxx.c -- MTD map driver for Arcom Control Systems SBC-MediaGX,
+ SBC-GXm and SBC-GX1 series boards.
+
+ Copyright (C) 2001 Arcom Control System Ltd
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+The SBC-MediaGX / SBC-GXx has up to 16 MiB of
+Intel StrataFlash (28F320/28F640) in x8 mode.
+
+This driver uses the CFI probe and Intel Extended Command Set drivers.
+
+The flash is accessed as follows:
+
+ 16 KiB memory window at 0xdc000-0xdffff
+
+ Two IO address locations for paging
+
+ 0x258
+ bit 0-7: address bit 14-21
+ 0x259
+ bit 0-1: address bit 22-23
+ bit 7: 0 - reset/powered down
+ 1 - device enabled
+
+The single flash device is divided into 3 partitions which appear as
+separate MTD devices.
+
+25/04/2001 AJL (Arcom) Modified signon strings and partition sizes
+ (to support bzImages up to 638KiB-ish)
+*/
+
+// Includes
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+// Defines
+
+// - Hardware specific
+
+#define WINDOW_START 0xdc000
+
+/* Number of bits in offset. */
+#define WINDOW_SHIFT 14
+#define WINDOW_LENGTH (1 << WINDOW_SHIFT)
+
+/* The bits for the offset into the window. */
+#define WINDOW_MASK (WINDOW_LENGTH-1)
+#define PAGE_IO 0x258
+#define PAGE_IO_SIZE 2
+
+/* bit 7 of 0x259 must be 1 to enable device. */
+#define DEVICE_ENABLE 0x8000
+
+// - Flash / Partition sizing
+
+#define MAX_SIZE_KiB 16384
+#define BOOT_PARTITION_SIZE_KiB 768
+#define DATA_PARTITION_SIZE_KiB 1280
+#define APP_PARTITION_SIZE_KiB 6144
+
+// Globals
+
+static volatile int page_in_window = -1; // Current page in window.
+static void __iomem *iomapadr;
+static DEFINE_SPINLOCK(sbc_gxx_spin);
+
+/* partition_info gives details on the logical partitions that the single
+ * flash device is split into. If the size is zero we use up to the end of
+ * the device. */
+static struct mtd_partition partition_info[]={
+ { .name = "SBC-GXx flash boot partition",
+ .offset = 0,
+ .size = BOOT_PARTITION_SIZE_KiB*1024 },
+ { .name = "SBC-GXx flash data partition",
+ .offset = BOOT_PARTITION_SIZE_KiB*1024,
+ .size = (DATA_PARTITION_SIZE_KiB)*1024 },
+ { .name = "SBC-GXx flash application partition",
+ .offset = (BOOT_PARTITION_SIZE_KiB+DATA_PARTITION_SIZE_KiB)*1024 }
+};
+
+#define NUM_PARTITIONS 3
+
+static inline void sbc_gxx_page(struct map_info *map, unsigned long ofs)
+{
+ unsigned long page = ofs >> WINDOW_SHIFT;
+
+ if( page!=page_in_window ) {
+ outw( page | DEVICE_ENABLE, PAGE_IO );
+ page_in_window = page;
+ }
+}
+
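+/*
+ * Worked example (editorial note, not part of the original driver): a
+ * read at flash offset 0x123456 selects page 0x123456 >> 14 = 0x48, so
+ * 0x8048 (page | DEVICE_ENABLE) is written to port 0x258; the byte is
+ * then found at window offset 0x123456 & WINDOW_MASK = 0x3456.
+ */
+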
+
+static map_word sbc_gxx_read8(struct map_info *map, unsigned long ofs)
+{
+ map_word ret;
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, ofs);
+ ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
+ spin_unlock(&sbc_gxx_spin);
+ return ret;
+}
+
+static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ while(len) {
+ unsigned long thislen = len;
+ if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
+ thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
+
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, from);
+ memcpy_fromio(to, iomapadr + (from & WINDOW_MASK), thislen);
+ spin_unlock(&sbc_gxx_spin);
+ to += thislen;
+ from += thislen;
+ len -= thislen;
+ }
+}
+
+static void sbc_gxx_write8(struct map_info *map, map_word d, unsigned long adr)
+{
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, adr);
+ writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
+ spin_unlock(&sbc_gxx_spin);
+}
+
+static void sbc_gxx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ while(len) {
+ unsigned long thislen = len;
+ if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
+ thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
+
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, to);
+ memcpy_toio(iomapadr + (to & WINDOW_MASK), from, thislen);
+ spin_unlock(&sbc_gxx_spin);
+ to += thislen;
+ from += thislen;
+ len -= thislen;
+ }
+}
+
+static struct map_info sbc_gxx_map = {
+ .name = "SBC-GXx flash",
+ .phys = NO_XIP,
+	.size = MAX_SIZE_KiB*1024, /* this must be set to the maximum possible
+				      amount of flash so the cfi probe routines
+				      find all the chips */
+ .bankwidth = 1,
+ .read = sbc_gxx_read8,
+ .copy_from = sbc_gxx_copy_from,
+ .write = sbc_gxx_write8,
+ .copy_to = sbc_gxx_copy_to
+};
+
+/* MTD device for all of the flash. */
+static struct mtd_info *all_mtd;
+
+static void cleanup_sbc_gxx(void)
+{
+ if( all_mtd ) {
+ mtd_device_unregister(all_mtd);
+ map_destroy( all_mtd );
+ }
+
+ iounmap(iomapadr);
+ release_region(PAGE_IO,PAGE_IO_SIZE);
+}
+
+static int __init init_sbc_gxx(void)
+{
+ iomapadr = ioremap(WINDOW_START, WINDOW_LENGTH);
+ if (!iomapadr) {
+ printk( KERN_ERR"%s: failed to ioremap memory region\n",
+ sbc_gxx_map.name );
+ return -EIO;
+ }
+
+ if (!request_region( PAGE_IO, PAGE_IO_SIZE, "SBC-GXx flash")) {
+ printk( KERN_ERR"%s: IO ports 0x%x-0x%x in use\n",
+ sbc_gxx_map.name,
+ PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1 );
+ iounmap(iomapadr);
+ return -EAGAIN;
+ }
+
+
+ printk( KERN_INFO"%s: IO:0x%x-0x%x MEM:0x%x-0x%x\n",
+ sbc_gxx_map.name,
+ PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1,
+ WINDOW_START, WINDOW_START+WINDOW_LENGTH-1 );
+
+ /* Probe for chip. */
+ all_mtd = do_map_probe( "cfi_probe", &sbc_gxx_map );
+ if( !all_mtd ) {
+ cleanup_sbc_gxx();
+ return -ENXIO;
+ }
+
+ all_mtd->owner = THIS_MODULE;
+
+ /* Create MTD devices for each partition. */
+ mtd_device_register(all_mtd, partition_info, NUM_PARTITIONS);
+
+ return 0;
+}
+
+module_init(init_sbc_gxx);
+module_exit(cleanup_sbc_gxx);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arcom Control Systems Ltd.");
+MODULE_DESCRIPTION("MTD map driver for SBC-GXm and SBC-GX1 series boards");
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
new file mode 100644
index 000000000..093edd51b
--- /dev/null
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -0,0 +1,302 @@
+/* sc520cdp.c -- MTD map driver for AMD SC520 Customer Development Platform
+ *
+ * Copyright (C) 2001 Sysgo Real-Time Solutions GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ *
+ * The SC520CDP is an evaluation board for the Elan SC520 processor available
+ * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size,
+ * and up to 512 KiB of 8-bit DIL Flash ROM.
+ * For details see http://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/concat.h>
+
+/*
+** The Embedded Systems BIOS decodes the first FLASH starting at
+** 0x8400000. This is a *terrible* place for it because accessing
+** the flash at this location causes the A22 address line to be high
+** (that is what 0x8400000 works out to in binary). But this is the highest
+** order address line on the raw flash devices themselves!!
+** This causes the top HALF of the flash to be accessed first. Beyond
+** the physical limits of the flash, the flash chip aliases over (to
+** 0x8800000), which causes the bottom half to be accessed. This splits the
+** flash into two and inverts it! If you then try to access this from another
+** program that does NOT do this insanity, then you *will* access the
+** first half of the flash, but not find what you expect there. That
+** stuff is in the *second* half! Similarly, the address used by the
+** BIOS for the second FLASH bank is also quite a bad choice.
+** If REPROGRAM_PAR is defined below (the default), then this driver will
+** choose more useful addresses for the FLASH banks by reprogramming the
+** responsible PARxx registers in the SC520's MMCR region. This will
+** cause the settings to be incompatible with the BIOS's settings, which
+** shouldn't be a problem since you are running Linux (i.e. the BIOS is
+** not much use anyway). However, if you need to be compatible with
+** the BIOS for some reason, just undefine REPROGRAM_PAR.
+*/
+#define REPROGRAM_PAR
+
+#ifdef REPROGRAM_PAR
+
+/* These are the addresses we want.. */
+#define WINDOW_ADDR_0 0x08800000
+#define WINDOW_ADDR_1 0x09000000
+#define WINDOW_ADDR_2 0x09800000
+
+/* .. and these are the addresses the BIOS gives us */
+#define WINDOW_ADDR_0_BIOS 0x08400000
+#define WINDOW_ADDR_1_BIOS 0x08c00000
+#define WINDOW_ADDR_2_BIOS 0x09400000
+
+#else
+
+#define WINDOW_ADDR_0 0x08400000
+#define WINDOW_ADDR_1 0x08C00000
+#define WINDOW_ADDR_2 0x09400000
+
+#endif
+
+#define WINDOW_SIZE_0 0x00800000
+#define WINDOW_SIZE_1 0x00800000
+#define WINDOW_SIZE_2 0x00080000
+
+
+static struct map_info sc520cdp_map[] = {
+ {
+ .name = "SC520CDP Flash Bank #0",
+ .size = WINDOW_SIZE_0,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR_0
+ },
+ {
+ .name = "SC520CDP Flash Bank #1",
+ .size = WINDOW_SIZE_1,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR_1
+ },
+ {
+ .name = "SC520CDP DIL Flash",
+ .size = WINDOW_SIZE_2,
+ .bankwidth = 1,
+ .phys = WINDOW_ADDR_2
+ },
+};
+
+#define NUM_FLASH_BANKS ARRAY_SIZE(sc520cdp_map)
+
+static struct mtd_info *mymtd[NUM_FLASH_BANKS];
+static struct mtd_info *merged_mtd;
+
+#ifdef REPROGRAM_PAR
+
+/*
+** The SC520 MMCR (memory mapped control register) region resides
+** at 0xFFFEF000. The 16 Programmable Address Region (PAR) registers
+** are at offset 0x88 in the MMCR:
+*/
+#define SC520_MMCR_BASE 0xFFFEF000
+#define SC520_MMCR_EXTENT 0x1000
+#define SC520_PAR(x) ((0x88/sizeof(unsigned long)) + (x))
+#define NUM_SC520_PAR 16 /* total number of PAR registers */
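+/* editorial note: assuming 32-bit longs on this platform, SC520_PAR(0)
+ * is word index 0x88/4 = 34, i.e. MMCR byte offset 0x88 as stated above */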
+
+/*
+** The highest three bits in a PAR register determine what target
+** device is controlled by this PAR. Here, only ROMCS? and BOOTCS
+** devices are of interest.
+*/
+#define SC520_PAR_BOOTCS (0x4<<29)
+#define SC520_PAR_ROMCS0 (0x5<<29)
+#define SC520_PAR_ROMCS1 (0x6<<29)
+#define SC520_PAR_TRGDEV (0x7<<29)
+
+/*
+** Bits 28 thru 26 determine some attributes for the
+** region controlled by the PAR. (We only use non-cacheable)
+*/
+#define SC520_PAR_WRPROT (1<<26) /* write protected */
+#define SC520_PAR_NOCACHE (1<<27) /* non-cacheable */
+#define SC520_PAR_NOEXEC (1<<28) /* code execution denied */
+
+
+/*
+** Bit 25 determines the granularity: 4K or 64K
+*/
+#define SC520_PAR_PG_SIZ4 (0<<25)
+#define SC520_PAR_PG_SIZ64 (1<<25)
+
+/*
+** Build a value to be written into a PAR register.
+** We only need ROM entries, 64K page size:
+*/
+#define SC520_PAR_ENTRY(trgdev, address, size) \
+ ((trgdev) | SC520_PAR_NOCACHE | SC520_PAR_PG_SIZ64 | \
+ (address) >> 16 | (((size) >> 16) - 1) << 14)
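+
+/*
+ * Worked example (editorial, not part of the original source): for
+ * Flash Bank #0 below,
+ *   SC520_PAR_ENTRY(SC520_PAR_ROMCS0, 0x08800000, 0x00800000)
+ *     = 0xa0000000 | 0x08000000 | 0x02000000 | 0x0880 | (0x7f << 14)
+ *     = 0xaa1fc880
+ * i.e. ROMCS0, non-cacheable, 64 KiB pages, 8 MiB at 0x08800000.
+ */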
+
+struct sc520_par_table
+{
+ unsigned long trgdev;
+ unsigned long new_par;
+ unsigned long default_address;
+};
+
+static const struct sc520_par_table par_table[NUM_FLASH_BANKS] =
+{
+ { /* Flash Bank #0: selected by ROMCS0 */
+ SC520_PAR_ROMCS0,
+ SC520_PAR_ENTRY(SC520_PAR_ROMCS0, WINDOW_ADDR_0, WINDOW_SIZE_0),
+ WINDOW_ADDR_0_BIOS
+ },
+ { /* Flash Bank #1: selected by ROMCS1 */
+ SC520_PAR_ROMCS1,
+ SC520_PAR_ENTRY(SC520_PAR_ROMCS1, WINDOW_ADDR_1, WINDOW_SIZE_1),
+ WINDOW_ADDR_1_BIOS
+ },
+ { /* DIL (BIOS) Flash: selected by BOOTCS */
+ SC520_PAR_BOOTCS,
+ SC520_PAR_ENTRY(SC520_PAR_BOOTCS, WINDOW_ADDR_2, WINDOW_SIZE_2),
+ WINDOW_ADDR_2_BIOS
+ }
+};
+
+
+static void sc520cdp_setup_par(void)
+{
+ unsigned long __iomem *mmcr;
+ unsigned long mmcr_val;
+ int i, j;
+
+ /* map in SC520's MMCR area */
+ mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
+ if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */
+ /* force physical address fields to BIOS defaults: */
+ for(i = 0; i < NUM_FLASH_BANKS; i++)
+ sc520cdp_map[i].phys = par_table[i].default_address;
+ return;
+ }
+
+ /*
+ ** Find the PARxx registers that are responsible for activating
+ ** ROMCS0, ROMCS1 and BOOTCS. Reprogram each of these with a
+ ** new value from the table.
+ */
+ for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */
+ for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */
+ mmcr_val = readl(&mmcr[SC520_PAR(j)]);
+ /* if target device field matches, reprogram the PAR */
+ if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev)
+ {
+ writel(par_table[i].new_par, &mmcr[SC520_PAR(j)]);
+ break;
+ }
+ }
+ if(j == NUM_SC520_PAR)
+ { /* no matching PAR found: try default BIOS address */
+ printk(KERN_NOTICE "Could not find PAR responsible for %s\n",
+ sc520cdp_map[i].name);
+ printk(KERN_NOTICE "Trying default address 0x%lx\n",
+ par_table[i].default_address);
+ sc520cdp_map[i].phys = par_table[i].default_address;
+ }
+ }
+ iounmap(mmcr);
+}
+#endif
+
+
+static int __init init_sc520cdp(void)
+{
+ int i, devices_found = 0;
+
+#ifdef REPROGRAM_PAR
+ /* reprogram PAR registers so flash appears at the desired addresses */
+ sc520cdp_setup_par();
+#endif
+
+ for (i = 0; i < NUM_FLASH_BANKS; i++) {
+ printk(KERN_NOTICE "SC520 CDP flash device: 0x%Lx at 0x%Lx\n",
+ (unsigned long long)sc520cdp_map[i].size,
+ (unsigned long long)sc520cdp_map[i].phys);
+
+ sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size);
+
+ if (!sc520cdp_map[i].virt) {
+ printk("Failed to ioremap_nocache\n");
+ return -EIO;
+ }
+
+ simple_map_init(&sc520cdp_map[i]);
+
+ mymtd[i] = do_map_probe("cfi_probe", &sc520cdp_map[i]);
+ if(!mymtd[i])
+ mymtd[i] = do_map_probe("jedec_probe", &sc520cdp_map[i]);
+ if(!mymtd[i])
+ mymtd[i] = do_map_probe("map_rom", &sc520cdp_map[i]);
+
+ if (mymtd[i]) {
+ mymtd[i]->owner = THIS_MODULE;
+ ++devices_found;
+ }
+ else {
+ iounmap(sc520cdp_map[i].virt);
+ }
+ }
+ if(devices_found >= 2) {
+ /* Combine the two flash banks into a single MTD device & register it: */
+ merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1");
+ if(merged_mtd)
+ mtd_device_register(merged_mtd, NULL, 0);
+ }
+ if(devices_found == 3) /* register the third (DIL-Flash) device */
+ mtd_device_register(mymtd[2], NULL, 0);
+ return(devices_found ? 0 : -ENXIO);
+}
+
+static void __exit cleanup_sc520cdp(void)
+{
+ int i;
+
+ if (merged_mtd) {
+ mtd_device_unregister(merged_mtd);
+ mtd_concat_destroy(merged_mtd);
+ }
+ if (mymtd[2])
+ mtd_device_unregister(mymtd[2]);
+
+ for (i = 0; i < NUM_FLASH_BANKS; i++) {
+ if (mymtd[i])
+ map_destroy(mymtd[i]);
+ if (sc520cdp_map[i].virt) {
+ iounmap(sc520cdp_map[i].virt);
+ sc520cdp_map[i].virt = NULL;
+ }
+ }
+}
+
+module_init(init_sc520cdp);
+module_exit(cleanup_sc520cdp);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
+MODULE_DESCRIPTION("MTD map driver for AMD SC520 Customer Development Platform");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
new file mode 100644
index 000000000..b7a22a612
--- /dev/null
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -0,0 +1,238 @@
+/*
+ * MTD map driver for BIOS Flash on Intel SCB2 boards
+ * Copyright (C) 2002 Sun Microsystems, Inc.
+ * Tim Hockin <thockin@sun.com>
+ *
+ * A few notes on this MTD map:
+ *
+ * This was developed with a small number of SCB2 boards to test on.
+ * Hopefully, Intel has not introduced too many unaccounted-for variables in
+ * the making of this board.
+ *
+ * The BIOS marks its own memory region as 'reserved' in the e820 map. We
+ * try to request it here, but if it fails, we carry on anyway.
+ *
+ * This is how the chip is attached, so said the schematic:
+ * * a 4 MiB (32 Mib) 16 bit chip
+ * * a 1 MiB memory region
+ * * A20 and A21 pulled up
+ * * D8-D15 ignored
+ * What this means is that, while we are addressing bytes linearly, we are
+ * really addressing words, and discarding the other byte. This means that
+ * the chip MUST BE at least 2 MiB. This also means that every block is
+ * actually half as big as the chip reports. It also means that accesses of
+ * logical address 0 hit higher-address sections of the chip, not physical 0.
+ * One can only hope that these 4MiB x16 chips were a lot cheaper than 1MiB x8
+ * chips.
+ *
+ * This driver assumes the chip is not write-protected by an external signal.
+ * As of this writing, that is true, but that may change, just to spite me.
+ *
+ * The actual BIOS layout has been mostly reverse engineered. Intel BIOS
+ * updates for this board include 10 related (*.bio - &.bi9) binary files and
+ * another separate (*.bbo) binary file. The 10 files are 64k of data + a
+ * small header. If the headers are stripped off, the 10 64k files can be
+ * concatenated into a 640k image. This is your BIOS image, proper. The
+ * separate .bbo file also has a small header. It is the 'Boot Block'
+ * recovery BIOS. Once the header is stripped, no further prep is needed.
+ * As best I can tell, the BIOS is arranged as such:
+ * offset 0x00000 to 0x4ffff (320k): unknown - SCSI BIOS, etc?
+ * offset 0x50000 to 0xeffff (640k): BIOS proper
+ * offset 0xf0000 to 0xfffff (64k): Boot Block region
+ *
+ * Intel's BIOS update program flashes the BIOS and Boot Block in separate
+ * steps. Probably a wise thing to do.
+ */
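+
+/*
+ * Editorial illustration (assumption, not from the original author):
+ * because D8-D15 are discarded, logical byte N of the 1 MiB window
+ * reads the low byte of 16-bit chip word N, i.e. chip byte address
+ * 2 * N.  That is why every erase block behaves half as big as the
+ * chip reports, and why the fixup below halves the erase sizes.
+ */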
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#define MODNAME "scb2_flash"
+#define SCB2_ADDR 0xfff00000
+#define SCB2_WINDOW 0x00100000
+
+
+static void __iomem *scb2_ioaddr;
+static struct mtd_info *scb2_mtd;
+static struct map_info scb2_map = {
+ .name = "SCB2 BIOS Flash",
+ .size = 0,
+ .bankwidth = 1,
+};
+static int region_fail;
+
+static int scb2_fixup_mtd(struct mtd_info *mtd)
+{
+ int i;
+ int done = 0;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /* barf if this doesn't look right */
+ if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) {
+ printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
+ cfi->cfiq->InterfaceDesc);
+ return -1;
+ }
+
+ /* I wasn't here. I didn't see. dwmw2. */
+
+ /* the chip is sometimes bigger than the map - what a waste */
+ mtd->size = map->size;
+
+ /*
+ * We only REALLY get half the chip, due to the way it is
+ * wired up - D8-D15 are tossed away. We read linear bytes,
+ * but in reality we are getting 1/2 of each 16-bit read,
+ * which LOOKS linear to us. Because CFI code accounts for
+ * things like lock/unlock/erase by eraseregions, we need to
+ * fudge them to reflect this. Erases go like this:
+ * * send an erase to an address
+ * * the chip samples the address and erases the block
+ * * add the block erasesize to the address and repeat
+ * -- the problem is that addresses are 16-bit addressable
+ * -- we end up erasing every-other block
+ */
+ mtd->erasesize /= 2;
+ for (i = 0; i < mtd->numeraseregions; i++) {
+ struct mtd_erase_region_info *region = &mtd->eraseregions[i];
+ region->erasesize /= 2;
+ }
+
+ /*
+ * If the chip is bigger than the map, it is wired with the high
+ * address lines pulled up. This makes us access the top portion of
+ * the chip, so all our erase-region info is wrong. Start cutting from
+ * the bottom.
+ */
+ for (i = 0; !done && i < mtd->numeraseregions; i++) {
+ struct mtd_erase_region_info *region = &mtd->eraseregions[i];
+
+ if (region->numblocks * region->erasesize > mtd->size) {
+ region->numblocks = ((unsigned long)mtd->size /
+ region->erasesize);
+ done = 1;
+ } else {
+ region->numblocks = 0;
+ }
+ region->offset = 0;
+ }
+
+ return 0;
+}
+
+/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
+#define CSB5_FCR 0x41
+#define CSB5_FCR_DECODE_ALL 0x0e
+static int scb2_flash_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ u8 reg;
+
+ /* enable decoding of the flash region in the south bridge */
+ pci_read_config_byte(dev, CSB5_FCR, &reg);
+ pci_write_config_byte(dev, CSB5_FCR, reg | CSB5_FCR_DECODE_ALL);
+
+ if (!request_mem_region(SCB2_ADDR, SCB2_WINDOW, scb2_map.name)) {
+ /*
+ * The BIOS seems to mark the flash region as 'reserved'
+ * in the e820 map. Warn and go about our business.
+ */
+ printk(KERN_WARNING MODNAME
+ ": warning - can't reserve rom window, continuing\n");
+ region_fail = 1;
+ }
+
+ /* remap the IO window (w/o caching) */
+ scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
+ if (!scb2_ioaddr) {
+ printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+ return -ENOMEM;
+ }
+
+ scb2_map.phys = SCB2_ADDR;
+ scb2_map.virt = scb2_ioaddr;
+ scb2_map.size = SCB2_WINDOW;
+
+ simple_map_init(&scb2_map);
+
+ /* try to find a chip */
+ scb2_mtd = do_map_probe("cfi_probe", &scb2_map);
+
+ if (!scb2_mtd) {
+ printk(KERN_ERR MODNAME ": flash probe failed!\n");
+ iounmap(scb2_ioaddr);
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+ return -ENODEV;
+ }
+
+ scb2_mtd->owner = THIS_MODULE;
+ if (scb2_fixup_mtd(scb2_mtd) < 0) {
+ mtd_device_unregister(scb2_mtd);
+ map_destroy(scb2_mtd);
+ iounmap(scb2_ioaddr);
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+ return -ENODEV;
+ }
+
+ printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n",
+ (unsigned long long)scb2_mtd->size,
+ (unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
+
+ mtd_device_register(scb2_mtd, NULL, 0);
+
+ return 0;
+}
+
+static void scb2_flash_remove(struct pci_dev *dev)
+{
+ if (!scb2_mtd)
+ return;
+
+ /* disable flash writes */
+ mtd_lock(scb2_mtd, 0, scb2_mtd->size);
+
+ mtd_device_unregister(scb2_mtd);
+ map_destroy(scb2_mtd);
+
+ iounmap(scb2_ioaddr);
+ scb2_ioaddr = NULL;
+
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+}
+
+static struct pci_device_id scb2_flash_pci_ids[] = {
+ {
+ .vendor = PCI_VENDOR_ID_SERVERWORKS,
+ .device = PCI_DEVICE_ID_SERVERWORKS_CSB5,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID
+ },
+ { 0, }
+};
+
+static struct pci_driver scb2_flash_driver = {
+ .name = "Intel SCB2 BIOS Flash",
+ .id_table = scb2_flash_pci_ids,
+ .probe = scb2_flash_probe,
+ .remove = scb2_flash_remove,
+};
+
+module_pci_driver(scb2_flash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
+MODULE_DESCRIPTION("MTD map driver for Intel SCB2 BIOS Flash");
+MODULE_DEVICE_TABLE(pci, scb2_flash_pci_ids);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
new file mode 100644
index 000000000..f1c1f737d
--- /dev/null
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -0,0 +1,225 @@
+/* linux/drivers/mtd/maps/scx200_docflash.c
+
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
+
+ National Semiconductor SCx200 flash mapped with DOCCS
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/pci.h>
+#include <linux/scx200.h>
+
+#define NAME "scx200_docflash"
+
+MODULE_AUTHOR("Christer Weinigel <wingel@hack.org>");
+MODULE_DESCRIPTION("NatSemi SCx200 DOCCS Flash Driver");
+MODULE_LICENSE("GPL");
+
+static int probe = 0; /* Don't autoprobe */
+static unsigned size = 0x1000000; /* 16 MiB, the whole ISA address space */
+static unsigned width = 8; /* Default to 8 bits wide */
+static char *flashtype = "cfi_probe";
+
+module_param(probe, int, 0);
+MODULE_PARM_DESC(probe, "Probe for a BIOS mapping");
+module_param(size, int, 0);
+MODULE_PARM_DESC(size, "Size of the flash mapping");
+module_param(width, int, 0);
+MODULE_PARM_DESC(width, "Data width of the flash mapping (8/16)");
+module_param(flashtype, charp, 0);
+MODULE_PARM_DESC(flashtype, "Type of MTD probe to do");
+
+static struct resource docmem = {
+ .flags = IORESOURCE_MEM,
+ .name = "NatSemi SCx200 DOCCS Flash",
+};
+
+static struct mtd_info *mymtd;
+
+static struct mtd_partition partition_info[] = {
+ {
+ .name = "DOCCS Boot kernel",
+ .offset = 0,
+ .size = 0xc0000
+ },
+ {
+ .name = "DOCCS Low BIOS",
+ .offset = 0xc0000,
+ .size = 0x40000
+ },
+ {
+ .name = "DOCCS File system",
+ .offset = 0x100000,
+ .size = ~0 /* calculate from flash size */
+ },
+ {
+ .name = "DOCCS High BIOS",
+ .offset = ~0, /* calculate from flash size */
+ .size = 0x80000
+ },
+};
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
+
+static struct map_info scx200_docflash_map = {
+ .name = "NatSemi SCx200 DOCCS Flash",
+};
+
+static int __init init_scx200_docflash(void)
+{
+ unsigned base;
+ unsigned ctrl;
+ unsigned pmr;
+ struct pci_dev *bridge;
+
+ printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n");
+
+ if ((bridge = pci_get_device(PCI_VENDOR_ID_NS,
+ PCI_DEVICE_ID_NS_SCx200_BRIDGE,
+ NULL)) == NULL)
+ return -ENODEV;
+
+ /* check that we have found the configuration block */
+ if (!scx200_cb_present()) {
+ pci_dev_put(bridge);
+ return -ENODEV;
+ }
+
+ if (probe) {
+ /* Try to use the present flash mapping if any */
+ pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base);
+ pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl);
+ pci_dev_put(bridge);
+
+ pmr = inl(scx200_cb_base + SCx200_PMR);
+
+ if (base == 0
+ || (ctrl & 0x07000000) != 0x07000000
+ || (ctrl & 0x0007ffff) == 0)
+ return -ENODEV;
+
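+		/* DOCCS_CTRL bits 12:0 hold the window size in 8 KiB
+		 * units, minus one; recover the byte size from that */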
+ size = ((ctrl&0x1fff)<<13) + (1<<13);
+
+		/* the present mapping must be a power of two in size */
+		if (!is_power_of_2(size))
+			return -ENODEV;
+
+ if (pmr & (1<<6))
+ width = 16;
+ else
+ width = 8;
+
+ docmem.start = base;
+ docmem.end = base + size;
+
+ if (request_resource(&iomem_resource, &docmem)) {
+ printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
+ return -ENOMEM;
+ }
+ } else {
+		if (!is_power_of_2(size)) {
+			printk(KERN_ERR NAME ": invalid size for flash mapping\n");
+			pci_dev_put(bridge);
+			return -EINVAL;
+		}
+
+		if (width != 8 && width != 16) {
+			printk(KERN_ERR NAME ": invalid bus width for flash mapping\n");
+			pci_dev_put(bridge);
+			return -EINVAL;
+		}
+
+		if (allocate_resource(&iomem_resource, &docmem,
+				      size,
+				      0xc0000000, 0xffffffff,
+				      size, NULL, NULL)) {
+			printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
+			pci_dev_put(bridge);
+			return -ENOMEM;
+		}
+
+ ctrl = 0x07000000 | ((size-1) >> 13);
+
+ printk(KERN_INFO "DOCCS BASE=0x%08lx, CTRL=0x%08lx\n", (long)docmem.start, (long)ctrl);
+
+		pci_write_config_dword(bridge, SCx200_DOCCS_BASE, docmem.start);
+		pci_write_config_dword(bridge, SCx200_DOCCS_CTRL, ctrl);
+		/* the bridge is no longer needed; drop the reference
+		 * taken by pci_get_device() above */
+		pci_dev_put(bridge);
+		pmr = inl(scx200_cb_base + SCx200_PMR);
+
+ if (width == 8) {
+ pmr &= ~(1<<6);
+ } else {
+ pmr |= (1<<6);
+ }
+ outl(pmr, scx200_cb_base + SCx200_PMR);
+ }
+
+ printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n",
+ &docmem, width);
+
+ scx200_docflash_map.size = size;
+ if (width == 8)
+ scx200_docflash_map.bankwidth = 1;
+ else
+ scx200_docflash_map.bankwidth = 2;
+
+ simple_map_init(&scx200_docflash_map);
+
+ scx200_docflash_map.phys = docmem.start;
+ scx200_docflash_map.virt = ioremap(docmem.start, scx200_docflash_map.size);
+ if (!scx200_docflash_map.virt) {
+ printk(KERN_ERR NAME ": failed to ioremap the flash\n");
+ release_resource(&docmem);
+ return -EIO;
+ }
+
+ mymtd = do_map_probe(flashtype, &scx200_docflash_map);
+ if (!mymtd) {
+ printk(KERN_ERR NAME ": unable to detect flash\n");
+ iounmap(scx200_docflash_map.virt);
+ release_resource(&docmem);
+ return -ENXIO;
+ }
+
+ if (size < mymtd->size)
+ printk(KERN_WARNING NAME ": warning, flash mapping is smaller than flash size\n");
+
+ mymtd->owner = THIS_MODULE;
+
+ partition_info[3].offset = mymtd->size-partition_info[3].size;
+ partition_info[2].size = partition_info[3].offset-partition_info[2].offset;
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
+
+ return 0;
+}
+
+static void __exit cleanup_scx200_docflash(void)
+{
+ if (mymtd) {
+ mtd_device_unregister(mymtd);
+ map_destroy(mymtd);
+ }
+ if (scx200_docflash_map.virt) {
+ iounmap(scx200_docflash_map.virt);
+ release_resource(&docmem);
+ }
+}
+
+module_init(init_scx200_docflash);
+module_exit(cleanup_scx200_docflash);
+
+/*
+ Local variables:
+ compile-command: "make -k -C ../../.. SUBDIRS=drivers/mtd/maps modules"
+ c-basic-offset: 8
+ End:
+*/
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
new file mode 100644
index 000000000..bb580bc16
--- /dev/null
+++ b/drivers/mtd/maps/solutionengine.c
@@ -0,0 +1,95 @@
+/*
+ * Flash and EPROM on Hitachi Solution Engine and similar boards.
+ *
+ * (C) 2001 Red Hat, Inc.
+ *
+ * GPL'd
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/errno.h>
+
+static struct mtd_info *flash_mtd;
+static struct mtd_info *eprom_mtd;
+
+struct map_info soleng_eprom_map = {
+ .name = "Solution Engine EPROM",
+ .size = 0x400000,
+ .bankwidth = 4,
+};
+
+struct map_info soleng_flash_map = {
+ .name = "Solution Engine FLASH",
+ .size = 0x400000,
+ .bankwidth = 4,
+};
+
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+static int __init init_soleng_maps(void)
+{
+ /* First probe at offset 0 */
+ soleng_flash_map.phys = 0;
+ soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
+ soleng_eprom_map.phys = 0x01000000;
+ soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0x01000000);
+ simple_map_init(&soleng_eprom_map);
+ simple_map_init(&soleng_flash_map);
+
+ printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n");
+ flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
+ if (!flash_mtd) {
+ /* Not there. Try swapping */
+ printk(KERN_NOTICE "Probing for flash chips at 0x01000000:\n");
+ soleng_flash_map.phys = 0x01000000;
+		soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0x01000000);
+		soleng_eprom_map.phys = 0;
+		soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0);
+ flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
+ if (!flash_mtd) {
+ /* Eep. */
+ printk(KERN_NOTICE "Flash chips not detected at either possible location.\n");
+ return -ENXIO;
+ }
+ }
+ printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n",
+ soleng_flash_map.phys & 0x1fffffff,
+ soleng_eprom_map.phys & 0x1fffffff);
+ flash_mtd->owner = THIS_MODULE;
+
+ eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
+ if (eprom_mtd) {
+ eprom_mtd->owner = THIS_MODULE;
+ mtd_device_register(eprom_mtd, NULL, 0);
+ }
+
+ mtd_device_parse_register(flash_mtd, probes, NULL, NULL, 0);
+
+ return 0;
+}
+
+static void __exit cleanup_soleng_maps(void)
+{
+ if (eprom_mtd) {
+ mtd_device_unregister(eprom_mtd);
+ map_destroy(eprom_mtd);
+ }
+
+ mtd_device_unregister(flash_mtd);
+ map_destroy(flash_mtd);
+}
+
+module_init(init_soleng_maps);
+module_exit(cleanup_soleng_maps);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD map driver for Hitachi SolutionEngine (and similar) boards");
+
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
new file mode 100644
index 000000000..d459aca07
--- /dev/null
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -0,0 +1,159 @@
+/* sun_uflash.c - Driver for user-programmable flash on
+ * Sun Microsystems SME boardsets.
+ *
+ * This driver does NOT provide access to the OBP-flash for
+ * safety reasons-- use <linux>/drivers/sbus/char/flash.c instead.
+ *
+ * Copyright (c) 2001 Eric Brower (ebrower@usa.net)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <asm/prom.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+#define UFLASH_OBPNAME "flashprom"
+#define DRIVER_NAME "sun_uflash"
+#define PFX DRIVER_NAME ": "
+
+#define UFLASH_WINDOW_SIZE 0x200000
+#define UFLASH_BUSWIDTH 1 /* EBus is 8-bit */
+
+MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");
+MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets");
+MODULE_SUPPORTED_DEVICE(DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.1");
+
+struct uflash_dev {
+ const char *name; /* device name */
+ struct map_info map; /* mtd map info */
+ struct mtd_info *mtd; /* mtd info */
+};
+
+struct map_info uflash_map_templ = {
+ .name = "SUNW,???-????",
+ .size = UFLASH_WINDOW_SIZE,
+ .bankwidth = UFLASH_BUSWIDTH,
+};
+
+int uflash_devinit(struct platform_device *op, struct device_node *dp)
+{
+ struct uflash_dev *up;
+
+ if (op->resource[1].flags) {
+ /* Non-CFI userflash device-- once I find one we
+ * can work on supporting it.
+ */
+ printk(KERN_ERR PFX "Unsupported device at %s, 0x%llx\n",
+ dp->full_name, (unsigned long long)op->resource[0].start);
+
+ return -ENODEV;
+ }
+
+ up = kzalloc(sizeof(struct uflash_dev), GFP_KERNEL);
+ if (!up) {
+ printk(KERN_ERR PFX "Cannot allocate struct uflash_dev\n");
+ return -ENOMEM;
+ }
+
+ /* copy defaults and tweak parameters */
+ memcpy(&up->map, &uflash_map_templ, sizeof(uflash_map_templ));
+
+ up->map.size = resource_size(&op->resource[0]);
+
+ up->name = of_get_property(dp, "model", NULL);
+ if (up->name && 0 < strlen(up->name))
+ up->map.name = up->name;
+
+ up->map.phys = op->resource[0].start;
+
+ up->map.virt = of_ioremap(&op->resource[0], 0, up->map.size,
+ DRIVER_NAME);
+ if (!up->map.virt) {
+ printk(KERN_ERR PFX "Failed to map device.\n");
+ kfree(up);
+
+ return -EINVAL;
+ }
+
+ simple_map_init(&up->map);
+
+ /* MTD registration */
+ up->mtd = do_map_probe("cfi_probe", &up->map);
+ if (!up->mtd) {
+ of_iounmap(&op->resource[0], up->map.virt, up->map.size);
+ kfree(up);
+
+ return -ENXIO;
+ }
+
+ up->mtd->owner = THIS_MODULE;
+
+ mtd_device_register(up->mtd, NULL, 0);
+
+ dev_set_drvdata(&op->dev, up);
+
+ return 0;
+}
+
+static int uflash_probe(struct platform_device *op)
+{
+ struct device_node *dp = op->dev.of_node;
+
+ /* Flashprom must have the "user" property in order to
+ * be used by this driver.
+ */
+ if (!of_find_property(dp, "user", NULL))
+ return -ENODEV;
+
+ return uflash_devinit(op, dp);
+}
+
+static int uflash_remove(struct platform_device *op)
+{
+ struct uflash_dev *up = dev_get_drvdata(&op->dev);
+
+ if (up->mtd) {
+ mtd_device_unregister(up->mtd);
+ map_destroy(up->mtd);
+ }
+ if (up->map.virt) {
+ of_iounmap(&op->resource[0], up->map.virt, up->map.size);
+ up->map.virt = NULL;
+ }
+
+ kfree(up);
+
+ return 0;
+}
+
+static const struct of_device_id uflash_match[] = {
+ {
+ .name = UFLASH_OBPNAME,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, uflash_match);
+
+static struct platform_driver uflash_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = uflash_match,
+ },
+ .probe = uflash_probe,
+ .remove = uflash_remove,
+};
+
+module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
new file mode 100644
index 000000000..9969fedb1
--- /dev/null
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -0,0 +1,121 @@
+/*
+ * ts5500_flash.c -- MTD map driver for Technology Systems TS-5500 board
+ *
+ * Copyright (C) 2004 Sean Young <sean@mess.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note:
+ * - In order for detection to work, jumper 3 must be set.
+ * - Drive A and B use the resident flash disk (RFD) flash translation layer.
+ * - If you have created your own jffs file system and the bios overwrites
+ * it during boot, try disabling Drive A: and B: in the boot order.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/types.h>
+
+
+#define WINDOW_ADDR 0x09400000
+#define WINDOW_SIZE 0x00200000
+
+static struct map_info ts5500_map = {
+ .name = "TS-5500 Flash",
+ .size = WINDOW_SIZE,
+ .bankwidth = 1,
+ .phys = WINDOW_ADDR
+};
+
+static struct mtd_partition ts5500_partitions[] = {
+ {
+ .name = "Drive A",
+ .offset = 0,
+ .size = 0x0e0000
+ },
+ {
+ .name = "BIOS",
+ .offset = 0x0e0000,
+ .size = 0x020000,
+ },
+ {
+ .name = "Drive B",
+ .offset = 0x100000,
+ .size = 0x100000
+ }
+};
+
+#define NUM_PARTITIONS ARRAY_SIZE(ts5500_partitions)
+
+static struct mtd_info *mymtd;
+
+static int __init init_ts5500_map(void)
+{
+ int rc = 0;
+
+ ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size);
+
+ if (!ts5500_map.virt) {
+ printk(KERN_ERR "Failed to ioremap_nocache\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ simple_map_init(&ts5500_map);
+
+ mymtd = do_map_probe("jedec_probe", &ts5500_map);
+ if (!mymtd)
+ mymtd = do_map_probe("map_rom", &ts5500_map);
+
+ if (!mymtd) {
+ rc = -ENXIO;
+ goto err1;
+ }
+
+ mymtd->owner = THIS_MODULE;
+ mtd_device_register(mymtd, ts5500_partitions, NUM_PARTITIONS);
+
+ return 0;
+
+err1:
+ iounmap(ts5500_map.virt);
+err2:
+ return rc;
+}
+
+static void __exit cleanup_ts5500_map(void)
+{
+ if (mymtd) {
+ mtd_device_unregister(mymtd);
+ map_destroy(mymtd);
+ }
+
+ if (ts5500_map.virt) {
+ iounmap(ts5500_map.virt);
+ ts5500_map.virt = NULL;
+ }
+}
+
+module_init(init_ts5500_map);
+module_exit(cleanup_ts5500_map);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("MTD map driver for Technology Systems TS-5500 board");
+
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
new file mode 100644
index 000000000..da2cdb5fd
--- /dev/null
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -0,0 +1,108 @@
+/*
+ * tsunami_flash.c
+ *
+ * flash chip on alpha ds10...
+ */
+#include <asm/io.h>
+#include <asm/core_tsunami.h>
+#include <linux/init.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+
+#define FLASH_ENABLE_PORT 0x00C00001
+#define FLASH_ENABLE_BYTE 0x01
+#define FLASH_DISABLE_BYTE 0x00
+
+#define MAX_TIG_FLASH_SIZE (12*1024*1024)
+static inline map_word tsunami_flash_read8(struct map_info *map, unsigned long offset)
+{
+ map_word val;
+ val.x[0] = tsunami_tig_readb(offset);
+ return val;
+}
+
+static void tsunami_flash_write8(struct map_info *map, map_word value, unsigned long offset)
+{
+ tsunami_tig_writeb(value.x[0], offset);
+}
+
+static void tsunami_flash_copy_from(
+ struct map_info *map, void *addr, unsigned long offset, ssize_t len)
+{
+ unsigned char *dest;
+ dest = addr;
+ while(len && (offset < MAX_TIG_FLASH_SIZE)) {
+ *dest = tsunami_tig_readb(offset);
+ offset++;
+ dest++;
+ len--;
+ }
+}
+
+static void tsunami_flash_copy_to(
+ struct map_info *map, unsigned long offset,
+ const void *addr, ssize_t len)
+{
+ const unsigned char *src;
+ src = addr;
+ while(len && (offset < MAX_TIG_FLASH_SIZE)) {
+ tsunami_tig_writeb(*src, offset);
+ offset++;
+ src++;
+ len--;
+ }
+}
+
+/*
+ * Deliberately don't provide operations wider than 8 bits.  I don't
+ * have them, and it scares me to think how you could mess up if
+ * you tried to use them.  The buswidth is set correctly, so I'm safe.
+ */
+static struct map_info tsunami_flash_map = {
+ .name = "flash chip on the Tsunami TIG bus",
+ .size = MAX_TIG_FLASH_SIZE,
+ .phys = NO_XIP,
+ .bankwidth = 1,
+ .read = tsunami_flash_read8,
+ .copy_from = tsunami_flash_copy_from,
+ .write = tsunami_flash_write8,
+ .copy_to = tsunami_flash_copy_to,
+};
+
+static struct mtd_info *tsunami_flash_mtd;
+
+static void __exit cleanup_tsunami_flash(void)
+{
+ struct mtd_info *mtd;
+ mtd = tsunami_flash_mtd;
+ if (mtd) {
+ mtd_device_unregister(mtd);
+ map_destroy(mtd);
+ }
+	tsunami_flash_mtd = NULL;
+}
+
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "map_rom", NULL };
+
+static int __init init_tsunami_flash(void)
+{
+ const char * const *type;
+
+ tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
+
+	tsunami_flash_mtd = NULL;
+ type = rom_probe_types;
+ for(; !tsunami_flash_mtd && *type; type++) {
+ tsunami_flash_mtd = do_map_probe(*type, &tsunami_flash_map);
+ }
+ if (tsunami_flash_mtd) {
+ tsunami_flash_mtd->owner = THIS_MODULE;
+ mtd_device_register(tsunami_flash_mtd, NULL, 0);
+ return 0;
+ }
+ return -ENXIO;
+}
+
+module_init(init_tsunami_flash);
+module_exit(cleanup_tsunami_flash);
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
new file mode 100644
index 000000000..c1af83db5
--- /dev/null
+++ b/drivers/mtd/maps/uclinux.c
@@ -0,0 +1,143 @@
+/****************************************************************************/
+
+/*
+ * uclinux.c -- generic memory mapped MTD driver for uclinux
+ *
+ * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ */
+
+/****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/sections.h>
+
+/****************************************************************************/
+
+#ifdef CONFIG_MTD_ROM
+#define MAP_NAME "rom"
+#else
+#define MAP_NAME "ram"
+#endif
+
+/*
+ * Blackfin uses uclinux_ram_map during startup, so it must not be static.
+ * Provide a dummy declaration to make sparse happy.
+ */
+extern struct map_info uclinux_ram_map;
+
+struct map_info uclinux_ram_map = {
+ .name = MAP_NAME,
+ .size = 0,
+};
+
+static unsigned long physaddr = -1;
+module_param(physaddr, ulong, S_IRUGO);
+
+static struct mtd_info *uclinux_ram_mtdinfo;
+
+/****************************************************************************/
+
+static struct mtd_partition uclinux_romfs[] = {
+ { .name = "ROMfs" }
+};
+
+#define NUM_PARTITIONS ARRAY_SIZE(uclinux_romfs)
+
+/****************************************************************************/
+
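+/* mtd ->_point() backend (editorial note): the image already sits in
+ * directly addressable RAM/ROM, so hand back a pointer into the mapping
+ * and let callers read it in place without copying */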
+static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ struct map_info *map = mtd->priv;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return(0);
+}
+
+/****************************************************************************/
+
+static int __init uclinux_mtd_init(void)
+{
+ struct mtd_info *mtd;
+ struct map_info *mapp;
+
+ mapp = &uclinux_ram_map;
+
+ if (physaddr == -1)
+ mapp->phys = (resource_size_t)__bss_stop;
+ else
+ mapp->phys = physaddr;
+
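+	/* a ROMFS image begins with the magic "-rom1fs-"; its big-endian
+	 * total size lives at byte offset 8 of that header, which is what
+	 * is read below when no size was given (editorial note) */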
+ if (!mapp->size)
+ mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
+ mapp->bankwidth = 4;
+
+ printk("uclinux[mtd]: probe address=0x%x size=0x%x\n",
+ (int) mapp->phys, (int) mapp->size);
+
+ /*
+ * The filesystem is guaranteed to be in direct mapped memory. It is
+ * directly following the kernels own bss region. Following the same
+ * mechanism used by architectures setting up traditional initrds we
+ * use phys_to_virt to get the virtual address of its start.
+ */
+ mapp->virt = phys_to_virt(mapp->phys);
+
+ if (mapp->virt == 0) {
+ printk("uclinux[mtd]: no virtual mapping?\n");
+ return(-EIO);
+ }
+
+ simple_map_init(mapp);
+
+ mtd = do_map_probe("map_" MAP_NAME, mapp);
+ if (!mtd) {
+ printk("uclinux[mtd]: failed to find a mapping?\n");
+ return(-ENXIO);
+ }
+
+ mtd->owner = THIS_MODULE;
+ mtd->_point = uclinux_point;
+ mtd->priv = mapp;
+
+ uclinux_ram_mtdinfo = mtd;
+ mtd_device_register(mtd, uclinux_romfs, NUM_PARTITIONS);
+
+ return(0);
+}
+
+/****************************************************************************/
+
+static void __exit uclinux_mtd_cleanup(void)
+{
+ if (uclinux_ram_mtdinfo) {
+ mtd_device_unregister(uclinux_ram_mtdinfo);
+ map_destroy(uclinux_ram_mtdinfo);
+ uclinux_ram_mtdinfo = NULL;
+ }
+	uclinux_ram_map.virt = NULL;
+}
+
+/****************************************************************************/
+
+module_init(uclinux_mtd_init);
+module_exit(uclinux_mtd_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
+MODULE_DESCRIPTION("Generic MTD for uClinux");
+
+/****************************************************************************/
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
new file mode 100644
index 000000000..6b223cfe9
--- /dev/null
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -0,0 +1,824 @@
+/* vmu-flash.c
+ * Driver for SEGA Dreamcast Visual Memory Unit
+ *
+ * Copyright (c) Adrian McMenamin 2002 - 2009
+ * Copyright (c) Paul Mundt 2001
+ *
+ * Licensed under version 2 of the
+ * GNU General Public Licence
+ */
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/maple.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+struct vmu_cache {
+ unsigned char *buffer; /* Cache */
+ unsigned int block; /* Which block was cached */
+ unsigned long jiffies_atc; /* When was it cached? */
+ int valid;
+};
+
+struct mdev_part {
+ struct maple_device *mdev;
+ int partition;
+};
+
+struct vmupart {
+ u16 user_blocks;
+ u16 root_block;
+ u16 numblocks;
+ char *name;
+ struct vmu_cache *pcache;
+};
+
+struct memcard {
+ u16 tempA;
+ u16 tempB;
+ u32 partitions;
+ u32 blocklen;
+ u32 writecnt;
+ u32 readcnt;
+ u32 removeable;
+ int partition;
+ int read;
+ unsigned char *blockread;
+ struct vmupart *parts;
+ struct mtd_info *mtd;
+};
+
+struct vmu_block {
+ unsigned int num; /* block number */
+ unsigned int ofs; /* block offset */
+};
+
+static struct vmu_block *ofs_to_block(unsigned long src_ofs,
+ struct mtd_info *mtd, int partition)
+{
+ struct vmu_block *vblock;
+ struct maple_device *mdev;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ int num;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ card = maple_get_drvdata(mdev);
+
+ if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
+ goto failed;
+
+ num = src_ofs / card->blocklen;
+ if (num > card->parts[partition].numblocks)
+ goto failed;
+
+ vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
+ if (!vblock)
+ goto failed;
+
+ vblock->num = num;
+ vblock->ofs = src_ofs % card->blocklen;
+ return vblock;
+
+failed:
+ return NULL;
+}
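+
+/* Worked example (editorial): with 512-byte blocks, src_ofs 0x2345
+ * yields num = 0x2345 / 512 = 0x11 and ofs = 0x2345 % 512 = 0x145. */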
+
+/* Maple bus callback function for reads */
+static void vmu_blockread(struct mapleq *mq)
+{
+ struct maple_device *mdev;
+ struct memcard *card;
+
+ mdev = mq->dev;
+ card = maple_get_drvdata(mdev);
+ /* copy the read in data */
+
+ if (unlikely(!card->blockread))
+ return;
+
+ memcpy(card->blockread, mq->recvbuf->buf + 12,
+ card->blocklen/card->readcnt);
+
+}
+
+/* Interface with maple bus to read blocks
+ * caching the results so that other parts
+ * of the driver can access block reads */
+static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
+ struct mtd_info *mtd)
+{
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct maple_device *mdev;
+ int partition, error = 0, x, wait;
+ unsigned char *blockread = NULL;
+ struct vmu_cache *pcache;
+ __be32 sendbuf;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+ pcache = card->parts[partition].pcache;
+ pcache->valid = 0;
+
+ /* prepare the cache for this block */
+ if (!pcache->buffer) {
+ pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
+ if (!pcache->buffer) {
+ dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
+ " to lack of memory\n", mdev->port,
+ mdev->unit);
+ error = -ENOMEM;
+ goto outB;
+ }
+ }
+
+	/*
+	 * Reads may be phased - again the hardware spec
+	 * supports this - and though there may not be any devices in
+	 * the wild that implement it, we handle it here
+	 */
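+	/* each phase moves blocklen/readcnt bytes; the command word built
+	 * below packs partition (bits 31:24), phase (23:16), block (15:0) */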
+ for (x = 0; x < card->readcnt; x++) {
+ sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);
+
+ if (atomic_read(&mdev->busy) == 1) {
+ wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ);
+ if (atomic_read(&mdev->busy) == 1) {
+ dev_notice(&mdev->dev, "VMU at (%d, %d)"
+ " is busy\n", mdev->port, mdev->unit);
+ error = -EAGAIN;
+ goto outB;
+ }
+ }
+
+ atomic_set(&mdev->busy, 1);
+ blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
+ if (!blockread) {
+ error = -ENOMEM;
+ atomic_set(&mdev->busy, 0);
+ goto outB;
+ }
+ card->blockread = blockread;
+
+ maple_getcond_callback(mdev, vmu_blockread, 0,
+ MAPLE_FUNC_MEMCARD);
+ error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_BREAD, 2, &sendbuf);
+ /* Very long timeouts seem to be needed when box is stressed */
+ wait = wait_event_interruptible_timeout(mdev->maple_wait,
+ (atomic_read(&mdev->busy) == 0 ||
+ atomic_read(&mdev->busy) == 2), HZ * 3);
+ /*
+ * MTD layer does not handle hotplugging well
+ * so have to return errors when VMU is unplugged
+ * in the middle of a read (busy == 2)
+ */
+ if (error || atomic_read(&mdev->busy) == 2) {
+ if (atomic_read(&mdev->busy) == 2)
+ error = -ENXIO;
+ atomic_set(&mdev->busy, 0);
+ card->blockread = NULL;
+ goto outA;
+ }
+ if (wait == 0 || wait == -ERESTARTSYS) {
+ card->blockread = NULL;
+ atomic_set(&mdev->busy, 0);
+ error = -EIO;
+ list_del_init(&(mdev->mq->list));
+ kfree(mdev->mq->sendbuf);
+ mdev->mq->sendbuf = NULL;
+ if (wait == -ERESTARTSYS) {
+ dev_warn(&mdev->dev, "VMU read on (%d, %d)"
+ " interrupted on block 0x%X\n",
+ mdev->port, mdev->unit, num);
+ } else
+ dev_notice(&mdev->dev, "VMU read on (%d, %d)"
+ " timed out on block 0x%X\n",
+ mdev->port, mdev->unit, num);
+ goto outA;
+ }
+
+ memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
+ card->blocklen/card->readcnt);
+
+ memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
+ card->blockread, card->blocklen/card->readcnt);
+ card->blockread = NULL;
+ pcache->block = num;
+ pcache->jiffies_atc = jiffies;
+ pcache->valid = 1;
+ kfree(blockread);
+ }
+
+ return error;
+
+outA:
+ kfree(blockread);
+outB:
+ return error;
+}
+
+/* communicate with maple bus for phased writing */
+static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
+ struct mtd_info *mtd)
+{
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct maple_device *mdev;
+ int partition, error, locking, x, phaselen, wait;
+ __be32 *sendbuf;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+
+ phaselen = card->blocklen/card->writecnt;
+
+ sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
+ if (!sendbuf) {
+ error = -ENOMEM;
+ goto fail_nosendbuf;
+ }
+ for (x = 0; x < card->writecnt; x++) {
+ sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
+ memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
+		/* wait until the device is no longer busy doing something
+		 * else, giving up after one second */
+ if (atomic_read(&mdev->busy) == 1) {
+ wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ);
+ if (atomic_read(&mdev->busy) == 1) {
+ error = -EBUSY;
+ dev_notice(&mdev->dev, "VMU write at (%d, %d)"
+ "failed - device is busy\n",
+ mdev->port, mdev->unit);
+ goto fail_nolock;
+ }
+ }
+ atomic_set(&mdev->busy, 1);
+
+ locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
+ wait = wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ/10);
+ if (locking) {
+ error = -EIO;
+ atomic_set(&mdev->busy, 0);
+ goto fail_nolock;
+ }
+ if (atomic_read(&mdev->busy) == 2) {
+ atomic_set(&mdev->busy, 0);
+ } else if (wait == 0 || wait == -ERESTARTSYS) {
+ error = -EIO;
+ dev_warn(&mdev->dev, "Write at (%d, %d) of block"
+ " 0x%X at phase %d failed: could not"
+ " communicate with VMU", mdev->port,
+ mdev->unit, num, x);
+ atomic_set(&mdev->busy, 0);
+ kfree(mdev->mq->sendbuf);
+ mdev->mq->sendbuf = NULL;
+ list_del_init(&(mdev->mq->list));
+ goto fail_nolock;
+ }
+ }
+ kfree(sendbuf);
+
+ return card->blocklen;
+
+fail_nolock:
+ kfree(sendbuf);
+fail_nosendbuf:
+ dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
+ mdev->unit);
+ return error;
+}
+
+/* mtd function to simulate reading byte by byte */
+static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
+ struct mtd_info *mtd)
+{
+ struct vmu_block *vblock;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct maple_device *mdev;
+ unsigned char *buf, ret;
+ int partition, error;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+ *retval = 0;
+
+ buf = kmalloc(card->blocklen, GFP_KERNEL);
+ if (!buf) {
+ *retval = 1;
+ ret = -ENOMEM;
+ goto finish;
+ }
+
+ vblock = ofs_to_block(ofs, mtd, partition);
+ if (!vblock) {
+ *retval = 3;
+ ret = -ENOMEM;
+ goto out_buf;
+ }
+
+ error = maple_vmu_read_block(vblock->num, buf, mtd);
+ if (error) {
+ ret = error;
+ *retval = 2;
+ goto out_vblock;
+ }
+
+ ret = buf[vblock->ofs];
+
+out_vblock:
+ kfree(vblock);
+out_buf:
+ kfree(buf);
+finish:
+ return ret;
+}
+
+/* mtd higher order function to read flash */
+static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct maple_device *mdev;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct vmu_cache *pcache;
+ struct vmu_block *vblock;
+ int index = 0, retval, partition, leftover, numblocks;
+ unsigned char cx;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+
+ numblocks = card->parts[partition].numblocks;
+ if (from + len > numblocks * card->blocklen)
+ len = numblocks * card->blocklen - from;
+ if (len == 0)
+ return -EIO;
+ /* Have we cached this bit already? */
+ pcache = card->parts[partition].pcache;
+ do {
+ vblock = ofs_to_block(from + index, mtd, partition);
+ if (!vblock)
+ return -ENOMEM;
+ /* Have we cached this and is the cache valid and timely? */
+ if (pcache->valid &&
+ time_before(jiffies, pcache->jiffies_atc + HZ) &&
+ (pcache->block == vblock->num)) {
+ /* we have cached it, so do necessary copying */
+ leftover = card->blocklen - vblock->ofs;
+ if (vblock->ofs + len - index < card->blocklen) {
+ /* only a bit of this block to copy */
+ memcpy(buf + index,
+ pcache->buffer + vblock->ofs,
+ len - index);
+ index = len;
+ } else {
+ /* otherwise copy remainder of whole block */
+ memcpy(buf + index, pcache->buffer +
+ vblock->ofs, leftover);
+ index += leftover;
+ }
+ } else {
+ /*
+ * Not cached so read one byte -
+ * but cache the rest of the block
+ */
+ cx = vmu_flash_read_char(from + index, &retval, mtd);
+ if (retval) {
+ *retlen = index;
+ kfree(vblock);
+ return cx;
+ }
+ memset(buf + index, cx, 1);
+ index++;
+ }
+ kfree(vblock);
+ } while (len > index);
+ *retlen = index;
+
+ return 0;
+}
+
+static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct maple_device *mdev;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ int index = 0, partition, error = 0, numblocks;
+ struct vmu_cache *pcache;
+ struct vmu_block *vblock;
+ unsigned char *buffer;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+
+ numblocks = card->parts[partition].numblocks;
+ if (to + len > numblocks * card->blocklen)
+ len = numblocks * card->blocklen - to;
+ if (len == 0) {
+ error = -EIO;
+ goto failed;
+ }
+
+ vblock = ofs_to_block(to, mtd, partition);
+ if (!vblock) {
+ error = -ENOMEM;
+ goto failed;
+ }
+
+ buffer = kmalloc(card->blocklen, GFP_KERNEL);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto fail_buffer;
+ }
+
+ do {
+ /* Read in the block we are to write to */
+ error = maple_vmu_read_block(vblock->num, buffer, mtd);
+ if (error)
+ goto fail_io;
+
+ do {
+ buffer[vblock->ofs] = buf[index];
+ vblock->ofs++;
+ index++;
+ if (index >= len)
+ break;
+ } while (vblock->ofs < card->blocklen);
+
+ /* write out new buffer */
+ error = maple_vmu_write_block(vblock->num, buffer, mtd);
+ /* invalidate the cache */
+ pcache = card->parts[partition].pcache;
+ pcache->valid = 0;
+
+ if (error != card->blocklen)
+ goto fail_io;
+
+ vblock->num++;
+ vblock->ofs = 0;
+ } while (len > index);
+
+ kfree(buffer);
+ *retlen = index;
+ kfree(vblock);
+ return 0;
+
+fail_io:
+ kfree(buffer);
+fail_buffer:
+ kfree(vblock);
+failed:
+ dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
+ return error;
+}
+
+static void vmu_flash_sync(struct mtd_info *mtd)
+{
+ /* Do nothing here */
+}
+
+/* Maple bus callback function to recursively query hardware details */
+static void vmu_queryblocks(struct mapleq *mq)
+{
+ struct maple_device *mdev;
+ unsigned short *res;
+ struct memcard *card;
+ __be32 partnum;
+ struct vmu_cache *pcache;
+ struct mdev_part *mpart;
+ struct mtd_info *mtd_cur;
+ struct vmupart *part_cur;
+ int error;
+
+ mdev = mq->dev;
+ card = maple_get_drvdata(mdev);
+ res = (unsigned short *) (mq->recvbuf->buf);
+ card->tempA = res[12];
+ card->tempB = res[6];
+
+ dev_info(&mdev->dev, "VMU device at partition %d has %d user "
+ "blocks with a root block at %d\n", card->partition,
+ card->tempA, card->tempB);
+
+ part_cur = &card->parts[card->partition];
+ part_cur->user_blocks = card->tempA;
+ part_cur->root_block = card->tempB;
+ part_cur->numblocks = card->tempB + 1;
+ part_cur->name = kmalloc(12, GFP_KERNEL);
+ if (!part_cur->name)
+ goto fail_name;
+
+ sprintf(part_cur->name, "vmu%d.%d.%d",
+ mdev->port, mdev->unit, card->partition);
+ mtd_cur = &card->mtd[card->partition];
+ mtd_cur->name = part_cur->name;
+ mtd_cur->type = 8;
+ mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
+ mtd_cur->size = part_cur->numblocks * card->blocklen;
+ mtd_cur->erasesize = card->blocklen;
+ mtd_cur->_write = vmu_flash_write;
+ mtd_cur->_read = vmu_flash_read;
+ mtd_cur->_sync = vmu_flash_sync;
+ mtd_cur->writesize = card->blocklen;
+
+ mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
+ if (!mpart)
+ goto fail_mpart;
+
+ mpart->mdev = mdev;
+ mpart->partition = card->partition;
+ mtd_cur->priv = mpart;
+ mtd_cur->owner = THIS_MODULE;
+
+ pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
+ if (!pcache)
+ goto fail_cache_create;
+ part_cur->pcache = pcache;
+
+ error = mtd_device_register(mtd_cur, NULL, 0);
+ if (error)
+ goto fail_mtd_register;
+
+ maple_getcond_callback(mdev, NULL, 0,
+ MAPLE_FUNC_MEMCARD);
+
+ /*
+ * Set up a recursive call to the (probably theoretical)
+ * second or more partition
+ */
+ if (++card->partition < card->partitions) {
+ partnum = cpu_to_be32(card->partition << 24);
+ maple_getcond_callback(mdev, vmu_queryblocks, 0,
+ MAPLE_FUNC_MEMCARD);
+ maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_GETMINFO, 2, &partnum);
+ }
+ return;
+
+fail_mtd_register:
+	dev_err(&mdev->dev, "Could not register maple device at (%d, %d), "
+		"error is 0x%X\n", mdev->port, mdev->unit, error);
+ for (error = 0; error <= card->partition; error++) {
+ kfree(((card->parts)[error]).pcache);
+ ((card->parts)[error]).pcache = NULL;
+ }
+fail_cache_create:
+fail_mpart:
+ for (error = 0; error <= card->partition; error++) {
+ kfree(((card->mtd)[error]).priv);
+ ((card->mtd)[error]).priv = NULL;
+ }
+ maple_getcond_callback(mdev, NULL, 0,
+ MAPLE_FUNC_MEMCARD);
+ kfree(part_cur->name);
+fail_name:
+ return;
+}
+
+/* Handles very basic info about the flash, queries for details */
+static int vmu_connect(struct maple_device *mdev)
+{
+ unsigned long test_flash_data, basic_flash_data;
+ int c, error;
+ struct memcard *card;
+ u32 partnum = 0;
+
+ test_flash_data = be32_to_cpu(mdev->devinfo.function);
+	/* Need to count how many bits are set, to find out which
+	 * function_data element has details of the memory card
+	 */
+ c = hweight_long(test_flash_data);
+
+ basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
+
+ card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
+ if (!card) {
+ error = -ENOMEM;
+ goto fail_nomem;
+ }
+
+ card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
+ card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
+ card->writecnt = basic_flash_data >> 12 & 0xF;
+ card->readcnt = basic_flash_data >> 8 & 0xF;
+ card->removeable = basic_flash_data >> 7 & 1;
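+	/*
+	 * Worked example of the decode above (illustrative values only):
+	 * with bits 31-24 of basic_flash_data clear the card has
+	 * 0 + 1 = 1 partition, and if bits 23-16 held 0x0F the block
+	 * length would be (0x0F + 1) << 5 = 16 * 32 = 512 bytes.
+	 */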
+
+ card->partition = 0;
+
+	/*
+	 * Not sure there are actually any multi-partition devices in the
+	 * real world, but the hardware supports them, so we will too
+	 */
+ card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
+ GFP_KERNEL);
+ if (!card->parts) {
+ error = -ENOMEM;
+ goto fail_partitions;
+ }
+
+ card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
+ GFP_KERNEL);
+ if (!card->mtd) {
+ error = -ENOMEM;
+ goto fail_mtd_info;
+ }
+
+ maple_set_drvdata(mdev, card);
+
+	/*
+	 * We want to trap the meminfo response, not a getcond reply, so
+	 * set the polling interval to zero and rely on the maple bus
+	 * driver to pass back the results of the meminfo request
+	 */
+ maple_getcond_callback(mdev, vmu_queryblocks, 0,
+ MAPLE_FUNC_MEMCARD);
+
+ /* Make sure we are clear to go */
+ if (atomic_read(&mdev->busy) == 1) {
+ wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ);
+ if (atomic_read(&mdev->busy) == 1) {
+ dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
+ mdev->port, mdev->unit);
+ error = -EAGAIN;
+ goto fail_device_busy;
+ }
+ }
+
+ atomic_set(&mdev->busy, 1);
+
+ /*
+ * Set up the minfo call: vmu_queryblocks will handle
+ * the information passed back
+ */
+ error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_GETMINFO, 2, &partnum);
+ if (error) {
+		dev_err(&mdev->dev, "Could not lock VMU at (%d, %d), "
+			"error is 0x%X\n", mdev->port, mdev->unit, error);
+ goto fail_mtd_info;
+ }
+ return 0;
+
+fail_device_busy:
+ kfree(card->mtd);
+fail_mtd_info:
+ kfree(card->parts);
+fail_partitions:
+ kfree(card);
+fail_nomem:
+ return error;
+}
+
+static void vmu_disconnect(struct maple_device *mdev)
+{
+ struct memcard *card;
+ struct mdev_part *mpart;
+ int x;
+
+ mdev->callback = NULL;
+ card = maple_get_drvdata(mdev);
+ for (x = 0; x < card->partitions; x++) {
+ mpart = ((card->mtd)[x]).priv;
+ mpart->mdev = NULL;
+ mtd_device_unregister(&((card->mtd)[x]));
+ kfree(((card->parts)[x]).name);
+ }
+ kfree(card->parts);
+ kfree(card->mtd);
+ kfree(card);
+}
+
+/* Callback to handle eccentricities of both the MTD subsystem
+ * and the general flakiness of Dreamcast VMUs
+ */
+static int vmu_can_unload(struct maple_device *mdev)
+{
+ struct memcard *card;
+ int x;
+ struct mtd_info *mtd;
+
+ card = maple_get_drvdata(mdev);
+ for (x = 0; x < card->partitions; x++) {
+ mtd = &((card->mtd)[x]);
+ if (mtd->usecount > 0)
+ return 0;
+ }
+ return 1;
+}
+
+#define ERRSTR "VMU at (%d, %d) file error -"
+
+static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
+{
+ enum maple_file_errors error = ((int *)recvbuf)[1];
+
+ switch (error) {
+
+ case MAPLE_FILEERR_INVALID_PARTITION:
+ dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_PHASE_ERROR:
+ dev_notice(&mdev->dev, ERRSTR " phase error\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_INVALID_BLOCK:
+ dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_WRITE_ERROR:
+ dev_notice(&mdev->dev, ERRSTR " write error\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
+ dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_BAD_CRC:
+ dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
+ mdev->port, mdev->unit);
+ break;
+
+ default:
+ dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
+ mdev->port, mdev->unit, error);
+ }
+}
+
+
+static int probe_maple_vmu(struct device *dev)
+{
+ int error;
+ struct maple_device *mdev = to_maple_dev(dev);
+ struct maple_driver *mdrv = to_maple_driver(dev->driver);
+
+ mdev->can_unload = vmu_can_unload;
+ mdev->fileerr_handler = vmu_file_error;
+ mdev->driver = mdrv;
+
+ error = vmu_connect(mdev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int remove_maple_vmu(struct device *dev)
+{
+ struct maple_device *mdev = to_maple_dev(dev);
+
+ vmu_disconnect(mdev);
+ return 0;
+}
+
+static struct maple_driver vmu_flash_driver = {
+ .function = MAPLE_FUNC_MEMCARD,
+ .drv = {
+ .name = "Dreamcast_visual_memory",
+ .probe = probe_maple_vmu,
+ .remove = remove_maple_vmu,
+ },
+};
+
+static int __init vmu_flash_map_init(void)
+{
+ return maple_driver_register(&vmu_flash_driver);
+}
+
+static void __exit vmu_flash_map_exit(void)
+{
+ maple_driver_unregister(&vmu_flash_driver);
+}
+
+module_init(vmu_flash_map_init);
+module_exit(vmu_flash_map_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian McMenamin");
+MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
new file mode 100644
index 000000000..df7c6c707
--- /dev/null
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -0,0 +1,603 @@
+/*
+ * Interface to Linux block layer for MTD 'translation layers'.
+ *
+ * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/mtd.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/spinlock.h>
+#include <linux/hdreg.h>
+#include <linux/mutex.h>
+#include <asm/uaccess.h>
+
+#include "mtdcore.h"
+
+static LIST_HEAD(blktrans_majors);
+static DEFINE_MUTEX(blktrans_ref_mutex);
+
+static void blktrans_dev_release(struct kref *kref)
+{
+ struct mtd_blktrans_dev *dev =
+ container_of(kref, struct mtd_blktrans_dev, ref);
+
+ dev->disk->private_data = NULL;
+ blk_cleanup_queue(dev->rq);
+ put_disk(dev->disk);
+ list_del(&dev->list);
+ kfree(dev);
+}
+
+static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
+{
+ struct mtd_blktrans_dev *dev;
+
+ mutex_lock(&blktrans_ref_mutex);
+ dev = disk->private_data;
+
+ if (!dev)
+ goto unlock;
+ kref_get(&dev->ref);
+unlock:
+ mutex_unlock(&blktrans_ref_mutex);
+ return dev;
+}
+
+static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
+{
+ mutex_lock(&blktrans_ref_mutex);
+ kref_put(&dev->ref, blktrans_dev_release);
+ mutex_unlock(&blktrans_ref_mutex);
+}
+
+
+static int do_blktrans_request(struct mtd_blktrans_ops *tr,
+ struct mtd_blktrans_dev *dev,
+ struct request *req)
+{
+ unsigned long block, nsect;
+ char *buf;
+
+ block = blk_rq_pos(req) << 9 >> tr->blkshift;
+ nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
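+	/*
+	 * blk_rq_pos() counts 512-byte sectors; << 9 converts that to
+	 * bytes and >> tr->blkshift to translation-layer blocks. For
+	 * example, with a 4096-byte blksize (blkshift 12), sector 8 is
+	 * byte offset 4096, i.e. block 1.
+	 */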
+ buf = bio_data(req->bio);
+
+ if (req->cmd_type != REQ_TYPE_FS)
+ return -EIO;
+
+ if (req->cmd_flags & REQ_FLUSH)
+ return tr->flush(dev);
+
+ if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
+ get_capacity(req->rq_disk))
+ return -EIO;
+
+ if (req->cmd_flags & REQ_DISCARD)
+ return tr->discard(dev, block, nsect);
+
+	switch (rq_data_dir(req)) {
+ case READ:
+ for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+ if (tr->readsect(dev, block, buf))
+ return -EIO;
+ rq_flush_dcache_pages(req);
+ return 0;
+ case WRITE:
+ if (!tr->writesect)
+ return -EIO;
+
+ rq_flush_dcache_pages(req);
+ for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+ if (tr->writesect(dev, block, buf))
+ return -EIO;
+ return 0;
+ default:
+ printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
+ return -EIO;
+ }
+}
+
+int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
+{
+ return dev->bg_stop;
+}
+EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
+
+static void mtd_blktrans_work(struct work_struct *work)
+{
+ struct mtd_blktrans_dev *dev =
+ container_of(work, struct mtd_blktrans_dev, work);
+ struct mtd_blktrans_ops *tr = dev->tr;
+ struct request_queue *rq = dev->rq;
+ struct request *req = NULL;
+ int background_done = 0;
+
+ spin_lock_irq(rq->queue_lock);
+
+ while (1) {
+ int res;
+
+ dev->bg_stop = false;
+ if (!req && !(req = blk_fetch_request(rq))) {
+ if (tr->background && !background_done) {
+ spin_unlock_irq(rq->queue_lock);
+ mutex_lock(&dev->lock);
+ tr->background(dev);
+ mutex_unlock(&dev->lock);
+ spin_lock_irq(rq->queue_lock);
+ /*
+ * Do background processing just once per idle
+ * period.
+ */
+ background_done = !dev->bg_stop;
+ continue;
+ }
+ break;
+ }
+
+ spin_unlock_irq(rq->queue_lock);
+
+ mutex_lock(&dev->lock);
+ res = do_blktrans_request(dev->tr, dev, req);
+ mutex_unlock(&dev->lock);
+
+ spin_lock_irq(rq->queue_lock);
+
+ if (!__blk_end_request_cur(req, res))
+ req = NULL;
+
+ background_done = 0;
+ }
+
+ spin_unlock_irq(rq->queue_lock);
+}
+
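+/*
+ * The block layer invokes this request_fn with the queue lock held, and
+ * MTD operations may sleep, so requests are merely handed off to the
+ * per-device workqueue here and actually served by mtd_blktrans_work()
+ * above.
+ */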
+static void mtd_blktrans_request(struct request_queue *rq)
+{
+ struct mtd_blktrans_dev *dev;
+ struct request *req = NULL;
+
+ dev = rq->queuedata;
+
+ if (!dev)
+ while ((req = blk_fetch_request(rq)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
+ else
+ queue_work(dev->wq, &dev->work);
+}
+
+static int blktrans_open(struct block_device *bdev, fmode_t mode)
+{
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = 0;
+
+ if (!dev)
+ return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
+
+ mutex_lock(&dev->lock);
+ mutex_lock(&mtd_table_mutex);
+
+ if (dev->open)
+ goto unlock;
+
+ kref_get(&dev->ref);
+ __module_get(dev->tr->owner);
+
+ if (!dev->mtd)
+ goto unlock;
+
+ if (dev->tr->open) {
+ ret = dev->tr->open(dev);
+ if (ret)
+ goto error_put;
+ }
+
+ ret = __get_mtd_device(dev->mtd);
+ if (ret)
+ goto error_release;
+ dev->file_mode = mode;
+
+unlock:
+ dev->open++;
+ mutex_unlock(&mtd_table_mutex);
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
+
+error_release:
+ if (dev->tr->release)
+ dev->tr->release(dev);
+error_put:
+ module_put(dev->tr->owner);
+ kref_put(&dev->ref, blktrans_dev_release);
+ mutex_unlock(&mtd_table_mutex);
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
+}
+
+static void blktrans_release(struct gendisk *disk, fmode_t mode)
+{
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
+
+ if (!dev)
+ return;
+
+ mutex_lock(&dev->lock);
+ mutex_lock(&mtd_table_mutex);
+
+ if (--dev->open)
+ goto unlock;
+
+ kref_put(&dev->ref, blktrans_dev_release);
+ module_put(dev->tr->owner);
+
+ if (dev->mtd) {
+ if (dev->tr->release)
+ dev->tr->release(dev);
+ __put_mtd_device(dev->mtd);
+ }
+unlock:
+ mutex_unlock(&mtd_table_mutex);
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+}
+
+static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (!dev)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
+
+ ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
+}
+
+static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (!dev)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
+}
+
+static const struct block_device_operations mtd_block_ops = {
+ .owner = THIS_MODULE,
+ .open = blktrans_open,
+ .release = blktrans_release,
+ .ioctl = blktrans_ioctl,
+ .getgeo = blktrans_getgeo,
+};
+
+int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
+{
+ struct mtd_blktrans_ops *tr = new->tr;
+ struct mtd_blktrans_dev *d;
+ int last_devnum = -1;
+ struct gendisk *gd;
+ int ret;
+
+ if (mutex_trylock(&mtd_table_mutex)) {
+ mutex_unlock(&mtd_table_mutex);
+ BUG();
+ }
+
+ mutex_lock(&blktrans_ref_mutex);
+ list_for_each_entry(d, &tr->devs, list) {
+ if (new->devnum == -1) {
+ /* Use first free number */
+ if (d->devnum != last_devnum+1) {
+ /* Found a free devnum. Plug it in here */
+ new->devnum = last_devnum+1;
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ } else if (d->devnum == new->devnum) {
+ /* Required number taken */
+ mutex_unlock(&blktrans_ref_mutex);
+ return -EBUSY;
+ } else if (d->devnum > new->devnum) {
+ /* Required number was free */
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ last_devnum = d->devnum;
+ }
+
+ ret = -EBUSY;
+ if (new->devnum == -1)
+ new->devnum = last_devnum+1;
+
+ /* Check that the device and any partitions will get valid
+ * minor numbers and that the disk naming code below can cope
+ * with this number. */
+ if (new->devnum > (MINORMASK >> tr->part_bits) ||
+ (tr->part_bits && new->devnum >= 27 * 26)) {
+ mutex_unlock(&blktrans_ref_mutex);
+ goto error1;
+ }
+
+ list_add_tail(&new->list, &tr->devs);
+ added:
+ mutex_unlock(&blktrans_ref_mutex);
+
+ mutex_init(&new->lock);
+ kref_init(&new->ref);
+ if (!tr->writesect)
+ new->readonly = 1;
+
+ /* Create gendisk */
+ ret = -ENOMEM;
+ gd = alloc_disk(1 << tr->part_bits);
+
+ if (!gd)
+ goto error2;
+
+ new->disk = gd;
+ gd->private_data = new;
+ gd->major = tr->major;
+ gd->first_minor = (new->devnum) << tr->part_bits;
+ gd->fops = &mtd_block_ops;
+
+ if (tr->part_bits)
+ if (new->devnum < 26)
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%c", tr->name, 'a' + new->devnum);
+ else
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%c%c", tr->name,
+ 'a' - 1 + new->devnum / 26,
+ 'a' + new->devnum % 26);
+ else
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%d", tr->name, new->devnum);
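+	/*
+	 * Resulting names, for illustration: with part_bits set, devnum 0
+	 * becomes "<name>a", devnum 25 "<name>z" and devnum 26 "<name>aa";
+	 * without part_bits, devnum 0 is simply "<name>0".
+	 */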
+
+ set_capacity(gd, (new->size * tr->blksize) >> 9);
+
+ /* Create the request queue */
+ spin_lock_init(&new->queue_lock);
+ new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
+
+ if (!new->rq)
+ goto error3;
+
+ if (tr->flush)
+ blk_queue_flush(new->rq, REQ_FLUSH);
+
+ new->rq->queuedata = new;
+ blk_queue_logical_block_size(new->rq, tr->blksize);
+
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);
+
+ if (tr->discard) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
+ new->rq->limits.max_discard_sectors = UINT_MAX;
+ }
+
+ gd->queue = new->rq;
+
+ /* Create processing workqueue */
+ new->wq = alloc_workqueue("%s%d", 0, 0,
+ tr->name, new->mtd->index);
+ if (!new->wq)
+ goto error4;
+ INIT_WORK(&new->work, mtd_blktrans_work);
+
+ gd->driverfs_dev = &new->mtd->dev;
+
+ if (new->readonly)
+ set_disk_ro(gd, 1);
+
+ add_disk(gd);
+
+ if (new->disk_attributes) {
+ ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
+ new->disk_attributes);
+ WARN_ON(ret);
+ }
+ return 0;
+error4:
+ blk_cleanup_queue(new->rq);
+error3:
+ put_disk(new->disk);
+error2:
+ list_del(&new->list);
+error1:
+ return ret;
+}
+
+int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
+{
+ unsigned long flags;
+
+ if (mutex_trylock(&mtd_table_mutex)) {
+ mutex_unlock(&mtd_table_mutex);
+ BUG();
+ }
+
+ if (old->disk_attributes)
+ sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
+ old->disk_attributes);
+
+	/* Stop new requests from arriving */
+ del_gendisk(old->disk);
+
+	/* Stop the workqueue. This will run any pending requests. */
+ destroy_workqueue(old->wq);
+
+ /* Kill current requests */
+ spin_lock_irqsave(&old->queue_lock, flags);
+ old->rq->queuedata = NULL;
+ blk_start_queue(old->rq);
+ spin_unlock_irqrestore(&old->queue_lock, flags);
+
+ /* If the device is currently open, tell trans driver to close it,
+ then put mtd device, and don't touch it again */
+ mutex_lock(&old->lock);
+ if (old->open) {
+ if (old->tr->release)
+ old->tr->release(old);
+ __put_mtd_device(old->mtd);
+ }
+
+ old->mtd = NULL;
+
+ mutex_unlock(&old->lock);
+ blktrans_dev_put(old);
+ return 0;
+}
+
+static void blktrans_notify_remove(struct mtd_info *mtd)
+{
+ struct mtd_blktrans_ops *tr;
+ struct mtd_blktrans_dev *dev, *next;
+
+ list_for_each_entry(tr, &blktrans_majors, list)
+ list_for_each_entry_safe(dev, next, &tr->devs, list)
+ if (dev->mtd == mtd)
+ tr->remove_dev(dev);
+}
+
+static void blktrans_notify_add(struct mtd_info *mtd)
+{
+ struct mtd_blktrans_ops *tr;
+
+ if (mtd->type == MTD_ABSENT)
+ return;
+
+ list_for_each_entry(tr, &blktrans_majors, list)
+ tr->add_mtd(tr, mtd);
+}
+
+static struct mtd_notifier blktrans_notifier = {
+ .add = blktrans_notify_add,
+ .remove = blktrans_notify_remove,
+};
+
+int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
+{
+ struct mtd_info *mtd;
+ int ret;
+
+	/* Register the notifier if/when the first device type is
+	   registered, to prevent the link/init ordering from breaking
+	   things. */
+ if (!blktrans_notifier.list.next)
+ register_mtd_user(&blktrans_notifier);
+
+
+ mutex_lock(&mtd_table_mutex);
+
+ ret = register_blkdev(tr->major, tr->name);
+ if (ret < 0) {
+ printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
+ tr->name, tr->major, ret);
+ mutex_unlock(&mtd_table_mutex);
+ return ret;
+ }
+
+ if (ret)
+ tr->major = ret;
+
+ tr->blkshift = ffs(tr->blksize) - 1;
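+	/* e.g. blksize 512 -> ffs(512) = 10 -> blkshift 9 */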
+
+ INIT_LIST_HEAD(&tr->devs);
+ list_add(&tr->list, &blktrans_majors);
+
+ mtd_for_each_device(mtd)
+ if (mtd->type != MTD_ABSENT)
+ tr->add_mtd(tr, mtd);
+
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
+}
+
+int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
+{
+ struct mtd_blktrans_dev *dev, *next;
+
+ mutex_lock(&mtd_table_mutex);
+
+ /* Remove it from the list of active majors */
+ list_del(&tr->list);
+
+ list_for_each_entry_safe(dev, next, &tr->devs, list)
+ tr->remove_dev(dev);
+
+ unregister_blkdev(tr->major, tr->name);
+ mutex_unlock(&mtd_table_mutex);
+
+ BUG_ON(!list_empty(&tr->devs));
+ return 0;
+}
+
+static void __exit mtd_blktrans_exit(void)
+{
+ /* No race here -- if someone's currently in register_mtd_blktrans
+ we're screwed anyway. */
+ if (blktrans_notifier.list.next)
+ unregister_mtd_user(&blktrans_notifier);
+}
+
+module_exit(mtd_blktrans_exit);
+
+EXPORT_SYMBOL_GPL(register_mtd_blktrans);
+EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
+EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
+EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
+
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
new file mode 100644
index 000000000..bb4c14f83
--- /dev/null
+++ b/drivers/mtd/mtdblock.c
@@ -0,0 +1,396 @@
+/*
+ * Direct MTD block device access
+ *
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/mutex.h>
+#include <linux/major.h>
+
+
+struct mtdblk_dev {
+ struct mtd_blktrans_dev mbd;
+ int count;
+ struct mutex cache_mutex;
+ unsigned char *cache_data;
+ unsigned long cache_offset;
+ unsigned int cache_size;
+ enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
+};
+
+/*
+ * Cache stuff...
+ *
+ * Since typical flash erasable sectors are much larger than what Linux's
+ * buffer cache can handle, we must implement read-modify-write on flash
+ * sectors for each block write requests. To avoid over-erasing flash sectors
+ * and to speed things up, we locally cache a whole flash sector while it is
+ * being written to until a different sector is required.
+ */
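+
+/*
+ * The resulting cache_state machine, as implemented below: STATE_EMPTY
+ * until a sector has been read in (STATE_CLEAN), STATE_DIRTY once it has
+ * been partially overwritten, and back to STATE_EMPTY after
+ * write_cached_data() flushes the sector out.
+ */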
+
+static void erase_callback(struct erase_info *done)
+{
+ wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
+ wake_up(wait_q);
+}
+
+static int erase_write (struct mtd_info *mtd, unsigned long pos,
+ int len, const char *buf)
+{
+ struct erase_info erase;
+ DECLARE_WAITQUEUE(wait, current);
+ wait_queue_head_t wait_q;
+ size_t retlen;
+ int ret;
+
+ /*
+ * First, let's erase the flash block.
+ */
+
+ init_waitqueue_head(&wait_q);
+ erase.mtd = mtd;
+ erase.callback = erase_callback;
+ erase.addr = pos;
+ erase.len = len;
+ erase.priv = (u_long)&wait_q;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&wait_q, &wait);
+
+ ret = mtd_erase(mtd, &erase);
+ if (ret) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&wait_q, &wait);
+ printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
+ "on \"%s\" failed\n",
+ pos, len, mtd->name);
+ return ret;
+ }
+
+ schedule(); /* Wait for erase to finish. */
+ remove_wait_queue(&wait_q, &wait);
+
+ /*
+ * Next, write the data to flash.
+ */
+
+ ret = mtd_write(mtd, pos, len, &retlen, buf);
+ if (ret)
+ return ret;
+ if (retlen != len)
+ return -EIO;
+ return 0;
+}
+
+
+static int write_cached_data (struct mtdblk_dev *mtdblk)
+{
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
+ int ret;
+
+ if (mtdblk->cache_state != STATE_DIRTY)
+ return 0;
+
+ pr_debug("mtdblock: writing cached data for \"%s\" "
+ "at 0x%lx, size 0x%x\n", mtd->name,
+ mtdblk->cache_offset, mtdblk->cache_size);
+
+ ret = erase_write (mtd, mtdblk->cache_offset,
+ mtdblk->cache_size, mtdblk->cache_data);
+ if (ret)
+ return ret;
+
+ /*
+ * Here we could arguably set the cache state to STATE_CLEAN.
+ * However this could lead to inconsistency since we will not
+ * be notified if this content is altered on the flash by other
+ * means. Let's declare it empty and leave buffering tasks to
+ * the buffer cache instead.
+ */
+ mtdblk->cache_state = STATE_EMPTY;
+ return 0;
+}
+
+
+static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
+ int len, const char *buf)
+{
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
+ unsigned int sect_size = mtdblk->cache_size;
+ size_t retlen;
+ int ret;
+
+ pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
+ mtd->name, pos, len);
+
+ if (!sect_size)
+ return mtd_write(mtd, pos, len, &retlen, buf);
+
+ while (len > 0) {
+ unsigned long sect_start = (pos/sect_size)*sect_size;
+ unsigned int offset = pos - sect_start;
+ unsigned int size = sect_size - offset;
+		if (size > len)
+ size = len;
+
+ if (size == sect_size) {
+ /*
+ * We are covering a whole sector. Thus there is no
+ * need to bother with the cache while it may still be
+ * useful for other partial writes.
+ */
+ ret = erase_write (mtd, pos, size, buf);
+ if (ret)
+ return ret;
+ } else {
+ /* Partial sector: need to use the cache */
+
+ if (mtdblk->cache_state == STATE_DIRTY &&
+ mtdblk->cache_offset != sect_start) {
+ ret = write_cached_data(mtdblk);
+ if (ret)
+ return ret;
+ }
+
+ if (mtdblk->cache_state == STATE_EMPTY ||
+ mtdblk->cache_offset != sect_start) {
+ /* fill the cache with the current sector */
+ mtdblk->cache_state = STATE_EMPTY;
+ ret = mtd_read(mtd, sect_start, sect_size,
+ &retlen, mtdblk->cache_data);
+ if (ret)
+ return ret;
+ if (retlen != sect_size)
+ return -EIO;
+
+ mtdblk->cache_offset = sect_start;
+ mtdblk->cache_size = sect_size;
+ mtdblk->cache_state = STATE_CLEAN;
+ }
+
+ /* write data to our local cache */
+ memcpy (mtdblk->cache_data + offset, buf, size);
+ mtdblk->cache_state = STATE_DIRTY;
+ }
+
+ buf += size;
+ pos += size;
+ len -= size;
+ }
+
+ return 0;
+}
+
+
+static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
+ int len, char *buf)
+{
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
+ unsigned int sect_size = mtdblk->cache_size;
+ size_t retlen;
+ int ret;
+
+ pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
+ mtd->name, pos, len);
+
+ if (!sect_size)
+ return mtd_read(mtd, pos, len, &retlen, buf);
+
+ while (len > 0) {
+ unsigned long sect_start = (pos/sect_size)*sect_size;
+ unsigned int offset = pos - sect_start;
+ unsigned int size = sect_size - offset;
+ if (size > len)
+ size = len;
+
+ /*
+ * Check if the requested data is already cached
+ * Read the requested amount of data from our internal cache if it
+ * contains what we want, otherwise we read the data directly
+ * from flash.
+ */
+ if (mtdblk->cache_state != STATE_EMPTY &&
+ mtdblk->cache_offset == sect_start) {
+ memcpy (buf, mtdblk->cache_data + offset, size);
+ } else {
+ ret = mtd_read(mtd, pos, size, &retlen, buf);
+ if (ret)
+ return ret;
+ if (retlen != size)
+ return -EIO;
+ }
+
+ buf += size;
+ pos += size;
+ len -= size;
+ }
+
+ return 0;
+}
+
+static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+ return do_cached_read(mtdblk, block<<9, 512, buf);
+}
+
+static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+ if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
+ mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
+ if (!mtdblk->cache_data)
+ return -EINTR;
+ /* -EINTR is not really correct, but it is the best match
+ * documented in man 2 write for all cases. We could also
+ * return -EAGAIN sometimes, but why bother?
+ */
+ }
+ return do_cached_write(mtdblk, block<<9, 512, buf);
+}
+
+static int mtdblock_open(struct mtd_blktrans_dev *mbd)
+{
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
+
+ pr_debug("mtdblock_open\n");
+
+ if (mtdblk->count) {
+ mtdblk->count++;
+ return 0;
+ }
+
+ /* OK, it's not open. Create cache info for it */
+ mtdblk->count = 1;
+ mutex_init(&mtdblk->cache_mutex);
+ mtdblk->cache_state = STATE_EMPTY;
+ if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
+ mtdblk->cache_size = mbd->mtd->erasesize;
+ mtdblk->cache_data = NULL;
+ }
+
+ pr_debug("ok\n");
+
+ return 0;
+}
+
+static void mtdblock_release(struct mtd_blktrans_dev *mbd)
+{
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
+
+ pr_debug("mtdblock_release\n");
+
+ mutex_lock(&mtdblk->cache_mutex);
+ write_cached_data(mtdblk);
+ mutex_unlock(&mtdblk->cache_mutex);
+
+ if (!--mtdblk->count) {
+ /*
+ * It was the last usage. Free the cache, but only sync if
+ * opened for writing.
+ */
+ if (mbd->file_mode & FMODE_WRITE)
+ mtd_sync(mbd->mtd);
+ vfree(mtdblk->cache_data);
+ }
+
+ pr_debug("ok\n");
+}
+
+static int mtdblock_flush(struct mtd_blktrans_dev *dev)
+{
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+
+ mutex_lock(&mtdblk->cache_mutex);
+ write_cached_data(mtdblk);
+ mutex_unlock(&mtdblk->cache_mutex);
+ mtd_sync(dev->mtd);
+ return 0;
+}
+
+static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+ if (!dev)
+ return;
+
+ dev->mbd.mtd = mtd;
+ dev->mbd.devnum = mtd->index;
+
+ dev->mbd.size = mtd->size >> 9;
+ dev->mbd.tr = tr;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ dev->mbd.readonly = 1;
+
+ if (add_mtd_blktrans_dev(&dev->mbd))
+ kfree(dev);
+}
+
+static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ del_mtd_blktrans_dev(dev);
+}
+
+static struct mtd_blktrans_ops mtdblock_tr = {
+ .name = "mtdblock",
+ .major = MTD_BLOCK_MAJOR,
+ .part_bits = 0,
+ .blksize = 512,
+ .open = mtdblock_open,
+ .flush = mtdblock_flush,
+ .release = mtdblock_release,
+ .readsect = mtdblock_readsect,
+ .writesect = mtdblock_writesect,
+ .add_mtd = mtdblock_add_mtd,
+ .remove_dev = mtdblock_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_mtdblock(void)
+{
+ return register_mtd_blktrans(&mtdblock_tr);
+}
+
+static void __exit cleanup_mtdblock(void)
+{
+ deregister_mtd_blktrans(&mtdblock_tr);
+}
+
+module_init(init_mtdblock);
+module_exit(cleanup_mtdblock);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net> et al.");
+MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
new file mode 100644
index 000000000..fb5dc8936
--- /dev/null
+++ b/drivers/mtd/mtdblock_ro.c
@@ -0,0 +1,99 @@
+/*
+ * Simple read-only (writable only for RAM) mtdblock driver
+ *
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/module.h>
+#include <linux/major.h>
+
+static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ size_t retlen;
+
+ if (mtd_read(dev->mtd, (block * 512), 512, &retlen, buf))
+ return 1;
+ return 0;
+}
+
+static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ size_t retlen;
+
+ if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf))
+ return 1;
+ return 0;
+}
+
+static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+ if (!dev)
+ return;
+
+ dev->mtd = mtd;
+ dev->devnum = mtd->index;
+
+ dev->size = mtd->size >> 9;
+ dev->tr = tr;
+ dev->readonly = 1;
+
+ if (add_mtd_blktrans_dev(dev))
+ kfree(dev);
+}
+
+static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ del_mtd_blktrans_dev(dev);
+}
+
+static struct mtd_blktrans_ops mtdblock_tr = {
+ .name = "mtdblock",
+ .major = MTD_BLOCK_MAJOR,
+ .part_bits = 0,
+ .blksize = 512,
+ .readsect = mtdblock_readsect,
+ .writesect = mtdblock_writesect,
+ .add_mtd = mtdblock_add_mtd,
+ .remove_dev = mtdblock_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init mtdblock_init(void)
+{
+ return register_mtd_blktrans(&mtdblock_tr);
+}
+
+static void __exit mtdblock_exit(void)
+{
+ deregister_mtd_blktrans(&mtdblock_tr);
+}
+
+module_init(mtdblock_init);
+module_exit(mtdblock_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Simple read-only block device emulation access to MTD devices");
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
new file mode 100644
index 000000000..55fa27ecf
--- /dev/null
+++ b/drivers/mtd/mtdchar.c
@@ -0,0 +1,1164 @@
+/*
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/backing-dev.h>
+#include <linux/compat.h>
+#include <linux/mount.h>
+#include <linux/blkpg.h>
+#include <linux/magic.h>
+#include <linux/major.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/map.h>
+
+#include <asm/uaccess.h>
+
+#include "mtdcore.h"
+
+static DEFINE_MUTEX(mtd_mutex);
+
+/*
+ * Data structure to hold the pointer to the mtd device as well
+ * as mode information of various use cases.
+ */
+struct mtd_file_info {
+ struct mtd_info *mtd;
+ enum mtd_file_modes mode;
+};
+
+static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
+}
+
+static int mtdchar_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ int devnum = minor >> 1;
+ int ret = 0;
+ struct mtd_info *mtd;
+ struct mtd_file_info *mfi;
+
+ pr_debug("MTD_open\n");
+
+ /* You can't open the RO devices RW */
+ if ((file->f_mode & FMODE_WRITE) && (minor & 1))
+ return -EACCES;
+
+ mutex_lock(&mtd_mutex);
+ mtd = get_mtd_device(NULL, devnum);
+
+ if (IS_ERR(mtd)) {
+ ret = PTR_ERR(mtd);
+ goto out;
+ }
+
+ if (mtd->type == MTD_ABSENT) {
+ ret = -ENODEV;
+ goto out1;
+ }
+
+ /* You can't open it RW if it's not a writeable device */
+ if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
+ ret = -EACCES;
+ goto out1;
+ }
+
+ mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
+ if (!mfi) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+ mfi->mtd = mtd;
+ file->private_data = mfi;
+ mutex_unlock(&mtd_mutex);
+ return 0;
+
+out1:
+ put_mtd_device(mtd);
+out:
+ mutex_unlock(&mtd_mutex);
+ return ret;
+} /* mtdchar_open */
+
+/*====================================================================*/
+
+static int mtdchar_close(struct inode *inode, struct file *file)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+
+ pr_debug("MTD_close\n");
+
+ /* Only sync if opened RW */
+ if ((file->f_mode & FMODE_WRITE))
+ mtd_sync(mtd);
+
+ put_mtd_device(mtd);
+ file->private_data = NULL;
+ kfree(mfi);
+
+ return 0;
+} /* mtdchar_close */
+
+/* Back in June 2001, dwmw2 wrote:
+ *
+ * FIXME: This _really_ needs to die. In 2.5, we should lock the
+ * userspace buffer down and use it directly with readv/writev.
+ *
+ * The implementation below, using mtd_kmalloc_up_to, mitigates
+ * allocation failures when the system is under low-memory situations
+ * or if memory is highly fragmented at the cost of reducing the
+ * performance of the requested transfer due to a smaller buffer size.
+ *
+ * A more complex but more memory-efficient implementation based on
+ * get_user_pages and iovecs to cover extents of those pages is a
+ * longer-term goal, as intimated by dwmw2 above. However, for the
+ * write case, this requires yet more complex head and tail transfer
+ * handling when those head and tail offsets and sizes are such that
+ * alignment requirements are not met in the NAND subdriver.
+ */
+
+static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ size_t retlen;
+	size_t total_retlen = 0;
+	int ret = 0;
+ int len;
+ size_t size = count;
+ char *kbuf;
+
+ pr_debug("MTD_read\n");
+
+ if (*ppos + count > mtd->size)
+ count = mtd->size - *ppos;
+
+ if (!count)
+ return 0;
+
+ kbuf = mtd_kmalloc_up_to(mtd, &size);
+ if (!kbuf)
+ return -ENOMEM;
+
+ while (count) {
+ len = min_t(size_t, count, size);
+
+ switch (mfi->mode) {
+ case MTD_FILE_MODE_OTP_FACTORY:
+ ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
+ break;
+ case MTD_FILE_MODE_OTP_USER:
+ ret = mtd_read_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
+ break;
+ case MTD_FILE_MODE_RAW:
+ {
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OPS_RAW;
+ ops.datbuf = kbuf;
+ ops.oobbuf = NULL;
+ ops.len = len;
+
+ ret = mtd_read_oob(mtd, *ppos, &ops);
+ retlen = ops.retlen;
+ break;
+ }
+ default:
+ ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
+ }
+ /* Nand returns -EBADMSG on ECC errors, but it returns
+ * the data. For our userspace tools it is important
+ * to dump areas with ECC errors!
+ * For kernel internal usage it also might return -EUCLEAN
+ * to signal the caller that a bitflip has occurred and has
+ * been corrected by the ECC algorithm.
+ * Userspace software which accesses NAND this way
+ * must be aware of the fact that it deals with NAND
+ */
+ if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
+ *ppos += retlen;
+ if (copy_to_user(buf, kbuf, retlen)) {
+ kfree(kbuf);
+ return -EFAULT;
+ }
+ else
+ total_retlen += retlen;
+
+ count -= retlen;
+ buf += retlen;
+ if (retlen == 0)
+ count = 0;
+ }
+ else {
+ kfree(kbuf);
+ return ret;
+ }
+
+ }
+
+ kfree(kbuf);
+ return total_retlen;
+} /* mtdchar_read */
+
+static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ size_t size = count;
+ char *kbuf;
+ size_t retlen;
+	size_t total_retlen = 0;
+	int ret = 0;
+ int len;
+
+ pr_debug("MTD_write\n");
+
+ if (*ppos == mtd->size)
+ return -ENOSPC;
+
+ if (*ppos + count > mtd->size)
+ count = mtd->size - *ppos;
+
+ if (!count)
+ return 0;
+
+ kbuf = mtd_kmalloc_up_to(mtd, &size);
+ if (!kbuf)
+ return -ENOMEM;
+
+ while (count) {
+ len = min_t(size_t, count, size);
+
+ if (copy_from_user(kbuf, buf, len)) {
+ kfree(kbuf);
+ return -EFAULT;
+ }
+
+ switch (mfi->mode) {
+ case MTD_FILE_MODE_OTP_FACTORY:
+ ret = -EROFS;
+ break;
+ case MTD_FILE_MODE_OTP_USER:
+ ret = mtd_write_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
+ break;
+
+ case MTD_FILE_MODE_RAW:
+ {
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OPS_RAW;
+ ops.datbuf = kbuf;
+ ops.oobbuf = NULL;
+ ops.ooboffs = 0;
+ ops.len = len;
+
+ ret = mtd_write_oob(mtd, *ppos, &ops);
+ retlen = ops.retlen;
+ break;
+ }
+
+ default:
+ ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
+ }
+
+ /*
+ * Return -ENOSPC only if no data could be written at all.
+ * Otherwise just return the number of bytes that actually
+ * have been written.
+ */
+ if ((ret == -ENOSPC) && (total_retlen))
+ break;
+
+ if (!ret) {
+ *ppos += retlen;
+ total_retlen += retlen;
+ count -= retlen;
+ buf += retlen;
+ }
+ else {
+ kfree(kbuf);
+ return ret;
+ }
+ }
+
+ kfree(kbuf);
+ return total_retlen;
+} /* mtdchar_write */
+
+/*======================================================================
+
+ IOCTL calls for getting device parameters.
+
+======================================================================*/
+static void mtdchar_erase_callback (struct erase_info *instr)
+{
+ wake_up((wait_queue_head_t *)instr->priv);
+}
+
+static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
+{
+ struct mtd_info *mtd = mfi->mtd;
+ size_t retlen;
+
+ switch (mode) {
+ case MTD_OTP_FACTORY:
+ if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+ -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
+ mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
+ break;
+ case MTD_OTP_USER:
+ if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+ -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
+ mfi->mode = MTD_FILE_MODE_OTP_USER;
+ break;
+ case MTD_OTP_OFF:
+ mfi->mode = MTD_FILE_MODE_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_oob_ops ops;
+ uint32_t retlen;
+ int ret = 0;
+
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->_write_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
+
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->writesize - 1);
+ ops.datbuf = NULL;
+ ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+ MTD_OPS_PLACE_OOB;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = memdup_user(ptr, length);
+ if (IS_ERR(ops.oobbuf))
+ return PTR_ERR(ops.oobbuf);
+
+ start &= ~((uint64_t)mtd->writesize - 1);
+ ret = mtd_write_oob(mtd, start, &ops);
+
+ if (ops.oobretlen > 0xFFFFFFFFU)
+ ret = -EOVERFLOW;
+ retlen = ops.oobretlen;
+ if (copy_to_user(retp, &retlen, sizeof(length)))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
+static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_oob_ops ops;
+ int ret = 0;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->writesize - 1);
+ ops.datbuf = NULL;
+ ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+ MTD_OPS_PLACE_OOB;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ start &= ~((uint64_t)mtd->writesize - 1);
+ ret = mtd_read_oob(mtd, start, &ops);
+
+ if (put_user(ops.oobretlen, retp))
+ ret = -EFAULT;
+ else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
+ ops.oobretlen))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+
+ /*
+ * NAND returns -EBADMSG on ECC errors, but it returns the OOB
+ * data. For our userspace tools it is important to dump areas
+ * with ECC errors!
+ * For kernel internal usage it also might return -EUCLEAN
+	 * to signal the caller that a bitflip has occurred and has
+ * been corrected by the ECC algorithm.
+ *
+ * Note: currently the standard NAND function, nand_read_oob_std,
+ * does not calculate ECC for the OOB area, so do not rely on
+ * this behavior unless you have replaced it with your own.
+ */
+ if (mtd_is_bitflip_or_eccerr(ret))
+ return 0;
+
+ return ret;
+}
+
+/*
+ * Copies (and truncates, if necessary) data from the larger struct,
+ * nand_ecclayout, to the smaller, deprecated layout struct,
+ * nand_ecclayout_user. This is necessary only to support the deprecated
+ * API ioctl ECCGETLAYOUT while allowing all new functionality to use
+ * nand_ecclayout flexibly (i.e. the struct may change size in new
+ * releases without requiring major rewrites).
+ */
+static int shrink_ecclayout(const struct nand_ecclayout *from,
+ struct nand_ecclayout_user *to)
+{
+ int i;
+
+ if (!from || !to)
+ return -EINVAL;
+
+ memset(to, 0, sizeof(*to));
+
+ to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
+ for (i = 0; i < to->eccbytes; i++)
+ to->eccpos[i] = from->eccpos[i];
+
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
+ if (from->oobfree[i].length == 0 &&
+ from->oobfree[i].offset == 0)
+ break;
+ to->oobavail += from->oobfree[i].length;
+ to->oobfree[i] = from->oobfree[i];
+ }
+
+ return 0;
+}
+
+static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
+ struct blkpg_ioctl_arg __user *arg)
+{
+ struct blkpg_ioctl_arg a;
+ struct blkpg_partition p;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
+ return -EFAULT;
+
+ if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+ return -EFAULT;
+
+ switch (a.op) {
+ case BLKPG_ADD_PARTITION:
+
+ /* Only master mtd device must be used to add partitions */
+ if (mtd_is_partition(mtd))
+ return -EINVAL;
+
+ /* Sanitize user input */
+ p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
+
+ return mtd_add_partition(mtd, p.devname, p.start, p.length);
+
+ case BLKPG_DEL_PARTITION:
+
+ if (p.pno < 0)
+ return -EINVAL;
+
+ return mtd_del_partition(mtd, p.pno);
+
+ default:
+ return -EINVAL;
+ }
+}
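+
+/*
+ * Hedged userspace sketch (not part of this driver) of exercising the
+ * BLKPG path above via the MTD character device; "example" and the
+ * 1 MiB size are illustrative values only:
+ *
+ *	struct blkpg_partition p = {
+ *		.start  = 0,
+ *		.length = 1024 * 1024,
+ *	};
+ *	struct blkpg_ioctl_arg a = {
+ *		.op   = BLKPG_ADD_PARTITION,
+ *		.data = &p,
+ *	};
+ *	strcpy(p.devname, "example");
+ *	if (ioctl(fd, BLKPG, &a) < 0)
+ *		perror("BLKPG");
+ */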
+
+static int mtdchar_write_ioctl(struct mtd_info *mtd,
+ struct mtd_write_req __user *argp)
+{
+ struct mtd_write_req req;
+ struct mtd_oob_ops ops;
+ const void __user *usr_data, *usr_oob;
+ int ret;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ usr_data = (const void __user *)(uintptr_t)req.usr_data;
+ usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
+ if (!access_ok(VERIFY_READ, usr_data, req.len) ||
+ !access_ok(VERIFY_READ, usr_oob, req.ooblen))
+ return -EFAULT;
+
+ if (!mtd->_write_oob)
+ return -EOPNOTSUPP;
+
+ ops.mode = req.mode;
+ ops.len = (size_t)req.len;
+ ops.ooblen = (size_t)req.ooblen;
+ ops.ooboffs = 0;
+
+ if (usr_data) {
+ ops.datbuf = memdup_user(usr_data, ops.len);
+ if (IS_ERR(ops.datbuf))
+ return PTR_ERR(ops.datbuf);
+ } else {
+ ops.datbuf = NULL;
+ }
+
+ if (usr_oob) {
+ ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
+ if (IS_ERR(ops.oobbuf)) {
+ kfree(ops.datbuf);
+ return PTR_ERR(ops.oobbuf);
+ }
+ } else {
+ ops.oobbuf = NULL;
+ }
+
+ ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
+
+ kfree(ops.datbuf);
+ kfree(ops.oobbuf);
+
+ return ret;
+}
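+
+/*
+ * Hedged userspace sketch (not part of this driver) of the MEMWRITE
+ * ioctl handled above; buffer names and sizes are illustrative. Field
+ * names follow struct mtd_write_req from mtd-abi.h:
+ *
+ *	struct mtd_write_req req = {
+ *		.start    = 0,
+ *		.len      = pagesize,
+ *		.ooblen   = oobsize,
+ *		.usr_data = (__u64)(unsigned long)databuf,
+ *		.usr_oob  = (__u64)(unsigned long)oobbuf,
+ *		.mode     = MTD_OPS_RAW,
+ *	};
+ *	if (ioctl(fd, MEMWRITE, &req) < 0)
+ *		perror("MEMWRITE");
+ */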
+
+static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ void __user *argp = (void __user *)arg;
+ int ret = 0;
+ u_long size;
+ struct mtd_info_user info;
+
+ pr_debug("MTD_ioctl\n");
+
+ size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
+ if (cmd & IOC_IN) {
+ if (!access_ok(VERIFY_READ, argp, size))
+ return -EFAULT;
+ }
+ if (cmd & IOC_OUT) {
+ if (!access_ok(VERIFY_WRITE, argp, size))
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case MEMGETREGIONCOUNT:
+ if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
+ return -EFAULT;
+ break;
+
+ case MEMGETREGIONINFO:
+ {
+ uint32_t ur_idx;
+ struct mtd_erase_region_info *kr;
+ struct region_info_user __user *ur = argp;
+
+ if (get_user(ur_idx, &(ur->regionindex)))
+ return -EFAULT;
+
+ if (ur_idx >= mtd->numeraseregions)
+ return -EINVAL;
+
+ kr = &(mtd->eraseregions[ur_idx]);
+
+ if (put_user(kr->offset, &(ur->offset))
+ || put_user(kr->erasesize, &(ur->erasesize))
+ || put_user(kr->numblocks, &(ur->numblocks)))
+ return -EFAULT;
+
+ break;
+ }
+
+ case MEMGETINFO:
+ memset(&info, 0, sizeof(info));
+ info.type = mtd->type;
+ info.flags = mtd->flags;
+ info.size = mtd->size;
+ info.erasesize = mtd->erasesize;
+ info.writesize = mtd->writesize;
+ info.oobsize = mtd->oobsize;
+ /* The below field is obsolete */
+ info.padding = 0;
+ if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
+ return -EFAULT;
+ break;
+
+ case MEMERASE:
+ case MEMERASE64:
+ {
+ struct erase_info *erase;
+
+		if (!(file->f_mode & FMODE_WRITE))
+			return -EPERM;
+
+		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
+ if (!erase)
+ ret = -ENOMEM;
+ else {
+ wait_queue_head_t waitq;
+ DECLARE_WAITQUEUE(wait, current);
+
+ init_waitqueue_head(&waitq);
+
+ if (cmd == MEMERASE64) {
+ struct erase_info_user64 einfo64;
+
+ if (copy_from_user(&einfo64, argp,
+ sizeof(struct erase_info_user64))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo64.start;
+ erase->len = einfo64.length;
+ } else {
+ struct erase_info_user einfo32;
+
+ if (copy_from_user(&einfo32, argp,
+ sizeof(struct erase_info_user))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo32.start;
+ erase->len = einfo32.length;
+ }
+ erase->mtd = mtd;
+ erase->callback = mtdchar_erase_callback;
+ erase->priv = (unsigned long)&waitq;
+
+ /*
+ FIXME: Allow INTERRUPTIBLE. Which means
+ not having the wait_queue head on the stack.
+
+ If the wq_head is on the stack, and we
+ leave because we got interrupted, then the
+ wq_head is no longer there when the
+ callback routine tries to wake us up.
+ */
+ ret = mtd_erase(mtd, erase);
+ if (!ret) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&waitq, &wait);
+ if (erase->state != MTD_ERASE_DONE &&
+ erase->state != MTD_ERASE_FAILED)
+ schedule();
+ remove_wait_queue(&waitq, &wait);
+ set_current_state(TASK_RUNNING);
+
+				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
+ }
+ kfree(erase);
+ }
+ break;
+ }
+
+ case MEMWRITEOOB:
+ {
+ struct mtd_oob_buf buf;
+ struct mtd_oob_buf __user *buf_user = argp;
+
+ /* NOTE: writes return length to buf_user->length */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB:
+ {
+ struct mtd_oob_buf buf;
+ struct mtd_oob_buf __user *buf_user = argp;
+
+ /* NOTE: writes return length to buf_user->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->start);
+ break;
+ }
+
+ case MEMWRITEOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
+ break;
+ }
+
+ case MEMWRITE:
+ {
+ ret = mtdchar_write_ioctl(mtd,
+ (struct mtd_write_req __user *)arg);
+ break;
+ }
+
+ case MEMLOCK:
+ {
+ struct erase_info_user einfo;
+
+ if (copy_from_user(&einfo, argp, sizeof(einfo)))
+ return -EFAULT;
+
+ ret = mtd_lock(mtd, einfo.start, einfo.length);
+ break;
+ }
+
+ case MEMUNLOCK:
+ {
+ struct erase_info_user einfo;
+
+ if (copy_from_user(&einfo, argp, sizeof(einfo)))
+ return -EFAULT;
+
+ ret = mtd_unlock(mtd, einfo.start, einfo.length);
+ break;
+ }
+
+ case MEMISLOCKED:
+ {
+ struct erase_info_user einfo;
+
+ if (copy_from_user(&einfo, argp, sizeof(einfo)))
+ return -EFAULT;
+
+ ret = mtd_is_locked(mtd, einfo.start, einfo.length);
+ break;
+ }
+
+ /* Legacy interface */
+ case MEMGETOOBSEL:
+ {
+ struct nand_oobinfo oi;
+
+ if (!mtd->ecclayout)
+ return -EOPNOTSUPP;
+ if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
+ return -EINVAL;
+
+ oi.useecc = MTD_NANDECC_AUTOPLACE;
+ memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
+ memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
+ sizeof(oi.oobfree));
+ oi.eccbytes = mtd->ecclayout->eccbytes;
+
+ if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
+ return -EFAULT;
+ break;
+ }
+
+ case MEMGETBADBLOCK:
+ {
+ loff_t offs;
+
+ if (copy_from_user(&offs, argp, sizeof(loff_t)))
+ return -EFAULT;
+		return mtd_block_isbad(mtd, offs);
+ }
+
+ case MEMSETBADBLOCK:
+ {
+ loff_t offs;
+
+ if (copy_from_user(&offs, argp, sizeof(loff_t)))
+ return -EFAULT;
+		return mtd_block_markbad(mtd, offs);
+ }
+
+ case OTPSELECT:
+ {
+ int mode;
+ if (copy_from_user(&mode, argp, sizeof(int)))
+ return -EFAULT;
+
+ mfi->mode = MTD_FILE_MODE_NORMAL;
+
+ ret = otp_select_filemode(mfi, mode);
+
+ file->f_pos = 0;
+ break;
+ }
+
+ case OTPGETREGIONCOUNT:
+ case OTPGETREGIONINFO:
+ {
+ struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
+ size_t retlen;
+ if (!buf)
+ return -ENOMEM;
+ switch (mfi->mode) {
+ case MTD_FILE_MODE_OTP_FACTORY:
+ ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
+ break;
+ case MTD_FILE_MODE_OTP_USER:
+ ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (!ret) {
+ if (cmd == OTPGETREGIONCOUNT) {
+ int nbr = retlen / sizeof(struct otp_info);
+ ret = copy_to_user(argp, &nbr, sizeof(int));
+ } else
+ ret = copy_to_user(argp, buf, retlen);
+ if (ret)
+ ret = -EFAULT;
+ }
+ kfree(buf);
+ break;
+ }
+
+ case OTPLOCK:
+ {
+ struct otp_info oinfo;
+
+ if (mfi->mode != MTD_FILE_MODE_OTP_USER)
+ return -EINVAL;
+ if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
+ return -EFAULT;
+ ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
+ break;
+ }
+
+ /* This ioctl is being deprecated - it truncates the ECC layout */
+ case ECCGETLAYOUT:
+ {
+ struct nand_ecclayout_user *usrlay;
+
+ if (!mtd->ecclayout)
+ return -EOPNOTSUPP;
+
+ usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
+ if (!usrlay)
+ return -ENOMEM;
+
+ shrink_ecclayout(mtd->ecclayout, usrlay);
+
+ if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
+ ret = -EFAULT;
+ kfree(usrlay);
+ break;
+ }
+
+ case ECCGETSTATS:
+ {
+ if (copy_to_user(argp, &mtd->ecc_stats,
+ sizeof(struct mtd_ecc_stats)))
+ return -EFAULT;
+ break;
+ }
+
+ case MTDFILEMODE:
+ {
+ mfi->mode = 0;
+
+		switch (arg) {
+ case MTD_FILE_MODE_OTP_FACTORY:
+ case MTD_FILE_MODE_OTP_USER:
+ ret = otp_select_filemode(mfi, arg);
+ break;
+
+ case MTD_FILE_MODE_RAW:
+ if (!mtd_has_oob(mtd))
+ return -EOPNOTSUPP;
+ mfi->mode = arg;
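+			/* fall through to MTD_FILE_MODE_NORMAL */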
+
+ case MTD_FILE_MODE_NORMAL:
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ file->f_pos = 0;
+ break;
+ }
+
+ case BLKPG:
+ {
+ ret = mtdchar_blkpg_ioctl(mtd,
+ (struct blkpg_ioctl_arg __user *)arg);
+ break;
+ }
+
+ case BLKRRPART:
+ {
+ /* No reread partition feature. Just return ok */
+ ret = 0;
+ break;
+ }
+
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+} /* mtdchar_ioctl */
+
+static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+{
+ int ret;
+
+ mutex_lock(&mtd_mutex);
+ ret = mtdchar_ioctl(file, cmd, arg);
+ mutex_unlock(&mtd_mutex);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+struct mtd_oob_buf32 {
+ u_int32_t start;
+ u_int32_t length;
+ compat_caddr_t ptr; /* unsigned char* */
+};
+
+#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
+#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
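+
+/*
+ * 32-bit userspace passes a 32-bit pointer in 'ptr', so struct
+ * mtd_oob_buf32 differs in layout from the native struct mtd_oob_buf;
+ * these private ioctl numbers are translated below.
+ */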
+
+static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ void __user *argp = compat_ptr(arg);
+ int ret = 0;
+
+ mutex_lock(&mtd_mutex);
+
+ switch (cmd) {
+ case MEMWRITEOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_writeoob(file, mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+		/* NOTE: the number of bytes read is returned in buf->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_readoob(file, mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->start);
+ break;
+ }
+ default:
+ ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
+ }
+
+ mutex_unlock(&mtd_mutex);
+
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
+/*
+ * try to determine where a shared mapping can be made
+ * - only supported for NOMMU at the moment (with an MMU, the kernel can
+ *   place and copy private mappings itself, so no help is needed)
+ */
+#ifndef CONFIG_MMU
+static unsigned long mtdchar_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ unsigned long offset;
+ int ret;
+
+ if (addr != 0)
+ return (unsigned long) -EINVAL;
+
+ if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
+ return (unsigned long) -EINVAL;
+
+ offset = pgoff << PAGE_SHIFT;
+ if (offset > mtd->size - len)
+ return (unsigned long) -EINVAL;
+
+ ret = mtd_get_unmapped_area(mtd, len, offset, flags);
+ return ret == -EOPNOTSUPP ? -ENODEV : ret;
+}
+
+static unsigned mtdchar_mmap_capabilities(struct file *file)
+{
+ struct mtd_file_info *mfi = file->private_data;
+
+ return mtd_mmap_capabilities(mfi->mtd);
+}
+#endif
+
+/*
+ * set up a mapping for shared memory segments
+ */
+static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
+{
+#ifdef CONFIG_MMU
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ struct map_info *map = mtd->priv;
+
+ /* This is broken because it assumes the MTD device is map-based
+ and that mtd->priv is a valid struct map_info. It should be
+ replaced with something that uses the mtd_get_unmapped_area()
+ operation properly. */
+ if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
+#ifdef pgprot_noncached
+ if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
+ return vm_iomap_memory(vma, map->phys, map->size);
+ }
+ return -ENODEV;
+#else
+ return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
+#endif
+}
+
+static const struct file_operations mtd_fops = {
+ .owner = THIS_MODULE,
+ .llseek = mtdchar_lseek,
+ .read = mtdchar_read,
+ .write = mtdchar_write,
+ .unlocked_ioctl = mtdchar_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mtdchar_compat_ioctl,
+#endif
+ .open = mtdchar_open,
+ .release = mtdchar_close,
+ .mmap = mtdchar_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = mtdchar_get_unmapped_area,
+ .mmap_capabilities = mtdchar_mmap_capabilities,
+#endif
+};
+
+int __init init_mtdchar(void)
+{
+ int ret;
+
+ ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
+ "mtd", &mtd_fops);
+ if (ret < 0) {
+ pr_err("Can't allocate major number %d for MTD\n",
+ MTD_CHAR_MAJOR);
+ return ret;
+ }
+
+ return ret;
+}
+
+void __exit cleanup_mtdchar(void)
+{
+ __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
+}
+
+MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
new file mode 100644
index 000000000..239a8c806
--- /dev/null
+++ b/drivers/mtd/mtdconcat.c
@@ -0,0 +1,942 @@
+/*
+ * MTD device concatenation layer
+ *
+ * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
+ * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * NAND support by Christian Gan <cgan@iders.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/backing-dev.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/concat.h>
+
+#include <asm/div64.h>
+
+/*
+ * Our storage structure:
+ * Subdev points to an array of pointers to struct mtd_info objects
+ * which is allocated along with this structure
+ *
+ */
+struct mtd_concat {
+ struct mtd_info mtd;
+ int num_subdev;
+ struct mtd_info **subdev;
+};
+
+/*
+ * how to calculate the size required for the above structure,
+ * including the pointer array subdev points to:
+ */
+#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
+ ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
+
+/*
+ * Given a pointer to the MTD object in the mtd_concat structure,
+ * we can retrieve the pointer to that structure with this macro.
+ */
+#define CONCAT(x) ((struct mtd_concat *)(x))
+
+/*
+ * MTD methods which look up the relevant subdevice, translate the
+ * effective address and pass through to the subdevice.
+ */
+
+static int
+concat_read(struct mtd_info *mtd, loff_t from, size_t len,
+	    size_t *retlen, u_char *buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int ret = 0, err;
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (from >= subdev->size) {
+ /* Not destined for this subdev */
+ size = 0;
+ from -= subdev->size;
+ continue;
+ }
+ if (from + len > subdev->size)
+ /* First part goes into this subdev */
+ size = subdev->size - from;
+ else
+ /* Entire transaction goes into this subdev */
+ size = len;
+
+ err = mtd_read(subdev, from, size, &retsize, buf);
+
+ /* Save information about bitflips! */
+ if (unlikely(err)) {
+ if (mtd_is_eccerr(err)) {
+ mtd->ecc_stats.failed++;
+ ret = err;
+ } else if (mtd_is_bitflip(err)) {
+ mtd->ecc_stats.corrected++;
+ /* Do not overwrite -EBADMSG !! */
+ if (!ret)
+ ret = err;
+ } else
+ return err;
+ }
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ return ret;
+
+ buf += size;
+ from = 0;
+ }
+ return -EINVAL;
+}
+
+static int
+concat_write(struct mtd_info *mtd, loff_t to, size_t len,
+	     size_t *retlen, const u_char *buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (to >= subdev->size) {
+ size = 0;
+ to -= subdev->size;
+ continue;
+ }
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ err = mtd_write(subdev, to, size, &retsize, buf);
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ to = 0;
+ }
+ return err;
+}
+
+static int
+concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
+	      unsigned long count, loff_t to, size_t *retlen)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct kvec *vecs_copy;
+ unsigned long entry_low, entry_high;
+ size_t total_len = 0;
+ int i;
+ int err = -EINVAL;
+
+ /* Calculate total length of data */
+ for (i = 0; i < count; i++)
+ total_len += vecs[i].iov_len;
+
+ /* Check alignment */
+ if (mtd->writesize > 1) {
+ uint64_t __to = to;
+ if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
+ return -EINVAL;
+ }
+
+ /* make a copy of vecs */
+ vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
+ if (!vecs_copy)
+ return -ENOMEM;
+
+ entry_low = 0;
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, wsize, retsize, old_iov_len;
+
+ if (to >= subdev->size) {
+ to -= subdev->size;
+ continue;
+ }
+
+ size = min_t(uint64_t, total_len, subdev->size - to);
+ wsize = size; /* store for future use */
+
+ entry_high = entry_low;
+ while (entry_high < count) {
+ if (size <= vecs_copy[entry_high].iov_len)
+ break;
+ size -= vecs_copy[entry_high++].iov_len;
+ }
+
+ old_iov_len = vecs_copy[entry_high].iov_len;
+ vecs_copy[entry_high].iov_len = size;
+
+ err = mtd_writev(subdev, &vecs_copy[entry_low],
+ entry_high - entry_low + 1, to, &retsize);
+
+ vecs_copy[entry_high].iov_len = old_iov_len - size;
+ vecs_copy[entry_high].iov_base += size;
+
+ entry_low = entry_high;
+
+ if (err)
+ break;
+
+ *retlen += retsize;
+ total_len -= wsize;
+
+ if (total_len == 0)
+ break;
+
+ err = -EINVAL;
+ to = 0;
+ }
+
+ kfree(vecs_copy);
+ return err;
+}
+
+static int
+concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_oob_ops devops = *ops;
+ int i, err, ret = 0;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (from >= subdev->size) {
+ from -= subdev->size;
+ continue;
+ }
+
+ /* partial read ? */
+ if (from + devops.len > subdev->size)
+ devops.len = subdev->size - from;
+
+ err = mtd_read_oob(subdev, from, &devops);
+ ops->retlen += devops.retlen;
+ ops->oobretlen += devops.oobretlen;
+
+ /* Save information about bitflips! */
+ if (unlikely(err)) {
+ if (mtd_is_eccerr(err)) {
+ mtd->ecc_stats.failed++;
+ ret = err;
+ } else if (mtd_is_bitflip(err)) {
+ mtd->ecc_stats.corrected++;
+ /* Do not overwrite -EBADMSG !! */
+ if (!ret)
+ ret = err;
+ } else
+ return err;
+ }
+
+ if (devops.datbuf) {
+ devops.len = ops->len - ops->retlen;
+ if (!devops.len)
+ return ret;
+ devops.datbuf += devops.retlen;
+ }
+ if (devops.oobbuf) {
+ devops.ooblen = ops->ooblen - ops->oobretlen;
+ if (!devops.ooblen)
+ return ret;
+ devops.oobbuf += ops->oobretlen;
+ }
+
+ from = 0;
+ }
+ return -EINVAL;
+}
+
+static int
+concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_oob_ops devops = *ops;
+ int i, err;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (to >= subdev->size) {
+ to -= subdev->size;
+ continue;
+ }
+
+ /* partial write ? */
+ if (to + devops.len > subdev->size)
+ devops.len = subdev->size - to;
+
+ err = mtd_write_oob(subdev, to, &devops);
+ ops->retlen += devops.retlen;
+ ops->oobretlen += devops.oobretlen;
+ if (err)
+ return err;
+
+ if (devops.datbuf) {
+ devops.len = ops->len - ops->retlen;
+ if (!devops.len)
+ return 0;
+ devops.datbuf += devops.retlen;
+ }
+ if (devops.oobbuf) {
+ devops.ooblen = ops->ooblen - ops->oobretlen;
+ if (!devops.ooblen)
+ return 0;
+ devops.oobbuf += devops.oobretlen;
+ }
+ to = 0;
+ }
+ return -EINVAL;
+}
+
+static void concat_erase_callback(struct erase_info *instr)
+{
+ wake_up((wait_queue_head_t *) instr->priv);
+}
+
+static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ int err;
+ wait_queue_head_t waitq;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * This code was stol^H^H^H^Hinspired by mtdchar.c
+ */
+ init_waitqueue_head(&waitq);
+
+ erase->mtd = mtd;
+ erase->callback = concat_erase_callback;
+ erase->priv = (unsigned long) &waitq;
+
+ /*
+ * FIXME: Allow INTERRUPTIBLE. Which means
+ * not having the wait_queue head on the stack.
+ */
+ err = mtd_erase(mtd, erase);
+ if (!err) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&waitq, &wait);
+ if (erase->state != MTD_ERASE_DONE
+ && erase->state != MTD_ERASE_FAILED)
+ schedule();
+ remove_wait_queue(&waitq, &wait);
+ set_current_state(TASK_RUNNING);
+
+ err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
+ }
+ return err;
+}
+
+static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_info *subdev;
+ int i, err;
+ uint64_t length, offset = 0;
+ struct erase_info *erase;
+
+ /*
+ * Check for proper erase block alignment of the to-be-erased area.
+ * It is easier to do this based on the super device's erase
+ * region info rather than looking at each particular sub-device
+ * in turn.
+ */
+ if (!concat->mtd.numeraseregions) {
+ /* the easy case: device has uniform erase block size */
+ if (instr->addr & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ if (instr->len & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ } else {
+ /* device has variable erase size */
+ struct mtd_erase_region_info *erase_regions =
+ concat->mtd.eraseregions;
+
+ /*
+ * Find the erase region where the to-be-erased area begins:
+ */
+ for (i = 0; i < concat->mtd.numeraseregions &&
+ instr->addr >= erase_regions[i].offset; i++) ;
+ --i;
+
+ /*
+ * Now erase_regions[i] is the region in which the
+ * to-be-erased area begins. Verify that the starting
+ * offset is aligned to this region's erase size:
+ */
+ if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
+ return -EINVAL;
+
+ /*
+ * now find the erase region where the to-be-erased area ends:
+ */
+ for (; i < concat->mtd.numeraseregions &&
+ (instr->addr + instr->len) >= erase_regions[i].offset;
+ ++i) ;
+ --i;
+ /*
+ * check if the ending offset is aligned to this region's erase size
+ */
+ if (i < 0 || ((instr->addr + instr->len) &
+ (erase_regions[i].erasesize - 1)))
+ return -EINVAL;
+ }
+
+ /* make a local copy of instr to avoid modifying the caller's struct */
+ erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
+
+ if (!erase)
+ return -ENOMEM;
+
+ *erase = *instr;
+ length = instr->len;
+
+ /*
+ * find the subdevice where the to-be-erased area begins, adjust
+ * starting offset to be relative to the subdevice start
+ */
+ for (i = 0; i < concat->num_subdev; i++) {
+ subdev = concat->subdev[i];
+ if (subdev->size <= erase->addr) {
+ erase->addr -= subdev->size;
+ offset += subdev->size;
+ } else {
+ break;
+ }
+ }
+
+ /* must never happen since size limit has been verified above */
+ BUG_ON(i >= concat->num_subdev);
+
+ /* now do the erase: */
+ err = 0;
+ for (; length > 0; i++) {
+ /* loop for all subdevices affected by this request */
+ subdev = concat->subdev[i]; /* get current subdevice */
+
+ /* limit length to subdevice's size: */
+ if (erase->addr + length > subdev->size)
+ erase->len = subdev->size - erase->addr;
+ else
+ erase->len = length;
+
+ length -= erase->len;
+ if ((err = concat_dev_erase(subdev, erase))) {
+ /* sanity check: should never happen since
+ * block alignment has been checked above */
+ BUG_ON(err == -EINVAL);
+ if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr = erase->fail_addr + offset;
+ break;
+ }
+ /*
+ * erase->addr specifies the offset of the area to be
+ * erased *within the current subdevice*. It can be
+ * non-zero only the first time through this loop, i.e.
+ * for the first subdevice where blocks need to be erased.
+ * All the following erases must begin at the start of the
+ * current subdevice, i.e. at offset zero.
+ */
+ erase->addr = 0;
+ offset += subdev->size;
+ }
+ instr->state = erase->state;
+ kfree(erase);
+ if (err)
+ return err;
+
+ if (instr->callback)
+ instr->callback(instr);
+ return 0;
+}
+
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ uint64_t size;
+
+ if (ofs >= subdev->size) {
+ size = 0;
+ ofs -= subdev->size;
+ continue;
+ }
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ err = mtd_lock(subdev, ofs, size);
+ if (err)
+ break;
+
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+
+ return err;
+}
+
+static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ uint64_t size;
+
+ if (ofs >= subdev->size) {
+ size = 0;
+ ofs -= subdev->size;
+ continue;
+ }
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ err = mtd_unlock(subdev, ofs, size);
+ if (err)
+ break;
+
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+
+ return err;
+}
+
+static void concat_sync(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ mtd_sync(subdev);
+ }
+}
+
+static int concat_suspend(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, rc = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ if ((rc = mtd_suspend(subdev)) < 0)
+ return rc;
+ }
+ return rc;
+}
+
+static void concat_resume(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ mtd_resume(subdev);
+ }
+}
+
+static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, res = 0;
+
+ if (!mtd_can_have_bb(concat->subdev[0]))
+ return res;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ res = mtd_block_isbad(subdev, ofs);
+ break;
+ }
+
+ return res;
+}
+
+static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ err = mtd_block_markbad(subdev, ofs);
+ if (!err)
+ mtd->ecc_stats.badblocks++;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * try to support NOMMU mmaps on concatenated devices
+ * - we don't support subdev spanning as we can't guarantee it'll work
+ */
+static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (offset >= subdev->size) {
+ offset -= subdev->size;
+ continue;
+ }
+
+ return mtd_get_unmapped_area(subdev, len, offset, flags);
+ }
+
+ return (unsigned long) -ENOSYS;
+}
+
+/*
+ * This function constructs a virtual MTD device by concatenating
+ * num_devs MTD devices. A pointer to the new device object is
+ * returned on success, or NULL on failure. This function does _not_
+ * register any devices: this is the caller's responsibility. A usage
+ * sketch follows the function body.
+ */
+struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
+				   int num_devs,	/* number of subdevices */
+				   const char *name)	/* name for the new device */
+{
+ int i;
+ size_t size;
+ struct mtd_concat *concat;
+ uint32_t max_erasesize, curr_erasesize;
+ int num_erase_region;
+ int max_writebufsize = 0;
+
+ printk(KERN_NOTICE "Concatenating MTD devices:\n");
+ for (i = 0; i < num_devs; i++)
+ printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
+ printk(KERN_NOTICE "into device \"%s\"\n", name);
+
+ /* allocate the device structure */
+ size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
+ concat = kzalloc(size, GFP_KERNEL);
+ if (!concat) {
+		printk(KERN_ERR
+		       "memory allocation error while creating concatenated device \"%s\"\n",
+		       name);
+ return NULL;
+ }
+ concat->subdev = (struct mtd_info **) (concat + 1);
+
+ /*
+ * Set up the new "super" device's MTD object structure, check for
+ * incompatibilities between the subdevices.
+ */
+ concat->mtd.type = subdev[0]->type;
+ concat->mtd.flags = subdev[0]->flags;
+ concat->mtd.size = subdev[0]->size;
+ concat->mtd.erasesize = subdev[0]->erasesize;
+ concat->mtd.writesize = subdev[0]->writesize;
+
+ for (i = 0; i < num_devs; i++)
+ if (max_writebufsize < subdev[i]->writebufsize)
+ max_writebufsize = subdev[i]->writebufsize;
+ concat->mtd.writebufsize = max_writebufsize;
+
+ concat->mtd.subpage_sft = subdev[0]->subpage_sft;
+ concat->mtd.oobsize = subdev[0]->oobsize;
+ concat->mtd.oobavail = subdev[0]->oobavail;
+ if (subdev[0]->_writev)
+ concat->mtd._writev = concat_writev;
+ if (subdev[0]->_read_oob)
+ concat->mtd._read_oob = concat_read_oob;
+ if (subdev[0]->_write_oob)
+ concat->mtd._write_oob = concat_write_oob;
+ if (subdev[0]->_block_isbad)
+ concat->mtd._block_isbad = concat_block_isbad;
+ if (subdev[0]->_block_markbad)
+ concat->mtd._block_markbad = concat_block_markbad;
+
+ concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
+
+ concat->subdev[0] = subdev[0];
+
+ for (i = 1; i < num_devs; i++) {
+ if (concat->mtd.type != subdev[i]->type) {
+ kfree(concat);
+ printk("Incompatible device type on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ }
+ if (concat->mtd.flags != subdev[i]->flags) {
+ /*
+ * Expect all flags except MTD_WRITEABLE to be
+ * equal on all subdevices.
+ */
+			if ((concat->mtd.flags ^ subdev[i]->flags) &
+			    ~MTD_WRITEABLE) {
+ kfree(concat);
+ printk("Incompatible device flags on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ } else
+ /* if writeable attribute differs,
+ make super device writeable */
+ concat->mtd.flags |=
+ subdev[i]->flags & MTD_WRITEABLE;
+ }
+
+ concat->mtd.size += subdev[i]->size;
+ concat->mtd.ecc_stats.badblocks +=
+ subdev[i]->ecc_stats.badblocks;
+ if (concat->mtd.writesize != subdev[i]->writesize ||
+ concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
+ concat->mtd.oobsize != subdev[i]->oobsize ||
+ !concat->mtd._read_oob != !subdev[i]->_read_oob ||
+ !concat->mtd._write_oob != !subdev[i]->_write_oob) {
+ kfree(concat);
+ printk("Incompatible OOB or ECC data on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ }
+ concat->subdev[i] = subdev[i];
+
+ }
+
+ concat->mtd.ecclayout = subdev[0]->ecclayout;
+
+ concat->num_subdev = num_devs;
+ concat->mtd.name = name;
+
+ concat->mtd._erase = concat_erase;
+ concat->mtd._read = concat_read;
+ concat->mtd._write = concat_write;
+ concat->mtd._sync = concat_sync;
+ concat->mtd._lock = concat_lock;
+ concat->mtd._unlock = concat_unlock;
+ concat->mtd._suspend = concat_suspend;
+ concat->mtd._resume = concat_resume;
+ concat->mtd._get_unmapped_area = concat_get_unmapped_area;
+
+ /*
+ * Combine the erase block size info of the subdevices:
+ *
+ * first, walk the map of the new device and see how
+ * many changes in erase size we have
+ */
+ max_erasesize = curr_erasesize = subdev[0]->erasesize;
+ num_erase_region = 1;
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /* if it differs from the last subdevice's erase size, count it */
+ ++num_erase_region;
+ curr_erasesize = subdev[i]->erasesize;
+ if (curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ } else {
+ /* current subdevice has variable erase size */
+ int j;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].erasesize !=
+ curr_erasesize) {
+ ++num_erase_region;
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ if (curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ }
+ }
+ }
+
+ if (num_erase_region == 1) {
+ /*
+ * All subdevices have the same uniform erase size.
+ * This is easy:
+ */
+ concat->mtd.erasesize = curr_erasesize;
+ concat->mtd.numeraseregions = 0;
+ } else {
+ uint64_t tmp64;
+
+ /*
+ * erase block size varies across the subdevices: allocate
+ * space to store the data describing the variable erase regions
+ */
+ struct mtd_erase_region_info *erase_region_p;
+ uint64_t begin, position;
+
+ concat->mtd.erasesize = max_erasesize;
+ concat->mtd.numeraseregions = num_erase_region;
+ concat->mtd.eraseregions = erase_region_p =
+ kmalloc(num_erase_region *
+ sizeof (struct mtd_erase_region_info), GFP_KERNEL);
+ if (!erase_region_p) {
+ kfree(concat);
+			printk(KERN_ERR
+			       "memory allocation error while creating erase region list for device \"%s\"\n",
+			       name);
+ return NULL;
+ }
+
+ /*
+ * walk the map of the new device once more and fill in
+	 * the erase region info:
+ */
+ curr_erasesize = subdev[0]->erasesize;
+ begin = position = 0;
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /*
+ * fill in an mtd_erase_region_info structure for the area
+ * we have walked so far:
+ */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ begin = position;
+
+ curr_erasesize = subdev[i]->erasesize;
+ ++erase_region_p;
+ }
+ position += subdev[i]->size;
+ } else {
+ /* current subdevice has variable erase size */
+ int j;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+				/* walk the list of erase regions, fill in any changes */
+ if (subdev[i]->eraseregions[j].
+ erasesize != curr_erasesize) {
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ begin = position;
+
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ ++erase_region_p;
+ }
+ position +=
+ subdev[i]->eraseregions[j].
+ numblocks * (uint64_t)curr_erasesize;
+ }
+ }
+ }
+ /* Now write the final entry */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize = curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ }
+
+ return &concat->mtd;
+}
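+
+/*
+ * Usage sketch (illustrative only, not part of this driver): a board
+ * driver might combine two NOR chips into a single device. The subdevice
+ * pointers and the "combined-flash" name below are assumptions made for
+ * the example:
+ *
+ *	struct mtd_info *parts[2] = { nor0_mtd, nor1_mtd };
+ *	struct mtd_info *combined;
+ *
+ *	combined = mtd_concat_create(parts, 2, "combined-flash");
+ *	if (!combined)
+ *		return -ENXIO;
+ *	return mtd_device_register(combined, NULL, 0);
+ */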
+
+/*
+ * This function destroys an MTD object obtained from mtd_concat_create()
+ */
+
+void mtd_concat_destroy(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ if (concat->mtd.numeraseregions)
+ kfree(concat->mtd.eraseregions);
+ kfree(concat);
+}
+
+EXPORT_SYMBOL(mtd_concat_create);
+EXPORT_SYMBOL(mtd_concat_destroy);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
+MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
new file mode 100644
index 000000000..d172195fb
--- /dev/null
+++ b/drivers/mtd/mtdcore.c
@@ -0,0 +1,1303 @@
+/*
+ * Core registration and callback routines for MTD
+ * drivers and users.
+ *
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2006 Red Hat UK Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/ioctl.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/idr.h>
+#include <linux/backing-dev.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/reboot.h>
+#include <linux/kconfig.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include "mtdcore.h"
+
+static struct backing_dev_info mtd_bdi = {
+};
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state);
+static int mtd_cls_resume(struct device *dev);
+
+static struct class mtd_class = {
+ .name = "mtd",
+ .owner = THIS_MODULE,
+ .suspend = mtd_cls_suspend,
+ .resume = mtd_cls_resume,
+};
+
+static DEFINE_IDR(mtd_idr);
+
+/* These are exported solely for the purpose of mtd_blkdevs.c. You
+ should not use them for _anything_ else */
+DEFINE_MUTEX(mtd_table_mutex);
+EXPORT_SYMBOL_GPL(mtd_table_mutex);
+
+struct mtd_info *__mtd_next_device(int i)
+{
+ return idr_get_next(&mtd_idr, &i);
+}
+EXPORT_SYMBOL_GPL(__mtd_next_device);
+
+static LIST_HEAD(mtd_notifiers);
+
+
+#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
+
+/* REVISIT once MTD uses the driver model better, whoever allocates
+ * the mtd_info will probably want to use the release() hook...
+ */
+static void mtd_release(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ dev_t index = MTD_DEVT(mtd->index);
+
+ /* remove /dev/mtdXro node */
+ device_destroy(&mtd_class, index + 1);
+}
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return mtd ? mtd_suspend(mtd) : 0;
+}
+
+static int mtd_cls_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ if (mtd)
+ mtd_resume(mtd);
+ return 0;
+}
+
+static ssize_t mtd_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ char *type;
+
+ switch (mtd->type) {
+ case MTD_ABSENT:
+ type = "absent";
+ break;
+ case MTD_RAM:
+ type = "ram";
+ break;
+ case MTD_ROM:
+ type = "rom";
+ break;
+ case MTD_NORFLASH:
+ type = "nor";
+ break;
+ case MTD_NANDFLASH:
+ type = "nand";
+ break;
+ case MTD_DATAFLASH:
+ type = "dataflash";
+ break;
+ case MTD_UBIVOLUME:
+ type = "ubi";
+ break;
+ case MTD_MLCNANDFLASH:
+ type = "mlc-nand";
+ break;
+ default:
+ type = "unknown";
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", type);
+}
+static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
+
+static ssize_t mtd_flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
+
+}
+static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
+
+static ssize_t mtd_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)mtd->size);
+
+}
+static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
+
+static ssize_t mtd_erasesize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
+
+}
+static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
+
+static ssize_t mtd_writesize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
+
+}
+static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
+
+static ssize_t mtd_subpagesize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
+
+}
+static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
+
+static ssize_t mtd_oobsize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
+
+}
+static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
+
+static ssize_t mtd_numeraseregions_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
+
+}
+static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
+ NULL);
+
+static ssize_t mtd_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
+
+}
+static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
+
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+}
+static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ unsigned int bitflip_threshold;
+ int retval;
+
+ retval = kstrtouint(buf, 0, &bitflip_threshold);
+ if (retval)
+ return retval;
+
+ mtd->bitflip_threshold = bitflip_threshold;
+ return count;
+}
+static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
+ mtd_bitflip_threshold_show,
+ mtd_bitflip_threshold_store);
+
+static ssize_t mtd_ecc_step_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
+
+}
+static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
+
+static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
+}
+static DEVICE_ATTR(corrected_bits, S_IRUGO,
+ mtd_ecc_stats_corrected_show, NULL);
+
+static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
+}
+static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);
+
+static ssize_t mtd_badblocks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
+}
+static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);
+
+static ssize_t mtd_bbtblocks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
+}
+static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);
+
+static struct attribute *mtd_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_size.attr,
+ &dev_attr_erasesize.attr,
+ &dev_attr_writesize.attr,
+ &dev_attr_subpagesize.attr,
+ &dev_attr_oobsize.attr,
+ &dev_attr_numeraseregions.attr,
+ &dev_attr_name.attr,
+ &dev_attr_ecc_strength.attr,
+ &dev_attr_ecc_step_size.attr,
+ &dev_attr_corrected_bits.attr,
+ &dev_attr_ecc_failures.attr,
+ &dev_attr_bad_blocks.attr,
+ &dev_attr_bbt_blocks.attr,
+ &dev_attr_bitflip_threshold.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mtd);
+
+static struct device_type mtd_devtype = {
+ .name = "mtd",
+ .groups = mtd_groups,
+ .release = mtd_release,
+};
+
+#ifndef CONFIG_MMU
+unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
+{
+ switch (mtd->type) {
+ case MTD_RAM:
+ return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
+ NOMMU_MAP_READ | NOMMU_MAP_WRITE;
+ case MTD_ROM:
+ return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
+ NOMMU_MAP_READ;
+ default:
+ return NOMMU_MAP_COPY;
+ }
+}
+EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
+#endif
+
+static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
+ void *cmd)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(n, struct mtd_info, reboot_notifier);
+ mtd->_reboot(mtd);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * add_mtd_device - register an MTD device
+ * @mtd: pointer to new MTD device info structure
+ *
+ * Add a device to the list of MTD devices present in the system, and
+ * notify each currently active MTD 'user' of its arrival. Returns
+ * zero on success or 1 on failure, which currently will only happen
+ * if there is insufficient memory or a sysfs error.
+ */
+
+int add_mtd_device(struct mtd_info *mtd)
+{
+ struct mtd_notifier *not;
+ int i, error;
+
+ mtd->backing_dev_info = &mtd_bdi;
+
+ BUG_ON(mtd->writesize == 0);
+ mutex_lock(&mtd_table_mutex);
+
+ i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+ if (i < 0)
+ goto fail_locked;
+
+ mtd->index = i;
+ mtd->usecount = 0;
+
+ /* default value if not set by driver */
+ if (mtd->bitflip_threshold == 0)
+ mtd->bitflip_threshold = mtd->ecc_strength;
+
+ if (is_power_of_2(mtd->erasesize))
+ mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+ else
+ mtd->erasesize_shift = 0;
+
+ if (is_power_of_2(mtd->writesize))
+ mtd->writesize_shift = ffs(mtd->writesize) - 1;
+ else
+ mtd->writesize_shift = 0;
+
+ mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+ mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
+ /* Some chips always power up locked. Unlock them now */
+ if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
+ error = mtd_unlock(mtd, 0, mtd->size);
+ if (error && error != -EOPNOTSUPP)
+ printk(KERN_WARNING
+ "%s: unlock failed, writes may not work\n",
+ mtd->name);
+ }
+
+ /* Caller should have set dev.parent to match the
+ * physical device.
+ */
+ mtd->dev.type = &mtd_devtype;
+ mtd->dev.class = &mtd_class;
+ mtd->dev.devt = MTD_DEVT(i);
+ dev_set_name(&mtd->dev, "mtd%d", i);
+ dev_set_drvdata(&mtd->dev, mtd);
+ if (device_register(&mtd->dev) != 0)
+ goto fail_added;
+
+ device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
+ "mtd%dro", i);
+
+ pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->add(mtd);
+
+ mutex_unlock(&mtd_table_mutex);
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
+ return 0;
+
+fail_added:
+ idr_remove(&mtd_idr, i);
+fail_locked:
+ mutex_unlock(&mtd_table_mutex);
+ return 1;
+}
+
+/**
+ * del_mtd_device - unregister an MTD device
+ * @mtd: pointer to MTD device info structure
+ *
+ * Remove a device from the list of MTD devices present in the system,
+ * and notify each currently active MTD 'user' of its departure.
+ * Returns zero on success or a negative error code on failure: -ENODEV
+ * if the requested device does not appear to be present in the list,
+ * -EBUSY if it is still in use.
+ */
+
+int del_mtd_device(struct mtd_info *mtd)
+{
+ int ret;
+ struct mtd_notifier *not;
+
+ mutex_lock(&mtd_table_mutex);
+
+ if (idr_find(&mtd_idr, mtd->index) != mtd) {
+ ret = -ENODEV;
+ goto out_error;
+ }
+
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->remove(mtd);
+
+ if (mtd->usecount) {
+ printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
+ mtd->index, mtd->name, mtd->usecount);
+ ret = -EBUSY;
+ } else {
+ device_unregister(&mtd->dev);
+
+ idr_remove(&mtd_idr, mtd->index);
+
+ module_put(THIS_MODULE);
+ ret = 0;
+ }
+
+out_error:
+ mutex_unlock(&mtd_table_mutex);
+ return ret;
+}
+
+static int mtd_add_device_partitions(struct mtd_info *mtd,
+ struct mtd_partition *real_parts,
+ int nbparts)
+{
+ int ret;
+
+ if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
+ ret = add_mtd_device(mtd);
+ if (ret == 1)
+ return -ENODEV;
+ }
+
+ if (nbparts > 0) {
+ ret = add_mtd_partitions(mtd, real_parts, nbparts);
+ if (ret && IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ del_mtd_device(mtd);
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/**
+ * mtd_device_parse_register - parse partitions and register an MTD device.
+ *
+ * @mtd: the MTD device to register
+ * @types: the list of MTD partition probes to try, see
+ * 'parse_mtd_partitions()' for more information
+ * @parser_data: MTD partition parser-specific data
+ * @parts: fallback partition information to register, if parsing fails;
+ * only valid if %nr_parts > %0
+ * @nr_parts: the number of partitions in parts, if zero then the full
+ * MTD device is registered if no partition info is found
+ *
+ * This function aggregates MTD partitions parsing (done by
+ * 'parse_mtd_partitions()') and MTD device and partitions registering. It
+ * basically follows the most common pattern found in many MTD drivers:
+ *
+ * * It first tries to probe partitions on MTD device @mtd using parsers
+ * specified in @types (if @types is %NULL, then the default list of parsers
+ * is used, see 'parse_mtd_partitions()' for more information). If none are
+ *   found, this function tries to fall back to the information specified in
+ * @parts/@nr_parts.
+ * * If any partitioning info was found, this function registers the found
+ * partitions. If the MTD_PARTITIONED_MASTER option is set, then the device
+ * as a whole is registered first.
+ * * If no partitions were found this function just registers the MTD device
+ * @mtd and exits.
+ *
+ * Returns zero in case of success and a negative error code in case of failure.
+ */
+int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+ struct mtd_part_parser_data *parser_data,
+ const struct mtd_partition *parts,
+ int nr_parts)
+{
+ int ret;
+ struct mtd_partition *real_parts = NULL;
+
+ ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
+ if (ret <= 0 && nr_parts && parts) {
+ real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
+ GFP_KERNEL);
+ if (!real_parts)
+ ret = -ENOMEM;
+ else
+ ret = nr_parts;
+ }
+
+ if (ret >= 0)
+ ret = mtd_add_device_partitions(mtd, real_parts, ret);
+
+ /*
+ * FIXME: some drivers unfortunately call this function more than once.
+ * So we have to check if we've already assigned the reboot notifier.
+ *
+ * Generally, we can make multiple calls work for most cases, but it
+ * does cause problems with parse_mtd_partitions() above (e.g.,
+ * cmdlineparts will register partitions more than once).
+ */
+ if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
+ mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
+ register_reboot_notifier(&mtd->reboot_notifier);
+ }
+
+ kfree(real_parts);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_device_parse_register);
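+
+/*
+ * Usage sketch (illustrative): a typical flash driver probe routine
+ * registers its master device with a parser list and a fallback
+ * partition table. The "probes" list and "my_parts" table below are
+ * assumptions made for the example:
+ *
+ *	static const char * const probes[] = { "cmdlinepart", NULL };
+ *	static const struct mtd_partition my_parts[] = {
+ *		{ .name = "boot",   .offset = 0,                  .size = SZ_1M },
+ *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
+ *	};
+ *
+ *	err = mtd_device_parse_register(mtd, probes, NULL,
+ *					my_parts, ARRAY_SIZE(my_parts));
+ */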
+
+/**
+ * mtd_device_unregister - unregister an existing MTD device.
+ *
+ * @master: the MTD device to unregister. This will unregister both the master
+ * and any partitions if registered.
+ */
+int mtd_device_unregister(struct mtd_info *master)
+{
+ int err;
+
+ if (master->_reboot)
+ unregister_reboot_notifier(&master->reboot_notifier);
+
+ err = del_mtd_partitions(master);
+ if (err)
+ return err;
+
+ if (!device_is_registered(&master->dev))
+ return 0;
+
+ return del_mtd_device(master);
+}
+EXPORT_SYMBOL_GPL(mtd_device_unregister);
+
+/**
+ * register_mtd_user - register a 'user' of MTD devices.
+ * @new: pointer to notifier info structure
+ *
+ * Registers a pair of callback functions to be called upon addition
+ * or removal of MTD devices. Causes the 'add' callback to be immediately
+ * invoked for each MTD device currently present in the system.
+ */
+void register_mtd_user(struct mtd_notifier *new)
+{
+ struct mtd_info *mtd;
+
+ mutex_lock(&mtd_table_mutex);
+
+ list_add(&new->list, &mtd_notifiers);
+
+ __module_get(THIS_MODULE);
+
+ mtd_for_each_device(mtd)
+ new->add(mtd);
+
+ mutex_unlock(&mtd_table_mutex);
+}
+EXPORT_SYMBOL_GPL(register_mtd_user);
+
+/**
+ * unregister_mtd_user - unregister a 'user' of MTD devices.
+ * @old: pointer to notifier info structure
+ *
+ * Removes a callback function pair from the list of 'users' to be
+ * notified upon addition or removal of MTD devices. Causes the
+ * 'remove' callback to be immediately invoked for each MTD device
+ * currently present in the system.
+ */
+int unregister_mtd_user(struct mtd_notifier *old)
+{
+ struct mtd_info *mtd;
+
+ mutex_lock(&mtd_table_mutex);
+
+ module_put(THIS_MODULE);
+
+ mtd_for_each_device(mtd)
+ old->remove(mtd);
+
+ list_del(&old->list);
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(unregister_mtd_user);
+
+/**
+ * get_mtd_device - obtain a validated handle for an MTD device
+ * @mtd: last known address of the required MTD device
+ * @num: internal device number of the required MTD device
+ *
+ * Given a number and NULL address, return the num'th entry in the device
+ * table, if any. Given an address and num == -1, search the device table
+ * for a device with that address and return it if it's still present. Given
+ * both, return the num'th driver only if its address matches. Return
+ * error code if not.
+ */
+struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
+{
+ struct mtd_info *ret = NULL, *other;
+ int err = -ENODEV;
+
+ mutex_lock(&mtd_table_mutex);
+
+ if (num == -1) {
+ mtd_for_each_device(other) {
+ if (other == mtd) {
+ ret = mtd;
+ break;
+ }
+ }
+ } else if (num >= 0) {
+ ret = idr_find(&mtd_idr, num);
+ if (mtd && mtd != ret)
+ ret = NULL;
+ }
+
+ if (!ret) {
+ ret = ERR_PTR(err);
+ goto out;
+ }
+
+ err = __get_mtd_device(ret);
+ if (err)
+ ret = ERR_PTR(err);
+out:
+ mutex_unlock(&mtd_table_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(get_mtd_device);
+
+
+int __get_mtd_device(struct mtd_info *mtd)
+{
+ int err;
+
+ if (!try_module_get(mtd->owner))
+ return -ENODEV;
+
+ if (mtd->_get_device) {
+ err = mtd->_get_device(mtd);
+
+ if (err) {
+ module_put(mtd->owner);
+ return err;
+ }
+ }
+ mtd->usecount++;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__get_mtd_device);
+
+/**
+ * get_mtd_device_nm - obtain a validated handle for an MTD device by
+ * device name
+ * @name: MTD device name to open
+ *
+ * This function returns MTD device description structure in case of
+ * success and an error code in case of failure.
+ */
+struct mtd_info *get_mtd_device_nm(const char *name)
+{
+ int err = -ENODEV;
+ struct mtd_info *mtd = NULL, *other;
+
+ mutex_lock(&mtd_table_mutex);
+
+ mtd_for_each_device(other) {
+ if (!strcmp(name, other->name)) {
+ mtd = other;
+ break;
+ }
+ }
+
+ if (!mtd)
+ goto out_unlock;
+
+ err = __get_mtd_device(mtd);
+ if (err)
+ goto out_unlock;
+
+ mutex_unlock(&mtd_table_mutex);
+ return mtd;
+
+out_unlock:
+ mutex_unlock(&mtd_table_mutex);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(get_mtd_device_nm);
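+
+/*
+ * Usage sketch (illustrative): look a device up by name, use it, then
+ * drop the reference. The "u-boot-env" name is an assumption made for
+ * the example:
+ *
+ *	struct mtd_info *mtd = get_mtd_device_nm("u-boot-env");
+ *
+ *	if (IS_ERR(mtd))
+ *		return PTR_ERR(mtd);
+ *	... mtd_read()/mtd_write() as needed ...
+ *	put_mtd_device(mtd);
+ */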
+
+void put_mtd_device(struct mtd_info *mtd)
+{
+ mutex_lock(&mtd_table_mutex);
+ __put_mtd_device(mtd);
+ mutex_unlock(&mtd_table_mutex);
+
+}
+EXPORT_SYMBOL_GPL(put_mtd_device);
+
+void __put_mtd_device(struct mtd_info *mtd)
+{
+ --mtd->usecount;
+ BUG_ON(mtd->usecount < 0);
+
+ if (mtd->_put_device)
+ mtd->_put_device(mtd);
+
+ module_put(mtd->owner);
+}
+EXPORT_SYMBOL_GPL(__put_mtd_device);
+
+/*
+ * Erase is an asynchronous operation. Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ if (!instr->len) {
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+ }
+ return mtd->_erase(mtd, instr);
+}
+EXPORT_SYMBOL_GPL(mtd_erase);
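+
+/*
+ * Usage sketch (illustrative): since erase completes asynchronously, a
+ * caller typically supplies a callback that wakes a wait queue, as
+ * concat_dev_erase() in mtdconcat.c does. The erase_done() helper and
+ * the offsets below are assumptions made for the example:
+ *
+ *	static void erase_done(struct erase_info *instr)
+ *	{
+ *		wake_up((wait_queue_head_t *)instr->priv);
+ *	}
+ *
+ *	instr.mtd = mtd;
+ *	instr.addr = block_ofs;
+ *	instr.len = mtd->erasesize;
+ *	instr.callback = erase_done;
+ *	instr.priv = (unsigned long)&waitq;
+ *	err = mtd_erase(mtd, &instr);
+ *	... sleep until instr.state is MTD_ERASE_DONE or MTD_ERASE_FAILED ...
+ */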
+
+/*
+ * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ void **virt, resource_size_t *phys)
+{
+ *retlen = 0;
+ *virt = NULL;
+ if (phys)
+ *phys = 0;
+ if (!mtd->_point)
+ return -EOPNOTSUPP;
+ if (from < 0 || from >= mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_point(mtd, from, len, retlen, virt, phys);
+}
+EXPORT_SYMBOL_GPL(mtd_point);
+
+/* We probably shouldn't allow XIP if the unpoint method isn't NULL */
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+	if (!mtd->_unpoint)
+ return -EOPNOTSUPP;
+ if (from < 0 || from >= mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_unpoint(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unpoint);
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+ unsigned long offset, unsigned long flags)
+{
+ if (!mtd->_get_unmapped_area)
+ return -EOPNOTSUPP;
+ if (offset >= mtd->size || len > mtd->size - offset)
+ return -EINVAL;
+ return mtd->_get_unmapped_area(mtd, len, offset, flags);
+}
+EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
+
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ u_char *buf)
+{
+ int ret_code;
+ *retlen = 0;
+ if (from < 0 || from >= mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ /*
+ * In the absence of an error, drivers return a non-negative integer
+ * representing the maximum number of bitflips that were corrected on
+ * any one ecc region (if applicable; zero otherwise).
+ */
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read);
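+
+/*
+ * Usage sketch (illustrative): -EUCLEAN from mtd_read() means the data
+ * was recovered but the block is degrading; callers should scrub or
+ * migrate it rather than treat the read as failed. The offsets, buffer
+ * and scrubbing policy below are assumptions made for the example:
+ *
+ *	err = mtd_read(mtd, ofs, len, &retlen, buf);
+ *	if (err == -EUCLEAN)
+ *		... data in buf is valid; schedule the block for scrubbing ...
+ *	else if (err)
+ *		return err;
+ */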
+
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ *retlen = 0;
+ if (to < 0 || to >= mtd->size || len > mtd->size - to)
+ return -EINVAL;
+ if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!len)
+ return 0;
+ return mtd->_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_write);
+
+/*
+ * In blackbox flight-recorder-like scenarios we want writes to succeed even
+ * from interrupt context. panic_write() is only intended to be called when it
+ * is known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_panic_write)
+ return -EOPNOTSUPP;
+ if (to < 0 || to >= mtd->size || len > mtd->size - to)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!len)
+ return 0;
+ return mtd->_panic_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_panic_write);
+
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ int ret_code;
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->_read_oob)
+ return -EOPNOTSUPP;
+ /*
+ * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
+ * similar to mtd->_read(), returning a non-negative integer
+ * representing max bitflips. In other cases, mtd->_read_oob() may
+ * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
+ */
+ ret_code = mtd->_read_oob(mtd, from, ops);
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read_oob);
+
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one-time programmable but the factory data is read
+ * only.
+ */
+int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf)
+{
+ if (!mtd->_get_fact_prot_info)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
+
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_read_fact_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
+
+int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf)
+{
+ if (!mtd->_get_user_prot_info)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_get_user_prot_info(mtd, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
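+
+/*
+ * Usage sketch (illustrative): enumerate the user OTP regions, much as
+ * the OTPGETREGIONINFO ioctl in mtdchar.c does. The fixed-size array
+ * below is an assumption made for the example:
+ *
+ *	struct otp_info info[8];
+ *	size_t retlen;
+ *	int nregions;
+ *
+ *	err = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
+ *	if (!err)
+ *		nregions = retlen / sizeof(struct otp_info);
+ */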
+
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_read_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
+
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ int ret;
+
+ *retlen = 0;
+ if (!mtd->_write_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+ if (ret)
+ return ret;
+
+ /*
+	 * If no data could be written at all, the OTP region is exhausted
+	 * and we must return -ENOSPC.
+ */
+ return (*retlen) ? 0 : -ENOSPC;
+}
+EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
+
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ if (!mtd->_lock_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_lock_user_prot_reg(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
+
+/* Chip-supported device locking */
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_lock)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_lock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock);
+
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_unlock)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_unlock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unlock);
+
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_is_locked)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_is_locked(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_is_locked);
+
+int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ if (ofs < 0 || ofs >= mtd->size)
+ return -EINVAL;
+ if (!mtd->_block_isreserved)
+ return 0;
+ return mtd->_block_isreserved(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_isreserved);
+
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (ofs < 0 || ofs >= mtd->size)
+ return -EINVAL;
+ if (!mtd->_block_isbad)
+ return 0;
+ return mtd->_block_isbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_isbad);
+
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (!mtd->_block_markbad)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs >= mtd->size)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ return mtd->_block_markbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_markbad);
+
+/*
+ * default_mtd_writev - the default writev method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ unsigned long i;
+ size_t totlen = 0, thislen;
+ int ret = 0;
+
+ for (i = 0; i < count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
+ vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ to += vecs[i].iov_len;
+ }
+ *retlen = totlen;
+ return ret;
+}
+
+/*
+ * mtd_writev - the vector-based MTD write method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ *retlen = 0;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!mtd->_writev)
+ return default_mtd_writev(mtd, vecs, count, to, retlen);
+ return mtd->_writev(mtd, vecs, count, to, retlen);
+}
+EXPORT_SYMBOL_GPL(mtd_writev);
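+
+/*
+ * Illustrative sketch (not part of the original file): gathering two
+ * discontiguous buffers into a single mtd_writev() call. Names are
+ * hypothetical.
+ */
+static int __maybe_unused example_gather_write(struct mtd_info *mtd, loff_t to,
+ void *hdr, size_t hdrlen, void *body, size_t bodylen)
+{
+ struct kvec vecs[2] = {
+ { .iov_base = hdr, .iov_len = hdrlen },
+ { .iov_base = body, .iov_len = bodylen },
+ };
+ size_t retlen;
+ int ret;
+
+ ret = mtd_writev(mtd, vecs, 2, to, &retlen);
+ if (!ret && retlen != hdrlen + bodylen)
+ ret = -EIO; /* short write */
+ return ret;
+}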
+
+/**
+ * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
+ * @mtd: mtd device description object pointer
+ * @size: a pointer to the ideal or maximum size of the allocation, points
+ * to the actual allocation size on success.
+ *
+ * This routine attempts to allocate a contiguous kernel buffer up to
+ * the specified size, backing off the size of the request exponentially
+ * until the request succeeds or until the allocation size falls below
+ * the system page size. This attempts to make sure it does not adversely
+ * impact system performance, so when allocating more than one page, we
+ * ask the memory allocator to avoid re-trying, swapping, writing back
+ * or performing I/O.
+ *
+ * Note, this function also makes sure that the allocated buffer is aligned to
+ * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
+ *
+ * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
+ * to handle smaller (i.e. degraded) buffer allocations under low- or
+ * fragmented-memory situations where such reduced allocations, from a
+ * requested ideal, are allowed.
+ *
+ * Returns a pointer to the allocated buffer on success; otherwise, NULL.
+ */
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
+{
+ gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
+ __GFP_NORETRY | __GFP_NO_KSWAPD;
+ size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
+ void *kbuf;
+
+ *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
+
+ while (*size > min_alloc) {
+ kbuf = kmalloc(*size, flags);
+ if (kbuf)
+ return kbuf;
+
+ *size >>= 1;
+ *size = ALIGN(*size, mtd->writesize);
+ }
+
+ /*
+ * For the last resort allocation allow 'kmalloc()' to do all sorts of
+ * things (write-back, dropping caches, etc) by using GFP_KERNEL.
+ */
+ return kmalloc(*size, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
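+
+/*
+ * Illustrative sketch (not part of the original file): how a caller
+ * consumes mtd_kmalloc_up_to() -- request the ideal size, then work in
+ * chunks of whatever smaller buffer actually came back. The helper
+ * name is hypothetical.
+ */
+static int __maybe_unused example_chunked_fill(struct mtd_info *mtd,
+ loff_t ofs, size_t len)
+{
+ size_t size = len; /* in: ideal size, out: actual size */
+ void *buf = mtd_kmalloc_up_to(mtd, &size);
+ int ret = 0;
+
+ if (!buf)
+ return -ENOMEM;
+ memset(buf, 0xff, size); /* fill pattern: erased-flash state */
+ while (len && !ret) {
+ size_t retlen, thislen = min(len, size);
+
+ ret = mtd_write(mtd, ofs, thislen, &retlen, buf);
+ ofs += thislen;
+ len -= thislen;
+ }
+ kfree(buf);
+ return ret;
+}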
+
+#ifdef CONFIG_PROC_FS
+
+/*====================================================================*/
+/* Support for /proc/mtd */
+
+static int mtd_proc_show(struct seq_file *m, void *v)
+{
+ struct mtd_info *mtd;
+
+ seq_puts(m, "dev: size erasesize name\n");
+ mutex_lock(&mtd_table_mutex);
+ mtd_for_each_device(mtd) {
+ seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
+ mtd->index, (unsigned long long)mtd->size,
+ mtd->erasesize, mtd->name);
+ }
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
+}
+
+static int mtd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtd_proc_show, NULL);
+}
+
+static const struct file_operations mtd_proc_ops = {
+ .open = mtd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_PROC_FS */
+
+/*====================================================================*/
+/* Init code */
+
+static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
+{
+ int ret;
+
+ ret = bdi_init(bdi);
+ if (!ret)
+ ret = bdi_register(bdi, NULL, "%s", name);
+
+ if (ret)
+ bdi_destroy(bdi);
+
+ return ret;
+}
+
+static struct proc_dir_entry *proc_mtd;
+
+static int __init init_mtd(void)
+{
+ int ret;
+
+ ret = class_register(&mtd_class);
+ if (ret)
+ goto err_reg;
+
+ ret = mtd_bdi_init(&mtd_bdi, "mtd");
+ if (ret)
+ goto err_bdi;
+
+ proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
+
+ ret = init_mtdchar();
+ if (ret)
+ goto out_procfs;
+
+ return 0;
+
+out_procfs:
+ if (proc_mtd)
+ remove_proc_entry("mtd", NULL);
+err_bdi:
+ class_unregister(&mtd_class);
+err_reg:
+ pr_err("Error registering mtd class or bdi: %d\n", ret);
+ return ret;
+}
+
+static void __exit cleanup_mtd(void)
+{
+ cleanup_mtdchar();
+ if (proc_mtd)
+ remove_proc_entry("mtd", NULL);
+ class_unregister(&mtd_class);
+ bdi_destroy(&mtd_bdi);
+}
+
+module_init(init_mtd);
+module_exit(cleanup_mtd);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Core MTD registration and access routines");
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
new file mode 100644
index 000000000..7b0353399
--- /dev/null
+++ b/drivers/mtd/mtdcore.h
@@ -0,0 +1,23 @@
+/*
+ * These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c.
+ * You should not use them for _anything_ else.
+ */
+
+extern struct mutex mtd_table_mutex;
+
+struct mtd_info *__mtd_next_device(int i);
+int add_mtd_device(struct mtd_info *mtd);
+int del_mtd_device(struct mtd_info *mtd);
+int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
+int del_mtd_partitions(struct mtd_info *);
+int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data);
+
+int __init init_mtdchar(void);
+void __exit cleanup_mtdchar(void);
+
+#define mtd_for_each_device(mtd) \
+ for ((mtd) = __mtd_next_device(0); \
+ (mtd) != NULL; \
+ (mtd) = __mtd_next_device(mtd->index + 1))
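+
+/*
+ * Usage sketch (illustrative, not part of the original header); the
+ * caller must hold mtd_table_mutex across the walk, as mtd_proc_show()
+ * in mtdcore.c does:
+ *
+ * mutex_lock(&mtd_table_mutex);
+ * mtd_for_each_device(mtd)
+ * pr_info("mtd%d: %s\n", mtd->index, mtd->name);
+ * mutex_unlock(&mtd_table_mutex);
+ */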
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
new file mode 100644
index 000000000..97bb8f630
--- /dev/null
+++ b/drivers/mtd/mtdoops.c
@@ -0,0 +1,451 @@
+/*
+ * MTD Oops/Panic logger
+ *
+ * Copyright © 2007 Nokia Corporation. All rights reserved.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/kmsg_dump.h>
+
+/* Maximum MTD partition size */
+#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
+
+#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
+#define MTDOOPS_HEADER_SIZE 8
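+
+/*
+ * Record layout, as assembled in mtdoops_write() below: every
+ * record_size page starts with an 8-byte header of two native-endian
+ * u32s -- a sequence counter and MTDOOPS_KERNMSG_MAGIC -- followed by
+ * the dumped kernel message text.
+ */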
+
+static unsigned long record_size = 4096;
+module_param(record_size, ulong, 0400);
+MODULE_PARM_DESC(record_size,
+ "record size for MTD OOPS pages in bytes (default 4096)");
+
+static char mtddev[80];
+module_param_string(mtddev, mtddev, 80, 0400);
+MODULE_PARM_DESC(mtddev,
+ "name or index number of the MTD device to use");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+ "set to 1 to dump oopses, 0 to only dump panics (default 1)");
+
+static struct mtdoops_context {
+ struct kmsg_dumper dump;
+
+ int mtd_index;
+ struct work_struct work_erase;
+ struct work_struct work_write;
+ struct mtd_info *mtd;
+ int oops_pages;
+ int nextpage;
+ int nextcount;
+ unsigned long *oops_page_used;
+
+ void *oops_buf;
+} oops_cxt;
+
+static void mark_page_used(struct mtdoops_context *cxt, int page)
+{
+ set_bit(page, cxt->oops_page_used);
+}
+
+static void mark_page_unused(struct mtdoops_context *cxt, int page)
+{
+ clear_bit(page, cxt->oops_page_used);
+}
+
+static int page_is_used(struct mtdoops_context *cxt, int page)
+{
+ return test_bit(page, cxt->oops_page_used);
+}
+
+static void mtdoops_erase_callback(struct erase_info *done)
+{
+ wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
+ wake_up(wait_q);
+}
+
+static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
+ u32 start_page = start_page_offset / record_size;
+ u32 erase_pages = mtd->erasesize / record_size;
+ struct erase_info erase;
+ DECLARE_WAITQUEUE(wait, current);
+ wait_queue_head_t wait_q;
+ int ret;
+ int page;
+
+ init_waitqueue_head(&wait_q);
+ erase.mtd = mtd;
+ erase.callback = mtdoops_erase_callback;
+ erase.addr = offset;
+ erase.len = mtd->erasesize;
+ erase.priv = (u_long)&wait_q;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&wait_q, &wait);
+
+ ret = mtd_erase(mtd, &erase);
+ if (ret) {
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&wait_q, &wait);
+ printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
+ (unsigned long long)erase.addr,
+ (unsigned long long)erase.len, mtddev);
+ return ret;
+ }
+
+ schedule(); /* Wait for erase to finish. */
+ remove_wait_queue(&wait_q, &wait);
+
+ /* Mark pages as unused */
+ for (page = start_page; page < start_page + erase_pages; page++)
+ mark_page_unused(cxt, page);
+
+ return 0;
+}
+
+static void mtdoops_inc_counter(struct mtdoops_context *cxt)
+{
+ cxt->nextpage++;
+ if (cxt->nextpage >= cxt->oops_pages)
+ cxt->nextpage = 0;
+ cxt->nextcount++;
+ if (cxt->nextcount == 0xffffffff)
+ cxt->nextcount = 0;
+
+ if (page_is_used(cxt, cxt->nextpage)) {
+ schedule_work(&cxt->work_erase);
+ return;
+ }
+
+ printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
+ cxt->nextpage, cxt->nextcount);
+}
+
+/* Scheduled work - when we can't proceed without erasing a block */
+static void mtdoops_workfunc_erase(struct work_struct *work)
+{
+ struct mtdoops_context *cxt =
+ container_of(work, struct mtdoops_context, work_erase);
+ struct mtd_info *mtd = cxt->mtd;
+ int i = 0, j, ret, mod;
+
+ /* We were unregistered */
+ if (!mtd)
+ return;
+
+ mod = (cxt->nextpage * record_size) % mtd->erasesize;
+ if (mod != 0) {
+ cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
+ if (cxt->nextpage >= cxt->oops_pages)
+ cxt->nextpage = 0;
+ }
+
+ while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
+badblock:
+ printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
+ cxt->nextpage * record_size);
+ i++;
+ cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
+ if (cxt->nextpage >= cxt->oops_pages)
+ cxt->nextpage = 0;
+ if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
+ printk(KERN_ERR "mtdoops: all blocks bad!\n");
+ return;
+ }
+ }
+
+ if (ret < 0) {
+ printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
+ return;
+ }
+
+ for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
+ ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
+
+ if (ret >= 0) {
+ printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
+ cxt->nextpage, cxt->nextcount);
+ return;
+ }
+
+ if (ret == -EIO) {
+ ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
+ if (ret < 0 && ret != -EOPNOTSUPP) {
+ printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
+ return;
+ }
+ }
+ goto badblock;
+}
+
+static void mtdoops_write(struct mtdoops_context *cxt, int panic)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ size_t retlen;
+ u32 *hdr;
+ int ret;
+
+ /* Add mtdoops header to the buffer */
+ hdr = cxt->oops_buf;
+ hdr[0] = cxt->nextcount;
+ hdr[1] = MTDOOPS_KERNMSG_MAGIC;
+
+ if (panic) {
+ ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
+ if (ret == -EOPNOTSUPP) {
+ printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
+ return;
+ }
+ } else
+ ret = mtd_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
+
+ if (retlen != record_size || ret < 0)
+ printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
+ cxt->nextpage * record_size, retlen, record_size, ret);
+ mark_page_used(cxt, cxt->nextpage);
+ memset(cxt->oops_buf, 0xff, record_size);
+
+ mtdoops_inc_counter(cxt);
+}
+
+static void mtdoops_workfunc_write(struct work_struct *work)
+{
+ struct mtdoops_context *cxt =
+ container_of(work, struct mtdoops_context, work_write);
+
+ mtdoops_write(cxt, 0);
+}
+
+static void find_next_position(struct mtdoops_context *cxt)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ int ret, page, maxpos = 0;
+ u32 count[2], maxcount = 0xffffffff;
+ size_t retlen;
+
+ for (page = 0; page < cxt->oops_pages; page++) {
+ if (mtd_block_isbad(mtd, page * record_size))
+ continue;
+ /* Assume the page is used */
+ mark_page_used(cxt, page);
+ ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+ &retlen, (u_char *)&count[0]);
+ if (retlen != MTDOOPS_HEADER_SIZE ||
+ (ret < 0 && !mtd_is_bitflip(ret))) {
+ printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
+ page * record_size, retlen,
+ MTDOOPS_HEADER_SIZE, ret);
+ continue;
+ }
+
+ if (count[0] == 0xffffffff && count[1] == 0xffffffff)
+ mark_page_unused(cxt, page);
+ if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC)
+ continue;
+ if (maxcount == 0xffffffff) {
+ maxcount = count[0];
+ maxpos = page;
+ } else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
+ maxcount = count[0];
+ maxpos = page;
+ } else if (count[0] > maxcount && count[0] < 0xc0000000) {
+ maxcount = count[0];
+ maxpos = page;
+ } else if (count[0] > maxcount && count[0] > 0xc0000000
+ && maxcount > 0x80000000) {
+ maxcount = count[0];
+ maxpos = page;
+ }
+ }
+ if (maxcount == 0xffffffff) {
+ cxt->nextpage = cxt->oops_pages - 1;
+ cxt->nextcount = 0;
+ } else {
+ cxt->nextpage = maxpos;
+ cxt->nextcount = maxcount;
+ }
+
+ mtdoops_inc_counter(cxt);
+}
+
+static void mtdoops_do_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ struct mtdoops_context *cxt = container_of(dumper,
+ struct mtdoops_context, dump);
+
+ /* Only dump oopses if dump_oops is set */
+ if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ return;
+
+ kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+ record_size - MTDOOPS_HEADER_SIZE, NULL);
+
+ /* Panics must be written immediately */
+ if (reason != KMSG_DUMP_OOPS)
+ mtdoops_write(cxt, 1);
+
+ /* For other cases, schedule work to write it "nicely" */
+ schedule_work(&cxt->work_write);
+}
+
+static void mtdoops_notify_add(struct mtd_info *mtd)
+{
+ struct mtdoops_context *cxt = &oops_cxt;
+ u64 mtdoops_pages = div_u64(mtd->size, record_size);
+ int err;
+
+ if (!strcmp(mtd->name, mtddev))
+ cxt->mtd_index = mtd->index;
+
+ if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
+ return;
+
+ if (mtd->size < mtd->erasesize * 2) {
+ printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
+ mtd->index);
+ return;
+ }
+ if (mtd->erasesize < record_size) {
+ printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
+ mtd->index);
+ return;
+ }
+ if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
+ printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
+ mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
+ return;
+ }
+
+ /* oops_page_used is a bit field */
+ cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
+ BITS_PER_LONG) * sizeof(unsigned long));
+ if (!cxt->oops_page_used) {
+ printk(KERN_ERR "mtdoops: could not allocate page array\n");
+ return;
+ }
+
+ cxt->dump.max_reason = KMSG_DUMP_OOPS;
+ cxt->dump.dump = mtdoops_do_dump;
+ err = kmsg_dump_register(&cxt->dump);
+ if (err) {
+ printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
+ vfree(cxt->oops_page_used);
+ cxt->oops_page_used = NULL;
+ return;
+ }
+
+ cxt->mtd = mtd;
+ cxt->oops_pages = (int)mtd->size / record_size;
+ find_next_position(cxt);
+ printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
+}
+
+static void mtdoops_notify_remove(struct mtd_info *mtd)
+{
+ struct mtdoops_context *cxt = &oops_cxt;
+
+ if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
+ return;
+
+ if (kmsg_dump_unregister(&cxt->dump) < 0)
+ printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
+
+ cxt->mtd = NULL;
+ flush_work(&cxt->work_erase);
+ flush_work(&cxt->work_write);
+}
+
+static struct mtd_notifier mtdoops_notifier = {
+ .add = mtdoops_notify_add,
+ .remove = mtdoops_notify_remove,
+};
+
+static int __init mtdoops_init(void)
+{
+ struct mtdoops_context *cxt = &oops_cxt;
+ int mtd_index;
+ char *endp;
+
+ if (strlen(mtddev) == 0) {
+ printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
+ return -EINVAL;
+ }
+ if ((record_size & 4095) != 0) {
+ printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
+ return -EINVAL;
+ }
+ if (record_size < 4096) {
+ printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n");
+ return -EINVAL;
+ }
+
+ /* Setup the MTD device to use */
+ cxt->mtd_index = -1;
+ mtd_index = simple_strtoul(mtddev, &endp, 0);
+ if (*endp == '\0')
+ cxt->mtd_index = mtd_index;
+
+ cxt->oops_buf = vmalloc(record_size);
+ if (!cxt->oops_buf) {
+ printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
+ return -ENOMEM;
+ }
+ memset(cxt->oops_buf, 0xff, record_size);
+
+ INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
+ INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
+
+ register_mtd_user(&mtdoops_notifier);
+ return 0;
+}
+
+static void __exit mtdoops_exit(void)
+{
+ struct mtdoops_context *cxt = &oops_cxt;
+
+ unregister_mtd_user(&mtdoops_notifier);
+ vfree(cxt->oops_buf);
+ vfree(cxt->oops_page_used);
+}
+
+module_init(mtdoops_init);
+module_exit(mtdoops_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
+MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
new file mode 100644
index 000000000..cafdb8855
--- /dev/null
+++ b/drivers/mtd/mtdpart.c
@@ -0,0 +1,803 @@
+/*
+ * Simple MTD partitioning layer
+ *
+ * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/kmod.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/err.h>
+#include <linux/kconfig.h>
+
+#include "mtdcore.h"
+
+/* Our partition linked list */
+static LIST_HEAD(mtd_partitions);
+static DEFINE_MUTEX(mtd_partitions_mutex);
+
+/* Our partition node structure */
+struct mtd_part {
+ struct mtd_info mtd;
+ struct mtd_info *master;
+ uint64_t offset;
+ struct list_head list;
+};
+
+/*
+ * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
+ * the pointer to that structure with this macro.
+ */
+#define PART(x) ((struct mtd_part *)(x))
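+
+/*
+ * The bare cast works only because 'mtd' is the first member of
+ * struct mtd_part, so a pointer to the embedded mtd_info is also a
+ * pointer to the enclosing mtd_part -- a container_of() in disguise.
+ */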
+
+
+/*
+ * MTD methods which simply translate the effective address and pass through
+ * to the _real_ device.
+ */
+
+static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ struct mtd_ecc_stats stats;
+ int res;
+
+ stats = part->master->ecc_stats;
+ res = part->master->_read(part->master, from + part->offset, len,
+ retlen, buf);
+ if (unlikely(mtd_is_eccerr(res)))
+ mtd->ecc_stats.failed +=
+ part->master->ecc_stats.failed - stats.failed;
+ else
+ mtd->ecc_stats.corrected +=
+ part->master->ecc_stats.corrected - stats.corrected;
+ return res;
+}
+
+static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ struct mtd_part *part = PART(mtd);
+
+ return part->master->_point(part->master, from + part->offset, len,
+ retlen, virt, phys);
+}
+
+static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ struct mtd_part *part = PART(mtd);
+
+ return part->master->_unpoint(part->master, from + part->offset, len);
+}
+
+static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags)
+{
+ struct mtd_part *part = PART(mtd);
+
+ offset += part->offset;
+ return part->master->_get_unmapped_area(part->master, len, offset,
+ flags);
+}
+
+static int part_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_part *part = PART(mtd);
+ int res;
+
+ if (from >= mtd->size)
+ return -EINVAL;
+ if (ops->datbuf && from + ops->len > mtd->size)
+ return -EINVAL;
+
+ /*
+ * If OOB is also requested, make sure that we do not read past the end
+ * of this partition.
+ */
+ if (ops->oobbuf) {
+ size_t len, pages;
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ len = mtd->oobavail;
+ else
+ len = mtd->oobsize;
+ pages = mtd_div_by_ws(mtd->size, mtd);
+ pages -= mtd_div_by_ws(from, mtd);
+ if (ops->ooboffs + ops->ooblen > pages * len)
+ return -EINVAL;
+ }
+
+ res = part->master->_read_oob(part->master, from + part->offset, ops);
+ if (unlikely(res)) {
+ if (mtd_is_bitflip(res))
+ mtd->ecc_stats.corrected++;
+ if (mtd_is_eccerr(res))
+ mtd->ecc_stats.failed++;
+ }
+ return res;
+}
+
+static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_read_user_prot_reg(part->master, from, len,
+ retlen, buf);
+}
+
+static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_get_user_prot_info(part->master, len, retlen,
+ buf);
+}
+
+static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_read_fact_prot_reg(part->master, from, len,
+ retlen, buf);
+}
+
+static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_get_fact_prot_info(part->master, len, retlen,
+ buf);
+}
+
+static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_write(part->master, to + part->offset, len,
+ retlen, buf);
+}
+
+static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_panic_write(part->master, to + part->offset, len,
+ retlen, buf);
+}
+
+static int part_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_part *part = PART(mtd);
+
+ if (to >= mtd->size)
+ return -EINVAL;
+ if (ops->datbuf && to + ops->len > mtd->size)
+ return -EINVAL;
+ return part->master->_write_oob(part->master, to + part->offset, ops);
+}
+
+static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_write_user_prot_reg(part->master, from, len,
+ retlen, buf);
+}
+
+static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_lock_user_prot_reg(part->master, from, len);
+}
+
+static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_writev(part->master, vecs, count,
+ to + part->offset, retlen);
+}
+
+static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mtd_part *part = PART(mtd);
+ int ret;
+
+ instr->addr += part->offset;
+ ret = part->master->_erase(part->master, instr);
+ if (ret) {
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
+ }
+ return ret;
+}
+
+void mtd_erase_callback(struct erase_info *instr)
+{
+ if (instr->mtd->_erase == part_erase) {
+ struct mtd_part *part = PART(instr->mtd);
+
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
+ }
+ if (instr->callback)
+ instr->callback(instr);
+}
+EXPORT_SYMBOL_GPL(mtd_erase_callback);
+
+static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_lock(part->master, ofs + part->offset, len);
+}
+
+static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_unlock(part->master, ofs + part->offset, len);
+}
+
+static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_is_locked(part->master, ofs + part->offset, len);
+}
+
+static void part_sync(struct mtd_info *mtd)
+{
+ struct mtd_part *part = PART(mtd);
+ part->master->_sync(part->master);
+}
+
+static int part_suspend(struct mtd_info *mtd)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->_suspend(part->master);
+}
+
+static void part_resume(struct mtd_info *mtd)
+{
+ struct mtd_part *part = PART(mtd);
+ part->master->_resume(part->master);
+}
+
+static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_part *part = PART(mtd);
+ ofs += part->offset;
+ return part->master->_block_isreserved(part->master, ofs);
+}
+
+static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_part *part = PART(mtd);
+ ofs += part->offset;
+ return part->master->_block_isbad(part->master, ofs);
+}
+
+static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_part *part = PART(mtd);
+ int res;
+
+ ofs += part->offset;
+ res = part->master->_block_markbad(part->master, ofs);
+ if (!res)
+ mtd->ecc_stats.badblocks++;
+ return res;
+}
+
+static inline void free_partition(struct mtd_part *p)
+{
+ kfree(p->mtd.name);
+ kfree(p);
+}
+
+/*
+ * This function unregisters and destroys all slave MTD objects which are
+ * attached to the given master MTD object.
+ */
+
+int del_mtd_partitions(struct mtd_info *master)
+{
+ struct mtd_part *slave, *next;
+ int ret, err = 0;
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_for_each_entry_safe(slave, next, &mtd_partitions, list)
+ if (slave->master == master) {
+ ret = del_mtd_device(&slave->mtd);
+ if (ret < 0) {
+ err = ret;
+ continue;
+ }
+ list_del(&slave->list);
+ free_partition(slave);
+ }
+ mutex_unlock(&mtd_partitions_mutex);
+
+ return err;
+}
+
+static struct mtd_part *allocate_partition(struct mtd_info *master,
+ const struct mtd_partition *part, int partno,
+ uint64_t cur_offset)
+{
+ struct mtd_part *slave;
+ char *name;
+
+ /* allocate the partition structure */
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ name = kstrdup(part->name, GFP_KERNEL);
+ if (!name || !slave) {
+ printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
+ master->name);
+ kfree(name);
+ kfree(slave);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* set up the MTD object for this partition */
+ slave->mtd.type = master->type;
+ slave->mtd.flags = master->flags & ~part->mask_flags;
+ slave->mtd.size = part->size;
+ slave->mtd.writesize = master->writesize;
+ slave->mtd.writebufsize = master->writebufsize;
+ slave->mtd.oobsize = master->oobsize;
+ slave->mtd.oobavail = master->oobavail;
+ slave->mtd.subpage_sft = master->subpage_sft;
+
+ slave->mtd.name = name;
+ slave->mtd.owner = master->owner;
+
+ /* NOTE: Historically, we didn't arrange MTDs as a tree out of
+ * concern for showing the same data in multiple partitions.
+ * However, it is very useful to have the master node present,
+ * so the MTD_PARTITIONED_MASTER option allows that. The master
+ * will have device nodes etc only if this is set, so make the
+ * parent conditional on that option. Note, this is a way to
+ * distinguish between the master and the partition in sysfs.
+ */
+ slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
+ &master->dev :
+ master->dev.parent;
+
+ slave->mtd._read = part_read;
+ slave->mtd._write = part_write;
+
+ if (master->_panic_write)
+ slave->mtd._panic_write = part_panic_write;
+
+ if (master->_point && master->_unpoint) {
+ slave->mtd._point = part_point;
+ slave->mtd._unpoint = part_unpoint;
+ }
+
+ if (master->_get_unmapped_area)
+ slave->mtd._get_unmapped_area = part_get_unmapped_area;
+ if (master->_read_oob)
+ slave->mtd._read_oob = part_read_oob;
+ if (master->_write_oob)
+ slave->mtd._write_oob = part_write_oob;
+ if (master->_read_user_prot_reg)
+ slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
+ if (master->_read_fact_prot_reg)
+ slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
+ if (master->_write_user_prot_reg)
+ slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
+ if (master->_lock_user_prot_reg)
+ slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
+ if (master->_get_user_prot_info)
+ slave->mtd._get_user_prot_info = part_get_user_prot_info;
+ if (master->_get_fact_prot_info)
+ slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
+ if (master->_sync)
+ slave->mtd._sync = part_sync;
+ if (!partno && !master->dev.class && master->_suspend &&
+ master->_resume) {
+ slave->mtd._suspend = part_suspend;
+ slave->mtd._resume = part_resume;
+ }
+ if (master->_writev)
+ slave->mtd._writev = part_writev;
+ if (master->_lock)
+ slave->mtd._lock = part_lock;
+ if (master->_unlock)
+ slave->mtd._unlock = part_unlock;
+ if (master->_is_locked)
+ slave->mtd._is_locked = part_is_locked;
+ if (master->_block_isreserved)
+ slave->mtd._block_isreserved = part_block_isreserved;
+ if (master->_block_isbad)
+ slave->mtd._block_isbad = part_block_isbad;
+ if (master->_block_markbad)
+ slave->mtd._block_markbad = part_block_markbad;
+ slave->mtd._erase = part_erase;
+ slave->master = master;
+ slave->offset = part->offset;
+
+ if (slave->offset == MTDPART_OFS_APPEND)
+ slave->offset = cur_offset;
+ if (slave->offset == MTDPART_OFS_NXTBLK) {
+ slave->offset = cur_offset;
+ if (mtd_mod_by_eb(cur_offset, master) != 0) {
+ /* Round up to next erasesize */
+ slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+ printk(KERN_NOTICE "Moving partition %d: "
+ "0x%012llx -> 0x%012llx\n", partno,
+ (unsigned long long)cur_offset, (unsigned long long)slave->offset);
+ }
+ }
+ if (slave->offset == MTDPART_OFS_RETAIN) {
+ slave->offset = cur_offset;
+ if (master->size - slave->offset >= slave->mtd.size) {
+ slave->mtd.size = master->size - slave->offset
+ - slave->mtd.size;
+ } else {
+ printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
+ part->name, master->size - slave->offset,
+ slave->mtd.size);
+ /* register to preserve ordering */
+ goto out_register;
+ }
+ }
+ if (slave->mtd.size == MTDPART_SIZ_FULL)
+ slave->mtd.size = master->size - slave->offset;
+
+ printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
+ (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
+
+ /* let's do some sanity checks */
+ if (slave->offset >= master->size) {
+ /* let's register it anyway to preserve ordering */
+ slave->offset = 0;
+ slave->mtd.size = 0;
+ printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
+ part->name);
+ goto out_register;
+ }
+ if (slave->offset + slave->mtd.size > master->size) {
+ slave->mtd.size = master->size - slave->offset;
+ printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
+ part->name, master->name, (unsigned long long)slave->mtd.size);
+ }
+ if (master->numeraseregions > 1) {
+ /* Deal with variable erase size stuff */
+ int i, max = master->numeraseregions;
+ u64 end = slave->offset + slave->mtd.size;
+ struct mtd_erase_region_info *regions = master->eraseregions;
+
+ /* Find the first erase region which is part of this
+ * partition. */
+ for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
+ ;
+ /* The loop searched for the region _behind_ the first one */
+ if (i > 0)
+ i--;
+
+ /* Pick biggest erasesize */
+ for (; i < max && regions[i].offset < end; i++) {
+ if (slave->mtd.erasesize < regions[i].erasesize) {
+ slave->mtd.erasesize = regions[i].erasesize;
+ }
+ }
+ BUG_ON(slave->mtd.erasesize == 0);
+ } else {
+ /* Single erase size */
+ slave->mtd.erasesize = master->erasesize;
+ }
+
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ mtd_mod_by_eb(slave->offset, &slave->mtd)) {
+ /* Doesn't start on a boundary of major erase size */
+ /* FIXME: Let it be writable if it is on a boundary of
+ * _minor_ erase size though */
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+ part->name);
+ }
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+ part->name);
+ }
+
+ slave->mtd.ecclayout = master->ecclayout;
+ slave->mtd.ecc_step_size = master->ecc_step_size;
+ slave->mtd.ecc_strength = master->ecc_strength;
+ slave->mtd.bitflip_threshold = master->bitflip_threshold;
+
+ if (master->_block_isbad) {
+ uint64_t offs = 0;
+
+ while (offs < slave->mtd.size) {
+ if (mtd_block_isreserved(master, offs + slave->offset))
+ slave->mtd.ecc_stats.bbtblocks++;
+ else if (mtd_block_isbad(master, offs + slave->offset))
+ slave->mtd.ecc_stats.badblocks++;
+ offs += slave->mtd.erasesize;
+ }
+ }
+
+out_register:
+ return slave;
+}
+
+static ssize_t mtd_partition_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_part *part = PART(mtd);
+ return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
+}
+
+static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
+
+static const struct attribute *mtd_partition_attrs[] = {
+ &dev_attr_offset.attr,
+ NULL
+};
+
+static int mtd_add_partition_attrs(struct mtd_part *new)
+{
+ int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
+ if (ret)
+ printk(KERN_WARNING
+ "mtd: failed to create partition attrs, err=%d\n", ret);
+ return ret;
+}
+
+int mtd_add_partition(struct mtd_info *master, const char *name,
+ long long offset, long long length)
+{
+ struct mtd_partition part;
+ struct mtd_part *new;
+ int ret = 0;
+
+ /* the direct offset is expected */
+ if (offset == MTDPART_OFS_APPEND ||
+ offset == MTDPART_OFS_NXTBLK)
+ return -EINVAL;
+
+ if (length == MTDPART_SIZ_FULL)
+ length = master->size - offset;
+
+ if (length <= 0)
+ return -EINVAL;
+
+ part.name = name;
+ part.size = length;
+ part.offset = offset;
+ part.mask_flags = 0;
+ part.ecclayout = NULL;
+
+ new = allocate_partition(master, &part, -1, offset);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_add(&new->list, &mtd_partitions);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ add_mtd_device(&new->mtd);
+
+ mtd_add_partition_attrs(new);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_add_partition);
+
+int mtd_del_partition(struct mtd_info *master, int partno)
+{
+ struct mtd_part *slave, *next;
+ int ret = -EINVAL;
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_for_each_entry_safe(slave, next, &mtd_partitions, list)
+ if ((slave->master == master) &&
+ (slave->mtd.index == partno)) {
+ sysfs_remove_files(&slave->mtd.dev.kobj,
+ mtd_partition_attrs);
+ ret = del_mtd_device(&slave->mtd);
+ if (ret < 0)
+ break;
+
+ list_del(&slave->list);
+ free_partition(slave);
+ break;
+ }
+ mutex_unlock(&mtd_partitions_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_del_partition);
+
+/*
+ * This function, given a master MTD object and a partition table, creates
+ * and registers slave MTD objects which are bound to the master according to
+ * the partition definitions.
+ *
+ * For historical reasons, this function's caller only registers the master
+ * if the MTD_PARTITIONED_MASTER config option is set.
+ */
+
+int add_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition *parts,
+ int nbparts)
+{
+ struct mtd_part *slave;
+ uint64_t cur_offset = 0;
+ int i;
+
+ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+
+ for (i = 0; i < nbparts; i++) {
+ slave = allocate_partition(master, parts + i, i, cur_offset);
+ if (IS_ERR(slave))
+ return PTR_ERR(slave);
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_add(&slave->list, &mtd_partitions);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ add_mtd_device(&slave->mtd);
+ mtd_add_partition_attrs(slave);
+
+ cur_offset = slave->offset + slave->mtd.size;
+ }
+
+ return 0;
+}
+
+static DEFINE_SPINLOCK(part_parser_lock);
+static LIST_HEAD(part_parsers);
+
+static struct mtd_part_parser *get_partition_parser(const char *name)
+{
+ struct mtd_part_parser *p, *ret = NULL;
+
+ spin_lock(&part_parser_lock);
+
+ list_for_each_entry(p, &part_parsers, list)
+ if (!strcmp(p->name, name) && try_module_get(p->owner)) {
+ ret = p;
+ break;
+ }
+
+ spin_unlock(&part_parser_lock);
+
+ return ret;
+}
+
+#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
+
+void register_mtd_parser(struct mtd_part_parser *p)
+{
+ spin_lock(&part_parser_lock);
+ list_add(&p->list, &part_parsers);
+ spin_unlock(&part_parser_lock);
+}
+EXPORT_SYMBOL_GPL(register_mtd_parser);
+
+void deregister_mtd_parser(struct mtd_part_parser *p)
+{
+ spin_lock(&part_parser_lock);
+ list_del(&p->list);
+ spin_unlock(&part_parser_lock);
+}
+EXPORT_SYMBOL_GPL(deregister_mtd_parser);
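+
+/*
+ * Illustrative sketch (not part of the original file): the minimal
+ * shape of a partition parser. The "example" names are hypothetical;
+ * real parsers such as cmdlinepart and ofpart follow this pattern.
+ */
+static int example_parse_fn(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ return 0; /* a real parser would kmalloc *pparts and fill it */
+}
+
+static struct mtd_part_parser example_parser __maybe_unused = {
+ .owner = THIS_MODULE,
+ .name = "example",
+ .parse_fn = example_parse_fn,
+};
+/* register_mtd_parser(&example_parser) from the parser's module init */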
+
+/*
+ * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
+ * are changing this array!
+ */
+static const char * const default_mtd_part_types[] = {
+ "cmdlinepart",
+ "ofpart",
+ NULL
+};
+
+/**
+ * parse_mtd_partitions - parse MTD partitions
+ * @master: the master partition (describes whole MTD device)
+ * @types: names of partition parsers to try or %NULL
+ * @pparts: array of partitions found is returned here
+ * @data: MTD partition parser-specific data
+ *
+ * This function tries to find partitions on MTD device @master. It uses MTD
+ * partition parsers, specified in @types. However, if @types is %NULL, then
+ * the default list of parsers is used. The default list contains only the
+ * "cmdlinepart" and "ofpart" parsers ATM.
+ * Note: if @types lists more than one parser, only the partitions reported
+ * by the first parser that finds any are used.
+ *
+ * This function may return:
+ * o a negative error code in case of failure
+ * o zero if no partitions were found
+ * o a positive number of found partitions, in which case on exit @pparts will
+ *   point to an array containing this number of &struct mtd_partition objects.
+ */
+int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_part_parser *parser;
+ int ret = 0;
+
+ if (!types)
+ types = default_mtd_part_types;
+
+ for ( ; ret <= 0 && *types; types++) {
+ parser = get_partition_parser(*types);
+ if (!parser && !request_module("%s", *types))
+ parser = get_partition_parser(*types);
+ if (!parser)
+ continue;
+ ret = (*parser->parse_fn)(master, pparts, data);
+ put_partition_parser(parser);
+ if (ret > 0) {
+ printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
+ ret, parser->name, master->name);
+ break;
+ }
+ }
+ return ret;
+}
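+
+/*
+ * Illustrative sketch (not part of the original file): how a flash
+ * driver's probe path might consume parse_mtd_partitions(). Names are
+ * hypothetical, and most drivers reach this through
+ * mtd_device_parse_register() rather than calling it directly.
+ */
+static int __maybe_unused example_register_with_parts(struct mtd_info *master)
+{
+ struct mtd_partition *parts;
+ int nr;
+
+ nr = parse_mtd_partitions(master, NULL /* default parsers */,
+ &parts, NULL);
+ if (nr < 0)
+ return nr;
+ if (nr > 0)
+ return add_mtd_partitions(master, parts, nr);
+ return add_mtd_device(master); /* fall back to the whole device */
+}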
+
+int mtd_is_partition(const struct mtd_info *mtd)
+{
+ struct mtd_part *part;
+ int ispart = 0;
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_for_each_entry(part, &mtd_partitions, list)
+ if (&part->mtd == mtd) {
+ ispart = 1;
+ break;
+ }
+ mutex_unlock(&mtd_partitions_mutex);
+
+ return ispart;
+}
+EXPORT_SYMBOL_GPL(mtd_is_partition);
+
+/* Returns the size of the entire flash chip */
+uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+{
+ if (!mtd_is_partition(mtd))
+ return mtd->size;
+
+ return PART(mtd)->master->size;
+}
+EXPORT_SYMBOL_GPL(mtd_get_device_size);
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
new file mode 100644
index 000000000..20c02a3b7
--- /dev/null
+++ b/drivers/mtd/mtdsuper.c
@@ -0,0 +1,220 @@
+/* MTD-based superblock management
+ *
+ * Copyright © 2001-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Written by: David Howells <dhowells@redhat.com>
+ * David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mtd/super.h>
+#include <linux/namei.h>
+#include <linux/export.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+
+/*
+ * compare superblocks to see if they're equivalent
+ * - they are if the underlying MTD device is the same
+ */
+static int get_sb_mtd_compare(struct super_block *sb, void *_mtd)
+{
+ struct mtd_info *mtd = _mtd;
+
+ if (sb->s_mtd == mtd) {
+ pr_debug("MTDSB: Match on device %d (\"%s\")\n",
+ mtd->index, mtd->name);
+ return 1;
+ }
+
+ pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
+ sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name);
+ return 0;
+}
+
+/*
+ * mark the superblock by the MTD device it is using
+ * - set the device number to be the correct MTD block device for persistence
+ * of NFS exports
+ */
+static int get_sb_mtd_set(struct super_block *sb, void *_mtd)
+{
+ struct mtd_info *mtd = _mtd;
+
+ sb->s_mtd = mtd;
+ sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+ sb->s_bdi = mtd->backing_dev_info;
+ return 0;
+}
+
+/*
+ * get a superblock on an MTD-backed filesystem
+ */
+static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ struct mtd_info *mtd,
+ int (*fill_super)(struct super_block *, void *, int))
+{
+ struct super_block *sb;
+ int ret;
+
+ sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, flags, mtd);
+ if (IS_ERR(sb))
+ goto out_error;
+
+ if (sb->s_root)
+ goto already_mounted;
+
+ /* fresh new superblock */
+ pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
+ mtd->index, mtd->name);
+
+ ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+ if (ret < 0) {
+ deactivate_locked_super(sb);
+ return ERR_PTR(ret);
+ }
+
+ /* go */
+ sb->s_flags |= MS_ACTIVE;
+ return dget(sb->s_root);
+
+ /* new mountpoint for an already mounted superblock */
+already_mounted:
+ pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n",
+ mtd->index, mtd->name);
+ put_mtd_device(mtd);
+ return dget(sb->s_root);
+
+out_error:
+ put_mtd_device(mtd);
+ return ERR_CAST(sb);
+}
+
+/*
+ * get a superblock on an MTD-backed filesystem by MTD device number
+ */
+static struct dentry *mount_mtd_nr(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data, int mtdnr,
+ int (*fill_super)(struct super_block *, void *, int))
+{
+ struct mtd_info *mtd;
+
+ mtd = get_mtd_device(NULL, mtdnr);
+ if (IS_ERR(mtd)) {
+ pr_debug("MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
+ return ERR_CAST(mtd);
+ }
+
+ return mount_mtd_aux(fs_type, flags, dev_name, data, mtd, fill_super);
+}
+
+/*
+ * set up an MTD-based superblock
+ */
+struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ int (*fill_super)(struct super_block *, void *, int))
+{
+#ifdef CONFIG_BLOCK
+ struct block_device *bdev;
+ int ret, major;
+#endif
+ int mtdnr;
+
+ if (!dev_name)
+ return ERR_PTR(-EINVAL);
+
+ pr_debug("MTDSB: dev_name \"%s\"\n", dev_name);
+
+ /* the preferred way of mounting in the future, especially when
+ * CONFIG_BLOCK=n - we specify the underlying MTD device by number or
+ * by name, so that we don't require block device support to be present
+ * in the kernel. */
+ if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') {
+ if (dev_name[3] == ':') {
+ struct mtd_info *mtd;
+
+ /* mount by MTD device name */
+ pr_debug("MTDSB: mtd:%%s, name \"%s\"\n",
+ dev_name + 4);
+
+ mtd = get_mtd_device_nm(dev_name + 4);
+ if (!IS_ERR(mtd))
+ return mount_mtd_aux(
+ fs_type, flags,
+ dev_name, data, mtd,
+ fill_super);
+
+ printk(KERN_NOTICE "MTD:"
+ " MTD device with name \"%s\" not found.\n",
+ dev_name + 4);
+
+ } else if (isdigit(dev_name[3])) {
+ /* mount by MTD device number */
+ char *endptr;
+
+ mtdnr = simple_strtoul(dev_name + 3, &endptr, 0);
+ if (!*endptr) {
+ /* It was a valid number */
+ pr_debug("MTDSB: mtd%%d, mtdnr %d\n",
+ mtdnr);
+ return mount_mtd_nr(fs_type, flags,
+ dev_name, data,
+ mtdnr, fill_super);
+ }
+ }
+ }
+
+#ifdef CONFIG_BLOCK
+ /* try the old way - the hack where we allowed users to mount
+ * /dev/mtdblock$(n) but didn't actually _use_ the blockdev
+ */
+ bdev = lookup_bdev(dev_name);
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
+ pr_debug("MTDSB: lookup_bdev() returned %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ pr_debug("MTDSB: lookup_bdev() returned 0\n");
+
+ ret = -EINVAL;
+
+ major = MAJOR(bdev->bd_dev);
+ mtdnr = MINOR(bdev->bd_dev);
+ bdput(bdev);
+
+ if (major != MTD_BLOCK_MAJOR)
+ goto not_an_MTD_device;
+
+ return mount_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super);
+
+not_an_MTD_device:
+#endif /* CONFIG_BLOCK */
+
+ if (!(flags & MS_SILENT))
+ printk(KERN_NOTICE
+ "MTD: Attempt to mount non-MTD device \"%s\"\n",
+ dev_name);
+ return ERR_PTR(-EINVAL);
+}
+
+EXPORT_SYMBOL_GPL(mount_mtd);
+
+/*
+ * destroy an MTD-based superblock
+ */
+void kill_mtd_super(struct super_block *sb)
+{
+ generic_shutdown_super(sb);
+ put_mtd_device(sb->s_mtd);
+ sb->s_mtd = NULL;
+}
+
+EXPORT_SYMBOL_GPL(kill_mtd_super);
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
new file mode 100644
index 000000000..fc8b3d16c
--- /dev/null
+++ b/drivers/mtd/mtdswap.c
@@ -0,0 +1,1591 @@
+/*
+ * Swap block device support for MTDs
+ * Turns an MTD device into a swap device with block wear leveling
+ *
+ * Copyright © 2007,2011 Nokia Corporation. All rights reserved.
+ *
+ * Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com>
+ *
+ * Based on Richard Purdie's earlier implementation in 2007. Background
+ * support and lock-less operation written by Adrian Hunter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/genhd.h>
+#include <linux/swap.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/math64.h>
+
+#define MTDSWAP_PREFIX "mtdswap"
+
+/*
+ * The number of free eraseblocks when GC should stop
+ */
+#define CLEAN_BLOCK_THRESHOLD 20
+
+/*
+ * Number of free eraseblocks below which GC can also collect low frag
+ * blocks.
+ */
+#define LOW_FRAG_GC_TRESHOLD 5
+
+/*
+ * Wear-level cost amortization. We want to do wear leveling in the background
+ * without disturbing GC too much, which is done by capping the GC frequency.
+ * Frequency value 6 means 1/6 of the GC passes will pick an erase block based
+ * on the biggest wear difference rather than the biggest dirtiness.
+ *
+ * The lower freq2 should be chosen so that it makes sure the maximum erase
+ * difference will decrease even if a malicious application is deliberately
+ * trying to make erase differences large.
+ */
+#define MAX_ERASE_DIFF 4000
+#define COLLECT_NONDIRTY_BASE MAX_ERASE_DIFF
+#define COLLECT_NONDIRTY_FREQ1 6
+#define COLLECT_NONDIRTY_FREQ2 4
+
+#define PAGE_UNDEF UINT_MAX
+#define BLOCK_UNDEF UINT_MAX
+#define BLOCK_ERROR (UINT_MAX - 1)
+#define BLOCK_MAX (UINT_MAX - 2)
+
+#define EBLOCK_BAD (1 << 0)
+#define EBLOCK_NOMAGIC (1 << 1)
+#define EBLOCK_BITFLIP (1 << 2)
+#define EBLOCK_FAILED (1 << 3)
+#define EBLOCK_READERR (1 << 4)
+#define EBLOCK_IDX_SHIFT 5
+
+struct swap_eb {
+ struct rb_node rb;
+ struct rb_root *root;
+
+ unsigned int flags;
+ unsigned int active_count;
+ unsigned int erase_count;
+ unsigned int pad; /* speeds up pointer decrement */
+};
+
+#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
+ rb)->erase_count)
+#define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
+ rb)->erase_count)
+
+struct mtdswap_tree {
+ struct rb_root root;
+ unsigned int count;
+};
+
+enum {
+ MTDSWAP_CLEAN,
+ MTDSWAP_USED,
+ MTDSWAP_LOWFRAG,
+ MTDSWAP_HIFRAG,
+ MTDSWAP_DIRTY,
+ MTDSWAP_BITFLIP,
+ MTDSWAP_FAILING,
+ MTDSWAP_TREE_CNT,
+};
+
+struct mtdswap_dev {
+ struct mtd_blktrans_dev *mbd_dev;
+ struct mtd_info *mtd;
+ struct device *dev;
+
+ unsigned int *page_data;
+ unsigned int *revmap;
+
+ unsigned int eblks;
+ unsigned int spare_eblks;
+ unsigned int pages_per_eblk;
+ unsigned int max_erase_count;
+ struct swap_eb *eb_data;
+
+ struct mtdswap_tree trees[MTDSWAP_TREE_CNT];
+
+ unsigned long long sect_read_count;
+ unsigned long long sect_write_count;
+ unsigned long long mtd_write_count;
+ unsigned long long mtd_read_count;
+ unsigned long long discard_count;
+ unsigned long long discard_page_count;
+
+ unsigned int curr_write_pos;
+ struct swap_eb *curr_write;
+
+ char *page_buf;
+ char *oob_buf;
+
+ struct dentry *debugfs_root;
+};
+
+struct mtdswap_oobdata {
+ __le16 magic;
+ __le32 count;
+} __packed;
+
+#define MTDSWAP_MAGIC_CLEAN 0x2095
+#define MTDSWAP_MAGIC_DIRTY (MTDSWAP_MAGIC_CLEAN + 1)
+#define MTDSWAP_TYPE_CLEAN 0
+#define MTDSWAP_TYPE_DIRTY 1
+#define MTDSWAP_OOBSIZE sizeof(struct mtdswap_oobdata)
+
+#define MTDSWAP_ERASE_RETRIES 3 /* Before marking erase block bad */
+#define MTDSWAP_IO_RETRIES 3
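+
+/*
+ * Marker placement (see mtdswap_write_marker() below): the CLEAN
+ * marker -- magic plus 32-bit erase count -- is written to the OOB of
+ * an eraseblock's first page; the DIRTY marker (magic only) goes into
+ * the OOB of the second page once the block starts taking data.
+ */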
+
+enum {
+ MTDSWAP_SCANNED_CLEAN,
+ MTDSWAP_SCANNED_DIRTY,
+ MTDSWAP_SCANNED_BITFLIP,
+ MTDSWAP_SCANNED_BAD,
+};
+
+/*
+ * In the worst case mtdswap_writesect() has allocated the last clean
+ * page from the current block and is then pre-empted by the GC
+ * thread. The thread can consume a full erase block when moving a
+ * block.
+ */
+#define MIN_SPARE_EBLOCKS 2
+#define MIN_ERASE_BLOCKS (MIN_SPARE_EBLOCKS + 1)
+
+#define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)
+#define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL)
+#define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
+#define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count)
+
+#define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv)
+
+static char partitions[128] = "";
+module_param_string(partitions, partitions, sizeof(partitions), 0444);
+MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap, "
+ "e.g. partitions=\"1,3,5\"");
+
+static unsigned int spare_eblocks = 10;
+module_param(spare_eblocks, uint, 0444);
+MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for "
+ "garbage collection (default 10%)");
+
+static bool header; /* false */
+module_param(header, bool, 0444);
+MODULE_PARM_DESC(header,
+ "Include builtin swap header (default 0, without header)");
+
+static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);
+
+static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ return (loff_t)(eb - d->eb_data) * d->mtd->erasesize;
+}
+
+static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int oldidx;
+ struct mtdswap_tree *tp;
+
+ if (eb->root) {
+ tp = container_of(eb->root, struct mtdswap_tree, root);
+ oldidx = tp - &d->trees[0];
+
+ d->trees[oldidx].count--;
+ rb_erase(&eb->rb, eb->root);
+ }
+}
+
+static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb)
+{
+ struct rb_node **p, *parent = NULL;
+ struct swap_eb *cur;
+
+ p = &root->rb_node;
+ while (*p) {
+ parent = *p;
+ cur = rb_entry(parent, struct swap_eb, rb);
+ if (eb->erase_count > cur->erase_count)
+ p = &(*p)->rb_right;
+ else
+ p = &(*p)->rb_left;
+ }
+
+ rb_link_node(&eb->rb, parent, p);
+ rb_insert_color(&eb->rb, root);
+}
+
+static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx)
+{
+ struct rb_root *root;
+
+ if (eb->root == &d->trees[idx].root)
+ return;
+
+ mtdswap_eb_detach(d, eb);
+ root = &d->trees[idx].root;
+ __mtdswap_rb_add(root, eb);
+ eb->root = root;
+ d->trees[idx].count++;
+}
+
+static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx)
+{
+ struct rb_node *p;
+ unsigned int i;
+
+ p = rb_first(root);
+ i = 0;
+ while (i < idx && p) {
+ p = rb_next(p);
+ i++;
+ }
+
+ return p;
+}
+
+static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ int ret;
+ loff_t offset;
+
+ d->spare_eblks--;
+ eb->flags |= EBLOCK_BAD;
+ mtdswap_eb_detach(d, eb);
+ eb->root = NULL;
+
+ /* badblocks not supported */
+ if (!mtd_can_have_bb(d->mtd))
+ return 1;
+
+ offset = mtdswap_eb_offset(d, eb);
+ dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
+ ret = mtd_block_markbad(d->mtd, offset);
+
+ if (ret) {
+ dev_warn(d->dev, "Mark block bad failed for block at %08llx "
+ "error %d\n", offset, ret);
+ return ret;
+ }
+
+ return 1;
+
+}
+
+static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int marked = eb->flags & EBLOCK_FAILED;
+ struct swap_eb *curr_write = d->curr_write;
+
+ eb->flags |= EBLOCK_FAILED;
+ if (curr_write == eb) {
+ d->curr_write = NULL;
+
+ if (!marked && d->curr_write_pos != 0) {
+ mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
+ return 0;
+ }
+ }
+
+ return mtdswap_handle_badblock(d, eb);
+}
+
+static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int ret = mtd_read_oob(d->mtd, from, ops);
+
+ if (mtd_is_bitflip(ret))
+ return ret;
+
+ if (ret) {
+ dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n",
+ ret, from);
+ return ret;
+ }
+
+ if (ops->oobretlen < ops->ooblen) {
+ dev_warn(d->dev, "Read OOB return short read (%zd bytes not "
+ "%zd) for block at %08llx\n",
+ ops->oobretlen, ops->ooblen, from);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ struct mtdswap_oobdata *data, *data2;
+ int ret;
+ loff_t offset;
+ struct mtd_oob_ops ops;
+
+ offset = mtdswap_eb_offset(d, eb);
+
+ /* Check first if the block is bad. */
+ if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
+ return MTDSWAP_SCANNED_BAD;
+
+ ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
+ ops.oobbuf = d->oob_buf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_AUTO_OOB;
+
+ ret = mtdswap_read_oob(d, offset, &ops);
+
+ if (ret && !mtd_is_bitflip(ret))
+ return ret;
+
+ data = (struct mtdswap_oobdata *)d->oob_buf;
+ data2 = (struct mtdswap_oobdata *)
+ (d->oob_buf + d->mtd->ecclayout->oobavail);
+
+ if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
+ eb->erase_count = le32_to_cpu(data->count);
+ if (mtd_is_bitflip(ret))
+ ret = MTDSWAP_SCANNED_BITFLIP;
+ else {
+ if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
+ ret = MTDSWAP_SCANNED_DIRTY;
+ else
+ ret = MTDSWAP_SCANNED_CLEAN;
+ }
+ } else {
+ eb->flags |= EBLOCK_NOMAGIC;
+ ret = MTDSWAP_SCANNED_DIRTY;
+ }
+
+ return ret;
+}
+
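+/*
+ * A CLEAN marker (magic plus erase count) lives in the OOB of the
+ * first page of an erase block; a DIRTY marker (magic only) in the OOB
+ * of the second page. mtdswap_read_markers() above reads both at once.
+ */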
+static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
+ u16 marker)
+{
+ struct mtdswap_oobdata n;
+ int ret;
+ loff_t offset;
+ struct mtd_oob_ops ops;
+
+ ops.ooboffs = 0;
+ ops.oobbuf = (uint8_t *)&n;
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.datbuf = NULL;
+
+ if (marker == MTDSWAP_TYPE_CLEAN) {
+ n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN);
+ n.count = cpu_to_le32(eb->erase_count);
+ ops.ooblen = MTDSWAP_OOBSIZE;
+ offset = mtdswap_eb_offset(d, eb);
+ } else {
+ n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY);
+ ops.ooblen = sizeof(n.magic);
+ offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
+ }
+
+ ret = mtd_write_oob(d->mtd, offset, &ops);
+
+ if (ret) {
+ dev_warn(d->dev, "Write OOB failed for block at %08llx "
+ "error %d\n", offset, ret);
+ if (ret == -EIO || mtd_is_eccerr(ret))
+ mtdswap_handle_write_error(d, eb);
+ return ret;
+ }
+
+ if (ops.oobretlen != ops.ooblen) {
+ dev_warn(d->dev, "Short OOB write for block at %08llx: "
+ "%zd not %zd\n",
+ offset, ops.oobretlen, ops.ooblen);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Find erase blocks lacking a MAGIC_CLEAN header, presumably because
+ * power was cut off after the erase but before the header write. For
+ * these we estimate the erase count from the median of the others.
+ */
+static void mtdswap_check_counts(struct mtdswap_dev *d)
+{
+ struct rb_root hist_root = RB_ROOT;
+ struct rb_node *medrb;
+ struct swap_eb *eb;
+ unsigned int i, cnt, median;
+
+ cnt = 0;
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
+ continue;
+
+ __mtdswap_rb_add(&hist_root, eb);
+ cnt++;
+ }
+
+ if (cnt == 0)
+ return;
+
+ medrb = mtdswap_rb_index(&hist_root, cnt / 2);
+ median = rb_entry(medrb, struct swap_eb, rb)->erase_count;
+
+ d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);
+
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR))
+ eb->erase_count = median;
+
+ if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
+ continue;
+
+ rb_erase(&eb->rb, &hist_root);
+ }
+}
+
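+/*
+ * Initial scan: classify each erase block from its OOB markers, stash
+ * the class in the flags above EBLOCK_IDX_SHIFT, estimate missing
+ * erase counts, then sort the blocks into their trees.
+ */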
+static void mtdswap_scan_eblks(struct mtdswap_dev *d)
+{
+ int status;
+ unsigned int i, idx;
+ struct swap_eb *eb;
+
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ status = mtdswap_read_markers(d, eb);
+ if (status < 0)
+ eb->flags |= EBLOCK_READERR;
+ else if (status == MTDSWAP_SCANNED_BAD) {
+ eb->flags |= EBLOCK_BAD;
+ continue;
+ }
+
+ switch (status) {
+ case MTDSWAP_SCANNED_CLEAN:
+ idx = MTDSWAP_CLEAN;
+ break;
+ case MTDSWAP_SCANNED_DIRTY:
+ case MTDSWAP_SCANNED_BITFLIP:
+ idx = MTDSWAP_DIRTY;
+ break;
+ default:
+ idx = MTDSWAP_FAILING;
+ }
+
+ eb->flags |= (idx << EBLOCK_IDX_SHIFT);
+ }
+
+ mtdswap_check_counts(d);
+
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ if (eb->flags & EBLOCK_BAD)
+ continue;
+
+ idx = eb->flags >> EBLOCK_IDX_SHIFT;
+ mtdswap_rb_add(d, eb, idx);
+ }
+}
+
+/*
+ * Place the erase block into the tree corresponding to the number of
+ * active pages it contains.
+ */
+static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int weight = eb->active_count;
+ unsigned int maxweight = d->pages_per_eblk;
+
+ if (eb == d->curr_write)
+ return;
+
+ if (eb->flags & EBLOCK_BITFLIP)
+ mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
+ else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED))
+ mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
+ else if (weight == maxweight)
+ mtdswap_rb_add(d, eb, MTDSWAP_USED);
+ else if (weight == 0)
+ mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
+ else if (weight > (maxweight/2))
+ mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG);
+ else
+ mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG);
+}
+
+
+static void mtdswap_erase_callback(struct erase_info *done)
+{
+ wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
+ wake_up(wait_q);
+}
+
+static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct erase_info erase;
+ wait_queue_head_t wq;
+ unsigned int retries = 0;
+ int ret;
+
+ eb->erase_count++;
+ if (eb->erase_count > d->max_erase_count)
+ d->max_erase_count = eb->erase_count;
+
+retry:
+ init_waitqueue_head(&wq);
+ memset(&erase, 0, sizeof(struct erase_info));
+
+ erase.mtd = mtd;
+ erase.callback = mtdswap_erase_callback;
+ erase.addr = mtdswap_eb_offset(d, eb);
+ erase.len = mtd->erasesize;
+ erase.priv = (u_long)&wq;
+
+ ret = mtd_erase(mtd, &erase);
+ if (ret) {
+ if (retries++ < MTDSWAP_ERASE_RETRIES) {
+ dev_warn(d->dev,
+ "erase of erase block %#llx on %s failed",
+ erase.addr, mtd->name);
+ yield();
+ goto retry;
+ }
+
+ dev_err(d->dev, "Cannot erase erase block %#llx on %s\n",
+ erase.addr, mtd->name);
+
+ mtdswap_handle_badblock(d, eb);
+ return -EIO;
+ }
+
+ ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE ||
+ erase.state == MTD_ERASE_FAILED);
+ if (ret) {
+ dev_err(d->dev, "Interrupted erase block %#llx erassure on %s",
+ erase.addr, mtd->name);
+ return -EINTR;
+ }
+
+ if (erase.state == MTD_ERASE_FAILED) {
+ if (retries++ < MTDSWAP_ERASE_RETRIES) {
+ dev_warn(d->dev,
+ "erase of erase block %#llx on %s failed",
+ erase.addr, mtd->name);
+ yield();
+ goto retry;
+ }
+
+ mtdswap_handle_badblock(d, eb);
+ return -EIO;
+ }
+
+ return 0;
+}
+
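+/*
+ * Hand out the next free page. When the current write block is full,
+ * the least-worn CLEAN block is taken and marked DIRTY on flash before
+ * use. Pages are numbered linearly: eblock index * pages_per_eblk plus
+ * the write position within the block.
+ */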
+static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
+ unsigned int *block)
+{
+ int ret;
+ struct swap_eb *old_eb = d->curr_write;
+ struct rb_root *clean_root;
+ struct swap_eb *eb;
+
+ if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {
+ do {
+ if (TREE_EMPTY(d, CLEAN))
+ return -ENOSPC;
+
+ clean_root = TREE_ROOT(d, CLEAN);
+ eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
+ rb_erase(&eb->rb, clean_root);
+ eb->root = NULL;
+ TREE_COUNT(d, CLEAN)--;
+
+ ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
+ } while (ret == -EIO || mtd_is_eccerr(ret));
+
+ if (ret)
+ return ret;
+
+ d->curr_write_pos = 0;
+ d->curr_write = eb;
+ if (old_eb)
+ mtdswap_store_eb(d, old_eb);
+ }
+
+ *block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
+ d->curr_write_pos;
+
+ d->curr_write->active_count++;
+ d->revmap[*block] = page;
+ d->curr_write_pos++;
+
+ return 0;
+}
+
+static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d)
+{
+ return TREE_COUNT(d, CLEAN) * d->pages_per_eblk +
+ d->pages_per_eblk - d->curr_write_pos;
+}
+
+static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d)
+{
+ return mtdswap_free_page_cnt(d) > d->pages_per_eblk;
+}
+
+static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
+ unsigned int page, unsigned int *bp, int gc_context)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct swap_eb *eb;
+ size_t retlen;
+ loff_t writepos;
+ int ret;
+
+retry:
+ if (!gc_context)
+ while (!mtdswap_enough_free_pages(d))
+ if (mtdswap_gc(d, 0) > 0)
+ return -ENOSPC;
+
+ ret = mtdswap_map_free_block(d, page, bp);
+ eb = d->eb_data + (*bp / d->pages_per_eblk);
+
+ if (ret == -EIO || mtd_is_eccerr(ret)) {
+ d->curr_write = NULL;
+ eb->active_count--;
+ d->revmap[*bp] = PAGE_UNDEF;
+ goto retry;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ writepos = (loff_t)*bp << PAGE_SHIFT;
+ ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
+ if (ret == -EIO || mtd_is_eccerr(ret)) {
+ d->curr_write_pos--;
+ eb->active_count--;
+ d->revmap[*bp] = PAGE_UNDEF;
+ mtdswap_handle_write_error(d, eb);
+ goto retry;
+ }
+
+ if (ret < 0) {
+ dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",
+ ret, retlen);
+ goto err;
+ }
+
+ if (retlen != PAGE_SIZE) {
+ dev_err(d->dev, "Short write to MTD device: %zd written",
+ retlen);
+ ret = -EIO;
+ goto err;
+ }
+
+ return ret;
+
+err:
+ d->curr_write_pos--;
+ eb->active_count--;
+ d->revmap[*bp] = PAGE_UNDEF;
+
+ return ret;
+}
+
+static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
+ unsigned int *newblock)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct swap_eb *eb, *oldeb;
+ int ret;
+ size_t retlen;
+ unsigned int page, retries;
+ loff_t readpos;
+
+ page = d->revmap[oldblock];
+ readpos = (loff_t) oldblock << PAGE_SHIFT;
+ retries = 0;
+
+retry:
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
+
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
+ oldeb = d->eb_data + oldblock / d->pages_per_eblk;
+ oldeb->flags |= EBLOCK_READERR;
+
+ dev_err(d->dev, "Read Error: %d (block %u)\n", ret,
+ oldblock);
+ retries++;
+ if (retries < MTDSWAP_IO_RETRIES)
+ goto retry;
+
+ goto read_error;
+ }
+
+ if (retlen != PAGE_SIZE) {
+ dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
+ oldblock);
+ ret = -EIO;
+ goto read_error;
+ }
+
+ ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
+ if (ret < 0) {
+ d->page_data[page] = BLOCK_ERROR;
+ dev_err(d->dev, "Write error: %d\n", ret);
+ return ret;
+ }
+
+ eb = d->eb_data + *newblock / d->pages_per_eblk;
+ d->page_data[page] = *newblock;
+ d->revmap[oldblock] = PAGE_UNDEF;
+ eb = d->eb_data + oldblock / d->pages_per_eblk;
+ eb->active_count--;
+
+ return 0;
+
+read_error:
+ d->page_data[page] = BLOCK_ERROR;
+ d->revmap[oldblock] = PAGE_UNDEF;
+ return ret;
+}
+
+static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int i, block, eblk_base, newblock;
+ int ret, errcode;
+
+ errcode = 0;
+ eblk_base = (eb - d->eb_data) * d->pages_per_eblk;
+
+ for (i = 0; i < d->pages_per_eblk; i++) {
+ if (d->spare_eblks < MIN_SPARE_EBLOCKS)
+ return -ENOSPC;
+
+ block = eblk_base + i;
+ if (d->revmap[block] == PAGE_UNDEF)
+ continue;
+
+ ret = mtdswap_move_block(d, block, &newblock);
+ if (ret < 0 && !errcode)
+ errcode = ret;
+ }
+
+ return errcode;
+}
+
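+/*
+ * Pick the GC source tree in priority order bitflip -> dirty -> hifrag,
+ * extending down to lowfrag only once clean blocks run short: prefer
+ * blocks that are urgent (bitflips) or cheap to collect (few or no
+ * live pages).
+ */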
+static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
+{
+ int idx, stopat;
+
+ if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD)
+ stopat = MTDSWAP_LOWFRAG;
+ else
+ stopat = MTDSWAP_HIFRAG;
+
+ for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--)
+ if (d->trees[idx].root.rb_node != NULL)
+ return idx;
+
+ return -1;
+}
+
+static int mtdswap_wlfreq(unsigned int maxdiff)
+{
+ unsigned int h, x, y, dist, base;
+
+ /*
+ * Calculate linear ramp down from f1 to f2 when maxdiff goes from
+ * MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar
+ * to a triangle with height f1 - f2 and width COLLECT_NONDIRTY_BASE.
+ */
+
+ dist = maxdiff - MAX_ERASE_DIFF;
+ if (dist > COLLECT_NONDIRTY_BASE)
+ dist = COLLECT_NONDIRTY_BASE;
+
+ /*
+ * Modelling the slope as a right-angled triangle with base
+ * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is
+ * equal to the ratio h/base.
+ */
+ h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2;
+ base = COLLECT_NONDIRTY_BASE;
+
+ x = base - dist;
+ y = (x * h + base / 2) / base;
+
+ return COLLECT_NONDIRTY_FREQ2 + y;
+}
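+
+/*
+ * E.g. halfway along the ramp (dist == base / 2) gives x == base / 2
+ * and y == h / 2 rounded to nearest, a collection frequency midway
+ * between COLLECT_NONDIRTY_FREQ1 and COLLECT_NONDIRTY_FREQ2.
+ */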
+
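+/*
+ * Wear levelling: when the gap between the globally most-worn block
+ * and the least-worn block of some tree exceeds MAX_ERASE_DIFF,
+ * occasionally (at a rate set by mtdswap_wlfreq()) offer that tree for
+ * GC so its static data is moved and the under-used blocks recycled.
+ */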
+static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
+{
+ static unsigned int pick_cnt;
+ unsigned int i, idx = -1, wear, max;
+ struct rb_root *root;
+
+ max = 0;
+ for (i = 0; i <= MTDSWAP_DIRTY; i++) {
+ root = &d->trees[i].root;
+ if (root->rb_node == NULL)
+ continue;
+
+ wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
+ if (wear > max) {
+ max = wear;
+ idx = i;
+ }
+ }
+
+ if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {
+ pick_cnt = 0;
+ return idx;
+ }
+
+ pick_cnt++;
+ return -1;
+}
+
+static int mtdswap_choose_gc_tree(struct mtdswap_dev *d,
+ unsigned int background)
+{
+ int idx;
+
+ if (TREE_NONEMPTY(d, FAILING) &&
+ (background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY))))
+ return MTDSWAP_FAILING;
+
+ idx = mtdswap_choose_wl_tree(d);
+ if (idx >= MTDSWAP_CLEAN)
+ return idx;
+
+ return __mtdswap_choose_gc_tree(d);
+}
+
+static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
+ unsigned int background)
+{
+ struct rb_root *rp = NULL;
+ struct swap_eb *eb = NULL;
+ int idx;
+
+ if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
+ TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))
+ return NULL;
+
+ idx = mtdswap_choose_gc_tree(d, background);
+ if (idx < 0)
+ return NULL;
+
+ rp = &d->trees[idx].root;
+ eb = rb_entry(rb_first(rp), struct swap_eb, rb);
+
+ rb_erase(&eb->rb, rp);
+ eb->root = NULL;
+ d->trees[idx].count--;
+ return eb;
+}
+
+static unsigned int mtdswap_test_patt(unsigned int i)
+{
+ return i % 2 ? 0x55555555 : 0xAAAAAAAA;
+}
+
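+/*
+ * Torture test for a block that produced read errors: write and verify
+ * alternating 0x55/0xAA patterns over data and OOB, erasing between
+ * the two passes. Returns 1 if the block survives, 0 after retiring
+ * it as bad.
+ */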
+static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
+ struct swap_eb *eb)
+{
+ struct mtd_info *mtd = d->mtd;
+ unsigned int test, i, j, patt, mtd_pages;
+ loff_t base, pos;
+ unsigned int *p1 = (unsigned int *)d->page_buf;
+ unsigned char *p2 = (unsigned char *)d->oob_buf;
+ struct mtd_oob_ops ops;
+ int ret;
+
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = mtd->writesize;
+ ops.ooblen = mtd->ecclayout->oobavail;
+ ops.ooboffs = 0;
+ ops.datbuf = d->page_buf;
+ ops.oobbuf = d->oob_buf;
+ base = mtdswap_eb_offset(d, eb);
+ mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;
+
+ for (test = 0; test < 2; test++) {
+ pos = base;
+ for (i = 0; i < mtd_pages; i++) {
+ patt = mtdswap_test_patt(test + i);
+ memset(d->page_buf, patt, mtd->writesize);
+ memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
+ ret = mtd_write_oob(mtd, pos, &ops);
+ if (ret)
+ goto error;
+
+ pos += mtd->writesize;
+ }
+
+ pos = base;
+ for (i = 0; i < mtd_pages; i++) {
+ ret = mtd_read_oob(mtd, pos, &ops);
+ if (ret)
+ goto error;
+
+ patt = mtdswap_test_patt(test + i);
+ for (j = 0; j < mtd->writesize/sizeof(int); j++)
+ if (p1[j] != patt)
+ goto error;
+
+ for (j = 0; j < mtd->ecclayout->oobavail; j++)
+ if (p2[j] != (unsigned char)patt)
+ goto error;
+
+ pos += mtd->writesize;
+ }
+
+ ret = mtdswap_erase_block(d, eb);
+ if (ret)
+ goto error;
+ }
+
+ eb->flags &= ~EBLOCK_READERR;
+ return 1;
+
+error:
+ mtdswap_handle_badblock(d, eb);
+ return 0;
+}
+
+static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
+{
+ struct swap_eb *eb;
+ int ret;
+
+ if (d->spare_eblks < MIN_SPARE_EBLOCKS)
+ return 1;
+
+ eb = mtdswap_pick_gc_eblk(d, background);
+ if (!eb)
+ return 1;
+
+ ret = mtdswap_gc_eblock(d, eb);
+ if (ret == -ENOSPC)
+ return 1;
+
+ if (eb->flags & EBLOCK_FAILED) {
+ mtdswap_handle_badblock(d, eb);
+ return 0;
+ }
+
+ eb->flags &= ~EBLOCK_BITFLIP;
+ ret = mtdswap_erase_block(d, eb);
+ if ((eb->flags & EBLOCK_READERR) &&
+ (ret || !mtdswap_eblk_passes(d, eb)))
+ return 0;
+
+ if (ret == 0)
+ ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);
+
+ if (ret == 0)
+ mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
+ else if (ret != -EIO && !mtd_is_eccerr(ret))
+ mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
+
+ return 0;
+}
+
+static void mtdswap_background(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ int ret;
+
+ while (1) {
+ ret = mtdswap_gc(d, 1);
+ if (ret || mtd_blktrans_cease_background(dev))
+ return;
+ }
+}
+
+static void mtdswap_cleanup(struct mtdswap_dev *d)
+{
+ vfree(d->eb_data);
+ vfree(d->revmap);
+ vfree(d->page_data);
+ kfree(d->oob_buf);
+ kfree(d->page_buf);
+}
+
+static int mtdswap_flush(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+
+ mtd_sync(d->mtd);
+ return 0;
+}
+
+static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
+{
+ loff_t offset;
+ unsigned int badcnt;
+
+ badcnt = 0;
+
+ if (mtd_can_have_bb(mtd))
+ for (offset = 0; offset < size; offset += mtd->erasesize)
+ if (mtd_block_isbad(mtd, offset))
+ badcnt++;
+
+ return badcnt;
+}
+
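+/*
+ * page_data[] maps a swap page to its flash block (or BLOCK_UNDEF /
+ * BLOCK_ERROR); revmap[] maps a flash block back to its swap page.
+ * Rewriting a page invalidates the old mapping before a new block is
+ * allocated, so at most one flash copy is ever considered live.
+ */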
+static int mtdswap_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long page, char *buf)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ unsigned int newblock, mapped;
+ struct swap_eb *eb;
+ int ret;
+
+ d->sect_write_count++;
+
+ if (d->spare_eblks < MIN_SPARE_EBLOCKS)
+ return -ENOSPC;
+
+ if (header) {
+ /* Ignore writes to the header page */
+ if (unlikely(page == 0))
+ return 0;
+
+ page--;
+ }
+
+ mapped = d->page_data[page];
+ if (mapped <= BLOCK_MAX) {
+ eb = d->eb_data + (mapped / d->pages_per_eblk);
+ eb->active_count--;
+ mtdswap_store_eb(d, eb);
+ d->page_data[page] = BLOCK_UNDEF;
+ d->revmap[mapped] = PAGE_UNDEF;
+ }
+
+ ret = mtdswap_write_block(d, buf, page, &newblock, 0);
+ d->mtd_write_count++;
+
+ if (ret < 0)
+ return ret;
+
+ eb = d->eb_data + (newblock / d->pages_per_eblk);
+ d->page_data[page] = newblock;
+
+ return 0;
+}
+
+/* Provide a dummy swap header for the kernel */
+static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)
+{
+ union swap_header *hd = (union swap_header *)(buf);
+
+ memset(buf, 0, PAGE_SIZE - 10);
+
+ hd->info.version = 1;
+ hd->info.last_page = d->mbd_dev->size - 1;
+ hd->info.nr_badpages = 0;
+
+ memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);
+
+ return 0;
+}
+
+static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long page, char *buf)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ struct mtd_info *mtd = d->mtd;
+ unsigned int realblock, retries;
+ loff_t readpos;
+ struct swap_eb *eb;
+ size_t retlen;
+ int ret;
+
+ d->sect_read_count++;
+
+ if (header) {
+ if (unlikely(page == 0))
+ return mtdswap_auto_header(d, buf);
+
+ page--;
+ }
+
+ realblock = d->page_data[page];
+ if (realblock > BLOCK_MAX) {
+ memset(buf, 0x0, PAGE_SIZE);
+ if (realblock == BLOCK_UNDEF)
+ return 0;
+ else
+ return -EIO;
+ }
+
+ eb = d->eb_data + (realblock / d->pages_per_eblk);
+ BUG_ON(d->revmap[realblock] == PAGE_UNDEF);
+
+ readpos = (loff_t)realblock << PAGE_SHIFT;
+ retries = 0;
+
+retry:
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
+
+ d->mtd_read_count++;
+ if (mtd_is_bitflip(ret)) {
+ eb->flags |= EBLOCK_BITFLIP;
+ mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
+ ret = 0;
+ }
+
+ if (ret < 0) {
+ dev_err(d->dev, "Read error %d\n", ret);
+ eb->flags |= EBLOCK_READERR;
+ mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
+ retries++;
+ if (retries < MTDSWAP_IO_RETRIES)
+ goto retry;
+
+ return ret;
+ }
+
+ if (retlen != PAGE_SIZE) {
+ dev_err(d->dev, "Short read %zd\n", retlen);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first,
+ unsigned nr_pages)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ unsigned long page;
+ struct swap_eb *eb;
+ unsigned int mapped;
+
+ d->discard_count++;
+
+ for (page = first; page < first + nr_pages; page++) {
+ mapped = d->page_data[page];
+ if (mapped <= BLOCK_MAX) {
+ eb = d->eb_data + (mapped / d->pages_per_eblk);
+ eb->active_count--;
+ mtdswap_store_eb(d, eb);
+ d->page_data[page] = BLOCK_UNDEF;
+ d->revmap[mapped] = PAGE_UNDEF;
+ d->discard_page_count++;
+ } else if (mapped == BLOCK_ERROR) {
+ d->page_data[page] = BLOCK_UNDEF;
+ d->discard_page_count++;
+ }
+ }
+
+ return 0;
+}
+
+static int mtdswap_show(struct seq_file *s, void *data)
+{
+ struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
+ unsigned long sum;
+ unsigned int count[MTDSWAP_TREE_CNT];
+ unsigned int min[MTDSWAP_TREE_CNT];
+ unsigned int max[MTDSWAP_TREE_CNT];
+ unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
+ uint64_t use_size;
+ char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
+ "failing"};
+
+ mutex_lock(&d->mbd_dev->lock);
+
+ for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
+ struct rb_root *root = &d->trees[i].root;
+
+ if (root->rb_node) {
+ count[i] = d->trees[i].count;
+ min[i] = rb_entry(rb_first(root), struct swap_eb,
+ rb)->erase_count;
+ max[i] = rb_entry(rb_last(root), struct swap_eb,
+ rb)->erase_count;
+ } else
+ count[i] = 0;
+ }
+
+ if (d->curr_write) {
+ cw = 1;
+ cwp = d->curr_write_pos;
+ cwecount = d->curr_write->erase_count;
+ }
+
+ sum = 0;
+ for (i = 0; i < d->eblks; i++)
+ sum += d->eb_data[i].erase_count;
+
+ use_size = (uint64_t)d->eblks * d->mtd->erasesize;
+ bb_cnt = mtdswap_badblocks(d->mtd, use_size);
+
+ mapped = 0;
+ pages = d->mbd_dev->size;
+ for (i = 0; i < pages; i++)
+ if (d->page_data[i] != BLOCK_UNDEF)
+ mapped++;
+
+ mutex_unlock(&d->mbd_dev->lock);
+
+ for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
+ if (!count[i])
+ continue;
+
+ if (min[i] != max[i])
+ seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
+ "max %d times\n",
+ name[i], count[i], min[i], max[i]);
+ else
+ seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
+ "times\n", name[i], count[i], min[i]);
+ }
+
+ if (bb_cnt)
+ seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt);
+
+ if (cw)
+ seq_printf(s, "current erase block: %u pages used, %u free, "
+ "erased %u times\n",
+ cwp, d->pages_per_eblk - cwp, cwecount);
+
+ seq_printf(s, "total erasures: %lu\n", sum);
+
+ seq_puts(s, "\n");
+
+ seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
+ seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
+ seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
+ seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
+ seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
+ seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);
+
+ seq_puts(s, "\n");
+ seq_printf(s, "total pages: %u\n", pages);
+ seq_printf(s, "pages mapped: %u\n", mapped);
+
+ return 0;
+}
+
+static int mtdswap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtdswap_show, inode->i_private);
+}
+
+static const struct file_operations mtdswap_fops = {
+ .open = mtdswap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mtdswap_add_debugfs(struct mtdswap_dev *d)
+{
+ struct gendisk *gd = d->mbd_dev->disk;
+ struct device *dev = disk_to_dev(gd);
+
+ struct dentry *root;
+ struct dentry *dent;
+
+ root = debugfs_create_dir(gd->disk_name, NULL);
+ if (IS_ERR(root))
+ return 0;
+
+ if (!root) {
+ dev_err(dev, "failed to initialize debugfs\n");
+ return -1;
+ }
+
+ d->debugfs_root = root;
+
+ dent = debugfs_create_file("stats", S_IRUSR, root, d,
+ &mtdswap_fops);
+ if (!dent) {
+ dev_err(d->dev, "debugfs_create_file failed\n");
+ debugfs_remove_recursive(root);
+ d->debugfs_root = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
+ unsigned int spare_cnt)
+{
+ struct mtd_info *mtd = d->mbd_dev->mtd;
+ unsigned int i, eblk_bytes, pages, blocks;
+ int ret = -ENOMEM;
+
+ d->mtd = mtd;
+ d->eblks = eblocks;
+ d->spare_eblks = spare_cnt;
+ d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;
+
+ pages = d->mbd_dev->size;
+ blocks = eblocks * d->pages_per_eblk;
+
+ for (i = 0; i < MTDSWAP_TREE_CNT; i++)
+ d->trees[i].root = RB_ROOT;
+
+ d->page_data = vmalloc(sizeof(int)*pages);
+ if (!d->page_data)
+ goto page_data_fail;
+
+ d->revmap = vmalloc(sizeof(int)*blocks);
+ if (!d->revmap)
+ goto revmap_fail;
+
+ eblk_bytes = sizeof(struct swap_eb)*d->eblks;
+ d->eb_data = vzalloc(eblk_bytes);
+ if (!d->eb_data)
+ goto eb_data_fail;
+
+ for (i = 0; i < pages; i++)
+ d->page_data[i] = BLOCK_UNDEF;
+
+ for (i = 0; i < blocks; i++)
+ d->revmap[i] = PAGE_UNDEF;
+
+ d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!d->page_buf)
+ goto page_buf_fail;
+
+ d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
+ if (!d->oob_buf)
+ goto oob_buf_fail;
+
+ mtdswap_scan_eblks(d);
+
+ return 0;
+
+oob_buf_fail:
+ kfree(d->page_buf);
+page_buf_fail:
+ vfree(d->eb_data);
+eb_data_fail:
+ vfree(d->revmap);
+revmap_fail:
+ vfree(d->page_data);
+page_data_fail:
+ printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
+ return ret;
+}
+
+static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtdswap_dev *d;
+ struct mtd_blktrans_dev *mbd_dev;
+ char *parts;
+ char *this_opt;
+ unsigned long part;
+ unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
+ uint64_t swap_size, use_size, size_limit;
+ struct nand_ecclayout *oinfo;
+ int ret;
+
+ parts = &partitions[0];
+ if (!*parts)
+ return;
+
+ while ((this_opt = strsep(&parts, ",")) != NULL) {
+ if (kstrtoul(this_opt, 0, &part) < 0)
+ return;
+
+ if (mtd->index == part)
+ break;
+ }
+
+ if (mtd->index != part)
+ return;
+
+ if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
+ printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE "
+ "%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);
+ return;
+ }
+
+ if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
+ printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size"
+ " %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);
+ return;
+ }
+
+ oinfo = mtd->ecclayout;
+ if (!oinfo) {
+ printk(KERN_ERR "%s: mtd%d does not have OOB\n",
+ MTDSWAP_PREFIX, mtd->index);
+ return;
+ }
+
+ if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
+ printk(KERN_ERR "%s: Not enough free bytes in OOB, "
+ "%d available, %zu needed.\n",
+ MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
+ return;
+ }
+
+ if (spare_eblocks > 100)
+ spare_eblocks = 100;
+
+ use_size = mtd->size;
+ size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE;
+
+ if (mtd->size > size_limit) {
+ printk(KERN_WARNING "%s: Device too large. Limiting size to "
+ "%llu bytes\n", MTDSWAP_PREFIX, size_limit);
+ use_size = size_limit;
+ }
+
+ eblocks = mtd_div_by_eb(use_size, mtd);
+ use_size = (uint64_t)eblocks * mtd->erasesize;
+ bad_blocks = mtdswap_badblocks(mtd, use_size);
+ eavailable = eblocks - bad_blocks;
+
+ if (eavailable < MIN_ERASE_BLOCKS) {
+ printk(KERN_ERR "%s: Not enough erase blocks. %u available, "
+ "%d needed\n", MTDSWAP_PREFIX, eavailable,
+ MIN_ERASE_BLOCKS);
+ return;
+ }
+
+ spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100);
+
+ if (spare_cnt < MIN_SPARE_EBLOCKS)
+ spare_cnt = MIN_SPARE_EBLOCKS;
+
+ if (spare_cnt > eavailable - 1)
+ spare_cnt = eavailable - 1;
+
+ swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize +
+ (header ? PAGE_SIZE : 0);
+
+ printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, "
+ "%u spare, %u bad blocks\n",
+ MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks);
+
+ d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
+ if (!d)
+ return;
+
+ mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
+ if (!mbd_dev) {
+ kfree(d);
+ return;
+ }
+
+ d->mbd_dev = mbd_dev;
+ mbd_dev->priv = d;
+
+ mbd_dev->mtd = mtd;
+ mbd_dev->devnum = mtd->index;
+ mbd_dev->size = swap_size >> PAGE_SHIFT;
+ mbd_dev->tr = tr;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ mbd_dev->readonly = 1;
+
+ if (mtdswap_init(d, eblocks, spare_cnt) < 0)
+ goto init_failed;
+
+ if (add_mtd_blktrans_dev(mbd_dev) < 0)
+ goto cleanup;
+
+ d->dev = disk_to_dev(mbd_dev->disk);
+
+ ret = mtdswap_add_debugfs(d);
+ if (ret < 0)
+ goto debugfs_failed;
+
+ return;
+
+debugfs_failed:
+ del_mtd_blktrans_dev(mbd_dev);
+
+cleanup:
+ mtdswap_cleanup(d);
+
+init_failed:
+ kfree(mbd_dev);
+ kfree(d);
+}
+
+static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+
+ debugfs_remove_recursive(d->debugfs_root);
+ del_mtd_blktrans_dev(dev);
+ mtdswap_cleanup(d);
+ kfree(d);
+}
+
+static struct mtd_blktrans_ops mtdswap_ops = {
+ .name = "mtdswap",
+ .major = 0,
+ .part_bits = 0,
+ .blksize = PAGE_SIZE,
+ .flush = mtdswap_flush,
+ .readsect = mtdswap_readsect,
+ .writesect = mtdswap_writesect,
+ .discard = mtdswap_discard,
+ .background = mtdswap_background,
+ .add_mtd = mtdswap_add_mtd,
+ .remove_dev = mtdswap_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init mtdswap_modinit(void)
+{
+ return register_mtd_blktrans(&mtdswap_ops);
+}
+
+static void __exit mtdswap_modexit(void)
+{
+ deregister_mtd_blktrans(&mtdswap_ops);
+}
+
+module_init(mtdswap_modinit);
+module_exit(mtdswap_modexit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
+MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
+ "swap space");
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
new file mode 100644
index 000000000..5897d8d8f
--- /dev/null
+++ b/drivers/mtd/nand/Kconfig
@@ -0,0 +1,533 @@
+config MTD_NAND_ECC
+ tristate
+
+config MTD_NAND_ECC_SMC
+ bool "NAND ECC Smart Media byte order"
+ depends on MTD_NAND_ECC
+ default n
+ help
+ Software ECC according to the Smart Media Specification.
+ The original Linux implementation had byte 0 and 1 swapped.
+
+
+menuconfig MTD_NAND
+ tristate "NAND Device Support"
+ depends on MTD
+ select MTD_NAND_IDS
+ select MTD_NAND_ECC
+ help
+ This enables support for accessing all types of NAND flash
+ devices. For further information see
+ <http://www.linux-mtd.infradead.org/doc/nand.html>.
+
+if MTD_NAND
+
+config MTD_NAND_BCH
+ tristate
+ select BCH
+ depends on MTD_NAND_ECC_BCH
+ default MTD_NAND
+
+config MTD_NAND_ECC_BCH
+ bool "Support software BCH ECC"
+ default n
+ help
+ This enables support for software BCH error correction. Binary BCH
+ codes are more powerful and CPU intensive than traditional Hamming
+ ECC codes. They are used with NAND devices requiring more than 1 bit
+ of error correction.
+
+config MTD_SM_COMMON
+ tristate
+ default n
+
+config MTD_NAND_DENALI
+ tristate "Support Denali NAND controller"
+ depends on HAS_DMA
+ help
+ Enable support for the Denali NAND controller. This should be
+ combined with either the PCI or platform drivers to provide device
+ registration.
+
+config MTD_NAND_DENALI_PCI
+ tristate "Support Denali NAND controller on Intel Moorestown"
+ depends on PCI && MTD_NAND_DENALI
+ help
+ Enable the driver for NAND flash on Intel Moorestown, using the
+ Denali NAND controller core.
+
+config MTD_NAND_DENALI_DT
+ tristate "Support Denali NAND controller as a DT device"
+ depends on HAVE_CLK && MTD_NAND_DENALI
+ help
+ Enable the driver for NAND flash on platforms using a Denali NAND
+ controller as a DT device.
+
+config MTD_NAND_DENALI_SCRATCH_REG_ADDR
+ hex "Denali NAND size scratch register address"
+ default "0xFF108018"
+ depends on MTD_NAND_DENALI_PCI
+ help
+ Some platforms place the NAND chip size in a scratch register
+ because (some versions of) the driver aren't able to automatically
+ determine the size of certain chips. Set the address of the
+ scratch register here to enable this feature. On Intel Moorestown
+ boards, the scratch register is at 0xFF108018.
+
+config MTD_NAND_GPIO
+ tristate "GPIO assisted NAND Flash driver"
+ depends on GPIOLIB
+ help
+ This enables a NAND flash driver where control signals are
+ connected to GPIO pins, and commands and data are communicated
+ via a memory mapped interface.
+
+config MTD_NAND_AMS_DELTA
+ tristate "NAND Flash device on Amstrad E3"
+ depends on MACH_AMS_DELTA
+ default y
+ help
+ Support for NAND flash on Amstrad E3 (Delta).
+
+config MTD_NAND_OMAP2
+ tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
+ depends on ARCH_OMAP2PLUS
+ help
+ Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
+ platforms.
+
+config MTD_NAND_OMAP_BCH
+ depends on MTD_NAND_OMAP2
+ bool "Support hardware based BCH error correction"
+ default n
+ select BCH
+ help
+ This config enables the ELM hardware engine, which can be used to
+ locate and correct errors when using BCH ECC scheme. This offloads
+ the CPU from doing ECC error searching and correction. However, some
+ legacy OMAP families like OMAP2xxx and OMAP3xxx do not have the ELM
+ engine, so this is optional for them.
+
+config MTD_NAND_OMAP_BCH_BUILD
+ def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
+
+config MTD_NAND_IDS
+ tristate
+
+config MTD_NAND_RICOH
+ tristate "Ricoh xD card reader"
+ default n
+ depends on PCI
+ select MTD_SM_COMMON
+ help
+ Enable support for the Ricoh R5C852 xD card reader.
+ You also need to enable either the
+ 'NAND SSFDC (SmartMedia) read only translation layer' or the new,
+ experimental, read-write
+ 'SmartMedia/xD new translation layer'.
+
+config MTD_NAND_AU1550
+ tristate "Au1550/1200 NAND support"
+ depends on MIPS_ALCHEMY
+ help
+ This enables the driver for the NAND flash controller on the
+ AMD/Alchemy 1550 SOC.
+
+config MTD_NAND_BF5XX
+ tristate "Blackfin on-chip NAND Flash Controller driver"
+ depends on BF54x || BF52x
+ help
+ This enables the Blackfin on-chip NAND flash controller
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+ This driver can also be built as a module. If so, the module
+ will be called bf5xx-nand.
+
+config MTD_NAND_BF5XX_HWECC
+ bool "BF5XX NAND Hardware ECC"
+ default y
+ depends on MTD_NAND_BF5XX
+ help
+ Enable the use of the BF5XX's internal ECC generator when
+ using NAND.
+
+config MTD_NAND_BF5XX_BOOTROM_ECC
+ bool "Use Blackfin BootROM ECC Layout"
+ default n
+ depends on MTD_NAND_BF5XX_HWECC
+ help
+ If you wish to modify NAND pages and allow the Blackfin on-chip
+ BootROM to boot from them, say Y here. This is only necessary
+ if you are booting U-Boot out of NAND and you wish to update
+ U-Boot from Linux' userspace. Otherwise, you should say N here.
+
+ If unsure, say N.
+
+config MTD_NAND_S3C2410
+ tristate "NAND Flash support for Samsung S3C SoCs"
+ depends on ARCH_S3C24XX || ARCH_S3C64XX
+ help
+ This enables the NAND flash controller on the S3C24xx and S3C64xx
+ SoCs
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_S3C2410_DEBUG
+ bool "Samsung S3C NAND driver debug"
+ depends on MTD_NAND_S3C2410
+ help
+ Enable debugging of the S3C NAND driver
+
+config MTD_NAND_S3C2410_HWECC
+ bool "Samsung S3C NAND Hardware ECC"
+ depends on MTD_NAND_S3C2410
+ help
+ Enable the use of the controller's internal ECC generator when
+ using NAND. Early versions of the chips have had problems with
+ incorrect ECC generation, and if using these, the default of
+ software ECC is preferable.
+
+config MTD_NAND_NDFC
+ tristate "NDFC NanD Flash Controller"
+ depends on 4xx
+ select MTD_NAND_ECC_SMC
+ help
+ NDFC NAND Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
+
+config MTD_NAND_S3C2410_CLKSTOP
+ bool "Samsung S3C NAND IDLE clock stop"
+ depends on MTD_NAND_S3C2410
+ default n
+ help
+ Stop the clock to the NAND controller when there is no chip
+ selected to save power. This will mean there is a small delay
+ when the NAND chip is selected or released, but will save
+ approximately 5mA of power when there is nothing happening.
+
+config MTD_NAND_DISKONCHIP
+ tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
+ depends on HAS_IOMEM
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ This is a reimplementation of M-Systems DiskOnChip 2000,
+ Millennium and Millennium Plus as a standard NAND device driver,
+ as opposed to the earlier self-contained MTD device drivers.
+ This should enable, among other things, proper JFFS2 operation on
+ these devices.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ bool "Advanced detection options for DiskOnChip"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ This option allows you to specify nonstandard address at which to
+ probe for a DiskOnChip, or to change the detection options. You
+ are unlikely to need any of this unless you are using LinuxBIOS.
+ Say 'N'.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+ hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ depends on MTD_NAND_DISKONCHIP
+ default "0"
+ ---help---
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option allows you to specify a single address at which to probe
+ for the device, which is useful if you have other devices in that
+ range which get upset when they are probed.
+
+ (Note that on PowerPC, the normal probe will only check at
+ 0xE4000000.)
+
+ Normally, you should leave this set to zero, to allow the probe at
+ the normal addresses.
+
+config MTD_NAND_DISKONCHIP_PROBE_HIGH
+ bool "Probe high addresses"
+ depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option changes to make it probe between 0xFFFC8000 and
+ 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
+ useful to you. Say 'N'.
+
+config MTD_NAND_DISKONCHIP_BBTWRITE
+ bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ On DiskOnChip devices shipped with the INFTL filesystem (Millennium
+ and 2000 TSOP/Alon), Linux reserves some space at the end of the
+ device for the Bad Block Table (BBT). If you have existing INFTL
+ data on your device (created by non-Linux tools such as M-Systems'
+ DOS drivers), your data might overlap the area Linux wants to use for
+ the BBT. If this is a concern for you, leave this option disabled and
+ Linux will not write BBT data into this area.
+ The downside of leaving this option disabled is that if bad blocks
+ are detected by Linux, they will not be recorded in the BBT, which
+ could cause future problems.
+ Once you enable this option, new filesystems (INFTL or others, created
+ in Linux or other operating systems) will not use the reserved area.
+ The only reason not to enable this option is to prevent damage to
+ preexisting filesystems.
+ Even if you leave this disabled, you can enable BBT writes at module
+ load time (assuming you build diskonchip as a module) with the module
+ parameter "inftl_bbt_write=1".
+
+config MTD_NAND_DOCG4
+ tristate "Support for DiskOnChip G4"
+ depends on HAS_IOMEM
+ select BCH
+ select BITREVERSE
+ help
+ Support for the DiskOnChip G4 NAND flash, found in various smartphones and
+ PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
+ Portege G900, Asus P526, and O2 XDA Zinc.
+
+ With this driver you will be able to use UBI and create a ubifs on the
+ device, so you may wish to consider enabling UBI and UBIFS as well.
+
+ These devices ship with the M-Sys/Sandisk SAFTL formatting, for which
+ there is currently no mtd parser, so you may want to use command line
+ partitioning to segregate write-protected blocks. On the Treo680, the
+ first five erase blocks (256KiB each) are write-protected, followed
+ by the block containing the saftl partition table. This is probably
+ typical.
+
+config MTD_NAND_SHARPSL
+ tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
+ depends on ARCH_PXA
+
+config MTD_NAND_CAFE
+ tristate "NAND support for OLPC CAFÉ chip"
+ depends on PCI
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ Use NAND flash attached to the CAFÉ chip designed for the OLPC
+ laptop.
+
+config MTD_NAND_CS553X
+ tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
+ depends on X86_32
+ help
+ The CS553x companion chips for the AMD Geode processor
+ include NAND flash controllers with built-in hardware ECC
+ capabilities; enabling this option will allow you to use
+ these. The driver will check the MSRs to verify that the
+ controller is enabled for NAND, and currently requires that
+ the controller be in MMIO mode.
+
+ If you say "m", the module will be called cs553x_nand.
+
+config MTD_NAND_ATMEL
+ tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32"
+ depends on ARCH_AT91 || AVR32
+ help
+ Enables support for NAND Flash / Smart Media Card interface
+ on Atmel AT91 and AVR32 processors.
+
+config MTD_NAND_PXA3xx
+ tristate "NAND support on PXA3xx and Armada 370/XP"
+ depends on PXA3xx || ARCH_MMP || PLAT_ORION
+ help
+ This enables the driver for the NAND flash device found on
+ PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
+
+config MTD_NAND_SLC_LPC32XX
+ tristate "NXP LPC32xx SLC Controller"
+ depends on ARCH_LPC32XX
+ help
+ Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
+ chips) NAND controller. This is the default for the PHYTEC 3250
+ reference board which contains a NAND256R3A2CZA6 chip.
+
+ Please check the actual NAND chip connected and its support
+ by the SLC NAND controller.
+
+config MTD_NAND_MLC_LPC32XX
+ tristate "NXP LPC32xx MLC Controller"
+ depends on ARCH_LPC32XX
+ help
+ Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
+ controller. This is the default for the WORK92105 controller
+ board.
+
+ Please check the actual NAND chip connected and its support
+ by the MLC NAND controller.
+
+config MTD_NAND_CM_X270
+ tristate "Support for NAND Flash on CM-X270 modules"
+ depends on MACH_ARMCORE
+
+config MTD_NAND_PASEMI
+ tristate "NAND support for PA Semi PWRficient"
+ depends on PPC_PASEMI
+ help
+ Enables support for NAND Flash interface on PA Semi PWRficient
+ based boards
+
+config MTD_NAND_TMIO
+ tristate "NAND Flash device on Toshiba Mobile IO Controller"
+ depends on MFD_TMIO
+ help
+ Support for NAND flash connected to a Toshiba Mobile IO
+ Controller in some PDAs, including the Sharp SL6000x.
+
+config MTD_NAND_NANDSIM
+ tristate "Support for NAND Flash Simulator"
+ help
+ The simulator may simulate various NAND flash chips for the
+ MTD nand layer.
+
+config MTD_NAND_GPMI_NAND
+ tristate "GPMI NAND Flash Controller driver"
+ depends on MTD_NAND && MXS_DMA
+ help
+ Enables NAND Flash support for IMX23, IMX28 or IMX6.
+ The GPMI controller is very powerful; with the help of the BCH
+ module it can do hardware ECC. The GPMI supports several NAND
+ chips at the same time. The GPMI may conflict with other blocks,
+ such as the SD card controller, so pay attention to it when you
+ enable the GPMI.
+
+config MTD_NAND_BCM47XXNFLASH
+ tristate "Support for NAND flash on BCM4706 BCMA bus"
+ depends on BCMA_NFLASH
+ help
+ The BCMA bus can have various flash memories attached; they are
+ registered by bcma as platform devices. This enables the driver
+ for NAND flash memories. For now only the BCM4706 is supported.
+
+config MTD_NAND_PLATFORM
+ tristate "Support for generic platform NAND driver"
+ depends on HAS_IOMEM
+ help
+ This implements a generic NAND driver for on-SOC platform
+ devices. You will need to provide platform-specific functions
+ via platform_data.
+
+config MTD_NAND_ORION
+ tristate "NAND Flash support for Marvell Orion SoC"
+ depends on PLAT_ORION
+ help
+ This enables the NAND flash controller on Orion machines.
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_FSL_ELBC
+ tristate "NAND support for Freescale eLBC controllers"
+ depends on PPC
+ select FSL_LBC
+ help
+ Various Freescale chips, including the 8313, include a NAND Flash
+ Controller Module with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
+config MTD_NAND_FSL_IFC
+ tristate "NAND support for Freescale IFC controller"
+ depends on MTD_NAND && FSL_SOC
+ select FSL_IFC
+ select MEMORY
+ help
+ Various Freescale chips e.g P1010, include a NAND Flash machine
+ with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
+config MTD_NAND_FSL_UPM
+ tristate "Support for NAND on Freescale UPM"
+ depends on PPC_83xx || PPC_85xx
+ select FSL_LBC
+ help
+ Enables support for NAND Flash chips wired onto Freescale PowerPC
+ processor localbus with User-Programmable Machine support.
+
+config MTD_NAND_MPC5121_NFC
+ tristate "MPC5121 built-in NAND Flash Controller support"
+ depends on PPC_MPC512x
+ help
+ This enables the driver for the NAND flash controller on the
+ MPC5121 SoC.
+
+config MTD_NAND_MXC
+ tristate "MXC NAND support"
+ depends on ARCH_MXC
+ help
+ This enables the driver for the NAND flash controller on the
+ MXC processors.
+
+config MTD_NAND_SH_FLCTL
+ tristate "Support for NAND on Renesas SuperH FLCTL"
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on HAS_DMA
+ help
+ Several Renesas SuperH CPUs have an FLCTL. This option enables
+ support for NAND Flash using the FLCTL.
+
+config MTD_NAND_DAVINCI
+ tristate "Support NAND on DaVinci/Keystone SoC"
+ depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF)
+ help
+ Enable the driver for NAND flash chips on Texas Instruments
+ DaVinci/Keystone processors.
+
+config MTD_NAND_TXX9NDFMC
+ tristate "NAND Flash support for TXx9 SoC"
+ depends on SOC_TX4938 || SOC_TX4939
+ help
+ This enables the NAND flash controller on the TXx9 SoCs.
+
+config MTD_NAND_SOCRATES
+ tristate "Support for NAND on Socrates board"
+ depends on SOCRATES
+ help
+ Enables support for NAND Flash chips wired onto Socrates board.
+
+config MTD_NAND_NUC900
+ tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
+ depends on ARCH_W90X900
+ help
+ This enables the driver for the NAND Flash on evaluation board based
+ on w90p910 / NUC9xx.
+
+config MTD_NAND_JZ4740
+ tristate "Support for JZ4740 SoC NAND controller"
+ depends on MACH_JZ4740
+ help
+ Enables support for NAND Flash on JZ4740 SoC based boards.
+
+config MTD_NAND_FSMC
+ tristate "Support for NAND on ST Micros FSMC"
+ depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
+ help
+ Enables support for NAND Flash chips on the ST Microelectronics
+ Flexible Static Memory Controller (FSMC)
+
+config MTD_NAND_XWAY
+ tristate "Support for NAND on Lantiq XWAY SoC"
+ depends on LANTIQ && SOC_TYPE_XWAY
+ select MTD_NAND_PLATFORM
+ help
+ Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
+ to the External Bus Unit (EBU).
+
+config MTD_NAND_SUNXI
+ tristate "Support for NAND on Allwinner SoCs"
+ depends on ARCH_SUNXI
+ help
+ Enables support for NAND Flash chips on Allwinner SoCs.
+
+config MTD_NAND_HISI504
+ tristate "Support for NAND controller on Hisilicon SoC Hip04"
+ depends on HAS_DMA
+ help
+ Enables support for NAND controller on Hisilicon SoC Hip04.
+
+endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
new file mode 100644
index 000000000..582bbd05a
--- /dev/null
+++ b/drivers/mtd/nand/Makefile
@@ -0,0 +1,56 @@
+#
+# linux/drivers/nand/Makefile
+#
+
+obj-$(CONFIG_MTD_NAND) += nand.o
+obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
+obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o
+obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
+obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
+
+obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
+obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
+obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
+obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
+obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
+obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
+obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
+obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
+obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
+obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
+obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
+obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
+obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
+obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
+obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
+obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
+obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
+obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
+obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
+obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
+obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
+obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
+obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
+obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
+obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
+obj-$(CONFIG_MTD_NAND_SLC_LPC32XX) += lpc32xx_slc.o
+obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o
+obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
+obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
+obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
+obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
+obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
+obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
+obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
+obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
+obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
+obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
+
+nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
new file mode 100644
index 000000000..842f8fe91
--- /dev/null
+++ b/drivers/mtd/nand/ams-delta.c
@@ -0,0 +1,296 @@
+/*
+ * drivers/mtd/nand/ams-delta.c
+ *
+ * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
+ *
+ * Derived from drivers/mtd/toto.c
+ * Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ * Partially stolen from drivers/mtd/nand/plat_nand.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * Amstrad E3 (Delta).
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/gpio-omap.h>
+
+#include <asm/io.h>
+#include <asm/sizes.h>
+
+#include <mach/board-ams-delta.h>
+
+#include <mach/hardware.h>
+
+/*
+ * MTD structure for E3 (Delta)
+ */
+static struct mtd_info *ams_delta_mtd = NULL;
+
+/*
+ * Define partitions for flash devices
+ */
+
+static struct mtd_partition partition_info[] = {
+ { .name = "Kernel",
+ .offset = 0,
+ .size = 3 * SZ_1M + SZ_512K },
+ { .name = "u-boot",
+ .offset = 3 * SZ_1M + SZ_512K,
+ .size = SZ_256K },
+ { .name = "u-boot params",
+ .offset = 3 * SZ_1M + SZ_512K + SZ_256K,
+ .size = SZ_256K },
+ { .name = "Amstrad LDR",
+ .offset = 4 * SZ_1M,
+ .size = SZ_256K },
+ { .name = "File system",
+ .offset = 4 * SZ_1M + 1 * SZ_256K,
+ .size = 27 * SZ_1M },
+ { .name = "PBL reserved",
+ .offset = 32 * SZ_1M - 3 * SZ_256K,
+ .size = 3 * SZ_256K },
+};
+
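+/*
+ * Byte I/O is bit-banged through the OMAP MPUIO block: the data bus
+ * direction is flipped via OMAP_MPUIO_IO_CNTL (0 for output, ~0 for
+ * input) and the NWE/NRE strobes are pulsed by hand with 40 ns delays.
+ */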
+static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ void __iomem *io_base = this->priv;
+
+ writew(0, io_base + OMAP_MPUIO_IO_CNTL);
+ writew(byte, this->IO_ADDR_W);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 0);
+ ndelay(40);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 1);
+}
+
+static u_char ams_delta_read_byte(struct mtd_info *mtd)
+{
+ u_char res;
+ struct nand_chip *this = mtd->priv;
+ void __iomem *io_base = this->priv;
+
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 0);
+ ndelay(40);
+ writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
+ res = readw(this->IO_ADDR_R);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 1);
+
+ return res;
+}
+
+static void ams_delta_write_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
+{
+ int i;
+
+ for (i=0; i<len; i++)
+ ams_delta_write_byte(mtd, buf[i]);
+}
+
+static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+
+ for (i=0; i<len; i++)
+ buf[i] = ams_delta_read_byte(mtd);
+}
+
+/*
+ * Command control function
+ *
+ * ctrl:
+ * NAND_NCE: bit 0 -> bit 2
+ * NAND_CLE: bit 1 -> bit 7
+ * NAND_ALE: bit 2 -> bit 6
+ */
+static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NCE,
+ (ctrl & NAND_NCE) == 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_CLE,
+ (ctrl & NAND_CLE) != 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_ALE,
+ (ctrl & NAND_ALE) != 0);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ ams_delta_write_byte(mtd, cmd);
+}
+
+static int ams_delta_nand_ready(struct mtd_info *mtd)
+{
+ return gpio_get_value(AMS_DELTA_GPIO_PIN_NAND_RB);
+}
+
+static const struct gpio _mandatory_gpio[] = {
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NCE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nce",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NRE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nre",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NWP,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nwp",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NWE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nwe",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_ALE,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "nand_ale",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_CLE,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "nand_cle",
+ },
+};
+
+/*
+ * Main initialization routine
+ */
+static int ams_delta_init(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *io_base;
+ int err = 0;
+
+ if (!res)
+ return -ENXIO;
+
+ /* Allocate memory for MTD device structure and private data */
+ ams_delta_mtd = kzalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip), GFP_KERNEL);
+ if (!ams_delta_mtd) {
+ printk(KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ams_delta_mtd->owner = THIS_MODULE;
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&ams_delta_mtd[1]);
+
+ /* Link the private data with the MTD structure */
+ ams_delta_mtd->priv = this;
+
+ /*
+ * Don't try to request the memory region from here,
+ * it should have been already requested from the
+ * gpio-omap driver and requesting it again would fail.
+ */
+
+ io_base = ioremap(res->start, resource_size(res));
+ if (io_base == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto out_free;
+ }
+
+ this->priv = io_base;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
+ this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
+ this->read_byte = ams_delta_read_byte;
+ this->write_buf = ams_delta_write_buf;
+ this->read_buf = ams_delta_read_buf;
+ this->cmd_ctrl = ams_delta_hwcontrol;
+ if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
+ this->dev_ready = ams_delta_nand_ready;
+ } else {
+ this->dev_ready = NULL;
+ printk(KERN_NOTICE "Couldn't request gpio for Delta NAND ready.\n");
+ }
+ /* 30 us command delay time */
+ this->chip_delay = 30;
+ this->ecc.mode = NAND_ECC_SOFT;
+
+ platform_set_drvdata(pdev, io_base);
+
+ /* Request control GPIOs, initialized to their inactive states */
+ err = gpio_request_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+ if (err)
+ goto out_gpio;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(ams_delta_mtd, 1)) {
+ err = -ENXIO;
+ goto out_mtd;
+ }
+
+ /* Register the partitions */
+ mtd_device_register(ams_delta_mtd, partition_info,
+ ARRAY_SIZE(partition_info));
+
+ goto out;
+
+out_mtd:
+ gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+out_gpio:
+ gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
+ iounmap(io_base);
+out_free:
+ kfree(ams_delta_mtd);
+out:
+ return err;
+}
+
+/*
+ * Clean up routine
+ */
+static int ams_delta_cleanup(struct platform_device *pdev)
+{
+ void __iomem *io_base = platform_get_drvdata(pdev);
+
+ /* Release resources, unregister device */
+ nand_release(ams_delta_mtd);
+
+ gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+ gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
+ iounmap(io_base);
+
+ /* Free the MTD device structure */
+ kfree(ams_delta_mtd);
+
+ return 0;
+}
+
+static struct platform_driver ams_delta_nand_driver = {
+ .probe = ams_delta_init,
+ .remove = ams_delta_cleanup,
+ .driver = {
+ .name = "ams-delta-nand",
+ },
+};
+
+module_platform_driver(ams_delta_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
new file mode 100644
index 000000000..46010bd89
--- /dev/null
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -0,0 +1,2412 @@
+/*
+ * Copyright © 2003 Rick Bronson
+ *
+ * Derived from drivers/mtd/nand/autcpu12.c
+ * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * Derived from drivers/mtd/spia.c
+ * Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
+ *
+ *
+ * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
+ * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
+ *
+ * Derived from Das U-Boot source code
+ * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
+ * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ * Add Programmable Multibit ECC support for various AT91 SoC
+ * © Copyright 2012 ATMEL, Hong Xu
+ *
+ * Add Nand Flash Controller support for SAMA5 SoC
+ * © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mtd.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_data/atmel.h>
+
+static int use_dma = 1;
+module_param(use_dma, int, 0);
+
+static int on_flash_bbt = 0;
+module_param(on_flash_bbt, int, 0);
+
+/* Register access macros */
+#define ecc_readl(add, reg) \
+ __raw_readl(add + ATMEL_ECC_##reg)
+#define ecc_writel(add, reg, value) \
+ __raw_writel((value), add + ATMEL_ECC_##reg)
+
+#include "atmel_nand_ecc.h" /* Hardware ECC registers */
+#include "atmel_nand_nfc.h" /* Nand Flash Controller definition */
+
+struct atmel_nand_caps {
+ bool pmecc_correct_erase_page;
+};
+
+/* OOB layout for large page size
+ * bad block info is on bytes 0 and 1
+ * the bytes have to be consecutive to avoid
+ * several NAND_CMD_RNDOUT commands during read
+ */
+static struct nand_ecclayout atmel_oobinfo_large = {
+ .eccbytes = 4,
+ .eccpos = {60, 61, 62, 63},
+ .oobfree = {
+ {2, 58}
+ },
+};
+
+/* OOB layout for small page size
+ * bad block info is on bytes 4 and 5
+ * the bytes have to be consecutive to avoid
+ * several NAND_CMD_RNDOUT commands during read
+ */
+static struct nand_ecclayout atmel_oobinfo_small = {
+ .eccbytes = 4,
+ .eccpos = {0, 1, 2, 3},
+ .oobfree = {
+ {6, 10}
+ },
+};
+
+struct atmel_nfc {
+ void __iomem *base_cmd_regs;
+ void __iomem *hsmc_regs;
+ void *sram_bank0;
+ dma_addr_t sram_bank0_phys;
+ bool use_nfc_sram;
+ bool write_by_sram;
+
+ struct clk *clk;
+
+ bool is_initialized;
+ struct completion comp_ready;
+ struct completion comp_cmd_done;
+ struct completion comp_xfer_done;
+
+	/* Points to the SRAM bank that holds data read in via the NFC */
+ void *data_in_sram;
+ bool will_write_sram;
+};
+static struct atmel_nfc nand_nfc;
+
+struct atmel_nand_host {
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ dma_addr_t io_phys;
+ struct atmel_nand_data board;
+ struct device *dev;
+ void __iomem *ecc;
+
+ struct completion comp;
+ struct dma_chan *dma_chan;
+
+ struct atmel_nfc *nfc;
+
+ struct atmel_nand_caps *caps;
+ bool has_pmecc;
+ u8 pmecc_corr_cap;
+ u16 pmecc_sector_size;
+ bool has_no_lookup_table;
+ u32 pmecc_lookup_table_offset;
+ u32 pmecc_lookup_table_offset_512;
+ u32 pmecc_lookup_table_offset_1024;
+
+ int pmecc_degree; /* Degree of remainders */
+ int pmecc_cw_len; /* Length of codeword */
+
+ void __iomem *pmerrloc_base;
+ void __iomem *pmecc_rom_base;
+
+ /* lookup table for alpha_to and index_of */
+ void __iomem *pmecc_alpha_to;
+ void __iomem *pmecc_index_of;
+
+ /* data for pmecc computation */
+ int16_t *pmecc_partial_syn;
+ int16_t *pmecc_si;
+ int16_t *pmecc_smu; /* Sigma table */
+	int16_t *pmecc_lmu;	/* polynomial order */
+ int *pmecc_mu;
+ int *pmecc_dmu;
+ int *pmecc_delta;
+};
+
+static struct nand_ecclayout atmel_pmecc_oobinfo;
+
+/*
+ * Enable NAND.
+ */
+static void atmel_nand_enable(struct atmel_nand_host *host)
+{
+ if (gpio_is_valid(host->board.enable_pin))
+ gpio_set_value(host->board.enable_pin, 0);
+}
+
+/*
+ * Disable NAND.
+ */
+static void atmel_nand_disable(struct atmel_nand_host *host)
+{
+ if (gpio_is_valid(host->board.enable_pin))
+ gpio_set_value(host->board.enable_pin, 1);
+}
+
+/*
+ * Hardware specific access to control-lines
+ */
+static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_NCE)
+ atmel_nand_enable(host);
+ else
+ atmel_nand_disable(host);
+ }
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, host->io_base + (1 << host->board.cle));
+ else
+ writeb(cmd, host->io_base + (1 << host->board.ale));
+}
+
+/*
+ * Read the Device Ready pin.
+ */
+static int atmel_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ return gpio_get_value(host->board.rdy_pin) ^
+ !!host->board.rdy_pin_active_low;
+}
+
+/* Set up for hardware ready pin and enable pin. */
+static int atmel_nand_set_enable_ready_pins(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ int res = 0;
+
+ if (gpio_is_valid(host->board.rdy_pin)) {
+ res = devm_gpio_request(host->dev,
+ host->board.rdy_pin, "nand_rdy");
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request rdy gpio %d\n",
+ host->board.rdy_pin);
+ return res;
+ }
+
+ res = gpio_direction_input(host->board.rdy_pin);
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request input direction rdy gpio %d\n",
+ host->board.rdy_pin);
+ return res;
+ }
+
+ chip->dev_ready = atmel_nand_device_ready;
+ }
+
+ if (gpio_is_valid(host->board.enable_pin)) {
+ res = devm_gpio_request(host->dev,
+ host->board.enable_pin, "nand_enable");
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request enable gpio %d\n",
+ host->board.enable_pin);
+ return res;
+ }
+
+ res = gpio_direction_output(host->board.enable_pin, 1);
+ if (res < 0) {
+ dev_err(host->dev,
+ "can't request output direction enable gpio %d\n",
+ host->board.enable_pin);
+ return res;
+ }
+ }
+
+ return res;
+}
+
+/*
+ * Minimal-overhead PIO for data access.
+ */
+static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
+ memcpy(buf, host->nfc->data_in_sram, len);
+ host->nfc->data_in_sram += len;
+ } else {
+ __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+ }
+}
+
+static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
+ memcpy(buf, host->nfc->data_in_sram, len);
+ host->nfc->data_in_sram += len;
+ } else {
+ __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+ }
+}
+
+static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
+}
+
+static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
+}
+
+static void dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int nfc_set_sram_bank(struct atmel_nand_host *host, unsigned int bank)
+{
+ /* NFC only has two banks. Must be 0 or 1 */
+ if (bank > 1)
+ return -EINVAL;
+
+ if (bank) {
+		/* The NFC can only handle 2 banks on flash with 2k or smaller pages */
+ if (host->mtd.writesize > 2048)
+ return -EINVAL;
+ nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK1);
+ } else {
+ nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK0);
+ }
+
+ return 0;
+}
+
+static uint nfc_get_sram_off(struct atmel_nand_host *host)
+{
+ if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
+ return NFC_SRAM_BANK1_OFFSET;
+ else
+ return 0;
+}
+
+static dma_addr_t nfc_sram_phys(struct atmel_nand_host *host)
+{
+ if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
+ return host->nfc->sram_bank0_phys + NFC_SRAM_BANK1_OFFSET;
+ else
+ return host->nfc->sram_bank0_phys;
+}
+
+static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
+ int is_read)
+{
+ struct dma_device *dma_dev;
+ enum dma_ctrl_flags flags;
+ dma_addr_t dma_src_addr, dma_dst_addr, phys_addr;
+ struct dma_async_tx_descriptor *tx = NULL;
+ dma_cookie_t cookie;
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ void *p = buf;
+ int err = -EIO;
+ enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct atmel_nfc *nfc = host->nfc;
+
+ if (buf >= high_memory)
+ goto err_buf;
+
+ dma_dev = host->dma_chan->device;
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
+ if (dma_mapping_error(dma_dev->dev, phys_addr)) {
+ dev_err(host->dev, "Failed to dma_map_single\n");
+ goto err_buf;
+ }
+
+ if (is_read) {
+ if (nfc && nfc->data_in_sram)
+ dma_src_addr = nfc_sram_phys(host) + (nfc->data_in_sram
+ - (nfc->sram_bank0 + nfc_get_sram_off(host)));
+ else
+ dma_src_addr = host->io_phys;
+
+ dma_dst_addr = phys_addr;
+ } else {
+ dma_src_addr = phys_addr;
+
+ if (nfc && nfc->write_by_sram)
+ dma_dst_addr = nfc_sram_phys(host);
+ else
+ dma_dst_addr = host->io_phys;
+ }
+
+ tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
+ dma_src_addr, len, flags);
+ if (!tx) {
+ dev_err(host->dev, "Failed to prepare DMA memcpy\n");
+ goto err_dma;
+ }
+
+ init_completion(&host->comp);
+ tx->callback = dma_complete_func;
+ tx->callback_param = &host->comp;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(host->dev, "Failed to do DMA tx_submit\n");
+ goto err_dma;
+ }
+
+ dma_async_issue_pending(host->dma_chan);
+ wait_for_completion(&host->comp);
+
+ if (is_read && nfc && nfc->data_in_sram)
+ /* After read data from SRAM, need to increase the position */
+ nfc->data_in_sram += len;
+
+ err = 0;
+
+err_dma:
+ dma_unmap_single(dma_dev->dev, phys_addr, len, dir);
+err_buf:
+ if (err != 0)
+ dev_dbg(host->dev, "Fall back to CPU I/O\n");
+ return err;
+}
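+
+/*
+ * A non-zero return from atmel_nand_dma_op() tells the callers below to
+ * fall back to PIO through atmel_read_buf8/16 or atmel_write_buf8/16.
+ */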
+
+static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+
+ if (use_dma && len > mtd->oobsize)
+		/* only use DMA for transfers bigger than the OOB size: better performance */
+ if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
+ return;
+
+ if (host->board.bus_width_16)
+ atmel_read_buf16(mtd, buf, len);
+ else
+ atmel_read_buf8(mtd, buf, len);
+}
+
+static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+
+ if (use_dma && len > mtd->oobsize)
+		/* only use DMA for transfers bigger than the OOB size: better performance */
+ if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
+ return;
+
+ if (host->board.bus_width_16)
+ atmel_write_buf16(mtd, buf, len);
+ else
+ atmel_write_buf8(mtd, buf, len);
+}
+
+/*
+ * Return number of ecc bytes per sector according to sector size and
+ * correction capability
+ *
+ * The following table shows what the at91 PMECC supports:
+ * Correction Capability Sector_512_bytes Sector_1024_bytes
+ * ===================== ================ =================
+ * 2-bits 4-bytes 4-bytes
+ * 4-bits 7-bytes 7-bytes
+ * 8-bits 13-bytes 14-bytes
+ * 12-bits 20-bytes 21-bytes
+ * 24-bits 39-bytes 42-bytes
+ */
+static int pmecc_get_ecc_bytes(int cap, int sector_size)
+{
+ int m = 12 + sector_size / 512;
+ return (m * cap + 7) / 8;
+}
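+
+/*
+ * Example: for cap = 8 and sector_size = 512, m = 12 + 512/512 = 13, so
+ * (13 * 8 + 7) / 8 = 13 bytes; with sector_size = 1024, m = 14 and
+ * (14 * 8 + 7) / 8 = 14 bytes, matching the 8-bit row of the table above.
+ */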
+
+static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
+ int oobsize, int ecc_len)
+{
+ int i;
+
+ layout->eccbytes = ecc_len;
+
+ /* ECC will occupy the last ecc_len bytes continuously */
+ for (i = 0; i < ecc_len; i++)
+ layout->eccpos[i] = oobsize - ecc_len + i;
+
+ layout->oobfree[0].offset = PMECC_OOB_RESERVED_BYTES;
+ layout->oobfree[0].length =
+ oobsize - ecc_len - layout->oobfree[0].offset;
+}
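+
+/*
+ * Example, assuming PMECC_OOB_RESERVED_BYTES is 2 (the bad block marker
+ * bytes): with a 64-byte OOB and ecc_len = 28, eccpos covers bytes 36..63
+ * and oobfree[0] = {.offset = 2, .length = 34}.
+ */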
+
+static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
+{
+ int table_size;
+
+ table_size = host->pmecc_sector_size == 512 ?
+ PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024;
+
+ return host->pmecc_rom_base + host->pmecc_lookup_table_offset +
+ table_size * sizeof(int16_t);
+}
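+
+/*
+ * The lookup table region holds index_of[] first and alpha_to[] right
+ * after it, each table_size int16_t entries long, so alpha_to starts
+ * table_size entries past the configured lookup table offset.
+ */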
+
+static int pmecc_data_alloc(struct atmel_nand_host *host)
+{
+ const int cap = host->pmecc_corr_cap;
+ int size;
+
+ size = (2 * cap + 1) * sizeof(int16_t);
+ host->pmecc_partial_syn = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_si = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_lmu = devm_kzalloc(host->dev,
+ (cap + 1) * sizeof(int16_t), GFP_KERNEL);
+ host->pmecc_smu = devm_kzalloc(host->dev,
+ (cap + 2) * size, GFP_KERNEL);
+
+ size = (cap + 1) * sizeof(int);
+ host->pmecc_mu = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_dmu = devm_kzalloc(host->dev, size, GFP_KERNEL);
+ host->pmecc_delta = devm_kzalloc(host->dev, size, GFP_KERNEL);
+
+ if (!host->pmecc_partial_syn ||
+ !host->pmecc_si ||
+ !host->pmecc_lmu ||
+ !host->pmecc_smu ||
+ !host->pmecc_mu ||
+ !host->pmecc_dmu ||
+ !host->pmecc_delta)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int i;
+ uint32_t value;
+
+ /* Fill odd syndromes */
+ for (i = 0; i < host->pmecc_corr_cap; i++) {
+ value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2);
+ if (i & 1)
+ value >>= 16;
+ value &= 0xffff;
+ host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value;
+ }
+}
+
+static void pmecc_substitute(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int16_t __iomem *alpha_to = host->pmecc_alpha_to;
+ int16_t __iomem *index_of = host->pmecc_index_of;
+ int16_t *partial_syn = host->pmecc_partial_syn;
+ const int cap = host->pmecc_corr_cap;
+ int16_t *si;
+ int i, j;
+
+ /* si[] is a table that holds the current syndrome value,
+ * an element of that table belongs to the field
+ */
+ si = host->pmecc_si;
+
+ memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1));
+
+	/* Compute the 2t syndromes based on S(x) */
+ /* Odd syndromes */
+ for (i = 1; i < 2 * cap; i += 2) {
+ for (j = 0; j < host->pmecc_degree; j++) {
+ if (partial_syn[i] & ((unsigned short)0x1 << j))
+ si[i] = readw_relaxed(alpha_to + i * j) ^ si[i];
+ }
+ }
+ /* Even syndrome = (Odd syndrome) ** 2 */
+ for (i = 2, j = 1; j <= cap; i = ++j << 1) {
+ if (si[j] == 0) {
+ si[i] = 0;
+ } else {
+ int16_t tmp;
+
+ tmp = readw_relaxed(index_of + si[j]);
+ tmp = (tmp * 2) % host->pmecc_cw_len;
+ si[i] = readw_relaxed(alpha_to + tmp);
+ }
+ }
+
+ return;
+}
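+
+/*
+ * The even-syndrome step relies on S(2j) = S(j)^2 in GF(2^m): squaring
+ * doubles the discrete log, so si[2j] is read back from the tables as
+ * alpha^((2 * index_of[si[j]]) mod (2^m - 1)).
+ */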
+
+static void pmecc_get_sigma(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ int16_t *lmu = host->pmecc_lmu;
+ int16_t *si = host->pmecc_si;
+ int *mu = host->pmecc_mu;
+ int *dmu = host->pmecc_dmu; /* Discrepancy */
+ int *delta = host->pmecc_delta; /* Delta order */
+ int cw_len = host->pmecc_cw_len;
+ const int16_t cap = host->pmecc_corr_cap;
+ const int num = 2 * cap + 1;
+ int16_t __iomem *index_of = host->pmecc_index_of;
+ int16_t __iomem *alpha_to = host->pmecc_alpha_to;
+ int i, j, k;
+ uint32_t dmu_0_count, tmp;
+ int16_t *smu = host->pmecc_smu;
+
+ /* index of largest delta */
+ int ro;
+ int largest;
+ int diff;
+
+ dmu_0_count = 0;
+
+ /* First Row */
+
+ /* Mu */
+ mu[0] = -1;
+
+ memset(smu, 0, sizeof(int16_t) * num);
+ smu[0] = 1;
+
+ /* discrepancy set to 1 */
+ dmu[0] = 1;
+	/* polynomial order set to 0 */
+ lmu[0] = 0;
+ delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
+
+ /* Second Row */
+
+ /* Mu */
+ mu[1] = 0;
+ /* Sigma(x) set to 1 */
+ memset(&smu[num], 0, sizeof(int16_t) * num);
+ smu[num] = 1;
+
+ /* discrepancy set to S1 */
+ dmu[1] = si[1];
+
+	/* polynomial order set to 0 */
+ lmu[1] = 0;
+
+ delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
+
+ /* Init the Sigma(x) last row */
+ memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num);
+
+ for (i = 1; i <= cap; i++) {
+ mu[i + 1] = i << 1;
+ /* Begin Computing Sigma (Mu+1) and L(mu) */
+ /* check if discrepancy is set to 0 */
+ if (dmu[i] == 0) {
+ dmu_0_count++;
+
+ tmp = ((cap - (lmu[i] >> 1) - 1) / 2);
+ if ((cap - (lmu[i] >> 1) - 1) & 0x1)
+ tmp += 2;
+ else
+ tmp += 1;
+
+ if (dmu_0_count == tmp) {
+ for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
+ smu[(cap + 1) * num + j] =
+ smu[i * num + j];
+
+ lmu[cap + 1] = lmu[i];
+ return;
+ }
+
+			/* copy polynomial */
+ for (j = 0; j <= lmu[i] >> 1; j++)
+ smu[(i + 1) * num + j] = smu[i * num + j];
+
+			/* copy the previous polynomial order to the next */
+ lmu[i + 1] = lmu[i];
+ } else {
+ ro = 0;
+ largest = -1;
+ /* find largest delta with dmu != 0 */
+ for (j = 0; j < i; j++) {
+ if ((dmu[j]) && (delta[j] > largest)) {
+ largest = delta[j];
+ ro = j;
+ }
+ }
+
+ /* compute difference */
+ diff = (mu[i] - mu[ro]);
+
+ /* Compute degree of the new smu polynomial */
+ if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
+ lmu[i + 1] = lmu[i];
+ else
+ lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
+
+ /* Init smu[i+1] with 0 */
+ for (k = 0; k < num; k++)
+ smu[(i + 1) * num + k] = 0;
+
+ /* Compute smu[i+1] */
+ for (k = 0; k <= lmu[ro] >> 1; k++) {
+ int16_t a, b, c;
+
+ if (!(smu[ro * num + k] && dmu[i]))
+ continue;
+ a = readw_relaxed(index_of + dmu[i]);
+ b = readw_relaxed(index_of + dmu[ro]);
+ c = readw_relaxed(index_of + smu[ro * num + k]);
+ tmp = a + (cw_len - b) + c;
+ a = readw_relaxed(alpha_to + tmp % cw_len);
+ smu[(i + 1) * num + (k + diff)] = a;
+ }
+
+ for (k = 0; k <= lmu[i] >> 1; k++)
+ smu[(i + 1) * num + k] ^= smu[i * num + k];
+ }
+
+ /* End Computing Sigma (Mu+1) and L(mu) */
+ /* In either case compute delta */
+ delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
+
+ /* Do not compute discrepancy for the last iteration */
+ if (i >= cap)
+ continue;
+
+ for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
+ tmp = 2 * (i - 1);
+ if (k == 0) {
+ dmu[i + 1] = si[tmp + 3];
+ } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
+ int16_t a, b, c;
+ a = readw_relaxed(index_of +
+ smu[(i + 1) * num + k]);
+ b = si[2 * (i - 1) + 3 - k];
+ c = readw_relaxed(index_of + b);
+ tmp = a + c;
+ tmp %= cw_len;
+ dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^
+ dmu[i + 1];
+ }
+ }
+ }
+
+ return;
+}
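+
+/*
+ * pmecc_get_sigma() is the Berlekamp iterative algorithm: each row updates
+ * the candidate error locator polynomial from the discrepancy dmu and the
+ * polynomial order lmu, and the final sigma ends up in the last row of
+ * the smu table, where pmecc_err_location() picks it up.
+ */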
+
+static int pmecc_err_location(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ unsigned long end_time;
+ const int cap = host->pmecc_corr_cap;
+ const int num = 2 * cap + 1;
+ int sector_size = host->pmecc_sector_size;
+	int err_nbr = 0;	/* number of errors */
+ int roots_nbr; /* number of roots */
+ int i;
+ uint32_t val;
+ int16_t *smu = host->pmecc_smu;
+
+ pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE);
+
+ for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) {
+ pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i,
+ smu[(cap + 1) * num + i]);
+ err_nbr++;
+ }
+
+ val = (err_nbr - 1) << 16;
+ if (sector_size == 1024)
+ val |= 1;
+
+ pmerrloc_writel(host->pmerrloc_base, ELCFG, val);
+ pmerrloc_writel(host->pmerrloc_base, ELEN,
+ sector_size * 8 + host->pmecc_degree * cap);
+
+ end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+ while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
+ & PMERRLOC_CALC_DONE)) {
+ if (unlikely(time_after(jiffies, end_time))) {
+ dev_err(host->dev, "PMECC: Timeout to calculate error location.\n");
+ return -1;
+ }
+ cpu_relax();
+ }
+
+ roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
+ & PMERRLOC_ERR_NUM_MASK) >> 8;
+ /* Number of roots == degree of smu hence <= cap */
+ if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1)
+ return err_nbr - 1;
+
+	/* Number of roots does not match the degree of smu;
+	 * unable to correct the errors */
+ return -1;
+}
+
+static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
+ int sector_num, int extra_bytes, int err_nbr)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int i = 0;
+ int byte_pos, bit_pos, sector_size, pos;
+ uint32_t tmp;
+ uint8_t err_byte;
+
+ sector_size = host->pmecc_sector_size;
+
+ while (err_nbr) {
+ tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1;
+ byte_pos = tmp / 8;
+ bit_pos = tmp % 8;
+
+ if (byte_pos >= (sector_size + extra_bytes))
+ BUG(); /* should never happen */
+
+ if (byte_pos < sector_size) {
+ err_byte = *(buf + byte_pos);
+ *(buf + byte_pos) ^= (1 << bit_pos);
+
+ pos = sector_num * host->pmecc_sector_size + byte_pos;
+ dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ pos, bit_pos, err_byte, *(buf + byte_pos));
+ } else {
+ /* Bit flip in OOB area */
+ tmp = sector_num * nand_chip->ecc.bytes
+ + (byte_pos - sector_size);
+ err_byte = ecc[tmp];
+ ecc[tmp] ^= (1 << bit_pos);
+
+ pos = tmp + nand_chip->ecc.layout->eccpos[0];
+ dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ pos, bit_pos, err_byte, ecc[tmp]);
+ }
+
+ i++;
+ err_nbr--;
+ }
+
+ return;
+}
+
+static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
+ u8 *ecc)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int i, err_nbr;
+ uint8_t *buf_pos;
+ int max_bitflips = 0;
+
+	/* If we can correct bitflips in an erased page, do the normal check */
+ if (host->caps->pmecc_correct_erase_page)
+ goto normal_check;
+
+ for (i = 0; i < nand_chip->ecc.total; i++)
+ if (ecc[i] != 0xff)
+ goto normal_check;
+ /* Erased page, return OK */
+ return 0;
+
+normal_check:
+ for (i = 0; i < nand_chip->ecc.steps; i++) {
+ err_nbr = 0;
+ if (pmecc_stat & 0x1) {
+ buf_pos = buf + i * host->pmecc_sector_size;
+
+ pmecc_gen_syndrome(mtd, i);
+ pmecc_substitute(mtd);
+ pmecc_get_sigma(mtd);
+
+ err_nbr = pmecc_err_location(mtd);
+ if (err_nbr == -1) {
+ dev_err(host->dev, "PMECC: Too many errors\n");
+ mtd->ecc_stats.failed++;
+ return -EIO;
+ } else {
+ pmecc_correct_data(mtd, buf_pos, ecc, i,
+ nand_chip->ecc.bytes, err_nbr);
+ mtd->ecc_stats.corrected += err_nbr;
+ max_bitflips = max_t(int, max_bitflips, err_nbr);
+ }
+ }
+ pmecc_stat >>= 1;
+ }
+
+ return max_bitflips;
+}
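+
+/*
+ * Each bit of pmecc_stat set by the controller flags one erroneous sector;
+ * the loop above shifts through them and runs the full syndrome /
+ * substitution / sigma / error-location pipeline per flagged sector.
+ */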
+
+static void pmecc_enable(struct atmel_nand_host *host, int ecc_op)
+{
+ u32 val;
+
+ if (ecc_op != NAND_ECC_READ && ecc_op != NAND_ECC_WRITE) {
+ dev_err(host->dev, "atmel_nand: wrong pmecc operation type!");
+ return;
+ }
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+ val = pmecc_readl_relaxed(host->ecc, CFG);
+
+ if (ecc_op == NAND_ECC_READ)
+ pmecc_writel(host->ecc, CFG, (val & ~PMECC_CFG_WRITE_OP)
+ | PMECC_CFG_AUTO_ENABLE);
+ else
+ pmecc_writel(host->ecc, CFG, (val | PMECC_CFG_WRITE_OP)
+ & ~PMECC_CFG_AUTO_ENABLE);
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+}
+
+static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ struct atmel_nand_host *host = chip->priv;
+ int eccsize = chip->ecc.size * chip->ecc.steps;
+ uint8_t *oob = chip->oob_poi;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t stat;
+ unsigned long end_time;
+ int bitflips = 0;
+
+ if (!host->nfc || !host->nfc->use_nfc_sram)
+ pmecc_enable(host, NAND_ECC_READ);
+
+ chip->read_buf(mtd, buf, eccsize);
+ chip->read_buf(mtd, oob, mtd->oobsize);
+
+ end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+ while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
+ if (unlikely(time_after(jiffies, end_time))) {
+ dev_err(host->dev, "PMECC: Timeout to get error status.\n");
+ return -EIO;
+ }
+ cpu_relax();
+ }
+
+ stat = pmecc_readl_relaxed(host->ecc, ISR);
+ if (stat != 0) {
+ bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]);
+ if (bitflips < 0)
+ /* uncorrectable errors */
+ return 0;
+ }
+
+ return bitflips;
+}
+
+static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+ struct atmel_nand_host *host = chip->priv;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ int i, j;
+ unsigned long end_time;
+
+ if (!host->nfc || !host->nfc->write_by_sram) {
+ pmecc_enable(host, NAND_ECC_WRITE);
+ chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
+ }
+
+ end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+ while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
+ if (unlikely(time_after(jiffies, end_time))) {
+ dev_err(host->dev, "PMECC: Timeout to get ECC value.\n");
+ return -EIO;
+ }
+ cpu_relax();
+ }
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ for (j = 0; j < chip->ecc.bytes; j++) {
+ int pos;
+
+ pos = i * chip->ecc.bytes + j;
+ chip->oob_poi[eccpos[pos]] =
+ pmecc_readb_ecc_relaxed(host->ecc, i, j);
+ }
+ }
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static void atmel_pmecc_core_init(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ uint32_t val = 0;
+ struct nand_ecclayout *ecc_layout;
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+
+ switch (host->pmecc_corr_cap) {
+ case 2:
+ val = PMECC_CFG_BCH_ERR2;
+ break;
+ case 4:
+ val = PMECC_CFG_BCH_ERR4;
+ break;
+ case 8:
+ val = PMECC_CFG_BCH_ERR8;
+ break;
+ case 12:
+ val = PMECC_CFG_BCH_ERR12;
+ break;
+ case 24:
+ val = PMECC_CFG_BCH_ERR24;
+ break;
+ }
+
+ if (host->pmecc_sector_size == 512)
+ val |= PMECC_CFG_SECTOR512;
+ else if (host->pmecc_sector_size == 1024)
+ val |= PMECC_CFG_SECTOR1024;
+
+ switch (nand_chip->ecc.steps) {
+ case 1:
+ val |= PMECC_CFG_PAGE_1SECTOR;
+ break;
+ case 2:
+ val |= PMECC_CFG_PAGE_2SECTORS;
+ break;
+ case 4:
+ val |= PMECC_CFG_PAGE_4SECTORS;
+ break;
+ case 8:
+ val |= PMECC_CFG_PAGE_8SECTORS;
+ break;
+ }
+
+ val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE
+ | PMECC_CFG_AUTO_DISABLE);
+ pmecc_writel(host->ecc, CFG, val);
+
+ ecc_layout = nand_chip->ecc.layout;
+ pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
+ pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]);
+ pmecc_writel(host->ecc, EADDR,
+ ecc_layout->eccpos[ecc_layout->eccbytes - 1]);
+ /* See datasheet about PMECC Clock Control Register */
+ pmecc_writel(host->ecc, CLK, 2);
+ pmecc_writel(host->ecc, IDR, 0xff);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+}
+
+/*
+ * Get the minimum ECC requirements from the NAND.
+ * If pmecc-cap and pmecc-sector-size are not specified in the DTS, this
+ * function sets them according to the minimum ECC requirement; otherwise
+ * the values from the DTS file are used.
+ * Returns 0 on success, otherwise an error code.
+ */
+static int pmecc_choose_ecc(struct atmel_nand_host *host,
+ int *cap, int *sector_size)
+{
+ /* Get minimum ECC requirements */
+ if (host->nand_chip.ecc_strength_ds) {
+ *cap = host->nand_chip.ecc_strength_ds;
+ *sector_size = host->nand_chip.ecc_step_ds;
+ dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n",
+ *cap, *sector_size);
+ } else {
+ *cap = 2;
+ *sector_size = 512;
+ dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n");
+ }
+
+ /* If device tree doesn't specify, use NAND's minimum ECC parameters */
+ if (host->pmecc_corr_cap == 0) {
+		/* use the closest supported ECC capability (round up) */
+ if (*cap <= 2)
+ host->pmecc_corr_cap = 2;
+ else if (*cap <= 4)
+ host->pmecc_corr_cap = 4;
+ else if (*cap <= 8)
+ host->pmecc_corr_cap = 8;
+ else if (*cap <= 12)
+ host->pmecc_corr_cap = 12;
+ else if (*cap <= 24)
+ host->pmecc_corr_cap = 24;
+ else
+ return -EINVAL;
+ }
+ if (host->pmecc_sector_size == 0) {
+		/* use the closest supported sector size (round down) */
+ if (*sector_size >= 1024)
+ host->pmecc_sector_size = 1024;
+ else if (*sector_size >= 512)
+ host->pmecc_sector_size = 512;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
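+
+/*
+ * Example: a chip whose ONFI parameters report 6 bits per 512 bytes ends
+ * up with pmecc_corr_cap = 8 (the next supported capability up) and
+ * pmecc_sector_size = 512, unless the DTS overrides both values.
+ */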
+
+static inline int deg(unsigned int poly)
+{
+ /* polynomial degree is the most-significant bit index */
+ return fls(poly) - 1;
+}
+
+static int build_gf_tables(int mm, unsigned int poly,
+ int16_t *index_of, int16_t *alpha_to)
+{
+ unsigned int i, x = 1;
+ const unsigned int k = 1 << deg(poly);
+ unsigned int nn = (1 << mm) - 1;
+
+ /* primitive polynomial must be of degree m */
+ if (k != (1u << mm))
+ return -EINVAL;
+
+ for (i = 0; i < nn; i++) {
+ alpha_to[i] = x;
+ index_of[x] = i;
+ if (i && (x == 1))
+ /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
+ return -EINVAL;
+ x <<= 1;
+ if (x & k)
+ x ^= poly;
+ }
+ alpha_to[nn] = 1;
+ index_of[0] = 0;
+
+ return 0;
+}
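+
+/*
+ * Example in GF(2^3) with poly = 0xb (x^3 + x + 1): the loop produces
+ * alpha_to = {1, 2, 4, 3, 6, 7, 5} for i = 0..6, then alpha_to[7] = 1;
+ * each iteration doubles x and XORs in the polynomial whenever bit 3
+ * gets set. The PMECC tables use the same construction with m = 13 or 14.
+ */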
+
+static uint16_t *create_lookup_table(struct device *dev, int sector_size)
+{
+ int degree = (sector_size == 512) ?
+ PMECC_GF_DIMENSION_13 :
+ PMECC_GF_DIMENSION_14;
+ unsigned int poly = (sector_size == 512) ?
+ PMECC_GF_13_PRIMITIVE_POLY :
+ PMECC_GF_14_PRIMITIVE_POLY;
+ int table_size = (sector_size == 512) ?
+ PMECC_LOOKUP_TABLE_SIZE_512 :
+ PMECC_LOOKUP_TABLE_SIZE_1024;
+
+ int16_t *addr = devm_kzalloc(dev, 2 * table_size * sizeof(uint16_t),
+ GFP_KERNEL);
+ if (addr && build_gf_tables(degree, poly, addr, addr + table_size))
+ return NULL;
+
+	return (uint16_t *)addr;
+}
+
+static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
+ struct atmel_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ struct nand_chip *nand_chip = &host->nand_chip;
+ struct resource *regs, *regs_pmerr, *regs_rom;
+ uint16_t *galois_table;
+ int cap, sector_size, err_no;
+
+ err_no = pmecc_choose_ecc(host, &cap, &sector_size);
+ if (err_no) {
+		dev_err(host->dev, "The NAND flash's ECC requirements are not supported!");
+ return err_no;
+ }
+
+ if (cap > host->pmecc_corr_cap ||
+ sector_size != host->pmecc_sector_size)
+		dev_info(host->dev, "WARNING: Using PMECC parameters different from the NAND ONFI ECC requirement.\n");
+
+ cap = host->pmecc_corr_cap;
+ sector_size = host->pmecc_sector_size;
+ host->pmecc_lookup_table_offset = (sector_size == 512) ?
+ host->pmecc_lookup_table_offset_512 :
+ host->pmecc_lookup_table_offset_1024;
+
+ dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
+ cap, sector_size);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!regs) {
+ dev_warn(host->dev,
+			"Can't get I/O resource regs for PMECC controller, falling back to software ECC\n");
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ host->ecc = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->ecc)) {
+ err_no = PTR_ERR(host->ecc);
+ goto err;
+ }
+
+ regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ host->pmerrloc_base = devm_ioremap_resource(&pdev->dev, regs_pmerr);
+ if (IS_ERR(host->pmerrloc_base)) {
+ err_no = PTR_ERR(host->pmerrloc_base);
+ goto err;
+ }
+
+ if (!host->has_no_lookup_table) {
+ regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev,
+ regs_rom);
+ if (IS_ERR(host->pmecc_rom_base)) {
+			dev_err(host->dev, "Cannot get I/O resource for ROM, will build a lookup table at runtime!\n");
+ host->has_no_lookup_table = true;
+ }
+ }
+
+ if (host->has_no_lookup_table) {
+		/* Build the lookup table at runtime */
+ galois_table = create_lookup_table(host->dev, sector_size);
+ if (!galois_table) {
+			dev_err(host->dev, "Failed to build a lookup table at runtime!\n");
+ err_no = -EINVAL;
+ goto err;
+ }
+
+ host->pmecc_rom_base = (void __iomem *)galois_table;
+ host->pmecc_lookup_table_offset = 0;
+ }
+
+ nand_chip->ecc.size = sector_size;
+
+ /* set ECC page size and oob layout */
+ switch (mtd->writesize) {
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ case 8192:
+ if (sector_size > mtd->writesize) {
+ dev_err(host->dev, "pmecc sector size is bigger than the page size!\n");
+ err_no = -EINVAL;
+ goto err;
+ }
+
+ host->pmecc_degree = (sector_size == 512) ?
+ PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14;
+ host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
+ host->pmecc_alpha_to = pmecc_get_alpha_to(host);
+ host->pmecc_index_of = host->pmecc_rom_base +
+ host->pmecc_lookup_table_offset;
+
+ nand_chip->ecc.strength = cap;
+ nand_chip->ecc.bytes = pmecc_get_ecc_bytes(cap, sector_size);
+ nand_chip->ecc.steps = mtd->writesize / sector_size;
+ nand_chip->ecc.total = nand_chip->ecc.bytes *
+ nand_chip->ecc.steps;
+ if (nand_chip->ecc.total >
+ mtd->oobsize - PMECC_OOB_RESERVED_BYTES) {
+ dev_err(host->dev, "No room for ECC bytes\n");
+ err_no = -EINVAL;
+ goto err;
+ }
+ pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
+ mtd->oobsize,
+ nand_chip->ecc.total);
+
+ nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
+ break;
+ default:
+ dev_warn(host->dev,
+ "Unsupported page size for PMECC, use Software ECC\n");
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ /* Allocate data for PMECC computation */
+ err_no = pmecc_data_alloc(host);
+ if (err_no) {
+ dev_err(host->dev,
+ "Cannot allocate memory for PMECC computation!\n");
+ goto err;
+ }
+
+ nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
+ nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
+ nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
+
+ atmel_pmecc_core_init(mtd);
+
+ return 0;
+
+err:
+ return err_no;
+}
+
+/*
+ * Calculate HW ECC
+ *
+ * function called after a write
+ *
+ * mtd: MTD block structure
+ * dat: raw data (unused)
+ * ecc_code: buffer for ECC
+ */
+static int atmel_nand_calculate(struct mtd_info *mtd,
+ const u_char *dat, unsigned char *ecc_code)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ unsigned int ecc_value;
+
+ /* get the first 2 ECC bytes */
+ ecc_value = ecc_readl(host->ecc, PR);
+
+ ecc_code[0] = ecc_value & 0xFF;
+ ecc_code[1] = (ecc_value >> 8) & 0xFF;
+
+ /* get the last 2 ECC bytes */
+ ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY;
+
+ ecc_code[2] = ecc_value & 0xFF;
+ ecc_code[3] = (ecc_value >> 8) & 0xFF;
+
+ return 0;
+}
+
+/*
+ * HW ECC read page function
+ *
+ * mtd: mtd info structure
+ * chip: nand chip info structure
+ * buf: buffer to store read data
+ * oob_required: caller expects OOB data read to chip->oob_poi
+ */
+static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ uint8_t *ecc_pos;
+ int stat;
+ unsigned int max_bitflips = 0;
+
+ /*
+ * Errata: ALE is incorrectly wired up to the ECC controller
+ * on the AP7000, so it will include the address cycles in the
+ * ECC calculation.
+ *
+ * Workaround: Reset the parity registers before reading the
+ * actual data.
+ */
+ struct atmel_nand_host *host = chip->priv;
+ if (host->board.need_reset_workaround)
+ ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
+
+ /* read the page */
+ chip->read_buf(mtd, p, eccsize);
+
+ /* move to ECC position if needed */
+ if (eccpos[0] != 0) {
+ /* This only works on large pages
+ * because the ECC controller waits for
+ * NAND_CMD_RNDOUTSTART after the
+ * NAND_CMD_RNDOUT.
+		 * Anyway, for small pages, eccpos[0] == 0.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + eccpos[0], -1);
+ }
+
+ /* the ECC controller needs to read the ECC just after the data */
+ ecc_pos = oob + eccpos[0];
+ chip->read_buf(mtd, ecc_pos, eccbytes);
+
+ /* check if there's an error */
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ /* get back to oob start (end of page) */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+
+ /* read the oob */
+ chip->read_buf(mtd, oob, mtd->oobsize);
+
+ return max_bitflips;
+}
+
+/*
+ * HW ECC Correction
+ *
+ * function called after a read
+ *
+ * mtd: MTD block structure
+ * dat: raw data read from the chip
+ * read_ecc: ECC from the chip (unused)
+ * isnull: unused
+ *
+ * Detect and correct a 1 bit error for a page
+ */
+static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *isnull)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ unsigned int ecc_status;
+ unsigned int ecc_word, ecc_bit;
+
+ /* get the status from the Status Register */
+ ecc_status = ecc_readl(host->ecc, SR);
+
+ /* if there's no error */
+ if (likely(!(ecc_status & ATMEL_ECC_RECERR)))
+ return 0;
+
+ /* get error bit offset (4 bits) */
+ ecc_bit = ecc_readl(host->ecc, PR) & ATMEL_ECC_BITADDR;
+ /* get word address (12 bits) */
+ ecc_word = ecc_readl(host->ecc, PR) & ATMEL_ECC_WORDADDR;
+ ecc_word >>= 4;
+
+ /* if there are multiple errors */
+ if (ecc_status & ATMEL_ECC_MULERR) {
+ /* check if it is a freshly erased block
+ * (filled with 0xff) */
+ if ((ecc_bit == ATMEL_ECC_BITADDR)
+ && (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) {
+ /* the block has just been erased, return OK */
+ return 0;
+ }
+		/* it doesn't seem to be a freshly
+ * erased block.
+ * We can't correct so many errors */
+ dev_dbg(host->dev, "atmel_nand : multiple errors detected."
+ " Unable to correct.\n");
+ return -EIO;
+ }
+
+ /* if there's a single bit error : we can correct it */
+ if (ecc_status & ATMEL_ECC_ECCERR) {
+ /* there's nothing much to do here.
+ * the bit error is on the ECC itself.
+ */
+ dev_dbg(host->dev, "atmel_nand : one bit error on ECC code."
+ " Nothing to correct\n");
+ return 0;
+ }
+
+ dev_dbg(host->dev, "atmel_nand : one bit error on data."
+ " (word offset in the page :"
+ " 0x%x bit offset : 0x%x)\n",
+ ecc_word, ecc_bit);
+ /* correct the error */
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ /* 16 bits words */
+ ((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit);
+ } else {
+ /* 8 bits words */
+ dat[ecc_word] ^= (1 << ecc_bit);
+ }
+ dev_dbg(host->dev, "atmel_nand : error corrected\n");
+ return 1;
+}
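+
+/*
+ * Example: RECERR set with neither MULERR nor ECCERR means a single-bit
+ * error in the data area; the PR register then supplies the word and bit
+ * offsets of the bit that gets flipped back above.
+ */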
+
+/*
+ * Enable HW ECC : unused on most chips
+ */
+static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ if (host->board.need_reset_workaround)
+ ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
+}
+
+static const struct of_device_id atmel_nand_dt_ids[];
+
+static int atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
+{
+ u32 val;
+ u32 offset[2];
+ int ecc_mode;
+ struct atmel_nand_data *board = &host->board;
+ enum of_gpio_flags flags = 0;
+
+ host->caps = (struct atmel_nand_caps *)
+ of_match_device(atmel_nand_dt_ids, host->dev)->data;
+
+ if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
+ if (val >= 32) {
+ dev_err(host->dev, "invalid addr-offset %u\n", val);
+ return -EINVAL;
+ }
+ board->ale = val;
+ }
+
+ if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
+ if (val >= 32) {
+ dev_err(host->dev, "invalid cmd-offset %u\n", val);
+ return -EINVAL;
+ }
+ board->cle = val;
+ }
+
+ ecc_mode = of_get_nand_ecc_mode(np);
+
+ board->ecc_mode = ecc_mode < 0 ? NAND_ECC_SOFT : ecc_mode;
+
+ board->on_flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");
+
+ if (of_get_nand_bus_width(np) == 16)
+ board->bus_width_16 = 1;
+
+ board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
+ board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
+
+ board->enable_pin = of_get_gpio(np, 1);
+ board->det_pin = of_get_gpio(np, 2);
+
+ host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
+
+	/* load the NFC driver if one is present */
+ of_platform_populate(np, NULL, NULL, host->dev);
+
+ if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc)
+ return 0; /* Not using PMECC */
+
+ /* use PMECC, get correction capability, sector size and lookup
+ * table offset.
+ * If correction bits and sector size are not specified, then find
+ * them from NAND ONFI parameters.
+ */
+ if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
+ if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
+ (val != 24)) {
+ dev_err(host->dev,
+ "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_corr_cap = (u8)val;
+ }
+
+ if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
+ if ((val != 512) && (val != 1024)) {
+ dev_err(host->dev,
+ "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_sector_size = (u16)val;
+ }
+
+ if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
+ offset, 2) != 0) {
+		dev_err(host->dev, "Cannot get PMECC lookup table offset, will build a lookup table at runtime.\n");
+ host->has_no_lookup_table = true;
+ /* Will build a lookup table and initialize the offset later */
+ return 0;
+ }
+ if (!offset[0] && !offset[1]) {
+ dev_err(host->dev, "Invalid PMECC lookup table offset\n");
+ return -EINVAL;
+ }
+ host->pmecc_lookup_table_offset_512 = offset[0];
+ host->pmecc_lookup_table_offset_1024 = offset[1];
+
+ return 0;
+}
+
+static int atmel_hw_nand_init_params(struct platform_device *pdev,
+ struct atmel_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ struct nand_chip *nand_chip = &host->nand_chip;
+ struct resource *regs;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!regs) {
+ dev_err(host->dev,
+ "Can't get I/O resource regs, use software ECC\n");
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ host->ecc = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->ecc))
+ return PTR_ERR(host->ecc);
+
+ /* ECC is calculated for the whole page (1 step) */
+ nand_chip->ecc.size = mtd->writesize;
+
+ /* set ECC page size and oob layout */
+ switch (mtd->writesize) {
+ case 512:
+ nand_chip->ecc.layout = &atmel_oobinfo_small;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
+ break;
+ case 1024:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
+ break;
+ case 2048:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
+ break;
+ case 4096:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ /* set up for HW ECC */
+ nand_chip->ecc.calculate = atmel_nand_calculate;
+ nand_chip->ecc.correct = atmel_nand_correct;
+ nand_chip->ecc.hwctl = atmel_nand_hwctl;
+ nand_chip->ecc.read_page = atmel_nand_read_page;
+ nand_chip->ecc.bytes = 4;
+ nand_chip->ecc.strength = 1;
+
+ return 0;
+}
+
+static inline u32 nfc_read_status(struct atmel_nand_host *host)
+{
+ u32 err_flags = NFC_SR_DTOE | NFC_SR_UNDEF | NFC_SR_AWB | NFC_SR_ASE;
+ u32 nfc_status = nfc_readl(host->nfc->hsmc_regs, SR);
+
+ if (unlikely(nfc_status & err_flags)) {
+ if (nfc_status & NFC_SR_DTOE)
+ dev_err(host->dev, "NFC: Waiting Nand R/B Timeout Error\n");
+ else if (nfc_status & NFC_SR_UNDEF)
+ dev_err(host->dev, "NFC: Access Undefined Area Error\n");
+ else if (nfc_status & NFC_SR_AWB)
+ dev_err(host->dev, "NFC: Access memory While NFC is busy\n");
+ else if (nfc_status & NFC_SR_ASE)
+ dev_err(host->dev, "NFC: Access memory Size Error\n");
+ }
+
+ return nfc_status;
+}
+
+/* SMC interrupt service routine */
+static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
+{
+ struct atmel_nand_host *host = dev_id;
+ u32 status, mask, pending;
+ irqreturn_t ret = IRQ_NONE;
+
+ status = nfc_read_status(host);
+ mask = nfc_readl(host->nfc->hsmc_regs, IMR);
+ pending = status & mask;
+
+ if (pending & NFC_SR_XFR_DONE) {
+ complete(&host->nfc->comp_xfer_done);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
+ ret = IRQ_HANDLED;
+ }
+ if (pending & NFC_SR_RB_EDGE) {
+ complete(&host->nfc->comp_ready);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
+ ret = IRQ_HANDLED;
+ }
+ if (pending & NFC_SR_CMD_DONE) {
+ complete(&host->nfc->comp_cmd_done);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/* NFC(Nand Flash Controller) related functions */
+static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
+{
+ if (flag & NFC_SR_XFR_DONE)
+ init_completion(&host->nfc->comp_xfer_done);
+
+ if (flag & NFC_SR_RB_EDGE)
+ init_completion(&host->nfc->comp_ready);
+
+ if (flag & NFC_SR_CMD_DONE)
+ init_completion(&host->nfc->comp_cmd_done);
+
+	/* Enable the interrupts we need to wait for */
+ nfc_writel(host->nfc->hsmc_regs, IER, flag);
+}
+
+static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
+{
+ int i, index = 0;
+	struct completion *comp[3];	/* Supports 3 interrupt completions */
+
+ if (flag & NFC_SR_XFR_DONE)
+ comp[index++] = &host->nfc->comp_xfer_done;
+
+ if (flag & NFC_SR_RB_EDGE)
+ comp[index++] = &host->nfc->comp_ready;
+
+ if (flag & NFC_SR_CMD_DONE)
+ comp[index++] = &host->nfc->comp_cmd_done;
+
+ if (index == 0) {
+ dev_err(host->dev, "Unknown interrupt flag: 0x%08x\n", flag);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < index; i++) {
+ if (wait_for_completion_timeout(comp[i],
+ msecs_to_jiffies(NFC_TIME_OUT_MS)))
+ continue; /* wait for next completion */
+ else
+ goto err_timeout;
+ }
+
+ return 0;
+
+err_timeout:
+	dev_err(host->dev, "Timed out waiting for interrupt: 0x%08x\n", flag);
+ /* Disable the interrupt as it is not handled by interrupt handler */
+ nfc_writel(host->nfc->hsmc_regs, IDR, flag);
+ return -ETIMEDOUT;
+}
+
+static int nfc_send_command(struct atmel_nand_host *host,
+ unsigned int cmd, unsigned int addr, unsigned char cycle0)
+{
+ unsigned long timeout;
+ u32 flag = NFC_SR_CMD_DONE;
+ flag |= cmd & NFCADDR_CMD_DATAEN ? NFC_SR_XFR_DONE : 0;
+
+ dev_dbg(host->dev,
+ "nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n",
+ cmd, addr, cycle0);
+
+ timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
+ while (nfc_readl(host->nfc->hsmc_regs, SR) & NFC_SR_BUSY) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(host->dev,
+				"Timed out waiting for NFC ready!\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ nfc_prepare_interrupt(host, flag);
+ nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0);
+ nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs);
+ return nfc_wait_interrupt(host, flag);
+}
+
+static int nfc_device_ready(struct mtd_info *mtd)
+{
+ u32 status, mask;
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ status = nfc_read_status(host);
+ mask = nfc_readl(host->nfc->hsmc_regs, IMR);
+
+	/* The mask should be 0. If not, we may have lost interrupts */
+ if (unlikely(mask & status))
+ dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
+ mask & status);
+
+ return status & NFC_SR_RB_EDGE;
+}
+
+static void nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ if (chip == -1)
+ nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_DISABLE);
+ else
+ nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE);
+}
+
+static int nfc_make_addr(struct mtd_info *mtd, int command, int column,
+ int page_addr, unsigned int *addr1234, unsigned int *cycle0)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ int acycle = 0;
+ unsigned char addr_bytes[8];
+ int index = 0, bit_shift;
+
+ BUG_ON(addr1234 == NULL || cycle0 == NULL);
+
+ *cycle0 = 0;
+ *addr1234 = 0;
+
+ if (column != -1) {
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ addr_bytes[acycle++] = column & 0xff;
+ if (mtd->writesize > 512)
+ addr_bytes[acycle++] = (column >> 8) & 0xff;
+ }
+
+ if (page_addr != -1) {
+ addr_bytes[acycle++] = page_addr & 0xff;
+ addr_bytes[acycle++] = (page_addr >> 8) & 0xff;
+ if (chip->chipsize > (128 << 20))
+ addr_bytes[acycle++] = (page_addr >> 16) & 0xff;
+ }
+
+ if (acycle > 4)
+ *cycle0 = addr_bytes[index++];
+
+ for (bit_shift = 0; index < acycle; bit_shift += 8)
+ *addr1234 += addr_bytes[index++] << bit_shift;
+
+ /* return acycle in cmd register */
+ return acycle << NFCADDR_CMD_ACYCLE_BIT_POS;
+}
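+
+/*
+ * Example: on a 2KiB-page chip larger than 128MiB, column = 0 and
+ * page_addr = 0x10000 produce five address cycles {0x00, 0x00, 0x00,
+ * 0x00, 0x01}: the first byte goes into cycle0 and the remaining four
+ * are packed little-endian into addr1234 (here 0x01000000).
+ */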
+
+static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ unsigned long timeout;
+ unsigned int nfc_addr_cmd = 0;
+
+ unsigned int cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
+
+	/* Set default settings: no cmd2, no address cycle, read from NAND */
+ unsigned int cmd2 = 0;
+ unsigned int vcmd2 = 0;
+ int acycle = NFCADDR_CMD_ACYCLE_NONE;
+ int csid = NFCADDR_CMD_CSID_3;
+ int dataen = NFCADDR_CMD_DATADIS;
+ int nfcwr = NFCADDR_CMD_NFCRD;
+ unsigned int addr1234 = 0;
+ unsigned int cycle0 = 0;
+ bool do_addr = true;
+ host->nfc->data_in_sram = NULL;
+
+ dev_dbg(host->dev, "%s: cmd = 0x%02x, col = 0x%08x, page = 0x%08x\n",
+ __func__, command, column, page_addr);
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ nfc_addr_cmd = cmd1 | acycle | csid | dataen | nfcwr;
+ nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
+ udelay(chip->chip_delay);
+
+ nfc_nand_command(mtd, NAND_CMD_STATUS, -1, -1);
+ timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(host->dev,
+					"Timed out waiting for status ready!\n");
+ break;
+ }
+ }
+ return;
+ case NAND_CMD_STATUS:
+ do_addr = false;
+ break;
+ case NAND_CMD_PARAM:
+ case NAND_CMD_READID:
+ do_addr = false;
+ acycle = NFCADDR_CMD_ACYCLE_1;
+ if (column != -1)
+ addr1234 = column;
+ break;
+ case NAND_CMD_RNDOUT:
+ cmd2 = NAND_CMD_RNDOUTSTART << NFCADDR_CMD_CMD2_BIT_POS;
+ vcmd2 = NFCADDR_CMD_VCMD2;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0; /* only READ0 is valid */
+ cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
+ }
+ if (host->nfc->use_nfc_sram) {
+ /* Enable Data transfer to sram */
+ dataen = NFCADDR_CMD_DATAEN;
+
+			/* Need to enable PMECC now, since the NFC transfers
+			 * data on the bus right after sending the NFC read
+			 * command.
+			 */
+ if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
+ pmecc_enable(host, NAND_ECC_READ);
+ }
+
+ cmd2 = NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS;
+ vcmd2 = NFCADDR_CMD_VCMD2;
+ break;
+	/* For programming commands, the command must be set to write enable */
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ nfcwr = NFCADDR_CMD_NFCWR;
+ if (host->nfc->will_write_sram && command == NAND_CMD_SEQIN)
+ dataen = NFCADDR_CMD_DATAEN;
+ break;
+ default:
+ break;
+ }
+
+ if (do_addr)
+ acycle = nfc_make_addr(mtd, command, column, page_addr,
+ &addr1234, &cycle0);
+
+ nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr;
+ nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
+
+ /*
+	 * Program and erase have their own busy handlers; status, sequential
+ * in, and deplete1 need no delay.
+ */
+ switch (command) {
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RNDOUT:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_READID:
+ return;
+
+ case NAND_CMD_READ0:
+ if (dataen == NFCADDR_CMD_DATAEN) {
+ host->nfc->data_in_sram = host->nfc->sram_bank0 +
+ nfc_get_sram_off(host);
+ return;
+ }
+ /* fall through */
+ default:
+ nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
+ nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
+ }
+}
+
+static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
+{
+ int cfg, len;
+ int status = 0;
+ struct atmel_nand_host *host = chip->priv;
+ void *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);
+
+ /* Subpage write is not supported */
+ if (offset || (data_len < mtd->writesize))
+ return -EINVAL;
+
+ len = mtd->writesize;
+	/* Copy the page data to SRAM; the NFC will write it to NAND */
+ if (use_dma) {
+ if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) != 0)
+			/* Fall back to CPU copy */
+ memcpy(sram, buf, len);
+ } else {
+ memcpy(sram, buf, len);
+ }
+
+ cfg = nfc_readl(host->nfc->hsmc_regs, CFG);
+ if (unlikely(raw) && oob_required) {
+ memcpy(sram + len, chip->oob_poi, mtd->oobsize);
+ len += mtd->oobsize;
+ nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE);
+ } else {
+ nfc_writel(host->nfc->hsmc_regs, CFG, cfg & ~NFC_CFG_WSPARE);
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
+ /*
+		 * When using the NFC SRAM, PMECC must be set up before
+		 * sending the NAND_CMD_SEQIN command, since once the NAND
+		 * command is sent, the NFC transfers between SRAM and NAND.
+ */
+ pmecc_enable(host, NAND_ECC_WRITE);
+
+ host->nfc->will_write_sram = true;
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ host->nfc->will_write_sram = false;
+
+ if (likely(!raw))
+ /* Need to write ecc into oob */
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+ if (status < 0)
+ return status;
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_WRITING, status, page);
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int nfc_sram_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
+ int res = 0;
+
+ /* Initialize the NFC CFG register */
+ unsigned int cfg_nfc = 0;
+
+ /* set page size and oob layout */
+ switch (mtd->writesize) {
+ case 512:
+ cfg_nfc = NFC_CFG_PAGESIZE_512;
+ break;
+ case 1024:
+ cfg_nfc = NFC_CFG_PAGESIZE_1024;
+ break;
+ case 2048:
+ cfg_nfc = NFC_CFG_PAGESIZE_2048;
+ break;
+ case 4096:
+ cfg_nfc = NFC_CFG_PAGESIZE_4096;
+ break;
+ case 8192:
+ cfg_nfc = NFC_CFG_PAGESIZE_8192;
+ break;
+ default:
+ dev_err(host->dev, "Unsupported page size for NFC.\n");
+ res = -ENXIO;
+ return res;
+ }
+
+	/* OOB size in bytes = (NFCSPARESIZE + 1) * 4.
+	 * The maximum supported spare size is 512 bytes. */
+ cfg_nfc |= (((mtd->oobsize / 4) - 1) << NFC_CFG_NFC_SPARESIZE_BIT_POS
+ & NFC_CFG_NFC_SPARESIZE);
+	/* set the maximum timeout by default */
+ cfg_nfc |= NFC_CFG_RSPARE |
+ NFC_CFG_NFC_DTOCYC | NFC_CFG_NFC_DTOMUL;
+
+ nfc_writel(host->nfc->hsmc_regs, CFG, cfg_nfc);
+
+ host->nfc->will_write_sram = false;
+ nfc_set_sram_bank(host, 0);
+
+ /* Use Write page with NFC SRAM only for PMECC or ECC NONE. */
+ if (host->nfc->write_by_sram) {
+ if ((chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) ||
+ chip->ecc.mode == NAND_ECC_NONE)
+ chip->write_page = nfc_sram_write_page;
+ else
+ host->nfc->write_by_sram = false;
+ }
+
+	dev_info(host->dev, "Using NFC SRAM read %s\n",
+ host->nfc->write_by_sram ? "and write" : "");
+ return 0;
+}
+
+static struct platform_driver atmel_nand_nfc_driver;
+/*
+ * Probe for the NAND device.
+ */
+static int atmel_nand_probe(struct platform_device *pdev)
+{
+ struct atmel_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct resource *mem;
+ struct mtd_part_parser_data ppdata = {};
+ int res, irq;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ res = platform_driver_register(&atmel_nand_nfc_driver);
+ if (res)
+ dev_err(&pdev->dev, "atmel_nand: can't register NFC driver\n");
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->io_base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(host->io_base)) {
+ res = PTR_ERR(host->io_base);
+ goto err_nand_ioremap;
+ }
+ host->io_phys = (dma_addr_t)mem->start;
+
+ mtd = &host->mtd;
+ nand_chip = &host->nand_chip;
+ host->dev = &pdev->dev;
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+		/* of_node can only be parsed when CONFIG_OF is enabled */
+ res = atmel_of_init_port(host, pdev->dev.of_node);
+ if (res)
+ goto err_nand_ioremap;
+ } else {
+ memcpy(&host->board, dev_get_platdata(&pdev->dev),
+ sizeof(struct atmel_nand_data));
+ }
+
+ nand_chip->priv = host; /* link the private data structures */
+ mtd->priv = nand_chip;
+ mtd->owner = THIS_MODULE;
+
+ /* Set address of NAND IO lines */
+ nand_chip->IO_ADDR_R = host->io_base;
+ nand_chip->IO_ADDR_W = host->io_base;
+
+ if (nand_nfc.is_initialized) {
+ /* NFC driver is probed and initialized */
+ host->nfc = &nand_nfc;
+
+ nand_chip->select_chip = nfc_select_chip;
+ nand_chip->dev_ready = nfc_device_ready;
+ nand_chip->cmdfunc = nfc_nand_command;
+
+ /* Initialize the interrupt for NFC */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(host->dev, "Cannot get HSMC irq!\n");
+ res = irq;
+ goto err_nand_ioremap;
+ }
+
+ res = devm_request_irq(&pdev->dev, irq, hsmc_interrupt,
+ 0, "hsmc", host);
+ if (res) {
+ dev_err(&pdev->dev, "Unable to request HSMC irq %d\n",
+ irq);
+ goto err_nand_ioremap;
+ }
+ } else {
+ res = atmel_nand_set_enable_ready_pins(mtd);
+ if (res)
+ goto err_nand_ioremap;
+
+ nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
+ }
+
+ nand_chip->ecc.mode = host->board.ecc_mode;
+ nand_chip->chip_delay = 40; /* 40us command delay time */
+
+ if (host->board.bus_width_16) /* 16-bit bus width */
+ nand_chip->options |= NAND_BUSWIDTH_16;
+
+ nand_chip->read_buf = atmel_read_buf;
+ nand_chip->write_buf = atmel_write_buf;
+
+ platform_set_drvdata(pdev, host);
+ atmel_nand_enable(host);
+
+ if (gpio_is_valid(host->board.det_pin)) {
+ res = devm_gpio_request(&pdev->dev,
+ host->board.det_pin, "nand_det");
+ if (res < 0) {
+ dev_err(&pdev->dev,
+ "can't request det gpio %d\n",
+ host->board.det_pin);
+ goto err_no_card;
+ }
+
+ res = gpio_direction_input(host->board.det_pin);
+ if (res < 0) {
+ dev_err(&pdev->dev,
+ "can't request input direction det gpio %d\n",
+ host->board.det_pin);
+ goto err_no_card;
+ }
+
+ if (gpio_get_value(host->board.det_pin)) {
+ dev_info(&pdev->dev, "No SmartMedia card inserted.\n");
+ res = -ENXIO;
+ goto err_no_card;
+ }
+ }
+
+ if (host->board.on_flash_bbt || on_flash_bbt) {
+ dev_info(&pdev->dev, "Use On Flash BBT\n");
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
+ }
+
+ if (!host->board.has_dma)
+ use_dma = 0;
+
+ if (use_dma) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ host->dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (!host->dma_chan) {
+ dev_err(host->dev, "Failed to request DMA channel\n");
+ use_dma = 0;
+ }
+ }
+ if (use_dma)
+ dev_info(host->dev, "Using %s for DMA transfers.\n",
+ dma_chan_name(host->dma_chan));
+ else
+ dev_info(host->dev, "No DMA support for NAND access.\n");
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ res = -ENXIO;
+ goto err_scan_ident;
+ }
+
+ if (nand_chip->ecc.mode == NAND_ECC_HW) {
+ if (host->has_pmecc)
+ res = atmel_pmecc_nand_init_params(pdev, host);
+ else
+ res = atmel_hw_nand_init_params(pdev, host);
+
+ if (res != 0)
+ goto err_hw_ecc;
+ }
+
+ /* initialize the nfc configuration register */
+ if (host->nfc && host->nfc->use_nfc_sram) {
+ res = nfc_sram_init(mtd);
+ if (res) {
+ host->nfc->use_nfc_sram = false;
+			dev_err(host->dev, "Disabling NFC SRAM for data transfer.\n");
+ }
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
+ res = -ENXIO;
+ goto err_scan_tail;
+ }
+
+ mtd->name = "atmel_nand";
+ ppdata.of_node = pdev->dev.of_node;
+ res = mtd_device_parse_register(mtd, NULL, &ppdata,
+ host->board.parts, host->board.num_parts);
+ if (!res)
+ return res;
+
+err_scan_tail:
+ if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW)
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+err_hw_ecc:
+err_scan_ident:
+err_no_card:
+ atmel_nand_disable(host);
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+err_nand_ioremap:
+ return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int atmel_nand_remove(struct platform_device *pdev)
+{
+ struct atmel_nand_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+
+ atmel_nand_disable(host);
+
+ if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+ pmerrloc_writel(host->pmerrloc_base, ELDIS,
+ PMERRLOC_DISABLE);
+ }
+
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+
+ platform_driver_unregister(&atmel_nand_nfc_driver);
+
+ return 0;
+}
+
+static struct atmel_nand_caps at91rm9200_caps = {
+ .pmecc_correct_erase_page = false,
+};
+
+static struct atmel_nand_caps sama5d4_caps = {
+ .pmecc_correct_erase_page = true,
+};
+
+static const struct of_device_id atmel_nand_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps },
+ { .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
+
+static int atmel_nand_nfc_probe(struct platform_device *pdev)
+{
+ struct atmel_nfc *nfc = &nand_nfc;
+ struct resource *nfc_cmd_regs, *nfc_hsmc_regs, *nfc_sram;
+ int ret;
+
+ nfc_cmd_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->base_cmd_regs = devm_ioremap_resource(&pdev->dev, nfc_cmd_regs);
+ if (IS_ERR(nfc->base_cmd_regs))
+ return PTR_ERR(nfc->base_cmd_regs);
+
+ nfc_hsmc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ nfc->hsmc_regs = devm_ioremap_resource(&pdev->dev, nfc_hsmc_regs);
+ if (IS_ERR(nfc->hsmc_regs))
+ return PTR_ERR(nfc->hsmc_regs);
+
+ nfc_sram = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (nfc_sram) {
+ nfc->sram_bank0 = (void * __force)
+ devm_ioremap_resource(&pdev->dev, nfc_sram);
+ if (IS_ERR(nfc->sram_bank0)) {
+			dev_warn(&pdev->dev, "Failed to ioremap the NFC SRAM (error %ld); disabling NFC SRAM.\n",
+					PTR_ERR(nfc->sram_bank0));
+ } else {
+ nfc->use_nfc_sram = true;
+ nfc->sram_bank0_phys = (dma_addr_t)nfc_sram->start;
+
+ if (pdev->dev.of_node)
+ nfc->write_by_sram = of_property_read_bool(
+ pdev->dev.of_node,
+ "atmel,write-by-sram");
+ }
+ }
+
+ nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
+ nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */
+
+ nfc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(nfc->clk)) {
+ ret = clk_prepare_enable(nfc->clk);
+ if (ret)
+ return ret;
+ } else {
+ dev_warn(&pdev->dev, "NFC clock missing, update your Device Tree");
+ }
+
+ nfc->is_initialized = true;
+ dev_info(&pdev->dev, "NFC is probed.\n");
+
+ return 0;
+}
+
+static int atmel_nand_nfc_remove(struct platform_device *pdev)
+{
+ struct atmel_nfc *nfc = &nand_nfc;
+
+ if (!IS_ERR(nfc->clk))
+ clk_disable_unprepare(nfc->clk);
+
+ return 0;
+}
+
+static const struct of_device_id atmel_nand_nfc_match[] = {
+ { .compatible = "atmel,sama5d3-nfc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
+
+static struct platform_driver atmel_nand_nfc_driver = {
+ .driver = {
+ .name = "atmel_nand_nfc",
+ .of_match_table = of_match_ptr(atmel_nand_nfc_match),
+ },
+ .probe = atmel_nand_nfc_probe,
+ .remove = atmel_nand_nfc_remove,
+};
+
+static struct platform_driver atmel_nand_driver = {
+ .probe = atmel_nand_probe,
+ .remove = atmel_nand_remove,
+ .driver = {
+ .name = "atmel_nand",
+ .of_match_table = of_match_ptr(atmel_nand_dt_ids),
+ },
+};
+
+module_platform_driver(atmel_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rick Bronson");
+MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91 / AVR32");
+MODULE_ALIAS("platform:atmel_nand");
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
new file mode 100644
index 000000000..668e7358f
--- /dev/null
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -0,0 +1,158 @@
+/*
+ * Error Corrected Code Controller (ECC) - System peripheral registers.
+ * Based on AT91SAM9260 datasheet revision B.
+ *
+ * Copyright (C) 2007 Andrew Victor
+ * Copyright (C) 2007 - 2012 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ATMEL_NAND_ECC_H
+#define ATMEL_NAND_ECC_H
+
+#define ATMEL_ECC_CR 0x00 /* Control register */
+#define ATMEL_ECC_RST (1 << 0) /* Reset parity */
+
+#define ATMEL_ECC_MR 0x04 /* Mode register */
+#define ATMEL_ECC_PAGESIZE (3 << 0) /* Page Size */
+#define ATMEL_ECC_PAGESIZE_528 (0)
+#define ATMEL_ECC_PAGESIZE_1056 (1)
+#define ATMEL_ECC_PAGESIZE_2112 (2)
+#define ATMEL_ECC_PAGESIZE_4224 (3)
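+/* These sizes appear to include the spare area: e.g. 528 = 512 + 16 and
+ * 2112 = 2048 + 64 for typical small- and large-page NAND devices. */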
+
+#define ATMEL_ECC_SR 0x08 /* Status register */
+#define ATMEL_ECC_RECERR (1 << 0) /* Recoverable Error */
+#define ATMEL_ECC_ECCERR (1 << 1) /* ECC Single Bit Error */
+#define ATMEL_ECC_MULERR (1 << 2) /* Multiple Errors */
+
+#define ATMEL_ECC_PR 0x0c /* Parity register */
+#define ATMEL_ECC_BITADDR (0xf << 0) /* Bit Error Address */
+#define ATMEL_ECC_WORDADDR (0xfff << 4) /* Word Error Address */
+
+#define ATMEL_ECC_NPR 0x10 /* NParity register */
+#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
+
+/* PMECC Register Definitions */
+#define ATMEL_PMECC_CFG 0x000 /* Configuration Register */
+#define PMECC_CFG_BCH_ERR2 (0 << 0)
+#define PMECC_CFG_BCH_ERR4 (1 << 0)
+#define PMECC_CFG_BCH_ERR8 (2 << 0)
+#define PMECC_CFG_BCH_ERR12 (3 << 0)
+#define PMECC_CFG_BCH_ERR24 (4 << 0)
+
+#define PMECC_CFG_SECTOR512 (0 << 4)
+#define PMECC_CFG_SECTOR1024 (1 << 4)
+
+#define PMECC_CFG_PAGE_1SECTOR (0 << 8)
+#define PMECC_CFG_PAGE_2SECTORS (1 << 8)
+#define PMECC_CFG_PAGE_4SECTORS (2 << 8)
+#define PMECC_CFG_PAGE_8SECTORS (3 << 8)
+
+#define PMECC_CFG_READ_OP (0 << 12)
+#define PMECC_CFG_WRITE_OP (1 << 12)
+
+#define PMECC_CFG_SPARE_ENABLE (1 << 16)
+#define PMECC_CFG_SPARE_DISABLE (0 << 16)
+
+#define PMECC_CFG_AUTO_ENABLE (1 << 20)
+#define PMECC_CFG_AUTO_DISABLE (0 << 20)
+
+#define ATMEL_PMECC_SAREA 0x004 /* Spare area size */
+#define ATMEL_PMECC_SADDR 0x008 /* PMECC starting address */
+#define ATMEL_PMECC_EADDR 0x00c /* PMECC ending address */
+#define ATMEL_PMECC_CLK 0x010 /* PMECC clock control */
+#define PMECC_CLK_133MHZ (2 << 0)
+
+#define ATMEL_PMECC_CTRL 0x014 /* PMECC control register */
+#define PMECC_CTRL_RST (1 << 0)
+#define PMECC_CTRL_DATA (1 << 1)
+#define PMECC_CTRL_USER (1 << 2)
+#define PMECC_CTRL_ENABLE (1 << 4)
+#define PMECC_CTRL_DISABLE (1 << 5)
+
+#define ATMEL_PMECC_SR 0x018 /* PMECC status register */
+#define PMECC_SR_BUSY (1 << 0)
+#define PMECC_SR_ENABLE (1 << 4)
+
+#define ATMEL_PMECC_IER 0x01c /* PMECC interrupt enable */
+#define PMECC_IER_ENABLE (1 << 0)
+#define ATMEL_PMECC_IDR 0x020 /* PMECC interrupt disable */
+#define PMECC_IER_DISABLE (1 << 0)
+#define ATMEL_PMECC_IMR 0x024 /* PMECC interrupt mask */
+#define PMECC_IER_MASK (1 << 0)
+#define ATMEL_PMECC_ISR 0x028 /* PMECC interrupt status */
+#define ATMEL_PMECC_ECCx 0x040 /* PMECC ECC x */
+#define ATMEL_PMECC_REMx 0x240 /* PMECC REM x */
+
+/* PMERRLOC Register Definitions */
+#define ATMEL_PMERRLOC_ELCFG 0x000 /* Error location config */
+#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0)
+#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0)
+#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16)
+
+#define ATMEL_PMERRLOC_ELPRIM 0x004 /* Error location primitive */
+#define ATMEL_PMERRLOC_ELEN 0x008 /* Error location enable */
+#define ATMEL_PMERRLOC_ELDIS 0x00c /* Error location disable */
+#define PMERRLOC_DISABLE (1 << 0)
+
+#define ATMEL_PMERRLOC_ELSR 0x010 /* Error location status */
+#define PMERRLOC_ELSR_BUSY (1 << 0)
+#define ATMEL_PMERRLOC_ELIER 0x014 /* Error location int enable */
+#define ATMEL_PMERRLOC_ELIDR 0x018 /* Error location int disable */
+#define ATMEL_PMERRLOC_ELIMR 0x01c /* Error location int mask */
+#define ATMEL_PMERRLOC_ELISR 0x020 /* Error location int status */
+#define PMERRLOC_ERR_NUM_MASK (0x1f << 8)
+#define PMERRLOC_CALC_DONE (1 << 0)
+#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */
+#define ATMEL_PMERRLOC_ELx 0x08c /* Error location x */
+
+/* Register access macros for PMECC */
+#define pmecc_readl_relaxed(addr, reg) \
+ readl_relaxed((addr) + ATMEL_PMECC_##reg)
+
+#define pmecc_writel(addr, reg, value) \
+ writel((value), (addr) + ATMEL_PMECC_##reg)
+
+#define pmecc_readb_ecc_relaxed(addr, sector, n) \
+ readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
+
+#define pmecc_readl_rem_relaxed(addr, sector, n) \
+ readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
+
+#define pmerrloc_readl_relaxed(addr, reg) \
+ readl_relaxed((addr) + ATMEL_PMERRLOC_##reg)
+
+#define pmerrloc_writel(addr, reg, value) \
+ writel((value), (addr) + ATMEL_PMERRLOC_##reg)
+
+#define pmerrloc_writel_sigma_relaxed(addr, n, value) \
+ writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
+
+#define pmerrloc_readl_sigma_relaxed(addr, n) \
+ readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
+
+#define pmerrloc_readl_el_relaxed(addr, n) \
+ readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4))
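+
+/* Illustrative expansion of the accessors above: a call such as
+ *
+ *	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+ *
+ * becomes writel(PMECC_CTRL_ENABLE, host->ecc + ATMEL_PMECC_CTRL); the
+ * register name is token-pasted onto the ATMEL_PMECC_ prefix to form the
+ * offset. "host->ecc" is just an example base pointer. */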
+
+/* Galois field dimension */
+#define PMECC_GF_DIMENSION_13 13
+#define PMECC_GF_DIMENSION_14 14
+
+/* Primitive Polynomial used by PMECC */
+#define PMECC_GF_13_PRIMITIVE_POLY 0x201b
+#define PMECC_GF_14_PRIMITIVE_POLY 0x4443
+
+#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
+#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
+
+/* Time out value for reading PMECC status register */
+#define PMECC_MAX_TIMEOUT_MS 100
+
+/* Reserved bytes in oob area */
+#define PMECC_OOB_RESERVED_BYTES 2
+
+#endif
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
new file mode 100644
index 000000000..4d5d26221
--- /dev/null
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -0,0 +1,103 @@
+/*
+ * Atmel NAND Flash Controller (NFC) - System peripheral registers.
+ * Based on SAMA5D3 datasheet.
+ *
+ * © Copyright 2013 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ATMEL_NAND_NFC_H
+#define ATMEL_NAND_NFC_H
+
+/*
+ * HSMC NFC registers
+ */
+#define ATMEL_HSMC_NFC_CFG 0x00 /* NFC Configuration Register */
+#define NFC_CFG_PAGESIZE (7 << 0)
+#define NFC_CFG_PAGESIZE_512 (0 << 0)
+#define NFC_CFG_PAGESIZE_1024 (1 << 0)
+#define NFC_CFG_PAGESIZE_2048 (2 << 0)
+#define NFC_CFG_PAGESIZE_4096 (3 << 0)
+#define NFC_CFG_PAGESIZE_8192 (4 << 0)
+#define NFC_CFG_WSPARE (1 << 8)
+#define NFC_CFG_RSPARE (1 << 9)
+#define NFC_CFG_NFC_DTOCYC (0xf << 16)
+#define NFC_CFG_NFC_DTOMUL (0x7 << 20)
+#define NFC_CFG_NFC_SPARESIZE (0x7f << 24)
+#define NFC_CFG_NFC_SPARESIZE_BIT_POS 24
+
+#define ATMEL_HSMC_NFC_CTRL 0x04 /* NFC Control Register */
+#define NFC_CTRL_ENABLE (1 << 0)
+#define NFC_CTRL_DISABLE (1 << 1)
+
+#define ATMEL_HSMC_NFC_SR 0x08 /* NFC Status Register */
+#define NFC_SR_BUSY (1 << 8)
+#define NFC_SR_XFR_DONE (1 << 16)
+#define NFC_SR_CMD_DONE (1 << 17)
+#define NFC_SR_DTOE (1 << 20)
+#define NFC_SR_UNDEF (1 << 21)
+#define NFC_SR_AWB (1 << 22)
+#define NFC_SR_ASE (1 << 23)
+#define NFC_SR_RB_EDGE (1 << 24)
+
+#define ATMEL_HSMC_NFC_IER 0x0c
+#define ATMEL_HSMC_NFC_IDR 0x10
+#define ATMEL_HSMC_NFC_IMR 0x14
+#define ATMEL_HSMC_NFC_CYCLE0 0x18 /* NFC Address Cycle Zero */
+#define ATMEL_HSMC_NFC_ADDR_CYCLE0 (0xff)
+
+#define ATMEL_HSMC_NFC_BANK 0x1c /* NFC Bank Register */
+#define ATMEL_HSMC_NFC_BANK0 (0 << 0)
+#define ATMEL_HSMC_NFC_BANK1 (1 << 0)
+
+#define nfc_writel(addr, reg, value) \
+ writel((value), (addr) + ATMEL_HSMC_NFC_##reg)
+
+#define nfc_readl(addr, reg) \
+ readl_relaxed((addr) + ATMEL_HSMC_NFC_##reg)
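+
+/* Illustrative expansion: nfc_writel(host->nfc->hsmc_regs, CFG, cfg_nfc)
+ * becomes writel(cfg_nfc, host->nfc->hsmc_regs + ATMEL_HSMC_NFC_CFG).
+ * Note the asymmetry: writes use writel() while reads use the relaxed,
+ * barrier-free readl_relaxed(). */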
+
+/*
+ * NFC Address Command definitions
+ */
+#define NFCADDR_CMD_CMD1 (0xff << 2) /* Command for Cycle 1 */
+#define NFCADDR_CMD_CMD1_BIT_POS 2
+#define NFCADDR_CMD_CMD2 (0xff << 10) /* Command for Cycle 2 */
+#define NFCADDR_CMD_CMD2_BIT_POS 10
+#define NFCADDR_CMD_VCMD2 (0x1 << 18) /* Valid Cycle 2 Command */
+#define NFCADDR_CMD_ACYCLE (0x7 << 19) /* Number of Address required */
+#define NFCADDR_CMD_ACYCLE_NONE (0x0 << 19)
+#define NFCADDR_CMD_ACYCLE_1 (0x1 << 19)
+#define NFCADDR_CMD_ACYCLE_2 (0x2 << 19)
+#define NFCADDR_CMD_ACYCLE_3 (0x3 << 19)
+#define NFCADDR_CMD_ACYCLE_4 (0x4 << 19)
+#define NFCADDR_CMD_ACYCLE_5 (0x5 << 19)
+#define NFCADDR_CMD_ACYCLE_BIT_POS 19
+#define NFCADDR_CMD_CSID (0x7 << 22) /* Chip Select Identifier */
+#define NFCADDR_CMD_CSID_0 (0x0 << 22)
+#define NFCADDR_CMD_CSID_1 (0x1 << 22)
+#define NFCADDR_CMD_CSID_2 (0x2 << 22)
+#define NFCADDR_CMD_CSID_3 (0x3 << 22)
+#define NFCADDR_CMD_CSID_4 (0x4 << 22)
+#define NFCADDR_CMD_CSID_5 (0x5 << 22)
+#define NFCADDR_CMD_CSID_6 (0x6 << 22)
+#define NFCADDR_CMD_CSID_7 (0x7 << 22)
+#define NFCADDR_CMD_DATAEN (0x1 << 25) /* Data Transfer Enable */
+#define NFCADDR_CMD_DATADIS (0x0 << 25) /* Data Transfer Disable */
+#define NFCADDR_CMD_NFCRD (0x0 << 26) /* NFC Read Enable */
+#define NFCADDR_CMD_NFCWR (0x1 << 26) /* NFC Write Enable */
+#define NFCADDR_CMD_NFCBUSY (0x1 << 27) /* NFC Busy */
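+
+/* Sketch of how a command word is composed from the fields above (purely
+ * illustrative; the driver builds the real value in nfc_nand_command()):
+ * a large-page read on chip select 0 with five address cycles might be
+ *
+ *	NFCADDR_CMD_NFCRD | NFCADDR_CMD_CSID_0 | NFCADDR_CMD_DATAEN |
+ *	NFCADDR_CMD_ACYCLE_5 | NFCADDR_CMD_VCMD2 |
+ *	(NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS) |
+ *	(NAND_CMD_READ0 << NFCADDR_CMD_CMD1_BIT_POS);
+ */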
+
+#define nfc_cmd_addr1234_writel(cmd, addr1234, nfc_base) \
+ writel((addr1234), (cmd) + nfc_base)
+
+#define nfc_cmd_readl(bitstatus, nfc_base) \
+ readl_relaxed((bitstatus) + nfc_base)
+
+#define NFC_TIME_OUT_MS 100
+#define NFC_SRAM_BANK1_OFFSET 0x1200
+
+#endif
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
new file mode 100644
index 000000000..c0c3be180
--- /dev/null
+++ b/drivers/mtd/nand/au1550nd.c
@@ -0,0 +1,515 @@
+/*
+ * drivers/mtd/nand/au1550nd.c
+ *
+ * Copyright (C) 2004 Embedded Edge, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1550nd.h>
+
+
+struct au1550nd_ctx {
+ struct mtd_info info;
+ struct nand_chip chip;
+
+ int cs;
+ void __iomem *base;
+ void (*write_byte)(struct mtd_info *, u_char);
+};
+
+/**
+ * au_read_byte - read one byte from the chip
+ * @mtd: MTD device structure
+ *
+ * read function for 8bit buswidth
+ */
+static u_char au_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ u_char ret = readb(this->IO_ADDR_R);
+ wmb(); /* drain writebuffer */
+ return ret;
+}
+
+/**
+ * au_write_byte - write one byte to the chip
+ * @mtd: MTD device structure
+ * @byte: data byte to write
+ *
+ * write function for 8bit buswidth
+ */
+static void au_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ writeb(byte, this->IO_ADDR_W);
+ wmb(); /* drain writebuffer */
+}
+
+/**
+ * au_read_byte16 - read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * read function for 16bit buswidth with endianness conversion
+ */
+static u_char au_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
+ wmb(); /* drain writebuffer */
+ return ret;
+}
+
+/**
+ * au_write_byte16 - write one byte endianness aware to the chip
+ * @mtd: MTD device structure
+ * @byte: data byte to write
+ *
+ * write function for 16bit buswidth with endianness conversion
+ */
+static void au_write_byte16(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
+ wmb(); /* drain writebuffer */
+}
+
+/**
+ * au_read_word - read one word from the chip
+ * @mtd: MTD device structure
+ *
+ * read function for 16bit buswidth without endianness conversion
+ */
+static u16 au_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ u16 ret = readw(this->IO_ADDR_R);
+ wmb(); /* drain writebuffer */
+ return ret;
+}
+
+/**
+ * au_write_buf - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * write function for 8bit buswidth
+ */
+static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++) {
+ writeb(buf[i], this->IO_ADDR_W);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+/**
+ * au_read_buf - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * read function for 8bit buswidth
+ */
+static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++) {
+ buf[i] = readb(this->IO_ADDR_R);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+/**
+ * au_write_buf16 - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * write function for 16bit buswidth
+ */
+static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++) {
+ writew(p[i], this->IO_ADDR_W);
+ wmb(); /* drain writebuffer */
+ }
+
+}
+
+/**
+ * au_read_buf16 - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * read function for 16bit buswidth
+ */
+static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++) {
+ p[i] = readw(this->IO_ADDR_R);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+/* Select the chip by setting nCE to low */
+#define NAND_CTL_SETNCE 1
+/* Deselect the chip by setting nCE to high */
+#define NAND_CTL_CLRNCE 2
+/* Select the command latch by setting CLE to high */
+#define NAND_CTL_SETCLE 3
+/* Deselect the command latch by setting CLE to low */
+#define NAND_CTL_CLRCLE 4
+/* Select the address latch by setting ALE to high */
+#define NAND_CTL_SETALE 5
+/* Deselect the address latch by setting ALE to low */
+#define NAND_CTL_CLRALE 6
+
+static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
+ struct nand_chip *this = mtd->priv;
+
+ switch (cmd) {
+
+ case NAND_CTL_SETCLE:
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
+ break;
+
+ case NAND_CTL_CLRCLE:
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
+ break;
+
+ case NAND_CTL_SETALE:
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
+ break;
+
+ case NAND_CTL_CLRALE:
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
+ /* FIXME: Nobody knows why this is necessary,
+ * but it works only that way */
+ udelay(1);
+ break;
+
+ case NAND_CTL_SETNCE:
+ /* assert (force assert) chip enable */
+ alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
+ break;
+
+ case NAND_CTL_CLRNCE:
+ /* deassert chip enable */
+ alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
+ break;
+ }
+
+ this->IO_ADDR_R = this->IO_ADDR_W;
+
+ wmb(); /* Drain the writebuffer */
+}
+
+int au1550_device_ready(struct mtd_info *mtd)
+{
+ return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0;
+}
+
+/**
+ * au1550_select_chip - control -CE line
+ * Forbid driving -CE manually, permitting the NAND controller to do it.
+ * Keeping -CE asserted during whole sector reads interferes with the
+ * NOR flash and PCMCIA drivers, as it causes contention on the static bus.
+ * We only have to hold -CE low for the NAND read commands, since the flash
+ * chip needs it asserted while the chip is not ready, but the NAND
+ * controller keeps it released.
+ *
+ * @mtd: MTD device structure
+ * @chip: chipnumber to select, -1 for deselect
+ */
+static void au1550_select_chip(struct mtd_info *mtd, int chip)
+{
+}
+
+/**
+ * au1550_command - Send command to NAND device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ */
+static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
+ struct nand_chip *this = mtd->priv;
+ int ce_override = 0, i;
+ unsigned long flags = 0;
+
+ /* Begin command latch cycle */
+ au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
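+		/* Small-page NAND needs a pointer command (READ0, READ1 or
+		 * READOOB) before SEQIN to select whether the following
+		 * write targets the first or second 256-byte half of the
+		 * page or the OOB area. */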
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ ctx->write_byte(mtd, readcmd);
+ }
+ ctx->write_byte(mtd, command);
+
+ /* Set ALE and clear CLE to start address cycle */
+ au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
+
+ if (column != -1 || page_addr != -1) {
+ au1550_hwcontrol(mtd, NAND_CTL_SETALE);
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (this->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ ctx->write_byte(mtd, column);
+ }
+ if (page_addr != -1) {
+ ctx->write_byte(mtd, (u8)(page_addr & 0xff));
+
+ if (command == NAND_CMD_READ0 ||
+ command == NAND_CMD_READ1 ||
+ command == NAND_CMD_READOOB) {
+ /*
+ * NAND controller will release -CE after
+ * the last address byte is written, so we'll
+ * have to forcibly assert it. No interrupts
+ * are allowed while we do this as we don't
+ * want the NOR flash or PCMCIA drivers to
+ * steal our precious bytes of data...
+ */
+ ce_override = 1;
+ local_irq_save(flags);
+ au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
+ }
+
+ ctx->write_byte(mtd, (u8)(page_addr >> 8));
+
+ /* One more address cycle for devices > 32MiB */
+ if (this->chipsize > (32 << 20))
+ ctx->write_byte(mtd,
+ ((page_addr >> 16) & 0x0f));
+ }
+ /* Latch in address */
+ au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
+ }
+
+ /*
+ * Program and erase have their own busy handlers.
+ * Status and sequential in need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ break;
+
+ case NAND_CMD_READ0:
+ case NAND_CMD_READ1:
+ case NAND_CMD_READOOB:
+ /* Check if we're really driving -CE low (just in case) */
+ if (unlikely(!ce_override))
+ break;
+
+ /* Apply a short delay always to ensure that we do wait tWB. */
+ ndelay(100);
+ /* Wait for a chip to become ready... */
+ for (i = this->chip_delay; !this->dev_ready(mtd) && i > 0; --i)
+ udelay(1);
+
+ /* Release -CE and re-enable interrupts. */
+ au1550_hwcontrol(mtd, NAND_CTL_CLRNCE);
+ local_irq_restore(flags);
+ return;
+ }
+ /* Apply this short delay always to ensure that we do wait tWB. */
+ ndelay(100);
+
+	while (!this->dev_ready(mtd));
+}
+
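+/*
+ * Decode which static bus chip select covers nand_base. Worked example
+ * (register contents assumed): if STADDRx yields start = 0x20000000 and
+ * mask = 0xfc000000, then end = (start | (start - 1)) & ~(start ^ mask)
+ * = 0x3fffffff & ~0xdc000000 = 0x23ffffff, i.e. a 64 MiB window at
+ * 0x20000000..0x23ffffff.
+ */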
+static int find_nand_cs(unsigned long nand_base)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+ unsigned long addr, staddr, start, mask, end;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ addr = 0x1000 + (i * 0x10); /* CSx */
+ staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
+ /* figure out the decoded range of this CS */
+ start = (staddr << 4) & 0xfffc0000;
+ mask = (staddr << 18) & 0xfffc0000;
+ end = (start | (start - 1)) & ~(start ^ mask);
+ if ((nand_base >= start) && (nand_base < end))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static int au1550nd_probe(struct platform_device *pdev)
+{
+ struct au1550nd_platdata *pd;
+ struct au1550nd_ctx *ctx;
+ struct nand_chip *this;
+ struct resource *r;
+ int ret, cs;
+
+ pd = dev_get_platdata(&pdev->dev);
+ if (!pd) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no NAND memory resource\n");
+ ret = -ENODEV;
+ goto out1;
+ }
+	if (!request_mem_region(r->start, resource_size(r), "au1550-nand")) {
+ dev_err(&pdev->dev, "cannot claim NAND memory area\n");
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ ctx->base = ioremap_nocache(r->start, 0x1000);
+ if (!ctx->base) {
+ dev_err(&pdev->dev, "cannot remap NAND memory area\n");
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ this = &ctx->chip;
+ ctx->info.priv = this;
+ ctx->info.owner = THIS_MODULE;
+
+ /* figure out which CS# r->start belongs to */
+ cs = find_nand_cs(r->start);
+ if (cs < 0) {
+ dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
+ ret = -ENODEV;
+ goto out3;
+ }
+ ctx->cs = cs;
+
+ this->dev_ready = au1550_device_ready;
+ this->select_chip = au1550_select_chip;
+ this->cmdfunc = au1550_command;
+
+ /* 30 us command delay time */
+ this->chip_delay = 30;
+ this->ecc.mode = NAND_ECC_SOFT;
+
+ if (pd->devwidth)
+ this->options |= NAND_BUSWIDTH_16;
+
+ this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
+ ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
+ this->read_word = au_read_word;
+ this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
+ this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
+
+ ret = nand_scan(&ctx->info, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
+ goto out3;
+ }
+
+ mtd_device_register(&ctx->info, pd->parts, pd->num_parts);
+
+ platform_set_drvdata(pdev, ctx);
+
+ return 0;
+
+out3:
+ iounmap(ctx->base);
+out2:
+ release_mem_region(r->start, resource_size(r));
+out1:
+ kfree(ctx);
+ return ret;
+}
+
+static int au1550nd_remove(struct platform_device *pdev)
+{
+ struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
+ struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ nand_release(&ctx->info);
+ iounmap(ctx->base);
+	release_mem_region(r->start, resource_size(r));
+ kfree(ctx);
+ return 0;
+}
+
+static struct platform_driver au1550nd_driver = {
+ .driver = {
+ .name = "au1550-nand",
+ },
+ .probe = au1550nd_probe,
+ .remove = au1550nd_remove,
+};
+
+module_platform_driver(au1550nd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Embedded Edge, LLC");
+MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
diff --git a/drivers/mtd/nand/bcm47xxnflash/Makefile b/drivers/mtd/nand/bcm47xxnflash/Makefile
new file mode 100644
index 000000000..f05b119e1
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/Makefile
@@ -0,0 +1,4 @@
+bcm47xxnflash-y += main.o
+bcm47xxnflash-y += ops_bcm4706.o
+
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash.o
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
new file mode 100644
index 000000000..c005a6233
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -0,0 +1,26 @@
+#ifndef __BCM47XXNFLASH_H
+#define __BCM47XXNFLASH_H
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+
+struct bcm47xxnflash {
+ struct bcma_drv_cc *cc;
+
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+
+ unsigned curr_command;
+ int curr_page_addr;
+ int curr_column;
+
+ u8 id_data[8];
+};
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
+
+#endif /* __BCM47XXNFLASH_H */
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
new file mode 100644
index 000000000..461577cfb
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -0,0 +1,79 @@
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "bcm47xxnflash.h"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rafał Miłecki");
+
+static const char *probes[] = { "bcm47xxpart", NULL };
+
+static int bcm47xxnflash_probe(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxnflash *b47n;
+ int err = 0;
+
+ b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
+ if (!b47n)
+ return -ENOMEM;
+
+ b47n->nand_chip.priv = b47n;
+ b47n->mtd.owner = THIS_MODULE;
+ b47n->mtd.priv = &b47n->nand_chip; /* Required */
+ b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
+
+ if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ err = bcm47xxnflash_ops_bcm4706_init(b47n);
+ } else {
+ pr_err("Device not supported\n");
+ err = -ENOTSUPP;
+ }
+ if (err) {
+ pr_err("Initialization failed: %d\n", err);
+ return err;
+ }
+
+ err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int bcm47xxnflash_remove(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+
+ if (nflash->mtd)
+ mtd_device_unregister(nflash->mtd);
+
+ return 0;
+}
+
+static struct platform_driver bcm47xxnflash_driver = {
+ .probe = bcm47xxnflash_probe,
+ .remove = bcm47xxnflash_remove,
+ .driver = {
+ .name = "bcma_nflash",
+ },
+};
+
+module_platform_driver(bcm47xxnflash_driver);
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
new file mode 100644
index 000000000..592befc7f
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -0,0 +1,454 @@
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "bcm47xxnflash.h"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/bcma/bcma.h>
+
+/* Broadcom uses 1'000'000 but it seems to be too many. Tests on a WNDR4500
+ * have shown ~1000 retries as the maximum. */
+#define NFLASH_READY_RETRIES 10000
+
+#define NFLASH_SECTOR_SIZE 512
+
+#define NCTL_CMD0 0x00010000
+#define NCTL_COL 0x00020000 /* Update column with value from BCMA_CC_NFLASH_COL_ADDR */
+#define NCTL_ROW 0x00040000 /* Update row (page) with value from BCMA_CC_NFLASH_ROW_ADDR */
+#define NCTL_CMD1W 0x00080000
+#define NCTL_READ 0x00100000
+#define NCTL_WRITE 0x00200000
+#define NCTL_SPECADDR 0x01000000
+#define NCTL_READY 0x04000000
+#define NCTL_ERR 0x08000000
+#define NCTL_CSA 0x40000000
+#define NCTL_START 0x80000000
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
+{
+ return ((ns * 1000 * clock) / 1000000) + 1;
+}
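+/* Example with an assumed 100 MHz controller clock: a 15 ns constraint
+ * becomes ((15 * 1000 * 100) / 1000000) + 1 = 2 cycles; the "+ 1"
+ * compensates for the truncating division so the programmed time is
+ * never shorter than requested. */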
+
+static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
+{
+ int i = 0;
+
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
+ i = 0;
+ break;
+ }
+ }
+ if (i) {
+ pr_err("NFLASH control command not ready!\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
+{
+ int i;
+
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
+ BCMA_CC_NFLASH_CTL_ERR) {
+ pr_err("Error on polling\n");
+ return -EBUSY;
+ } else {
+ return 0;
+ }
+ }
+ }
+
+ pr_err("Polling timeout!\n");
+ return -EBUSY;
+}
+
+/**************************************************
+ * R/W
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ u32 ctlcode;
+ u32 *dest = (u32 *)buf;
+ int i;
+ int toread;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate column using nand_chip->page_shift, it may be bigger
+ * when accessing OOB */
+
+ while (len) {
+ /* We can read maximum of 0x200 bytes at once */
+ toread = min(len, 0x200);
+
+ /* Set page and column */
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to read */
+ ctlcode = NCTL_CSA | NCTL_CMD1W | NCTL_ROW | NCTL_COL |
+ NCTL_CMD0;
+ ctlcode |= NAND_CMD_READSTART << 8;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
+ return;
+ if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
+ return;
+
+ /* Eventually read some data :) */
+ for (i = 0; i < toread; i += 4, dest++) {
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
+			if (i == toread - 4) /* Last read goes without NCTL_CSA */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode))
+ return;
+ *dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
+ }
+
+ b47n->curr_column += toread;
+ len -= toread;
+ }
+}
+
+static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+
+ u32 ctlcode;
+ const u32 *data = (u32 *)buf;
+ int i;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate column using nand_chip->page_shift, it may be bigger
+ * when accessing OOB */
+
+ for (i = 0; i < len; i += 4, data++) {
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
+
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
+		if (i == len - 4) /* Last write goes without NCTL_CSA */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
+ pr_err("%s ctl_cmd didn't work!\n", __func__);
+ return;
+ }
+ }
+
+ b47n->curr_column += len;
+}
+
+/**************************************************
+ * NAND chip ops
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ u32 code = 0;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (cmd & NAND_CTRL_CLE)
+ code = cmd | NCTL_CMD0;
+
+ /* nCS is not needed for reset command */
+ if (cmd != NAND_CMD_RESET)
+ code |= NCTL_CSA;
+
+ bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+}
+
+/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
+static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd,
+ int chip)
+{
+ return;
+}
+
+static int bcm47xxnflash_ops_bcm4706_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ return !!(bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_CTL) & NCTL_READY);
+}
+
+/*
+ * Default nand_command and nand_command_lp don't match BCM4706 hardware layout.
+ * For example, reading chip id is performed in a non-standard way.
+ * Setting column and page is also handled differently: we use special
+ * registers of the ChipCommon core. Hacking cmd_ctrl to understand and convert
+ * standard commands would be much more complicated.
+ */
+static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
+ unsigned command, int column,
+ int page_addr)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 ctlcode;
+ int i;
+
+ if (column != -1)
+ b47n->curr_column = column;
+ if (page_addr != -1)
+ b47n->curr_page_addr = page_addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ nand_chip->cmd_ctrl(mtd, command, NAND_CTRL_CLE);
+
+ ndelay(100);
+ nand_wait_ready(mtd);
+ break;
+ case NAND_CMD_READID:
+ ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
+ ctlcode |= NAND_CMD_READID;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+
+ /*
+		 * Reading is special: the last access has to go without the
+		 * NCTL_CSA bit. We don't know how many reads the NAND
+		 * subsystem is going to perform, so cache everything.
+ */
+ for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
+ ctlcode = NCTL_CSA | NCTL_READ;
+ if (i == ARRAY_SIZE(b47n->id_data) - 1)
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+ b47n->id_data[i] =
+ bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
+ & 0xFF;
+ }
+
+ break;
+ case NAND_CMD_STATUS:
+ ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("STATUS command error\n");
+ break;
+ case NAND_CMD_READ0:
+ break;
+ case NAND_CMD_READOOB:
+ if (page_addr != -1)
+ b47n->curr_column += mtd->writesize;
+ break;
+ case NAND_CMD_ERASE1:
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+ ctlcode = NCTL_ROW | NCTL_CMD1W | NCTL_CMD0 |
+ NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("ERASE1 failed\n");
+ break;
+ case NAND_CMD_ERASE2:
+ break;
+ case NAND_CMD_SEQIN:
+ /* Set page and column */
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to write */
+ ctlcode = 0x40000000 | NCTL_ROW | NCTL_COL | NCTL_CMD0;
+ ctlcode |= NAND_CMD_SEQIN;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("SEQIN failed\n");
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_CMD0 |
+ NAND_CMD_PAGEPROG))
+ pr_err("PAGEPROG failed\n");
+ if (bcm47xxnflash_ops_bcm4706_poll(cc))
+ pr_err("PAGEPROG not ready\n");
+ break;
+ default:
+ pr_err("Command 0x%X unsupported\n", command);
+ break;
+ }
+ b47n->curr_command = command;
+}
+
+static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 tmp = 0;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READID:
+ if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
+ pr_err("Requested invalid id_data: %d\n",
+ b47n->curr_column);
+ return 0;
+ }
+ return b47n->id_data[b47n->curr_column++];
+ case NAND_CMD_STATUS:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
+ return 0;
+ return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
+ return tmp & 0xFF;
+ }
+
+ pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
+ return 0;
+}
+
+static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd,
+ uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, buf, len);
+ return;
+ }
+
+ pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
+}
+
+static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_SEQIN:
+ bcm47xxnflash_ops_bcm4706_write(mtd, buf, len);
+ return;
+ }
+
+ pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
+}
+
+/**************************************************
+ * Init
+ **************************************************/
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)&b47n->nand_chip;
+ int err;
+ u32 freq;
+ u16 clock;
+ u8 w0, w1, w2, w3, w4;
+
+ unsigned long chipsize; /* MiB */
+ u8 tbits, col_bits, col_size, row_bits, row_bsize;
+ u32 val;
+
+ b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
+ nand_chip->cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
+ nand_chip->dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
+ b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
+ b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
+ b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
+ b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
+
+ nand_chip->chip_delay = 50;
+ b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
+ b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
+
+ /* Enable NAND flash access */
+ bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ BCMA_CC_4706_FLASHSCFG_NF1);
+
+ /* Configure wait counters */
+ if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
+ /* 400 MHz */
+ freq = 400000000 / 4;
+ } else {
+ freq = bcma_chipco_pll_read(b47n->cc, 4);
+ freq = (freq & 0xFFF) >> 3;
+ /* Fixed reference clock 25 MHz and m = 2 */
+ freq = (freq * 25000000 / 2) / 4;
+ }
+ clock = freq / 1000000;
+ w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
+ w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
+ w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
+ (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
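+	/* Illustrative numbers, assuming clock = 100 (MHz): w0..w4 become
+	 * 2, 3, 2, 2 and 11 cycles, packed into WAITCNT0 at bit offsets
+	 * 0, 6, 12, 18 and 24 respectively. */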
+
+ /* Scan NAND */
+ err = nand_scan(&b47n->mtd, 1);
+ if (err) {
+ pr_err("Could not scan NAND flash: %d\n", err);
+ goto exit;
+ }
+
+ /* Configure FLASH */
+ chipsize = b47n->nand_chip.chipsize >> 20;
+ tbits = ffs(chipsize); /* find first bit set */
+ if (!tbits || tbits != fls(chipsize)) {
+ pr_err("Invalid flash size: 0x%lX\n", chipsize);
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
+
+ col_bits = b47n->nand_chip.page_shift + 1;
+ col_size = (col_bits + 7) / 8;
+
+ row_bits = tbits - col_bits + 1;
+ row_bsize = (row_bits + 7) / 8;
+
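+	/* Worked example with assumed geometry: a 128 MiB chip with
+	 * 2048-byte pages gives chipsize = 128, so tbits = ffs(128) + 19 =
+	 * 27 (2^27 bytes); page_shift = 11, hence col_bits = 12 and
+	 * col_size = 2, leaving row_bits = 27 - 12 + 1 = 16 and
+	 * row_bsize = 2, i.e. val = (1 << 6) | (1 << 4) | 2. */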
+ val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
+
+exit:
+ if (err)
+ bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ ~BCMA_CC_4706_FLASHSCFG_NF1);
+ return err;
+}
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
new file mode 100644
index 000000000..4d8d4ba4b
--- /dev/null
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -0,0 +1,847 @@
+/* linux/drivers/mtd/nand/bf5xx_nand.c
+ *
+ * Copyright 2006-2008 Analog Devices Inc.
+ * http://blackfin.uclinux.org/
+ * Bryan Wu <bryan.wu@analog.com>
+ *
+ * Blackfin BF5xx on-chip NAND flash controller driver
+ *
+ * Derived from drivers/mtd/nand/s3c2410.c
+ * Copyright (c) 2007 Ben Dooks <ben@simtec.co.uk>
+ *
+ * Derived from drivers/mtd/nand/cafe.c
+ * Copyright © 2006 Red Hat, Inc.
+ * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Changelog:
+ * 12-Jun-2007 Bryan Wu: Initial version
+ * 18-Jul-2007 Bryan Wu:
+ * - ECC_HW and ECC_SW supported
+ * - DMA supported in ECC_HW
+ * - YAFFS tested as rootfs in both ECC_HW and ECC_SW
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/blackfin.h>
+#include <asm/dma.h>
+#include <asm/cacheflush.h>
+#include <asm/nand.h>
+#include <asm/portmux.h>
+
+#define DRV_NAME "bf5xx-nand"
+#define DRV_VERSION "1.2"
+#define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>"
+#define DRV_DESC "BF5xx on-chip NAND Flash Controller Driver"
+
+/* NFC_STAT Masks */
+#define NBUSY 0x01 /* Not Busy */
+#define WB_FULL 0x02 /* Write Buffer Full */
+#define PG_WR_STAT 0x04 /* Page Write Pending */
+#define PG_RD_STAT 0x08 /* Page Read Pending */
+#define WB_EMPTY 0x10 /* Write Buffer Empty */
+
+/* NFC_IRQSTAT Masks */
+#define NBUSYIRQ 0x01 /* Not Busy IRQ */
+#define WB_OVF 0x02 /* Write Buffer Overflow */
+#define WB_EDGE 0x04 /* Write Buffer Edge Detect */
+#define RD_RDY 0x08 /* Read Data Ready */
+#define WR_DONE 0x10 /* Page Write Done */
+
+/* NFC_RST Masks */
+#define ECC_RST 0x01 /* ECC (and NFC counters) Reset */
+
+/* NFC_PGCTL Masks */
+#define PG_RD_START 0x01 /* Page Read Start */
+#define PG_WR_START 0x02 /* Page Write Start */
+
+#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
+static int hardware_ecc = 1;
+#else
+static int hardware_ecc;
+#endif
+
+static const unsigned short bfin_nfc_pin_req[] =
+ {P_NAND_CE,
+ P_NAND_RB,
+ P_NAND_D0,
+ P_NAND_D1,
+ P_NAND_D2,
+ P_NAND_D3,
+ P_NAND_D4,
+ P_NAND_D5,
+ P_NAND_D6,
+ P_NAND_D7,
+ P_NAND_WE,
+ P_NAND_RE,
+ P_NAND_CLE,
+ P_NAND_ALE,
+ 0};
+
+#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
+static struct nand_ecclayout bootrom_ecclayout = {
+ .eccbytes = 24,
+ .eccpos = {
+ 0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2,
+ 0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2,
+ 0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2,
+ 0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2,
+ 0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2,
+ 0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2,
+ 0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2,
+ 0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2
+ },
+ .oobfree = {
+ { 0x8 * 0 + 3, 5 },
+ { 0x8 * 1 + 3, 5 },
+ { 0x8 * 2 + 3, 5 },
+ { 0x8 * 3 + 3, 5 },
+ { 0x8 * 4 + 3, 5 },
+ { 0x8 * 5 + 3, 5 },
+ { 0x8 * 6 + 3, 5 },
+ { 0x8 * 7 + 3, 5 },
+ }
+};
+#endif
+
+/*
+ * Data structures for bf5xx nand flash controller driver
+ */
+
+/* bf5xx nand info */
+struct bf5xx_nand_info {
+ /* mtd info */
+ struct nand_hw_control controller;
+ struct mtd_info mtd;
+ struct nand_chip chip;
+
+ /* platform info */
+ struct bf5xx_nand_platform *platform;
+
+ /* device info */
+ struct device *device;
+
+ /* DMA stuff */
+ struct completion dma_completion;
+};
+
+/*
+ * Conversion functions
+ */
+static struct bf5xx_nand_info *mtd_to_nand_info(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct bf5xx_nand_info, mtd);
+}
+
+static struct bf5xx_nand_info *to_nand_info(struct platform_device *pdev)
+{
+ return platform_get_drvdata(pdev);
+}
+
+static struct bf5xx_nand_platform *to_nand_plat(struct platform_device *pdev)
+{
+ return dev_get_platdata(&pdev->dev);
+}
+
+/*
+ * struct nand_chip interface function pointers
+ */
+
+/*
+ * bf5xx_nand_hwcontrol
+ *
+ * Issue command and address cycles to the chip
+ */
+static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ while (bfin_read_NFC_STAT() & WB_FULL)
+ cpu_relax();
+
+ if (ctrl & NAND_CLE)
+ bfin_write_NFC_CMD(cmd);
+ else if (ctrl & NAND_ALE)
+ bfin_write_NFC_ADDR(cmd);
+ SSYNC();
+}
+
+/*
+ * bf5xx_nand_devready()
+ *
+ * returns 0 if the nand is busy, 1 if it is ready
+ */
+static int bf5xx_nand_devready(struct mtd_info *mtd)
+{
+ unsigned short val = bfin_read_NFC_STAT();
+
+ if ((val & NBUSY) == NBUSY)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * ECC functions
+ * These allow the bf5xx to use the controller's ECC
+ * generator block to ECC the data as it passes through
+ */
+
+/*
+ * ECC error correction function
+ */
+static int bf5xx_nand_correct_data_256(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+ u32 syndrome[5];
+ u32 calced, stored;
+ int i;
+ unsigned short failing_bit, failing_byte;
+ u_char data;
+
+ calced = calc_ecc[0] | (calc_ecc[1] << 8) | (calc_ecc[2] << 16);
+ stored = read_ecc[0] | (read_ecc[1] << 8) | (read_ecc[2] << 16);
+
+ syndrome[0] = (calced ^ stored);
+
+ /*
+ * syndrome 0: all zero
+ * No error in data
+ * No action
+ */
+ if (!syndrome[0] || !calced || !stored)
+ return 0;
+
+ /*
+	 * syndrome 0: only one bit is one
+ * ECC data was incorrect
+ * No action
+ */
+ if (hweight32(syndrome[0]) == 1) {
+ dev_err(info->device, "ECC data was incorrect!\n");
+ return 1;
+ }
+
+ syndrome[1] = (calced & 0x7FF) ^ (stored & 0x7FF);
+ syndrome[2] = (calced & 0x7FF) ^ ((calced >> 11) & 0x7FF);
+ syndrome[3] = (stored & 0x7FF) ^ ((stored >> 11) & 0x7FF);
+ syndrome[4] = syndrome[2] ^ syndrome[3];
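+	/* As the correction path below relies on, syndrome[1] is the XOR of
+	 * the stored and calculated 11-bit parity halves; for a single-bit
+	 * error it encodes the failing position directly: bits 2..0 select
+	 * the bit, the remaining bits the byte offset. */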
+
+ for (i = 0; i < 5; i++)
+ dev_info(info->device, "syndrome[%d] 0x%08x\n", i, syndrome[i]);
+
+ dev_info(info->device,
+ "calced[0x%08x], stored[0x%08x]\n",
+ calced, stored);
+
+ /*
+	 * syndrome 0: exactly 11 bits are one, each parity
+ * and parity' pair is 1 & 0 or 0 & 1.
+ * 1-bit correctable error
+ * Correct the error
+ */
+ if (hweight32(syndrome[0]) == 11 && syndrome[4] == 0x7FF) {
+ dev_info(info->device,
+ "1-bit correctable error, correct it.\n");
+ dev_info(info->device,
+ "syndrome[1] 0x%08x\n", syndrome[1]);
+
+ failing_bit = syndrome[1] & 0x7;
+ failing_byte = syndrome[1] >> 0x3;
+ data = *(dat + failing_byte);
+ data = data ^ (0x1 << failing_bit);
+ *(dat + failing_byte) = data;
+
+ return 0;
+ }
+
+ /*
+	 * syndrome 0: random data
+ * More than 1-bit error, non-correctable error
+ * Discard data, mark bad block
+ */
+ dev_err(info->device,
+ "More than 1-bit error, non-correctable error.\n");
+ dev_err(info->device,
+ "Please discard data, mark bad block\n");
+
+ return 1;
+}
+
+static int bf5xx_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
+
+ /* If ecc size is 512, correct second 256 bytes */
+ if (chip->ecc.size == 512) {
+ dat += 256;
+ read_ecc += 3;
+ calc_ecc += 3;
+ ret |= bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
+ }
+
+ return ret;
+}
+
+static void bf5xx_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ return;
+}
+
+static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+ struct nand_chip *chip = mtd->priv;
+ u16 ecc0, ecc1;
+ u32 code[2];
+ u8 *p;
+
+ /* first 3 bytes ECC code for 256 page size */
+ ecc0 = bfin_read_NFC_ECC0();
+ ecc1 = bfin_read_NFC_ECC1();
+
+ code[0] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
+
+ dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
+
+ p = (u8 *) code;
+ memcpy(ecc_code, p, 3);
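+	/* Only 3 of code[0]'s 4 bytes are copied: two 11-bit parity words
+	 * occupy 22 bits, i.e. 3 bytes of ECC per 256 bytes of data (this
+	 * relies on the little-endian layout of code[], which holds on
+	 * Blackfin). */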
+
+ /* second 3 bytes ECC code for 512 ecc size */
+ if (chip->ecc.size == 512) {
+ ecc0 = bfin_read_NFC_ECC2();
+ ecc1 = bfin_read_NFC_ECC3();
+ code[1] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
+
+ /* second 3 bytes in ecc_code for second 256
+ * bytes of 512 page size
+ */
+ p = (u8 *) (code + 1);
+ memcpy((ecc_code + 3), p, 3);
+ dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]);
+ }
+
+ return 0;
+}
+
+/*
+ * PIO mode for buffer writing and reading
+ */
+static void bf5xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ unsigned short val;
+
+ /*
+ * Data reads are requested by first writing to NFC_DATA_RD
+ * and then reading back from NFC_READ.
+ */
+ for (i = 0; i < len; i++) {
+ while (bfin_read_NFC_STAT() & WB_FULL)
+ cpu_relax();
+
+ /* Contents do not matter */
+ bfin_write_NFC_DATA_RD(0x0000);
+ SSYNC();
+
+ while ((bfin_read_NFC_IRQSTAT() & RD_RDY) != RD_RDY)
+ cpu_relax();
+
+ buf[i] = bfin_read_NFC_READ();
+
+ val = bfin_read_NFC_IRQSTAT();
+ val |= RD_RDY;
+ bfin_write_NFC_IRQSTAT(val);
+ SSYNC();
+ }
+}
+
+static uint8_t bf5xx_nand_read_byte(struct mtd_info *mtd)
+{
+ uint8_t val;
+
+ bf5xx_nand_read_buf(mtd, &val, 1);
+
+ return val;
+}
+
+static void bf5xx_nand_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ while (bfin_read_NFC_STAT() & WB_FULL)
+ cpu_relax();
+
+ bfin_write_NFC_DATA_WR(buf[i]);
+ SSYNC();
+ }
+}
+
+static void bf5xx_nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ /*
+ * Data reads are requested by first writing to NFC_DATA_RD
+ * and then reading back from NFC_READ.
+ */
+ bfin_write_NFC_DATA_RD(0x5555);
+
+ SSYNC();
+
+ for (i = 0; i < len; i++)
+ p[i] = bfin_read_NFC_READ();
+}
+
+static void bf5xx_nand_write_buf16(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ int i;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ bfin_write_NFC_DATA_WR(p[i]);
+
+ SSYNC();
+}
+
+/*
+ * DMA functions for buffer writing and reading
+ */
+static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id)
+{
+ struct bf5xx_nand_info *info = dev_id;
+
+ clear_dma_irqstat(CH_NFC);
+ disable_dma(CH_NFC);
+ complete(&info->dma_completion);
+
+ return IRQ_HANDLED;
+}
+
+static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
+ uint8_t *buf, int is_read)
+{
+ struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned short val;
+
+ dev_dbg(info->device, " mtd->%p, buf->%p, is_read %d\n",
+ mtd, buf, is_read);
+
+ /*
+ * Before starting a dma transfer, be sure to invalidate/flush
+ * the cache over the address range of your DMA buffer to
+ * prevent cache coherency problems. Otherwise very subtle bugs
+ * can be introduced to your driver.
+ */
+ if (is_read)
+ invalidate_dcache_range((unsigned int)buf,
+ (unsigned int)(buf + chip->ecc.size));
+ else
+ flush_dcache_range((unsigned int)buf,
+ (unsigned int)(buf + chip->ecc.size));
+
+ /*
+ * This register must be written before each page is
+ * transferred to generate the correct ECC register
+ * values.
+ */
+ bfin_write_NFC_RST(ECC_RST);
+ SSYNC();
+ while (bfin_read_NFC_RST() & ECC_RST)
+ cpu_relax();
+
+ disable_dma(CH_NFC);
+ clear_dma_irqstat(CH_NFC);
+
+ /* setup DMA register with Blackfin DMA API */
+ set_dma_config(CH_NFC, 0x0);
+ set_dma_start_addr(CH_NFC, (unsigned long) buf);
+
+	/* The DMA word sizes differ between BF52x and BF54x */
+#ifdef CONFIG_BF52x
+ set_dma_x_count(CH_NFC, (chip->ecc.size >> 1));
+ set_dma_x_modify(CH_NFC, 2);
+ val = DI_EN | WDSIZE_16;
+#endif
+
+#ifdef CONFIG_BF54x
+ set_dma_x_count(CH_NFC, (chip->ecc.size >> 2));
+ set_dma_x_modify(CH_NFC, 4);
+ val = DI_EN | WDSIZE_32;
+#endif
+ /* setup write or read operation */
+ if (is_read)
+ val |= WNR;
+ set_dma_config(CH_NFC, val);
+ enable_dma(CH_NFC);
+
+ /* Start PAGE read/write operation */
+ if (is_read)
+ bfin_write_NFC_PGCTL(PG_RD_START);
+ else
+ bfin_write_NFC_PGCTL(PG_WR_START);
+ wait_for_completion(&info->dma_completion);
+}
+
+static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
+ uint8_t *buf, int len)
+{
+ struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+	dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len);
+
+ if (len == chip->ecc.size)
+ bf5xx_nand_dma_rw(mtd, buf, 1);
+ else
+ bf5xx_nand_read_buf(mtd, buf, len);
+}
+
+static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len);
+
+ if (len == chip->ecc.size)
+ bf5xx_nand_dma_rw(mtd, (uint8_t *)buf, 0);
+ else
+ bf5xx_nand_write_buf(mtd, buf, len);
+}
+
+static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
+ bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+ bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
+ bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * System initialization functions
+ */
+static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
+{
+ int ret;
+
+ /* Do not use dma */
+ if (!hardware_ecc)
+ return 0;
+
+ init_completion(&info->dma_completion);
+
+ /* Request NFC DMA channel */
+ ret = request_dma(CH_NFC, "BF5XX NFC driver");
+ if (ret < 0) {
+ dev_err(info->device, " unable to get DMA channel\n");
+ return ret;
+ }
+
+#ifdef CONFIG_BF54x
+	/* Set up the DMAC1 channel mux for the NFC, which is shared with SDH */
+ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() & ~1);
+ SSYNC();
+#endif
+
+ set_dma_callback(CH_NFC, bf5xx_nand_dma_irq, info);
+
+ /* Turn off the DMA channel first */
+ disable_dma(CH_NFC);
+ return 0;
+}
+
+static void bf5xx_nand_dma_remove(struct bf5xx_nand_info *info)
+{
+ /* Free NFC DMA channel */
+ if (hardware_ecc)
+ free_dma(CH_NFC);
+}
+
+/*
+ * BF5XX NFC hardware initialization
+ * - pin mux setup
+ * - clear interrupt status
+ */
+static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
+{
+ int err = 0;
+ unsigned short val;
+ struct bf5xx_nand_platform *plat = info->platform;
+
+ /* setup NFC_CTL register */
+ dev_info(info->device,
+ "data_width=%d, wr_dly=%d, rd_dly=%d\n",
+ (plat->data_width ? 16 : 8),
+ plat->wr_dly, plat->rd_dly);
+
+ val = (1 << NFC_PG_SIZE_OFFSET) |
+ (plat->data_width << NFC_NWIDTH_OFFSET) |
+ (plat->rd_dly << NFC_RDDLY_OFFSET) |
+ (plat->wr_dly << NFC_WRDLY_OFFSET);
+ dev_dbg(info->device, "NFC_CTL is 0x%04x\n", val);
+
+ bfin_write_NFC_CTL(val);
+ SSYNC();
+
+ /* clear interrupt status */
+ bfin_write_NFC_IRQMASK(0x0);
+ SSYNC();
+ val = bfin_read_NFC_IRQSTAT();
+ bfin_write_NFC_IRQSTAT(val);
+ SSYNC();
+
+ /* DMA initialization */
+ if (bf5xx_nand_dma_init(info))
+ err = -ENXIO;
+
+ return err;
+}
+
+/*
+ * Device management interface
+ */
+static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
+{
+ struct mtd_info *mtd = &info->mtd;
+ struct mtd_partition *parts = info->platform->partitions;
+ int nr = info->platform->nr_partitions;
+
+ return mtd_device_register(mtd, parts, nr);
+}
+
+static int bf5xx_nand_remove(struct platform_device *pdev)
+{
+ struct bf5xx_nand_info *info = to_nand_info(pdev);
+
+	/* first release the mtd device and its partitions,
+	 * then free the remaining resources
+	 */
+ nand_release(&info->mtd);
+
+ peripheral_free_list(bfin_nfc_pin_req);
+ bf5xx_nand_dma_remove(info);
+
+ return 0;
+}
+
+static int bf5xx_nand_scan(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
+
+ if (hardware_ecc) {
+ /*
+		 * For NAND with a page size > 512 bytes, treat the
+		 * page as several 512-byte sections.
+ */
+ if (likely(mtd->writesize >= 512)) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ chip->ecc.strength = 2;
+ } else {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
+ SSYNC();
+ }
+ }
+
+ return nand_scan_tail(mtd);
+}
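+
+/*
+ * Note on the strength values above: a 512-byte ECC chunk is protected
+ * as two independent 256-byte Hamming codes of three bytes each, so up
+ * to one bit per 256-byte half -- two bits per chunk -- can be corrected.
+ */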
+
+/*
+ * bf5xx_nand_probe
+ *
+ * called by the device layer when it finds a device matching
+ * one our driver can handle. This code checks to see if
+ * it can allocate all necessary resources, then calls the
+ * nand layer to look for devices.
+ */
+static int bf5xx_nand_probe(struct platform_device *pdev)
+{
+ struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
+ struct bf5xx_nand_info *info = NULL;
+ struct nand_chip *chip = NULL;
+ struct mtd_info *mtd = NULL;
+ int err = 0;
+
+ dev_dbg(&pdev->dev, "(%p)\n", pdev);
+
+ if (!plat) {
+ dev_err(&pdev->dev, "no platform specific information\n");
+ return -EINVAL;
+ }
+
+ if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
+ dev_err(&pdev->dev, "requesting Peripherals failed\n");
+ return -EFAULT;
+ }
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ info->device = &pdev->dev;
+ info->platform = plat;
+
+ /* initialise chip data struct */
+ chip = &info->chip;
+
+ if (plat->data_width)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ chip->options |= NAND_CACHEPRG | NAND_SKIP_BBTSCAN;
+
+ chip->read_buf = (plat->data_width) ?
+ bf5xx_nand_read_buf16 : bf5xx_nand_read_buf;
+ chip->write_buf = (plat->data_width) ?
+ bf5xx_nand_write_buf16 : bf5xx_nand_write_buf;
+
+ chip->read_byte = bf5xx_nand_read_byte;
+
+ chip->cmd_ctrl = bf5xx_nand_hwcontrol;
+ chip->dev_ready = bf5xx_nand_devready;
+
+ chip->priv = &info->mtd;
+ chip->controller = &info->controller;
+
+ chip->IO_ADDR_R = (void __iomem *) NFC_READ;
+ chip->IO_ADDR_W = (void __iomem *) NFC_DATA_WR;
+
+ chip->chip_delay = 0;
+
+ /* initialise mtd info data struct */
+ mtd = &info->mtd;
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+
+ /* initialise the hardware */
+ err = bf5xx_nand_hw_init(info);
+ if (err)
+ goto out_err;
+
+ /* setup hardware ECC data struct */
+ if (hardware_ecc) {
+#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
+ chip->ecc.layout = &bootrom_ecclayout;
+#endif
+ chip->read_buf = bf5xx_nand_dma_read_buf;
+ chip->write_buf = bf5xx_nand_dma_write_buf;
+ chip->ecc.calculate = bf5xx_nand_calculate_ecc;
+ chip->ecc.correct = bf5xx_nand_correct_data;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.hwctl = bf5xx_nand_enable_hwecc;
+ chip->ecc.read_page_raw = bf5xx_nand_read_page_raw;
+ chip->ecc.write_page_raw = bf5xx_nand_write_page_raw;
+ } else {
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ /* scan hardware nand chip and setup mtd info data struct */
+ if (bf5xx_nand_scan(mtd)) {
+ err = -ENXIO;
+ goto out_err_nand_scan;
+ }
+
+#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
+ chip->badblockpos = 63;
+#endif
+
+ /* add NAND partition */
+ bf5xx_nand_add_partition(info);
+
+ dev_dbg(&pdev->dev, "initialised ok\n");
+ return 0;
+
+out_err_nand_scan:
+ bf5xx_nand_dma_remove(info);
+out_err:
+ peripheral_free_list(bfin_nfc_pin_req);
+
+ return err;
+}
+
+/* driver device registration */
+static struct platform_driver bf5xx_nand_driver = {
+ .probe = bf5xx_nand_probe,
+ .remove = bf5xx_nand_remove,
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+module_platform_driver(bf5xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
new file mode 100644
index 000000000..9a0f45f1d
--- /dev/null
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -0,0 +1,869 @@
+/*
+ * Driver for One Laptop Per Child ‘CAFÉ’ controller, aka Marvell 88ALP01
+ *
+ * The data sheet for this device can be found at:
+ * http://wiki.laptop.org/go/Datasheets
+ *
+ * Copyright © 2006 Red Hat, Inc.
+ * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#define DEBUG
+
+#include <linux/device.h>
+#undef DEBUG
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/rslib.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+#define CAFE_NAND_CTRL1 0x00
+#define CAFE_NAND_CTRL2 0x04
+#define CAFE_NAND_CTRL3 0x08
+#define CAFE_NAND_STATUS 0x0c
+#define CAFE_NAND_IRQ 0x10
+#define CAFE_NAND_IRQ_MASK 0x14
+#define CAFE_NAND_DATA_LEN 0x18
+#define CAFE_NAND_ADDR1 0x1c
+#define CAFE_NAND_ADDR2 0x20
+#define CAFE_NAND_TIMING1 0x24
+#define CAFE_NAND_TIMING2 0x28
+#define CAFE_NAND_TIMING3 0x2c
+#define CAFE_NAND_NONMEM 0x30
+#define CAFE_NAND_ECC_RESULT 0x3C
+#define CAFE_NAND_DMA_CTRL 0x40
+#define CAFE_NAND_DMA_ADDR0 0x44
+#define CAFE_NAND_DMA_ADDR1 0x48
+#define CAFE_NAND_ECC_SYN01 0x50
+#define CAFE_NAND_ECC_SYN23 0x54
+#define CAFE_NAND_ECC_SYN45 0x58
+#define CAFE_NAND_ECC_SYN67 0x5c
+#define CAFE_NAND_READ_DATA 0x1000
+#define CAFE_NAND_WRITE_DATA 0x2000
+
+#define CAFE_GLOBAL_CTRL 0x3004
+#define CAFE_GLOBAL_IRQ 0x3008
+#define CAFE_GLOBAL_IRQ_MASK 0x300c
+#define CAFE_NAND_RESET 0x3034
+
+/* Missing from the datasheet: bit 19 of CTRL1 sets CE0 vs. CE1 */
+#define CTRL1_CHIPSELECT (1<<19)
+
+struct cafe_priv {
+ struct nand_chip nand;
+ struct pci_dev *pdev;
+ void __iomem *mmio;
+ struct rs_control *rs;
+ uint32_t ctl1;
+ uint32_t ctl2;
+ int datalen;
+ int nr_data;
+ int data_pos;
+ int page_addr;
+ dma_addr_t dmaaddr;
+ unsigned char *dmabuf;
+};
+
+static int usedma = 1;
+module_param(usedma, int, 0644);
+
+static int skipbbt = 0;
+module_param(skipbbt, int, 0644);
+
+static int debug = 0;
+module_param(debug, int, 0644);
+
+static int regdebug = 0;
+module_param(regdebug, int, 0644);
+
+static int checkecc = 1;
+module_param(checkecc, int, 0644);
+
+static unsigned int numtimings;
+static int timing[3];
+module_param_array(timing, int, &numtimings, 0644);
+
+static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+
+/* Hrm. Why isn't this already conditional on something in the struct device? */
+#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
+
+/* Make it easier to switch to PIO if we need to */
+#define cafe_readl(cafe, addr) readl((cafe)->mmio + CAFE_##addr)
+#define cafe_writel(cafe, datum, addr) writel(datum, (cafe)->mmio + CAFE_##addr)
+
+static int cafe_device_ready(struct mtd_info *mtd)
+{
+ struct cafe_priv *cafe = mtd->priv;
+ int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
+ uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+
+ cafe_writel(cafe, irqs, NAND_IRQ);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "NAND device is%s ready, IRQ %x (%x) (%x,%x)\n",
+ result?"":" not", irqs, cafe_readl(cafe, NAND_IRQ),
+ cafe_readl(cafe, GLOBAL_IRQ), cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+ return result;
+}
+
+
+static void cafe_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct cafe_priv *cafe = mtd->priv;
+
+ if (usedma)
+ memcpy(cafe->dmabuf + cafe->datalen, buf, len);
+ else
+ memcpy_toio(cafe->mmio + CAFE_NAND_WRITE_DATA + cafe->datalen, buf, len);
+
+ cafe->datalen += len;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes to write buffer. datalen 0x%x\n",
+ len, cafe->datalen);
+}
+
+static void cafe_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct cafe_priv *cafe = mtd->priv;
+
+ if (usedma)
+ memcpy(buf, cafe->dmabuf + cafe->datalen, len);
+ else
+ memcpy_fromio(buf, cafe->mmio + CAFE_NAND_READ_DATA + cafe->datalen, len);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes from position 0x%x in read buffer.\n",
+ len, cafe->datalen);
+ cafe->datalen += len;
+}
+
+static uint8_t cafe_read_byte(struct mtd_info *mtd)
+{
+ struct cafe_priv *cafe = mtd->priv;
+ uint8_t d;
+
+ cafe_read_buf(mtd, &d, 1);
+ cafe_dev_dbg(&cafe->pdev->dev, "Read %02x\n", d);
+
+ return d;
+}
+
+static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct cafe_priv *cafe = mtd->priv;
+ int adrbytes = 0;
+ uint32_t ctl1;
+ uint32_t doneint = 0x80000000;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "cmdfunc %02x, 0x%x, 0x%x\n",
+ command, column, page_addr);
+
+ if (command == NAND_CMD_ERASE2 || command == NAND_CMD_PAGEPROG) {
+ /* Second half of a command we already calculated */
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | command, NAND_CTRL2);
+ ctl1 = cafe->ctl1;
+ cafe->ctl2 &= ~(1<<30);
+ cafe_dev_dbg(&cafe->pdev->dev, "Continue command, ctl1 %08x, #data %d\n",
+ cafe->ctl1, cafe->nr_data);
+ goto do_command;
+ }
+ /* Reset ECC engine */
+ cafe_writel(cafe, 0, NAND_CTRL2);
+
+ /* Emulate NAND_CMD_READOOB on large-page chips */
+ if (mtd->writesize > 512 &&
+ command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* FIXME: Do we need to send read command before sending data
+ for small-page chips, to position the buffer correctly? */
+
+ if (column != -1) {
+ cafe_writel(cafe, column, NAND_ADDR1);
+ adrbytes = 2;
+ if (page_addr != -1)
+ goto write_adr2;
+ } else if (page_addr != -1) {
+ cafe_writel(cafe, page_addr & 0xffff, NAND_ADDR1);
+ page_addr >>= 16;
+ write_adr2:
+ cafe_writel(cafe, page_addr, NAND_ADDR2);
+ adrbytes += 2;
+ if (mtd->size > mtd->writesize << 16)
+ adrbytes++;
+ }
+
+ cafe->data_pos = cafe->datalen = 0;
+
+ /* Set command valid bit, mask in the chip select bit */
+ ctl1 = 0x80000000 | command | (cafe->ctl1 & CTRL1_CHIPSELECT);
+
+ /* Set RD or WR bits as appropriate */
+ if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
+ ctl1 |= (1<<26); /* rd */
+ /* Always 5 bytes, for now */
+ cafe->datalen = 4;
+ /* And one address cycle -- even for STATUS, since the controller doesn't work without */
+ adrbytes = 1;
+ } else if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
+ command == NAND_CMD_READOOB || command == NAND_CMD_RNDOUT) {
+ ctl1 |= 1<<26; /* rd */
+ /* For now, assume just read to end of page */
+ cafe->datalen = mtd->writesize + mtd->oobsize - column;
+ } else if (command == NAND_CMD_SEQIN)
+ ctl1 |= 1<<25; /* wr */
+
+ /* Set number of address bytes */
+ if (adrbytes)
+ ctl1 |= ((adrbytes-1)|8) << 27;
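+	/* (Encoding inferred from the expression above: bit 30 enables
+	 * the address phase and bits 29:27 hold the address byte count
+	 * minus one.) */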
+
+ if (command == NAND_CMD_SEQIN || command == NAND_CMD_ERASE1) {
+ /* Ignore the first command of a pair; the hardware
+ deals with them both at once, later */
+ cafe->ctl1 = ctl1;
+ cafe_dev_dbg(&cafe->pdev->dev, "Setup for delayed command, ctl1 %08x, dlen %x\n",
+ cafe->ctl1, cafe->datalen);
+ return;
+ }
+ /* RNDOUT and READ0 commands need a following byte */
+ if (command == NAND_CMD_RNDOUT)
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_RNDOUTSTART, NAND_CTRL2);
+ else if (command == NAND_CMD_READ0 && mtd->writesize > 512)
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_READSTART, NAND_CTRL2);
+
+ do_command:
+ cafe_dev_dbg(&cafe->pdev->dev, "dlen %x, ctl1 %x, ctl2 %x\n",
+ cafe->datalen, ctl1, cafe_readl(cafe, NAND_CTRL2));
+
+ /* NB: The datasheet lies -- we really should be subtracting 1 here */
+ cafe_writel(cafe, cafe->datalen, NAND_DATA_LEN);
+ cafe_writel(cafe, 0x90000000, NAND_IRQ);
+ if (usedma && (ctl1 & (3<<25))) {
+ uint32_t dmactl = 0xc0000000 + cafe->datalen;
+ /* If WR or RD bits set, set up DMA */
+ if (ctl1 & (1<<26)) {
+ /* It's a read */
+ dmactl |= (1<<29);
+ /* ... so it's done when the DMA is done, not just
+ the command. */
+ doneint = 0x10000000;
+ }
+ cafe_writel(cafe, dmactl, NAND_DMA_CTRL);
+ }
+ cafe->datalen = 0;
+
+ if (unlikely(regdebug)) {
+ int i;
+ printk("About to write command %08x to register 0\n", ctl1);
+ for (i=4; i< 0x5c; i+=4)
+ printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
+ }
+
+ cafe_writel(cafe, ctl1, NAND_CTRL1);
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+
+ if (1) {
+ int c;
+ uint32_t irqs;
+
+ for (c = 500000; c != 0; c--) {
+ irqs = cafe_readl(cafe, NAND_IRQ);
+ if (irqs & doneint)
+ break;
+ udelay(1);
+ if (!(c % 100000))
+ cafe_dev_dbg(&cafe->pdev->dev, "Wait for ready, IRQ %x\n", irqs);
+ cpu_relax();
+ }
+ cafe_writel(cafe, doneint, NAND_IRQ);
+ cafe_dev_dbg(&cafe->pdev->dev, "Command %x completed after %d usec, irqs %x (%x)\n",
+ command, 500000-c, irqs, cafe_readl(cafe, NAND_IRQ));
+ }
+
+ WARN_ON(cafe->ctl2 & (1<<30));
+
+ switch (command) {
+
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RNDOUT:
+ cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
+ return;
+ }
+ nand_wait_ready(mtd);
+ cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
+}
+
+static void cafe_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct cafe_priv *cafe = mtd->priv;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
+
+ /* Mask the appropriate bit into the stored value of ctl1
+ which will be used by cafe_nand_cmdfunc() */
+ if (chipnr)
+ cafe->ctl1 |= CTRL1_CHIPSELECT;
+ else
+ cafe->ctl1 &= ~CTRL1_CHIPSELECT;
+}
+
+static irqreturn_t cafe_nand_interrupt(int irq, void *id)
+{
+ struct mtd_info *mtd = id;
+ struct cafe_priv *cafe = mtd->priv;
+ uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+ cafe_writel(cafe, irqs & ~0x90000000, NAND_IRQ);
+ if (!irqs)
+ return IRQ_NONE;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "irq, bits %x (%x)\n", irqs, cafe_readl(cafe, NAND_IRQ));
+ return IRQ_HANDLED;
+}
+
+static void cafe_nand_bug(struct mtd_info *mtd)
+{
+ BUG();
+}
+
+static int cafe_nand_write_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ int status = 0;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/* Don't use -- use nand_read_oob_std for now */
+static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+/**
+ * cafe_nand_read_page - [REPLACEABLE] hardware ecc syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore
+ * we need a special oob layout and handling.
+ */
+static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct cafe_priv *cafe = mtd->priv;
+ unsigned int max_bitflips = 0;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
+ cafe_readl(cafe, NAND_ECC_RESULT),
+ cafe_readl(cafe, NAND_ECC_SYN01));
+
+ chip->read_buf(mtd, buf, mtd->writesize);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
+ unsigned short syn[8], pat[4];
+ int pos[4];
+ u8 *oob = chip->oob_poi;
+ int i, n;
+
+ for (i=0; i<8; i+=2) {
+ uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
+ syn[i] = cafe->rs->index_of[tmp & 0xfff];
+ syn[i+1] = cafe->rs->index_of[(tmp >> 16) & 0xfff];
+ }
+
+ n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0,
+ pat);
+
+ for (i = 0; i < n; i++) {
+ int p = pos[i];
+
+ /* The 12-bit symbols are mapped to bytes here */
+
+ if (p > 1374) {
+ /* out of range */
+ n = -1374;
+ } else if (p == 0) {
+ /* high four bits do not correspond to data */
+ if (pat[i] > 0xff)
+ n = -2048;
+ else
+ buf[0] ^= pat[i];
+ } else if (p == 1365) {
+ buf[2047] ^= pat[i] >> 4;
+ oob[0] ^= pat[i] << 4;
+ } else if (p > 1365) {
+ if ((p & 1) == 1) {
+ oob[3*p/2 - 2048] ^= pat[i] >> 4;
+ oob[3*p/2 - 2047] ^= pat[i] << 4;
+ } else {
+ oob[3*p/2 - 2049] ^= pat[i] >> 8;
+ oob[3*p/2 - 2048] ^= pat[i];
+ }
+ } else if ((p & 1) == 1) {
+ buf[3*p/2] ^= pat[i] >> 4;
+ buf[3*p/2 + 1] ^= pat[i] << 4;
+ } else {
+ buf[3*p/2 - 1] ^= pat[i] >> 8;
+ buf[3*p/2] ^= pat[i];
+ }
+ }
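+
+		/*
+		 * Worked example of the mapping above (illustrative): each
+		 * pair of 12-bit symbols spans three data bytes, so symbol
+		 * p = 1 covers buf[1] plus the high nibble of buf[2], p = 2
+		 * covers the low nibble of buf[2] plus buf[3], and symbols
+		 * beyond p = 1365 spill over into the OOB area.
+		 */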
+
+ if (n < 0) {
+ dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
+ cafe_readl(cafe, NAND_ADDR2) * 2048);
+ for (i = 0; i < 0x5c; i += 4)
+ printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
+ mtd->ecc_stats.failed++;
+ } else {
+ dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
+ mtd->ecc_stats.corrected += n;
+ max_bitflips = max_t(unsigned int, max_bitflips, n);
+ }
+ }
+
+ return max_bitflips;
+}
+
+static struct nand_ecclayout cafe_oobinfo_2048 = {
+ .eccbytes = 14,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
+ .oobfree = {{14, 50}}
+};
+
+/* Ick. The BBT code really ought to be able to work this bit out
+ for itself from the above, at least for the 2KiB case */
+static uint8_t cafe_bbt_pattern_2048[] = { 'B', 'b', 't', '0' };
+static uint8_t cafe_mirror_pattern_2048[] = { '1', 't', 'b', 'B' };
+
+static uint8_t cafe_bbt_pattern_512[] = { 0xBB };
+static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
+
+
+static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 4,
+ .veroffs = 18,
+ .maxblocks = 4,
+ .pattern = cafe_bbt_pattern_2048
+};
+
+static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 4,
+ .veroffs = 18,
+ .maxblocks = 4,
+ .pattern = cafe_mirror_pattern_2048
+};
+
+static struct nand_ecclayout cafe_oobinfo_512 = {
+ .eccbytes = 14,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
+ .oobfree = {{14, 2}}
+};
+
+static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 1,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = cafe_bbt_pattern_512
+};
+
+static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 1,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = cafe_mirror_pattern_512
+};
+
+
+static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct cafe_priv *cafe = mtd->priv;
+
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* Set up ECC autogeneration */
+ cafe->ctl2 |= (1<<30);
+
+ return 0;
+}
+
+static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ return 0;
+}
+
+/* F_2[X]/(X**6+X+1) */
+static unsigned short gf64_mul(u8 a, u8 b)
+{
+ u8 c;
+ unsigned int i;
+
+ c = 0;
+ for (i = 0; i < 6; i++) {
+ if (a & 1)
+ c ^= b;
+ a >>= 1;
+ b <<= 1;
+ if ((b & 0x40) != 0)
+ b ^= 0x43;
+ }
+
+ return c;
+}
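+
+/*
+ * Example (for illustration): the multiply above is bit-serial, reducing
+ * by 0x43 (X**6 + X + 1) whenever the running product overflows six bits,
+ * so gf64_mul(0x02, 0x03) computes X * (X + 1) = X**2 + X = 0x06.
+ */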
+
+/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
+static u16 gf4096_mul(u16 a, u16 b)
+{
+ u8 ah, al, bh, bl, ch, cl;
+
+ ah = a >> 6;
+ al = a & 0x3f;
+ bh = b >> 6;
+ bl = b & 0x3f;
+
+ ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
+ cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
+
+ return (ch << 6) ^ cl;
+}
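+
+/*
+ * Sketch of the identity used above: writing a = ah*Y + al over F_64 with
+ * Y**2 = Y + A**-1, the product comes out as
+ *
+ *	a*b = (ah*bh + ah*bl + al*bh)*Y + (ah*bh*A**-1 + al*bl)
+ *
+ * The Y coefficient is computed Karatsuba-style as
+ * (ah + al)*(bh + bl) + al*bl, saving one GF(64) multiply; the constant
+ * 0x21 stands in for A**-1.
+ */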
+
+static int cafe_mul(int x)
+{
+ if (x == 0)
+ return 1;
+ return gf4096_mul(x, 0xe01);
+}
+
+static int cafe_nand_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct mtd_info *mtd;
+ struct cafe_priv *cafe;
+ uint32_t ctrl;
+ int err = 0;
+ int old_dma;
+ struct nand_buffers *nbuf;
+
+ /* Very old versions shared the same PCI ident for all three
+ functions on the chip. Verify the class too... */
+ if ((pdev->class >> 8) != PCI_CLASS_MEMORY_FLASH)
+ return -ENODEV;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
+ mtd = kzalloc(sizeof(*mtd) + sizeof(struct cafe_priv), GFP_KERNEL);
+ if (!mtd)
+ return -ENOMEM;
+ cafe = (void *)(&mtd[1]);
+
+ mtd->dev.parent = &pdev->dev;
+ mtd->priv = cafe;
+ mtd->owner = THIS_MODULE;
+
+ cafe->pdev = pdev;
+ cafe->mmio = pci_iomap(pdev, 0, 0);
+ if (!cafe->mmio) {
+ dev_warn(&pdev->dev, "failed to iomap\n");
+ err = -ENOMEM;
+ goto out_free_mtd;
+ }
+
+ cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
+ if (!cafe->rs) {
+ err = -ENOMEM;
+ goto out_ior;
+ }
+
+ cafe->nand.cmdfunc = cafe_nand_cmdfunc;
+ cafe->nand.dev_ready = cafe_device_ready;
+ cafe->nand.read_byte = cafe_read_byte;
+ cafe->nand.read_buf = cafe_read_buf;
+ cafe->nand.write_buf = cafe_write_buf;
+ cafe->nand.select_chip = cafe_select_chip;
+
+ cafe->nand.chip_delay = 0;
+
+ /* Enable the following for a flash based bad block table */
+ cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
+ cafe->nand.options = NAND_OWN_BUFFERS;
+
+ if (skipbbt) {
+ cafe->nand.options |= NAND_SKIP_BBTSCAN;
+ cafe->nand.block_bad = cafe_nand_block_bad;
+ }
+
+ if (numtimings && numtimings != 3) {
+ dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings);
+ }
+
+ if (numtimings == 3) {
+ cafe_dev_dbg(&cafe->pdev->dev, "Using provided timings (%08x %08x %08x)\n",
+ timing[0], timing[1], timing[2]);
+ } else {
+ timing[0] = cafe_readl(cafe, NAND_TIMING1);
+ timing[1] = cafe_readl(cafe, NAND_TIMING2);
+ timing[2] = cafe_readl(cafe, NAND_TIMING3);
+
+ if (timing[0] | timing[1] | timing[2]) {
+ cafe_dev_dbg(&cafe->pdev->dev, "Timing registers already set (%08x %08x %08x)\n",
+ timing[0], timing[1], timing[2]);
+ } else {
+ dev_warn(&cafe->pdev->dev, "Timing registers unset; using most conservative defaults\n");
+ timing[0] = timing[1] = timing[2] = 0xffffffff;
+ }
+ }
+
+ /* Start off by resetting the NAND controller completely */
+ cafe_writel(cafe, 1, NAND_RESET);
+ cafe_writel(cafe, 0, NAND_RESET);
+
+ cafe_writel(cafe, timing[0], NAND_TIMING1);
+ cafe_writel(cafe, timing[1], NAND_TIMING2);
+ cafe_writel(cafe, timing[2], NAND_TIMING3);
+
+ cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
+ err = request_irq(pdev->irq, &cafe_nand_interrupt, IRQF_SHARED,
+ "CAFE NAND", mtd);
+ if (err) {
+ dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
+ goto out_ior;
+ }
+
+ /* Disable master reset, enable NAND clock */
+ ctrl = cafe_readl(cafe, GLOBAL_CTRL);
+ ctrl &= 0xffffeff0;
+ ctrl |= 0x00007000;
+ cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
+ cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
+ cafe_writel(cafe, 0, NAND_DMA_CTRL);
+
+ cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
+ cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+
+ /* Enable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+ cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
+ cafe_readl(cafe, GLOBAL_CTRL),
+ cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+ /* Do not use the DMA for the nand_scan_ident() */
+ old_dma = usedma;
+ usedma = 0;
+
+ /* Scan to find existence of the device */
+ if (nand_scan_ident(mtd, 2, NULL)) {
+ err = -ENXIO;
+ goto out_irq;
+ }
+
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
+ 2112 + sizeof(struct nand_buffers) +
+ mtd->writesize + mtd->oobsize,
+ &cafe->dmaaddr, GFP_KERNEL);
+ if (!cafe->dmabuf) {
+ err = -ENOMEM;
+ goto out_irq;
+ }
+ cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
+
+ /* Set up DMA address */
+ cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
+ if (sizeof(cafe->dmaaddr) > 4)
+ /* Shift in two parts to shut the compiler up */
+ cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
+ else
+ cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
+ cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
+
+ /* this driver does not need the @ecccalc and @ecccode */
+ nbuf->ecccalc = NULL;
+ nbuf->ecccode = NULL;
+ nbuf->databuf = (uint8_t *)(nbuf + 1);
+
+ /* Restore the DMA flag */
+ usedma = old_dma;
+
+ cafe->ctl2 = 1<<27; /* Reed-Solomon ECC */
+ if (mtd->writesize == 2048)
+ cafe->ctl2 |= 1<<29; /* 2KiB page size */
+
+ /* Set up ECC according to the type of chip we found */
+ if (mtd->writesize == 2048) {
+ cafe->nand.ecc.layout = &cafe_oobinfo_2048;
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
+ } else if (mtd->writesize == 512) {
+ cafe->nand.ecc.layout = &cafe_oobinfo_512;
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
+ } else {
+ printk(KERN_WARNING "Unexpected NAND flash writesize %d. Aborting\n",
+ mtd->writesize);
+ goto out_free_dma;
+ }
+ cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+ cafe->nand.ecc.size = mtd->writesize;
+ cafe->nand.ecc.bytes = 14;
+ cafe->nand.ecc.strength = 4;
+ cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
+ cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
+ cafe->nand.ecc.correct = (void *)cafe_nand_bug;
+ cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
+ cafe->nand.ecc.write_oob = cafe_nand_write_oob;
+ cafe->nand.ecc.read_page = cafe_nand_read_page;
+ cafe->nand.ecc.read_oob = cafe_nand_read_oob;
+
+ err = nand_scan_tail(mtd);
+ if (err)
+ goto out_free_dma;
+
+ pci_set_drvdata(pdev, mtd);
+
+ mtd->name = "cafe_nand";
+ mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+
+ goto out;
+
+ out_free_dma:
+ dma_free_coherent(&cafe->pdev->dev,
+ 2112 + sizeof(struct nand_buffers) +
+ mtd->writesize + mtd->oobsize,
+ cafe->dmabuf, cafe->dmaaddr);
+ out_irq:
+ /* Disable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+ free_irq(pdev->irq, mtd);
+ out_ior:
+ pci_iounmap(pdev, cafe->mmio);
+ out_free_mtd:
+ kfree(mtd);
+ out:
+ return err;
+}
+
+static void cafe_nand_remove(struct pci_dev *pdev)
+{
+ struct mtd_info *mtd = pci_get_drvdata(pdev);
+ struct cafe_priv *cafe = mtd->priv;
+
+ /* Disable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+ free_irq(pdev->irq, mtd);
+ nand_release(mtd);
+ free_rs(cafe->rs);
+ pci_iounmap(pdev, cafe->mmio);
+ dma_free_coherent(&cafe->pdev->dev,
+ 2112 + sizeof(struct nand_buffers) +
+ mtd->writesize + mtd->oobsize,
+ cafe->dmabuf, cafe->dmaaddr);
+ kfree(mtd);
+}
+
+static const struct pci_device_id cafe_nand_tbl[] = {
+ { PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, cafe_nand_tbl);
+
+static int cafe_nand_resume(struct pci_dev *pdev)
+{
+ uint32_t ctrl;
+ struct mtd_info *mtd = pci_get_drvdata(pdev);
+ struct cafe_priv *cafe = mtd->priv;
+
+ /* Start off by resetting the NAND controller completely */
+ cafe_writel(cafe, 1, NAND_RESET);
+ cafe_writel(cafe, 0, NAND_RESET);
+ cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
+
+ /* Restore timing configuration */
+ cafe_writel(cafe, timing[0], NAND_TIMING1);
+ cafe_writel(cafe, timing[1], NAND_TIMING2);
+ cafe_writel(cafe, timing[2], NAND_TIMING3);
+
+ /* Disable master reset, enable NAND clock */
+ ctrl = cafe_readl(cafe, GLOBAL_CTRL);
+ ctrl &= 0xffffeff0;
+ ctrl |= 0x00007000;
+ cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
+ cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
+ cafe_writel(cafe, 0, NAND_DMA_CTRL);
+ cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
+ cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+
+ /* Set up DMA address */
+ cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
+ if (sizeof(cafe->dmaaddr) > 4)
+ /* Shift in two parts to shut the compiler up */
+ cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
+ else
+ cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+
+ /* Enable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+ return 0;
+}
+
+static struct pci_driver cafe_nand_pci_driver = {
+ .name = "CAFÉ NAND",
+ .id_table = cafe_nand_tbl,
+ .probe = cafe_nand_probe,
+ .remove = cafe_nand_remove,
+ .resume = cafe_nand_resume,
+};
+
+module_pci_driver(cafe_nand_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("NAND flash driver for OLPC CAFÉ chip");
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
new file mode 100644
index 000000000..66ec95e6c
--- /dev/null
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -0,0 +1,250 @@
+/*
+ * linux/drivers/mtd/nand/cmx270_nand.c
+ *
+ * Copyright (C) 2006 Compulab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * Derived from drivers/mtd/nand/h1910.c
+ * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
+ * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * CM-X270 board.
+ */
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#include <mach/pxa2xx-regs.h>
+
+#define GPIO_NAND_CS (11)
+#define GPIO_NAND_RB (89)
+
+/* MTD structure for CM-X270 board */
+static struct mtd_info *cmx270_nand_mtd;
+
+/* remapped I/O address of the device */
+static void __iomem *cmx270_nand_io;
+
+/*
+ * Define static partitions for flash device
+ */
+static struct mtd_partition partition_info[] = {
+ [0] = {
+ .name = "cmx270-0",
+ .offset = 0,
+ .size = MTDPART_SIZ_FULL
+ }
+};
+#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
+
+static u_char cmx270_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ return (readl(this->IO_ADDR_R) >> 16);
+}
+
+static void cmx270_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ writel((*buf++ << 16), this->IO_ADDR_W);
+}
+
+static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ *buf++ = readl(this->IO_ADDR_R) >> 16;
+}
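+
+/*
+ * Note (inferred from the accessors above): the NAND data lines sit on
+ * bits 23:16 of the CM-X270's 32-bit bus, so each byte is shifted up by
+ * 16 on writes and back down by 16 on reads.
+ */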
+
+static inline void nand_cs_on(void)
+{
+ gpio_set_value(GPIO_NAND_CS, 0);
+}
+
+static void nand_cs_off(void)
+{
+ dsb();
+
+ gpio_set_value(GPIO_NAND_CS, 1);
+}
+
+/*
+ * hardware specific access to control-lines
+ */
+static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
+ unsigned int ctrl)
+{
+	struct nand_chip *this = mtd->priv;
+ unsigned int nandaddr = (unsigned int)this->IO_ADDR_W;
+
+ dsb();
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		if (ctrl & NAND_ALE)
+			nandaddr |= (1 << 3);
+		else
+			nandaddr &= ~(1 << 3);
+		if (ctrl & NAND_CLE)
+			nandaddr |= (1 << 2);
+		else
+			nandaddr &= ~(1 << 2);
+		if (ctrl & NAND_NCE)
+			nand_cs_on();
+		else
+			nand_cs_off();
+	}
+
+ dsb();
+	this->IO_ADDR_W = (void __iomem *)nandaddr;
+ if (dat != NAND_CMD_NONE)
+ writel((dat << 16), this->IO_ADDR_W);
+
+ dsb();
+}
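+
+/*
+ * Design note (inferred): ALE and CLE appear to be wired to address
+ * lines A3 and A2, so toggling bits 3 and 2 of the I/O address selects
+ * command, address or data cycles without a separate control register.
+ */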
+
+/*
+ * read device ready pin
+ */
+static int cmx270_device_ready(struct mtd_info *mtd)
+{
+ dsb();
+
+ return (gpio_get_value(GPIO_NAND_RB));
+}
+
+/*
+ * Main initialization routine
+ */
+static int __init cmx270_init(void)
+{
+ struct nand_chip *this;
+ int ret;
+
+ if (!(machine_is_armcore() && cpu_is_pxa27x()))
+ return -ENODEV;
+
+ ret = gpio_request(GPIO_NAND_CS, "NAND CS");
+ if (ret) {
+ pr_warning("CM-X270: failed to request NAND CS gpio\n");
+ return ret;
+ }
+
+ gpio_direction_output(GPIO_NAND_CS, 1);
+
+ ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
+ if (ret) {
+ pr_warning("CM-X270: failed to request NAND R/B gpio\n");
+ goto err_gpio_request;
+ }
+
+ gpio_direction_input(GPIO_NAND_RB);
+
+ /* Allocate memory for MTD device structure and private data */
+ cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!cmx270_nand_mtd) {
+ ret = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
+ if (!cmx270_nand_io) {
+ pr_debug("Unable to ioremap NAND device\n");
+ ret = -EINVAL;
+ goto err_ioremap;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&cmx270_nand_mtd[1]);
+
+ /* Link the private data with the MTD structure */
+ cmx270_nand_mtd->owner = THIS_MODULE;
+ cmx270_nand_mtd->priv = this;
+
+ /* insert callbacks */
+ this->IO_ADDR_R = cmx270_nand_io;
+ this->IO_ADDR_W = cmx270_nand_io;
+ this->cmd_ctrl = cmx270_hwcontrol;
+ this->dev_ready = cmx270_device_ready;
+
+	/* 20 us command delay time */
+ this->chip_delay = 20;
+ this->ecc.mode = NAND_ECC_SOFT;
+
+ /* read/write functions */
+ this->read_byte = cmx270_read_byte;
+ this->read_buf = cmx270_read_buf;
+ this->write_buf = cmx270_write_buf;
+
+ /* Scan to find existence of the device */
+	if (nand_scan(cmx270_nand_mtd, 1)) {
+ pr_notice("No NAND device\n");
+ ret = -ENXIO;
+ goto err_scan;
+ }
+
+ /* Register the partitions */
+ ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
+ partition_info, NUM_PARTITIONS);
+ if (ret)
+ goto err_scan;
+
+ /* Return happy */
+ return 0;
+
+err_scan:
+ iounmap(cmx270_nand_io);
+err_ioremap:
+ kfree(cmx270_nand_mtd);
+err_kzalloc:
+ gpio_free(GPIO_NAND_RB);
+err_gpio_request:
+ gpio_free(GPIO_NAND_CS);
+
+ return ret;
+
+}
+module_init(cmx270_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit cmx270_cleanup(void)
+{
+ /* Release resources, unregister device */
+ nand_release(cmx270_nand_mtd);
+
+ gpio_free(GPIO_NAND_RB);
+ gpio_free(GPIO_NAND_CS);
+
+ iounmap(cmx270_nand_io);
+
+ /* Free the MTD device structure */
+	kfree(cmx270_nand_mtd);
+}
+module_exit(cmx270_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
+MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module");
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
new file mode 100644
index 000000000..88109d375
--- /dev/null
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -0,0 +1,354 @@
+/*
+ * drivers/mtd/nand/cs553x_nand.c
+ *
+ * (C) 2005, 2006 Red Hat Inc.
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Tom Sylla <tom.sylla@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash controller found on
+ * the AMD CS5535/CS5536 companion chipsets for the Geode processor.
+ * mtd-id for command line partitioning is cs553x_nand_cs[0-3]
+ * where 0-3 reflects the chip select for NAND.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/msr.h>
+#include <asm/io.h>
+
+#define NR_CS553X_CONTROLLERS 4
+
+#define MSR_DIVIL_GLD_CAP	0x51400000	/* DIVIL capabilities */
+#define CAP_CS5535 0x2df000ULL
+#define CAP_CS5536 0x5df500ULL
+
+/* NAND Timing MSRs */
+#define MSR_NANDF_DATA 0x5140001b /* NAND Flash Data Timing MSR */
+#define MSR_NANDF_CTL 0x5140001c /* NAND Flash Control Timing */
+#define MSR_NANDF_RSVD 0x5140001d /* Reserved */
+
+/* NAND BAR MSRs */
+#define MSR_DIVIL_LBAR_FLSH0 0x51400010 /* Flash Chip Select 0 */
+#define MSR_DIVIL_LBAR_FLSH1 0x51400011 /* Flash Chip Select 1 */
+#define MSR_DIVIL_LBAR_FLSH2 0x51400012 /* Flash Chip Select 2 */
+#define MSR_DIVIL_LBAR_FLSH3 0x51400013 /* Flash Chip Select 3 */
+ /* Each made up of... */
+#define FLSH_LBAR_EN (1ULL<<32)
+#define FLSH_NOR_NAND (1ULL<<33) /* 1 for NAND */
+#define FLSH_MEM_IO (1ULL<<34) /* 1 for MMIO */
+ /* I/O BARs have BASE_ADDR in bits 15:4, IO_MASK in 47:36 */
+ /* MMIO BARs have BASE_ADDR in bits 31:12, MEM_MASK in 63:44 */
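+	/* Illustrative decode (hypothetical value): an MSR reading of
+	 * 0x0000000720000000ULL has FLSH_LBAR_EN, FLSH_NOR_NAND and
+	 * FLSH_MEM_IO all set, i.e. an enabled, memory-mapped NAND BAR
+	 * at physical address 0x20000000. */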
+
+/* Pin function selection MSR (IDE vs. flash on the IDE pins) */
+#define MSR_DIVIL_BALL_OPTS 0x51400015
+#define PIN_OPT_IDE (1<<0) /* 0 for flash, 1 for IDE */
+
+/* Registers within the NAND flash controller BAR -- memory mapped */
+#define MM_NAND_DATA 0x00 /* 0 to 0x7ff, in fact */
+#define MM_NAND_CTL 0x800 /* Any even address 0x800-0x80e */
+#define MM_NAND_IO 0x801 /* Any odd address 0x801-0x80f */
+#define MM_NAND_STS 0x810
+#define MM_NAND_ECC_LSB 0x811
+#define MM_NAND_ECC_MSB 0x812
+#define MM_NAND_ECC_COL 0x813
+#define MM_NAND_LAC 0x814
+#define MM_NAND_ECC_CTL 0x815
+
+/* Registers within the NAND flash controller BAR -- I/O mapped */
+#define IO_NAND_DATA 0x00 /* 0 to 3, in fact */
+#define IO_NAND_CTL 0x04
+#define IO_NAND_IO 0x05
+#define IO_NAND_STS 0x06
+#define IO_NAND_ECC_CTL 0x08
+#define IO_NAND_ECC_LSB 0x09
+#define IO_NAND_ECC_MSB 0x0a
+#define IO_NAND_ECC_COL 0x0b
+#define IO_NAND_LAC 0x0c
+
+#define CS_NAND_CTL_DIST_EN (1<<4) /* Enable NAND Distract interrupt */
+#define CS_NAND_CTL_RDY_INT_MASK (1<<3) /* Enable RDY/BUSY# interrupt */
+#define CS_NAND_CTL_ALE (1<<2)
+#define CS_NAND_CTL_CLE (1<<1)
+#define CS_NAND_CTL_CE (1<<0) /* Keep low; 1 to reset */
+
+#define CS_NAND_STS_FLASH_RDY (1<<3)
+#define CS_NAND_CTLR_BUSY (1<<2)
+#define CS_NAND_CMD_COMP (1<<1)
+#define CS_NAND_DIST_ST (1<<0)
+
+#define CS_NAND_ECC_PARITY (1<<2)
+#define CS_NAND_ECC_CLRECC (1<<1)
+#define CS_NAND_ECC_ENECC (1<<0)
+
+static void cs553x_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+
+ while (unlikely(len > 0x800)) {
+ memcpy_fromio(buf, this->IO_ADDR_R, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_fromio(buf, this->IO_ADDR_R, len);
+}
+
+static void cs553x_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+
+ while (unlikely(len > 0x800)) {
+ memcpy_toio(this->IO_ADDR_R, buf, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_toio(this->IO_ADDR_R, buf, len);
+}
+
+static unsigned char cs553x_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ return readb(this->IO_ADDR_R);
+}
+
+static void cs553x_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ int i = 100000;
+
+ while (i && readb(this->IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) {
+ udelay(1);
+ i--;
+ }
+ writeb(byte, this->IO_ADDR_W + 0x801);
+}
+
+static void cs553x_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ void __iomem *mmio_base = this->IO_ADDR_R;
+ if (ctrl & NAND_CTRL_CHANGE) {
+ unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01;
+ writeb(ctl, mmio_base + MM_NAND_CTL);
+ }
+ if (cmd != NAND_CMD_NONE)
+ cs553x_write_byte(mtd, cmd);
+}
+
+static int cs553x_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ void __iomem *mmio_base = this->IO_ADDR_R;
+ unsigned char foo = readb(mmio_base + MM_NAND_STS);
+
+ return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY);
+}
+
+static void cs_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ void __iomem *mmio_base = this->IO_ADDR_R;
+
+ writeb(0x07, mmio_base + MM_NAND_ECC_CTL);
+}
+
+static int cs_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+ uint32_t ecc;
+ struct nand_chip *this = mtd->priv;
+ void __iomem *mmio_base = this->IO_ADDR_R;
+
+ ecc = readl(mmio_base + MM_NAND_STS);
+
+ ecc_code[1] = ecc >> 8;
+ ecc_code[0] = ecc >> 16;
+ ecc_code[2] = ecc >> 24;
+ return 0;
+}
+
+static struct mtd_info *cs553x_mtd[4];
+
+static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
+{
+ int err = 0;
+ struct nand_chip *this;
+ struct mtd_info *new_mtd;
+
+ printk(KERN_NOTICE "Probing CS553x NAND controller CS#%d at %sIO 0x%08lx\n", cs, mmio?"MM":"P", adr);
+
+ if (!mmio) {
+ printk(KERN_NOTICE "PIO mode not yet implemented for CS553X NAND controller\n");
+ return -ENXIO;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ new_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ if (!new_mtd) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&new_mtd[1]);
+
+ /* Link the private data with the MTD structure */
+ new_mtd->priv = this;
+ new_mtd->owner = THIS_MODULE;
+
+ /* map physical address */
+ this->IO_ADDR_R = this->IO_ADDR_W = ioremap(adr, 4096);
+ if (!this->IO_ADDR_R) {
+ printk(KERN_WARNING "ioremap cs553x NAND @0x%08lx failed\n", adr);
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ this->cmd_ctrl = cs553x_hwcontrol;
+ this->dev_ready = cs553x_device_ready;
+ this->read_byte = cs553x_read_byte;
+ this->read_buf = cs553x_read_buf;
+ this->write_buf = cs553x_write_buf;
+
+ this->chip_delay = 0;
+
+ this->ecc.mode = NAND_ECC_HW;
+ this->ecc.size = 256;
+ this->ecc.bytes = 3;
+ this->ecc.hwctl = cs_enable_hwecc;
+ this->ecc.calculate = cs_calculate_ecc;
+ this->ecc.correct = nand_correct_data;
+ this->ecc.strength = 1;
+
+ /* Enable the following for a flash based bad block table */
+ this->bbt_options = NAND_BBT_USE_FLASH;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(new_mtd, 1)) {
+ err = -ENXIO;
+ goto out_ior;
+ }
+
+ new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
+
+ cs553x_mtd[cs] = new_mtd;
+ goto out;
+
+out_ior:
+ iounmap(this->IO_ADDR_R);
+out_mtd:
+ kfree(new_mtd);
+out:
+ return err;
+}
+
+static int is_geode(void)
+{
+ /* These are the CPUs which will have a CS553[56] companion chip */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 5 &&
+ boot_cpu_data.x86_model == 10)
+ return 1; /* Geode LX */
+
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX) &&
+ boot_cpu_data.x86 == 5 &&
+ boot_cpu_data.x86_model == 5)
+ return 1; /* Geode GX (née GX2) */
+
+ return 0;
+}
+
+static int __init cs553x_init(void)
+{
+ int err = -ENXIO;
+ int i;
+ uint64_t val;
+
+ /* If the CPU isn't a Geode GX or LX, abort */
+ if (!is_geode())
+ return -ENXIO;
+
+ /* If it doesn't have the CS553[56], abort */
+ rdmsrl(MSR_DIVIL_GLD_CAP, val);
+ val &= ~0xFFULL;
+ if (val != CAP_CS5535 && val != CAP_CS5536)
+ return -ENXIO;
+
+ /* If it doesn't have the NAND controller enabled, abort */
+ rdmsrl(MSR_DIVIL_BALL_OPTS, val);
+ if (val & PIN_OPT_IDE) {
+ printk(KERN_INFO "CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
+
+ if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
+ err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
+ }
+
+ /* Register all devices together here. This means we can easily hack it to
+ do mtdconcat etc. if we want to. */
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ if (cs553x_mtd[i]) {
+ /* If any devices registered, return success. Else the last error. */
+ mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
+ NULL, 0);
+ err = 0;
+ }
+ }
+
+ return err;
+}
+
+module_init(cs553x_init);
+
+static void __exit cs553x_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ struct mtd_info *mtd = cs553x_mtd[i];
+ struct nand_chip *this;
+ void __iomem *mmio_base;
+
+ if (!mtd)
+ continue;
+
+ this = cs553x_mtd[i]->priv;
+ mmio_base = this->IO_ADDR_R;
+
+ /* Release resources, unregister device */
+ nand_release(cs553x_mtd[i]);
+ kfree(cs553x_mtd[i]->name);
+ cs553x_mtd[i] = NULL;
+
+ /* unmap physical address */
+ iounmap(mmio_base);
+
+ /* Free the MTD device structure */
+ kfree(mtd);
+ }
+}
+
+module_exit(cs553x_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("NAND controller driver for AMD CS5535/CS5536 companion chip");
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
new file mode 100644
index 000000000..feb6d18de
--- /dev/null
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -0,0 +1,883 @@
+/*
+ * davinci_nand.c - NAND Flash Driver for DaVinci family chips
+ *
+ * Copyright © 2006 Texas Instruments.
+ *
+ * Port to 2.6.23 Copyright © 2008 by:
+ * Sander Huijsen <Shuijsen@optelecom-nkf.com>
+ * Troy Kisky <troy.kisky@boundarydevices.com>
+ * Dirk Behme <Dirk.Behme@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
+
+/*
+ * This is a device driver for the NAND flash controller found on the
+ * various DaVinci family chips. It handles up to four SoC chipselects,
+ * and some flavors of secondary chipselect (e.g. based on A12) as used
+ * with multichip packages.
+ *
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
+ * available on chips like the DM355 and OMAP-L137 and needed with the
+ * more error-prone MLC NAND chips.
+ *
+ * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
+ * outputs in a "wire-AND" configuration, with no per-chip signals.
+ */
+struct davinci_nand_info {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct nand_ecclayout ecclayout;
+
+ struct device *dev;
+ struct clk *clk;
+
+ bool is_readmode;
+
+ void __iomem *base;
+ void __iomem *vaddr;
+
+ uint32_t ioaddr;
+ uint32_t current_cs;
+
+ uint32_t mask_chipsel;
+ uint32_t mask_ale;
+ uint32_t mask_cle;
+
+ uint32_t core_chipsel;
+
+ struct davinci_aemif_timing *timing;
+};
+
+static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
+
+#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
+
+
+static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
+ int offset)
+{
+ return __raw_readl(info->base + offset);
+}
+
+static inline void davinci_nand_writel(struct davinci_nand_info *info,
+ int offset, unsigned long value)
+{
+ __raw_writel(value, info->base + offset);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Access to hardware control lines: ALE, CLE, secondary chipselect.
+ */
+
+static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ uint32_t addr = info->current_cs;
+ struct nand_chip *nand = mtd->priv;
+
+ /* Did the control lines change? */
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
+ addr |= info->mask_cle;
+ else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
+ addr |= info->mask_ale;
+
+ nand->IO_ADDR_W = (void __iomem __force *)addr;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ iowrite8(cmd, nand->IO_ADDR_W);
+}
+
+static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ uint32_t addr = info->ioaddr;
+
+ /* maybe kick in a second chipselect */
+ if (chip > 0)
+ addr |= info->mask_chipsel;
+ info->current_cs = addr;
+
+ info->chip.IO_ADDR_W = (void __iomem __force *)addr;
+ info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * 1-bit hardware ECC ... context maintained for each core chipselect
+ */
+
+static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+
+ return davinci_nand_readl(info, NANDF1ECC_OFFSET
+ + 4 * info->core_chipsel);
+}
+
+static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
+{
+ struct davinci_nand_info *info;
+ uint32_t nandcfr;
+ unsigned long flags;
+
+ info = to_davinci_nand(mtd);
+
+ /* Reset ECC hardware */
+ nand_davinci_readecc_1bit(mtd);
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Restart ECC hardware */
+ nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
+ nandcfr |= BIT(8 + info->core_chipsel);
+ davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/*
+ * Read hardware ECC value and pack into three bytes
+ */
+static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
+ unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
+
+ /* invert so that erased block ecc is correct */
+ ecc24 = ~ecc24;
+ ecc_code[0] = (u_char)(ecc24);
+ ecc_code[1] = (u_char)(ecc24 >> 8);
+ ecc_code[2] = (u_char)(ecc24 >> 16);
+
+ return 0;
+}
+
+static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
+ (read_ecc[2] << 16);
+ uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
+ (calc_ecc[2] << 16);
+ uint32_t diff = eccCalc ^ eccNand;
+
+ if (diff) {
+ if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
+ /* Correctable error */
+ if ((diff >> (12 + 3)) < chip->ecc.size) {
+ dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
+ return 1;
+ } else {
+ return -1;
+ }
+ } else if (!(diff & (diff - 1))) {
+ /* Single bit ECC error in the ECC itself,
+ * nothing to fix */
+ return 1;
+ } else {
+ /* Uncorrectable error */
+ return -1;
+ }
+
+ }
+ return 0;
+}
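+
+/*
+ * A note on the test above (standard Hamming-parity reasoning): the
+ * 24-bit code holds two complementary 12-bit halves, so a single-bit
+ * data error makes ((diff >> 12) ^ diff) come out all-ones in the low
+ * 12 bits. The upper half of diff then encodes the failing bit address:
+ * bits 14:12 give the bit within the byte and the bits above give the
+ * byte offset, which is exactly what the correction path applies.
+ */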
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Start 4-bit ECC calculation for read/write */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val &= ~(0x03 << 4);
+ val |= (info->core_chipsel << 4) | BIT(12);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ info->is_readmode = (mode == NAND_ECC_READ);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
+ const u32 mask = 0x03ff03ff;
+
+ code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+ code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+ code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+ code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ u32 raw_ecc[4], *p;
+ unsigned i;
+
+ /* After a read, terminate ECC calculation by a dummy read
+ * of some 4-bit ECC register. ECC covers everything that
+ * was read; correct() just uses the hardware state, so
+ * ecc_code is not needed.
+ */
+ if (info->is_readmode) {
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+ return 0;
+ }
+
+ /* Pack eight raw 10-bit ecc values into ten bytes, making
+ * two passes which each convert four values (in upper and
+ * lower halves of two 32-bit words) into five bytes. The
+ * ROM boot loader uses this same packing scheme.
+ */
+ nand_davinci_readecc_4bit(info, raw_ecc);
+ for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+ *ecc_code++ = p[0] & 0xff;
+ *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
+ *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
+ *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+ *ecc_code++ = (p[1] >> 18) & 0xff;
+ }
+
+ return 0;
+}
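+
+/*
+ * Resulting layout of each 5-byte group (v0..v3 being four 10-bit
+ * values, as implied by the shifts and masks above):
+ * byte0 = v0[7:0]
+ * byte1 = v1[5:0] << 2 | v0[9:8]
+ * byte2 = v2[3:0] << 4 | v1[9:6]
+ * byte3 = v3[1:0] << 6 | v2[9:4]
+ * byte4 = v3[9:2]
+ * i.e. a plain little-endian stream of 10-bit values.
+ */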
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct mtd_info *mtd,
+ u_char *data, u_char *ecc_code, u_char *null)
+{
+ int i;
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned short ecc10[8];
+ unsigned short *ecc16;
+ u32 syndrome[4];
+ u32 ecc_state;
+ unsigned num_errors, corrected;
+ unsigned long timeo;
+
+ /* All bytes 0xff? It's an erased page; ignore its ECC. */
+ for (i = 0; i < 10; i++) {
+ if (ecc_code[i] != 0xff)
+ goto compare;
+ }
+ return 0;
+
+compare:
+ /* Unpack ten bytes into eight 10 bit values. We know we're
+ * little-endian, and use type punning for less shifting/masking.
+ */
+ if (WARN_ON(0x01 & (unsigned) ecc_code))
+ return -EINVAL;
+ ecc16 = (unsigned short *)ecc_code;
+
+ ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
+ ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+ ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
+ ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
+ ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
+ ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
+ ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
+ ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
+
+ /* Tell ECC controller about the expected ECC codes. */
+ for (i = 7; i >= 0; i--)
+ davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+ /* Allow time for syndrome calculation ... then read it.
+ * A syndrome of all zeroes means no detected errors.
+ */
+ davinci_nand_readl(info, NANDFSR_OFFSET);
+ nand_davinci_readecc_4bit(info, syndrome);
+ if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+ return 0;
+
+ /*
+ * Clear any previous address calculation by doing a dummy read of an
+ * error address register.
+ */
+ davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
+
+ /* Start address calculation, and wait for it to complete.
+ * We _could_ start reading more data while this is working,
+ * to speed up the overall page read.
+ */
+ davinci_nand_writel(info, NANDFCR_OFFSET,
+ davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+
+ /*
+ * ECC_STATE field reads 0x3 (Error correction complete) immediately
+ * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
+ * begin trying to poll for the state, you may fall right out of your
+ * loop without any of the correction calculations having taken place.
+ * The recommendation from the hardware team is to initially delay as
+ * long as ECC_STATE reads less than 4. After that, ECC HW has entered
+ * correction state.
+ */
+ timeo = jiffies + usecs_to_jiffies(100);
+ do {
+ ecc_state = (davinci_nand_readl(info,
+ NANDFSR_OFFSET) >> 8) & 0x0f;
+ cpu_relax();
+ } while ((ecc_state < 4) && time_before(jiffies, timeo));
+
+ for (;;) {
+ u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+ switch ((fsr >> 8) & 0x0f) {
+ case 0: /* no error, should not happen */
+ davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
+ return 0;
+ case 1: /* five or more errors detected */
+ davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
+ return -EIO;
+ case 2: /* error addresses computed */
+ case 3:
+ num_errors = 1 + ((fsr >> 16) & 0x03);
+ goto correct;
+ default: /* still working on it */
+ cpu_relax();
+ continue;
+ }
+ }
+
+correct:
+ /* correct each error */
+ for (i = 0, corrected = 0; i < num_errors; i++) {
+ int error_address, error_value;
+
+ if (i > 1) {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD2_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL2_OFFSET);
+ } else {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD1_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL1_OFFSET);
+ }
+
+ if (i & 1) {
+ error_address >>= 16;
+ error_value >>= 16;
+ }
+ error_address &= 0x3ff;
+ error_address = (512 + 7) - error_address;
+
+ if (error_address < 512) {
+ data[error_address] ^= error_value;
+ corrected++;
+ }
+ }
+
+ return corrected;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
+ * how these chips are normally wired. This translates to both 8 and 16
+ * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
+ *
+ * For now we assume that configuration, or any other one which ignores
+ * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
+ * and have that transparently morphed into multiple NAND operations.
+ */
+static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
+ ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
+ else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
+ ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
+ else
+ ioread8_rep(chip->IO_ADDR_R, buf, len);
+}
+
+static void nand_davinci_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ /* IO_ADDR_W == IO_ADDR_R here; use the write pointer for clarity */
+ if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
+ iowrite32_rep(chip->IO_ADDR_W, buf, len >> 2);
+ else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
+ iowrite16_rep(chip->IO_ADDR_W, buf, len >> 1);
+ else
+ iowrite8_rep(chip->IO_ADDR_W, buf, len);
+}
+
+/*
+ * Check hardware register for wait status. Returns 1 if device is ready,
+ * 0 if it is still busy.
+ */
+static int nand_davinci_dev_ready(struct mtd_info *mtd)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+
+ return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte, and
+ * not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_small = {
+ .eccbytes = 10,
+ .eccpos = { 0, 1, 2, 3, 4,
+ /* offset 5 holds the badblock marker */
+ 6, 7,
+ 13, 14, 15, },
+ .oobfree = {
+ {.offset = 8, .length = 5, },
+ {.offset = 16, },
+ },
+};
+
+/* An ECC layout for using 4-bit ECC with large-page (2048-byte) flash,
+ * storing ten ECC bytes plus the manufacturer's bad block marker byte,
+ * and not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_2048 = {
+ .eccbytes = 40,
+ .eccpos = {
+ /* at the end of spare sector */
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ },
+ .oobfree = {
+ /* 2 bytes at offset 0 hold manufacturer badblock markers */
+ {.offset = 2, .length = 22, },
+ /* 5 bytes at offset 8 hold BBT markers */
+ /* 8 bytes at offset 16 hold JFFS2 clean markers */
+ },
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id davinci_nand_of_match[] = {
+ {.compatible = "ti,davinci-nand", },
+ {.compatible = "ti,keystone-nand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
+
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
+ struct davinci_nand_pdata *pdata;
+ const char *mode;
+ u32 prop;
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct davinci_nand_pdata),
+ GFP_KERNEL);
+ pdev->dev.platform_data = pdata;
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-chipselect", &prop))
+ pdev->id = prop;
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-ale", &prop))
+ pdata->mask_ale = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-cle", &prop))
+ pdata->mask_cle = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-chipsel", &prop))
+ pdata->mask_chipsel = prop;
+ if (!of_property_read_string(pdev->dev.of_node,
+ "nand-ecc-mode", &mode) ||
+ !of_property_read_string(pdev->dev.of_node,
+ "ti,davinci-ecc-mode", &mode)) {
+ if (!strncmp("none", mode, 4))
+ pdata->ecc_mode = NAND_ECC_NONE;
+ if (!strncmp("soft", mode, 4))
+ pdata->ecc_mode = NAND_ECC_SOFT;
+ if (!strncmp("hw", mode, 2))
+ pdata->ecc_mode = NAND_ECC_HW;
+ }
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-ecc-bits", &prop))
+ pdata->ecc_bits = prop;
+
+ prop = of_get_nand_bus_width(pdev->dev.of_node);
+ if (0 < prop || !of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-nand-buswidth", &prop))
+ if (prop == 16)
+ pdata->options |= NAND_BUSWIDTH_16;
+ if (of_property_read_bool(pdev->dev.of_node,
+ "nand-on-flash-bbt") ||
+ of_property_read_bool(pdev->dev.of_node,
+ "ti,davinci-nand-use-bbt"))
+ pdata->bbt_options = NAND_BBT_USE_FLASH;
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "ti,keystone-nand")) {
+ pdata->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+ }
+
+ return dev_get_platdata(&pdev->dev);
+}
+#else
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ return dev_get_platdata(&pdev->dev);
+}
+#endif
+
+static int nand_davinci_probe(struct platform_device *pdev)
+{
+ struct davinci_nand_pdata *pdata;
+ struct davinci_nand_info *info;
+ struct resource *res1;
+ struct resource *res2;
+ void __iomem *vaddr;
+ void __iomem *base;
+ int ret;
+ uint32_t val;
+ nand_ecc_modes_t ecc_mode;
+
+ pdata = nand_davinci_get_pdata(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ /* insist on board-specific configuration */
+ if (!pdata)
+ return -ENODEV;
+
+ /* which external chipselect will we be managing? */
+ if (pdev->id < 0 || pdev->id > 3)
+ return -ENODEV;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, info);
+
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res1 || !res2) {
+ dev_err(&pdev->dev, "resource missing\n");
+ return -EINVAL;
+ }
+
+ vaddr = devm_ioremap_resource(&pdev->dev, res1);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ /*
+ * This register range is used to set up NAND settings. When the TI
+ * AEMIF driver is in use, the same memory address range has already
+ * been requested by AEMIF, so we cannot request it twice; just ioremap it.
+ * The AEMIF and NAND drivers do not use the same registers in this range.
+ */
+ base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
+ return -EADDRNOTAVAIL;
+ }
+
+ info->dev = &pdev->dev;
+ info->base = base;
+ info->vaddr = vaddr;
+
+ info->mtd.priv = &info->chip;
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.owner = THIS_MODULE;
+
+ info->mtd.dev.parent = &pdev->dev;
+
+ info->chip.IO_ADDR_R = vaddr;
+ info->chip.IO_ADDR_W = vaddr;
+ info->chip.chip_delay = 0;
+ info->chip.select_chip = nand_davinci_select_chip;
+
+ /* options such as NAND_BBT_USE_FLASH */
+ info->chip.bbt_options = pdata->bbt_options;
+ /* options such as 16-bit widths */
+ info->chip.options = pdata->options;
+ info->chip.bbt_td = pdata->bbt_td;
+ info->chip.bbt_md = pdata->bbt_md;
+ info->timing = pdata->timing;
+
+ info->ioaddr = (uint32_t __force) vaddr;
+
+ info->current_cs = info->ioaddr;
+ info->core_chipsel = pdev->id;
+ info->mask_chipsel = pdata->mask_chipsel;
+
+ /* use nandboot-capable ALE/CLE masks by default */
+ info->mask_ale = pdata->mask_ale ? : MASK_ALE;
+ info->mask_cle = pdata->mask_cle ? : MASK_CLE;
+
+ /* Set address of hardware control function */
+ info->chip.cmd_ctrl = nand_davinci_hwcontrol;
+ info->chip.dev_ready = nand_davinci_dev_ready;
+
+ /* Speed up buffer I/O */
+ info->chip.read_buf = nand_davinci_read_buf;
+ info->chip.write_buf = nand_davinci_write_buf;
+
+ /* Use board-specific ECC config */
+ ecc_mode = pdata->ecc_mode;
+
+ ret = -EINVAL;
+ switch (ecc_mode) {
+ case NAND_ECC_NONE:
+ case NAND_ECC_SOFT:
+ pdata->ecc_bits = 0;
+ break;
+ case NAND_ECC_HW:
+ if (pdata->ecc_bits == 4) {
+ /* No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
+
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ return ret;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ } else {
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ }
+ info->chip.ecc.size = 512;
+ info->chip.ecc.strength = pdata->ecc_bits;
+ break;
+ default:
+ return -EINVAL;
+ }
+ info->chip.ecc.mode = ecc_mode;
+
+ info->clk = devm_clk_get(&pdev->dev, "aemif");
+ if (IS_ERR(info->clk)) {
+ ret = PTR_ERR(info->clk);
+ dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
+ ret);
+ goto err_clk_enable;
+ }
+
+ spin_lock_irq(&davinci_nand_lock);
+
+ /* put CSxNAND into NAND mode */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val |= BIT(info->core_chipsel);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ spin_unlock_irq(&davinci_nand_lock);
+
+ /* Scan to find existence of the device(s) */
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
+ goto err;
+ }
+
+ /* Update ECC layout if needed ... for 1-bit HW ECC, the default
+ * is OK, but it allocates 6 bytes when only 3 are needed (for
+ * each 512 bytes). For the 4-bit HW ECC, that default is not
+ * usable: 10 bytes are needed, not 6.
+ */
+ if (pdata->ecc_bits == 4) {
+ int chunks = info->mtd.writesize / 512;
+
+ if (!chunks || info->mtd.oobsize < 16) {
+ dev_dbg(&pdev->dev, "too small\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ info->ecclayout = hwecc4_small;
+ info->ecclayout.oobfree[1].length =
+ info->mtd.oobsize - 16;
+ goto syndrome_done;
+ }
+ if (chunks == 4) {
+ info->ecclayout = hwecc4_2048;
+ info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ goto syndrome_done;
+ }
+
+ /* 4KiB page chips are not yet supported. The eccpos from
+ * nand_ecclayout cannot hold 80 bytes and change to eccpos[]
+ * breaks userspace ioctl interface with mtd-utils. Once we
+ * resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used
+ * for the 4KiB page chips.
+ *
+ * TODO: Note that nand_ecclayout has now been expanded and can
+ * hold plenty of OOB entries.
+ */
+ dev_warn(&pdev->dev,
+ "no 4-bit ECC support yet for 4KiB-page NAND\n");
+ ret = -EIO;
+ goto err;
+
+syndrome_done:
+ info->chip.ecc.layout = &info->ecclayout;
+ }
+
+ ret = nand_scan_tail(&info->mtd);
+ if (ret < 0)
+ goto err;
+
+ if (pdata->parts)
+ ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
+ pdata->parts, pdata->nr_parts);
+ else {
+ struct mtd_part_parser_data ppdata;
+
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(&info->mtd, NULL, &ppdata,
+ NULL, 0);
+ }
+ if (ret < 0)
+ goto err;
+
+ val = davinci_nand_readl(info, NRCSR_OFFSET);
+ dev_info(&pdev->dev, "controller rev. %d.%d\n",
+ (val >> 8) & 0xff, val & 0xff);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(info->clk);
+
+err_clk_enable:
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc_mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+ return ret;
+}
+
+static int nand_davinci_remove(struct platform_device *pdev)
+{
+ struct davinci_nand_info *info = platform_get_drvdata(pdev);
+
+ spin_lock_irq(&davinci_nand_lock);
+ if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ nand_release(&info->mtd);
+
+ clk_disable_unprepare(info->clk);
+
+ return 0;
+}
+
+static struct platform_driver nand_davinci_driver = {
+ .probe = nand_davinci_probe,
+ .remove = nand_davinci_remove,
+ .driver = {
+ .name = "davinci_nand",
+ .of_match_table = of_match_ptr(davinci_nand_of_match),
+ },
+};
+MODULE_ALIAS("platform:davinci_nand");
+
+module_platform_driver(nand_davinci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("Davinci NAND flash driver");
+
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
new file mode 100644
index 000000000..870c7fc0f
--- /dev/null
+++ b/drivers/mtd/nand/denali.c
@@ -0,0 +1,1621 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+
+#include "denali.h"
+
+MODULE_LICENSE("GPL");
+
+/*
+ * We define a module parameter that allows the user to override
+ * the hardware and decide what timing mode should be used.
+ */
+#define NAND_DEFAULT_TIMINGS -1
+
+static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
+module_param(onfi_timing_mode, int, S_IRUGO);
+MODULE_PARM_DESC(onfi_timing_mode,
+ "Overrides default ONFI setting. -1 indicates use default timings");
+
+#define DENALI_NAND_NAME "denali-nand"
+
+/*
+ * We define a macro here that combines all interrupts this driver uses into
+ * a single constant value, for convenience.
+ */
+#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \
+ INTR_STATUS__ECC_TRANSACTION_DONE | \
+ INTR_STATUS__ECC_ERR | \
+ INTR_STATUS__PROGRAM_FAIL | \
+ INTR_STATUS__LOAD_COMP | \
+ INTR_STATUS__PROGRAM_COMP | \
+ INTR_STATUS__TIME_OUT | \
+ INTR_STATUS__ERASE_FAIL | \
+ INTR_STATUS__RST_COMP | \
+ INTR_STATUS__ERASE_COMP)
+
+/*
+ * indicates whether the internal value for the flash bank is valid
+ */
+#define CHIP_SELECT_INVALID -1
+
+#define SUPPORT_8BITECC 1
+
+/*
+ * This macro divides two integers and rounds fractional values up
+ * to the nearest integer value.
+ */
+#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
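+
+/* For example, CEIL_DIV(100, 30) == 4 while CEIL_DIV(90, 30) == 3. */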
+
+/*
+ * this macro allows us to convert from an MTD structure to our own
+ * device context (denali) structure.
+ */
+#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
+
+/*
+ * These constants are defined by the driver to enable common driver
+ * configuration options.
+ */
+#define SPARE_ACCESS 0x41
+#define MAIN_ACCESS 0x42
+#define MAIN_SPARE_ACCESS 0x43
+#define PIPELINE_ACCESS 0x2000
+
+#define DENALI_READ 0
+#define DENALI_WRITE 0x100
+
+/* types of device accesses. We can issue commands and get status */
+#define COMMAND_CYCLE 0
+#define ADDR_CYCLE 1
+#define STATUS_CYCLE 2
+
+/*
+ * this is a helper macro that allows us to
+ * format the bank into the proper bits for the controller
+ */
+#define BANK(x) ((x) << 24)
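+
+/* e.g. BANK(2) == 0x02000000: the bank number lands in bits 25:24. */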
+
+/* forward declarations */
+static void clear_interrupts(struct denali_nand_info *denali);
+static uint32_t wait_for_irq(struct denali_nand_info *denali,
+ uint32_t irq_mask);
+static void denali_irq_enable(struct denali_nand_info *denali,
+ uint32_t int_mask);
+static uint32_t read_interrupt_status(struct denali_nand_info *denali);
+
+/*
+ * Certain operations for the denali NAND controller use an indexed mode to
+ * read/write data. The operation is performed by writing the address value
+ * of the command to the device memory followed by the data. This function
+ * abstracts this common operation.
+ */
+static void index_addr(struct denali_nand_info *denali,
+ uint32_t address, uint32_t data)
+{
+ iowrite32(address, denali->flash_mem);
+ iowrite32(data, denali->flash_mem + 0x10);
+}
+
+/* Perform an indexed read of the device */
+static void index_addr_read_data(struct denali_nand_info *denali,
+ uint32_t address, uint32_t *pdata)
+{
+ iowrite32(address, denali->flash_mem);
+ *pdata = ioread32(denali->flash_mem + 0x10);
+}
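+
+/*
+ * Usage sketch (mirroring find_valid_banks() below): a READ ID
+ * sequence issues the command byte, the address byte, and then a data
+ * read, with the low index bits selecting the cycle type (cf. the
+ * COMMAND_CYCLE/ADDR_CYCLE/STATUS_CYCLE constants above):
+ *
+ *	index_addr(denali, MODE_11 | BANK(0) | 0, 0x90);
+ *	index_addr(denali, MODE_11 | BANK(0) | 1, 0);
+ *	index_addr_read_data(denali, MODE_11 | BANK(0) | 2, &id);
+ */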
+
+/*
+ * We need to buffer some data for some of the NAND core routines.
+ * These helpers manage that buffering.
+ */
+static void reset_buf(struct denali_nand_info *denali)
+{
+ denali->buf.head = denali->buf.tail = 0;
+}
+
+static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
+{
+ denali->buf.buf[denali->buf.tail++] = byte;
+}
+
+/* reads the status of the device */
+static void read_status(struct denali_nand_info *denali)
+{
+ uint32_t cmd;
+
+ /* initialize the data buffer to store status */
+ reset_buf(denali);
+
+ cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
+ if (cmd)
+ write_byte_to_buf(denali, NAND_STATUS_WP);
+ else
+ write_byte_to_buf(denali, 0);
+}
+
+/* resets a specific device connected to the core */
+static void reset_bank(struct denali_nand_info *denali)
+{
+ uint32_t irq_status;
+ uint32_t irq_mask = INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT;
+
+ clear_interrupts(denali);
+
+ iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
+
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status & INTR_STATUS__TIME_OUT)
+ dev_err(denali->dev, "reset bank failed.\n");
+}
+
+/* Reset the flash controller */
+static uint16_t denali_nand_reset(struct denali_nand_info *denali)
+{
+ int i;
+
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0; i < denali->max_banks; i++)
+ iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+ denali->flash_reg + INTR_STATUS(i));
+
+ for (i = 0; i < denali->max_banks; i++) {
+ iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
+ while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
+ (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
+ cpu_relax();
+ if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
+ INTR_STATUS__TIME_OUT)
+ dev_dbg(denali->dev,
+ "NAND Reset operation timed out on bank %d\n", i);
+ }
+
+ for (i = 0; i < denali->max_banks; i++)
+ iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+ denali->flash_reg + INTR_STATUS(i));
+
+ return PASS;
+}
+
+/*
+ * this routine calculates the ONFI timing values for a given mode and
+ * programs the clocking register accordingly. The mode is determined by
+ * the get_onfi_nand_para routine.
+ */
+static void nand_onfi_timing_set(struct denali_nand_info *denali,
+ uint16_t mode)
+{
+ uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
+ uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
+ uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
+ uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
+ uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
+ uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
+ uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
+ uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
+ uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
+ uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
+ uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
+ uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
+
+ uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
+ uint16_t dv_window = 0;
+ uint16_t en_lo, en_hi;
+ uint16_t acc_clks;
+ uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
+
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ en_lo = CEIL_DIV(Trp[mode], CLK_X);
+ en_hi = CEIL_DIV(Treh[mode], CLK_X);
+#if ONFI_BLOOM_TIME
+ if ((en_hi * CLK_X) < (Treh[mode] + 2))
+ en_hi++;
+#endif
+
+ if ((en_lo + en_hi) * CLK_X < Trc[mode])
+ en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
+
+ if ((en_lo + en_hi) < CLK_MULTI)
+ en_lo += CLK_MULTI - en_lo - en_hi;
+
+ while (dv_window < 8) {
+ data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
+
+ data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
+
+ data_invalid = data_invalid_rhoh < data_invalid_rloh ?
+ data_invalid_rhoh : data_invalid_rloh;
+
+ dv_window = data_invalid - Trea[mode];
+
+ if (dv_window < 8)
+ en_lo++;
+ }
+
+ acc_clks = CEIL_DIV(Trea[mode], CLK_X);
+
+ while (acc_clks * CLK_X - Trea[mode] < 3)
+ acc_clks++;
+
+ if (data_invalid - acc_clks * CLK_X < 2)
+ dev_warn(denali->dev, "%s, Line %d: Warning!\n",
+ __FILE__, __LINE__);
+
+ addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
+ re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
+ re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
+ we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
+ cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
+ if (cs_cnt == 0)
+ cs_cnt = 1;
+
+ if (Tcea[mode]) {
+ while (cs_cnt * CLK_X + Trea[mode] < Tcea[mode])
+ cs_cnt++;
+ }
+
+#if MODE5_WORKAROUND
+ if (mode == 5)
+ acc_clks = 5;
+#endif
+
+ /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
+ if (ioread32(denali->flash_reg + MANUFACTURER_ID) == 0 &&
+ ioread32(denali->flash_reg + DEVICE_ID) == 0x88)
+ acc_clks = 6;
+
+ iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
+ iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
+ iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
+ iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
+ iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
+ iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
+ iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
+ iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
+}
+
+/* queries the NAND device to see what ONFI modes it supports. */
+static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
+{
+ int i;
+
+ /*
+ * we need not do a reset here because the driver has already
+ * reset all the banks
+ */
+ if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
+ ONFI_TIMING_MODE__VALUE))
+ return FAIL;
+
+ for (i = 5; i > 0; i--) {
+ if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
+ (0x01 << i))
+ break;
+ }
+
+ nand_onfi_timing_set(denali, i);
+
+ /*
+ * All the ONFI devices we know of support the page cache
+ * read/write feature, so pipeline_rw_ahead could be enabled here:
+ */
+ /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
+ /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */
+
+ return PASS;
+}
+
+static void get_samsung_nand_para(struct denali_nand_info *denali,
+ uint8_t device_id)
+{
+ if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
+ /* Set timing register values according to datasheet */
+ iowrite32(5, denali->flash_reg + ACC_CLKS);
+ iowrite32(20, denali->flash_reg + RE_2_WE);
+ iowrite32(12, denali->flash_reg + WE_2_RE);
+ iowrite32(14, denali->flash_reg + ADDR_2_DATA);
+ iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
+ iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
+ iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
+ }
+}
+
+static void get_toshiba_nand_para(struct denali_nand_info *denali)
+{
+ uint32_t tmp;
+
+ /*
+ * Workaround to fix a controller bug which reports a wrong
+ * spare area size for some kind of Toshiba NAND device
+ */
+ if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
+ (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
+ iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
+ ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ iowrite32(tmp,
+ denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+#if SUPPORT_15BITECC
+ iowrite32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ iowrite32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+ }
+}
+
+static void get_hynix_nand_para(struct denali_nand_info *denali,
+ uint8_t device_id)
+{
+ uint32_t main_size, spare_size;
+
+ switch (device_id) {
+ case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
+ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
+ iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
+ iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
+ iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ main_size = 4096 *
+ ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ spare_size = 224 *
+ ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ iowrite32(main_size,
+ denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
+ iowrite32(spare_size,
+ denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+ iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
+#if SUPPORT_15BITECC
+ iowrite32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ iowrite32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+ break;
+ default:
+ dev_warn(denali->dev,
+ "Spectra: Unknown Hynix NAND (Device ID: 0x%x).\n"
+ "Will use default parameter values instead.\n",
+ device_id);
+ }
+}
+
+/*
+ * determines how many NAND chips are connected to the controller. Note for
+ * Intel CE4100 devices we don't support more than one device.
+ */
+static void find_valid_banks(struct denali_nand_info *denali)
+{
+ uint32_t id[denali->max_banks];
+ int i;
+
+ denali->total_used_banks = 1;
+ for (i = 0; i < denali->max_banks; i++) {
+ index_addr(denali, MODE_11 | (i << 24) | 0, 0x90);
+ index_addr(denali, MODE_11 | (i << 24) | 1, 0);
+ index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]);
+
+ dev_dbg(denali->dev,
+ "Return 1st ID for bank[%d]: %x\n", i, id[i]);
+
+ if (i == 0) {
+ if (!(id[i] & 0x0ff))
+ break; /* no device ID on the first bank: nothing to probe */
+ } else {
+ if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
+ denali->total_used_banks++;
+ else
+ break;
+ }
+ }
+
+ if (denali->platform == INTEL_CE4100) {
+ /*
+ * Platform limitations of the CE4100 device limit
+ * users to a single chip solution for NAND.
+ * Multichip support is not enabled.
+ */
+ if (denali->total_used_banks != 1) {
+ dev_err(denali->dev,
+ "Sorry, Intel CE4100 only supports a single NAND device.\n");
+ BUG();
+ }
+ }
+ dev_dbg(denali->dev,
+ "denali->total_used_banks: %d\n", denali->total_used_banks);
+}
+
+/*
+ * Use the configuration feature register to determine the maximum number of
+ * banks that the hardware supports.
+ */
+static void detect_max_banks(struct denali_nand_info *denali)
+{
+ uint32_t features = ioread32(denali->flash_reg + FEATURES);
+
+ denali->max_banks = 2 << (features & FEATURES__N_BANKS);
+}
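+
+/*
+ * For example, if the FEATURES__N_BANKS field reads 1, max_banks
+ * becomes 2 << 1 == 4 (assuming the field is the log2-style encoding
+ * the shift above suggests).
+ */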
+
+static void detect_partition_feature(struct denali_nand_info *denali)
+{
+ /*
+ * For MRST platform, denali->fwblks represent the
+ * number of blocks firmware is taken,
+ * FW is in protect partition and MTD driver has no
+ * permission to access it. So let driver know how many
+ * blocks it can't touch.
+ */
+ if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
+ if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
+ PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
+ denali->fwblks =
+ ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
+ MIN_MAX_BANK__MIN_VALUE) *
+ denali->blksperchip)
+ +
+ (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
+ MIN_BLK_ADDR__VALUE);
+ } else {
+ denali->fwblks = SPECTRA_START_BLOCK;
+ }
+ } else {
+ denali->fwblks = SPECTRA_START_BLOCK;
+ }
+}
+
+static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
+{
+ uint16_t status = PASS;
+ uint32_t id_bytes[8], addr;
+ uint8_t maf_id, device_id;
+ int i;
+
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ /*
+ * Use the READ ID method to get the device ID and other params.
+ * For some NAND chips, the controller can't report the correct
+ * device ID by reading from the DEVICE_ID register.
+ */
+ addr = MODE_11 | BANK(denali->flash_bank);
+ index_addr(denali, addr | 0, 0x90);
+ index_addr(denali, addr | 1, 0);
+ for (i = 0; i < 8; i++)
+ index_addr_read_data(denali, addr | 2, &id_bytes[i]);
+ maf_id = id_bytes[0];
+ device_id = id_bytes[1];
+
+ if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
+ ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
+ if (FAIL == get_onfi_nand_para(denali))
+ return FAIL;
+ } else if (maf_id == 0xEC) { /* Samsung NAND */
+ get_samsung_nand_para(denali, device_id);
+ } else if (maf_id == 0x98) { /* Toshiba NAND */
+ get_toshiba_nand_para(denali);
+ } else if (maf_id == 0xAD) { /* Hynix NAND */
+ get_hynix_nand_para(denali, device_id);
+ }
+
+ dev_info(denali->dev,
+ "Dump timing register values:\n"
+ "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
+ "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(denali->flash_reg + ACC_CLKS),
+ ioread32(denali->flash_reg + RE_2_WE),
+ ioread32(denali->flash_reg + RE_2_RE),
+ ioread32(denali->flash_reg + WE_2_RE),
+ ioread32(denali->flash_reg + ADDR_2_DATA),
+ ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
+ ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
+ ioread32(denali->flash_reg + CS_SETUP_CNT));
+
+ find_valid_banks(denali);
+
+ detect_partition_feature(denali);
+
+ /*
+ * If the user specified to override the default timings
+ * with a specific ONFI mode, we apply those changes here.
+ */
+ if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
+ nand_onfi_timing_set(denali, onfi_timing_mode);
+
+ return status;
+}
+
+static void denali_set_intr_modes(struct denali_nand_info *denali,
+ uint16_t INT_ENABLE)
+{
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (INT_ENABLE)
+ iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
+ else
+ iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
+}
+
+/*
+ * validation function to verify that the controlling software is making
+ * a valid request
+ */
+static inline bool is_flash_bank_valid(int flash_bank)
+{
+ return flash_bank >= 0 && flash_bank < 4;
+}
+
+static void denali_irq_init(struct denali_nand_info *denali)
+{
+ uint32_t int_mask;
+ int i;
+
+ /* Disable global interrupts */
+ denali_set_intr_modes(denali, false);
+
+ int_mask = DENALI_IRQ_ALL;
+
+ /* Clear all status bits */
+ for (i = 0; i < denali->max_banks; ++i)
+ iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
+
+ denali_irq_enable(denali, int_mask);
+}
+
+static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
+{
+ denali_set_intr_modes(denali, false);
+ free_irq(irqnum, denali);
+}
+
+static void denali_irq_enable(struct denali_nand_info *denali,
+ uint32_t int_mask)
+{
+ int i;
+
+ for (i = 0; i < denali->max_banks; ++i)
+ iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
+}
+
+/*
+ * Masks the interrupt status down to the interrupts this driver
+ * cares about, to reduce the overhead of servicing unrelated ones.
+ */
+static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
+{
+ return read_interrupt_status(denali) & DENALI_IRQ_ALL;
+}
+
+/* Interrupts are cleared by writing a 1 to the appropriate status bit */
+static inline void clear_interrupt(struct denali_nand_info *denali,
+ uint32_t irq_mask)
+{
+ uint32_t intr_status_reg;
+
+ intr_status_reg = INTR_STATUS(denali->flash_bank);
+
+ iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
+}
+
+static void clear_interrupts(struct denali_nand_info *denali)
+{
+ uint32_t status;
+
+ spin_lock_irq(&denali->irq_lock);
+
+ status = read_interrupt_status(denali);
+ clear_interrupt(denali, status);
+
+ denali->irq_status = 0x0;
+ spin_unlock_irq(&denali->irq_lock);
+}
+
+static uint32_t read_interrupt_status(struct denali_nand_info *denali)
+{
+ uint32_t intr_status_reg;
+
+ intr_status_reg = INTR_STATUS(denali->flash_bank);
+
+ return ioread32(denali->flash_reg + intr_status_reg);
+}
+
+/*
+ * This is the interrupt service routine. It handles all interrupts
+ * sent to this device. Note that on CE4100, this is a shared interrupt.
+ */
+static irqreturn_t denali_isr(int irq, void *dev_id)
+{
+ struct denali_nand_info *denali = dev_id;
+ uint32_t irq_status;
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&denali->irq_lock);
+
+ /* check to see if a valid NAND chip has been selected. */
+ if (is_flash_bank_valid(denali->flash_bank)) {
+ /*
+ * check to see if controller generated the interrupt,
+ * since this is a shared interrupt
+ */
+ irq_status = denali_irq_detected(denali);
+ if (irq_status != 0) {
+ /* handle interrupt */
+ /* first acknowledge it */
+ clear_interrupt(denali, irq_status);
+ /*
+ * store the status in the device context for someone
+ * to read
+ */
+ denali->irq_status |= irq_status;
+ /* notify anyone who cares that it happened */
+ complete(&denali->complete);
+ /* tell the OS that we've handled this */
+ result = IRQ_HANDLED;
+ }
+ }
+ spin_unlock(&denali->irq_lock);
+ return result;
+}
+
+static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
+{
+ unsigned long comp_res;
+ uint32_t intr_status;
+ unsigned long timeout = msecs_to_jiffies(1000);
+
+ do {
+ comp_res =
+ wait_for_completion_timeout(&denali->complete, timeout);
+ spin_lock_irq(&denali->irq_lock);
+ intr_status = denali->irq_status;
+
+ if (intr_status & irq_mask) {
+ denali->irq_status &= ~irq_mask;
+ spin_unlock_irq(&denali->irq_lock);
+ /* our interrupt was detected */
+ break;
+ }
+
+ /*
+ * these are not the interrupts you are looking for -
+ * need to wait again
+ */
+ spin_unlock_irq(&denali->irq_lock);
+ } while (comp_res != 0);
+
+ if (comp_res == 0) {
+ /* timeout */
+ pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
+ intr_status, irq_mask);
+
+ intr_status = 0;
+ }
+ return intr_status;
+}
+
+/*
+ * This helper function sets up the registers for ECC and for whether
+ * the spare area will be transferred.
+ */
+static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
+ bool transfer_spare)
+{
+ int ecc_en_flag, transfer_spare_flag;
+
+ /* set ECC, transfer spare bits if needed */
+ ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
+ transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
+
+ /* Enable spare area/ECC per user's request. */
+ iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
+ iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
+}
+
+/*
+ * sends a pipeline command operation to the controller. See the Denali NAND
+ * controller's user guide for more information (section 4.2.3.6).
+ */
+static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
+ bool ecc_en, bool transfer_spare,
+ int access_type, int op)
+{
+ int status = PASS;
+ uint32_t page_count = 1;
+ uint32_t addr, cmd, irq_status, irq_mask;
+
+ if (op == DENALI_READ)
+ irq_mask = INTR_STATUS__LOAD_COMP;
+ else if (op == DENALI_WRITE)
+ irq_mask = 0;
+ else
+ BUG();
+
+ setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
+
+ clear_interrupts(denali);
+
+ addr = BANK(denali->flash_bank) | denali->page;
+
+ if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
+ cmd = MODE_01 | addr;
+ iowrite32(cmd, denali->flash_mem);
+ } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
+ /* read spare area */
+ cmd = MODE_10 | addr;
+ index_addr(denali, cmd, access_type);
+
+ cmd = MODE_01 | addr;
+ iowrite32(cmd, denali->flash_mem);
+ } else if (op == DENALI_READ) {
+ /* setup page read request for access type */
+ cmd = MODE_10 | addr;
+ index_addr(denali, cmd, access_type);
+
+ /*
+ * page 33 of the NAND controller spec indicates we should not
+ * use the pipeline commands in Spare area only mode.
+ * So we don't.
+ */
+ if (access_type == SPARE_ACCESS) {
+ cmd = MODE_01 | addr;
+ iowrite32(cmd, denali->flash_mem);
+ } else {
+ index_addr(denali, cmd,
+ PIPELINE_ACCESS | op | page_count);
+
+ /*
+ * wait for command to be accepted
+ * can always use status0 bit as the
+ * mask is identical for each bank.
+ */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0) {
+ dev_err(denali->dev,
+ "cmd, page, addr on timeout (0x%x, 0x%x, 0x%x)\n",
+ cmd, denali->page, addr);
+ status = FAIL;
+ } else {
+ cmd = MODE_01 | addr;
+ iowrite32(cmd, denali->flash_mem);
+ }
+ }
+ }
+ return status;
+}
+
+/* helper function that simply writes a buffer to the flash */
+static int write_data_to_flash_mem(struct denali_nand_info *denali,
+ const uint8_t *buf, int len)
+{
+ uint32_t *buf32;
+ int i;
+
+ /*
+ * verify that the len is a multiple of 4.
+ * see comment in read_data_from_flash_mem()
+ */
+ BUG_ON((len % 4) != 0);
+
+ /* write the data to the flash memory */
+ buf32 = (uint32_t *)buf;
+ for (i = 0; i < len / 4; i++)
+ iowrite32(*buf32++, denali->flash_mem + 0x10);
+ return i * 4; /* intent is to return the number of bytes written */
+}
+
+/* helper function that simply reads a buffer from the flash */
+static int read_data_from_flash_mem(struct denali_nand_info *denali,
+ uint8_t *buf, int len)
+{
+ uint32_t *buf32;
+ int i;
+
+ /*
+ * We assume that len is a multiple of 4; if not, it would be nice
+ * to know about it ASAP rather than have random failures.
+ * This assumption holds because this function is designed to read
+ * flash pages, whose sizes are multiples of 4 bytes.
+ */
+ BUG_ON((len % 4) != 0);
+
+ /* transfer the data from the flash */
+ buf32 = (uint32_t *)buf;
+ for (i = 0; i < len / 4; i++)
+ *buf32++ = ioread32(denali->flash_mem + 0x10);
+ return i * 4; /* intent is to return the number of bytes read */
+}
+
+/* writes OOB data to the device */
+static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_status;
+ uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
+ INTR_STATUS__PROGRAM_FAIL;
+ int status = 0;
+
+ denali->page = page;
+
+ if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
+ DENALI_WRITE) == PASS) {
+ write_data_to_flash_mem(denali, buf, mtd->oobsize);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0) {
+ dev_err(denali->dev, "OOB write failed\n");
+ status = -EIO;
+ }
+ } else {
+ dev_err(denali->dev, "unable to send pipeline command\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* reads OOB data from the device */
+static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_mask = INTR_STATUS__LOAD_COMP;
+ uint32_t irq_status, addr, cmd;
+
+ denali->page = page;
+
+ if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
+ DENALI_READ) == PASS) {
+ read_data_from_flash_mem(denali, buf, mtd->oobsize);
+
+ /*
+ * wait for command to be accepted
+ * can always use status0 bit as the
+ * mask is identical for each bank.
+ */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ dev_err(denali->dev, "page on OOB timeout %d\n",
+ denali->page);
+
+ /*
+ * We set the device back to MAIN_ACCESS here as I observed
+ * instability with the controller if you do a block erase
+ * and the last transaction was a SPARE_ACCESS. Block erase
+ * is reliable (according to the MTD test infrastructure)
+ * if you are in MAIN_ACCESS.
+ */
+ addr = BANK(denali->flash_bank) | denali->page;
+ cmd = MODE_10 | addr;
+ index_addr(denali, cmd, MAIN_ACCESS);
+ }
+}
+
+/*
+ * this function examines buffers to see if they contain data that
+ * indicate that the buffer is part of an erased region of flash.
+ */
+static bool is_erased(uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] != 0xFF)
+ return false;
+ return true;
+}
+#define ECC_SECTOR_SIZE 512
+
+#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
+#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
+#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
+#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
+#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
+#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
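+
+/*
+ * Example decode, assuming the field layout the shifts above imply
+ * (sector number above bit 12, byte offset in the low bits): an
+ * ECC_ERROR_ADDRESS value of 0x3005 names sector 3, byte 5.
+ */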
+
+static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
+ uint32_t irq_status, unsigned int *max_bitflips)
+{
+ bool check_erased_page = false;
+ unsigned int bitflips = 0;
+
+ if (irq_status & INTR_STATUS__ECC_ERR) {
+ /* read the ECC errors. we'll ignore them for now */
+ uint32_t err_address, err_correction_info, err_byte,
+ err_sector, err_device, err_correction_value;
+ denali_set_intr_modes(denali, false);
+
+ do {
+ err_address = ioread32(denali->flash_reg +
+ ECC_ERROR_ADDRESS);
+ err_sector = ECC_SECTOR(err_address);
+ err_byte = ECC_BYTE(err_address);
+
+ err_correction_info = ioread32(denali->flash_reg +
+ ERR_CORRECTION_INFO);
+ err_correction_value =
+ ECC_CORRECTION_VALUE(err_correction_info);
+ err_device = ECC_ERR_DEVICE(err_correction_info);
+
+ if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
+ /*
+ * If err_byte is larger than ECC_SECTOR_SIZE,
+ * the error happened in the OOB area, so we
+ * ignore it; there is no need to correct it.
+ * err_device identifies which NAND device the
+ * error bits occurred on when more than one
+ * NAND is connected.
+ */
+ if (err_byte < ECC_SECTOR_SIZE) {
+ int offset;
+
+ offset = (err_sector *
+ ECC_SECTOR_SIZE +
+ err_byte) *
+ denali->devnum +
+ err_device;
+ /* correct the ECC error */
+ buf[offset] ^= err_correction_value;
+ denali->mtd.ecc_stats.corrected++;
+ bitflips++;
+ }
+ } else {
+ /*
+ * if the error is not correctable, need to
+ * look at the page to see if it is an erased
+ * page. if so, then it's not a real ECC error
+ */
+ check_erased_page = true;
+ }
+ } while (!ECC_LAST_ERR(err_correction_info));
+ /*
+ * Once all ECC errors have been handled, the controller
+ * triggers an ECC_TRANSACTION_DONE interrupt, so just
+ * wait here until that interrupt arrives.
+ */
+ while (!(read_interrupt_status(denali) &
+ INTR_STATUS__ECC_TRANSACTION_DONE))
+ cpu_relax();
+ clear_interrupts(denali);
+ denali_set_intr_modes(denali, true);
+ }
+ *max_bitflips = bitflips;
+ return check_erased_page;
+}
+
+/* programs the controller to either enable/disable DMA transfers */
+static void denali_enable_dma(struct denali_nand_info *denali, bool en)
+{
+ iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
+ ioread32(denali->flash_reg + DMA_ENABLE);
+}
+
+/* setups the HW to perform the data DMA */
+static void denali_setup_dma(struct denali_nand_info *denali, int op)
+{
+ uint32_t mode;
+ const int page_count = 1;
+ uint32_t addr = denali->buf.dma_buf;
+
+ mode = MODE_10 | BANK(denali->flash_bank);
+
+ /* DMA is a four step process */
+
+ /* 1. setup transfer type and # of pages */
+ index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
+
+ /* 2. set memory high address bits 23:8 */
+ index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);
+
+ /* 3. set memory low address bits 23:8 */
+ index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);
+
+ /* 4. interrupt when complete, burst len = 64 bytes */
+ index_addr(denali, mode | 0x14000, 0x2400);
+}
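+
+/*
+ * For instance, with a (hypothetical) DMA buffer address of
+ * 0x12345678, step 2 above writes index (mode | 0x00123400) and
+ * step 3 writes index (mode | 0x00567800): each 16-bit half of the
+ * address lands in bits 23:8 of the index word.
+ */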
+
+/*
+ * writes a page. user specifies type, and this function handles the
+ * configuration details.
+ */
+static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, bool raw_xfer)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+ uint32_t irq_status;
+ uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
+ INTR_STATUS__PROGRAM_FAIL;
+
+ /*
+ * if it is a raw xfer, we want to disable ecc and send the spare area.
+ * !raw_xfer - enable ecc
+ * raw_xfer - transfer spare
+ */
+ setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
+
+ /* copy buffer into DMA buffer */
+ memcpy(denali->buf.buf, buf, mtd->writesize);
+
+ if (raw_xfer) {
+ /* transfer the data to the spare area */
+ memcpy(denali->buf.buf + mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize);
+ }
+
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
+
+ clear_interrupts(denali);
+ denali_enable_dma(denali, true);
+
+ denali_setup_dma(denali, DENALI_WRITE);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0) {
+ dev_err(denali->dev, "timeout on write_page (type = %d)\n",
+ raw_xfer);
+ denali->status = NAND_STATUS_FAIL;
+ }
+
+ denali_enable_dma(denali, false);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+/* NAND core entry points */
+
+/*
+ * this is the callback that the NAND core calls to write a page. Since
+ * writing a page with ECC or without is similar, all the work is done
+ * by write_page above.
+ */
+static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ /*
+ * for regular page writes, we let HW handle all the ECC
+ * data written to the device.
+ */
+ return write_page(mtd, chip, buf, false);
+}
+
+/*
+ * This is the callback that the NAND core calls to write a page without ECC.
+ * raw access is similar to ECC page writes, so all the work is done in the
+ * write_page() function above.
+ */
+static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ /*
+ * for raw page writes, we want to disable ECC and simply write
+ * whatever data is in the buffer.
+ */
+ return write_page(mtd, chip, buf, true);
+}
+
+static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return write_oob_data(mtd, chip->oob_poi, page);
+}
+
+static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ read_oob_data(mtd, chip->oob_poi, page);
+
+ return 0;
+}
+
+static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ unsigned int max_bitflips;
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status;
+ uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
+ INTR_STATUS__ECC_ERR;
+ bool check_erased_page = false;
+
+ if (page != denali->page) {
+ dev_err(denali->dev,
+ "IN %s: page %d is not equal to denali->page %d",
+ __func__, page, denali->page);
+ BUG();
+ }
+
+ setup_ecc_for_xfer(denali, true, false);
+
+ denali_enable_dma(denali, true);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+ clear_interrupts(denali);
+ denali_setup_dma(denali, DENALI_READ);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+ memcpy(buf, denali->buf.buf, mtd->writesize);
+
+ check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
+ denali_enable_dma(denali, false);
+
+ if (check_erased_page) {
+ read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
+
+ /* check ECC failures that may have occurred on erased pages */
+ if (!is_erased(buf, denali->mtd.writesize))
+ denali->mtd.ecc_stats.failed++;
+ if (!is_erased(chip->oob_poi, denali->mtd.oobsize))
+ denali->mtd.ecc_stats.failed++;
+ }
+ return max_bitflips;
+}
+
+static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+ uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
+
+ if (page != denali->page) {
+ dev_err(denali->dev,
+ "IN %s: page %d is not equal to denali->page %d",
+ __func__, page, denali->page);
+ BUG();
+ }
+
+ setup_ecc_for_xfer(denali, false, true);
+ denali_enable_dma(denali, true);
+
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+ clear_interrupts(denali);
+ denali_setup_dma(denali, DENALI_READ);
+
+ /* wait for operation to complete */
+ wait_for_irq(denali, irq_mask);
+
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+ denali_enable_dma(denali, false);
+
+ memcpy(buf, denali->buf.buf, mtd->writesize);
+ memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
+
+ return 0;
+}
+
+static uint8_t denali_read_byte(struct mtd_info *mtd)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint8_t result = 0xff;
+
+ if (denali->buf.head < denali->buf.tail)
+ result = denali->buf.buf[denali->buf.head++];
+
+ return result;
+}
+
+static void denali_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ spin_lock_irq(&denali->irq_lock);
+ denali->flash_bank = chip;
+ spin_unlock_irq(&denali->irq_lock);
+}
+
+static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int status = denali->status;
+
+ denali->status = 0;
+
+ return status;
+}
+
+static int denali_erase(struct mtd_info *mtd, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ uint32_t cmd, irq_status;
+
+ clear_interrupts(denali);
+
+ /* setup page read request for access type */
+ cmd = MODE_10 | BANK(denali->flash_bank) | page;
+ index_addr(denali, cmd, 0x1);
+
+ /* wait for erase to complete or failure to occur */
+ irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
+ INTR_STATUS__ERASE_FAIL);
+
+ return irq_status & INTR_STATUS__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
+}
+
+static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
+ int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t addr, id;
+ int i;
+
+ switch (cmd) {
+ case NAND_CMD_PAGEPROG:
+ break;
+ case NAND_CMD_STATUS:
+ read_status(denali);
+ break;
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
+ reset_buf(denali);
+ /*
+ * sometimes the manufacturer ID read from the register is not
+ * right, e.g. on some Micron MT29F32G08QAA MLC NAND chips,
+ * so here we send the READID cmd to the NAND instead
+ */
+ addr = MODE_11 | BANK(denali->flash_bank);
+ index_addr(denali, addr | 0, 0x90);
+ index_addr(denali, addr | 1, 0);
+ for (i = 0; i < 8; i++) {
+ index_addr_read_data(denali, addr | 2, &id);
+ write_byte_to_buf(denali, id);
+ }
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_SEQIN:
+ denali->page = page;
+ break;
+ case NAND_CMD_RESET:
+ reset_bank(denali);
+ break;
+ case NAND_CMD_READOOB:
+ /* TODO: Read OOB data */
+ break;
+ default:
+ pr_err(": unsupported command received 0x%x\n", cmd);
+ break;
+ }
+}
+/* end NAND core entry points */
+
+/* Initialization code to bring the device up to a known good state */
+static void denali_hw_init(struct denali_nand_info *denali)
+{
+	/*
+	 * Tell the driver how many bytes the controller will skip in the
+	 * OOB area before writing the ECC code. Firmware may already have
+	 * set this register, so read the value out; if it is 0, leave it.
+	 */
+ denali->bbtskipbytes = ioread32(denali->flash_reg +
+ SPARE_AREA_SKIP_BYTES);
+ detect_max_banks(denali);
+ denali_nand_reset(denali);
+ iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG,
+ denali->flash_reg + CHIP_ENABLE_DONT_CARE);
+
+ iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
+
+	/* These registers should be set at init time */
+ iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(1, denali->flash_reg + ECC_ENABLE);
+ denali_nand_timing_set(denali);
+ denali_irq_init(denali);
+}
+
+/*
+ * Although the controller spec says SLC ECC is forced to be 4-bit,
+ * the Denali controller in MRST only supports 15-bit and 8-bit ECC
+ * correction.
+ */
+#define ECC_8BITS 14
+static struct nand_ecclayout nand_8bit_oob = {
+ .eccbytes = 14,
+};
+
+#define ECC_15BITS 26
+static struct nand_ecclayout nand_15bit_oob = {
+ .eccbytes = 26,
+};
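+
+/*
+ * Illustrative arithmetic (an assumption, not stated in the sources):
+ * these byte counts are consistent with BCH over GF(2^13) on 512-byte
+ * sectors: 8-bit correction needs 8 * 13 = 104 bits = 13 bytes, padded
+ * to 14; 15-bit correction needs 15 * 13 = 195 bits = 25 bytes, padded
+ * to 26.
+ */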
+
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/* initialize driver data structures */
+static void denali_drv_init(struct denali_nand_info *denali)
+{
+ denali->idx = 0;
+
+	/*
+	 * Set up the interrupt handler: the completion object is used to
+	 * notify the waiter that the interrupt has completed.
+	 */
+ init_completion(&denali->complete);
+
+	/*
+	 * The spinlock is used to synchronize the ISR with anything else
+	 * that might access the shared data (interrupt status).
+	 */
+ spin_lock_init(&denali->irq_lock);
+
+ /* indicate that MTD has not selected a valid bank yet */
+ denali->flash_bank = CHIP_SELECT_INVALID;
+
+ /* initialize our irq_status variable to indicate no interrupts */
+ denali->irq_status = 0;
+}
+
+int denali_init(struct denali_nand_info *denali)
+{
+ int ret;
+
+ if (denali->platform == INTEL_CE4100) {
+ /*
+ * Due to a silicon limitation, we can only support
+ * ONFI timing mode 1 and below.
+ */
+ if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
+ pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
+ return -EINVAL;
+ }
+ }
+
+ /* allocate a temporary buffer for nand_scan_ident() */
+ denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (!denali->buf.buf)
+ return -ENOMEM;
+
+ denali->mtd.dev.parent = denali->dev;
+ denali_hw_init(denali);
+ denali_drv_init(denali);
+
+	/*
+	 * denali_isr is registered only after all the hardware
+	 * initialization is finished.
+	 */
+ if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
+ DENALI_NAND_NAME, denali)) {
+ pr_err("Spectra: Unable to allocate IRQ\n");
+ return -ENODEV;
+ }
+
+ /* now that our ISR is registered, we can enable interrupts */
+ denali_set_intr_modes(denali, true);
+ denali->mtd.name = "denali-nand";
+ denali->mtd.owner = THIS_MODULE;
+ denali->mtd.priv = &denali->nand;
+
+ /* register the driver with the NAND core subsystem */
+ denali->nand.select_chip = denali_select_chip;
+ denali->nand.cmdfunc = denali_cmdfunc;
+ denali->nand.read_byte = denali_read_byte;
+ denali->nand.waitfunc = denali_waitfunc;
+
+	/*
+	 * Scan for NAND devices attached to the controller. This is the
+	 * first stage in a two-step process to register with the NAND
+	 * subsystem.
+	 */
+ if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
+ ret = -ENXIO;
+ goto failed_req_irq;
+ }
+
+ /* allocate the right size buffer now */
+ devm_kfree(denali->dev, denali->buf.buf);
+ denali->buf.buf = devm_kzalloc(denali->dev,
+ denali->mtd.writesize + denali->mtd.oobsize,
+ GFP_KERNEL);
+ if (!denali->buf.buf) {
+ ret = -ENOMEM;
+ goto failed_req_irq;
+ }
+
+ /* Is 32-bit DMA supported? */
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ pr_err("Spectra: no usable DMA configuration\n");
+ goto failed_req_irq;
+ }
+
+ denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
+ denali->mtd.writesize + denali->mtd.oobsize,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
+ dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+ ret = -EIO;
+ goto failed_req_irq;
+ }
+
+	/*
+	 * Support for multiple NAND devices: MTD knows nothing about
+	 * multiple NANDs, so tell it the real page size and everything
+	 * else necessary.
+	 */
+ denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ denali->nand.chipsize <<= (denali->devnum - 1);
+ denali->nand.page_shift += (denali->devnum - 1);
+ denali->nand.pagemask = (denali->nand.chipsize >>
+ denali->nand.page_shift) - 1;
+ denali->nand.bbt_erase_shift += (denali->devnum - 1);
+ denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
+ denali->nand.chip_shift += (denali->devnum - 1);
+ denali->mtd.writesize <<= (denali->devnum - 1);
+ denali->mtd.oobsize <<= (denali->devnum - 1);
+ denali->mtd.erasesize <<= (denali->devnum - 1);
+ denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
+ denali->bbtskipbytes *= denali->devnum;
+
+ /*
+ * second stage of the NAND scan
+ * this stage requires information regarding ECC and
+ * bad block management.
+ */
+
+ /* Bad block management */
+ denali->nand.bbt_td = &bbt_main_descr;
+ denali->nand.bbt_md = &bbt_mirror_descr;
+
+ /* skip the scan for now until we have OOB read and write support */
+ denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
+ denali->nand.options |= NAND_SKIP_BBTSCAN;
+ denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+
+ /* no subpage writes on denali */
+ denali->nand.options |= NAND_NO_SUBPAGE_WRITE;
+
+	/*
+	 * The Denali controller only supports 15-bit and 8-bit ECC in
+	 * MRST, so let the controller do 15-bit ECC for MLC and 8-bit
+	 * ECC for SLC if possible.
+	 */
+ if (!nand_is_slc(&denali->nand) &&
+ (denali->mtd.oobsize > (denali->bbtskipbytes +
+ ECC_15BITS * (denali->mtd.writesize /
+ ECC_SECTOR_SIZE)))) {
+		/* if the MLC OOB size is large enough, use 15-bit ECC */
+ denali->nand.ecc.strength = 15;
+ denali->nand.ecc.layout = &nand_15bit_oob;
+ denali->nand.ecc.bytes = ECC_15BITS;
+ iowrite32(15, denali->flash_reg + ECC_CORRECTION);
+ } else if (denali->mtd.oobsize < (denali->bbtskipbytes +
+ ECC_8BITS * (denali->mtd.writesize /
+ ECC_SECTOR_SIZE))) {
+		pr_err("Your NAND chip OOB is not large enough to contain 8-bit ECC correction codes\n");
+		ret = -EINVAL;
+		goto failed_req_irq;
+ } else {
+ denali->nand.ecc.strength = 8;
+ denali->nand.ecc.layout = &nand_8bit_oob;
+ denali->nand.ecc.bytes = ECC_8BITS;
+ iowrite32(8, denali->flash_reg + ECC_CORRECTION);
+ }
+
+ denali->nand.ecc.bytes *= denali->devnum;
+ denali->nand.ecc.strength *= denali->devnum;
+ denali->nand.ecc.layout->eccbytes *=
+ denali->mtd.writesize / ECC_SECTOR_SIZE;
+ denali->nand.ecc.layout->oobfree[0].offset =
+ denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
+ denali->nand.ecc.layout->oobfree[0].length =
+ denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes -
+ denali->bbtskipbytes;
+
+	/*
+	 * Let the driver know the total number of blocks and how many
+	 * blocks are contained in each NAND chip. blksperchip helps the
+	 * driver work out how many blocks are taken by the FW.
+	 */
+ denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift;
+ denali->blksperchip = denali->totalblks / denali->nand.numchips;
+
+ /* override the default read operations */
+ denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
+ denali->nand.ecc.read_page = denali_read_page;
+ denali->nand.ecc.read_page_raw = denali_read_page_raw;
+ denali->nand.ecc.write_page = denali_write_page;
+ denali->nand.ecc.write_page_raw = denali_write_page_raw;
+ denali->nand.ecc.read_oob = denali_read_oob;
+ denali->nand.ecc.write_oob = denali_write_oob;
+ denali->nand.erase = denali_erase;
+
+ if (nand_scan_tail(&denali->mtd)) {
+ ret = -ENXIO;
+ goto failed_req_irq;
+ }
+
+ ret = mtd_device_register(&denali->mtd, NULL, 0);
+ if (ret) {
+ dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
+ ret);
+ goto failed_req_irq;
+ }
+ return 0;
+
+failed_req_irq:
+ denali_irq_cleanup(denali->irq, denali);
+
+ return ret;
+}
+EXPORT_SYMBOL(denali_init);
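+
+/*
+ * Usage note (illustrative): denali_init() expects the platform glue to
+ * have filled in denali->dev, ->irq, ->flash_reg, ->flash_mem and
+ * ->platform before calling it - see denali_dt_probe() and
+ * denali_pci_probe() below for the two in-tree examples, e.g.:
+ *
+ *	denali->dev = &ofdev->dev;
+ *	denali->irq = platform_get_irq(ofdev, 0);
+ *	...
+ *	ret = denali_init(denali);
+ */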
+
+/* driver exit point */
+void denali_remove(struct denali_nand_info *denali)
+{
+ denali_irq_cleanup(denali->irq, denali);
+ dma_unmap_single(denali->dev, denali->buf.dma_buf,
+ denali->mtd.writesize + denali->mtd.oobsize,
+ DMA_BIDIRECTIONAL);
+}
+EXPORT_SYMBOL(denali_remove);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
new file mode 100644
index 000000000..145bf8893
--- /dev/null
+++ b/drivers/mtd/nand/denali.h
@@ -0,0 +1,483 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __DENALI_H__
+#define __DENALI_H__
+
+#include <linux/mtd/nand.h>
+
+#define DEVICE_RESET 0x0
+#define DEVICE_RESET__BANK0 0x0001
+#define DEVICE_RESET__BANK1 0x0002
+#define DEVICE_RESET__BANK2 0x0004
+#define DEVICE_RESET__BANK3 0x0008
+
+#define TRANSFER_SPARE_REG 0x10
+#define TRANSFER_SPARE_REG__FLAG 0x0001
+
+#define LOAD_WAIT_CNT 0x20
+#define LOAD_WAIT_CNT__VALUE 0xffff
+
+#define PROGRAM_WAIT_CNT 0x30
+#define PROGRAM_WAIT_CNT__VALUE 0xffff
+
+#define ERASE_WAIT_CNT 0x40
+#define ERASE_WAIT_CNT__VALUE 0xffff
+
+#define INT_MON_CYCCNT 0x50
+#define INT_MON_CYCCNT__VALUE 0xffff
+
+#define RB_PIN_ENABLED 0x60
+#define RB_PIN_ENABLED__BANK0 0x0001
+#define RB_PIN_ENABLED__BANK1 0x0002
+#define RB_PIN_ENABLED__BANK2 0x0004
+#define RB_PIN_ENABLED__BANK3 0x0008
+
+#define MULTIPLANE_OPERATION 0x70
+#define MULTIPLANE_OPERATION__FLAG 0x0001
+
+#define MULTIPLANE_READ_ENABLE 0x80
+#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
+
+#define COPYBACK_DISABLE 0x90
+#define COPYBACK_DISABLE__FLAG 0x0001
+
+#define CACHE_WRITE_ENABLE 0xa0
+#define CACHE_WRITE_ENABLE__FLAG 0x0001
+
+#define CACHE_READ_ENABLE 0xb0
+#define CACHE_READ_ENABLE__FLAG 0x0001
+
+#define PREFETCH_MODE 0xc0
+#define PREFETCH_MODE__PREFETCH_EN 0x0001
+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
+
+#define CHIP_ENABLE_DONT_CARE 0xd0
+#define CHIP_EN_DONT_CARE__FLAG 0x01
+
+#define ECC_ENABLE 0xe0
+#define ECC_ENABLE__FLAG 0x0001
+
+#define GLOBAL_INT_ENABLE 0xf0
+#define GLOBAL_INT_EN_FLAG 0x01
+
+#define WE_2_RE 0x100
+#define WE_2_RE__VALUE 0x003f
+
+#define ADDR_2_DATA 0x110
+#define ADDR_2_DATA__VALUE 0x003f
+
+#define RE_2_WE 0x120
+#define RE_2_WE__VALUE 0x003f
+
+#define ACC_CLKS 0x130
+#define ACC_CLKS__VALUE 0x000f
+
+#define NUMBER_OF_PLANES 0x140
+#define NUMBER_OF_PLANES__VALUE 0x0007
+
+#define PAGES_PER_BLOCK 0x150
+#define PAGES_PER_BLOCK__VALUE 0xffff
+
+#define DEVICE_WIDTH 0x160
+#define DEVICE_WIDTH__VALUE 0x0003
+
+#define DEVICE_MAIN_AREA_SIZE 0x170
+#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
+
+#define DEVICE_SPARE_AREA_SIZE 0x180
+#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
+
+#define TWO_ROW_ADDR_CYCLES 0x190
+#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
+
+#define MULTIPLANE_ADDR_RESTRICT 0x1a0
+#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
+
+#define ECC_CORRECTION 0x1b0
+#define ECC_CORRECTION__VALUE 0x001f
+
+#define READ_MODE 0x1c0
+#define READ_MODE__VALUE 0x000f
+
+#define WRITE_MODE 0x1d0
+#define WRITE_MODE__VALUE 0x000f
+
+#define COPYBACK_MODE 0x1e0
+#define COPYBACK_MODE__VALUE 0x000f
+
+#define RDWR_EN_LO_CNT 0x1f0
+#define RDWR_EN_LO_CNT__VALUE 0x001f
+
+#define RDWR_EN_HI_CNT 0x200
+#define RDWR_EN_HI_CNT__VALUE 0x001f
+
+#define MAX_RD_DELAY 0x210
+#define MAX_RD_DELAY__VALUE 0x000f
+
+#define CS_SETUP_CNT 0x220
+#define CS_SETUP_CNT__VALUE 0x001f
+
+#define SPARE_AREA_SKIP_BYTES 0x230
+#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
+
+#define SPARE_AREA_MARKER 0x240
+#define SPARE_AREA_MARKER__VALUE 0xffff
+
+#define DEVICES_CONNECTED 0x250
+#define DEVICES_CONNECTED__VALUE 0x0007
+
+#define DIE_MASK 0x260
+#define DIE_MASK__VALUE 0x00ff
+
+#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
+
+#define WRITE_PROTECT 0x280
+#define WRITE_PROTECT__FLAG 0x0001
+
+#define RE_2_RE 0x290
+#define RE_2_RE__VALUE 0x003f
+
+#define MANUFACTURER_ID 0x300
+#define MANUFACTURER_ID__VALUE 0x00ff
+
+#define DEVICE_ID 0x310
+#define DEVICE_ID__VALUE 0x00ff
+
+#define DEVICE_PARAM_0 0x320
+#define DEVICE_PARAM_0__VALUE 0x00ff
+
+#define DEVICE_PARAM_1 0x330
+#define DEVICE_PARAM_1__VALUE 0x00ff
+
+#define DEVICE_PARAM_2 0x340
+#define DEVICE_PARAM_2__VALUE 0x00ff
+
+#define LOGICAL_PAGE_DATA_SIZE 0x350
+#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
+
+#define LOGICAL_PAGE_SPARE_SIZE 0x360
+#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
+
+#define REVISION 0x370
+#define REVISION__VALUE 0xffff
+
+#define ONFI_DEVICE_FEATURES 0x380
+#define ONFI_DEVICE_FEATURES__VALUE 0x003f
+
+#define ONFI_OPTIONAL_COMMANDS 0x390
+#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
+
+#define ONFI_TIMING_MODE 0x3a0
+#define ONFI_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
+
+#define FEATURES 0x3f0
+#define FEATURES__N_BANKS 0x0003
+#define FEATURES__ECC_MAX_ERR 0x003c
+#define FEATURES__DMA 0x0040
+#define FEATURES__CMD_DMA 0x0080
+#define FEATURES__PARTITION 0x0100
+#define FEATURES__XDMA_SIDEBAND 0x0200
+#define FEATURES__GPREG 0x0400
+#define FEATURES__INDEX_ADDR 0x0800
+
+#define TRANSFER_MODE 0x400
+#define TRANSFER_MODE__VALUE 0x0003
+
+#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50))
+#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50))
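+
+/*
+ * Worked example (illustrative): each bank has its own 0x50-byte register
+ * window, so for bank 2:
+ *
+ *	INTR_STATUS(2) = 0x410 + 2 * 0x50 = 0x4b0
+ *	INTR_EN(2)     = 0x420 + 2 * 0x50 = 0x4c0
+ */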
+
+#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS__ECC_ERR 0x0002
+#define INTR_STATUS__DMA_CMD_COMP 0x0004
+#define INTR_STATUS__TIME_OUT 0x0008
+#define INTR_STATUS__PROGRAM_FAIL 0x0010
+#define INTR_STATUS__ERASE_FAIL 0x0020
+#define INTR_STATUS__LOAD_COMP 0x0040
+#define INTR_STATUS__PROGRAM_COMP 0x0080
+#define INTR_STATUS__ERASE_COMP 0x0100
+#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS__LOCKED_BLK 0x0400
+#define INTR_STATUS__UNSUP_CMD 0x0800
+#define INTR_STATUS__INT_ACT 0x1000
+#define INTR_STATUS__RST_COMP 0x2000
+#define INTR_STATUS__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS__PAGE_XFER_INC 0x8000
+
+#define INTR_EN__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN__ECC_ERR 0x0002
+#define INTR_EN__DMA_CMD_COMP 0x0004
+#define INTR_EN__TIME_OUT 0x0008
+#define INTR_EN__PROGRAM_FAIL 0x0010
+#define INTR_EN__ERASE_FAIL 0x0020
+#define INTR_EN__LOAD_COMP 0x0040
+#define INTR_EN__PROGRAM_COMP 0x0080
+#define INTR_EN__ERASE_COMP 0x0100
+#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN__LOCKED_BLK 0x0400
+#define INTR_EN__UNSUP_CMD 0x0800
+#define INTR_EN__INT_ACT 0x1000
+#define INTR_EN__RST_COMP 0x2000
+#define INTR_EN__PIPE_CMD_ERR 0x4000
+#define INTR_EN__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50))
+#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50))
+#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50))
+
+#define DATA_INTR 0x550
+#define DATA_INTR__WRITE_SPACE_AV 0x0001
+#define DATA_INTR__READ_DATA_AV 0x0002
+
+#define DATA_INTR_EN 0x560
+#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
+#define DATA_INTR_EN__READ_DATA_AV 0x0002
+
+#define GPREG_0 0x570
+#define GPREG_0__VALUE 0xffff
+
+#define GPREG_1 0x580
+#define GPREG_1__VALUE 0xffff
+
+#define GPREG_2 0x590
+#define GPREG_2__VALUE 0xffff
+
+#define GPREG_3 0x5a0
+#define GPREG_3__VALUE 0xffff
+
+#define ECC_THRESHOLD 0x600
+#define ECC_THRESHOLD__VALUE 0x03ff
+
+#define ECC_ERROR_BLOCK_ADDRESS 0x610
+#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
+
+#define ECC_ERROR_PAGE_ADDRESS 0x620
+#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
+#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
+
+#define ECC_ERROR_ADDRESS 0x630
+#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
+#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
+
+#define ERR_CORRECTION_INFO 0x640
+#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
+#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
+#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
+#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
+
+#define DMA_ENABLE 0x700
+#define DMA_ENABLE__FLAG 0x0001
+
+#define IGNORE_ECC_DONE 0x710
+#define IGNORE_ECC_DONE__FLAG 0x0001
+
+#define DMA_INTR 0x720
+#define DMA_INTR__TARGET_ERROR 0x0001
+#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
+
+#define DMA_INTR_EN 0x730
+#define DMA_INTR_EN__TARGET_ERROR 0x0001
+#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
+
+#define TARGET_ERR_ADDR_LO 0x740
+#define TARGET_ERR_ADDR_LO__VALUE 0xffff
+
+#define TARGET_ERR_ADDR_HI 0x750
+#define TARGET_ERR_ADDR_HI__VALUE 0xffff
+
+#define CHNL_ACTIVE 0x760
+#define CHNL_ACTIVE__CHANNEL0 0x0001
+#define CHNL_ACTIVE__CHANNEL1 0x0002
+#define CHNL_ACTIVE__CHANNEL2 0x0004
+#define CHNL_ACTIVE__CHANNEL3 0x0008
+
+#define ACTIVE_SRC_ID 0x800
+#define ACTIVE_SRC_ID__VALUE 0x00ff
+
+#define PTN_INTR 0x810
+#define PTN_INTR__CONFIG_ERROR 0x0001
+#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR__REG_ACCESS_ERROR 0x0020
+
+#define PTN_INTR_EN 0x820
+#define PTN_INTR_EN__CONFIG_ERROR 0x0001
+#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
+
+#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40))
+#define PERM_SRC_ID__SRCID 0x00ff
+#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID__READ_ACTIVE 0x4000
+#define PERM_SRC_ID__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40))
+#define MIN_BLK_ADDR__VALUE 0xffff
+
+#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40))
+#define MAX_BLK_ADDR__VALUE 0xffff
+
+#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40))
+#define MIN_MAX_BANK__MIN_VALUE 0x0003
+#define MIN_MAX_BANK__MAX_VALUE 0x000c
+
+
+/* ffsdefs.h */
+#define CLEAR 0 /* use this to clear a field instead of "fail" */
+#define SET 1 /* use this to set a field instead of "pass" */
+#define FAIL 1 /* failed flag */
+#define PASS 0 /* success flag */
+#define ERR -1 /* error flag */
+
+/* lld.h */
+#define GOOD_BLOCK 0
+#define DEFECTIVE_BLOCK 1
+#define READ_ERROR 2
+
+#define CLK_X 5
+#define CLK_MULTI 4
+
+/* spectraswconfig.h */
+#define CMD_DMA 0
+
+#define SPECTRA_PARTITION_ID 0
+/**** Block Table and Reserved Block Parameters *****/
+#define SPECTRA_START_BLOCK 3
+#define NUM_FREE_BLOCKS_GATE 30
+
+/* KBV - Updated to LNW scratch register address */
+#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
+#define SCRATCH_REG_SIZE 64
+
+#define GLOB_HWCTL_DEFAULT_BLKS 2048
+
+#define SUPPORT_15BITECC 1
+#define SUPPORT_8BITECC 1
+
+#define CUSTOM_CONF_PARAMS 0
+
+#define ONFI_BLOOM_TIME 1
+#define MODE5_WORKAROUND 0
+
+
+#define MODE_00 0x00000000
+#define MODE_01 0x04000000
+#define MODE_10 0x08000000
+#define MODE_11 0x0C000000
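+
+/*
+ * Note (illustrative): MODE_00..MODE_11 select the controller's four
+ * indexed-addressing modes via the top address bits; each step is
+ * 0x04000000, i.e. the mode number lands in bits 27:26 of the command
+ * word (see the MODE_10 erase command built in denali.c).
+ */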
+
+
+#define DATA_TRANSFER_MODE 0
+#define PROTECTION_PER_BLOCK 1
+#define LOAD_WAIT_COUNT 2
+#define PROGRAM_WAIT_COUNT 3
+#define ERASE_WAIT_COUNT 4
+#define INT_MONITOR_CYCLE_COUNT 5
+#define READ_BUSY_PIN_ENABLED 6
+#define MULTIPLANE_OPERATION_SUPPORT 7
+#define PRE_FETCH_MODE 8
+#define CE_DONT_CARE_SUPPORT 9
+#define COPYBACK_SUPPORT 10
+#define CACHE_WRITE_SUPPORT 11
+#define CACHE_READ_SUPPORT 12
+#define NUM_PAGES_IN_BLOCK 13
+#define ECC_ENABLE_SELECT 14
+#define WRITE_ENABLE_2_READ_ENABLE 15
+#define ADDRESS_2_DATA 16
+#define READ_ENABLE_2_WRITE_ENABLE 17
+#define TWO_ROW_ADDRESS_CYCLES 18
+#define MULTIPLANE_ADDRESS_RESTRICT 19
+#define ACC_CLOCKS 20
+#define READ_WRITE_ENABLE_LOW_COUNT 21
+#define READ_WRITE_ENABLE_HIGH_COUNT 22
+
+#define ECC_SECTOR_SIZE 512
+
+struct nand_buf {
+ int head;
+ int tail;
+ uint8_t *buf;
+ dma_addr_t dma_buf;
+};
+
+#define INTEL_CE4100 1
+#define INTEL_MRST 2
+#define DT 3
+
+struct denali_nand_info {
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ int flash_bank; /* currently selected chip */
+ int status;
+ int platform;
+ struct nand_buf buf;
+ struct device *dev;
+ int total_used_banks;
+ uint32_t block; /* stored for future use */
+ uint16_t page;
+ void __iomem *flash_reg; /* Mapped io reg base address */
+ void __iomem *flash_mem; /* Mapped io reg base address */
+
+ /* elements used by ISR */
+ struct completion complete;
+ spinlock_t irq_lock;
+ uint32_t irq_status;
+ int irq_debug_array[32];
+ int idx;
+ int irq;
+
+ uint32_t devnum; /* represent how many nands connected */
+ uint32_t fwblks; /* represent how many blocks FW used */
+ uint32_t totalblks;
+ uint32_t blksperchip;
+ uint32_t bbtskipbytes;
+ uint32_t max_banks;
+};
+
+extern int denali_init(struct denali_nand_info *denali);
+extern void denali_remove(struct denali_nand_info *denali);
+
+#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
new file mode 100644
index 000000000..0cb1e8d9f
--- /dev/null
+++ b/drivers/mtd/nand/denali_dt.c
@@ -0,0 +1,131 @@
+/*
+ * NAND Flash Controller Device Driver for DT
+ *
+ * Copyright © 2011, Picochip.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+struct denali_dt {
+ struct denali_nand_info denali;
+ struct clk *clk;
+};
+
+static const struct of_device_id denali_nand_dt_ids[] = {
+ { .compatible = "denali,denali-nand-dt" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
+
+static u64 denali_dma_mask;
+
+static int denali_dt_probe(struct platform_device *ofdev)
+{
+ struct resource *denali_reg, *nand_data;
+ struct denali_dt *dt;
+ struct denali_nand_info *denali;
+ int ret;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev);
+ if (of_id) {
+ ofdev->id_entry = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+		return -ENODEV;
+ }
+
+ dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+ denali = &dt->denali;
+
+ denali->platform = DT;
+ denali->dev = &ofdev->dev;
+ denali->irq = platform_get_irq(ofdev, 0);
+ if (denali->irq < 0) {
+ dev_err(&ofdev->dev, "no irq defined\n");
+ return denali->irq;
+ }
+
+ denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
+ denali->flash_reg = devm_ioremap_resource(&ofdev->dev, denali_reg);
+ if (IS_ERR(denali->flash_reg))
+ return PTR_ERR(denali->flash_reg);
+
+ nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
+ denali->flash_mem = devm_ioremap_resource(&ofdev->dev, nand_data);
+ if (IS_ERR(denali->flash_mem))
+ return PTR_ERR(denali->flash_mem);
+
+ if (!of_property_read_u32(ofdev->dev.of_node,
+ "dma-mask", (u32 *)&denali_dma_mask)) {
+ denali->dev->dma_mask = &denali_dma_mask;
+ } else {
+ denali->dev->dma_mask = NULL;
+ }
+
+ dt->clk = devm_clk_get(&ofdev->dev, NULL);
+ if (IS_ERR(dt->clk)) {
+ dev_err(&ofdev->dev, "no clk available\n");
+ return PTR_ERR(dt->clk);
+ }
+ clk_prepare_enable(dt->clk);
+
+ ret = denali_init(denali);
+ if (ret)
+ goto out_disable_clk;
+
+ platform_set_drvdata(ofdev, dt);
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(dt->clk);
+
+ return ret;
+}
+
+static int denali_dt_remove(struct platform_device *ofdev)
+{
+ struct denali_dt *dt = platform_get_drvdata(ofdev);
+
+ denali_remove(&dt->denali);
+	clk_disable_unprepare(dt->clk);
+
+ return 0;
+}
+
+static struct platform_driver denali_dt_driver = {
+ .probe = denali_dt_probe,
+ .remove = denali_dt_remove,
+ .driver = {
+ .name = "denali-nand-dt",
+ .of_match_table = denali_nand_dt_ids,
+ },
+};
+
+module_platform_driver(denali_dt_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
new file mode 100644
index 000000000..6e2f387b8
--- /dev/null
+++ b/drivers/mtd/nand/denali_pci.c
@@ -0,0 +1,142 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME "denali-nand-pci"
+
+/* List of platforms this NAND controller has been integrated into */
+static const struct pci_device_id denali_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_nand_info *denali;
+
+ denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pci_enable_device(dev);
+ if (ret) {
+ pr_err("Spectra: pci_enable_device failed.\n");
+		goto failed_alloc_memory;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ denali->platform = INTEL_CE4100;
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ denali->platform = INTEL_MRST;
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ }
+ }
+
+ pci_set_master(dev);
+ denali->dev = &dev->dev;
+ denali->irq = dev->irq;
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ pr_err("Spectra: Unable to request memory regions\n");
+ goto failed_enable_dev;
+ }
+
+ denali->flash_reg = ioremap_nocache(csr_base, csr_len);
+ if (!denali->flash_reg) {
+ pr_err("Spectra: Unable to remap memory region\n");
+ ret = -ENOMEM;
+ goto failed_req_regions;
+ }
+
+ denali->flash_mem = ioremap_nocache(mem_base, mem_len);
+ if (!denali->flash_mem) {
+ pr_err("Spectra: ioremap_nocache failed!");
+ ret = -ENOMEM;
+ goto failed_remap_reg;
+ }
+
+ ret = denali_init(denali);
+ if (ret)
+ goto failed_remap_mem;
+
+ pci_set_drvdata(dev, denali);
+
+ return 0;
+
+failed_remap_mem:
+ iounmap(denali->flash_mem);
+failed_remap_reg:
+ iounmap(denali->flash_reg);
+failed_req_regions:
+ pci_release_regions(dev);
+failed_enable_dev:
+ pci_disable_device(dev);
+failed_alloc_memory:
+ kfree(denali);
+
+ return ret;
+}
+
+/* driver exit point */
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_nand_info *denali = pci_get_drvdata(dev);
+
+ denali_remove(denali);
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ kfree(denali);
+}
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+
+static int denali_init_pci(void)
+{
+ return pci_register_driver(&denali_pci_driver);
+}
+module_init(denali_init_pci);
+
+static void denali_exit_pci(void)
+{
+ pci_unregister_driver(&denali_pci_driver);
+}
+module_exit(denali_exit_pci);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
new file mode 100644
index 000000000..f68a7bcce
--- /dev/null
+++ b/drivers/mtd/nand/diskonchip.c
@@ -0,0 +1,1716 @@
+/*
+ * drivers/mtd/nand/diskonchip.c
+ *
+ * (C) 2003 Red Hat, Inc.
+ * (C) 2004 Dan Brown <dan_brown@ieee.org>
+ * (C) 2004 Kalev Lember <kalev@smartlink.ee>
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
+ * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
+ *
+ * Error correction code lifted from the old docecc code
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright (C) 2000 Netgem S.A.
+ * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Interface to generic NAND code for M-Systems DiskOnChip devices
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/rslib.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/doc2000.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/inftl.h>
+#include <linux/module.h>
+
+/* Where to look for the devices? */
+#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
+#endif
+
+static unsigned long doc_locations[] __initdata = {
+#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
+ 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
+ 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
+ 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
+ 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
+ 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
+#else
+ 0xc8000, 0xca000, 0xcc000, 0xce000,
+ 0xd0000, 0xd2000, 0xd4000, 0xd6000,
+ 0xd8000, 0xda000, 0xdc000, 0xde000,
+ 0xe0000, 0xe2000, 0xe4000, 0xe6000,
+ 0xe8000, 0xea000, 0xec000, 0xee000,
+#endif
+#endif
+ 0xffffffff };
+
+static struct mtd_info *doclist = NULL;
+
+struct doc_priv {
+ void __iomem *virtadr;
+ unsigned long physadr;
+ u_char ChipID;
+ u_char CDSNControl;
+ int chips_per_floor; /* The number of chips detected on each floor */
+ int curfloor;
+ int curchip;
+ int mh0_page;
+ int mh1_page;
+ struct mtd_info *nextdoc;
+};
+
+/* This is the syndrome computed by the HW ecc generator upon reading an empty
+ page, one with all 0xff for data and stored ecc code. */
+static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a };
+
+/* This is the ecc value computed by the HW ecc generator upon writing an empty
+ page, one with all 0xff for data. */
+static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
+
+#define INFTL_BBT_RESERVED_BLOCKS 4
+
+#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
+#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
+#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
+
+static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int bitmask);
+static void doc200x_select_chip(struct mtd_info *mtd, int chip);
+
+static int debug = 0;
+module_param(debug, int, 0);
+
+static int try_dword = 1;
+module_param(try_dword, int, 0);
+
+static int no_ecc_failures = 0;
+module_param(no_ecc_failures, int, 0);
+
+static int no_autopart = 0;
+module_param(no_autopart, int, 0);
+
+static int show_firmware_partition = 0;
+module_param(show_firmware_partition, int, 0);
+
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
+static int inftl_bbt_write = 1;
+#else
+static int inftl_bbt_write = 0;
+#endif
+module_param(inftl_bbt_write, int, 0);
+
+static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS;
+module_param(doc_config_location, ulong, 0);
+MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
+
+/* Sector size for HW ECC */
+#define SECTOR_SIZE 512
+/* The sector bytes are packed into NB_DATA 10 bit words */
+#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
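+/*
+ * Worked example (illustrative): with SECTOR_SIZE = 512 the packing above
+ * gives NB_DATA = ((512 + 1) * 8 + 6) / 10 = (4104 + 6) / 10 = 411
+ * ten-bit symbols, i.e. the 512 data bytes plus the parity byte plus six
+ * padding bits.
+ */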
+/* Number of roots */
+#define NROOTS 4
+/* First consecutive root */
+#define FCR 510
+/* Number of symbols */
+#define NN 1023
+
+/* the Reed Solomon control structure */
+static struct rs_control *rs_decoder;
+
+/*
+ * The HW decoder in the DoC ASICs provides us an error syndrome,
+ * which we must convert to a standard syndrome usable by the generic
+ * Reed-Solomon library code.
+ *
+ * Fabrice Bellard figured this out in the old docecc code. I added
+ * some comments, improved a minor bit and converted it to make use
+ * of the generic Reed-Solomon library. tglx
+ */
+static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
+{
+ int i, j, nerr, errpos[8];
+ uint8_t parity;
+ uint16_t ds[4], s[5], tmp, errval[8], syn[4];
+
+ memset(syn, 0, sizeof(syn));
+ /* Convert the ecc bytes into words */
+ ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
+ ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
+ ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
+ ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
+ parity = ecc[1];
+
+ /* Initialize the syndrome buffer */
+ for (i = 0; i < NROOTS; i++)
+ s[i] = ds[0];
+ /*
+ * Evaluate
+ * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
+ * where x = alpha^(FCR + i)
+ */
+ for (j = 1; j < NROOTS; j++) {
+ if (ds[j] == 0)
+ continue;
+ tmp = rs->index_of[ds[j]];
+ for (i = 0; i < NROOTS; i++)
+ s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)];
+ }
+
+ /* Calc syn[i] = s[i] / alpha^(v + i) */
+ for (i = 0; i < NROOTS; i++) {
+ if (s[i])
+ syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i));
+ }
+ /* Call the decoder library */
+ nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
+
+	/* Uncorrectable errors? */
+ if (nerr < 0)
+ return nerr;
+
+ /*
+ * Correct the errors. The bitpositions are a bit of magic,
+ * but they are given by the design of the de/encoder circuit
+	 * in the DoC ASICs.
+ */
+ for (i = 0; i < nerr; i++) {
+ int index, bitpos, pos = 1015 - errpos[i];
+ uint8_t val;
+ if (pos >= NB_DATA && pos < 1019)
+ continue;
+ if (pos < NB_DATA) {
+ /* extract bit position (MSB first) */
+ pos = 10 * (NB_DATA - 1 - pos) - 6;
+ /* now correct the following 10 bits. At most two bytes
+ can be modified since pos is even */
+ index = (pos >> 3) ^ 1;
+ bitpos = pos & 7;
+ if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] >> (2 + bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ index = ((pos >> 3) + 1) ^ 1;
+ bitpos = (bitpos + 10) & 7;
+ if (bitpos == 0)
+ bitpos = 8;
+ if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] << (8 - bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ }
+ }
+ /* If the parity is wrong, no rescue possible */
+ return parity ? -EBADMSG : nerr;
+}
+
+static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
+{
+ volatile char dummy;
+ int i;
+
+ for (i = 0; i < cycles; i++) {
+ if (DoC_is_Millennium(doc))
+ dummy = ReadDOC(doc->virtadr, NOP);
+ else if (DoC_is_MillenniumPlus(doc))
+ dummy = ReadDOC(doc->virtadr, Mplus_NOP);
+ else
+ dummy = ReadDOC(doc->virtadr, DOCStatus);
+ }
+}
+
+#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
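+
+/*
+ * Note (illustrative): the Millennium Plus exposes two flash-ready bits
+ * (FR_B0 and FR_B1), and the wait loops below only consider the device
+ * ready when both bits in this mask read back as set.
+ */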
+
+/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
+static int _DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ unsigned long timeo = jiffies + (HZ * 10);
+
+ if (debug)
+ printk("_DoC_WaitReady...\n");
+ /* Out-of-line routine to wait for chip response */
+ if (DoC_is_MillenniumPlus(doc)) {
+ while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ } else {
+ while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ }
+
+ return 0;
+}
+
+static inline int DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ int ret = 0;
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ DoC_Delay(doc, 4);
+
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ } else {
+ DoC_Delay(doc, 4);
+
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ DoC_Delay(doc, 2);
+ }
+
+ if (debug)
+ printk("DoC_WaitReady OK\n");
+ return ret;
+}
+
+static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if (debug)
+ printk("write_byte %02x\n", datum);
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, 2k_CDSN_IO);
+}
+
+static u_char doc2000_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ u_char ret;
+
+ ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(doc, 2);
+ ret = ReadDOC(docptr, 2k_CDSN_IO);
+ if (debug)
+ printk("read_byte returns %02x\n", ret);
+ return ret;
+}
+
+static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ if (debug)
+ printk("writebuf of %d bytes: ", len);
+ for (i = 0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug)
+ printk("\n");
+}
+
+static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf of %d bytes: ", len);
+
+ for (i = 0; i < len; i++) {
+ buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+ }
+}
+
+static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf_dword of %d bytes: ", len);
+
+ if (unlikely((((unsigned long)buf) | len) & 3)) {
+ for (i = 0; i < len; i++) {
+ *(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
+ }
+ } else {
+ for (i = 0; i < len; i += 4) {
+ *(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
+ }
+ }
+}
+
+static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ uint16_t ret;
+
+ doc200x_select_chip(mtd, nr);
+ doc200x_hwcontrol(mtd, NAND_CMD_READID,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /* We can't use dev_ready here, but at least we wait for the
+ * command to complete
+ */
+ udelay(50);
+
+ ret = this->read_byte(mtd) << 8;
+ ret |= this->read_byte(mtd);
+
+ if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
+ /* First chip probe. See if we get same results by 32-bit access */
+ union {
+ uint32_t dword;
+ uint8_t byte[4];
+ } ident;
+ void __iomem *docptr = doc->virtadr;
+
+ doc200x_hwcontrol(mtd, NAND_CMD_READID,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ udelay(50);
+
+ ident.dword = readl(docptr + DoC_2k_CDSN_IO);
+ if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
+ printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n");
+ this->read_buf = &doc2000_readbuf_dword;
+ }
+ }
+
+ return ret;
+}
+
+static void __init doc2000_count_chips(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ uint16_t mfrid;
+ int i;
+
+ /* Max 4 chips per floor on DiskOnChip 2000 */
+ doc->chips_per_floor = 4;
+
+ /* Find out what the first chip is */
+ mfrid = doc200x_ident_chip(mtd, 0);
+
+ /* Find how many chips in each floor. */
+ for (i = 1; i < 4; i++) {
+ if (doc200x_ident_chip(mtd, i) != mfrid)
+ break;
+ }
+ doc->chips_per_floor = i;
+ printk(KERN_DEBUG "Detected %d chips per floor.\n", i);
+}
+
+static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
+{
+ struct doc_priv *doc = this->priv;
+
+ int status;
+
+ DoC_WaitReady(doc);
+ this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ DoC_WaitReady(doc);
+ status = (int)this->read_byte(mtd);
+
+ return status;
+}
+
+static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, Mil_CDSN_IO);
+ WriteDOC(datum, docptr, WritePipeTerm);
+}
+
+static u_char doc2001_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ //ReadDOC(docptr, CDSNSlowIO);
+ /* 11.4.5 -- delay twice to allow extended length cycle */
+ DoC_Delay(doc, 2);
+ ReadDOC(docptr, ReadPipeInit);
+ //return ReadDOC(docptr, Mil_CDSN_IO);
+ return ReadDOC(docptr, LastDataRead);
+}
+
+static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ for (i = 0; i < len; i++)
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ /* Terminate write pipeline */
+ WriteDOC(0x00, docptr, WritePipeTerm);
+}
+
+static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ /* Start read pipeline */
+ ReadDOC(docptr, ReadPipeInit);
+
+ for (i = 0; i < len - 1; i++)
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
+
+ /* Terminate read pipeline */
+ buf[i] = ReadDOC(docptr, LastDataRead);
+}
+
+static u_char doc2001plus_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ u_char ret;
+
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ret = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug)
+ printk("read_byte returns %02x\n", ret);
+ return ret;
+}
+
+static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("writebuf of %d bytes: ", len);
+ for (i = 0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug)
+ printk("\n");
+}
+
+static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf of %d bytes: ", len);
+
+ /* Start read pipeline */
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ for (i = 0; i < len - 2; i++) {
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+
+ /* Terminate read pipeline */
+ buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 2]);
+ buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 1]);
+ if (debug)
+ printk("\n");
+}
+
+static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int floor = 0;
+
+ if (debug)
+ printk("select chip (%d)\n", chip);
+
+ if (chip == -1) {
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+ return;
+ }
+
+ floor = chip / doc->chips_per_floor;
+ chip -= (floor * doc->chips_per_floor);
+
+ /* Assert ChipEnable and deassert WriteProtect */
+ WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
+ this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ doc->curchip = chip;
+ doc->curfloor = floor;
+}
+
+static void doc200x_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int floor = 0;
+
+ if (debug)
+ printk("select chip (%d)\n", chip);
+
+ if (chip == -1)
+ return;
+
+ floor = chip / doc->chips_per_floor;
+ chip -= (floor * doc->chips_per_floor);
+
+ /* 11.4.4 -- deassert CE before changing chip */
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+
+ WriteDOC(floor, docptr, FloorSelect);
+ WriteDOC(chip, docptr, CDSNDeviceSelect);
+
+ doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ doc->curchip = chip;
+ doc->curfloor = floor;
+}
+
+#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
+
+static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ doc->CDSNControl &= ~CDSN_CTRL_MSK;
+ doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
+ if (debug)
+ printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
+ WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+ /* 11.4.3 -- 4 NOPs after CSDNControl write */
+ DoC_Delay(doc, 4);
+ }
+ if (cmd != NAND_CMD_NONE) {
+ if (DoC_is_2000(doc))
+ doc2000_write_byte(mtd, cmd);
+ else
+ doc2001_write_byte(mtd, cmd);
+ }
+}
+
+static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /*
+ * Must terminate write pipeline before sending any commands
+ * to the device.
+ */
+ if (command == NAND_CMD_PAGEPROG) {
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ }
+
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ WriteDOC(readcmd, docptr, Mplus_FlashCmd);
+ }
+ WriteDOC(command, docptr, Mplus_FlashCmd);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+
+ if (column != -1 || page_addr != -1) {
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (this->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ WriteDOC(column, docptr, Mplus_FlashAddress);
+ }
+ if (page_addr != -1) {
+ WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
+ WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
+ /* One more address cycle for higher density devices */
+ if (this->chipsize & 0x0c000000) {
+ WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
+ printk("high density\n");
+ }
+ }
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ /* deassert ALE */
+ if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
+ command == NAND_CMD_READOOB || command == NAND_CMD_READID)
+ WriteDOC(0, docptr, Mplus_FlashControl);
+ }
+
+ /*
+ * program and erase have their own busy handlers
+ * status and sequential in needs no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (this->dev_ready)
+ break;
+ udelay(this->chip_delay);
+ WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ while (!(this->read_byte(mtd) & 0x40)) ;
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!this->dev_ready) {
+ udelay(this->chip_delay);
+ return;
+ }
+ }
+
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+ /* wait until command is processed */
+ while (!this->dev_ready(mtd)) ;
+}
+
+static int doc200x_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ /* 11.4.2 -- must NOP four times before checking FR/B# */
+ DoC_Delay(doc, 4);
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+ if (debug)
+ printk("not ready\n");
+ return 0;
+ }
+ if (debug)
+ printk("was ready\n");
+ return 1;
+ } else {
+ /* 11.4.2 -- must NOP four times before checking FR/B# */
+ DoC_Delay(doc, 4);
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (debug)
+ printk("not ready\n");
+ return 0;
+ }
+ /* 11.4.2 -- Must NOP twice if it's ready */
+ DoC_Delay(doc, 2);
+ if (debug)
+ printk("was ready\n");
+ return 1;
+ }
+}
+
+static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ /* This is our last resort if we couldn't find or create a BBT. Just
+ pretend all blocks are good. */
+ return 0;
+}
+
+static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch (mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
+ break;
+ }
+}
+
+static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch (mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
+ break;
+ }
+}
+
+/* This code is only called on write */
+static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsigned char *ecc_code)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ int emptymatch = 1;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ } else {
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ if (ecc_code[i] != empty_write_ecc[i])
+ emptymatch = 0;
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+#if 0
+ /* If emptymatch=1, we might have an all-0xff data buffer. Check. */
+ if (emptymatch) {
+ /* Note: this somewhat expensive test should not be triggered
+ often. It could be optimized away by examining the data in
+ the writebuf routine, and remembering the result. */
+ for (i = 0; i < 512; i++) {
+ if (dat[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, we do have an all-0xff data buffer.
+ Return all-0xff ecc value instead of the computed one, so
+ it'll look just like a freshly-erased page. */
+ if (emptymatch)
+ memset(ecc_code, 0xff, 6);
+#endif
+ return 0;
+}
+
+static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *isnull)
+{
+ int i, ret = 0;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint8_t calc_ecc[6];
+ volatile u_char dummy;
+ int emptymatch = 1;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ } else {
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ }
+
+ /* Error occurred ? */
+ if (dummy & 0x80) {
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ if (calc_ecc[i] != empty_read_syndrome[i])
+ emptymatch = 0;
+ }
+ /* If emptymatch=1, the read syndrome is consistent with an
+ all-0xff data and stored ecc block. Check the stored ecc. */
+ if (emptymatch) {
+ for (i = 0; i < 6; i++) {
+ if (read_ecc[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, check the data block. */
+ if (emptymatch) {
+ /* Note: this somewhat expensive test should not be triggered
+ often. It could be optimized away by examining the data in
+ the readbuf routine, and remembering the result. */
+ for (i = 0; i < 512; i++) {
+ if (dat[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, this is almost certainly a freshly-
+ erased block, in which case the ECC will not come out right.
+ We'll suppress the error and tell the caller everything's
+ OK. Because it is. */
+ if (!emptymatch)
+ ret = doc_ecc_decode(rs_decoder, dat, calc_ecc);
+ if (ret > 0)
+ printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ if (no_ecc_failures && mtd_is_eccerr(ret)) {
+ printk(KERN_ERR "suppressing ECC failure\n");
+ ret = 0;
+ }
+ return ret;
+}
+
+//u_char mydatabuf[528];
+
+/* The strange out-of-order .oobfree list below is a (possibly unneeded)
+ * attempt to retain compatibility. It used to read:
+ * .oobfree = { {8, 8} }
+ * Since that leaves two bytes unusable, it was changed. But the following
+ * scheme might affect existing jffs2 installs by moving the cleanmarker:
+ * .oobfree = { {6, 10} }
+ * jffs2 seems to handle the above gracefully, but the current scheme seems
+ * safer. The only problem with it is that any code that parses oobfree must
+ * be able to handle out-of-order segments.
+ */
+static struct nand_ecclayout doc200x_oobinfo = {
+ .eccbytes = 6,
+ .eccpos = {0, 1, 2, 3, 4, 5},
+ .oobfree = {{8, 8}, {6, 2}}
+};
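+
+/* Illustrative sketch, not part of the driver: a parser tolerant of the
+ * out-of-order segments above simply walks the table until it hits a
+ * zero-length entry:
+ *
+ * for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
+ * if (!layout->oobfree[i].length)
+ * break;
+ * avail += layout->oobfree[i].length;
+ * }
+ *
+ * This accumulates {8, 8} and {6, 2} correctly regardless of their order;
+ * 'layout', 'i' and 'avail' are hypothetical names.
+ */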
+
+/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
+ On successful return, buf will contain a copy of the media header for
+ further processing. id is the string to scan for, and will presumably be
+ either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media
+ header. The page #s of the found media headers are placed in mh0_page and
+ mh1_page in the DOC private structure. */
+static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ unsigned offs;
+ int ret;
+ size_t retlen;
+
+ for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
+ if (retlen != mtd->writesize)
+ continue;
+ if (ret) {
+ printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", offs);
+ }
+ if (memcmp(buf, id, 6))
+ continue;
+ printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
+ if (doc->mh0_page == -1) {
+ doc->mh0_page = offs >> this->page_shift;
+ if (!findmirror)
+ return 1;
+ continue;
+ }
+ doc->mh1_page = offs >> this->page_shift;
+ return 2;
+ }
+ if (doc->mh0_page == -1) {
+ printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
+ return 0;
+ }
+ /* Only one mediaheader was found. We want buf to contain a
+ mediaheader on return, so we'll have to re-read the one we found. */
+ offs = doc->mh0_page << this->page_shift;
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
+ if (retlen != mtd->writesize) {
+ /* Insanity. Give up. */
+ printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
+ return 0;
+ }
+ return 1;
+}
+
+static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ int ret = 0;
+ u_char *buf;
+ struct NFTLMediaHeader *mh;
+ const unsigned psize = 1 << this->page_shift;
+ int numparts = 0;
+ unsigned blocks, maxblocks;
+ int offs, numheaders;
+
+ buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf) {
+ return 0;
+ }
+ if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
+ goto out;
+ mh = (struct NFTLMediaHeader *)buf;
+
+ le16_to_cpus(&mh->NumEraseUnits);
+ le16_to_cpus(&mh->FirstPhysicalEUN);
+ le32_to_cpus(&mh->FormattedSize);
+
+ printk(KERN_INFO " DataOrgID = %s\n"
+ " NumEraseUnits = %d\n"
+ " FirstPhysicalEUN = %d\n"
+ " FormattedSize = %d\n"
+ " UnitSizeFactor = %d\n",
+ mh->DataOrgID, mh->NumEraseUnits,
+ mh->FirstPhysicalEUN, mh->FormattedSize,
+ mh->UnitSizeFactor);
+
+ blocks = mtd->size >> this->phys_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+
+ if (mh->UnitSizeFactor == 0x00) {
+ /* Auto-determine UnitSizeFactor. The constraints are:
+ - There can be at most 32768 virtual blocks.
+ - There can be at most (virtual block size - page size)
+ virtual blocks (because MediaHeader+BBT must fit in 1).
+ */
+ mh->UnitSizeFactor = 0xff;
+ while (blocks > maxblocks) {
+ blocks >>= 1;
+ maxblocks = min(32768U, (maxblocks << 1) + psize);
+ mh->UnitSizeFactor--;
+ }
+ printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
+ }
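+ /*
+ * Worked example: starting from UnitSizeFactor 0xff, each pass of the
+ * loop above halves 'blocks' and decrements the factor, so one pass
+ * leaves 0xfe, i.e. a virtual block spanning two physical erase blocks.
+ */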
+
+ /* NOTE: The lines below modify internal variables of the NAND and MTD
+ layers; variables that have already been configured by nand_scan.
+ Unfortunately, we didn't know before this point what these values
+ should be. Thus, this code is somewhat dependent on the exact
+ implementation of the NAND layer. */
+ if (mh->UnitSizeFactor != 0xff) {
+ this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
+ mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
+ printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
+ blocks = mtd->size >> this->bbt_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+ }
+
+ if (blocks > maxblocks) {
+ printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
+ goto out;
+ }
+
+ /* Skip past the media headers. */
+ offs = max(doc->mh0_page, doc->mh1_page);
+ offs <<= this->page_shift;
+ offs += mtd->erasesize;
+
+ if (show_firmware_partition == 1) {
+ parts[0].name = " DiskOnChip Firmware / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = offs;
+ numparts = 1;
+ }
+
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
+
+ offs += parts[numparts].size;
+ numparts++;
+
+ if (offs < mtd->size) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = mtd->size - offs;
+ numparts++;
+ }
+
+ ret = numparts;
+ out:
+ kfree(buf);
+ return ret;
+}
+
+/* This is a stripped-down copy of the code in inftlmount.c */
+static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ int ret = 0;
+ u_char *buf;
+ struct INFTLMediaHeader *mh;
+ struct INFTLPartition *ip;
+ int numparts = 0;
+ int blocks;
+ int vshift, lastvunit = 0;
+ int i;
+ int end = mtd->size;
+
+ if (inftl_bbt_write)
+ end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
+
+ buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf) {
+ return 0;
+ }
+
+ if (!find_media_headers(mtd, buf, "BNAND", 0))
+ goto out;
+ doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
+ mh = (struct INFTLMediaHeader *)buf;
+
+ le32_to_cpus(&mh->NoOfBootImageBlocks);
+ le32_to_cpus(&mh->NoOfBinaryPartitions);
+ le32_to_cpus(&mh->NoOfBDTLPartitions);
+ le32_to_cpus(&mh->BlockMultiplierBits);
+ le32_to_cpus(&mh->FormatFlags);
+ le32_to_cpus(&mh->PercentUsed);
+
+ printk(KERN_INFO " bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+ " BlockMultiplerBits = %d\n"
+ " FormatFlgs = %d\n"
+ " OsakVersion = %d.%d.%d.%d\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
+ mh->PercentUsed);
+
+ vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
+
+ blocks = mtd->size >> vshift;
+ if (blocks > 32768) {
+ printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
+ goto out;
+ }
+
+ blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
+ if (inftl_bbt_write && (blocks > mtd->erasesize)) {
+ printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
+ goto out;
+ }
+
+ /* Scan the partitions */
+ for (i = 0; (i < 4); i++) {
+ ip = &(mh->Partitions[i]);
+ le32_to_cpus(&ip->virtualUnits);
+ le32_to_cpus(&ip->firstUnit);
+ le32_to_cpus(&ip->lastUnit);
+ le32_to_cpus(&ip->flags);
+ le32_to_cpus(&ip->spareUnits);
+ le32_to_cpus(&ip->Reserved0);
+
+ printk(KERN_INFO " PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
+
+ if ((show_firmware_partition == 1) &&
+ (i == 0) && (ip->firstUnit > 0)) {
+ parts[0].name = " DiskOnChip IPL / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = mtd->erasesize * ip->firstUnit;
+ numparts = 1;
+ }
+
+ if (ip->flags & INFTL_BINARY)
+ parts[numparts].name = " DiskOnChip BDK partition";
+ else
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = ip->firstUnit << vshift;
+ parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
+ numparts++;
+ if (ip->lastUnit > lastvunit)
+ lastvunit = ip->lastUnit;
+ if (ip->flags & INFTL_LAST)
+ break;
+ }
+ lastvunit++;
+ if ((lastvunit << vshift) < end) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = lastvunit << vshift;
+ parts[numparts].size = end - parts[numparts].offset;
+ numparts++;
+ }
+ ret = numparts;
+ out:
+ kfree(buf);
+ return ret;
+}
+
+static int __init nftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ struct mtd_partition parts[2];
+
+ memset((char *)parts, 0, sizeof(parts));
+ /* On NFTL, we have to find the media headers before we can read the
+ BBTs, since they're stored in the media header eraseblocks. */
+ numparts = nftl_partscan(mtd, parts);
+ if (!numparts)
+ return -EIO;
+ this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->pages[0] = doc->mh0_page + 1;
+ if (doc->mh1_page != -1) {
+ this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->pages[0] = doc->mh1_page + 1;
+ } else {
+ this->bbt_md = NULL;
+ }
+
+ /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
+ At least as nand_bbt.c is currently written. */
+ if ((ret = nand_scan_bbt(mtd, NULL)))
+ return ret;
+ mtd_device_register(mtd, NULL, 0);
+ if (!no_autopart)
+ mtd_device_register(mtd, parts, numparts);
+ return 0;
+}
+
+static int __init inftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ struct mtd_partition parts[5];
+
+ if (this->numchips > doc->chips_per_floor) {
+ printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
+ return -EIO;
+ }
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->pages[0] = 2;
+ this->bbt_md = NULL;
+ } else {
+ this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->offs = 8;
+ this->bbt_td->len = 8;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_td->reserved_block_code = 0x01;
+ this->bbt_td->pattern = "MSYS_BBT";
+
+ this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_md->options |= NAND_BBT_WRITE;
+ this->bbt_md->offs = 8;
+ this->bbt_md->len = 8;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_md->reserved_block_code = 0x01;
+ this->bbt_md->pattern = "TBB_SYSM";
+ }
+
+ /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
+ At least as nand_bbt.c is currently written. */
+ if ((ret = nand_scan_bbt(mtd, NULL)))
+ return ret;
+ memset((char *)parts, 0, sizeof(parts));
+ numparts = inftl_partscan(mtd, parts);
+ /* At least for now, require the INFTL Media Header. We could probably
+ do without it for non-INFTL use, since all it gives us is
+ autopartitioning, but I want to give it more thought. */
+ if (!numparts)
+ return -EIO;
+ mtd_device_register(mtd, NULL, 0);
+ if (!no_autopart)
+ mtd_device_register(mtd, parts, numparts);
+ return 0;
+}
+
+static inline int __init doc2000_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->read_byte = doc2000_read_byte;
+ this->write_buf = doc2000_writebuf;
+ this->read_buf = doc2000_readbuf;
+ this->scan_bbt = nftl_scan_bbt;
+
+ doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (NFTL Model)";
+ return (4 * doc->chips_per_floor);
+}
+
+static inline int __init doc2001_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->read_byte = doc2001_read_byte;
+ this->write_buf = doc2001_writebuf;
+ this->read_buf = doc2001_readbuf;
+
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
+ /* It's not a Millennium; it's one of the newer
+ DiskOnChip 2000 units with a similar ASIC.
+ Treat it like a Millennium, except that it
+ can have multiple chips. */
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (INFTL Model)";
+ this->scan_bbt = inftl_scan_bbt;
+ return (4 * doc->chips_per_floor);
+ } else {
+ /* Bog-standard Millennium */
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium";
+ this->scan_bbt = nftl_scan_bbt;
+ return 1;
+ }
+}
+
+static inline int __init doc2001plus_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->read_byte = doc2001plus_read_byte;
+ this->write_buf = doc2001plus_writebuf;
+ this->read_buf = doc2001plus_readbuf;
+ this->scan_bbt = inftl_scan_bbt;
+ this->cmd_ctrl = NULL;
+ this->select_chip = doc2001plus_select_chip;
+ this->cmdfunc = doc2001plus_command;
+ this->ecc.hwctl = doc2001plus_enable_hwecc;
+
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium Plus";
+
+ return 1;
+}
+
+static int __init doc_probe(unsigned long physadr)
+{
+ unsigned char ChipID;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct doc_priv *doc;
+ void __iomem *virtadr;
+ unsigned char save_control;
+ unsigned char tmp, tmpb, tmpc;
+ int reg, len, numchips;
+ int ret = 0;
+
+ if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip"))
+ return -EBUSY;
+ virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
+ if (!virtadr) {
+ printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
+ ret = -EIO;
+ goto error_ioremap;
+ }
+
+ /* It's not possible to cleanly detect the DiskOnChip - the
+ * bootup procedure will put the device into reset mode, and
+ * it's not possible to talk to it without actually writing
+ * to the DOCControl register. So we store the current contents
+ * of the DOCControl register's location, in case we later decide
+ * that it's not a DiskOnChip, and want to put it back how we
+ * found it.
+ */
+ save_control = ReadDOC(virtadr, DOCControl);
+
+ /* Reset the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+
+ /* Enable the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_Doc2k:
+ reg = DoC_2k_ECCStatus;
+ break;
+ case DOC_ChipID_DocMil:
+ reg = DoC_ECCConf;
+ break;
+ case DOC_ChipID_DocMilPlus16:
+ case DOC_ChipID_DocMilPlus32:
+ case 0:
+ /* Possible Millennium Plus, need to do more checks */
+ /* Possibly release from power down mode */
+ for (tmp = 0; (tmp < 4); tmp++)
+ ReadDOC(virtadr, Mplus_Power);
+
+ /* Reset the Millennium Plus ASIC */
+ tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+
+ mdelay(1);
+ /* Enable the Millennium Plus ASIC */
+ tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+ mdelay(1);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_DocMilPlus16:
+ reg = DoC_Mplus_Toggle;
+ break;
+ case DOC_ChipID_DocMilPlus32:
+ printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ break;
+
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ /* Check the TOGGLE bit in the ECC register */
+ tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
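+ /* The toggle bit alternates on successive reads, so a real DiskOnChip
+ yields tmp != tmpb and tmp == tmpc; anything else fails the test. */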
+ if ((tmp == tmpb) || (tmp != tmpc)) {
+ printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
+ ret = -ENODEV;
+ goto notfound;
+ }
+
+ for (mtd = doclist; mtd; mtd = doc->nextdoc) {
+ unsigned char oldval;
+ unsigned char newval;
+ nand = mtd->priv;
+ doc = nand->priv;
+ /* Use the alias resolution register to determine if this is
+ in fact the same DOC aliased to a new address. If writes
+ to one chip's alias resolution register change the value on
+ the other chip, they're the same chip. */
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ newval = ReadDOC(virtadr, Mplus_AliasResolution);
+ } else {
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ newval = ReadDOC(virtadr, AliasResolution);
+ }
+ if (oldval != newval)
+ continue;
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ WriteDOC(~newval, virtadr, Mplus_AliasResolution);
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it
+ } else {
+ WriteDOC(~newval, virtadr, AliasResolution);
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ WriteDOC(newval, virtadr, AliasResolution); // restore it
+ }
+ newval = ~newval;
+ if (oldval == newval) {
+ printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
+ goto notfound;
+ }
+ }
+
+ printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);
+
+ len = sizeof(struct mtd_info) +
+ sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
+ mtd = kzalloc(len, GFP_KERNEL);
+ if (!mtd) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ nand = (struct nand_chip *) (mtd + 1);
+ doc = (struct doc_priv *) (nand + 1);
+ nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
+ nand->bbt_md = nand->bbt_td + 1;
+
+ mtd->priv = nand;
+ mtd->owner = THIS_MODULE;
+
+ nand->priv = doc;
+ nand->select_chip = doc200x_select_chip;
+ nand->cmd_ctrl = doc200x_hwcontrol;
+ nand->dev_ready = doc200x_dev_ready;
+ nand->waitfunc = doc200x_wait;
+ nand->block_bad = doc200x_block_bad;
+ nand->ecc.hwctl = doc200x_enable_hwecc;
+ nand->ecc.calculate = doc200x_calculate_ecc;
+ nand->ecc.correct = doc200x_correct_data;
+
+ nand->ecc.layout = &doc200x_oobinfo;
+ nand->ecc.mode = NAND_ECC_HW_SYNDROME;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 6;
+ nand->ecc.strength = 2;
+ nand->bbt_options = NAND_BBT_USE_FLASH;
+
+ doc->physadr = physadr;
+ doc->virtadr = virtadr;
+ doc->ChipID = ChipID;
+ doc->curfloor = -1;
+ doc->curchip = -1;
+ doc->mh0_page = -1;
+ doc->mh1_page = -1;
+ doc->nextdoc = doclist;
+
+ if (ChipID == DOC_ChipID_Doc2k)
+ numchips = doc2000_init(mtd);
+ else if (ChipID == DOC_ChipID_DocMilPlus16)
+ numchips = doc2001plus_init(mtd);
+ else
+ numchips = doc2001_init(mtd);
+
+ if ((ret = nand_scan(mtd, numchips))) {
+ /* DBB note: I believe nand_release is necessary here, as
+ buffers may have been allocated in nand_base. Check with
+ Thomas. FIX ME! */
+ /* nand_release will call mtd_device_unregister, but we
+ haven't yet added it. This is handled without incident by
+ mtd_device_unregister, as far as I can tell. */
+ nand_release(mtd);
+ kfree(mtd);
+ goto fail;
+ }
+
+ /* Success! */
+ doclist = mtd;
+ return 0;
+
+ notfound:
+ /* Put back the contents of the DOCControl register, in case it's not
+ actually a DiskOnChip. */
+ WriteDOC(save_control, virtadr, DOCControl);
+ fail:
+ iounmap(virtadr);
+
+error_ioremap:
+ release_mem_region(physadr, DOC_IOREMAP_LEN);
+
+ return ret;
+}
+
+static void release_nanddoc(void)
+{
+ struct mtd_info *mtd, *nextmtd;
+ struct nand_chip *nand;
+ struct doc_priv *doc;
+
+ for (mtd = doclist; mtd; mtd = nextmtd) {
+ nand = mtd->priv;
+ doc = nand->priv;
+
+ nextmtd = doc->nextdoc;
+ nand_release(mtd);
+ iounmap(doc->virtadr);
+ release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
+ kfree(mtd);
+ }
+}
+
+static int __init init_nanddoc(void)
+{
+ int i, ret = 0;
+
+ /* We could create the decoder on demand, if memory is a concern.
+ * This way we have it handy, if an error happens
+ *
+ * Symbolsize is 10 (bits)
+ * Primitive polynomial is x^10+x^3+1
+ * first consecutive root is 510
+ * primitive element to generate roots = 1
+ * generator polynomial degree = 4
+ */
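+ /* Encoding note: 0x409 below is binary 100 0000 1001, i.e. bits 10, 3
+ and 0 set -- exactly the x^10+x^3+1 polynomial above. */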
+ rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
+ if (!rs_decoder) {
+ printk(KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
+ return -ENOMEM;
+ }
+
+ if (doc_config_location) {
+ printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
+ ret = doc_probe(doc_config_location);
+ if (ret < 0)
+ goto outerr;
+ } else {
+ for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ doc_probe(doc_locations[i]);
+ }
+ }
+ /* No banner message any more. Print a message if no DiskOnChip
+ found, so the user knows we at least tried. */
+ if (!doclist) {
+ printk(KERN_INFO "No valid DiskOnChip devices found\n");
+ ret = -ENODEV;
+ goto outerr;
+ }
+ return 0;
+ outerr:
+ free_rs(rs_decoder);
+ return ret;
+}
+
+static void __exit cleanup_nanddoc(void)
+{
+ /* Cleanup the nand/DoC resources */
+ release_nanddoc();
+
+ /* Free the reed solomon resources */
+ if (rs_decoder) {
+ free_rs(rs_decoder);
+ }
+}
+
+module_init(init_nanddoc);
+module_exit(cleanup_nanddoc);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
new file mode 100644
index 000000000..e5d7bcaaf
--- /dev/null
+++ b/drivers/mtd/nand/docg4.c
@@ -0,0 +1,1393 @@
+/*
+ * Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
+ *
+ * mtd nand driver for M-Systems DiskOnChip G4
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Tested on the Palm Treo 680. The G4 is also present on Toshiba Portege, Asus
+ * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
+ * Should work on these as well. Let me know!
+ *
+ * TODO:
+ *
+ * Mechanism for management of password-protected areas
+ *
+ * Hamming ecc when reading oob only
+ *
+ * According to the M-Sys documentation, this device is also available in a
+ * "dual-die" configuration having a 256MB capacity, but no mechanism for
+ * detecting this variant is documented. Currently this driver assumes 128MB
+ * capacity.
+ *
+ * Support for multiple cascaded devices ("floors"). Not sure which gadgets
+ * contain multiple G4s in a cascaded configuration, if any.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/bch.h>
+#include <linux/bitrev.h>
+#include <linux/jiffies.h>
+
+/*
+ * In "reliable mode" consecutive 2k pages are used in parallel (in some
+ * fashion) to store the same data. The data can be read back from the
+ * even-numbered pages in the normal manner; odd-numbered pages will appear to
+ * contain junk. Systems that boot from the docg4 typically write the secondary
+ * program loader (SPL) code in this mode. The SPL is loaded by the initial
+ * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped
+ * to the reset vector address). This module parameter enables you to use this
+ * driver to write the SPL. When in this mode, no more than 2k of data can be
+ * written at a time, because the addresses do not increment in the normal
+ * manner, and the starting offset must be within an even-numbered 2k region;
+ * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800,
+ * 0x1a00, ... Reliable mode is a special case and should not be used unless
+ * you know what you're doing.
+ */
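+/* Put differently: a starting offset is valid for reliable mode writes iff
+ * bit 11 is clear ((offset & 0x800) == 0): 0x0000, 0x1000, 0x2000, ... */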
+static bool reliable_mode;
+module_param(reliable_mode, bool, 0);
+MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode");
+
+/*
+ * You'll want to ignore badblocks if you're reading a partition that contains
+ * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
+ * it does not use mtd nand's method for marking bad blocks (using oob area).
+ * This will also skip the check of the "page written" flag.
+ */
+static bool ignore_badblocks;
+module_param(ignore_badblocks, bool, 0);
+MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
+
+struct docg4_priv {
+ struct mtd_info *mtd;
+ struct device *dev;
+ void __iomem *virtadr;
+ int status;
+ struct {
+ unsigned int command;
+ int column;
+ int page;
+ } last_command;
+ uint8_t oob_buf[16];
+ uint8_t ecc_buf[7];
+ int oob_page;
+ struct bch_control *bch;
+};
+
+/*
+ * Defines prefixed with DOCG4 are unique to the diskonchip G4. All others are
+ * shared with other diskonchip devices (P3, G3 at least).
+ *
+ * Functions with names prefixed with docg4_ are mtd / nand interface functions
+ * (though they may also be called internally). All others are internal.
+ */
+
+#define DOC_IOSPACE_DATA 0x0800
+
+/* register offsets */
+#define DOC_CHIPID 0x1000
+#define DOC_DEVICESELECT 0x100a
+#define DOC_ASICMODE 0x100c
+#define DOC_DATAEND 0x101e
+#define DOC_NOP 0x103e
+
+#define DOC_FLASHSEQUENCE 0x1032
+#define DOC_FLASHCOMMAND 0x1034
+#define DOC_FLASHADDRESS 0x1036
+#define DOC_FLASHCONTROL 0x1038
+#define DOC_ECCCONF0 0x1040
+#define DOC_ECCCONF1 0x1042
+#define DOC_HAMMINGPARITY 0x1046
+#define DOC_BCH_SYNDROM(idx) (0x1048 + idx)
+
+#define DOC_ASICMODECONFIRM 0x1072
+#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
+
+#define DOCG4_MYSTERY_REG 0x1050
+
+/* apparently used only to write oob bytes 6 and 7 */
+#define DOCG4_OOB_6_7 0x1052
+
+/* DOC_FLASHSEQUENCE register commands */
+#define DOC_SEQ_RESET 0x00
+#define DOCG4_SEQ_PAGE_READ 0x03
+#define DOCG4_SEQ_FLUSH 0x29
+#define DOCG4_SEQ_PAGEWRITE 0x16
+#define DOCG4_SEQ_PAGEPROG 0x1e
+#define DOCG4_SEQ_BLOCKERASE 0x24
+#define DOCG4_SEQ_SETMODE 0x45
+
+/* DOC_FLASHCOMMAND register commands */
+#define DOCG4_CMD_PAGE_READ 0x00
+#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOCG4_CMD_FLUSH 0x70
+#define DOCG4_CMD_READ2 0x30
+#define DOC_CMD_PROG_BLOCK_ADDR 0x60
+#define DOCG4_CMD_PAGEWRITE 0x80
+#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOCG4_CMD_FAST_MODE 0xa3 /* functionality guessed */
+#define DOC_CMD_RELIABLE_MODE 0x22
+#define DOC_CMD_RESET 0xff
+
+/* DOC_POWERMODE register bits */
+#define DOC_POWERDOWN_READY 0x80
+
+/* DOC_FLASHCONTROL register bits */
+#define DOC_CTRL_CE 0x10
+#define DOC_CTRL_UNKNOWN 0x40
+#define DOC_CTRL_FLASHREADY 0x01
+
+/* DOC_ECCCONF0 register bits */
+#define DOC_ECCCONF0_READ_MODE 0x8000
+#define DOC_ECCCONF0_UNKNOWN 0x2000
+#define DOC_ECCCONF0_ECC_ENABLE 0x1000
+#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
+
+/* DOC_ECCCONF1 register bits */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
+#define DOC_ECCCONF1_ECC_ENABLE 0x07
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
+
+/* DOC_ASICMODE register bits */
+#define DOC_ASICMODE_RESET 0x00
+#define DOC_ASICMODE_NORMAL 0x01
+#define DOC_ASICMODE_POWERDOWN 0x02
+#define DOC_ASICMODE_MDWREN 0x04
+#define DOC_ASICMODE_BDETCT_RESET 0x08
+#define DOC_ASICMODE_RSTIN_RESET 0x10
+#define DOC_ASICMODE_RAM_WE 0x20
+
+/* good status values read after read/write/erase operations */
+#define DOCG4_PROGSTATUS_GOOD 0x51
+#define DOCG4_PROGSTATUS_GOOD_2 0xe0
+
+/*
+ * On read operations (page and oob-only), the first byte read from I/O reg is a
+ * status. On error, it reads 0x73; otherwise, it reads either 0x71 (first read
+ * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
+ */
+#define DOCG4_READ_ERROR 0x02 /* bit 1 indicates read error */
+
+/* anatomy of the device */
+#define DOCG4_CHIP_SIZE 0x8000000
+#define DOCG4_PAGE_SIZE 0x200
+#define DOCG4_PAGES_PER_BLOCK 0x200
+#define DOCG4_BLOCK_SIZE (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
+#define DOCG4_NUMBLOCKS (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
+#define DOCG4_OOB_SIZE 0x10
+#define DOCG4_CHIP_SHIFT 27 /* log_2(DOCG4_CHIP_SIZE) */
+#define DOCG4_PAGE_SHIFT 9 /* log_2(DOCG4_PAGE_SIZE) */
+#define DOCG4_ERASE_SHIFT 18 /* log_2(DOCG4_BLOCK_SIZE) */
+
+/* all but the last byte is included in ecc calculation */
+#define DOCG4_BCH_SIZE (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
+
+#define DOCG4_USERDATA_LEN 520 /* 512 byte page plus 8 oob avail to user */
+
+/* expected values from the ID registers */
+#define DOCG4_IDREG1_VALUE 0x0400
+#define DOCG4_IDREG2_VALUE 0xfbff
+
+/* primitive polynomial used to build the Galois field used by hw ecc gen */
+#define DOCG4_PRIMITIVE_POLY 0x4443
+
+#define DOCG4_M 14 /* Galois field is of order 2^14 */
+#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
+
+#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
+#define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */
+
+/*
+ * Bytes 0, 1 are used as badblock marker.
+ * Bytes 2 - 6 are available to the user.
+ * Byte 7 is hamming ecc for first 7 oob bytes only.
+ * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
+ * Byte 15 (the last) is used by the driver as a "page written" flag.
+ */
+static struct nand_ecclayout docg4_oobinfo = {
+ .eccbytes = 9,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
+ .oobavail = 5,
+ .oobfree = { {.offset = 2, .length = 5} }
+};
+
+/*
+ * The device has a nop register which M-Sys claims is for the purpose of
+ * inserting precise delays. But beware; at least some operations fail if the
+ * nop writes are replaced with a generic delay!
+ */
+static inline void write_nop(void __iomem *docptr)
+{
+ writew(0, docptr + DOC_NOP);
+}
+
+static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *nand = mtd->priv;
+ uint16_t *p = (uint16_t *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ p[i] = readw(nand->IO_ADDR_R);
+}
+
+static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *nand = mtd->priv;
+ uint16_t *p = (uint16_t *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ writew(p[i], nand->IO_ADDR_W);
+}
+
+static int poll_status(struct docg4_priv *doc)
+{
+ /*
+ * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
+ * register. Operations known to take a long time (e.g., block erase)
+ * should sleep for a while before calling this.
+ */
+
+ uint16_t flash_status;
+ unsigned long timeo;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* hardware quirk requires reading twice initially */
+ flash_status = readw(docptr + DOC_FLASHCONTROL);
+
+ timeo = jiffies + msecs_to_jiffies(200); /* generous timeout */
+ do {
+ cpu_relax();
+ flash_status = readb(docptr + DOC_FLASHCONTROL);
+ } while (!(flash_status & DOC_CTRL_FLASHREADY) &&
+ time_before(jiffies, timeo));
+
+ if (unlikely(!(flash_status & DOC_CTRL_FLASHREADY))) {
+ dev_err(doc->dev, "%s: timed out!\n", __func__);
+ return NAND_STATUS_FAIL;
+ }
+
+ return 0;
+}
+
+
+static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
+{
+
+ struct docg4_priv *doc = nand->priv;
+ int status = NAND_STATUS_WP; /* inverse logic?? */
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* report any previously unreported error */
+ if (doc->status) {
+ status |= doc->status;
+ doc->status = 0;
+ return status;
+ }
+
+ status |= poll_status(doc);
+ return status;
+}
+
+static void docg4_select_chip(struct mtd_info *mtd, int chip)
+{
+ /*
+ * Select among multiple cascaded chips ("floors"). Multiple floors are
+ * not yet supported, so the only valid non-negative value is 0.
+ */
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
+
+ if (chip < 0)
+ return; /* deselected */
+
+ if (chip > 0)
+ dev_warn(doc->dev, "multiple floors currently unsupported\n");
+
+ writew(0, docptr + DOC_DEVICESELECT);
+}
+
+static void reset(struct mtd_info *mtd)
+{
+ /* full device reset */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+ write_nop(docptr);
+
+ writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+
+ writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
+
+ poll_status(doc);
+}
+
+static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
+{
+ /* read the 7 hw-generated ecc bytes */
+
+ int i;
+ for (i = 0; i < 7; i++) { /* hw quirk; read twice */
+ ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+ ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+ }
+}
+
+static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ /*
+ * Called after a page read when hardware reports bitflips.
+ * Up to four bitflips can be corrected.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i, numerrs, errpos[4];
+ const uint8_t blank_read_hwecc[8] = {
+ 0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
+
+ read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
+
+ /* check if read error is due to a blank page */
+ if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
+ return 0; /* yes */
+
+ /* skip additional check of "written flag" if ignore_badblocks */
+ if (ignore_badblocks == false) {
+
+ /*
+ * If the hw ecc bytes are not those of a blank page, there's
+ * still a chance that the page is blank, but was read with
+ * errors. Check the "written flag" in last oob byte, which
+ * is set to zero when a page is written. If more than half
+ * the bits are set, assume a blank page. Unfortunately, the
+ * bit flip(s) are not reported in stats.
+ */
+
+ if (nand->oob_poi[15]) {
+ int bit, numsetbits = 0;
+ unsigned long written_flag = nand->oob_poi[15];
+ for_each_set_bit(bit, &written_flag, 8)
+ numsetbits++;
+ if (numsetbits > 4) { /* assume blank */
+ dev_warn(doc->dev,
+ "error(s) in blank page "
+ "at offset %08x\n",
+ page * DOCG4_PAGE_SIZE);
+ return 0;
+ }
+ }
+ }
+
+ /*
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result. Thanks to Ivan
+ * Djelic for his analysis!
+ */
+ for (i = 0; i < 7; i++)
+ doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
+
+ numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
+ doc->ecc_buf, NULL, errpos);
+
+ if (numerrs == -EBADMSG) {
+ dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
+ page * DOCG4_PAGE_SIZE);
+ return -EBADMSG;
+ }
+
+ BUG_ON(numerrs < 0); /* -EINVAL, or anything other than -EBADMSG */
+
+ /* undo last step in BCH alg (modulo mirroring not needed) */
+ for (i = 0; i < numerrs; i++)
+ errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7));
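+ /* e.g. bit position 10 (byte 1, bit 2) maps to 13 (byte 1, bit 5) */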
+
+ /* fix the errors */
+ for (i = 0; i < numerrs; i++) {
+
+ /* ignore if error within oob ecc bytes */
+ if (errpos[i] > DOCG4_USERDATA_LEN * 8)
+ continue;
+
+ /* if error within oob area preceding ecc bytes... */
+ if (errpos[i] > DOCG4_PAGE_SIZE * 8)
+ change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
+ (unsigned long *)nand->oob_poi);
+
+ else /* error in page data */
+ change_bit(errpos[i], (unsigned long *)buf);
+ }
+
+ dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
+ numerrs, page * DOCG4_PAGE_SIZE);
+
+ return numerrs;
+}
+
+static uint8_t docg4_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+
+ dev_dbg(doc->dev, "%s\n", __func__);
+
+ if (doc->last_command.command == NAND_CMD_STATUS) {
+ int status;
+
+ /*
+ * Previous nand command was status request, so nand
+ * infrastructure code expects to read the status here. If an
+ * error occurred in a previous operation, report it.
+ */
+ doc->last_command.command = 0;
+
+ if (doc->status) {
+ status = doc->status;
+ doc->status = 0;
+ }
+
+ /* why is NAND_STATUS_WP inverse logic?? */
+ else
+ status = NAND_STATUS_WP | NAND_STATUS_READY;
+
+ return status;
+ }
+
+ dev_warn(doc->dev, "unexpected call to read_byte()\n");
+
+ return 0;
+}
+
+static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
+{
+ /* write the four address bytes packed in docg4_addr to the device */
+
+ void __iomem *docptr = doc->virtadr;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+ docg4_addr >>= 8;
+ writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+}
+
+static int read_progstatus(struct docg4_priv *doc)
+{
+ /*
+ * This apparently checks the status of programming. Done after an
+ * erasure, and after page data is written. On error, the status is
+ * saved, to be later retrieved by the nand infrastructure code.
+ */
+ void __iomem *docptr = doc->virtadr;
+
+ /* status is read from the I/O reg */
+ uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
+ uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
+ uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
+ __func__, status1, status2, status3);
+
+ if (status1 != DOCG4_PROGSTATUS_GOOD
+ || status2 != DOCG4_PROGSTATUS_GOOD_2
+ || status3 != DOCG4_PROGSTATUS_GOOD_2) {
+ doc->status = NAND_STATUS_FAIL;
+ dev_warn(doc->dev, "read_progstatus failed: "
+ "%02x, %02x, %02x\n", status1, status2, status3);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int pageprog(struct mtd_info *mtd)
+{
+ /*
+ * Final step in writing a page. Writes the contents of its
+ * internal buffer out to the flash array, or some such.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ int retval = 0;
+
+ dev_dbg(doc->dev, "docg4: %s\n", __func__);
+
+ writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* Just busy-wait; usleep_range() slows things down noticeably. */
+ poll_status(doc);
+
+ writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ retval = read_progstatus(doc);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+
+ return retval;
+}
+
+static void sequence_reset(struct mtd_info *mtd)
+{
+ /* common starting sequence for all operations */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
+ writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+}
+
+static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+ /* first step in reading a page */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev,
+ "docg4: %s: g4 page %08x\n", __func__, docg4_addr);
+
+ sequence_reset(mtd);
+
+ writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+
+ write_addr(doc, docg4_addr);
+
+ write_nop(docptr);
+ writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ poll_status(doc);
+}
+
+static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+ /* first step in writing a page */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev,
+ "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
+ sequence_reset(mtd);
+
+ if (unlikely(reliable_mode)) {
+ writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ }
+
+ writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_addr(doc, docg4_addr);
+ write_nop(docptr);
+ write_nop(docptr);
+ poll_status(doc);
+}
+
+static uint32_t mtd_to_docg4_address(int page, int column)
+{
+ /*
+ * Convert mtd address to format used by the device, 32 bit packed.
+ *
+ * Some notes on G4 addressing... The M-Sys documentation on this device
+ * claims that pages are 2K in length, and indeed, the format of the
+ * address used by the device reflects that. But within each page are
+ * four 512 byte "sub-pages", each with its own oob data that is
+ * read/written immediately after the 512 bytes of page data. This oob
+ * data contains the ecc bytes for the preceding 512 bytes.
+ *
+ * Rather than tell the mtd nand infrastructure that page size is 2k,
+ * with four sub-pages each, we engage in a little subterfuge and tell
+ * the infrastructure code that pages are 512 bytes in size. This is
+ * done because during the course of reverse-engineering the device, I
+ * never observed an instance where an entire 2K "page" was read or
+ * written as a unit. Each "sub-page" is always addressed individually,
+ * its data read/written, and ecc handled before the next "sub-page" is
+ * addressed.
+ *
+ * This requires us to convert addresses passed by the mtd nand
+ * infrastructure code to those used by the device.
+ *
+ * The address that is written to the device consists of four bytes: the
+ * first two are the 2k page number, and the second is the index into
+ * the page. The index is in terms of 16-bit half-words and includes
+ * the preceding oob data, so e.g., the index into the second
+ * "sub-page" is 0x108, and the full device address of the start of mtd
+ * page 0x201 is 0x00800108.
+ */
+ int g4_page = page / 4; /* device's 2K page */
+ int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
+ return (g4_page << 16) | g4_index; /* pack */
+}
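+
+/*
+ * Sanity check of the example above: mtd page 0x201 at column 0 gives
+ * g4_page = 0x201 / 4 = 0x80 and g4_index = (0x201 % 4) * 0x108 = 0x108,
+ * which pack to (0x80 << 16) | 0x108 = 0x00800108, as stated.
+ */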
+
+static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
+ int page_addr)
+{
+ /* handle standard nand commands */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);
+
+ dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
+ __func__, command, page_addr, column);
+
+ /*
+ * Save the command and its arguments. This enables emulation of
+ * standard flash devices, and also some optimizations.
+ */
+ doc->last_command.command = command;
+ doc->last_command.column = column;
+ doc->last_command.page = page_addr;
+
+ switch (command) {
+
+ case NAND_CMD_RESET:
+ reset(mtd);
+ break;
+
+ case NAND_CMD_READ0:
+ read_page_prologue(mtd, g4_addr);
+ break;
+
+ case NAND_CMD_STATUS:
+ /* next call to read_byte() will expect a status */
+ break;
+
+ case NAND_CMD_SEQIN:
+ if (unlikely(reliable_mode)) {
+ uint16_t g4_page = g4_addr >> 16;
+
+ /* writes to odd-numbered 2k pages are invalid */
+ if (g4_page & 0x01)
+ dev_warn(doc->dev,
+ "invalid reliable mode address\n");
+ }
+
+ write_page_prologue(mtd, g4_addr);
+
+ /* hack for deferred write of oob bytes */
+ if (doc->oob_page == page_addr)
+ memcpy(nand->oob_poi, doc->oob_buf, 16);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ pageprog(mtd);
+ break;
+
+ /* we don't expect these, based on review of nand_base.c */
+ case NAND_CMD_READOOB:
+ case NAND_CMD_READID:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ dev_warn(doc->dev, "docg4_command: "
+ "unexpected nand command 0x%x\n", command);
+ break;
+
+ }
+}
+
+static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int page, bool use_ecc)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t status, edc_err, *buf16;
+ int bits_corrected = 0;
+
+ dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+
+ writew(DOC_ECCCONF0_READ_MODE |
+ DOC_ECCCONF0_ECC_ENABLE |
+ DOC_ECCCONF0_UNKNOWN |
+ DOCG4_BCH_SIZE,
+ docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* the 1st byte from the I/O reg is a status; the rest is page data */
+ status = readw(docptr + DOC_IOSPACE_DATA);
+ if (status & DOCG4_READ_ERROR) {
+ dev_err(doc->dev,
+ "docg4_read_page: bad status: 0x%02x\n", status);
+ writew(0, docptr + DOC_DATAEND);
+ return -EIO;
+ }
+
+ dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+ docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
+
+ /* this device always reads oob after page data */
+ /* first 14 oob bytes read from I/O reg */
+ docg4_read_buf(mtd, nand->oob_poi, 14);
+
+ /* last 2 read from another reg */
+ buf16 = (uint16_t *)(nand->oob_poi + 14);
+ *buf16 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ write_nop(docptr);
+
+ if (likely(use_ecc == true)) {
+
+ /* read the register that tells us if bitflip(s) detected */
+ edc_err = readw(docptr + DOC_ECCCONF1);
+ edc_err = readw(docptr + DOC_ECCCONF1);
+ dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);
+
+ /* If bitflips are reported, attempt to correct with ecc */
+ if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
+ bits_corrected = correct_data(mtd, buf, page);
+ if (bits_corrected == -EBADMSG)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += bits_corrected;
+ }
+ }
+
+ writew(0, docptr + DOC_DATAEND);
+ if (bits_corrected == -EBADMSG) /* uncorrectable errors */
+ return 0;
+ return bits_corrected;
+}
+
+
+static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int oob_required, int page)
+{
+ return read_page(mtd, nand, buf, page, false);
+}
+
+static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int oob_required, int page)
+{
+ return read_page(mtd, nand, buf, page, true);
+}
+
+static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
+ int page)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t status;
+
+ dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
+
+ docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+
+ writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* the 1st byte from the I/O reg is a status; the rest is oob data */
+ status = readw(docptr + DOC_IOSPACE_DATA);
+ if (status & DOCG4_READ_ERROR) {
+ dev_warn(doc->dev,
+ "docg4_read_oob failed: status = 0x%02x\n", status);
+ return -EIO;
+ }
+
+ dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+ docg4_read_buf(mtd, nand->oob_poi, 16);
+
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+
+ return 0;
+}
+
+static int docg4_erase_block(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t g4_page;
+
+ dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
+
+ sequence_reset(mtd);
+
+ writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+
+ /* only 2 bytes of address are written to specify erase block */
+ g4_page = (uint16_t)(page / 4); /* to g4's 2k page addressing */
+ writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+ g4_page >>= 8;
+ writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+ write_nop(docptr);
+
+ /* start the erasure */
+ writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ usleep_range(500, 1000); /* erasure is long; take a snooze */
+ poll_status(doc);
+ writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+ write_nop(docptr);
+
+ read_progstatus(doc);
+
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+ poll_status(doc);
+ write_nop(docptr);
+
+ return nand->waitfunc(mtd, nand);
+}
+
+static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf, bool use_ecc)
+{
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint8_t ecc_buf[8];
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ writew(DOC_ECCCONF0_ECC_ENABLE |
+ DOC_ECCCONF0_UNKNOWN |
+ DOCG4_BCH_SIZE,
+ docptr + DOC_ECCCONF0);
+ write_nop(docptr);
+
+ /* write the page data */
+ docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);
+
+ /* oob bytes 0 through 5 are written to I/O reg */
+ docg4_write_buf16(mtd, nand->oob_poi, 6);
+
+ /* oob byte 6 written to a separate reg */
+ writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);
+
+ write_nop(docptr);
+ write_nop(docptr);
+
+ /* write hw-generated ecc bytes to oob */
+ if (likely(use_ecc == true)) {
+ /* oob byte 7 is hamming code */
+ uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
+ hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
+ writew(hamming, docptr + DOCG4_OOB_6_7);
+ write_nop(docptr);
+
+ /* read the 7 bch bytes from ecc regs */
+ read_hw_ecc(docptr, ecc_buf);
+ ecc_buf[7] = 0; /* clear the "page written" flag */
+ }
+
+ /* write user-supplied bytes to oob */
+ else {
+ writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
+ write_nop(docptr);
+ memcpy(ecc_buf, &nand->oob_poi[8], 8);
+ }
+
+ docg4_write_buf16(mtd, ecc_buf, 8);
+ write_nop(docptr);
+ write_nop(docptr);
+ writew(0, docptr + DOC_DATAEND);
+ write_nop(docptr);
+
+ return 0;
+}
+
+static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf, int oob_required)
+{
+ return write_page(mtd, nand, buf, false);
+}
+
+static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
+ const uint8_t *buf, int oob_required)
+{
+ return write_page(mtd, nand, buf, true);
+}
+
+static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
+ int page)
+{
+ /*
+ * Writing oob-only is not really supported, because MLC nand must write
+ * oob bytes at the same time as page data. Nonetheless, we save the
+ * oob buffer contents here, and then write it along with the page data
+ * if the same page is subsequently written. This allows user space
+ * utilities that write the oob data prior to the page data to work
+ * (e.g., nandwrite). The disadvantage is that, if the intention was to
+ * write oob only, the operation is quietly ignored. Also, oob can get
+ * corrupted if two concurrent processes are running nandwrite.
+ */
+
+ /* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
+ struct docg4_priv *doc = nand->priv;
+ doc->oob_page = page;
+ memcpy(doc->oob_buf, nand->oob_poi, 16);
+ return 0;
+}
+
+static int __init read_factory_bbt(struct mtd_info *mtd)
+{
+ /*
+ * The device contains a read-only factory bad block table. Read it and
+ * update the memory-based bbt accordingly.
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
+ uint8_t *buf;
+ int i, block;
+ __u32 eccfailed_stats = mtd->ecc_stats.failed;
+
+ buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ read_page_prologue(mtd, g4_addr);
+ docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
+
+ /*
+ * If no memory-based bbt was created, exit. This will happen if module
+ * parameter ignore_badblocks is set. Then why even call this function?
+ * For an unknown reason, block erase always fails if it's the first
+ * operation after device power-up. The above read ensures it never is.
+ * Ugly, I know.
+ */
+ if (nand->bbt == NULL) /* no memory-based bbt */
+ goto exit;
+
+ if (mtd->ecc_stats.failed > eccfailed_stats) {
+ /*
+ * Whoops, an ecc failure occurred reading the factory bbt.
+ * It is stored redundantly, so we get another chance.
+ */
+ eccfailed_stats = mtd->ecc_stats.failed;
+ docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE);
+ if (mtd->ecc_stats.failed > eccfailed_stats) {
+ dev_warn(doc->dev,
+ "The factory bbt could not be read!\n");
+ goto exit;
+ }
+ }
+
+ /*
+ * Parse factory bbt and update memory-based bbt. Factory bbt format is
+ * simple: one bit per block, block numbers increase left to right (msb
+ * to lsb). Bit clear means bad block.
+ */
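+ /*
+ * Example: buf[0] == 0xfd (bit 1 clear) marks block 6 bad, since
+ * bits = ~0xfd has only bit 1 set and badblock = 0 + 7 - 1 = 6.
+ */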
+ for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
+ int bitnum;
+ unsigned long bits = ~buf[i];
+ for_each_set_bit(bitnum, &bits, 8) {
+ int badblock = block + 7 - bitnum;
+ nand->bbt[badblock / 4] |=
+ 0x03 << ((badblock % 4) * 2);
+ mtd->ecc_stats.badblocks++;
+ dev_notice(doc->dev, "factory-marked bad block: %d\n",
+ badblock);
+ }
+ }
+ exit:
+ kfree(buf);
+ return 0;
+}
+
+static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ /*
+ * Mark a block as bad. Bad blocks are marked in the oob area of the
+ * first page of the block. The default scan_bbt() in the nand
+ * infrastructure code works fine for building the memory-based bbt
+ * during initialization, as does the nand infrastructure function that
+ * checks if a block is bad by reading the bbt. This function replaces
+ * the nand default because writes to oob-only are not supported.
+ */
+
+ int ret, i;
+ uint8_t *buf;
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ struct nand_bbt_descr *bbtd = nand->badblock_pattern;
+ int page = (int)(ofs >> nand->page_shift);
+ uint32_t g4_addr = mtd_to_docg4_address(page, 0);
+
+ dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);
+
+ if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
+ dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
+ __func__, ofs);
+
+ /* allocate blank buffer for page data */
+ buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ /* write bit-wise negation of pattern to oob buffer */
+ memset(nand->oob_poi, 0xff, mtd->oobsize);
+ for (i = 0; i < bbtd->len; i++)
+ nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];
+
+ /* write first page of block */
+ write_page_prologue(mtd, g4_addr);
+ docg4_write_page(mtd, nand, buf, 1);
+ ret = pageprog(mtd);
+
+ kfree(buf);
+
+ return ret;
+}
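+
+/*
+ * Illustrative sketch, not part of the driver: with a hypothetical 2-byte
+ * badblock pattern { 0xde, 0xad } at offset 0, the inversion loop above
+ * would leave the start of the OOB as { 0x21, 0x52, 0xff, ... }.
+ */
+#if 0 /* example only */
+ u8 pattern[2] = { 0xde, 0xad };
+ u8 oob[16];
+
+ memset(oob, 0xff, sizeof(oob));
+ oob[0] = ~pattern[0]; /* 0x21 */
+ oob[1] = ~pattern[1]; /* 0x52 */
+#endif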
+
+static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ /* only called when module_param ignore_badblocks is set */
+ return 0;
+}
+
+static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ /*
+ * Put the device into "deep power-down" mode. Note that CE# must be
+ * deasserted for this to take effect. The xscale, e.g., can be
+ * configured to float this signal when the processor enters power-down,
+ * and a suitable pull-up ensures its deassertion.
+ */
+
+ int i;
+ uint8_t pwr_down;
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ void __iomem *docptr = doc->virtadr;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ /* poll the register that tells us we're ready to go to sleep */
+ for (i = 0; i < 10; i++) {
+ pwr_down = readb(docptr + DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ usleep_range(1000, 4000);
+ }
+
+ if (!(pwr_down & DOC_POWERDOWN_READY)) {
+ dev_err(doc->dev, "suspend failed; "
+ "timeout polling DOC_POWERDOWN_READY\n");
+ return -EIO;
+ }
+
+ writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
+ docptr + DOC_ASICMODE);
+ writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
+ docptr + DOC_ASICMODECONFIRM);
+
+ write_nop(docptr);
+
+ return 0;
+}
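+
+/*
+ * Illustrative sketch, not part of the driver: the mode change above writes
+ * the new mode to DOC_ASICMODE and its bitwise complement to
+ * DOC_ASICMODECONFIRM; a hypothetical helper wrapping that handshake:
+ */
+#if 0 /* example only */
+static void doc_set_asicmode(void __iomem *docptr, uint16_t mode)
+{
+ writew(mode, docptr + DOC_ASICMODE);
+ writew(~mode, docptr + DOC_ASICMODECONFIRM);
+}
+#endif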
+
+static int docg4_resume(struct platform_device *pdev)
+{
+
+ /*
+ * Exit power-down. Twelve consecutive reads of the address below
+ * accomplish this, assuming CE# has been asserted.
+ */
+
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ dev_dbg(doc->dev, "%s...\n", __func__);
+
+ for (i = 0; i < 12; i++)
+ readb(docptr + 0x1fff);
+
+ return 0;
+}
+
+static void __init init_mtd_structs(struct mtd_info *mtd)
+{
+ /* initialize mtd and nand data structures */
+
+ /*
+ * Note that some of the following initializations are not usually
+ * required within a nand driver because they are performed by the nand
+ * infrastructure code as part of nand_scan(). In this case they need
+ * to be initialized here because we skip the call to nand_scan_ident() (the
+ * first half of nand_scan()). The call to nand_scan_ident() is skipped
+ * because for this device the chip id is not read in the manner of a
+ * standard nand device. Unfortunately, nand_scan_ident() does other
+ * things as well, such as call nand_set_defaults().
+ */
+
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+
+ mtd->size = DOCG4_CHIP_SIZE;
+ mtd->name = "Msys_Diskonchip_G4";
+ mtd->writesize = DOCG4_PAGE_SIZE;
+ mtd->erasesize = DOCG4_BLOCK_SIZE;
+ mtd->oobsize = DOCG4_OOB_SIZE;
+ nand->chipsize = DOCG4_CHIP_SIZE;
+ nand->chip_shift = DOCG4_CHIP_SHIFT;
+ nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
+ nand->chip_delay = 20;
+ nand->page_shift = DOCG4_PAGE_SHIFT;
+ nand->pagemask = 0x3ffff;
+ nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ nand->badblockbits = 8;
+ nand->ecc.layout = &docg4_oobinfo;
+ nand->ecc.mode = NAND_ECC_HW_SYNDROME;
+ nand->ecc.size = DOCG4_PAGE_SIZE;
+ nand->ecc.prepad = 8;
+ nand->ecc.bytes = 8;
+ nand->ecc.strength = DOCG4_T;
+ nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
+ nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
+ nand->controller = &nand->hwcontrol;
+ spin_lock_init(&nand->controller->lock);
+ init_waitqueue_head(&nand->controller->wq);
+
+ /* methods */
+ nand->cmdfunc = docg4_command;
+ nand->waitfunc = docg4_wait;
+ nand->select_chip = docg4_select_chip;
+ nand->read_byte = docg4_read_byte;
+ nand->block_markbad = docg4_block_markbad;
+ nand->read_buf = docg4_read_buf;
+ nand->write_buf = docg4_write_buf16;
+ nand->erase = docg4_erase_block;
+ nand->ecc.read_page = docg4_read_page;
+ nand->ecc.write_page = docg4_write_page;
+ nand->ecc.read_page_raw = docg4_read_page_raw;
+ nand->ecc.write_page_raw = docg4_write_page_raw;
+ nand->ecc.read_oob = docg4_read_oob;
+ nand->ecc.write_oob = docg4_write_oob;
+
+ /*
+ * The way the nand infrastructure code is written, a memory-based bbt
+ * is not created if NAND_SKIP_BBTSCAN is set. With no memory bbt,
+ * nand->block_bad() is used. So when ignoring bad blocks, we skip the
+ * scan and define a dummy block_bad() which always returns 0.
+ */
+ if (ignore_badblocks) {
+ nand->options |= NAND_SKIP_BBTSCAN;
+ nand->block_bad = docg4_block_neverbad;
+ }
+}
+
+static int __init read_id_reg(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ void __iomem *docptr = doc->virtadr;
+ uint16_t id1, id2;
+
+ /* check for presence of g4 chip by reading id registers */
+ id1 = readw(docptr + DOC_CHIPID);
+ id1 = readw(docptr + DOCG4_MYSTERY_REG);
+ id2 = readw(docptr + DOC_CHIPID_INV);
+ id2 = readw(docptr + DOCG4_MYSTERY_REG);
+
+ if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
+ dev_info(doc->dev,
+ "NAND device: 128MiB Diskonchip G4 detected\n");
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int __init probe_docg4(struct platform_device *pdev)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ void __iomem *virtadr;
+ struct docg4_priv *doc;
+ int len, retval;
+ struct resource *r;
+ struct device *dev = &pdev->dev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(dev, "no io memory resource defined!\n");
+ return -ENODEV;
+ }
+
+ virtadr = ioremap(r->start, resource_size(r));
+ if (!virtadr) {
+ dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
+ return -EIO;
+ }
+
+ len = sizeof(struct mtd_info) + sizeof(struct nand_chip) +
+ sizeof(struct docg4_priv);
+ mtd = kzalloc(len, GFP_KERNEL);
+ if (mtd == NULL) {
+ retval = -ENOMEM;
+ goto fail;
+ }
+ nand = (struct nand_chip *) (mtd + 1);
+ doc = (struct docg4_priv *) (nand + 1);
+ mtd->priv = nand;
+ nand->priv = doc;
+ mtd->owner = THIS_MODULE;
+ doc->virtadr = virtadr;
+ doc->dev = dev;
+
+ init_mtd_structs(mtd);
+
+ /* initialize kernel bch algorithm */
+ doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
+ if (doc->bch == NULL) {
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, doc);
+
+ reset(mtd);
+ retval = read_id_reg(mtd);
+ if (retval == -ENODEV) {
+ dev_warn(dev, "No diskonchip G4 device found.\n");
+ goto fail;
+ }
+
+ retval = nand_scan_tail(mtd);
+ if (retval)
+ goto fail;
+
+ retval = read_factory_bbt(mtd);
+ if (retval)
+ goto fail;
+
+ retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+ if (retval)
+ goto fail;
+
+ doc->mtd = mtd;
+ return 0;
+
+ fail:
+ iounmap(virtadr);
+ if (mtd) {
+ /* re-declarations avoid compiler warning */
+ struct nand_chip *nand = mtd->priv;
+ struct docg4_priv *doc = nand->priv;
+ nand_release(mtd); /* deletes partitions and mtd devices */
+ free_bch(doc->bch);
+ kfree(mtd);
+ }
+
+ return retval;
+}
+
+static int __exit cleanup_docg4(struct platform_device *pdev)
+{
+ struct docg4_priv *doc = platform_get_drvdata(pdev);
+ nand_release(doc->mtd);
+ free_bch(doc->bch);
+ kfree(doc->mtd);
+ iounmap(doc->virtadr);
+ return 0;
+}
+
+static struct platform_driver docg4_driver = {
+ .driver = {
+ .name = "docg4",
+ },
+ .suspend = docg4_suspend,
+ .resume = docg4_resume,
+ .remove = __exit_p(cleanup_docg4),
+};
+
+module_platform_driver_probe(docg4_driver, probe_docg4);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Dunn");
+MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
new file mode 100644
index 000000000..04b22fd37
--- /dev/null
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -0,0 +1,963 @@
+/* Freescale Enhanced Local Bus Controller NAND driver
+ *
+ * Copyright © 2006-2007, 2010 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ * Scott Wood <scottwood@freescale.com>
+ * Jack Lan <jack.lan@freescale.com>
+ * Roy Zang <tie-fei.zang@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/fsl_lbc.h>
+
+#define MAX_BANKS 8
+#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
+#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
+
+/* mtd information per set */
+
+struct fsl_elbc_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct fsl_lbc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+ int page_size; /* NAND page size (0=512, 1=2048) */
+ unsigned int fmr; /* FCM Flash Mode Register value */
+};
+
+/* Freescale eLBC FCM controller information */
+
+struct fsl_elbc_fcm_ctrl {
+ struct nand_hw_control controller;
+ struct fsl_elbc_mtd *chips[MAX_BANKS];
+
+ u8 __iomem *addr; /* Address of assigned FCM buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes; /* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int status; /* status read from LTESR after last op */
+ unsigned int mdr; /* UPM/FCM Data Register value */
+ unsigned int use_mdr; /* Non zero if the MDR is to be set */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
+};
+
+/* These map to the positions used by the FCM hardware ECC generator */
+
+/* Small Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
+ .eccbytes = 3,
+ .eccpos = {6, 7, 8},
+ .oobfree = { {0, 5}, {9, 7} },
+};
+
+/* Small Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
+ .eccbytes = 3,
+ .eccpos = {8, 9, 10},
+ .oobfree = { {0, 5}, {6, 2}, {11, 5} },
+};
+
+/* Large Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
+ .eccbytes = 12,
+ .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
+ .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
+};
+
+/* Large Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
+ .eccbytes = 12,
+ .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
+ .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
+};
+
+/*
+ * The eLBC may use HW ECC, in which case the OOB offsets that the NAND core
+ * uses for the bbt would interfere with the ECC positions; that's why we
+ * implement our own descriptors. The OOB region {11, 5} works for both SP
+ * and LP chips, with ECCM = 1 and ECCM = 0.
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
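+
+/*
+ * Sanity check of the claim above, for illustration only: bytes 11..15 fall
+ * inside an oobfree range of every layout defined earlier -- sp_eccm0 frees
+ * 9..15, sp_eccm1 frees 11..15, lp_eccm0 frees 9..21 and lp_eccm1 frees
+ * 11..23 -- so the 4-byte pattern at offset 11 plus the version byte at
+ * offset 15 never collide with ECC bytes.
+ */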
+
+/*=================================*/
+
+/*
+ * Set up the FCM hardware block and page address fields, and the fcm
+ * structure addr field to point to the correct FCM buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ int buf_num;
+
+ elbc_fcm_ctrl->page = page_addr;
+
+ if (priv->page_size) {
+ /*
+ * large-page chip: FPAR[PI] holds the lowest 6 bits,
+ * FBAR[BLK] the remaining bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 6);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
+ (oob ? FPAR_LP_MS : 0) | column);
+ buf_num = (page_addr & 1) << 2;
+ } else {
+ /*
+ * small-page chip: FPAR[PI] holds the lowest 5 bits,
+ * FBAR[BLK] the remaining bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 5);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
+ (oob ? FPAR_SP_MS : 0) | column);
+ buf_num = page_addr & 7;
+ }
+
+ elbc_fcm_ctrl->addr = priv->vbase + buf_num * 1024;
+ elbc_fcm_ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ elbc_fcm_ctrl->index += priv->page_size ? 2048 : 512;
+
+ dev_vdbg(priv->dev, "set_addr: bank=%d, "
+ "elbc_fcm_ctrl->addr=0x%p (0x%p), "
+ "index %x, pes %d ps %d\n",
+ buf_num, elbc_fcm_ctrl->addr, priv->vbase,
+ elbc_fcm_ctrl->index,
+ chip->phys_erase_shift, chip->page_shift);
+}
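+
+/*
+ * Illustrative sketch, not part of the driver: for a large-page chip the
+ * split above sends page 0x1234 to FBAR = 0x48 (0x1234 >> 6), page index
+ * 0x34 (the low 6 bits), and selects buffer 0 or 4 by page parity:
+ */
+#if 0 /* example only */
+ int page_addr = 0x1234;
+ int fbar = page_addr >> 6; /* block number, 0x48 */
+ int pi = page_addr & 0x3f; /* page within block, 0x34 */
+ int buf_num = (page_addr & 1) << 2; /* even page: 0, odd page: 4 */
+#endif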
+
+/*
+ * execute FCM command and wait for it to complete
+ */
+static int fsl_elbc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ /* Setup the FMR[OP] to execute without write protection */
+ out_be32(&lbc->fmr, priv->fmr | 3);
+ if (elbc_fcm_ctrl->use_mdr)
+ out_be32(&lbc->mdr, elbc_fcm_ctrl->mdr);
+
+ dev_vdbg(priv->dev,
+ "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
+ in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
+ dev_vdbg(priv->dev,
+ "fsl_elbc_run_command: fbar=%08x fpar=%08x "
+ "fbcr=%08x bank=%d\n",
+ in_be32(&lbc->fbar), in_be32(&lbc->fpar),
+ in_be32(&lbc->fbcr), priv->bank);
+
+ ctrl->irq_status = 0;
+ /* execute special operation */
+ out_be32(&lbc->lsor, priv->bank);
+
+ /* wait for FCM complete flag or timeout */
+ wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
+ FCM_TIMEOUT_MSECS * HZ/1000);
+ elbc_fcm_ctrl->status = ctrl->irq_status;
+ /* store mdr value in case it was needed */
+ if (elbc_fcm_ctrl->use_mdr)
+ elbc_fcm_ctrl->mdr = in_be32(&lbc->mdr);
+
+ elbc_fcm_ctrl->use_mdr = 0;
+
+ if (elbc_fcm_ctrl->status != LTESR_CC) {
+ dev_info(priv->dev,
+ "command failed: fir %x fcr %x status %x mdr %x\n",
+ in_be32(&lbc->fir), in_be32(&lbc->fcr),
+ elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr);
+ return -EIO;
+ }
+
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return 0;
+
+ elbc_fcm_ctrl->max_bitflips = 0;
+
+ if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
+ uint32_t lteccr = in_be32(&lbc->lteccr);
+ /*
+ * If the command was a full page read and the eLBC
+ * has the LTECCR register, then bits 12-15 (ppc order) of
+ * LTECCR indicate which 512-byte sub-pages had fixed errors.
+ * Bits 28-31 are uncorrectable errors, marked elsewhere.
+ * For small-page NAND only 1 bit is used.
+ * If the eLBC doesn't have the LTECCR register, it reads 0.
+ * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
+ * count the number of sub-pages with bitflips and update
+ * ecc_stats.corrected accordingly.
+ */
+ if (lteccr & 0x000F000F)
+ out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
+ if (lteccr & 0x000F0000) {
+ mtd->ecc_stats.corrected++;
+ elbc_fcm_ctrl->max_bitflips = 1;
+ }
+ }
+
+ return 0;
+}
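+
+/*
+ * Illustrative sketch of the FIXME above, not wired into the driver: count
+ * the sub-pages that had corrected bitflips instead of bumping
+ * ecc_stats.corrected by at most one. PPC bits 12-15 are bits 16-19
+ * counting from the lsb.
+ */
+#if 0 /* example only */
+static int count_fixed_subpages(uint32_t lteccr)
+{
+ return hweight32((lteccr >> 16) & 0xf);
+}
+#endif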
+
+static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_CM1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RBW << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_RBW << FIR_OP3_SHIFT));
+
+ if (oob)
+ out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT);
+ else
+ out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
+ }
+}
+
+/* cmdfunc send commands to the FCM */
+static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ elbc_fcm_ctrl->use_mdr = 0;
+
+ /* clear the read buffer */
+ elbc_fcm_ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ elbc_fcm_ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 and READ1 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ1:
+ column += 256;
+
+ /* fall-through */
+ case NAND_CMD_READ0:
+ dev_dbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+
+ out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
+ set_addr(mtd, 0, page_addr, 0);
+
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ elbc_fcm_ctrl->index += column;
+
+ fsl_elbc_do_read(chip, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+ out_be32(&lbc->fbcr, mtd->oobsize - column);
+ set_addr(mtd, column, page_addr, 1);
+
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_elbc_do_read(chip, 1);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
+
+ out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_UA << FIR_OP1_SHIFT) |
+ (FIR_OP_RBW << FIR_OP2_SHIFT));
+ out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+ /*
+ * Although currently it's 8 bytes for READID, we always read
+ * the maximum 256 bytes (for PARAM).
+ */
+ out_be32(&lbc->fbcr, 256);
+ elbc_fcm_ctrl->read_bytes = 256;
+ elbc_fcm_ctrl->use_mdr = 1;
+ elbc_fcm_ctrl->mdr = column;
+ set_addr(mtd, 0, 0, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
+ "page_addr: 0x%x.\n", page_addr);
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_PA << FIR_OP1_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP2_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RS << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr,
+ (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+ (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
+
+ out_be32(&lbc->fbcr, 0);
+ elbc_fcm_ctrl->read_bytes = 0;
+ elbc_fcm_ctrl->use_mdr = 1;
+
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ __be32 fcr;
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
+ "page_addr: 0x%x, column: 0x%x.\n",
+ page_addr, column);
+
+ elbc_fcm_ctrl->column = column;
+ elbc_fcm_ctrl->use_mdr = 1;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ elbc_fcm_ctrl->oob = 1;
+ } else {
+ WARN_ON(column != 0);
+ elbc_fcm_ctrl->oob = 0;
+ }
+
+ fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
+ (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM2 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_WB << FIR_OP3_SHIFT) |
+ (FIR_OP_CM3 << FIR_OP4_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP5_SHIFT) |
+ (FIR_OP_RS << FIR_OP6_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP1_SHIFT) |
+ (FIR_OP_CA << FIR_OP2_SHIFT) |
+ (FIR_OP_PA << FIR_OP3_SHIFT) |
+ (FIR_OP_WB << FIR_OP4_SHIFT) |
+ (FIR_OP_CM3 << FIR_OP5_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP6_SHIFT) |
+ (FIR_OP_RS << FIR_OP7_SHIFT));
+
+ if (elbc_fcm_ctrl->oob)
+ /* OOB area --> READOOB */
+ fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
+ else
+ /* First 256 bytes --> READ0 */
+ fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+ }
+
+ out_be32(&lbc->fcr, fcr);
+ set_addr(mtd, column, page_addr, elbc_fcm_ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
+ "writing %d bytes.\n", elbc_fcm_ctrl->index);
+
+ /* if the write did not start at 0 or is not a full page
+ * then set the exact length, otherwise use a full page
+ * write so the HW generates the ECC.
+ */
+ if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
+ elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
+ out_be32(&lbc->fbcr,
+ elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
+ else
+ out_be32(&lbc->fbcr, 0);
+
+ fsl_elbc_run_command(mtd);
+ return;
+ }
+
+ /* CMD_STATUS must read the status byte while CEB is active */
+ /* Note - it does not wait for the ready line */
+ case NAND_CMD_STATUS:
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_RBW << FIR_OP1_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+ out_be32(&lbc->fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ elbc_fcm_ctrl->read_bytes = 1;
+
+ fsl_elbc_run_command(mtd);
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ setbits8(elbc_fcm_ctrl->addr, NAND_STATUS_WP);
+ return;
+
+ /* RESET without waiting for the ready line */
+ case NAND_CMD_RESET:
+ dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
+ out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
+ out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(priv->dev,
+ "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
+ command);
+ }
+}
+
+static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the FCM Controller Data Buffer
+ */
+static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+ dev_err(priv->dev, "write_buf of %d bytes", len);
+ elbc_fcm_ctrl->status = 0;
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - elbc_fcm_ctrl->index) {
+ dev_err(priv->dev,
+ "write_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, bufsize - elbc_fcm_ctrl->index);
+ len = bufsize - elbc_fcm_ctrl->index;
+ }
+
+ memcpy_toio(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], buf, len);
+ /*
+ * This is workaround for the weird elbc hangs during nand write,
+ * Scott Wood says: "...perhaps difference in how long it takes a
+ * write to make it through the localbus compared to a write to IMMR
+ * is causing problems, and sync isn't helping for some reason."
+ * Reading back the last byte helps though.
+ */
+ in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index] + len - 1);
+
+ elbc_fcm_ctrl->index += len;
+}
+
+/*
+ * Read a byte from the FCM hardware buffer if it has any data left;
+ * otherwise log an error and return ERR_BYTE.
+ */
+static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+
+ /* If there are still bytes in the FCM, then use the next byte. */
+ if (elbc_fcm_ctrl->index < elbc_fcm_ctrl->read_bytes)
+ return in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index++]);
+
+ dev_err(priv->dev, "read_byte beyond end of buffer\n");
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the FCM Controller Data Buffer
+ */
+static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ int avail;
+
+ if (len < 0)
+ return;
+
+ avail = min((unsigned int)len,
+ elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
+ memcpy_fromio(buf, &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], avail);
+ elbc_fcm_ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(priv->dev,
+ "read_buf beyond end of buffer "
+ "(%d requested, %d available)\n",
+ len, avail);
+}
+
+/* This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+
+ if (elbc_fcm_ctrl->status != LTESR_CC)
+ return NAND_STATUS_FAIL;
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
+}
+
+static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ unsigned int al;
+
+ /* calculate FMR Address Length field */
+ al = 0;
+ if (chip->pagemask & 0xffff0000)
+ al++;
+ if (chip->pagemask & 0xff000000)
+ al++;
+
+ priv->fmr |= al << FMR_AL_SHIFT;
+
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
+ chip->numchips);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
+ chip->chipsize);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
+ chip->pagemask);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
+ chip->chip_delay);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
+ chip->badblockpos);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
+ chip->chip_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
+ chip->page_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
+ chip->ecc.mode);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
+ chip->ecc.total);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
+ chip->ecc.layout);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
+ mtd->erasesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
+ mtd->writesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
+ mtd->oobsize);
+
+ /* adjust Option Register and ECC to match Flash page size */
+ if (mtd->writesize == 512) {
+ priv->page_size = 0;
+ clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ } else if (mtd->writesize == 2048) {
+ priv->page_size = 1;
+ setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ /* adjust ecc setup if needed */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+ BR_DECC_CHK_GEN) {
+ chip->ecc.size = 512;
+ chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
+ &fsl_elbc_oob_lp_eccm1 :
+ &fsl_elbc_oob_lp_eccm0;
+ }
+ } else {
+ dev_err(priv->dev,
+ "fsl_elbc_init: page size %d is not supported\n",
+ mtd->writesize);
+ return -1;
+ }
+
+ return 0;
+}
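+
+/*
+ * Illustrative sketch, not part of the driver: FMR[AL] counts the row
+ * (page) address bytes beyond the first two. A chip with 512Ki pages has
+ * pagemask = 0x7ffff, so one extra address byte is needed:
+ */
+#if 0 /* example only */
+ unsigned int al = 0, pagemask = 0x7ffff;
+
+ if (pagemask & 0xffff0000)
+ al++; /* pages need a third row-address byte */
+ if (pagemask & 0xff000000)
+ al++; /* ... and possibly a fourth */
+ /* here al == 1 */
+#endif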
+
+static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+
+ fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return elbc_fcm_ctrl->max_bitflips;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, uint32_t data_len,
+ const uint8_t *buf, int oob_required)
+{
+ fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+{
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct nand_chip *chip = &priv->chip;
+
+ dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
+
+ /* Fill in fsl_elbc_mtd structure */
+ priv->mtd.priv = chip;
+ priv->mtd.owner = THIS_MODULE;
+
+ /* set timeout to maximum */
+ priv->fmr = 15 << FMR_CWTO_SHIFT;
+ if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+ priv->fmr |= FMR_ECCM;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ chip->read_byte = fsl_elbc_read_byte;
+ chip->write_buf = fsl_elbc_write_buf;
+ chip->read_buf = fsl_elbc_read_buf;
+ chip->select_chip = fsl_elbc_select_chip;
+ chip->cmdfunc = fsl_elbc_cmdfunc;
+ chip->waitfunc = fsl_elbc_wait;
+
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+ /* set up nand options */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ chip->controller = &elbc_fcm_ctrl->controller;
+ chip->priv = priv;
+
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
+ chip->ecc.write_subpage = fsl_elbc_write_subpage;
+
+ /* If CS Base Register selects full hardware ECC then use it */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+ BR_DECC_CHK_GEN) {
+ chip->ecc.mode = NAND_ECC_HW;
+ /* put in small page settings and adjust later if needed */
+ chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
+ &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ } else {
+ /* otherwise fall back to default software ECC */
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ return 0;
+}
+
+static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
+{
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ nand_release(&priv->mtd);
+
+ kfree(priv->mtd.name);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ elbc_fcm_ctrl->chips[priv->bank] = NULL;
+ kfree(priv);
+ return 0;
+}
+
+static DEFINE_MUTEX(fsl_elbc_nand_mutex);
+
+static int fsl_elbc_nand_probe(struct platform_device *pdev)
+{
+ struct fsl_lbc_regs __iomem *lbc;
+ struct fsl_elbc_mtd *priv;
+ struct resource res;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
+ int ret;
+ int bank;
+ struct device *dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+
+ ppdata.of_node = pdev->dev.of_node;
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+ return -ENODEV;
+ lbc = fsl_lbc_ctrl_dev->regs;
+ dev = fsl_lbc_ctrl_dev->dev;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to get resource\n");
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < MAX_BANKS; bank++)
+ if ((in_be32(&lbc->bank[bank].br) & BR_V) &&
+ (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
+ (in_be32(&lbc->bank[bank].br) &
+ in_be32(&lbc->bank[bank].or) & BR_BA)
+ == fsl_lbc_addr(res.start))
+ break;
+
+ if (bank >= MAX_BANKS) {
+ dev_err(dev, "address did not match any chip selects\n");
+ return -ENODEV;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_lock(&fsl_elbc_nand_mutex);
+ if (!fsl_lbc_ctrl_dev->nand) {
+ elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
+ if (!elbc_fcm_ctrl) {
+ mutex_unlock(&fsl_elbc_nand_mutex);
+ ret = -ENOMEM;
+ goto err;
+ }
+ elbc_fcm_ctrl->counter++;
+
+ spin_lock_init(&elbc_fcm_ctrl->controller.lock);
+ init_waitqueue_head(&elbc_fcm_ctrl->controller.wq);
+ fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
+ } else {
+ elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+ }
+ mutex_unlock(&fsl_elbc_nand_mutex);
+
+ elbc_fcm_ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = fsl_lbc_ctrl_dev;
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(priv->dev, priv);
+
+ priv->vbase = ioremap(res.start, resource_size(&res));
+ if (!priv->vbase) {
+ dev_err(dev, "failed to map chip region\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
+ if (!priv->mtd.name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = fsl_elbc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_ident(&priv->mtd, 1, NULL);
+ if (ret)
+ goto err;
+
+ ret = fsl_elbc_chip_init_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ /* First look for a RedBoot table or partitions on the command
+ * line; these take precedence over device tree information. */
+ mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
+ NULL, 0);
+
+ printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
+ (unsigned long long)res.start, priv->bank);
+ return 0;
+
+err:
+ fsl_elbc_chip_remove(priv);
+ return ret;
+}
+
+static int fsl_elbc_nand_remove(struct platform_device *pdev)
+{
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+ struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+
+ fsl_elbc_chip_remove(priv);
+
+ mutex_lock(&fsl_elbc_nand_mutex);
+ elbc_fcm_ctrl->counter--;
+ if (!elbc_fcm_ctrl->counter) {
+ fsl_lbc_ctrl_dev->nand = NULL;
+ kfree(elbc_fcm_ctrl);
+ }
+ mutex_unlock(&fsl_elbc_nand_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_elbc_nand_match[] = {
+ { .compatible = "fsl,elbc-fcm-nand", },
+ {}
+};
+
+static struct platform_driver fsl_elbc_nand_driver = {
+ .driver = {
+ .name = "fsl,elbc-fcm-nand",
+ .of_match_table = fsl_elbc_nand_match,
+ },
+ .probe = fsl_elbc_nand_probe,
+ .remove = fsl_elbc_nand_remove,
+};
+
+module_platform_driver(fsl_elbc_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
new file mode 100644
index 000000000..51394e599
--- /dev/null
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -0,0 +1,1178 @@
+/*
+ * Freescale Integrated Flash Controller NAND driver
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/fsl_ifc.h>
+
+#define ERR_BYTE 0xFF /* Value returned for read
+ bytes when read failed */
+#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
+ for IFC NAND Machine */
+
+struct fsl_ifc_ctrl;
+
+/* mtd information per set */
+struct fsl_ifc_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct fsl_ifc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+};
+
+/* Freescale IFC NAND controller information */
+struct fsl_ifc_nand_ctrl {
+ struct nand_hw_control controller;
+ struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
+
+ void __iomem *addr; /* Address of assigned IFC buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes;/* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int eccread; /* Non zero for a full-page ECC read */
+ unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
+};
+
+static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
+
+/* 512-byte page with 4-bit ECC, 8-bit bus */
+static struct nand_ecclayout oob_512_8bit_ecc4 = {
+ .eccbytes = 8,
+ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {0, 5}, {6, 2} },
+};
+
+/* 512-byte page with 4-bit ECC, 16-bit bus */
+static struct nand_ecclayout oob_512_16bit_ecc4 = {
+ .eccbytes = 8,
+ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {2, 6}, },
+};
+
+/* 2048-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_2048_ecc4 = {
+ .eccbytes = 32,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ },
+ .oobfree = { {2, 6}, {40, 24} },
+};
+
+/* 4096-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_4096_ecc4 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ },
+ .oobfree = { {2, 6}, {72, 56} },
+};
+
+/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_4096_ecc8 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 82} },
+};
+
+/* 8192-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_8192_ecc4 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 208} },
+};
+
+/* 8192-byte page size with 8-bit ECC -- requires 344-byte OOB */
+static struct nand_ecclayout oob_8192_ecc8 = {
+ .eccbytes = 256,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263,
+ },
+ .oobfree = { {2, 6}, {264, 80} },
+};
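+
+/*
+ * Illustrative sketch, not part of the driver: the usable OOB space of a
+ * layout is the sum of its oobfree lengths; for oob_8192_ecc8 above that
+ * is 6 + 80 = 86 bytes.
+ */
+#if 0 /* example only */
+static unsigned int layout_oobavail(const struct nand_ecclayout *l)
+{
+ unsigned int i, avail = 0;
+
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE &&
+ l->oobfree[i].length; i++)
+ avail += l->oobfree[i].length;
+ return avail;
+}
+#endif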
+
+/*
+ * Generic flash bbt descriptors
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/*
+ * Set up the IFC hardware block and page address fields, and the ifc nand
+ * structure addr field to point to the correct IFC buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ int buf_num;
+
+ ifc_nand_ctrl->page = page_addr;
+ /* Program ROW0/COL0 */
+ iowrite32be(page_addr, &ifc->ifc_nand.row0);
+ iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+
+ buf_num = page_addr & priv->bufnum_mask;
+
+ ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
+ ifc_nand_ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ ifc_nand_ctrl->index += mtd->writesize;
+}
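+
+/*
+ * Illustrative sketch, not part of the driver: with a 2048-byte page and
+ * bufnum_mask == 3, page 5 maps to buffer 1; its main area starts at
+ * vbase + 1 * 4096 and its OOB 2048 bytes further in:
+ */
+#if 0 /* example only */
+ unsigned int buf = 5 & 3; /* == 1 */
+ u8 __iomem *main_area = priv->vbase + buf * (2048 * 2); /* priv as in set_addr() */
+ u8 __iomem *oob_area = main_area + 2048;
+#endif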
+
+static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
+ u32 __iomem *mainarea = (u32 __iomem *)addr;
+ u8 __iomem *oob = addr + mtd->writesize;
+ int i;
+
+ for (i = 0; i < mtd->writesize / 4; i++) {
+ if (__raw_readl(&mainarea[i]) != 0xffffffff)
+ return 0;
+ }
+
+ for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
+ int pos = chip->ecc.layout->eccpos[i];
+
+ if (__raw_readb(&oob[pos]) != 0xff)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* returns the ECC error count for one sub-page buffer; 15 means uncorrectable */
+static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
+ u32 *eccstat, unsigned int bufnum)
+{
+ u32 reg = eccstat[bufnum / 4];
+ int errors;
+
+ errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
+
+ return errors;
+}
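+
+/*
+ * Illustrative decoding of the packing above, example only: with
+ * eccstat[1] == 0x00020f00, buffer 4 had 0 errors, buffer 5 had 2
+ * corrected bitflips, and buffer 6 reads 15, i.e. an uncorrectable
+ * (or blank) sub-page.
+ */
+#if 0 /* example only */
+ u32 reg = 0x00020f00; /* hypothetical eccstat[1] */
+ int b4 = (reg >> 24) & 15; /* 0 */
+ int b5 = (reg >> 16) & 15; /* 2 */
+ int b6 = (reg >> 8) & 15; /* 15 */
+#endif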
+
+/*
+ * execute IFC NAND command and wait for it to complete
+ */
+static void fsl_ifc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 eccstat[4];
+ int i;
+
+ /* set the chip select for NAND Transaction */
+ iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
+ &ifc->ifc_nand.nand_csel);
+
+ dev_vdbg(priv->dev,
+ "%s: fir0=%08x fcr0=%08x\n",
+ __func__,
+ ioread32be(&ifc->ifc_nand.nand_fir0),
+ ioread32be(&ifc->ifc_nand.nand_fcr0));
+
+ ctrl->nand_stat = 0;
+
+ /* start read/write seq */
+ iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
+
+ /* ctrl->nand_stat will be updated from IRQ context */
+ if (!ctrl->nand_stat)
+ dev_err(priv->dev, "Controller is not responding\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
+ dev_err(priv->dev, "NAND Flash Timeout Error\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
+ dev_err(priv->dev, "NAND Flash Write Protect Error\n");
+
+ nctrl->max_bitflips = 0;
+
+ if (nctrl->eccread) {
+ int errors;
+ int bufnum = nctrl->page & priv->bufnum_mask;
+ int sector = bufnum * chip->ecc.steps;
+ int sector_end = sector + chip->ecc.steps - 1;
+
+ for (i = sector / 4; i <= sector_end / 4; i++)
+ eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
+
+ for (i = sector; i <= sector_end; i++) {
+ errors = check_read_ecc(mtd, ctrl, eccstat, i);
+
+ if (errors == 15) {
+ /*
+ * Uncorrectable error.
+ * OK only if the whole page is blank.
+ *
+ * We disable ECCER reporting due to...
+ * erratum IFC-A002770 -- so report it now if we
+ * see an uncorrectable error in ECCSTAT.
+ */
+ if (!is_blank(mtd, bufnum))
+ ctrl->nand_stat |=
+ IFC_NAND_EVTER_STAT_ECCER;
+ break;
+ }
+
+ mtd->ecc_stats.corrected += errors;
+ nctrl->max_bitflips = max_t(unsigned int,
+ nctrl->max_bitflips,
+ errors);
+ }
+
+ nctrl->eccread = 0;
+ }
+}
+
+static void fsl_ifc_do_read(struct nand_chip *chip,
+ int oob,
+ struct mtd_info *mtd)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
+ if (mtd->writesize > 512) {
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+
+ iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
+ } else {
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+
+ if (oob)
+ iowrite32be(NAND_CMD_READOOB <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ else
+ iowrite32be(NAND_CMD_READ0 <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ }
+}
+
+/* cmdfunc send commands to the IFC NAND Machine */
+static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /* clear the read buffer */
+ ifc_nand_ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ ifc_nand_ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ0:
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, page_addr, 0);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ ifc_nand_ctrl->index += column;
+
+ if (chip->ecc.mode == NAND_ECC_HW)
+ ifc_nand_ctrl->eccread = 1;
+
+ fsl_ifc_do_read(chip, 0, mtd);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, column, page_addr, 1);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_ifc_do_read(chip, 1, mtd);
+ fsl_ifc_run_command(mtd);
+
+ return;
+
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM: {
+ int timing = IFC_FIR_OP_RB;
+ if (command == NAND_CMD_PARAM)
+ timing = IFC_FIR_OP_RBCD;
+
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (timing << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(column, &ifc->ifc_nand.row3);
+
+ /*
+ * Although currently it's 8 bytes for READID, we always read
+ * the maximum 256 bytes (for PARAM).
+ */
+ iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = 256;
+
+ set_addr(mtd, 0, 0, 0);
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+
+ iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
+
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = 0;
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ u32 nand_fcr0;
+ ifc_nand_ctrl->column = column;
+ ifc_nand_ctrl->oob = 0;
+
+ if (mtd->writesize > 512) {
+ nand_fcr0 =
+ (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
+
+ iowrite32be(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_RDSTAT <<
+ IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
+ } else {
+ nand_fcr0 = ((NAND_CMD_PAGEPROG <<
+ IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN <<
+ IFC_NAND_FCR0_CMD2_SHIFT) |
+ (NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD3_SHIFT));
+
+ iowrite32be(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_RDSTAT <<
+ IFC_NAND_FIR1_OP7_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
+
+ if (column >= mtd->writesize)
+ nand_fcr0 |=
+ NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
+ else
+ nand_fcr0 |=
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
+ }
+
+ if (column >= mtd->writesize) {
+ /* OOB area --> READOOB */
+ column -= mtd->writesize;
+ ifc_nand_ctrl->oob = 1;
+ }
+ iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+ set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ if (ifc_nand_ctrl->oob) {
+ iowrite32be(ifc_nand_ctrl->index -
+ ifc_nand_ctrl->column,
+ &ifc->ifc_nand.nand_fbcr);
+ } else {
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ }
+
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ case NAND_CMD_STATUS:
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ else
+ setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ return;
+
+ case NAND_CMD_RESET:
+ iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
+ __func__, command);
+ }
+}
+
+static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the IFC NAND Controller Data Buffer
+ */
+static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+ dev_err(priv->dev, "%s: len %d bytes", __func__, len);
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %u available)\n",
+ __func__, len, bufsize - ifc_nand_ctrl->index);
+ len = bufsize - ifc_nand_ctrl->index;
+ }
+
+ memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
+ ifc_nand_ctrl->index += len;
+}
+
+/*
+ * Read a byte from the IFC hardware buffer;
+ * read function for 8-bit bus width.
+ */
+static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ unsigned int offset;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ offset = ifc_nand_ctrl->index++;
+ return in_8(ifc_nand_ctrl->addr + offset);
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read two bytes from the IFC hardware buffer and return one of them;
+ * read function for 16-bit bus width.
+ */
+static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ uint16_t data;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+ ifc_nand_ctrl->index += 2;
+ return (uint8_t) data;
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the IFC Controller Data Buffer
+ */
+static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ int avail;
+
+ if (len < 0) {
+ dev_err(priv->dev, "%s: len %d bytes", __func__, len);
+ return;
+ }
+
+ avail = min((unsigned int)len,
+ ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
+ memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
+ ifc_nand_ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %d available)\n",
+ __func__, len, avail);
+}
+
+/*
+ * This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 nand_fsr;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ return nand_fsr | NAND_STATUS_WP;
+}
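+
+/*
+ * Illustrative note (not part of the original driver): the value returned
+ * by this waitfunc is consumed by the generic NAND core, which tests the
+ * pass/fail bit after program and erase operations. A minimal sketch of
+ * that check, assuming the usual NAND status-bit layout:
+ */
+#if 0 /* example only, not compiled */
+ int status = chip->waitfunc(mtd, chip);
+
+ if (status & NAND_STATUS_FAIL) /* bit 0: program/erase failed */
+ return -EIO;
+#endif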
+
+static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+
+ fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
+ dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ mtd->ecc_stats.failed++;
+
+ return nctrl->max_bitflips;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+ fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+
+ dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
+ chip->numchips);
+ dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
+ chip->chipsize);
+ dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
+ chip->pagemask);
+ dev_dbg(priv->dev, "%s: nand->chip_delay = %d\n", __func__,
+ chip->chip_delay);
+ dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
+ chip->badblockpos);
+ dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
+ chip->chip_shift);
+ dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
+ chip->page_shift);
+ dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
+ chip->ecc.mode);
+ dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
+ chip->ecc.total);
+ dev_dbg(priv->dev, "%s: nand->ecc.layout = %p\n", __func__,
+ chip->ecc.layout);
+ dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
+ dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
+ dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
+ mtd->erasesize);
+ dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
+ mtd->writesize);
+ dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
+ mtd->oobsize);
+
+ return 0;
+}
+
+static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
+ uint32_t cs = priv->bank;
+
+ /* Save CSOR and CSOR_ext */
+ csor = ioread32be(&ifc->csor_cs[cs].csor);
+ csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
+
+ /* change PageSize to 8K and SpareSize to 1K */
+ csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
+ iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
+ iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
+
+ /* READID */
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(0x0, &ifc->ifc_nand.row3);
+
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
+
+ /* Program ROW0/COL0 */
+ iowrite32be(0x0, &ifc->ifc_nand.row0);
+ iowrite32be(0x0, &ifc->ifc_nand.col0);
+
+ /* set the chip select for NAND Transaction */
+ iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
+
+ /* start read seq */
+ iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
+
+ /* Restore CSOR and CSOR_ext */
+ iowrite32be(csor, &ifc->csor_cs[cs].csor);
+ iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
+}
+
+static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct nand_chip *chip = &priv->chip;
+ struct nand_ecclayout *layout;
+ u32 csor;
+
+ /* Fill in fsl_ifc_mtd structure */
+ priv->mtd.priv = chip;
+ priv->mtd.owner = THIS_MODULE;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+ chip->read_byte = fsl_ifc_read_byte16;
+ else
+ chip->read_byte = fsl_ifc_read_byte;
+
+ chip->write_buf = fsl_ifc_write_buf;
+ chip->read_buf = fsl_ifc_read_buf;
+ chip->select_chip = fsl_ifc_select_chip;
+ chip->cmdfunc = fsl_ifc_cmdfunc;
+ chip->waitfunc = fsl_ifc_wait;
+
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+ iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
+
+ /* set up nand options */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->options = NAND_NO_SUBPAGE_WRITE;
+
+ if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+ chip->read_byte = fsl_ifc_read_byte16;
+ chip->options |= NAND_BUSWIDTH_16;
+ } else {
+ chip->read_byte = fsl_ifc_read_byte;
+ }
+
+ chip->controller = &ifc_nand_ctrl->controller;
+ chip->priv = priv;
+
+ chip->ecc.read_page = fsl_ifc_read_page;
+ chip->ecc.write_page = fsl_ifc_write_page;
+
+ csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
+
+ /* Hardware generates ECC per 512 Bytes */
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 8;
+ chip->ecc.strength = 4;
+
+ switch (csor & CSOR_NAND_PGS_MASK) {
+ case CSOR_NAND_PGS_512:
+ if (chip->options & NAND_BUSWIDTH_16) {
+ layout = &oob_512_16bit_ecc4;
+ } else {
+ layout = &oob_512_8bit_ecc4;
+
+ /* Avoid conflict with bad block marker */
+ bbt_main_descr.offs = 0;
+ bbt_mirror_descr.offs = 0;
+ }
+
+ priv->bufnum_mask = 15;
+ break;
+
+ case CSOR_NAND_PGS_2K:
+ layout = &oob_2048_ecc4;
+ priv->bufnum_mask = 3;
+ break;
+
+ case CSOR_NAND_PGS_4K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_4096_ecc4;
+ } else {
+ layout = &oob_4096_ecc8;
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+
+ priv->bufnum_mask = 1;
+ break;
+
+ case CSOR_NAND_PGS_8K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_8192_ecc4;
+ } else {
+ layout = &oob_8192_ecc8;
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+
+ priv->bufnum_mask = 0;
+ break;
+
+ default:
+ dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
+ return -ENODEV;
+ }
+
+ /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+ if (csor & CSOR_NAND_ECC_DEC_EN) {
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.layout = layout;
+ } else {
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ if (ctrl->version == FSL_IFC_VERSION_1_1_0)
+ fsl_ifc_sram_init(priv);
+
+ return 0;
+}
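+
+/*
+ * Illustrative note (not part of the original driver): bufnum_mask above
+ * encodes how many whole pages fit in the controller's internal buffer,
+ * minus one. With the values chosen, the buffer always holds 8 KiB of
+ * page data:
+ *
+ * 512 B pages: 16 buffers -> bufnum_mask = 15
+ * 2 KiB pages: 4 buffers -> bufnum_mask = 3
+ * 4 KiB pages: 2 buffers -> bufnum_mask = 1
+ * 8 KiB pages: 1 buffer -> bufnum_mask = 0
+ *
+ * so a buffer can be selected with (page_addr & bufnum_mask), as
+ * set_addr() does elsewhere in this driver.
+ */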
+
+static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
+{
+ nand_release(&priv->mtd);
+
+ kfree(priv->mtd.name);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ ifc_nand_ctrl->chips[priv->bank] = NULL;
+
+ return 0;
+}
+
+static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
+ phys_addr_t addr)
+{
+ u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
+
+ if (!(cspr & CSPR_V))
+ return 0;
+ if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
+ return 0;
+
+ return (cspr & CSPR_BA) == convert_ifc_address(addr);
+}
+
+static DEFINE_MUTEX(fsl_ifc_nand_mutex);
+
+static int fsl_ifc_nand_probe(struct platform_device *dev)
+{
+ struct fsl_ifc_regs __iomem *ifc;
+ struct fsl_ifc_mtd *priv;
+ struct resource res;
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
+ int ret;
+ int bank;
+ struct device_node *node = dev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+
+ ppdata.of_node = dev->dev.of_node;
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+ return -ENODEV;
+ ifc = fsl_ifc_ctrl_dev->regs;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
+ if (match_bank(ifc, bank, res.start))
+ break;
+ }
+
+ if (bank >= fsl_ifc_ctrl_dev->banks) {
+ dev_err(&dev->dev, "%s: address did not match any chip selects\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_lock(&fsl_ifc_nand_mutex);
+ if (!fsl_ifc_ctrl_dev->nand) {
+ ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
+ if (!ifc_nand_ctrl) {
+ mutex_unlock(&fsl_ifc_nand_mutex);
+ return -ENOMEM;
+ }
+
+ ifc_nand_ctrl->read_bytes = 0;
+ ifc_nand_ctrl->index = 0;
+ ifc_nand_ctrl->addr = NULL;
+ fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
+
+ spin_lock_init(&ifc_nand_ctrl->controller.lock);
+ init_waitqueue_head(&ifc_nand_ctrl->controller.wq);
+ } else {
+ ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ ifc_nand_ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = fsl_ifc_ctrl_dev;
+ priv->dev = &dev->dev;
+
+ priv->vbase = ioremap(res.start, resource_size(&res));
+ if (!priv->vbase) {
+ dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_set_drvdata(priv->dev, priv);
+
+ iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN,
+ &ifc->ifc_nand.nand_evter_en);
+
+ /* enable NAND Machine Interrupts */
+ iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN,
+ &ifc->ifc_nand.nand_evter_intr_en);
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
+ if (!priv->mtd.name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = fsl_ifc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_ident(&priv->mtd, 1, NULL);
+ if (ret)
+ goto err;
+
+ ret = fsl_ifc_chip_init_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ /* First look for a RedBoot table or partitions on the command
+ * line; these take precedence over device tree information */
+ mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
+ NULL, 0);
+
+ dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
+ (unsigned long long)res.start, priv->bank);
+ return 0;
+
+err:
+ fsl_ifc_chip_remove(priv);
+ return ret;
+}
+
+static int fsl_ifc_nand_remove(struct platform_device *dev)
+{
+ struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+
+ fsl_ifc_chip_remove(priv);
+
+ mutex_lock(&fsl_ifc_nand_mutex);
+ ifc_nand_ctrl->counter--;
+ if (!ifc_nand_ctrl->counter) {
+ fsl_ifc_ctrl_dev->nand = NULL;
+ kfree(ifc_nand_ctrl);
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_ifc_nand_match[] = {
+ {
+ .compatible = "fsl,ifc-nand",
+ },
+ {}
+};
+
+static struct platform_driver fsl_ifc_nand_driver = {
+ .driver = {
+ .name = "fsl,ifc-nand",
+ .of_match_table = fsl_ifc_nand_match,
+ },
+ .probe = fsl_ifc_nand_probe,
+ .remove = fsl_ifc_nand_remove,
+};
+
+module_platform_driver(fsl_ifc_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
new file mode 100644
index 000000000..72755d7ec
--- /dev/null
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -0,0 +1,361 @@
+/*
+ * Freescale UPM NAND driver.
+ *
+ * Copyright © 2007-2008 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <asm/fsl_lbc.h>
+
+#define FSL_UPM_WAIT_RUN_PATTERN 0x1
+#define FSL_UPM_WAIT_WRITE_BYTE 0x2
+#define FSL_UPM_WAIT_WRITE_BUFFER 0x4
+
+struct fsl_upm_nand {
+ struct device *dev;
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int last_ctrl;
+ struct mtd_partition *parts;
+ struct fsl_upm upm;
+ uint8_t upm_addr_offset;
+ uint8_t upm_cmd_offset;
+ void __iomem *io_base;
+ int rnb_gpio[NAND_MAX_CHIPS];
+ uint32_t mchip_offsets[NAND_MAX_CHIPS];
+ uint32_t mchip_count;
+ uint32_t mchip_number;
+ int chip_delay;
+ uint32_t wait_flags;
+};
+
+static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
+{
+ return container_of(mtdinfo, struct fsl_upm_nand, mtd);
+}
+
+static int fun_chip_ready(struct mtd_info *mtd)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+ if (gpio_get_value(fun->rnb_gpio[fun->mchip_number]))
+ return 1;
+
+ dev_vdbg(fun->dev, "busy\n");
+ return 0;
+}
+
+static void fun_wait_rnb(struct fsl_upm_nand *fun)
+{
+ if (fun->rnb_gpio[fun->mchip_number] >= 0) {
+ int cnt = 1000000;
+
+ while (--cnt && !fun_chip_ready(&fun->mtd))
+ cpu_relax();
+ if (!cnt)
+ dev_err(fun->dev, "tired waiting for RNB\n");
+ } else {
+ ndelay(100);
+ }
+}
+
+static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ u32 mar;
+
+ if (!(ctrl & fun->last_ctrl)) {
+ fsl_upm_end_pattern(&fun->upm);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE);
+ }
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_ALE)
+ fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
+ else if (ctrl & NAND_CLE)
+ fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
+ }
+
+ mar = (cmd << (32 - fun->upm.width)) |
+ fun->mchip_offsets[fun->mchip_number];
+ fsl_upm_run_pattern(&fun->upm, chip->IO_ADDR_R, mar);
+
+ if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN)
+ fun_wait_rnb(fun);
+}
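+
+/*
+ * Illustrative example (not part of the original driver): with an 8-bit
+ * wide UPM (fun->upm.width == 8) and a zero chip offset, issuing
+ * NAND_CMD_READID (0x90) packs the command into the top byte of MAR:
+ *
+ * mar = 0x90 << (32 - 8) = 0x90000000
+ *
+ * so the UPM pattern shifts the command out on the most significant
+ * lanes of the local bus.
+ */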
+
+static void fun_select_chip(struct mtd_info *mtd, int mchip_nr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+ if (mchip_nr == -1) {
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ } else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
+ fun->mchip_number = mchip_nr;
+ chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
+ chip->IO_ADDR_W = chip->IO_ADDR_R;
+ } else {
+ BUG();
+ }
+}
+
+static uint8_t fun_read_byte(struct mtd_info *mtd)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+ return in_8(fun->chip.IO_ADDR_R);
+}
+
+static void fun_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = in_8(fun->chip.IO_ADDR_R);
+}
+
+static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ int i;
+
+ for (i = 0; i < len; i++) {
+ out_8(fun->chip.IO_ADDR_W, buf[i]);
+ if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE)
+ fun_wait_rnb(fun);
+ }
+ if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BUFFER)
+ fun_wait_rnb(fun);
+}
+
+static int fun_chip_init(struct fsl_upm_nand *fun,
+ const struct device_node *upm_np,
+ const struct resource *io_res)
+{
+ int ret;
+ struct device_node *flash_np;
+ struct mtd_part_parser_data ppdata;
+
+ fun->chip.IO_ADDR_R = fun->io_base;
+ fun->chip.IO_ADDR_W = fun->io_base;
+ fun->chip.cmd_ctrl = fun_cmd_ctrl;
+ fun->chip.chip_delay = fun->chip_delay;
+ fun->chip.read_byte = fun_read_byte;
+ fun->chip.read_buf = fun_read_buf;
+ fun->chip.write_buf = fun_write_buf;
+ fun->chip.ecc.mode = NAND_ECC_SOFT;
+ if (fun->mchip_count > 1)
+ fun->chip.select_chip = fun_select_chip;
+
+ if (fun->rnb_gpio[0] >= 0)
+ fun->chip.dev_ready = fun_chip_ready;
+
+ fun->mtd.priv = &fun->chip;
+ fun->mtd.owner = THIS_MODULE;
+
+ flash_np = of_get_next_child(upm_np, NULL);
+ if (!flash_np)
+ return -ENODEV;
+
+ fun->mtd.name = kasprintf(GFP_KERNEL, "0x%llx.%s", (u64)io_res->start,
+ flash_np->name);
+ if (!fun->mtd.name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = nand_scan(&fun->mtd, fun->mchip_count);
+ if (ret)
+ goto err;
+
+ ppdata.of_node = flash_np;
+ ret = mtd_device_parse_register(&fun->mtd, NULL, &ppdata, NULL, 0);
+err:
+ of_node_put(flash_np);
+ if (ret)
+ kfree(fun->mtd.name);
+ return ret;
+}
+
+static int fun_probe(struct platform_device *ofdev)
+{
+ struct fsl_upm_nand *fun;
+ struct resource io_res;
+ const __be32 *prop;
+ int rnb_gpio;
+ int ret;
+ int size;
+ int i;
+
+ fun = kzalloc(sizeof(*fun), GFP_KERNEL);
+ if (!fun)
+ return -ENOMEM;
+
+ ret = of_address_to_resource(ofdev->dev.of_node, 0, &io_res);
+ if (ret) {
+ dev_err(&ofdev->dev, "can't get IO base\n");
+ goto err1;
+ }
+
+ ret = fsl_upm_find(io_res.start, &fun->upm);
+ if (ret) {
+ dev_err(&ofdev->dev, "can't find UPM\n");
+ goto err1;
+ }
+
+ prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset",
+ &size);
+ if (!prop || size != sizeof(uint32_t)) {
+ dev_err(&ofdev->dev, "can't get UPM address offset\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+ fun->upm_addr_offset = *prop;
+
+ prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size);
+ if (!prop || size != sizeof(uint32_t)) {
+ dev_err(&ofdev->dev, "can't get UPM command offset\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+ fun->upm_cmd_offset = *prop;
+
+ prop = of_get_property(ofdev->dev.of_node,
+ "fsl,upm-addr-line-cs-offsets", &size);
+ if (prop && (size / sizeof(uint32_t)) > 0) {
+ fun->mchip_count = size / sizeof(uint32_t);
+ if (fun->mchip_count >= NAND_MAX_CHIPS) {
+ dev_err(&ofdev->dev, "too many chips\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+ for (i = 0; i < fun->mchip_count; i++)
+ fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
+ } else {
+ fun->mchip_count = 1;
+ }
+
+ for (i = 0; i < fun->mchip_count; i++) {
+ fun->rnb_gpio[i] = -1;
+ rnb_gpio = of_get_gpio(ofdev->dev.of_node, i);
+ if (rnb_gpio >= 0) {
+ ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev));
+ if (ret) {
+ dev_err(&ofdev->dev,
+ "can't request RNB gpio #%d\n", i);
+ goto err2;
+ }
+ gpio_direction_input(rnb_gpio);
+ fun->rnb_gpio[i] = rnb_gpio;
+ } else if (rnb_gpio == -EINVAL) {
+ dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i);
+ ret = -EINVAL;
+ goto err2;
+ }
+ }
+
+ prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL);
+ if (prop)
+ fun->chip_delay = be32_to_cpup(prop);
+ else
+ fun->chip_delay = 50;
+
+ prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size);
+ if (prop && size == sizeof(uint32_t))
+ fun->wait_flags = be32_to_cpup(prop);
+ else
+ fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
+ FSL_UPM_WAIT_WRITE_BYTE;
+
+ fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
+ resource_size(&io_res));
+ if (!fun->io_base) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ fun->dev = &ofdev->dev;
+ fun->last_ctrl = NAND_CLE;
+
+ ret = fun_chip_init(fun, ofdev->dev.of_node, &io_res);
+ if (ret)
+ goto err2;
+
+ dev_set_drvdata(&ofdev->dev, fun);
+
+ return 0;
+err2:
+ for (i = 0; i < fun->mchip_count; i++) {
+ if (fun->rnb_gpio[i] < 0)
+ break;
+ gpio_free(fun->rnb_gpio[i]);
+ }
+err1:
+ kfree(fun);
+
+ return ret;
+}
+
+static int fun_remove(struct platform_device *ofdev)
+{
+ struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
+ int i;
+
+ nand_release(&fun->mtd);
+ kfree(fun->mtd.name);
+
+ for (i = 0; i < fun->mchip_count; i++) {
+ if (fun->rnb_gpio[i] < 0)
+ break;
+ gpio_free(fun->rnb_gpio[i]);
+ }
+
+ kfree(fun);
+
+ return 0;
+}
+
+static const struct of_device_id of_fun_match[] = {
+ { .compatible = "fsl,upm-nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_fun_match);
+
+static struct platform_driver of_fun_driver = {
+ .driver = {
+ .name = "fsl,upm-nand",
+ .of_match_table = of_fun_match,
+ },
+ .probe = fun_probe,
+ .remove = fun_remove,
+};
+
+module_platform_driver(of_fun_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_DESCRIPTION("Driver for NAND chips working through Freescale "
+ "LocalBus User-Programmable Machine");
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
new file mode 100644
index 000000000..e58af4bfa
--- /dev/null
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -0,0 +1,1242 @@
+/*
+ * drivers/mtd/nand/fsmc_nand.c
+ *
+ * ST Microelectronics
+ * Flexible Static Memory Controller (FSMC)
+ * Driver for NAND portions
+ *
+ * Copyright © 2010 ST Microelectronics
+ * Vipin Kumar <vipin.kumar@st.com>
+ * Ashish Priyadarshi
+ *
+ * Based on drivers/mtd/nand/nomadik_nand.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/resource.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mtd/fsmc.h>
+#include <linux/amba/bus.h>
+#include <mtd/mtd-abi.h>
+
+static struct nand_ecclayout fsmc_ecc1_128_layout = {
+ .eccbytes = 24,
+ .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
+ 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ {.offset = 24, .length = 8},
+ {.offset = 40, .length = 8},
+ {.offset = 56, .length = 8},
+ {.offset = 72, .length = 8},
+ {.offset = 88, .length = 8},
+ {.offset = 104, .length = 8},
+ {.offset = 120, .length = 8}
+ }
+};
+
+static struct nand_ecclayout fsmc_ecc1_64_layout = {
+ .eccbytes = 12,
+ .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ {.offset = 24, .length = 8},
+ {.offset = 40, .length = 8},
+ {.offset = 56, .length = 8},
+ }
+};
+
+static struct nand_ecclayout fsmc_ecc1_16_layout = {
+ .eccbytes = 3,
+ .eccpos = {2, 3, 4},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16
+ * bytes of OOB size are reserved for ECC, bytes 0 & 1 are reserved for the
+ * bad block marker and 46 bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_256_layout = {
+ .eccbytes = 208,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126,
+ 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142,
+ 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158,
+ 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174,
+ 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190,
+ 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206,
+ 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222,
+ 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238,
+ 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 3},
+ {.offset = 143, .length = 3},
+ {.offset = 159, .length = 3},
+ {.offset = 175, .length = 3},
+ {.offset = 191, .length = 3},
+ {.offset = 207, .length = 3},
+ {.offset = 223, .length = 3},
+ {.offset = 239, .length = 3},
+ {.offset = 255, .length = 1}
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8
+ * bytes of OOB size are reserved for ECC, bytes 0 & 1 are reserved for the
+ * bad block marker and 118 bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_224_layout = {
+ .eccbytes = 104,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 97}
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8
+ * bytes of OOB size are reserved for ECC, bytes 0 & 1 are reserved for the
+ * bad block marker and 22 bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_128_layout = {
+ .eccbytes = 104,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 1}
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4
+ * bytes of OOB size are reserved for ECC, bytes 0 & 1 are reserved for the
+ * bad block marker and 10 bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_64_layout = {
+ .eccbytes = 52,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 1},
+ }
+};
+
+/*
+ * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes
+ * of OOB size are reserved for ECC, bytes 4 & 5 are reserved for the bad
+ * block marker and one byte is free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_16_layout = {
+ .eccbytes = 13,
+ .eccpos = { 0, 1, 2, 3, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14
+ },
+ .oobfree = {
+ {.offset = 15, .length = 1},
+ }
+};
+
+/*
+ * ECC placement definitions in oobfree type format.
+ * There are 13 bytes of ECC for every 512-byte block, and they have to be
+ * read consecutively, immediately after the 512-byte data block, for the
+ * hardware to generate the error bit offsets within the 512 bytes of data.
+ * Managing the ECC bytes in the following way makes it easier for software
+ * to read the ECC bytes consecutively with the data bytes. This layout is
+ * similar to the oobfree structure already maintained in the generic NAND
+ * driver.
+ */
+static struct fsmc_eccplace fsmc_ecc4_lp_place = {
+ .eccplace = {
+ {.offset = 2, .length = 13},
+ {.offset = 18, .length = 13},
+ {.offset = 34, .length = 13},
+ {.offset = 50, .length = 13},
+ {.offset = 66, .length = 13},
+ {.offset = 82, .length = 13},
+ {.offset = 98, .length = 13},
+ {.offset = 114, .length = 13}
+ }
+};
+
+static struct fsmc_eccplace fsmc_ecc4_sp_place = {
+ .eccplace = {
+ {.offset = 0, .length = 4},
+ {.offset = 6, .length = 9}
+ }
+};
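+
+/*
+ * Illustrative example (not part of the original driver): for a large-page
+ * device, fsmc_read_page_hwecc() below walks fsmc_ecc4_lp_place group by
+ * group, so ECC step s finds its 13 ECC bytes at OOB offset 2 + 16*s:
+ *
+ * step 0: data bytes 0..511 -> ECC at OOB offset 2, length 13
+ * step 1: data bytes 512..1023 -> ECC at OOB offset 18, length 13
+ * step 2: data bytes 1024..1535 -> ECC at OOB offset 34, length 13
+ */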
+
+/**
+ * struct fsmc_nand_data - structure for FSMC NAND device state
+ *
+ * @pid: Part ID on the AMBA PrimeCell format
+ * @mtd: MTD info for a NAND flash.
+ * @nand: Chip related info for a NAND flash.
+ * @partitions: Partition info for a NAND Flash.
+ * @nr_partitions: Total number of partition of a NAND flash.
+ *
+ * @ecc_place: ECC placing locations in oobfree type format.
+ * @bank: Bank number for probed device.
+ * @clk: Clock structure for FSMC.
+ *
+ * @read_dma_chan: DMA channel for read access
+ * @write_dma_chan: DMA channel for write access to NAND
+ * @dma_access_complete: Completion structure
+ *
+ * @data_pa: NAND Physical port for Data.
+ * @data_va: NAND port for Data.
+ * @cmd_va: NAND port for Command.
+ * @addr_va: NAND port for Address.
+ * @regs_va: FSMC regs base address.
+ */
+struct fsmc_nand_data {
+ u32 pid;
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ struct mtd_partition *partitions;
+ unsigned int nr_partitions;
+
+ struct fsmc_eccplace *ecc_place;
+ unsigned int bank;
+ struct device *dev;
+ enum access_mode mode;
+ struct clk *clk;
+
+ /* DMA related objects */
+ struct dma_chan *read_dma_chan;
+ struct dma_chan *write_dma_chan;
+ struct completion dma_access_complete;
+
+ struct fsmc_nand_timings *dev_timings;
+
+ dma_addr_t data_pa;
+ void __iomem *data_va;
+ void __iomem *cmd_va;
+ void __iomem *addr_va;
+ void __iomem *regs_va;
+
+ void (*select_chip)(uint32_t bank, uint32_t busw);
+};
+
+/* Assert CS signal based on chipnr */
+static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+
+ switch (chipnr) {
+ case -1:
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ if (host->select_chip)
+ host->select_chip(chipnr,
+ chip->options & NAND_BUSWIDTH_16);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/*
+ * fsmc_cmd_ctrl - for facilitating hardware access
+ * This routine allows hardware-specific access to the control lines (ALE, CLE)
+ */
+static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ void __iomem *regs = host->regs_va;
+ unsigned int bank = host->bank;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ u32 pc;
+
+ if (ctrl & NAND_CLE) {
+ this->IO_ADDR_R = host->cmd_va;
+ this->IO_ADDR_W = host->cmd_va;
+ } else if (ctrl & NAND_ALE) {
+ this->IO_ADDR_R = host->addr_va;
+ this->IO_ADDR_W = host->addr_va;
+ } else {
+ this->IO_ADDR_R = host->data_va;
+ this->IO_ADDR_W = host->data_va;
+ }
+
+ pc = readl(FSMC_NAND_REG(regs, bank, PC));
+ if (ctrl & NAND_NCE)
+ pc |= FSMC_ENABLE;
+ else
+ pc &= ~FSMC_ENABLE;
+ writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC));
+ }
+
+ mb();
+
+ if (cmd != NAND_CMD_NONE)
+ writeb_relaxed(cmd, this->IO_ADDR_W);
+}
+
+/*
+ * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
+ *
+ * This routine initializes timing parameters related to NAND memory access in
+ * FSMC registers
+ */
+static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
+ uint32_t busw, struct fsmc_nand_timings *timings)
+{
+ uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+ uint32_t tclr, tar, thiz, thold, twait, tset;
+ struct fsmc_nand_timings *tims;
+ struct fsmc_nand_timings default_timings = {
+ .tclr = FSMC_TCLR_1,
+ .tar = FSMC_TAR_1,
+ .thiz = FSMC_THIZ_1,
+ .thold = FSMC_THOLD_4,
+ .twait = FSMC_TWAIT_6,
+ .tset = FSMC_TSET_0,
+ };
+
+ if (timings)
+ tims = timings;
+ else
+ tims = &default_timings;
+
+ tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
+ tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
+ thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
+ thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
+ twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
+ tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
+
+ if (busw)
+ writel_relaxed(value | FSMC_DEVWID_16,
+ FSMC_NAND_REG(regs, bank, PC));
+ else
+ writel_relaxed(value | FSMC_DEVWID_8,
+ FSMC_NAND_REG(regs, bank, PC));
+
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(thiz | thold | twait | tset,
+ FSMC_NAND_REG(regs, bank, COMM));
+ writel_relaxed(thiz | thold | twait | tset,
+ FSMC_NAND_REG(regs, bank, ATTRIB));
+}
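+
+/*
+ * Illustrative example (not part of the original driver): each timing
+ * field is masked and shifted into its register slot, so assuming
+ * FSMC_THOLD_4 is the raw value 4, the default hold time contributes
+ *
+ * thold = (4 & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT
+ *
+ * and the COMM and ATTRIB registers receive the OR of the thiz, thold,
+ * twait and tset fields.
+ */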
+
+/*
+ * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
+ */
+static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ void __iomem *regs = host->regs_va;
+ uint32_t bank = host->bank;
+
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
+ FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
+ FSMC_NAND_REG(regs, bank, PC));
+}
+
+/*
+ * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by
+ * FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up to
+ * max of 8-bits)
+ */
+static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
+ uint8_t *ecc)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ void __iomem *regs = host->regs_va;
+ uint32_t bank = host->bank;
+ uint32_t ecc_tmp;
+ unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
+
+ do {
+ if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
+ break;
+ else
+ cond_resched();
+ } while (!time_after_eq(jiffies, deadline));
+
+ if (time_after_eq(jiffies, deadline)) {
+ dev_err(host->dev, "calculate ecc timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc[0] = (uint8_t) (ecc_tmp >> 0);
+ ecc[1] = (uint8_t) (ecc_tmp >> 8);
+ ecc[2] = (uint8_t) (ecc_tmp >> 16);
+ ecc[3] = (uint8_t) (ecc_tmp >> 24);
+
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc[4] = (uint8_t) (ecc_tmp >> 0);
+ ecc[5] = (uint8_t) (ecc_tmp >> 8);
+ ecc[6] = (uint8_t) (ecc_tmp >> 16);
+ ecc[7] = (uint8_t) (ecc_tmp >> 24);
+
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc[8] = (uint8_t) (ecc_tmp >> 0);
+ ecc[9] = (uint8_t) (ecc_tmp >> 8);
+ ecc[10] = (uint8_t) (ecc_tmp >> 16);
+ ecc[11] = (uint8_t) (ecc_tmp >> 24);
+
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
+ ecc[12] = (uint8_t) (ecc_tmp >> 16);
+
+ return 0;
+}
+
+/*
+ * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for the ecc1 option
+ * supported by the FSMC. ECC is 3 bytes for 512 bytes of data (supports
+ * error correction up to a max of 1 bit)
+ */
+static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
+ uint8_t *ecc)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ void __iomem *regs = host->regs_va;
+ uint32_t bank = host->bank;
+ uint32_t ecc_tmp;
+
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc[0] = (uint8_t) (ecc_tmp >> 0);
+ ecc[1] = (uint8_t) (ecc_tmp >> 8);
+ ecc[2] = (uint8_t) (ecc_tmp >> 16);
+
+ return 0;
+}
+
+/* Count the number of 0's in buff up to a max of max_bits */
+static int count_written_bits(uint8_t *buff, int size, int max_bits)
+{
+ int k, written_bits = 0;
+
+ for (k = 0; k < size; k++) {
+ written_bits += hweight8(~buff[k]);
+ if (written_bits > max_bits)
+ break;
+ }
+
+ return written_bits;
+}
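+
+/*
+ * Illustrative example (not part of the original driver): hweight8(~byte)
+ * counts the zero bits in a byte, i.e. the bits "written" on NAND, so
+ *
+ * count_written_bits((uint8_t []){ 0xff, 0xfe, 0x7f }, 3, 8) == 2
+ *
+ * and a fully erased (all-0xff) region counts as zero written bits, which
+ * is what the erased-page check in fsmc_bch8_correct_data() relies on.
+ */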
+
+static void dma_complete(void *param)
+{
+ struct fsmc_nand_data *host = param;
+
+ complete(&host->dma_access_complete);
+}
+
+static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
+ enum dma_data_direction direction)
+{
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_dst, dma_src, dma_addr;
+ dma_cookie_t cookie;
+ unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int ret;
+
+ if (direction == DMA_TO_DEVICE)
+ chan = host->write_dma_chan;
+ else if (direction == DMA_FROM_DEVICE)
+ chan = host->read_dma_chan;
+ else
+ return -EINVAL;
+
+ dma_dev = chan->device;
+ dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+
+ if (direction == DMA_TO_DEVICE) {
+ dma_src = dma_addr;
+ dma_dst = host->data_pa;
+ } else {
+ dma_src = host->data_pa;
+ dma_dst = dma_addr;
+ }
+
+ tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
+ len, flags);
+ if (!tx) {
+ dev_err(host->dev, "device_prep_dma_memcpy error\n");
+ ret = -EIO;
+ goto unmap_dma;
+ }
+
+ tx->callback = dma_complete;
+ tx->callback_param = host;
+ cookie = tx->tx_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(host->dev, "dma_submit_error %d\n", cookie);
+ goto unmap_dma;
+ }
+
+ dma_async_issue_pending(chan);
+
+ ret =
+ wait_for_completion_timeout(&host->dma_access_complete,
+ msecs_to_jiffies(3000));
+ if (ret <= 0) {
+ dmaengine_terminate_all(chan);
+ dev_err(host->dev, "wait_for_completion_timeout\n");
+ if (!ret)
+ ret = -ETIMEDOUT;
+ goto unmap_dma;
+ }
+
+ ret = 0;
+
+unmap_dma:
+ dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+
+ return ret;
+}
+
+/*
+ * fsmc_write_buf - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) {
+ uint32_t *p = (uint32_t *)buf;
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ writel_relaxed(p[i], chip->IO_ADDR_W);
+ } else {
+ for (i = 0; i < len; i++)
+ writeb_relaxed(buf[i], chip->IO_ADDR_W);
+ }
+}
+
+/*
+ * fsmc_read_buf - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+
+ if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) {
+ uint32_t *p = (uint32_t *)buf;
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ p[i] = readl_relaxed(chip->IO_ADDR_R);
+ } else {
+ for (i = 0; i < len; i++)
+ buf[i] = readb_relaxed(chip->IO_ADDR_R);
+ }
+}
+
+/*
+ * fsmc_read_buf_dma - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+ dma_xfer(host, buf, len, DMA_FROM_DEVICE);
+}
+
+/*
+ * fsmc_write_buf_dma - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct fsmc_nand_data *host;
+
+ host = container_of(mtd, struct fsmc_nand_data, mtd);
+ dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
+}
+
+/*
+ * fsmc_read_page_hwecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * This routine is needed for FSMC version 8, as reading from the NAND chip
+ * has to be performed in a strict sequence as follows:
+ * data (512 bytes) -> ecc (13 bytes)
+ * After this read, the FSMC hardware generates and reports the error data
+ * bits (up to a max of 8 bits)
+ */
+static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ struct fsmc_eccplace *ecc_place = host->ecc_place;
+ int i, j, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ int off, len, group = 0;
+ /*
+ * ecc_oob is intentionally taken as uint16_t. On 16-bit devices, we
+ * end up reading 14 bytes (7 words) from the OOB. The local array
+ * maintains word alignment.
+ */
+ uint16_t ecc_oob[7];
+ uint8_t *oob = (uint8_t *)&ecc_oob[0];
+ unsigned int max_bitflips = 0;
+
+ for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
+ chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+
+ for (j = 0; j < eccbytes;) {
+ off = ecc_place->eccplace[group].offset;
+ len = ecc_place->eccplace[group].length;
+ group++;
+
+ /*
+ * The length is intentionally rounded up to a multiple of 2
+ * so that at least 13 bytes are read even on 16-bit NAND
+ * devices.
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ len = roundup(len, 2);
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
+ chip->read_buf(mtd, oob + j, len);
+ j += len;
+ }
+
+ memcpy(&ecc_code[i], oob, chip->ecc.bytes);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+/*
+ * fsmc_bch8_correct_data
+ * @mtd: mtd info structure
+ * @dat: buffer of read data
+ * @read_ecc: ecc read from device spare area
+ * @calc_ecc: ecc calculated from read data
+ *
+ * calc_ecc is 104 bits of information containing a maximum of 8 error
+ * offsets of 13 bits each within 512 bytes of read data.
+ */
+static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ struct fsmc_nand_data *host = container_of(mtd,
+ struct fsmc_nand_data, mtd);
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *regs = host->regs_va;
+ unsigned int bank = host->bank;
+ uint32_t err_idx[8];
+ uint32_t num_err, i;
+ uint32_t ecc1, ecc2, ecc3, ecc4;
+
+ num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
+
+ /* no bit flipping */
+ if (likely(num_err == 0))
+ return 0;
+
+ /* too many errors */
+ if (unlikely(num_err > 8)) {
+ /*
+ * This is a temporary erase check. Reading a newly erased page
+ * would result in an ECC error because the OOB data is also
+ * erased to 0xFF and the ECC calculated for all-0xFF data is
+ * not FF..FF.
+ * This is a workaround to skip performing correction when the
+ * data is FF..FF.
+ *
+ * Logic:
+ * For every page, each bit written as 0 is counted until the
+ * number of such bits exceeds 8 (the maximum correction
+ * capability of the FSMC for each 512 + 13 bytes).
+ */
+
+ int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
+ int bits_data = count_written_bits(dat, chip->ecc.size, 8);
+
+ if ((bits_ecc + bits_data) <= 8) {
+ if (bits_data)
+ memset(dat, 0xff, chip->ecc.size);
+ return bits_data;
+ }
+
+ return -EBADMSG;
+ }
+
+ /*
+ * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
+ * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
+ *
+ * calc_ecc is 104 bits of information containing a maximum of 8 error
+ * offsets of 13 bits each. The ECC registers are read out below and
+ * the error offset indexes are populated in the err_idx array.
+ */
+ ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
+
+ err_idx[0] = (ecc1 >> 0) & 0x1FFF;
+ err_idx[1] = (ecc1 >> 13) & 0x1FFF;
+ err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
+ err_idx[3] = (ecc2 >> 7) & 0x1FFF;
+ err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
+ err_idx[5] = (ecc3 >> 1) & 0x1FFF;
+ err_idx[6] = (ecc3 >> 14) & 0x1FFF;
+ err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
+
+ i = 0;
+ while (num_err--) {
+ change_bit(0, (unsigned long *)&err_idx[i]);
+ change_bit(1, (unsigned long *)&err_idx[i]);
+
+ if (err_idx[i] < chip->ecc.size * 8) {
+ change_bit(err_idx[i], (unsigned long *)dat);
+ i++;
+ }
+ }
+ return i;
+}
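+
+/*
+ * Illustrative example (not part of the original driver): suppose the
+ * hardware reports num_err = 1 and ECC1 reads back the hypothetical value
+ * 0x00000815. Then
+ *
+ * err_idx[0] = (0x815 >> 0) & 0x1FFF = 0x815
+ *
+ * and toggling the two low bits (the change_bit() pair above) gives
+ * 0x815 ^ 3 = 0x816 = 2070, i.e. bit 6 of byte 258 within the 512-byte
+ * block, which change_bit() then flips in place to correct the data.
+ */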
+
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ chan->private = slave;
+ return true;
+}
+
+#ifdef CONFIG_OF
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ u32 val;
+ int ret;
+
+ /* Set default NAND width to 8 bits */
+ pdata->width = 8;
+ if (!of_property_read_u32(np, "bank-width", &val)) {
+ if (val == 2) {
+ pdata->width = 16;
+ } else if (val != 1) {
+ dev_err(&pdev->dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+ if (of_get_property(np, "nand-skip-bbtscan", NULL))
+ pdata->options = NAND_SKIP_BBTSCAN;
+
+ pdata->nand_timings = devm_kzalloc(&pdev->dev,
+ sizeof(*pdata->nand_timings), GFP_KERNEL);
+ if (!pdata->nand_timings)
+ return -ENOMEM;
+ ret = of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
+ sizeof(*pdata->nand_timings));
+ if (ret) {
+ dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n");
+ pdata->nand_timings = NULL;
+ }
+
+ /* Set default NAND bank to 0 */
+ pdata->bank = 0;
+ if (!of_property_read_u32(np, "bank", &val)) {
+ if (val > 3) {
+ dev_err(&pdev->dev, "invalid bank %u\n", val);
+ return -EINVAL;
+ }
+ pdata->bank = val;
+ }
+ return 0;
+}
+#else
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
+
+/*
+ * fsmc_nand_probe - Probe function
+ * @pdev: platform device structure
+ */
+static int __init fsmc_nand_probe(struct platform_device *pdev)
+{
+ struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node __maybe_unused *np = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata = {};
+ struct fsmc_nand_data *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct resource *res;
+ dma_cap_mask_t mask;
+ int ret = 0;
+ u32 pid;
+ int i;
+
+ if (np) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ pdev->dev.platform_data = pdata;
+ ret = fsmc_nand_probe_config_dt(pdev, np);
+ if (ret) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENODEV;
+ }
+ }
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data is NULL\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
+ host->data_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->data_va))
+ return PTR_ERR(host->data_va);
+
+ host->data_pa = (dma_addr_t)res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
+ host->addr_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->addr_va))
+ return PTR_ERR(host->addr_va);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
+ host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->cmd_va))
+ return PTR_ERR(host->cmd_va);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
+ host->regs_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs_va))
+ return PTR_ERR(host->regs_va);
+
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "failed to fetch block clock\n");
+ return PTR_ERR(host->clk);
+ }
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ goto err_clk_prepare_enable;
+
+ /*
+ * This device ID is actually a common AMBA ID as used on the
+ * AMBA PrimeCell bus. However it is not a PrimeCell.
+ */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
+ host->pid = pid;
+ dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
+ "revision %02x, config %02x\n",
+ AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
+ AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
+
+ host->bank = pdata->bank;
+ host->select_chip = pdata->select_bank;
+ host->partitions = pdata->partitions;
+ host->nr_partitions = pdata->nr_partitions;
+ host->dev = &pdev->dev;
+ host->dev_timings = pdata->nand_timings;
+ host->mode = pdata->mode;
+
+ if (host->mode == USE_DMA_ACCESS)
+ init_completion(&host->dma_access_complete);
+
+ /* Link all private pointers */
+ mtd = &host->mtd;
+ nand = &host->nand;
+ mtd->priv = nand;
+ nand->priv = host;
+
+ host->mtd.owner = THIS_MODULE;
+ nand->IO_ADDR_R = host->data_va;
+ nand->IO_ADDR_W = host->data_va;
+ nand->cmd_ctrl = fsmc_cmd_ctrl;
+ nand->chip_delay = 30;
+
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.hwctl = fsmc_enable_hwecc;
+ nand->ecc.size = 512;
+ nand->options = pdata->options;
+ nand->select_chip = fsmc_select_chip;
+ nand->badblockbits = 7;
+
+ if (pdata->width == FSMC_NAND_BW16)
+ nand->options |= NAND_BUSWIDTH_16;
+
+ switch (host->mode) {
+ case USE_DMA_ACCESS:
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ host->read_dma_chan = dma_request_channel(mask, filter,
+ pdata->read_dma_priv);
+ if (!host->read_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get read dma channel\n");
+ goto err_req_read_chnl;
+ }
+ host->write_dma_chan = dma_request_channel(mask, filter,
+ pdata->write_dma_priv);
+ if (!host->write_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get write dma channel\n");
+ goto err_req_write_chnl;
+ }
+ nand->read_buf = fsmc_read_buf_dma;
+ nand->write_buf = fsmc_write_buf_dma;
+ break;
+
+ default:
+ case USE_WORD_ACCESS:
+ nand->read_buf = fsmc_read_buf;
+ nand->write_buf = fsmc_write_buf;
+ break;
+ }
+
+ fsmc_nand_setup(host->regs_va, host->bank,
+ nand->options & NAND_BUSWIDTH_16,
+ host->dev_timings);
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ nand->ecc.read_page = fsmc_read_page_hwecc;
+ nand->ecc.calculate = fsmc_read_hwecc_ecc4;
+ nand->ecc.correct = fsmc_bch8_correct_data;
+ nand->ecc.bytes = 13;
+ nand->ecc.strength = 8;
+ } else {
+ nand->ecc.calculate = fsmc_read_hwecc_ecc1;
+ nand->ecc.correct = nand_correct_data;
+ nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
+ }
+
+ /*
+ * Scan to find existence of the device
+ */
+ if (nand_scan_ident(&host->mtd, 1, NULL)) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "No NAND Device found!\n");
+ goto err_scan_ident;
+ }
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ switch (host->mtd.oobsize) {
+ case 16:
+ nand->ecc.layout = &fsmc_ecc4_16_layout;
+ host->ecc_place = &fsmc_ecc4_sp_place;
+ break;
+ case 64:
+ nand->ecc.layout = &fsmc_ecc4_64_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 128:
+ nand->ecc.layout = &fsmc_ecc4_128_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 224:
+ nand->ecc.layout = &fsmc_ecc4_224_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ case 256:
+ nand->ecc.layout = &fsmc_ecc4_256_layout;
+ host->ecc_place = &fsmc_ecc4_lp_place;
+ break;
+ default:
+ dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ BUG();
+ }
+ } else {
+ switch (host->mtd.oobsize) {
+ case 16:
+ nand->ecc.layout = &fsmc_ecc1_16_layout;
+ break;
+ case 64:
+ nand->ecc.layout = &fsmc_ecc1_64_layout;
+ break;
+ case 128:
+ nand->ecc.layout = &fsmc_ecc1_128_layout;
+ break;
+ default:
+ dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ BUG();
+ }
+ }
+
+ /* Second stage of scan to fill MTD data-structures */
+ if (nand_scan_tail(&host->mtd)) {
+ ret = -ENXIO;
+ goto err_probe;
+ }
+
+ /*
+ * The partition information is accessed with the following precedence:
+ *
+ * command line through the bootloader,
+ * platform data,
+ * default partition information present in the driver.
+ */
+ /*
+ * Check for partition info passed
+ */
+ host->mtd.name = "nand";
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(&host->mtd, NULL, &ppdata,
+ host->partitions, host->nr_partitions);
+ if (ret)
+ goto err_probe;
+
+ platform_set_drvdata(pdev, host);
+ dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
+ return 0;
+
+err_probe:
+err_scan_ident:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->write_dma_chan);
+err_req_write_chnl:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->read_dma_chan);
+err_req_read_chnl:
+ clk_disable_unprepare(host->clk);
+err_clk_prepare_enable:
+ clk_put(host->clk);
+ return ret;
+}
+
+/*
+ * Clean up routine
+ */
+static int fsmc_nand_remove(struct platform_device *pdev)
+{
+ struct fsmc_nand_data *host = platform_get_drvdata(pdev);
+
+ if (host) {
+ nand_release(&host->mtd);
+
+ if (host->mode == USE_DMA_ACCESS) {
+ dma_release_channel(host->write_dma_chan);
+ dma_release_channel(host->read_dma_chan);
+ }
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fsmc_nand_suspend(struct device *dev)
+{
+ struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ if (host)
+ clk_disable_unprepare(host->clk);
+ return 0;
+}
+
+static int fsmc_nand_resume(struct device *dev)
+{
+ struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ if (host) {
+ clk_prepare_enable(host->clk);
+ fsmc_nand_setup(host->regs_va, host->bank,
+ host->nand.options & NAND_BUSWIDTH_16,
+ host->dev_timings);
+ }
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id fsmc_nand_id_table[] = {
+ { .compatible = "st,spear600-fsmc-nand" },
+ { .compatible = "stericsson,fsmc-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
+#endif
+
+static struct platform_driver fsmc_nand_driver = {
+ .remove = fsmc_nand_remove,
+ .driver = {
+ .name = "fsmc-nand",
+ .of_match_table = of_match_ptr(fsmc_nand_id_table),
+ .pm = &fsmc_nand_pm_ops,
+ },
+};
+
+module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
+MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
new file mode 100644
index 000000000..73c4048c3
--- /dev/null
+++ b/drivers/mtd/nand/gpio.c
@@ -0,0 +1,321 @@
+/*
+ * drivers/mtd/nand/gpio.c
+ *
+ * Updated, and converted to generic GPIO based driver by Russell King.
+ *
+ * Written by Ben Dooks <ben@simtec.co.uk>
+ * Based on 2.4 version by Mark Whittaker
+ *
+ * © 2004 Simtec Electronics
+ *
+ * Device driver for NAND flash that uses a memory mapped interface to
+ * read/write the NAND commands and data, and GPIO pins for control signals
+ * (the DT binding refers to this as "GPIO assisted NAND flash")
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+
+struct gpiomtd {
+ void __iomem *io_sync;
+ struct mtd_info mtd_info;
+ struct nand_chip nand_chip;
+ struct gpio_nand_platdata plat;
+};
+
+#define gpio_nand_getpriv(x) container_of(x, struct gpiomtd, mtd_info)
+
+
+#ifdef CONFIG_ARM
+/* gpio_nand_dosync()
+ *
+ * Make sure the GPIO state changes occur in-order with writes to NAND
+ * memory region.
+ * Needed on PXA due to bus-reordering within the SoC itself (see section on
+ * I/O ordering in PXA manual (section 2.3, p35)
+ */
+static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
+{
+ unsigned long tmp;
+
+ if (gpiomtd->io_sync) {
+ /*
+ * Linux memory barriers don't cater for what's required here.
+ * What's required is what's here - a read from a separate
+ * region with a dependency on that read.
+ */
+ tmp = readl(gpiomtd->io_sync);
+ asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
+ }
+}
+#else
+static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
+#endif
+
+static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
+
+ gpio_nand_dosync(gpiomtd);
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE));
+ gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
+ gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
+ gpio_nand_dosync(gpiomtd);
+ }
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ writeb(cmd, gpiomtd->nand_chip.IO_ADDR_W);
+ gpio_nand_dosync(gpiomtd);
+}
+
+static int gpio_nand_devready(struct mtd_info *mtd)
+{
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
+
+ return gpio_get_value(gpiomtd->plat.gpio_rdy);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+ { .compatible = "gpio-control-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ u32 val;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+ if (val == 2) {
+ plat->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+
+ plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
+ plat->gpio_nce = of_get_gpio(dev->of_node, 1);
+ plat->gpio_ale = of_get_gpio(dev->of_node, 2);
+ plat->gpio_cle = of_get_gpio(dev->of_node, 3);
+ plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
+
+ if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+ plat->chip_delay = val;
+
+ return 0;
+}
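+
+/*
+ * An illustrative device tree node for the parser above -- a sketch
+ * assembled from the properties this function reads; the binding document
+ * is the authoritative reference and the phandles here are made up.
+ * The five gpios are, in order: rdy, nce, ale, cle, nwp (an unused nwp
+ * may be given as <0>):
+ *
+ *	nand@1,0 {
+ *		compatible = "gpio-control-nand";
+ *		reg = <1 0x0000 0x2>;
+ *		bank-width = <2>;
+ *		chip-delay = <25>;
+ *		gpio-control-nand,io-sync-reg = <0x0 0xc8000000>;
+ *		gpios = <&banka 1 0>,
+ *			<&banka 2 0>,
+ *			<&banka 3 0>,
+ *			<&banka 4 0>,
+ *			<0>;
+ *	};
+ */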
+
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ struct resource *r;
+ u64 addr;
+
+ if (of_property_read_u64(pdev->dev.of_node,
+ "gpio-control-nand,io-sync-reg", &addr))
+ return NULL;
+
+ r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ r->start = addr;
+ r->end = r->start + 0x3;
+ r->flags = IORESOURCE_MEM;
+
+ return r;
+}
+#else /* CONFIG_OF */
+static inline int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ return -ENOSYS;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static inline int gpio_nand_get_config(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ int ret = gpio_nand_get_config_of(dev, plat);
+
+ if (!ret)
+ return ret;
+
+ if (dev_get_platdata(dev)) {
+ memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+ struct resource *r = gpio_nand_get_io_sync_of(pdev);
+
+ if (r)
+ return r;
+
+ return platform_get_resource(pdev, IORESOURCE_MEM, 1);
+}
+
+static int gpio_nand_remove(struct platform_device *pdev)
+{
+ struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
+
+ nand_release(&gpiomtd->mtd_info);
+
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+ gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+ gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+
+ return 0;
+}
+
+static int gpio_nand_probe(struct platform_device *pdev)
+{
+ struct gpiomtd *gpiomtd;
+ struct nand_chip *chip;
+ struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
+ int ret = 0;
+
+ if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
+ return -EINVAL;
+
+ gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+ if (!gpiomtd)
+ return -ENOMEM;
+
+ chip = &gpiomtd->nand_chip;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chip->IO_ADDR_R))
+ return PTR_ERR(chip->IO_ADDR_R);
+
+ res = gpio_nand_get_io_sync(pdev);
+ if (res) {
+ gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpiomtd->io_sync))
+ return PTR_ERR(gpiomtd->io_sync);
+ }
+
+ ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, "NAND NCE");
+ if (ret)
+ return ret;
+ gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
+ "NAND NWP");
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
+ if (ret)
+ return ret;
+ gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
+
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
+ if (ret)
+ return ret;
+ gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
+
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
+ ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
+ "NAND RDY");
+ if (ret)
+ return ret;
+ gpio_direction_input(gpiomtd->plat.gpio_rdy);
+ chip->dev_ready = gpio_nand_devready;
+ }
+
+ chip->IO_ADDR_W = chip->IO_ADDR_R;
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->options = gpiomtd->plat.options;
+ chip->chip_delay = gpiomtd->plat.chip_delay;
+ chip->cmd_ctrl = gpio_nand_cmd_ctrl;
+
+ gpiomtd->mtd_info.priv = chip;
+ gpiomtd->mtd_info.owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, gpiomtd);
+
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+ gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+
+ if (nand_scan(&gpiomtd->mtd_info, 1)) {
+ ret = -ENXIO;
+ goto err_wp;
+ }
+
+ if (gpiomtd->plat.adjust_parts)
+ gpiomtd->plat.adjust_parts(&gpiomtd->plat,
+ gpiomtd->mtd_info.size);
+
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
+ gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
+ if (!ret)
+ return 0;
+
+err_wp:
+ if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+ gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+
+ return ret;
+}
+
+static struct platform_driver gpio_nand_driver = {
+ .probe = gpio_nand_probe,
+ .remove = gpio_nand_remove,
+ .driver = {
+ .name = "gpio-nand",
+ .of_match_table = of_match_ptr(gpio_nand_id_table),
+ },
+};
+
+module_platform_driver(gpio_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("GPIO NAND Driver");
diff --git a/drivers/mtd/nand/gpmi-nand/Makefile b/drivers/mtd/nand/gpmi-nand/Makefile
new file mode 100644
index 000000000..3a462487c
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
+gpmi_nand-objs += gpmi-nand.o
+gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
new file mode 100644
index 000000000..05bb91f2f
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -0,0 +1,128 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_BCH_REGS_H
+#define __GPMI_NAND_BCH_REGS_H
+
+#define HW_BCH_CTRL 0x00000000
+#define HW_BCH_CTRL_SET 0x00000004
+#define HW_BCH_CTRL_CLR 0x00000008
+#define HW_BCH_CTRL_TOG 0x0000000c
+
+#define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8)
+#define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0)
+
+#define HW_BCH_STATUS0 0x00000010
+#define HW_BCH_MODE 0x00000020
+#define HW_BCH_ENCODEPTR 0x00000030
+#define HW_BCH_DATAPTR 0x00000040
+#define HW_BCH_METAPTR 0x00000050
+#define HW_BCH_LAYOUTSELECT 0x00000070
+
+#define HW_BCH_FLASH0LAYOUT0 0x00000080
+
+#define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24
+#define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
+#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
+
+#define BP_BCH_FLASH0LAYOUT0_META_SIZE 16
+#define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
+ & BM_BCH_FLASH0LAYOUT0_META_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT0_ECC0 12
+#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
+ : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & BM_BCH_FLASH0LAYOUT0_ECC0) \
+ )
+
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \
+ ((GPMI_IS_MX6(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \
+ : 0 \
+ )
+
+#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
+#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ )
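+/*
+ * Note (inferred from the shifts above): on MX6 the DATA0_SIZE field
+ * appears to count 32-bit words rather than bytes, hence the >> 2.
+ */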
+
+#define HW_BCH_FLASH0LAYOUT1 0x00000090
+
+#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16
+#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \
+ (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
+ & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT1_ECCN 12
+#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
+ : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & BM_BCH_FLASH0LAYOUT1_ECCN) \
+ )
+
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \
+ ((GPMI_IS_MX6(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \
+ : 0 \
+ )
+
+#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
+#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ )
+
+#define HW_BCH_VERSION 0x00000160
+#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
new file mode 100644
index 000000000..43fa16b5f
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -0,0 +1,1508 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include "gpmi-nand.h"
+#include "gpmi-regs.h"
+#include "bch-regs.h"
+
+static struct timing_threshod timing_default_threshold = {
+ .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
+ BP_GPMI_TIMING0_DATA_SETUP),
+ .internal_data_setup_in_ns = 0,
+ .max_sample_delay_factor = (BM_GPMI_CTRL1_RDN_DELAY >>
+ BP_GPMI_CTRL1_RDN_DELAY),
+ .max_dll_clock_period_in_ns = 32,
+ .max_dll_delay_in_ns = 16,
+};
+
+#define MXS_SET_ADDR 0x4
+#define MXS_CLR_ADDR 0x8
+/*
+ * Clear the bit and poll it cleared. This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ int timeout = 0x400;
+
+ /* clear the bit */
+ writel(mask, addr + MXS_CLR_ADDR);
+
+ /*
+	 * SFTRST needs 3 GPMI clocks to settle; the reference manual
+	 * recommends waiting 1us.
+ */
+ udelay(1);
+
+ /* poll the bit becoming clear */
+ while ((readl(addr) & mask) && --timeout)
+ /* nothing */;
+
+ return !timeout;
+}
+
+#define MODULE_CLKGATE (1 << 30)
+#define MODULE_SFTRST (1 << 31)
+/*
+ * The current mxs_reset_block() will do two things:
+ * [1] enable the module.
+ * [2] reset the module.
+ *
+ * In most cases this is fine.
+ * But the MX23 has a hardware bug in the BCH block (see erratum #2847):
+ * if you soft reset the BCH block, it becomes unusable until the next
+ * hard reset. This case occurs in NAND boot mode: when the board boots
+ * from NAND, the ROM of the chip initializes the BCH block itself, so if
+ * the driver resets the BCH again, the BCH stops working and you will
+ * see a DMA timeout. The bug has been fixed in later chips, such as the
+ * MX28.
+ *
+ * To work around this bug, we add a `just_enable` parameter to
+ * mxs_reset_block() and rewrite it here.
+ */
+static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+{
+ int ret;
+ int timeout = 0x400;
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear CLKGATE */
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+ if (!just_enable) {
+ /* set SFTRST to reset the block */
+ writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
+ udelay(1);
+
+ /* poll CLKGATE becoming set */
+ while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
+ /* nothing */;
+ if (unlikely(!timeout))
+ goto error;
+ }
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear and poll CLKGATE */
+ ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
+ if (unlikely(ret))
+ goto error;
+
+ return 0;
+
+error:
+ pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+ return -ETIMEDOUT;
+}
+
+static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
+{
+ struct clk *clk;
+ int ret;
+ int i;
+
+ for (i = 0; i < GPMI_CLK_MAX; i++) {
+ clk = this->resources.clock[i];
+ if (!clk)
+ break;
+
+ if (v) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_clk;
+ } else {
+ clk_disable_unprepare(clk);
+ }
+ }
+ return 0;
+
+err_clk:
+ for (; i > 0; i--)
+ clk_disable_unprepare(this->resources.clock[i - 1]);
+ return ret;
+}
+
+#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
+#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
+
+int gpmi_init(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ int ret;
+
+ ret = gpmi_enable_clk(this);
+	if (ret)
+		return ret;
+ ret = gpmi_reset_block(r->gpmi_regs, false);
+ if (ret)
+ goto err_out;
+
+ /*
+ * Reset BCH here, too. We got failures otherwise :(
+ * See later BCH reset for explanation of MX23 handling
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ if (ret)
+ goto err_out;
+
+
+ /* Choose NAND mode. */
+ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* Set the IRQ polarity. */
+ writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
+ r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Disable Write-Protection. */
+ writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Select BCH ECC. */
+ writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /*
+ * Decouple the chip select from dma channel. We use dma0 for all
+ * the chips.
+ */
+ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ gpmi_disable_clk(this);
+ return 0;
+err_out:
+	gpmi_disable_clk(this);
+	return ret;
+}
+
+/* This function is very useful. It is called only when a bug occurs. */
+void gpmi_dump_info(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct bch_geometry *geo = &this->bch_geometry;
+ u32 reg;
+ int i;
+
+ dev_err(this->dev, "Show GPMI registers :\n");
+ for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
+ reg = readl(r->gpmi_regs + i * 0x10);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
+
+ /* start to print out the BCH info */
+ dev_err(this->dev, "Show BCH registers :\n");
+ for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
+ reg = readl(r->bch_regs + i * 0x10);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
+ dev_err(this->dev, "BCH Geometry :\n"
+ "GF length : %u\n"
+ "ECC Strength : %u\n"
+ "Page Size in Bytes : %u\n"
+ "Metadata Size in Bytes : %u\n"
+ "ECC Chunk Size in Bytes: %u\n"
+ "ECC Chunk Count : %u\n"
+ "Payload Size in Bytes : %u\n"
+ "Auxiliary Size in Bytes: %u\n"
+ "Auxiliary Status Offset: %u\n"
+ "Block Mark Byte Offset : %u\n"
+ "Block Mark Bit Offset : %u\n",
+ geo->gf_len,
+ geo->ecc_strength,
+ geo->page_size,
+ geo->metadata_size,
+ geo->ecc_chunk_size,
+ geo->ecc_chunk_count,
+ geo->payload_size,
+ geo->auxiliary_size,
+ geo->auxiliary_status_offset,
+ geo->block_mark_byte_offset,
+ geo->block_mark_bit_offset);
+}
+
+/* Configures the geometry for BCH. */
+int bch_set_geometry(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct bch_geometry *bch_geo = &this->bch_geometry;
+ unsigned int block_count;
+ unsigned int block_size;
+ unsigned int metadata_size;
+ unsigned int ecc_strength;
+ unsigned int page_size;
+ unsigned int gf_len;
+ int ret;
+
+ if (common_nfc_set_geometry(this))
+		return -EINVAL;
+
+ block_count = bch_geo->ecc_chunk_count - 1;
+ block_size = bch_geo->ecc_chunk_size;
+ metadata_size = bch_geo->metadata_size;
+ ecc_strength = bch_geo->ecc_strength >> 1;
+ page_size = bch_geo->page_size;
+ gf_len = bch_geo->gf_len;
+
+ ret = gpmi_enable_clk(this);
+	if (ret)
+		return ret;
+
+ /*
+ * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
+ * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
+ * On the other hand, the MX28 needs the reset, because one case has been
+ * seen where the BCH produced ECC errors constantly after 10000
+ * consecutive reboots. The latter case has not been seen on the MX23
+ * yet, still we don't know if it could happen there as well.
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ if (ret)
+ goto err_out;
+
+ /* Configure layout 0. */
+ writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
+ | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
+ | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
+ | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
+ r->bch_regs + HW_BCH_FLASH0LAYOUT0);
+
+ writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
+ | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
+ | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
+ r->bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+ /* Set *all* chip selects to use layout 0. */
+ writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
+
+ /* Enable interrupts. */
+ writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+ r->bch_regs + HW_BCH_CTRL_SET);
+
+ gpmi_disable_clk(this);
+ return 0;
+err_out:
+	gpmi_disable_clk(this);
+	return ret;
+}
+
+/* Converts time in nanoseconds to cycles. */
+static unsigned int ns_to_cycles(unsigned int time,
+ unsigned int period, unsigned int min)
+{
+ unsigned int k;
+
+ k = (time + period - 1) / period;
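+	/* e.g. (illustrative) time = 25ns, period = 10ns -> k = 3 (ceiling) */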
+ return max(k, min);
+}
+
+#define DEF_MIN_PROP_DELAY 5
+#define DEF_MAX_PROP_DELAY 9
+/* Apply timing to current hardware conditions. */
+static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
+ struct gpmi_nfc_hardware_timing *hw)
+{
+ struct timing_threshod *nfc = &timing_default_threshold;
+ struct resources *r = &this->resources;
+ struct nand_chip *nand = &this->nand;
+ struct nand_timing target = this->timing;
+ bool improved_timing_is_available;
+ unsigned long clock_frequency_in_hz;
+ unsigned int clock_period_in_ns;
+ bool dll_use_half_periods;
+ unsigned int dll_delay_shift;
+ unsigned int max_sample_delay_in_ns;
+ unsigned int address_setup_in_cycles;
+ unsigned int data_setup_in_ns;
+ unsigned int data_setup_in_cycles;
+ unsigned int data_hold_in_cycles;
+ int ideal_sample_delay_in_ns;
+ unsigned int sample_delay_factor;
+ int tEYE;
+ unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
+ unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
+
+ /*
+ * If there are multiple chips, we need to relax the timings to allow
+ * for signal distortion due to higher capacitance.
+ */
+ if (nand->numchips > 2) {
+ target.data_setup_in_ns += 10;
+ target.data_hold_in_ns += 10;
+ target.address_setup_in_ns += 10;
+ } else if (nand->numchips > 1) {
+ target.data_setup_in_ns += 5;
+ target.data_hold_in_ns += 5;
+ target.address_setup_in_ns += 5;
+ }
+
+ /* Check if improved timing information is available. */
+ improved_timing_is_available =
+ (target.tREA_in_ns >= 0) &&
+ (target.tRLOH_in_ns >= 0) &&
+ (target.tRHOH_in_ns >= 0);
+
+ /* Inspect the clock. */
+ nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
+ clock_frequency_in_hz = nfc->clock_frequency_in_hz;
+ clock_period_in_ns = NSEC_PER_SEC / clock_frequency_in_hz;
+
+ /*
+ * The NFC quantizes setup and hold parameters in terms of clock cycles.
+ * Here, we quantize the setup and hold timing parameters to the
+ * next-highest clock period to make sure we apply at least the
+ * specified times.
+ *
+ * For data setup and data hold, the hardware interprets a value of zero
+ * as the largest possible delay. This is not what's intended by a zero
+ * in the input parameter, so we impose a minimum of one cycle.
+ */
+ data_setup_in_cycles = ns_to_cycles(target.data_setup_in_ns,
+ clock_period_in_ns, 1);
+ data_hold_in_cycles = ns_to_cycles(target.data_hold_in_ns,
+ clock_period_in_ns, 1);
+ address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
+ clock_period_in_ns, 0);
+
+ /*
+ * The clock's period affects the sample delay in a number of ways:
+ *
+ * (1) The NFC HAL tells us the maximum clock period the sample delay
+ * DLL can tolerate. If the clock period is greater than half that
+ * maximum, we must configure the DLL to be driven by half periods.
+ *
+ * (2) We need to convert from an ideal sample delay, in ns, to a
+ * "sample delay factor," which the NFC uses. This factor depends on
+ * whether we're driving the DLL with full or half periods.
+ * Paraphrasing the reference manual:
+ *
+ * AD = SDF x 0.125 x RP
+ *
+ * where:
+ *
+ * AD is the applied delay, in ns.
+ * SDF is the sample delay factor, which is dimensionless.
+ * RP is the reference period, in ns, which is a full clock period
+ * if the DLL is being driven by full periods, or half that if
+ * the DLL is being driven by half periods.
+ *
+ * Let's re-arrange this in a way that's more useful to us:
+ *
+ * 8
+ * SDF = AD x ----
+ * RP
+ *
+ * The reference period is either the clock period or half that, so this
+ * is:
+ *
+ * 8 AD x DDF
+ * SDF = AD x ----- = --------
+ * f x P P
+ *
+ * where:
+ *
+ * f is 1 or 1/2, depending on how we're driving the DLL.
+ * P is the clock period.
+ * DDF is the DLL Delay Factor, a dimensionless value that
+ * incorporates all the constants in the conversion.
+ *
+ * DDF will be either 8 or 16, both of which are powers of two. We can
+ * reduce the cost of this conversion by using bit shifts instead of
+ * multiplication or division. Thus:
+ *
+ * AD << DDS
+ * SDF = ---------
+ * P
+ *
+ * or
+ *
+ * AD = (SDF >> DDS) x P
+ *
+ * where:
+ *
+ * DDS is the DLL Delay Shift, the logarithm to base 2 of the DDF.
+ */
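+	/*
+	 * A worked example with illustrative numbers: driving the DLL with
+	 * full periods (DDS = 3) at a 10ns clock, an ideal delay AD = 6ns
+	 * quantizes to SDF = ceil((6 << 3) / 10) = 5, which the hardware
+	 * applies as 5 x 0.125 x 10ns = 6.25ns.
+	 */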
+ if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
+ dll_use_half_periods = true;
+ dll_delay_shift = 3 + 1;
+ } else {
+ dll_use_half_periods = false;
+ dll_delay_shift = 3;
+ }
+
+ /*
+ * Compute the maximum sample delay the NFC allows, under current
+ * conditions. If the clock is running too slowly, no sample delay is
+ * possible.
+ */
+ if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
+ max_sample_delay_in_ns = 0;
+ else {
+ /*
+ * Compute the delay implied by the largest sample delay factor
+ * the NFC allows.
+ */
+ max_sample_delay_in_ns =
+ (nfc->max_sample_delay_factor * clock_period_in_ns) >>
+ dll_delay_shift;
+
+ /*
+		 * Check if the implied sample delay is larger than the NFC
+		 * actually allows.
+ */
+ if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
+ max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
+ }
+
+ /*
+ * Check if improved timing information is available. If not, we have to
+ * use a less-sophisticated algorithm.
+ */
+ if (!improved_timing_is_available) {
+ /*
+ * Fold the read setup time required by the NFC into the ideal
+ * sample delay.
+ */
+ ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
+ nfc->internal_data_setup_in_ns;
+
+ /*
+ * The ideal sample delay may be greater than the maximum
+ * allowed by the NFC. If so, we can trade off sample delay time
+ * for more data setup time.
+ *
+ * In each iteration of the following loop, we add a cycle to
+ * the data setup time and subtract a corresponding amount from
+		 * the sample delay until we've satisfied the constraints or
+ * can't do any better.
+ */
+ while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+ data_setup_in_cycles++;
+ ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ }
+
+ /*
+ * Compute the sample delay factor that corresponds most closely
+ * to the ideal sample delay. If the result is too large for the
+ * NFC, use the maximum value.
+ *
+ * Notice that we use the ns_to_cycles function to compute the
+ * sample delay factor. We do this because the form of the
+ * computation is the same as that for calculating cycles.
+ */
+ sample_delay_factor =
+ ns_to_cycles(
+ ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+
+ /* Skip to the part where we return our results. */
+ goto return_results;
+ }
+
+ /*
+ * If control arrives here, we have more detailed timing information,
+ * so we can use a better algorithm.
+ */
+
+ /*
+ * Fold the read setup time required by the NFC into the maximum
+ * propagation delay.
+ */
+ max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;
+
+ /*
+ * Earlier, we computed the number of clock cycles required to satisfy
+ * the data setup time. Now, we need to know the actual nanoseconds.
+ */
+ data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;
+
+ /*
+ * Compute tEYE, the width of the data eye when reading from the NAND
+ * Flash. The eye width is fundamentally determined by the data setup
+ * time, perturbed by propagation delays and some characteristics of the
+ * NAND Flash device.
+ *
+ * start of the eye = max_prop_delay + tREA
+ * end of the eye = min_prop_delay + tRHOH + data_setup
+ */
+ tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
+ (int)data_setup_in_ns;
+
+ tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;
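+
+	/*
+	 * e.g. with illustrative numbers: max_prop = 9ns, tREA = 20ns,
+	 * min_prop = 5ns, tRHOH = 15ns and 30ns of data setup give
+	 * tEYE = (5 + 15 + 30) - (9 + 20) = 21ns.
+	 */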
+
+ /*
+ * The eye must be open. If it's not, we can try to open it by
+ * increasing its main forcer, the data setup time.
+ *
+ * In each iteration of the following loop, we increase the data setup
+ * time by a single clock cycle. We do this until either the eye is
+ * open or we run into NFC limits.
+ */
+ while ((tEYE <= 0) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+ }
+
+ /*
+ * When control arrives here, the eye is open. The ideal time to sample
+ * the data is in the center of the eye:
+ *
+ * end of the eye + start of the eye
+ * --------------------------------- - data_setup
+ * 2
+ *
+ * After some algebra, this simplifies to the code immediately below.
+ */
+ ideal_sample_delay_in_ns =
+ ((int)max_prop_delay_in_ns +
+ (int)target.tREA_in_ns +
+ (int)min_prop_delay_in_ns +
+ (int)target.tRHOH_in_ns -
+ (int)data_setup_in_ns) >> 1;
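+
+	/* with the illustrative numbers above: (9 + 20 + 5 + 15 - 30) >> 1 = 9ns */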
+
+ /*
+ * The following figure illustrates some aspects of a NAND Flash read:
+ *
+ *
+ * __ _____________________________________
+ * RDN \_________________/
+ *
+ * <---- tEYE ----->
+ * /-----------------\
+ * Read Data ----------------------------< >---------
+ * \-----------------/
+ * ^ ^ ^ ^
+ * | | | |
+ * |<--Data Setup -->|<--Delay Time -->| |
+ * | | | |
+ * | | |
+ * | |<-- Quantized Delay Time -->|
+ * | | |
+ *
+ *
+ * We have some issues we must now address:
+ *
+ * (1) The *ideal* sample delay time must not be negative. If it is, we
+ * jam it to zero.
+ *
+ * (2) The *ideal* sample delay time must not be greater than that
+ * allowed by the NFC. If it is, we can increase the data setup
+ * time, which will reduce the delay between the end of the data
+ * setup and the center of the eye. It will also make the eye
+ * larger, which might help with the next issue...
+ *
+ * (3) The *quantized* sample delay time must not fall either before the
+ * eye opens or after it closes (the latter is the problem
+ * illustrated in the above figure).
+ */
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ /*
+ * Extend the data setup as needed to reduce the ideal sample delay
+ * below the maximum permitted by the NFC.
+ */
+ while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+
+ /*
+ * Decrease the ideal sample delay by one half cycle, to keep it
+ * in the middle of the eye.
+ */
+ ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+ }
+
+ /*
+ * Compute the sample delay factor that corresponds to the ideal sample
+ * delay. If the result is too large, then use the maximum allowed
+ * value.
+ *
+ * Notice that we use the ns_to_cycles function to compute the sample
+ * delay factor. We do this because the form of the computation is the
+ * same as that for calculating cycles.
+ */
+ sample_delay_factor =
+ ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+
+ /*
+ * These macros conveniently encapsulate a computation we'll use to
+ * continuously evaluate whether or not the data sample delay is inside
+ * the eye.
+ */
+ #define IDEAL_DELAY ((int) ideal_sample_delay_in_ns)
+
+ #define QUANTIZED_DELAY \
+ ((int) ((sample_delay_factor * clock_period_in_ns) >> \
+ dll_delay_shift))
+
+ #define DELAY_ERROR (abs(QUANTIZED_DELAY - IDEAL_DELAY))
+
+ #define SAMPLE_IS_NOT_WITHIN_THE_EYE (DELAY_ERROR > (tEYE >> 1))
+
+ /*
+ * While the quantized sample time falls outside the eye, reduce the
+ * sample delay or extend the data setup to move the sampling point back
+ * toward the eye. Do not allow the number of data setup cycles to
+ * exceed the maximum allowed by the NFC.
+ */
+ while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
+ (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+ /*
+ * If control arrives here, the quantized sample delay falls
+ * outside the eye. Check if it's before the eye opens, or after
+ * the eye closes.
+ */
+ if (QUANTIZED_DELAY > IDEAL_DELAY) {
+ /*
+ * If control arrives here, the quantized sample delay
+ * falls after the eye closes. Decrease the quantized
+ * delay time and then go back to re-evaluate.
+ */
+ if (sample_delay_factor != 0)
+ sample_delay_factor--;
+ continue;
+ }
+
+ /*
+ * If control arrives here, the quantized sample delay falls
+ * before the eye opens. Shift the sample point by increasing
+ * data setup time. This will also make the eye larger.
+ */
+
+ /* Give a cycle to data setup. */
+ data_setup_in_cycles++;
+ /* Synchronize the data setup time with the cycles. */
+ data_setup_in_ns += clock_period_in_ns;
+ /* Adjust tEYE accordingly. */
+ tEYE += clock_period_in_ns;
+
+ /*
+ * Decrease the ideal sample delay by one half cycle, to keep it
+ * in the middle of the eye.
+ */
+ ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+ /* ...and one less period for the delay time. */
+ ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+ /* Jam a negative ideal sample delay to zero. */
+ if (ideal_sample_delay_in_ns < 0)
+ ideal_sample_delay_in_ns = 0;
+
+ /*
+ * We have a new ideal sample delay, so re-compute the quantized
+ * delay.
+ */
+ sample_delay_factor =
+ ns_to_cycles(
+ ideal_sample_delay_in_ns << dll_delay_shift,
+ clock_period_in_ns, 0);
+
+ if (sample_delay_factor > nfc->max_sample_delay_factor)
+ sample_delay_factor = nfc->max_sample_delay_factor;
+ }
+
+ /* Control arrives here when we're ready to return our results. */
+return_results:
+ hw->data_setup_in_cycles = data_setup_in_cycles;
+ hw->data_hold_in_cycles = data_hold_in_cycles;
+ hw->address_setup_in_cycles = address_setup_in_cycles;
+ hw->use_half_periods = dll_use_half_periods;
+ hw->sample_delay_factor = sample_delay_factor;
+ hw->device_busy_timeout = GPMI_DEFAULT_BUSY_TIMEOUT;
+ hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
+
+ /* Return success. */
+ return 0;
+}
+
+/*
+ * <1> Firstly, we should know what the GPMI-clock means.
+ *     The GPMI-clock is the internal clock in the gpmi nand controller.
+ *     If you set the gpmi nand controller to 100MHz, the GPMI-clock's
+ *     period is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
+ *
+ * <2> Secondly, we should know the frequency on the nand chip pins.
+ * The frequency on the nand chip pins is derived from the GPMI-clock.
+ * We can get it from the following equation:
+ *
+ * F = G / (DS + DH)
+ *
+ * F : the frequency on the nand chip pins.
+ * G : the GPMI clock, such as 100MHz.
+ * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
+ * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
+ *
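+ * For example (illustrative numbers): with G = 100MHz and DS = DH = 1,
+ * the frequency on the pins is F = 100MHz / 2 = 50MHz, which is what
+ * this driver programs for the ONFI timing mode 5.
+ *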
+ * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
+ *     the nand EDO (Extended Data Out) timing could be applied.
+ *     The GPMI implements a feedback read strobe to sample the read data.
+ *     The feedback read strobe can be delayed to support the nand EDO
+ *     timing, where the read strobe may deassert before the read data is
+ *     valid, and the read data remains valid for some time after the read
+ *     strobe.
+ *
+ * The following figure illustrates some aspects of a NAND Flash read:
+ *
+ * |<---tREA---->|
+ * | |
+ * | | |
+ * |<--tRP-->| |
+ * | | |
+ * __ ___|__________________________________
+ * RDN \________/ |
+ * |
+ * /---------\
+ * Read Data --------------< >---------
+ * \---------/
+ * | |
+ * |<-D->|
+ * FeedbackRDN ________ ____________
+ * \___________/
+ *
+ * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
+ *
+ *
+ * <4> Now, we begin to describe how to compute the right RDN_DELAY.
+ *
+ * 4.1) From the aspect of the nand chip pins:
+ * Delay = (tREA + C - tRP) {1}
+ *
+ * tREA : the maximum read access time. From the ONFI nand standards,
+ *         we know that tREA is 16ns in mode 5 and 20ns in mode 4.
+ *         Please check it at: www.onfi.org
+ *  C    : a constant to adjust the delay. The default is 4.
+ * tRP : the read pulse width.
+ * Specified by the HW_GPMI_TIMING0:DATA_SETUP:
+ * tRP = (GPMI-clock-period) * DATA_SETUP
+ *
+ * 4.2) From the aspect of the GPMI nand controller:
+ * Delay = RDN_DELAY * 0.125 * RP {2}
+ *
+ * RP : the DLL reference period.
+ *         if (GPMI-clock-period > DLL_THRESHOLD)
+ * RP = GPMI-clock-period / 2;
+ * else
+ * RP = GPMI-clock-period;
+ *
+ * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
+ *         is greater than DLL_THRESHOLD. In other SoCs, the DLL_THRESHOLD
+ *         is 16ns, but in mx6q, we use 12ns.
+ *
+ * 4.3) since {1} equals {2}, we get:
+ *
+ * (tREA + 4 - tRP) * 8
+ * RDN_DELAY = --------------------- {3}
+ * RP
+ *
+ * 4.4) We only support the fastest asynchronous mode of ONFI nand.
+ *      For some ONFI nand chips, mode 4 is the fastest mode, while for
+ *      others mode 5 is the fastest. So we only support modes 4 and 5;
+ *      there is no need to support other modes.
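+ *
+ * 4.5) A worked example with illustrative numbers: in mode 5 with a
+ *      100MHz GPMI clock, tREA = 16ns, tRP = 10ns (DATA_SETUP = 1) and
+ *      RP = 10ns, so {3} gives RDN_DELAY = (16 + 4 - 10) * 8 / 10 = 8.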
+ */
+static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
+ struct gpmi_nfc_hardware_timing *hw)
+{
+ struct resources *r = &this->resources;
+ unsigned long rate = clk_get_rate(r->clock[0]);
+ int mode = this->timing_mode;
+ int dll_threshold = this->devdata->max_chain_delay;
+ unsigned long delay;
+ unsigned long clk_period;
+ int t_rea;
+ int c = 4;
+ int t_rp;
+ int rp;
+
+ /*
+ * [1] for GPMI_HW_GPMI_TIMING0:
+ * The async mode requires 40MHz for mode 4, 50MHz for mode 5.
+ * The GPMI can support 100MHz at most. So if we want to
+ * get the 40MHz or 50MHz, we have to set DS=1, DH=1.
+ * Set the ADDRESS_SETUP to 0 in mode 4.
+ */
+ hw->data_setup_in_cycles = 1;
+ hw->data_hold_in_cycles = 1;
+ hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0);
+
+ /* [2] for GPMI_HW_GPMI_TIMING1 */
+ hw->device_busy_timeout = 0x9000;
+
+ /* [3] for GPMI_HW_GPMI_CTRL1 */
+ hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
+
+ /*
+	 * Scale the numerator and denominator in {3} by 10.
+	 * This gives us a more accurate result.
+ */
+ clk_period = NSEC_PER_SEC / (rate / 10);
+ dll_threshold *= 10;
+ t_rea = ((mode == 5) ? 16 : 20) * 10;
+ c *= 10;
+
+ t_rp = clk_period * 1; /* DATA_SETUP is 1 */
+
+ if (clk_period > dll_threshold) {
+ hw->use_half_periods = 1;
+ rp = clk_period / 2;
+ } else {
+ hw->use_half_periods = 0;
+ rp = clk_period;
+ }
+
+ /*
+	 * Multiply the numerator by 10 so that we can round off:
+	 *	7.8 rounds up to 8; 7.4 rounds down to 7.
+ */
+ delay = (((t_rea + c - t_rp) * 8) * 10) / rp;
+ delay = (delay + 5) / 10;
+
+ hw->sample_delay_factor = delay;
+}
+
+static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
+{
+ struct resources *r = &this->resources;
+ struct nand_chip *nand = &this->nand;
+ struct mtd_info *mtd = &this->mtd;
+ uint8_t *feature;
+ unsigned long rate;
+ int ret;
+
+ feature = kzalloc(ONFI_SUBFEATURE_PARAM_LEN, GFP_KERNEL);
+ if (!feature)
+ return -ENOMEM;
+
+ nand->select_chip(mtd, 0);
+
+	/* [1] send SET FEATURE command to NAND */
+ feature[0] = mode;
+ ret = nand->onfi_set_features(mtd, nand,
+ ONFI_FEATURE_ADDR_TIMING_MODE, feature);
+ if (ret)
+ goto err_out;
+
+ /* [2] send GET FEATURE command to double-check the timing mode */
+ memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN);
+ ret = nand->onfi_get_features(mtd, nand,
+ ONFI_FEATURE_ADDR_TIMING_MODE, feature);
+ if (ret || feature[0] != mode)
+ goto err_out;
+
+ nand->select_chip(mtd, -1);
+
+ /* [3] set the main IO clock, 100MHz for mode 5, 80MHz for mode 4. */
+ rate = (mode == 5) ? 100000000 : 80000000;
+ clk_set_rate(r->clock[0], rate);
+
+ /* Let the gpmi_begin() re-compute the timing again. */
+ this->flags &= ~GPMI_TIMING_INIT_OK;
+
+ this->flags |= GPMI_ASYNC_EDO_ENABLED;
+ this->timing_mode = mode;
+ kfree(feature);
+	dev_info(this->dev, "enabled the asynchronous EDO mode %d\n", mode);
+ return 0;
+
+err_out:
+ nand->select_chip(mtd, -1);
+ kfree(feature);
+	dev_err(this->dev, "mode:%d, failed to set the feature.\n", mode);
+ return -EINVAL;
+}
+
+int gpmi_extra_init(struct gpmi_nand_data *this)
+{
+ struct nand_chip *chip = &this->nand;
+
+ /* Enable the asynchronous EDO feature. */
+ if (GPMI_IS_MX6(this) && chip->onfi_version) {
+ int mode = onfi_get_async_timing_mode(chip);
+
+ /* We only support the timing mode 4 and mode 5. */
+ if (mode & ONFI_TIMING_MODE_5)
+ mode = 5;
+ else if (mode & ONFI_TIMING_MODE_4)
+ mode = 4;
+ else
+ return 0;
+
+ return enable_edo_mode(this, mode);
+ }
+ return 0;
+}
+
+/* Begin the I/O */
+void gpmi_begin(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ void __iomem *gpmi_regs = r->gpmi_regs;
+ unsigned int clock_period_in_ns;
+ uint32_t reg;
+ unsigned int dll_wait_time_in_us;
+ struct gpmi_nfc_hardware_timing hw;
+ int ret;
+
+ /* Enable the clock. */
+ ret = gpmi_enable_clk(this);
+ if (ret) {
+		dev_err(this->dev, "Failed to enable the clock\n");
+ goto err_out;
+ }
+
+ /* Only initialize the timing once */
+ if (this->flags & GPMI_TIMING_INIT_OK)
+ return;
+ this->flags |= GPMI_TIMING_INIT_OK;
+
+ if (this->flags & GPMI_ASYNC_EDO_ENABLED)
+ gpmi_compute_edo_timing(this, &hw);
+ else
+ gpmi_nfc_compute_hardware_timing(this, &hw);
+
+ /* [1] Set HW_GPMI_TIMING0 */
+ reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
+ BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
+ BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);
+
+ writel(reg, gpmi_regs + HW_GPMI_TIMING0);
+
+ /* [2] Set HW_GPMI_TIMING1 */
+ writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout),
+ gpmi_regs + HW_GPMI_TIMING1);
+
+ /* [3] The following code is to set the HW_GPMI_CTRL1. */
+
+ /* Set the WRN_DLY_SEL */
+ writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR);
+ writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel),
+ gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. */
+ writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* Clear out the DLL control fields. */
+ reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD;
+ writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* If no sample delay is called for, return immediately. */
+ if (!hw.sample_delay_factor)
+ return;
+
+ /* Set RDN_DELAY or HALF_PERIOD. */
+ reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0)
+ | BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor);
+
+ writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* At last, we enable the DLL. */
+ writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /*
+ * After we enable the GPMI DLL, we have to wait 64 clock cycles before
+ * we can use the GPMI. Calculate the amount of time we need to wait,
+ * in microseconds.
+ */
+ clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]);
+ dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;
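+	/* e.g. a 10ns GPMI clock gives 640ns here, which truncates to 0us */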
+
+ if (!dll_wait_time_in_us)
+ dll_wait_time_in_us = 1;
+
+ /* Wait for the DLL to settle. */
+ udelay(dll_wait_time_in_us);
+
+err_out:
+ return;
+}
+
+void gpmi_end(struct gpmi_nand_data *this)
+{
+ gpmi_disable_clk(this);
+}
+
+/* Clears a BCH interrupt. */
+void gpmi_clear_bch(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
+}
+
+/* Returns the Ready/Busy status of the given chip. */
+int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
+{
+ struct resources *r = &this->resources;
+ uint32_t mask = 0;
+ uint32_t reg = 0;
+
+ if (GPMI_IS_MX23(this)) {
+ mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
+ reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
+ } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
+ /*
+ * In the imx6, all the ready/busy pins are bound
+ * together. So we only need to check chip 0.
+ */
+ if (GPMI_IS_MX6(this))
+ chip = 0;
+
+ /* MX28 shares the same R/B register as MX6Q. */
+ mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
+ reg = readl(r->gpmi_regs + HW_GPMI_STAT);
+ } else
+ dev_err(this->dev, "unknown arch.\n");
+ return reg & mask;
+}
+
+static inline void set_dma_type(struct gpmi_nand_data *this,
+ enum dma_ops_type type)
+{
+ this->last_dma_type = this->dma_type;
+ this->dma_type = type;
+}
+
+int gpmi_send_command(struct gpmi_nand_data *this)
+{
+ struct dma_chan *channel = get_dma_chan(this);
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl;
+ int chip = this->current_chip;
+ u32 pio[3];
+
+ /* [1] send out the PIO words */
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
+ | BM_GPMI_CTRL0_ADDRESS_INCREMENT
+ | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
+ pio[1] = pio[2] = 0;
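+	/*
+	 * Note: with DMA_TRANS_NONE the mxs-dma driver interprets this
+	 * "scatterlist" pointer as a list of raw PIO words to write, not
+	 * as a real sg list (an mxs-dma convention).
+	 */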
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+ if (!desc)
+ return -EINVAL;
+
+ /* [2] send out the COMMAND + ADDRESS string stored in @buffer */
+ sgl = &this->cmd_sgl;
+
+ sg_init_one(sgl, this->cmd_buffer, this->command_length);
+ dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
+ desc = dmaengine_prep_slave_sg(channel,
+ sgl, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ /* [3] submit the DMA */
+ set_dma_type(this, DMA_FOR_COMMAND);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_data(struct gpmi_nand_data *this)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ uint32_t command_mode;
+ uint32_t address;
+ u32 pio[2];
+
+ /* [1] PIO */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+ pio[1] = 0;
+ desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+ if (!desc)
+ return -EINVAL;
+
+ /* [2] send DMA request */
+ prepare_data_dma(this, DMA_TO_DEVICE);
+ desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+ 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ /* [3] submit the DMA */
+ set_dma_type(this, DMA_FOR_WRITE_DATA);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_read_data(struct gpmi_nand_data *this)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[2];
+
+ /* [1] : send PIO */
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+ pio[1] = 0;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+ if (!desc)
+ return -EINVAL;
+
+ /* [2] : send DMA request */
+ prepare_data_dma(this, DMA_FROM_DEVICE);
+ desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ /* [3] : submit the DMA */
+ set_dma_type(this, DMA_FOR_READ_DATA);
+ return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_page(struct gpmi_nand_data *this,
+ dma_addr_t payload, dma_addr_t auxiliary)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ uint32_t command_mode;
+ uint32_t address;
+ uint32_t ecc_command;
+ uint32_t buffer_mask;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[6];
+
+ /* A DMA descriptor that does an ECC page read. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+ ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
+ buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
+ BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
+ pio[1] = 0;
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+ pio[3] = geo->page_size;
+ pio[4] = payload;
+ pio[5] = auxiliary;
+
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE,
+ DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
+ return start_dma_with_bch_irq(this, desc);
+}
+
+int gpmi_read_page(struct gpmi_nand_data *this,
+ dma_addr_t payload, dma_addr_t auxiliary)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ uint32_t command_mode;
+ uint32_t address;
+ uint32_t ecc_command;
+ uint32_t buffer_mask;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ int chip = this->current_chip;
+ u32 pio[6];
+
+ /* [1] Wait for the chip to report ready. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
+ pio[1] = 0;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio, 2,
+ DMA_TRANS_NONE, 0);
+ if (!desc)
+ return -EINVAL;
+
+ /* [2] Enable the BCH block and read. */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+ ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
+ buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
+ | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+
+ pio[1] = 0;
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+ pio[3] = geo->page_size;
+ pio[4] = payload;
+ pio[5] = auxiliary;
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio,
+ ARRAY_SIZE(pio), DMA_TRANS_NONE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ /* [3] Disable the BCH block */
+ command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+ address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(address)
+ | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+ pio[1] = 0;
+ pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
+ desc = dmaengine_prep_slave_sg(channel,
+ (struct scatterlist *)pio, 3,
+ DMA_TRANS_NONE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ /* [4] submit the DMA */
+ set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
+ return start_dma_with_bch_irq(this, desc);
+}
+
+/**
+ * gpmi_copy_bits - copy bits from one memory region to another
+ * @dst: destination buffer
+ * @dst_bit_off: bit offset we're starting to write at
+ * @src: source buffer
+ * @src_bit_off: bit offset we're starting to read from
+ * @nbits: number of bits to copy
+ *
+ * This function copies bits from one memory region to another, and is used by
+ * the GPMI driver to copy ECC sections which are not guaranteed to be byte
+ * aligned.
+ *
+ * src and dst should not overlap.
+ *
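+ * Example (illustrative): copying nbits = 4 from src_bit_off = 6 to
+ * dst_bit_off = 1 reads from two source bytes and rewrites only bits
+ * 1..4 of the first destination byte, preserving the others.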
+ */
+void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
+ const u8 *src, size_t src_bit_off,
+ size_t nbits)
+{
+ size_t i;
+ size_t nbytes;
+ u32 src_buffer = 0;
+ size_t bits_in_src_buffer = 0;
+
+ if (!nbits)
+ return;
+
+ /*
+ * Move src and dst pointers to the closest byte pointer and store bit
+ * offsets within a byte.
+ */
+ src += src_bit_off / 8;
+ src_bit_off %= 8;
+
+ dst += dst_bit_off / 8;
+ dst_bit_off %= 8;
+
+ /*
+ * Initialize the src_buffer value with bits available in the first
+ * byte of data so that we end up with a byte aligned src pointer.
+ */
+ if (src_bit_off) {
+ src_buffer = src[0] >> src_bit_off;
+ if (nbits >= (8 - src_bit_off)) {
+ bits_in_src_buffer += 8 - src_bit_off;
+ } else {
+ src_buffer &= GENMASK(nbits - 1, 0);
+ bits_in_src_buffer += nbits;
+ }
+ nbits -= bits_in_src_buffer;
+ src++;
+ }
+
+ /* Calculate the number of bytes that can be copied from src to dst. */
+ nbytes = nbits / 8;
+
+ /* Try to align dst to a byte boundary. */
+ if (dst_bit_off) {
+ if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
+ src_buffer |= src[0] << bits_in_src_buffer;
+ bits_in_src_buffer += 8;
+ src++;
+ nbytes--;
+ }
+
+ if (bits_in_src_buffer >= (8 - dst_bit_off)) {
+ dst[0] &= GENMASK(dst_bit_off - 1, 0);
+ dst[0] |= src_buffer << dst_bit_off;
+ src_buffer >>= (8 - dst_bit_off);
+ bits_in_src_buffer -= (8 - dst_bit_off);
+ dst_bit_off = 0;
+ dst++;
+ if (bits_in_src_buffer > 7) {
+ bits_in_src_buffer -= 8;
+ dst[0] = src_buffer;
+ dst++;
+ src_buffer >>= 8;
+ }
+ }
+ }
+
+ if (!bits_in_src_buffer && !dst_bit_off) {
+ /*
+ * Both src and dst pointers are byte aligned, thus we can
+ * just use the optimized memcpy function.
+ */
+ if (nbytes)
+ memcpy(dst, src, nbytes);
+ } else {
+ /*
+ * src buffer is not byte aligned, hence we have to copy each
+ * src byte to the src_buffer variable before extracting a byte
+ * to store in dst.
+ */
+ for (i = 0; i < nbytes; i++) {
+ src_buffer |= src[i] << bits_in_src_buffer;
+ dst[i] = src_buffer;
+ src_buffer >>= 8;
+ }
+ }
+ /* Update dst and src pointers */
+ dst += nbytes;
+ src += nbytes;
+
+ /*
+ * nbits is the number of remaining bits. It should not exceed 8 as
+	 * we've already copied as many bytes as possible.
+ */
+ nbits %= 8;
+
+ /*
+	 * If there are no more bits to copy to the destination and the src
+	 * buffer was already byte aligned, then we're done.
+ */
+ if (!nbits && !bits_in_src_buffer)
+ return;
+
+ /* Copy the remaining bits to src_buffer */
+ if (nbits)
+ src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
+ bits_in_src_buffer;
+ bits_in_src_buffer += nbits;
+
+ /*
+	 * In case there were not enough bits to get a byte-aligned dst buffer,
+ * prepare the src_buffer variable to match the dst organization (shift
+ * src_buffer by dst_bit_off and retrieve the least significant bits
+ * from dst).
+ */
+ if (dst_bit_off)
+ src_buffer = (src_buffer << dst_bit_off) |
+ (*dst & GENMASK(dst_bit_off - 1, 0));
+ bits_in_src_buffer += dst_bit_off;
+
+ /*
+ * Keep most significant bits from dst if we end up with an unaligned
+ * number of bits.
+ */
+ nbytes = bits_in_src_buffer / 8;
+ if (bits_in_src_buffer % 8) {
+ src_buffer |= (dst[nbytes] &
+ GENMASK(7, bits_in_src_buffer % 8)) <<
+ (nbytes * 8);
+ nbytes++;
+ }
+
+ /* Copy the remaining bytes to dst */
+ for (i = 0; i < nbytes; i++) {
+ dst[i] = src_buffer;
+ src_buffer >>= 8;
+ }
+}
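+
+/*
+ * Usage sketch (illustrative values, not taken from a real caller):
+ * copying 12 bits that start at bit 3 of src into dst starting at
+ * bit 5 is simply:
+ *
+ *	gpmi_copy_bits(dst, 5, src, 3, 12);
+ *
+ * Neither buffer needs any particular alignment; only the bit offsets
+ * and the bit count matter.
+ */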
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
new file mode 100644
index 000000000..1b8f3500e
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -0,0 +1,2051 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
+#include "gpmi-nand.h"
+#include "bch-regs.h"
+
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
+
+/* add our own bbt descriptor */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr gpmi_bbt_descr = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+/*
+ * We may change the layout if we can get the ECC info from the datasheet,
+ * otherwise we will use the whole (page + OOB) area.
+ */
+static struct nand_ecclayout gpmi_hw_ecclayout = {
+ .eccbytes = 0,
+ .eccpos = { 0, },
+ .oobfree = { {.offset = 0, .length = 0} }
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx23 = {
+ .type = IS_MX23,
+ .bch_max_ecc_strength = 20,
+ .max_chain_delay = 16,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx28 = {
+ .type = IS_MX28,
+ .bch_max_ecc_strength = 20,
+ .max_chain_delay = 16,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6q = {
+ .type = IS_MX6Q,
+ .bch_max_ecc_strength = 40,
+ .max_chain_delay = 12,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6sx = {
+ .type = IS_MX6SX,
+ .bch_max_ecc_strength = 62,
+ .max_chain_delay = 12,
+};
+
+static irqreturn_t bch_irq(int irq, void *cookie)
+{
+ struct gpmi_nand_data *this = cookie;
+
+ gpmi_clear_bch(this);
+ complete(&this->bch_done);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Calculate the ECC strength by hand:
+ * E : the ECC strength.
+ * G : the length of the Galois Field.
+ * N : the number of ECC chunks per page.
+ * O : the oobsize of the NAND chip.
+ * M : the metadata size per page.
+ *
+ * The formula is :
+ * E * G * N
+ * ------------ <= (O - M)
+ * 8
+ *
+ * So, we get E by:
+ * (O - M) * 8
+ * E <= -------------
+ * G * N
+ */
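+/*
+ * Worked example (illustrative numbers, not from any particular
+ * datasheet): O = 64, M = 10, G = 13 and N = 4 give
+ * E <= (64 - 10) * 8 / (13 * 4) = 8.3, so E = 8 after rounding down
+ * to an even number.
+ */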
+static inline int get_ecc_strength(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ int ecc_strength;
+
+ ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
+ / (geo->gf_len * geo->ecc_chunk_count);
+
+ /* Round ecc_strength down to the nearest even number. */
+ return round_down(ecc_strength, 2);
+}
+
+static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ /* Do the sanity check. */
+ if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
+ /* The mx23/mx28 only support GF(13). */
+ if (geo->gf_len == 14)
+ return false;
+ }
+ return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
+}
+
+/*
+ * If we can get the ECC information from the NAND chip, we do not
+ * need to calculate it ourselves.
+ *
+ * We may have some free OOB space left over in this case.
+ */
+static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = mtd->priv;
+ struct nand_oobfree *of = gpmi_hw_ecclayout.oobfree;
+ unsigned int block_mark_bit_offset;
+
+ if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
+ return false;
+
+ switch (chip->ecc_step_ds) {
+ case SZ_512:
+ geo->gf_len = 13;
+ break;
+ case SZ_1K:
+ geo->gf_len = 14;
+ break;
+ default:
+ dev_err(this->dev,
+ "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
+ chip->ecc_strength_ds, chip->ecc_step_ds);
+ return false;
+ }
+ geo->ecc_chunk_size = chip->ecc_step_ds;
+ geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
+ if (!gpmi_check_ecc(this))
+ return false;
+
+ /* Keep the C >= O */
+ if (geo->ecc_chunk_size < mtd->oobsize) {
+ dev_err(this->dev,
+ "unsupported nand chip. ecc size: %d, oob size : %d\n",
+ chip->ecc_step_ds, mtd->oobsize);
+ return false;
+ }
+
+ /* The default value, see comment in the legacy_set_geometry(). */
+ geo->metadata_size = 10;
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+ /*
+ * A NAND chip with a 2K page (512-byte data chunks) is laid out below:
+ *
+ * | P |
+ * |<----------------------------------------------------->|
+ * | |
+ * | (Block Mark) |
+ * | P' | | | |
+ * |<-------------------------------------------->| D | | O' |
+ * | |<---->| |<--->|
+ * V V V V V
+ * +---+----------+-+----------+-+----------+-+----------+-+-----+
+ * | M | data |E| data |E| data |E| data |E| |
+ * +---+----------+-+----------+-+----------+-+----------+-+-----+
+ * ^ ^
+ * | O |
+ * |<------------>|
+ * | |
+ *
+ * P : the page size for BCH module.
+ * E : the ECC strength.
+ * G : the length of the Galois Field.
+ * N : the number of ECC chunks per page.
+ * M : the metadata size per page.
+ * C : the ecc chunk size, aka the "data" above.
+ * P': the nand chip's page size.
+ * O : the nand chip's oob size.
+ * O': the free oob.
+ *
+ * The formula for P is :
+ *
+ * E * G * N
+ * P = ------------ + P' + M
+ * 8
+ *
+ * The position of the block mark moves forward in the ECC-based view
+ * of the page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+ * Please see the comment in legacy_set_geometry().
+ * With the condition C >= O, we still get the same result.
+ * So the bit position of the physical block mark within the ECC-based
+ * view of the page is :
+ * (P' - D) * 8
+ */
+ geo->page_size = mtd->writesize + geo->metadata_size +
+ (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
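+
+ /*
+ * Worked example (illustrative numbers): writesize = 2048, M = 10,
+ * G = 13, E = 8 and N = 4 give P = 2048 + 10 + (13 * 8 * 4) / 8 =
+ * 2110 bytes, which leaves 2 bytes of free OOB on a chip with a
+ * 64-byte OOB (2048 + 64 = 2112 > 2110).
+ */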
+
+ /* The available oob size we have. */
+ if (geo->page_size < mtd->writesize + mtd->oobsize) {
+ of->offset = geo->page_size - mtd->writesize;
+ of->length = mtd->oobsize - of->offset;
+ }
+
+ geo->payload_size = mtd->writesize;
+
+ geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
+ geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
+ + ALIGN(geo->ecc_chunk_count, 4);
+
+ if (!this->swap_block_mark)
+ return true;
+
+ /* For bit swap. */
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return true;
+}
+
+static int legacy_set_geometry(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ unsigned int metadata_size;
+ unsigned int status_size;
+ unsigned int block_mark_bit_offset;
+
+ /*
+ * The size of the metadata can be changed, though we set it to 10
+ * bytes now. But it can't be too large, because we have to save
+ * enough space for BCH.
+ */
+ geo->metadata_size = 10;
+
+ /* The default for the length of Galois Field. */
+ geo->gf_len = 13;
+
+ /* The default for chunk size. */
+ geo->ecc_chunk_size = 512;
+ while (geo->ecc_chunk_size < mtd->oobsize) {
+ geo->ecc_chunk_size *= 2; /* keep C >= O */
+ geo->gf_len = 14;
+ }
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+ /* We use the same ECC strength for all chunks. */
+ geo->ecc_strength = get_ecc_strength(this);
+ if (!gpmi_check_ecc(this)) {
+ dev_err(this->dev,
+ "required ecc strength of the NAND chip: %d is not supported by the GPMI controller (%d)\n",
+ geo->ecc_strength,
+ this->devdata->bch_max_ecc_strength);
+ return -EINVAL;
+ }
+
+ geo->page_size = mtd->writesize + mtd->oobsize;
+ geo->payload_size = mtd->writesize;
+
+ /*
+ * The auxiliary buffer contains the metadata and the ECC status. The
+ * metadata is padded to the nearest 32-bit boundary. The ECC status
+ * contains one byte for every ECC chunk, and is also padded to the
+ * nearest 32-bit boundary.
+ */
+ metadata_size = ALIGN(geo->metadata_size, 4);
+ status_size = ALIGN(geo->ecc_chunk_count, 4);
+
+ geo->auxiliary_size = metadata_size + status_size;
+ geo->auxiliary_status_offset = metadata_size;
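+
+ /*
+ * Worked example (illustrative numbers): a 10-byte metadata area and
+ * 4 ECC chunks give metadata_size = 12 and status_size = 4, so the
+ * auxiliary buffer spans 16 bytes with the status bytes at offset 12.
+ */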
+
+ if (!this->swap_block_mark)
+ return 0;
+
+ /*
+ * We need to compute the byte and bit offsets of
+ * the physical block mark within the ECC-based view of the page.
+ *
+ * A NAND chip with a 2K page is laid out below:
+ * (Block Mark)
+ * | |
+ * | D |
+ * |<---->|
+ * V V
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ * | M | data |E| data |E| data |E| data |E|
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ *
+ * The position of the block mark moves forward in the ECC-based view
+ * of the page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+ * Given the formula used to compute the ECC strength and the
+ * condition C >= O (C is the ecc chunk size),
+ *
+ * it is easy to deduce the following result:
+ *
+ * E * G (O - M) C - M C - M
+ * ----------- <= ------- <= -------- < ---------
+ * 8 N N (N - 1)
+ *
+ * So, we get:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M) < C
+ * 8
+ *
+ * The above inequality means the position of the block mark within
+ * the ECC-based view of the page is still inside the data chunk,
+ * and NOT in the ECC bits of that chunk.
+ *
+ * Use the following to compute the bit position of the
+ * physical block mark within the ECC-based view of the page:
+ * (page_size - D) * 8
+ *
+ * --Huang Shijie
+ */
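+ /*
+ * Worked example (illustrative numbers): with a 2K page, E = 8,
+ * G = 13, N = 4 and M = 10, D = 8 * 13 * 3 / 8 + 10 = 49 bytes, so
+ * the offset computed below is 2048 * 8 - (312 + 80) = 15992 bits,
+ * i.e. byte 1999, bit 0 of the ECC-based view.
+ */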
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return 0;
+}
+
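+/*
+ * Device-tree sketch (node label is illustrative): a board opts in to
+ * the minimum ECC strength advertised by the chip with:
+ *
+ *	&gpmi {
+ *		fsl,use-minimum-ecc;
+ *	};
+ */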
+int common_nfc_set_geometry(struct gpmi_nand_data *this)
+{
+ if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")
+ && set_geometry_by_ecc_info(this))
+ return 0;
+ return legacy_set_geometry(this);
+}
+
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
+{
+ /* We use DMA channel 0 to access all the NAND chips. */
+ return this->dma_chans[0];
+}
+
+/* Can we use the upper layer's buffer directly for DMA? */
+void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
+{
+ struct scatterlist *sgl = &this->data_sgl;
+ int ret;
+
+ /* first try to map the upper buffer directly */
+ if (virt_addr_valid(this->upper_buf) &&
+ !object_is_on_stack(this->upper_buf)) {
+ sg_init_one(sgl, this->upper_buf, this->upper_len);
+ ret = dma_map_sg(this->dev, sgl, 1, dr);
+ if (ret == 0)
+ goto map_fail;
+
+ this->direct_dma_map_ok = true;
+ return;
+ }
+
+map_fail:
+ /* We have to use our own DMA buffer. */
+ sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
+
+ if (dr == DMA_TO_DEVICE)
+ memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);
+
+ dma_map_sg(this->dev, sgl, 1, dr);
+
+ this->direct_dma_map_ok = false;
+}
+
+/* This will be called after the DMA operation is finished. */
+static void dma_irq_callback(void *param)
+{
+ struct gpmi_nand_data *this = param;
+ struct completion *dma_c = &this->dma_done;
+
+ switch (this->dma_type) {
+ case DMA_FOR_COMMAND:
+ dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
+ break;
+
+ case DMA_FOR_READ_DATA:
+ dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
+ if (!this->direct_dma_map_ok)
+ memcpy(this->upper_buf, this->data_buffer_dma,
+ this->upper_len);
+ break;
+
+ case DMA_FOR_WRITE_DATA:
+ dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
+ break;
+
+ case DMA_FOR_READ_ECC_PAGE:
+ case DMA_FOR_WRITE_ECC_PAGE:
+ /* We have to wait for the BCH interrupt to finish. */
+ break;
+
+ default:
+ dev_err(this->dev, "unknown DMA operation type.\n");
+ }
+
+ complete(dma_c);
+}
+
+int start_dma_without_bch_irq(struct gpmi_nand_data *this,
+ struct dma_async_tx_descriptor *desc)
+{
+ struct completion *dma_c = &this->dma_done;
+ unsigned long timeout;
+
+ init_completion(dma_c);
+
+ desc->callback = dma_irq_callback;
+ desc->callback_param = this;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(get_dma_chan(this));
+
+ /* Wait for the interrupt from the DMA block. */
+ timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
+ if (!timeout) {
+ dev_err(this->dev, "DMA timeout, last DMA :%d\n",
+ this->last_dma_type);
+ gpmi_dump_info(this);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * This function is used when reading or writing pages with BCH.
+ * It waits for the BCH interrupt for at most ONE second.
+ * Actually, we must wait for two interrupts:
+ *	[1] first the DMA interrupt and
+ *	[2] second the BCH interrupt.
+ */
+int start_dma_with_bch_irq(struct gpmi_nand_data *this,
+ struct dma_async_tx_descriptor *desc)
+{
+ struct completion *bch_c = &this->bch_done;
+ unsigned long timeout;
+
+ /* Prepare to receive an interrupt from the BCH block. */
+ init_completion(bch_c);
+
+ /* start the DMA */
+ start_dma_without_bch_irq(this, desc);
+
+ /* Wait for the interrupt from the BCH block. */
+ timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
+ if (!timeout) {
+ dev_err(this->dev, "BCH timeout, last DMA :%d\n",
+ this->last_dma_type);
+ gpmi_dump_info(this);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int acquire_register_block(struct gpmi_nand_data *this,
+ const char *res_name)
+{
+ struct platform_device *pdev = this->pdev;
+ struct resources *res = &this->resources;
+ struct resource *r;
+ void __iomem *p;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
+ p = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
+ res->gpmi_regs = p;
+ else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
+ res->bch_regs = p;
+ else
+ dev_err(this->dev, "unknown resource name : %s\n", res_name);
+
+ return 0;
+}
+
+static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
+{
+ struct platform_device *pdev = this->pdev;
+ const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
+ struct resource *r;
+ int err;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
+ if (!r) {
+ dev_err(this->dev, "Can't get resource for %s\n", res_name);
+ return -ENODEV;
+ }
+
+ err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
+ if (err)
+ dev_err(this->dev, "error requesting BCH IRQ\n");
+
+ return err;
+}
+
+static void release_dma_channels(struct gpmi_nand_data *this)
+{
+ unsigned int i;
+ for (i = 0; i < DMA_CHANS; i++)
+ if (this->dma_chans[i]) {
+ dma_release_channel(this->dma_chans[i]);
+ this->dma_chans[i] = NULL;
+ }
+}
+
+static int acquire_dma_channels(struct gpmi_nand_data *this)
+{
+ struct platform_device *pdev = this->pdev;
+ struct dma_chan *dma_chan;
+
+ /* request dma channel */
+ dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
+ if (!dma_chan) {
+ dev_err(this->dev, "Failed to request DMA channel.\n");
+ goto acquire_err;
+ }
+
+ this->dma_chans[0] = dma_chan;
+ return 0;
+
+acquire_err:
+ release_dma_channels(this);
+ return -EINVAL;
+}
+
+static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
+ "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
+};
+
+static int gpmi_get_clks(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ char **extra_clks = NULL;
+ struct clk *clk;
+ int err, i;
+
+ /* The main clock is stored in the first slot. */
+ r->clock[0] = devm_clk_get(this->dev, "gpmi_io");
+ if (IS_ERR(r->clock[0])) {
+ err = PTR_ERR(r->clock[0]);
+ goto err_clock;
+ }
+
+ /* Get extra clocks */
+ if (GPMI_IS_MX6(this))
+ extra_clks = extra_clks_for_mx6q;
+ if (!extra_clks)
+ return 0;
+
+ for (i = 1; i < GPMI_CLK_MAX; i++) {
+ if (extra_clks[i - 1] == NULL)
+ break;
+
+ clk = devm_clk_get(this->dev, extra_clks[i - 1]);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ goto err_clock;
+ }
+
+ r->clock[i] = clk;
+ }
+
+ if (GPMI_IS_MX6(this))
+ /*
+ * Set the default value for the gpmi clock.
+ *
+ * If you want to use an ONFI NAND in synchronous mode,
+ * you should change the clock rate as needed.
+ */
+ clk_set_rate(r->clock[0], 22000000);
+
+ return 0;
+
+err_clock:
+ dev_dbg(this->dev, "failed to get the clocks.\n");
+ return err;
+}
+
+static int acquire_resources(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_bch_irq(this, bch_irq);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_dma_channels(this);
+ if (ret)
+ goto exit_regs;
+
+ ret = gpmi_get_clks(this);
+ if (ret)
+ goto exit_clock;
+ return 0;
+
+exit_clock:
+ release_dma_channels(this);
+exit_regs:
+ return ret;
+}
+
+static void release_resources(struct gpmi_nand_data *this)
+{
+ release_dma_channels(this);
+}
+
+static int init_hardware(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /*
+ * This structure contains the "safe" GPMI timing that should succeed
+ * with any NAND Flash device (although with less-than-optimal
+ * performance).
+ */
+ struct nand_timing safe_timing = {
+ .data_setup_in_ns = 80,
+ .data_hold_in_ns = 60,
+ .address_setup_in_ns = 25,
+ .gpmi_sample_delay_in_ns = 6,
+ .tREA_in_ns = -1,
+ .tRLOH_in_ns = -1,
+ .tRHOH_in_ns = -1,
+ };
+
+ /* Initialize the hardware. */
+ ret = gpmi_init(this);
+ if (ret)
+ return ret;
+
+ this->timing = safe_timing;
+ return 0;
+}
+
+static int read_page_prepare(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void **use_virt, dma_addr_t *use_phys)
+{
+ struct device *dev = this->dev;
+
+ if (virt_addr_valid(destination)) {
+ dma_addr_t dest_phys;
+
+ dest_phys = dma_map_single(dev, destination,
+ length, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dest_phys)) {
+ if (alt_size < length) {
+ dev_err(dev, "Alternate buffer is too small\n");
+ return -ENOMEM;
+ }
+ goto map_failed;
+ }
+ *use_virt = destination;
+ *use_phys = dest_phys;
+ this->direct_dma_map_ok = true;
+ return 0;
+ }
+
+map_failed:
+ *use_virt = alt_virt;
+ *use_phys = alt_phys;
+ this->direct_dma_map_ok = false;
+ return 0;
+}
+
+static inline void read_page_end(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void *used_virt, dma_addr_t used_phys)
+{
+ if (this->direct_dma_map_ok)
+ dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
+}
+
+static inline void read_page_swap_end(struct gpmi_nand_data *this,
+ void *destination, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ void *used_virt, dma_addr_t used_phys)
+{
+ if (!this->direct_dma_map_ok)
+ memcpy(destination, alt_virt, length);
+}
+
+static int send_page_prepare(struct gpmi_nand_data *this,
+ const void *source, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ const void **use_virt, dma_addr_t *use_phys)
+{
+ struct device *dev = this->dev;
+
+ if (virt_addr_valid(source)) {
+ dma_addr_t source_phys;
+
+ source_phys = dma_map_single(dev, (void *)source, length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, source_phys)) {
+ if (alt_size < length) {
+ dev_err(dev, "Alternate buffer is too small\n");
+ return -ENOMEM;
+ }
+ goto map_failed;
+ }
+ *use_virt = source;
+ *use_phys = source_phys;
+ return 0;
+ }
+map_failed:
+ /*
+ * Copy the content of the source buffer into the alternate
+ * buffer and set up the return values accordingly.
+ */
+ memcpy(alt_virt, source, length);
+
+ *use_virt = alt_virt;
+ *use_phys = alt_phys;
+ return 0;
+}
+
+static void send_page_end(struct gpmi_nand_data *this,
+ const void *source, unsigned length,
+ void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+ const void *used_virt, dma_addr_t used_phys)
+{
+ struct device *dev = this->dev;
+ if (used_virt == source)
+ dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
+}
+
+static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+
+ if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
+ dma_free_coherent(dev, this->page_buffer_size,
+ this->page_buffer_virt,
+ this->page_buffer_phys);
+ kfree(this->cmd_buffer);
+ kfree(this->data_buffer_dma);
+ kfree(this->raw_buffer);
+
+ this->cmd_buffer = NULL;
+ this->data_buffer_dma = NULL;
+ this->page_buffer_virt = NULL;
+ this->page_buffer_size = 0;
+}
+
+/* Allocate the DMA buffers */
+static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct device *dev = this->dev;
+ struct mtd_info *mtd = &this->mtd;
+
+ /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
+ this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
+ if (this->cmd_buffer == NULL)
+ goto error_alloc;
+
+ /*
+ * [2] Allocate a read/write data buffer.
+ *     gpmi_alloc_dma_buffer can be called twice: before nand_scan_ident
+ *     we allocate a PAGE_SIZE buffer, and after nand_scan_ident we
+ *     allocate a buffer of the real NAND page size.
+ */
+ this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (this->data_buffer_dma == NULL)
+ goto error_alloc;
+
+ /*
+ * [3] Allocate the page buffer.
+ *
+ * Both the payload buffer and the auxiliary buffer must appear on
+ * 32-bit boundaries. We presume the size of the payload buffer is a
+ * power of two and is much larger than four, which guarantees the
+ * auxiliary buffer will appear on a 32-bit boundary.
+ */
+ this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
+ this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
+ &this->page_buffer_phys, GFP_DMA);
+ if (!this->page_buffer_virt)
+ goto error_alloc;
+
+ this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!this->raw_buffer)
+ goto error_alloc;
+
+ /* Slice up the page buffer. */
+ this->payload_virt = this->page_buffer_virt;
+ this->payload_phys = this->page_buffer_phys;
+ this->auxiliary_virt = this->payload_virt + geo->payload_size;
+ this->auxiliary_phys = this->payload_phys + geo->payload_size;
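+
+ /*
+ * Example (illustrative numbers): with payload_size = 2048 and
+ * auxiliary_size = 16, the 2064-byte coherent buffer holds the payload
+ * at offset 0 and the auxiliary data at offset 2048.
+ */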
+ return 0;
+
+error_alloc:
+ gpmi_free_dma_buffer(this);
+ return -ENOMEM;
+}
+
+static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ int ret;
+
+ /*
+ * Every operation begins with a command byte and a series of zero or
+ * more address bytes. These are distinguished by either the Address
+ * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
+ * asserted. When MTD is ready to execute the command, it will deassert
+ * both latch enables.
+ *
+ * Rather than run a separate DMA operation for every single byte, we
+ * queue them up and run a single DMA operation for the entire series
+ * of command and data bytes. NAND_CMD_NONE means the END of the queue.
+ */
+ if ((ctrl & (NAND_ALE | NAND_CLE))) {
+ if (data != NAND_CMD_NONE)
+ this->cmd_buffer[this->command_length++] = data;
+ return;
+ }
+
+ if (!this->command_length)
+ return;
+
+ ret = gpmi_send_command(this);
+ if (ret)
+ dev_err(this->dev, "Chip: %u, Error %d\n",
+ this->current_chip, ret);
+
+ this->command_length = 0;
+}
+
+static int gpmi_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ return gpmi_is_ready(this, this->current_chip);
+}
+
+static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ if ((this->current_chip < 0) && (chipnr >= 0))
+ gpmi_begin(this);
+ else if ((this->current_chip >= 0) && (chipnr < 0))
+ gpmi_end(this);
+
+ this->current_chip = chipnr;
+}
+
+static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ dev_dbg(this->dev, "len is %d\n", len);
+ this->upper_buf = buf;
+ this->upper_len = len;
+
+ gpmi_read_data(this);
+}
+
+static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+
+ dev_dbg(this->dev, "len is %d\n", len);
+ this->upper_buf = (uint8_t *)buf;
+ this->upper_len = len;
+
+ gpmi_send_data(this);
+}
+
+static uint8_t gpmi_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ uint8_t *buf = this->data_buffer_dma;
+
+ gpmi_read_buf(mtd, buf, 1);
+ return buf[0];
+}
+
+/*
+ * Handles block mark swapping.
+ * It can be called to swap the block mark, or to swap it back,
+ * because the two operations are the same.
+ */
+static void block_mark_swapping(struct gpmi_nand_data *this,
+ void *payload, void *auxiliary)
+{
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ unsigned char *p;
+ unsigned char *a;
+ unsigned int bit;
+ unsigned char mask;
+ unsigned char from_data;
+ unsigned char from_oob;
+
+ if (!this->swap_block_mark)
+ return;
+
+ /*
+ * If control arrives here, we're swapping. Make some convenience
+ * variables.
+ */
+ bit = nfc_geo->block_mark_bit_offset;
+ p = payload + nfc_geo->block_mark_byte_offset;
+ a = auxiliary;
+
+ /*
+ * Get the byte from the data area that overlays the block mark. Since
+ * the ECC engine applies its own view to the bits in the page, the
+ * physical block mark won't (in general) appear on a byte boundary in
+ * the data.
+ */
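+ /*
+ * Example (illustrative): with bit = 4, from_data below ends up as
+ * the high nibble of p[0] followed by the low nibble of p[1].
+ */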
+ from_data = (p[0] >> bit) | (p[1] << (8 - bit));
+
+ /* Get the byte from the OOB. */
+ from_oob = a[0];
+
+ /* Swap them. */
+ a[0] = from_data;
+
+ mask = (0x1 << bit) - 1;
+ p[0] = (p[0] & mask) | (from_oob << bit);
+
+ mask = ~0 << bit;
+ p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
+}
+
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ void *payload_virt;
+ dma_addr_t payload_phys;
+ void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+ unsigned int i;
+ unsigned char *status;
+ unsigned int max_bitflips = 0;
+ int ret;
+
+ dev_dbg(this->dev, "page number is : %d\n", page);
+ ret = read_page_prepare(this, buf, nfc_geo->payload_size,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ &payload_virt, &payload_phys);
+ if (ret) {
+ dev_err(this->dev, "Inadequate DMA buffer\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+ auxiliary_virt = this->auxiliary_virt;
+ auxiliary_phys = this->auxiliary_phys;
+
+ /* go! */
+ ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
+ read_page_end(this, buf, nfc_geo->payload_size,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+ if (ret) {
+ dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
+ return ret;
+ }
+
+ /* handle the block mark swapping */
+ block_mark_swapping(this, payload_virt, auxiliary_virt);
+
+ /* Loop over status bytes, accumulating ECC status. */
+ status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+
+ for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
+ if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
+ continue;
+
+ if (*status == STATUS_UNCORRECTABLE) {
+ mtd->ecc_stats.failed++;
+ continue;
+ }
+ mtd->ecc_stats.corrected += *status;
+ max_bitflips = max_t(unsigned int, max_bitflips, *status);
+ }
+
+ if (oob_required) {
+ /*
+ * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+ * for details about our policy for delivering the OOB.
+ *
+ * We fill the caller's buffer with set bits, and then copy the
+ * block mark to the caller's buffer. Note that, if block mark
+ * swapping was necessary, it has already been done, so we can
+ * rely on the first byte of the auxiliary buffer to contain
+ * the block mark.
+ */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+ chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+ }
+
+ read_page_swap_end(this, buf, nfc_geo->payload_size,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+
+ return max_bitflips;
+}
+
+/* Fake a virtual small page for the subpage read */
+static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offs, uint32_t len, uint8_t *buf, int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ void __iomem *bch_regs = this->resources.bch_regs;
+ struct bch_geometry old_geo = this->bch_geometry;
+ struct bch_geometry *geo = &this->bch_geometry;
+ int size = chip->ecc.size; /* ECC chunk size */
+ int meta, n, page_size;
+ u32 r1_old, r2_old, r1_new, r2_new;
+ unsigned int max_bitflips;
+ int first, last, marker_pos;
+ int ecc_parity_size;
+ int col = 0;
+ int old_swap_block_mark = this->swap_block_mark;
+
+ /* The size of ECC parity */
+ ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
+
+ /* Align it with the chunk size */
+ first = offs / size;
+ last = (offs + len - 1) / size;
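+
+ /*
+ * Example (illustrative numbers): with 512-byte chunks, a read at
+ * offs = 600, len = 100 touches bytes 600..699, so first = last = 1
+ * and only chunk 1 has to be read and corrected.
+ */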
+
+ if (this->swap_block_mark) {
+ /*
+ * Find the chunk which contains the Block Marker.
+ * If this chunk is in the range of [first, last],
+ * we have to read out the whole page.
+ * Why? Because we have swapped the data at the Block Marker
+ * position into the metadata, which is bound to chunk 0.
+ */
+ marker_pos = geo->block_mark_byte_offset / size;
+ if (last >= marker_pos && first <= marker_pos) {
+ dev_dbg(this->dev,
+ "page:%d, first:%d, last:%d, marker at:%d\n",
+ page, first, last, marker_pos);
+ return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ }
+ }
+
+ meta = geo->metadata_size;
+ if (first) {
+ col = meta + (size + ecc_parity_size) * first;
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
+
+ meta = 0;
+ buf = buf + first * size;
+ }
+
+ /* Save the old environment */
+ r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
+ r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+ /* change the BCH registers and bch_geometry{} */
+ n = last - first + 1;
+ page_size = meta + (size + ecc_parity_size) * n;
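+
+ /*
+ * Continuing the example above (illustrative numbers): first = 1 gives
+ * meta = 0 and n = 1, and ecc_parity_size = 13 * 8 / 8 = 13, so the
+ * faked page is 0 + (512 + 13) * 1 = 525 bytes.
+ */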
+
+ r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
+ BM_BCH_FLASH0LAYOUT0_META_SIZE);
+ r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
+ | BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
+ writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);
+
+ r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
+ r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
+ writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+ geo->ecc_chunk_count = n;
+ geo->payload_size = n * size;
+ geo->page_size = page_size;
+ geo->auxiliary_status_offset = ALIGN(meta, 4);
+
+ dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
+ page, offs, len, col, first, n, page_size);
+
+ /* Read the subpage now */
+ this->swap_block_mark = false;
+ max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+
+ /* Restore */
+ writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
+ writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
+ this->bch_geometry = old_geo;
+ this->swap_block_mark = old_swap_block_mark;
+
+ return max_bitflips;
+}
+
+static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ const void *payload_virt;
+ dma_addr_t payload_phys;
+ const void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+ int ret;
+
+ dev_dbg(this->dev, "ecc write page.\n");
+ if (this->swap_block_mark) {
+ /*
+ * If control arrives here, we're doing block mark swapping.
+ * Since we can't modify the caller's buffers, we must copy them
+ * into our own.
+ */
+ memcpy(this->payload_virt, buf, mtd->writesize);
+ payload_virt = this->payload_virt;
+ payload_phys = this->payload_phys;
+
+ memcpy(this->auxiliary_virt, chip->oob_poi,
+ nfc_geo->auxiliary_size);
+ auxiliary_virt = this->auxiliary_virt;
+ auxiliary_phys = this->auxiliary_phys;
+
+ /* Handle block mark swapping. */
+ block_mark_swapping(this,
+ (void *)payload_virt, (void *)auxiliary_virt);
+ } else {
+ /*
+ * If control arrives here, we're not doing block mark swapping,
+ * so we can try to use the caller's buffers.
+ */
+ ret = send_page_prepare(this,
+ buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ &payload_virt, &payload_phys);
+ if (ret) {
+ dev_err(this->dev, "Inadequate payload DMA buffer\n");
+ return 0;
+ }
+
+ ret = send_page_prepare(this,
+ chip->oob_poi, mtd->oobsize,
+ this->auxiliary_virt, this->auxiliary_phys,
+ nfc_geo->auxiliary_size,
+ &auxiliary_virt, &auxiliary_phys);
+ if (ret) {
+ dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
+ goto exit_auxiliary;
+ }
+ }
+
+ /* Ask the NFC. */
+ ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
+ if (ret)
+ dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
+
+ if (!this->swap_block_mark) {
+ send_page_end(this, chip->oob_poi, mtd->oobsize,
+ this->auxiliary_virt, this->auxiliary_phys,
+ nfc_geo->auxiliary_size,
+ auxiliary_virt, auxiliary_phys);
+exit_auxiliary:
+ send_page_end(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+ }
+
+ return 0;
+}
+
+/*
+ * There are several places in this driver where we have to handle the OOB and
+ * block marks. This is the function where things are the most complicated, so
+ * this is where we try to explain it all. All the other places refer back to
+ * here.
+ *
+ * These are the rules, in order of decreasing importance:
+ *
+ * 1) Nothing the caller does can be allowed to imperil the block mark.
+ *
+ * 2) In read operations, the first byte of the OOB we return must reflect the
+ * true state of the block mark, no matter where that block mark appears in
+ * the physical page.
+ *
+ * 3) ECC-based read operations return an OOB full of set bits (since we never
+ * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
+ * return).
+ *
+ * 4) "Raw" read operations return a direct view of the physical bytes in the
+ * page, using the conventional definition of which bytes are data and which
+ * are OOB. This gives the caller a way to see the actual, physical bytes
+ * in the page, without the distortions applied by our ECC engine.
+ *
+ *
+ * What we do for this specific read operation depends on two questions:
+ *
+ * 1) Are we doing a "raw" read, or an ECC-based read?
+ *
+ * 2) Are we using block mark swapping or transcription?
+ *
+ * There are four cases, illustrated by the following Karnaugh map:
+ *
+ * | Raw | ECC-based |
+ * -------------+-------------------------+-------------------------+
+ * | Read the conventional | |
+ * | OOB at the end of the | |
+ * Swapping | page and return it. It | |
+ * | contains exactly what | |
+ * | we want. | Read the block mark and |
+ * -------------+-------------------------+ return it in a buffer |
+ * | Read the conventional | full of set bits. |
+ * | OOB at the end of the | |
+ * | page and also the block | |
+ * Transcribing | mark in the metadata. | |
+ * | Copy the block mark | |
+ * | into the first byte of | |
+ * | the OOB. | |
+ * -------------+-------------------------+-------------------------+
+ *
+ * Note that we break rule #4 in the Transcribing/Raw case because we're not
+ * giving an accurate view of the actual, physical bytes in the page (we're
+ * overwriting the block mark). That's OK because it's more important to follow
+ * rule #2.
+ *
+ * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
+ * easy. When reading a page, for example, the NAND Flash MTD code calls our
+ * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
+ * ECC-based or raw view of the page is implicit in which function it calls
+ * (there is a similar pair of ECC-based/raw functions for writing).
+ */
+static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+
+ dev_dbg(this->dev, "page number is %d\n", page);
+ /* clear the OOB buffer */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+
+ /* Read out the conventional OOB. */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /*
+ * Now, we want to make sure the block mark is correct. In the
+ * non-transcribing case (!GPMI_IS_MX23()), we already have it.
+ * Otherwise, we need to explicitly read it.
+ */
+ if (GPMI_IS_MX23(this)) {
+ /* Read the block mark into the first byte of the OOB buffer. */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ chip->oob_poi[0] = chip->read_byte(mtd);
+ }
+
+ return 0;
+}
+
+static int
+gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ struct nand_oobfree *of = mtd->ecclayout->oobfree;
+ int status = 0;
+
+ /* Do we have available oob area? */
+ if (!of->length)
+ return -EPERM;
+
+ if (!nand_is_slc(chip))
+ return -EPERM;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page);
+ chip->write_buf(mtd, chip->oob_poi + of->offset, of->length);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * This function reads a NAND page without involving the ECC engine (no HW
+ * ECC correction).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with payload DATA), and does not align data
+ * chunks on byte boundaries.
+ * We thus need to take care moving the payload data and ECC bits stored in the
+ * page into the provided buffers, which is why we're using gpmi_copy_bits.
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int eccsize = nfc_geo->ecc_chunk_size;
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *tmp_buf = this->raw_buffer;
+ size_t src_bit_off;
+ size_t oob_bit_off;
+ size_t oob_byte_off;
+ uint8_t *oob = chip->oob_poi;
+ int step;
+
+ chip->read_buf(mtd, tmp_buf,
+ mtd->writesize + mtd->oobsize);
+
+ /*
+ * If required, swap the bad block marker and the data stored in the
+ * metadata section, so that we don't wrongly consider a block as bad.
+ *
+ * See the layout description for a detailed explanation on why this
+ * is needed.
+ */
+ if (this->swap_block_mark) {
+ u8 swap = tmp_buf[0];
+
+ tmp_buf[0] = tmp_buf[mtd->writesize];
+ tmp_buf[mtd->writesize] = swap;
+ }
+
+ /*
+ * Copy the metadata section into the oob buffer (this section is
+ * guaranteed to be aligned on a byte boundary).
+ */
+ if (oob_required)
+ memcpy(oob, tmp_buf, nfc_geo->metadata_size);
+
+ oob_bit_off = nfc_geo->metadata_size * 8;
+ src_bit_off = oob_bit_off;
+
+ /* Extract interleaved payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+ gpmi_copy_bits(buf, step * eccsize * 8,
+ tmp_buf, src_bit_off,
+ eccsize * 8);
+ src_bit_off += eccsize * 8;
+
+ /* Align the last ECC block to a byte boundary */
+ if (step == nfc_geo->ecc_chunk_count - 1 &&
+ (oob_bit_off + eccbits) % 8)
+ eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+ if (oob_required)
+ gpmi_copy_bits(oob, oob_bit_off,
+ tmp_buf, src_bit_off,
+ eccbits);
+
+ src_bit_off += eccbits;
+ oob_bit_off += eccbits;
+ }
+
+ if (oob_required) {
+ oob_byte_off = oob_bit_off / 8;
+
+ if (oob_byte_off < mtd->oobsize)
+ memcpy(oob + oob_byte_off,
+ tmp_buf + mtd->writesize + oob_byte_off,
+ mtd->oobsize - oob_byte_off);
+ }
+
+ return 0;
+}
+
+/*
+ * This function writes a NAND page without involving the ECC engine (no HW
+ * ECC generation).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with payload DATA), and does not align data
+ * chunks on byte boundaries.
+ * We thus need to take care moving the OOB area at the right place in the
+ * final page, which is why we're using gpmi_copy_bits.
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int eccsize = nfc_geo->ecc_chunk_size;
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *tmp_buf = this->raw_buffer;
+ uint8_t *oob = chip->oob_poi;
+ size_t dst_bit_off;
+ size_t oob_bit_off;
+ size_t oob_byte_off;
+ int step;
+
+ /*
+ * Initialize all bits to 1 in case we don't have a buffer for the
+ * payload or oob data, so that unspecified bits are left in their
+ * initial (erased) state.
+ */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ /*
+ * First copy the metadata section (stored in oob buffer) at the
+ * beginning of the page, as imposed by the GPMI layout.
+ */
+ memcpy(tmp_buf, oob, nfc_geo->metadata_size);
+ oob_bit_off = nfc_geo->metadata_size * 8;
+ dst_bit_off = oob_bit_off;
+
+ /* Interleave payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+ gpmi_copy_bits(tmp_buf, dst_bit_off,
+ buf, step * eccsize * 8, eccsize * 8);
+ dst_bit_off += eccsize * 8;
+
+ /* Align the last ECC block to a byte boundary */
+ if (step == nfc_geo->ecc_chunk_count - 1 &&
+ (oob_bit_off + eccbits) % 8)
+ eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+ if (oob_required)
+ gpmi_copy_bits(tmp_buf, dst_bit_off,
+ oob, oob_bit_off, eccbits);
+
+ dst_bit_off += eccbits;
+ oob_bit_off += eccbits;
+ }
+
+ oob_byte_off = oob_bit_off / 8;
+
+ if (oob_required && oob_byte_off < mtd->oobsize)
+ memcpy(tmp_buf + mtd->writesize + oob_byte_off,
+ oob + oob_byte_off, mtd->oobsize - oob_byte_off);
+
+ /*
+ * If required, swap the bad block marker and the first byte of the
+ * metadata section, so that we don't modify the bad block marker.
+ *
+ * See the layout description for a detailed explanation on why this
+ * is needed.
+ */
+ if (this->swap_block_mark) {
+ u8 swap = tmp_buf[0];
+
+ tmp_buf[0] = tmp_buf[mtd->writesize];
+ tmp_buf[mtd->writesize] = swap;
+ }
+
+ chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
+
+ return 0;
+}
+
+static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
+}
+
+static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+
+ return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1);
+}
+
+static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct gpmi_nand_data *this = chip->priv;
+ int ret = 0;
+ uint8_t *block_mark;
+ int column, page, status, chipnr;
+
+ chipnr = (int)(ofs >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
+
+ /* Write the block mark. */
+ block_mark = this->data_buffer_dma;
+ block_mark[0] = 0; /* bad block marker */
+
+ /* Shift to get page */
+ page = (int)(ofs >> chip->page_shift);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
+ chip->write_buf(mtd, block_mark, 1);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ ret = -EIO;
+
+ chip->select_chip(mtd, -1);
+
+ return ret;
+}
+
+static int nand_boot_set_geometry(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *geometry = &this->rom_geometry;
+
+ /*
+ * Set the boot block stride size.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->stride_size_in_pages = 64;
+
+ /*
+ * Set the search area stride exponent.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->search_area_stride_exponent = 2;
+ return 0;
+}
+
+static const char *fingerprint = "STMP";
+static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct device *dev = this->dev;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ unsigned int search_area_size_in_strides;
+ unsigned int stride;
+ unsigned int page;
+ uint8_t *buffer = chip->buffers->databuf;
+ int saved_chip_number;
+ int found_an_ncb_fingerprint = false;
+
+ /* Compute the number of strides in a search area. */
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+
+ saved_chip_number = this->current_chip;
+ chip->select_chip(mtd, 0);
+
+ /*
+ * Loop through the first search area, looking for the NCB fingerprint.
+ */
+ dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
+
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+
+ dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
+
+ /*
+ * Read the NCB fingerprint. The fingerprint is four bytes long
+ * and starts in the 12th byte of the page.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+ chip->read_buf(mtd, buffer, strlen(fingerprint));
+
+ /* Look for the fingerprint. */
+ if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
+ found_an_ncb_fingerprint = true;
+ break;
+ }
+
+ }
+
+ chip->select_chip(mtd, saved_chip_number);
+
+ if (found_an_ncb_fingerprint)
+ dev_dbg(dev, "\tFound a fingerprint\n");
+ else
+ dev_dbg(dev, "\tNo fingerprint found\n");
+ return found_an_ncb_fingerprint;
+}
+
+/* Writes a transcription stamp. */
+static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ unsigned int block_size_in_pages;
+ unsigned int search_area_size_in_strides;
+ unsigned int search_area_size_in_pages;
+ unsigned int search_area_size_in_blocks;
+ unsigned int block;
+ unsigned int stride;
+ unsigned int page;
+ uint8_t *buffer = chip->buffers->databuf;
+ int saved_chip_number;
+ int status;
+
+ /* Compute the search area geometry. */
+ block_size_in_pages = mtd->erasesize / mtd->writesize;
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+ search_area_size_in_pages = search_area_size_in_strides *
+ rom_geo->stride_size_in_pages;
+ search_area_size_in_blocks =
+ (search_area_size_in_pages + (block_size_in_pages - 1)) /
+ block_size_in_pages;
+
+ dev_dbg(dev, "Search Area Geometry :\n");
+ dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
+ dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
+ dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
+
+ /* Select chip 0. */
+ saved_chip_number = this->current_chip;
+ chip->select_chip(mtd, 0);
+
+ /* Loop over blocks in the first search area, erasing them. */
+ dev_dbg(dev, "Erasing the search area...\n");
+
+ for (block = 0; block < search_area_size_in_blocks; block++) {
+ /* Compute the page address. */
+ page = block * block_size_in_pages;
+
+ /* Erase this block. */
+ dev_dbg(dev, "\tErasing block 0x%x\n", block);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ /* Wait for the erase to finish. */
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ dev_err(dev, "[%s] Erase failed.\n", __func__);
+ }
+
+ /* Write the NCB fingerprint into the page buffer. */
+ memset(buffer, ~0, mtd->writesize);
+ memcpy(buffer + 12, fingerprint, strlen(fingerprint));
+
+ /* Loop through the first search area, writing NCB fingerprints. */
+ dev_dbg(dev, "Writing NCB fingerprints...\n");
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+
+ /* Write the first page of the current stride. */
+ dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ chip->ecc.write_page_raw(mtd, chip, buffer, 0);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ /* Wait for the write to finish. */
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ dev_err(dev, "[%s] Write failed.\n", __func__);
+ }
+
+ /* Deselect chip 0. */
+ chip->select_chip(mtd, saved_chip_number);
+ return 0;
+}
+
+static int mx23_boot_init(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = &this->mtd;
+ unsigned int block_count;
+ unsigned int block;
+ int chipnr;
+ int page;
+ loff_t byte;
+ uint8_t block_mark;
+ int ret = 0;
+
+ /*
+ * If control arrives here, we can't use block mark swapping, which
+ * means we're forced to use transcription. First, scan for the
+ * transcription stamp. If we find it, then we don't have to do
+ * anything -- the block marks are already transcribed.
+ */
+ if (mx23_check_transcription_stamp(this))
+ return 0;
+
+ /*
+ * If control arrives here, we couldn't find a transcription stamp,
+ * so we presume the block marks are in the conventional location.
+ */
+ dev_dbg(dev, "Transcribing bad block marks...\n");
+
+ /* Compute the number of blocks in the entire medium. */
+ block_count = chip->chipsize >> chip->phys_erase_shift;
+
+ /*
+ * Loop over all the blocks in the medium, transcribing block marks as
+ * we go.
+ */
+ for (block = 0; block < block_count; block++) {
+ /*
+ * Compute the chip, page and byte addresses for this block's
+ * conventional mark.
+ */
+ chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
+ page = block << (chip->phys_erase_shift - chip->page_shift);
+ byte = block << chip->phys_erase_shift;
+
+ /* Send the command to read the conventional block mark. */
+ chip->select_chip(mtd, chipnr);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ block_mark = chip->read_byte(mtd);
+ chip->select_chip(mtd, -1);
+
+ /*
+ * Check if the block is marked bad. If so, we need to mark it
+ * again, but this time the result will be a mark in the
+ * location where we transcribe block marks.
+ */
+ if (block_mark != 0xff) {
+ dev_dbg(dev, "Transcribing mark in block %u\n", block);
+ ret = chip->block_markbad(mtd, byte);
+ if (ret)
+ dev_err(dev,
+ "Failed to mark block bad with ret %d\n",
+ ret);
+ }
+ }
+
+ /* Write the stamp that indicates we've transcribed the block marks. */
+ mx23_write_transcription_stamp(this);
+ return 0;
+}
+
+static int nand_boot_init(struct gpmi_nand_data *this)
+{
+ nand_boot_set_geometry(this);
+
+ /* This is ROM arch-specific initialization before the BBT scanning. */
+ if (GPMI_IS_MX23(this))
+ return mx23_boot_init(this);
+ return 0;
+}
+
+static int gpmi_set_geometry(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /* Free the temporary DMA memory for reading ID. */
+ gpmi_free_dma_buffer(this);
+
+ /* Set up the NFC geometry which is used by BCH. */
+ ret = bch_set_geometry(this);
+ if (ret) {
+ dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
+ return ret;
+ }
+
+ /* Alloc the new DMA buffers according to the pagesize and oobsize */
+ return gpmi_alloc_dma_buffer(this);
+}
+
+static void gpmi_nand_exit(struct gpmi_nand_data *this)
+{
+ nand_release(&this->mtd);
+ gpmi_free_dma_buffer(this);
+}
+
+static int gpmi_init_last(struct gpmi_nand_data *this)
+{
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = mtd->priv;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct bch_geometry *bch_geo = &this->bch_geometry;
+ int ret;
+
+ /* Set up the medium geometry */
+ ret = gpmi_set_geometry(this);
+ if (ret)
+ return ret;
+
+ /* Init the nand_ecc_ctrl{} */
+ ecc->read_page = gpmi_ecc_read_page;
+ ecc->write_page = gpmi_ecc_write_page;
+ ecc->read_oob = gpmi_ecc_read_oob;
+ ecc->write_oob = gpmi_ecc_write_oob;
+ ecc->read_page_raw = gpmi_ecc_read_page_raw;
+ ecc->write_page_raw = gpmi_ecc_write_page_raw;
+ ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
+ ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = bch_geo->ecc_chunk_size;
+ ecc->strength = bch_geo->ecc_strength;
+ ecc->layout = &gpmi_hw_ecclayout;
+
+ /*
+ * We only enable the subpage read when:
+ * (1) the chip is imx6, and
+ * (2) the size of the ECC parity is byte aligned.
+ */
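+ /*
+ * Example (illustrative numbers): gf_len = 13 and ecc_strength = 8
+ * give 104 parity bits per chunk, which is byte aligned, so subpage
+ * reads get enabled on an i.MX6.
+ */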
+ if (GPMI_IS_MX6(this) &&
+ ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
+ ecc->read_subpage = gpmi_ecc_read_subpage;
+ chip->options |= NAND_SUBPAGE_READ;
+ }
+
+ /*
+ * Can we enable extra features such as EDO or Sync mode?
+ *
+ * We do not check the return value here. That means if we fail to
+ * enable the extra features, we can still run in the normal way.
+ */
+ gpmi_extra_init(this);
+
+ return 0;
+}
+
+static int gpmi_nand_init(struct gpmi_nand_data *this)
+{
+ struct mtd_info *mtd = &this->mtd;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_part_parser_data ppdata = {};
+ int ret;
+
+ /* init current chip */
+ this->current_chip = -1;
+
+ /* init the MTD data structures */
+ mtd->priv = chip;
+ mtd->name = "gpmi-nand";
+ mtd->owner = THIS_MODULE;
+
+ /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
+ chip->priv = this;
+ chip->select_chip = gpmi_select_chip;
+ chip->cmd_ctrl = gpmi_cmd_ctrl;
+ chip->dev_ready = gpmi_dev_ready;
+ chip->read_byte = gpmi_read_byte;
+ chip->read_buf = gpmi_read_buf;
+ chip->write_buf = gpmi_write_buf;
+ chip->badblock_pattern = &gpmi_bbt_descr;
+ chip->block_markbad = gpmi_block_markbad;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
+ this->swap_block_mark = !GPMI_IS_MX23(this);
+
+ if (of_get_nand_on_flash_bbt(this->dev->of_node)) {
+ chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+
+ if (of_property_read_bool(this->dev->of_node,
+ "fsl,no-blockmark-swap"))
+ this->swap_block_mark = false;
+ }
+ dev_dbg(this->dev, "Blockmark swapping %sabled\n",
+ this->swap_block_mark ? "en" : "dis");
+
+ /*
+ * Allocate a temporary DMA buffer for reading ID in the
+ * nand_scan_ident().
+ */
+ this->bch_geometry.payload_size = 1024;
+ this->bch_geometry.auxiliary_size = 128;
+ ret = gpmi_alloc_dma_buffer(this);
+ if (ret)
+ goto err_out;
+
+ ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);
+ if (ret)
+ goto err_out;
+
+ ret = gpmi_init_last(this);
+ if (ret)
+ goto err_out;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ goto err_out;
+
+ ret = nand_boot_init(this);
+ if (ret)
+ goto err_out;
+ ret = chip->scan_bbt(mtd);
+ if (ret)
+ goto err_out;
+
+ ppdata.of_node = this->pdev->dev.of_node;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (ret)
+ goto err_out;
+ return 0;
+
+err_out:
+ gpmi_nand_exit(this);
+ return ret;
+}
+
+static const struct of_device_id gpmi_nand_id_table[] = {
+ {
+ .compatible = "fsl,imx23-gpmi-nand",
+ .data = &gpmi_devdata_imx23,
+ }, {
+ .compatible = "fsl,imx28-gpmi-nand",
+ .data = &gpmi_devdata_imx28,
+ }, {
+ .compatible = "fsl,imx6q-gpmi-nand",
+ .data = &gpmi_devdata_imx6q,
+ }, {
+ .compatible = "fsl,imx6sx-gpmi-nand",
+ .data = &gpmi_devdata_imx6sx,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
+
+static int gpmi_nand_probe(struct platform_device *pdev)
+{
+ struct gpmi_nand_data *this;
+ const struct of_device_id *of_id;
+ int ret;
+
+ this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
+ if (!this)
+ return -ENOMEM;
+
+ of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
+ if (of_id) {
+ this->devdata = of_id->data;
+ } else {
+ dev_err(&pdev->dev, "Failed to find the right device id.\n");
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, this);
+ this->pdev = pdev;
+ this->dev = &pdev->dev;
+
+ ret = acquire_resources(this);
+ if (ret)
+ goto exit_acquire_resources;
+
+ ret = init_hardware(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ ret = gpmi_nand_init(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ dev_info(this->dev, "driver registered.\n");
+
+ return 0;
+
+exit_nfc_init:
+ release_resources(this);
+exit_acquire_resources:
+
+ return ret;
+}
+
+static int gpmi_nand_remove(struct platform_device *pdev)
+{
+ struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+
+ gpmi_nand_exit(this);
+ release_resources(this);
+ return 0;
+}
+
+static struct platform_driver gpmi_nand_driver = {
+ .driver = {
+ .name = "gpmi-nand",
+ .of_match_table = gpmi_nand_id_table,
+ },
+ .probe = gpmi_nand_probe,
+ .remove = gpmi_nand_remove,
+};
+module_platform_driver(gpmi_nand_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
new file mode 100644
index 000000000..544062f65
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -0,0 +1,311 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
+#define __DRIVERS_MTD_NAND_GPMI_NAND_H
+
+#include <linux/mtd/nand.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */
+struct resources {
+ void __iomem *gpmi_regs;
+ void __iomem *bch_regs;
+ unsigned int dma_low_channel;
+ unsigned int dma_high_channel;
+ struct clk *clock[GPMI_CLK_MAX];
+};
+
+/**
+ * struct bch_geometry - BCH geometry description.
+ * @gf_len: The length of the Galois field (e.g., 13 or 14).
+ * @ecc_strength: A number that describes the strength of the ECC
+ * algorithm.
+ * @page_size: The size, in bytes, of a physical page, including
+ * both data and OOB.
+ * @metadata_size: The size, in bytes, of the metadata.
+ * @ecc_chunk_size: The size, in bytes, of a single ECC chunk. Note
+ * the first chunk in the page includes both data and
+ * metadata, so it's a bit larger than this value.
+ * @ecc_chunk_count: The number of ECC chunks in the page.
+ * @payload_size: The size, in bytes, of the payload buffer.
+ * @auxiliary_size: The size, in bytes, of the auxiliary buffer.
+ * @auxiliary_status_offset: The offset into the auxiliary buffer at which
+ * the ECC status appears.
+ * @block_mark_byte_offset: The byte offset in the ECC-based page view at
+ * which the underlying physical block mark appears.
+ * @block_mark_bit_offset: The bit offset into the ECC-based page view at
+ * which the underlying physical block mark appears.
+ */
+struct bch_geometry {
+ unsigned int gf_len;
+ unsigned int ecc_strength;
+ unsigned int page_size;
+ unsigned int metadata_size;
+ unsigned int ecc_chunk_size;
+ unsigned int ecc_chunk_count;
+ unsigned int payload_size;
+ unsigned int auxiliary_size;
+ unsigned int auxiliary_status_offset;
+ unsigned int block_mark_byte_offset;
+ unsigned int block_mark_bit_offset;
+};
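+
+/*
+ * Example (hypothetical numbers, for illustration only): a 2048+64
+ * byte page split into four 512-byte ECC chunks would give
+ * page_size = 2112, ecc_chunk_size = 512 and ecc_chunk_count = 4;
+ * the real values are computed at run time from the NAND parameters
+ * and the BCH capabilities.
+ */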
+
+/**
+ * struct boot_rom_geometry - Boot ROM geometry description.
+ * @stride_size_in_pages: The size of a boot block stride, in pages.
+ * @search_area_stride_exponent: The logarithm to base 2 of the size of a
+ * search area in boot block strides.
+ */
+struct boot_rom_geometry {
+ unsigned int stride_size_in_pages;
+ unsigned int search_area_stride_exponent;
+};
+
+/* DMA operations types */
+enum dma_ops_type {
+ DMA_FOR_COMMAND = 1,
+ DMA_FOR_READ_DATA,
+ DMA_FOR_WRITE_DATA,
+ DMA_FOR_READ_ECC_PAGE,
+ DMA_FOR_WRITE_ECC_PAGE
+};
+
+/**
+ * struct nand_timing - Fundamental timing attributes for NAND.
+ * @data_setup_in_ns: The data setup time, in nanoseconds. Usually the
+ * maximum of tDS and tWP. A negative value
+ * indicates this characteristic isn't known.
+ * @data_hold_in_ns: The data hold time, in nanoseconds. Usually the
+ * maximum of tDH, tWH and tREH. A negative value
+ * indicates this characteristic isn't known.
+ * @address_setup_in_ns: The address setup time, in nanoseconds. Usually
+ * the maximum of tCLS, tCS and tALS. A negative
+ * value indicates this characteristic isn't known.
+ * @gpmi_sample_delay_in_ns: A GPMI-specific timing parameter. A negative value
+ * indicates this characteristic isn't known.
+ * @tREA_in_ns: tREA, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ * @tRLOH_in_ns: tRLOH, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ * @tRHOH_in_ns: tRHOH, in nanoseconds, from the data sheet. A
+ * negative value indicates this characteristic isn't
+ * known.
+ */
+struct nand_timing {
+ int8_t data_setup_in_ns;
+ int8_t data_hold_in_ns;
+ int8_t address_setup_in_ns;
+ int8_t gpmi_sample_delay_in_ns;
+ int8_t tREA_in_ns;
+ int8_t tRLOH_in_ns;
+ int8_t tRHOH_in_ns;
+};
+
+enum gpmi_type {
+ IS_MX23,
+ IS_MX28,
+ IS_MX6Q,
+ IS_MX6SX
+};
+
+struct gpmi_devdata {
+ enum gpmi_type type;
+ int bch_max_ecc_strength;
+ int max_chain_delay; /* See the async EDO mode */
+};
+
+struct gpmi_nand_data {
+ /* flags */
+#define GPMI_ASYNC_EDO_ENABLED (1 << 0)
+#define GPMI_TIMING_INIT_OK (1 << 1)
+ int flags;
+ const struct gpmi_devdata *devdata;
+
+ /* System Interface */
+ struct device *dev;
+ struct platform_device *pdev;
+
+ /* Resources */
+ struct resources resources;
+
+ /* Flash Hardware */
+ struct nand_timing timing;
+ int timing_mode;
+
+ /* BCH */
+ struct bch_geometry bch_geometry;
+ struct completion bch_done;
+
+ /* NAND Boot issue */
+ bool swap_block_mark;
+ struct boot_rom_geometry rom_geometry;
+
+ /* MTD / NAND */
+ struct nand_chip nand;
+ struct mtd_info mtd;
+
+ /* General-use Variables */
+ int current_chip;
+ unsigned int command_length;
+
+ /* passed from upper layer */
+ uint8_t *upper_buf;
+ int upper_len;
+
+ /* for DMA operations */
+ bool direct_dma_map_ok;
+
+ struct scatterlist cmd_sgl;
+ char *cmd_buffer;
+
+ struct scatterlist data_sgl;
+ char *data_buffer_dma;
+
+ void *page_buffer_virt;
+ dma_addr_t page_buffer_phys;
+ unsigned int page_buffer_size;
+
+ void *payload_virt;
+ dma_addr_t payload_phys;
+
+ void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+
+ void *raw_buffer;
+
+ /* DMA channels */
+#define DMA_CHANS 8
+ struct dma_chan *dma_chans[DMA_CHANS];
+ enum dma_ops_type last_dma_type;
+ enum dma_ops_type dma_type;
+ struct completion dma_done;
+
+ /* private */
+ void *private;
+};
+
+/**
+ * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
+ * @data_setup_in_cycles: The data setup time, in cycles.
+ * @data_hold_in_cycles: The data hold time, in cycles.
+ * @address_setup_in_cycles: The address setup time, in cycles.
+ * @device_busy_timeout: The timeout waiting for NAND Ready/Busy;
+ * this value is the number of cycles multiplied
+ * by 4096.
+ * @use_half_periods: Indicates the clock is running slowly, so the
+ * NFC DLL should use half-periods.
+ * @sample_delay_factor: The sample delay factor.
+ * @wrn_dly_sel: The delay on the GPMI write strobe.
+ */
+struct gpmi_nfc_hardware_timing {
+ /* for HW_GPMI_TIMING0 */
+ uint8_t data_setup_in_cycles;
+ uint8_t data_hold_in_cycles;
+ uint8_t address_setup_in_cycles;
+
+ /* for HW_GPMI_TIMING1 */
+ uint16_t device_busy_timeout;
+#define GPMI_DEFAULT_BUSY_TIMEOUT 0x500 /* default busy timeout value */
+
+ /* for HW_GPMI_CTRL1 */
+ bool use_half_periods;
+ uint8_t sample_delay_factor;
+ uint8_t wrn_dly_sel;
+};
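+
+/*
+ * For illustration: with the default device_busy_timeout of 0x500
+ * (1280), the hardware waits 1280 * 4096 GPMI clock cycles before a
+ * Ready/Busy wait times out, so the wall-clock timeout depends on the
+ * current GPMI clock frequency.
+ */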
+
+/**
+ * struct timing_threshod - Timing threshold
+ * @max_data_setup_cycles: The maximum number of data setup cycles that
+ * can be expressed in the hardware.
+ * @internal_data_setup_in_ns: The time, in ns, that the NFC hardware requires
+ * for data read internal setup. In the Reference
+ * Manual, see the chapter "High-Speed NAND
+ * Timing" for more details.
+ * @max_sample_delay_factor: The maximum sample delay factor that can be
+ * expressed in the hardware.
+ * @max_dll_clock_period_in_ns: The maximum period of the GPMI clock that the
+ * sample delay DLL hardware can possibly work
+ * with (the DLL is unusable with longer periods).
+ * If the full-cycle period is greater than HALF
+ * this value, the DLL must be configured to use
+ * half-periods.
+ * @max_dll_delay_in_ns: The maximum amount of delay, in ns, that the
+ * DLL can implement.
+ * @clock_frequency_in_hz: The clock frequency, in Hz, during the current
+ * I/O transaction. If no I/O transaction is in
+ * progress, this is the clock frequency during
+ * the most recent I/O transaction.
+ */
+struct timing_threshod {
+ const unsigned int max_chip_count;
+ const unsigned int max_data_setup_cycles;
+ const unsigned int internal_data_setup_in_ns;
+ const unsigned int max_sample_delay_factor;
+ const unsigned int max_dll_clock_period_in_ns;
+ const unsigned int max_dll_delay_in_ns;
+ unsigned long clock_frequency_in_hz;
+};
+
+/* Common Services */
+extern int common_nfc_set_geometry(struct gpmi_nand_data *);
+extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+extern void prepare_data_dma(struct gpmi_nand_data *,
+ enum dma_data_direction dr);
+extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+
+/* GPMI-NAND helper function library */
+extern int gpmi_init(struct gpmi_nand_data *);
+extern int gpmi_extra_init(struct gpmi_nand_data *);
+extern void gpmi_clear_bch(struct gpmi_nand_data *);
+extern void gpmi_dump_info(struct gpmi_nand_data *);
+extern int bch_set_geometry(struct gpmi_nand_data *);
+extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+extern int gpmi_send_command(struct gpmi_nand_data *);
+extern void gpmi_begin(struct gpmi_nand_data *);
+extern void gpmi_end(struct gpmi_nand_data *);
+extern int gpmi_read_data(struct gpmi_nand_data *);
+extern int gpmi_send_data(struct gpmi_nand_data *);
+extern int gpmi_send_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+extern int gpmi_read_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+
+void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
+ const u8 *src, size_t src_bit_off,
+ size_t nbits);
+
+/* BCH : Status Block Completion Codes */
+#define STATUS_GOOD 0x00
+#define STATUS_ERASED 0xff
+#define STATUS_UNCORRECTABLE 0xfe
+
+/* Use the devdata to distinguish the different architectures. */
+#define GPMI_IS_MX23(x) ((x)->devdata->type == IS_MX23)
+#define GPMI_IS_MX28(x) ((x)->devdata->type == IS_MX28)
+#define GPMI_IS_MX6Q(x) ((x)->devdata->type == IS_MX6Q)
+#define GPMI_IS_MX6SX(x) ((x)->devdata->type == IS_MX6SX)
+
+#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x))
+#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
new file mode 100644
index 000000000..82114cdc8
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -0,0 +1,187 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_GPMI_REGS_H
+#define __GPMI_NAND_GPMI_REGS_H
+
+#define HW_GPMI_CTRL0 0x00000000
+#define HW_GPMI_CTRL0_SET 0x00000004
+#define HW_GPMI_CTRL0_CLR 0x00000008
+#define HW_GPMI_CTRL0_TOG 0x0000000c
+
+#define BP_GPMI_CTRL0_COMMAND_MODE 24
+#define BM_GPMI_CTRL0_COMMAND_MODE (3 << BP_GPMI_CTRL0_COMMAND_MODE)
+#define BF_GPMI_CTRL0_COMMAND_MODE(v) \
+ (((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
+#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE 0x0
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ 0x1
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE 0x2
+#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY 0x3
+
+#define BM_GPMI_CTRL0_WORD_LENGTH (1 << 23)
+#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT 0x0
+#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT 0x1
+
+/*
+ * Difference in LOCK_CS between imx23 and imx28:
+ * this bit may impact power consumption, so some chips do not set it.
+ */
+#define MX23_BP_GPMI_CTRL0_LOCK_CS 22
+#define MX28_BP_GPMI_CTRL0_LOCK_CS 27
+#define LOCK_CS_ENABLE 0x1
+#define BF_GPMI_CTRL0_LOCK_CS(v, x) 0x0
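+
+/*
+ * Note: BF_GPMI_CTRL0_LOCK_CS() expands to 0, so the driver never
+ * actually sets LOCK_CS; the per-SoC bit positions above appear to be
+ * kept for reference only.
+ */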
+
+/* Difference in CS between imx23 and imx28 */
+#define BP_GPMI_CTRL0_CS 20
+#define MX23_BM_GPMI_CTRL0_CS (3 << BP_GPMI_CTRL0_CS)
+#define MX28_BM_GPMI_CTRL0_CS (7 << BP_GPMI_CTRL0_CS)
+#define BF_GPMI_CTRL0_CS(v, x) (((v) << BP_GPMI_CTRL0_CS) & \
+ (GPMI_IS_MX23((x)) \
+ ? MX23_BM_GPMI_CTRL0_CS \
+ : MX28_BM_GPMI_CTRL0_CS))
+
+#define BP_GPMI_CTRL0_ADDRESS 17
+#define BM_GPMI_CTRL0_ADDRESS (3 << BP_GPMI_CTRL0_ADDRESS)
+#define BF_GPMI_CTRL0_ADDRESS(v) \
+ (((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
+#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA 0x0
+#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE 0x1
+#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE 0x2
+
+#define BM_GPMI_CTRL0_ADDRESS_INCREMENT (1 << 16)
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED 0x0
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED 0x1
+
+#define BP_GPMI_CTRL0_XFER_COUNT 0
+#define BM_GPMI_CTRL0_XFER_COUNT (0xffff << BP_GPMI_CTRL0_XFER_COUNT)
+#define BF_GPMI_CTRL0_XFER_COUNT(v) \
+ (((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
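+
+/*
+ * The BP_/BM_/BF_ triplets above follow the usual mxs convention:
+ * BP_* is the bit position, BM_* the mask at that position, and
+ * BF_*(v) shifts a value into place and masks it. For illustration,
+ * BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_ALE) evaluates to
+ * (0x2 << 17) & (0x3 << 17) = 0x40000.
+ */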
+
+#define HW_GPMI_COMPARE 0x00000010
+
+#define HW_GPMI_ECCCTRL 0x00000020
+#define HW_GPMI_ECCCTRL_SET 0x00000024
+#define HW_GPMI_ECCCTRL_CLR 0x00000028
+#define HW_GPMI_ECCCTRL_TOG 0x0000002c
+
+#define BP_GPMI_ECCCTRL_ECC_CMD 13
+#define BM_GPMI_ECCCTRL_ECC_CMD (3 << BP_GPMI_ECCCTRL_ECC_CMD)
+#define BF_GPMI_ECCCTRL_ECC_CMD(v) \
+ (((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE 0x0
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE 0x1
+
+#define BM_GPMI_ECCCTRL_ENABLE_ECC (1 << 12)
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE 0x1
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE 0x0
+
+#define BP_GPMI_ECCCTRL_BUFFER_MASK 0
+#define BM_GPMI_ECCCTRL_BUFFER_MASK (0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
+#define BF_GPMI_ECCCTRL_BUFFER_MASK(v) \
+ (((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY 0x100
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 0x1FF
+
+#define HW_GPMI_ECCCOUNT 0x00000030
+#define HW_GPMI_PAYLOAD 0x00000040
+#define HW_GPMI_AUXILIARY 0x00000050
+#define HW_GPMI_CTRL1 0x00000060
+#define HW_GPMI_CTRL1_SET 0x00000064
+#define HW_GPMI_CTRL1_CLR 0x00000068
+#define HW_GPMI_CTRL1_TOG 0x0000006c
+
+#define BP_GPMI_CTRL1_DECOUPLE_CS 24
+#define BM_GPMI_CTRL1_DECOUPLE_CS (1 << BP_GPMI_CTRL1_DECOUPLE_CS)
+
+#define BP_GPMI_CTRL1_WRN_DLY_SEL 22
+#define BM_GPMI_CTRL1_WRN_DLY_SEL (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
+#define BF_GPMI_CTRL1_WRN_DLY_SEL(v) \
+ (((v) << BP_GPMI_CTRL1_WRN_DLY_SEL) & BM_GPMI_CTRL1_WRN_DLY_SEL)
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS 0x0
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_6_TO_10NS 0x1
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3
+
+#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
+
+#define BP_GPMI_CTRL1_DLL_ENABLE 17
+#define BM_GPMI_CTRL1_DLL_ENABLE (1 << BP_GPMI_CTRL1_DLL_ENABLE)
+
+#define BP_GPMI_CTRL1_HALF_PERIOD 16
+#define BM_GPMI_CTRL1_HALF_PERIOD (1 << BP_GPMI_CTRL1_HALF_PERIOD)
+
+#define BP_GPMI_CTRL1_RDN_DELAY 12
+#define BM_GPMI_CTRL1_RDN_DELAY (0xf << BP_GPMI_CTRL1_RDN_DELAY)
+#define BF_GPMI_CTRL1_RDN_DELAY(v) \
+ (((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
+
+#define BM_GPMI_CTRL1_DEV_RESET (1 << 3)
+#define BV_GPMI_CTRL1_DEV_RESET__ENABLED 0x0
+#define BV_GPMI_CTRL1_DEV_RESET__DISABLED 0x1
+
+#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY (1 << 2)
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW 0x0
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH 0x1
+
+#define BM_GPMI_CTRL1_CAMERA_MODE (1 << 1)
+#define BV_GPMI_CTRL1_GPMI_MODE__NAND 0x0
+#define BV_GPMI_CTRL1_GPMI_MODE__ATA 0x1
+
+#define BM_GPMI_CTRL1_GPMI_MODE (1 << 0)
+
+#define HW_GPMI_TIMING0 0x00000070
+
+#define BP_GPMI_TIMING0_ADDRESS_SETUP 16
+#define BM_GPMI_TIMING0_ADDRESS_SETUP (0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
+#define BF_GPMI_TIMING0_ADDRESS_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
+
+#define BP_GPMI_TIMING0_DATA_HOLD 8
+#define BM_GPMI_TIMING0_DATA_HOLD (0xff << BP_GPMI_TIMING0_DATA_HOLD)
+#define BF_GPMI_TIMING0_DATA_HOLD(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
+
+#define BP_GPMI_TIMING0_DATA_SETUP 0
+#define BM_GPMI_TIMING0_DATA_SETUP (0xff << BP_GPMI_TIMING0_DATA_SETUP)
+#define BF_GPMI_TIMING0_DATA_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
+
+#define HW_GPMI_TIMING1 0x00000080
+#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
+#define BM_GPMI_TIMING1_BUSY_TIMEOUT (0xffff << BP_GPMI_TIMING1_BUSY_TIMEOUT)
+#define BF_GPMI_TIMING1_BUSY_TIMEOUT(v) \
+ (((v) << BP_GPMI_TIMING1_BUSY_TIMEOUT) & BM_GPMI_TIMING1_BUSY_TIMEOUT)
+
+#define HW_GPMI_TIMING2 0x00000090
+#define HW_GPMI_DATA 0x000000a0
+
+/* MX28 uses this to detect READY. */
+#define HW_GPMI_STAT 0x000000b0
+#define MX28_BP_GPMI_STAT_READY_BUSY 24
+#define MX28_BM_GPMI_STAT_READY_BUSY (0xff << MX28_BP_GPMI_STAT_READY_BUSY)
+#define MX28_BF_GPMI_STAT_READY_BUSY(v) \
+ (((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
+
+/* MX23 uses this to detect READY. */
+#define HW_GPMI_DEBUG 0x000000c0
+#define MX23_BP_GPMI_DEBUG_READY0 28
+#define MX23_BM_GPMI_DEBUG_READY0 (1 << MX23_BP_GPMI_DEBUG_READY0)
+#endif
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
new file mode 100644
index 000000000..8dcc7b8fe
--- /dev/null
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -0,0 +1,890 @@
+/*
+ * Hisilicon NAND Flash controller driver
+ *
+ * Copyright © 2012-2014 HiSilicon Technologies Co., Ltd.
+ * http://www.hisilicon.com
+ *
+ * Author: Zhou Wang <wangzhou.bry@gmail.com>
+ * The initial developer of the original code is Zhiyong Cai
+ * <caizhiyong@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sizes.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/nand.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+
+#define HINFC504_MAX_CHIP (4)
+#define HINFC504_W_LATCH (5)
+#define HINFC504_R_LATCH (7)
+#define HINFC504_RW_LATCH (3)
+
+#define HINFC504_NFC_TIMEOUT (2 * HZ)
+#define HINFC504_NFC_PM_TIMEOUT (1 * HZ)
+#define HINFC504_NFC_DMA_TIMEOUT (5 * HZ)
+#define HINFC504_CHIP_DELAY (25)
+
+#define HINFC504_REG_BASE_ADDRESS_LEN (0x100)
+#define HINFC504_BUFFER_BASE_ADDRESS_LEN (2048 + 128)
+
+#define HINFC504_ADDR_CYCLE_MASK 0x4
+
+#define HINFC504_CON 0x00
+#define HINFC504_CON_OP_MODE_NORMAL BIT(0)
+#define HINFC504_CON_PAGEISZE_SHIFT (1)
+#define HINFC504_CON_PAGESIZE_MASK (0x07)
+#define HINFC504_CON_BUS_WIDTH BIT(4)
+#define HINFC504_CON_READY_BUSY_SEL BIT(8)
+#define HINFC504_CON_ECCTYPE_SHIFT (9)
+#define HINFC504_CON_ECCTYPE_MASK (0x07)
+
+#define HINFC504_PWIDTH 0x04
+#define SET_HINFC504_PWIDTH(_w_lcnt, _r_lcnt, _rw_hcnt) \
+ ((_w_lcnt) | (((_r_lcnt) & 0x0F) << 4) | (((_rw_hcnt) & 0x0F) << 8))
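+
+/*
+ * For illustration: SET_HINFC504_PWIDTH() packs the write latch count
+ * into bits [3:0], the read latch count into bits [7:4] and the
+ * read/write high count into bits [11:8]; with the defaults above,
+ * SET_HINFC504_PWIDTH(5, 7, 3) == 0x375.
+ */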
+
+#define HINFC504_CMD 0x0C
+#define HINFC504_ADDRL 0x10
+#define HINFC504_ADDRH 0x14
+#define HINFC504_DATA_NUM 0x18
+
+#define HINFC504_OP 0x1C
+#define HINFC504_OP_READ_DATA_EN BIT(1)
+#define HINFC504_OP_WAIT_READY_EN BIT(2)
+#define HINFC504_OP_CMD2_EN BIT(3)
+#define HINFC504_OP_WRITE_DATA_EN BIT(4)
+#define HINFC504_OP_ADDR_EN BIT(5)
+#define HINFC504_OP_CMD1_EN BIT(6)
+#define HINFC504_OP_NF_CS_SHIFT (7)
+#define HINFC504_OP_NF_CS_MASK (3)
+#define HINFC504_OP_ADDR_CYCLE_SHIFT (9)
+#define HINFC504_OP_ADDR_CYCLE_MASK (7)
+
+#define HINFC504_STATUS 0x20
+#define HINFC504_READY BIT(0)
+
+#define HINFC504_INTEN 0x24
+#define HINFC504_INTEN_DMA BIT(9)
+#define HINFC504_INTEN_UE BIT(6)
+#define HINFC504_INTEN_CE BIT(5)
+
+#define HINFC504_INTS 0x28
+#define HINFC504_INTS_DMA BIT(9)
+#define HINFC504_INTS_UE BIT(6)
+#define HINFC504_INTS_CE BIT(5)
+
+#define HINFC504_INTCLR 0x2C
+#define HINFC504_INTCLR_DMA BIT(9)
+#define HINFC504_INTCLR_UE BIT(6)
+#define HINFC504_INTCLR_CE BIT(5)
+
+#define HINFC504_ECC_STATUS 0x5C
+#define HINFC504_ECC_16_BIT_SHIFT 12
+
+#define HINFC504_DMA_CTRL 0x60
+#define HINFC504_DMA_CTRL_DMA_START BIT(0)
+#define HINFC504_DMA_CTRL_WE BIT(1)
+#define HINFC504_DMA_CTRL_DATA_AREA_EN BIT(2)
+#define HINFC504_DMA_CTRL_OOB_AREA_EN BIT(3)
+#define HINFC504_DMA_CTRL_BURST4_EN BIT(4)
+#define HINFC504_DMA_CTRL_BURST8_EN BIT(5)
+#define HINFC504_DMA_CTRL_BURST16_EN BIT(6)
+#define HINFC504_DMA_CTRL_ADDR_NUM_SHIFT (7)
+#define HINFC504_DMA_CTRL_ADDR_NUM_MASK (1)
+#define HINFC504_DMA_CTRL_CS_SHIFT (8)
+#define HINFC504_DMA_CTRL_CS_MASK (0x03)
+
+#define HINFC504_DMA_ADDR_DATA 0x64
+#define HINFC504_DMA_ADDR_OOB 0x68
+
+#define HINFC504_DMA_LEN 0x6C
+#define HINFC504_DMA_LEN_OOB_SHIFT (16)
+#define HINFC504_DMA_LEN_OOB_MASK (0xFFF)
+
+#define HINFC504_DMA_PARA 0x70
+#define HINFC504_DMA_PARA_DATA_RW_EN BIT(0)
+#define HINFC504_DMA_PARA_OOB_RW_EN BIT(1)
+#define HINFC504_DMA_PARA_DATA_EDC_EN BIT(2)
+#define HINFC504_DMA_PARA_OOB_EDC_EN BIT(3)
+#define HINFC504_DMA_PARA_DATA_ECC_EN BIT(4)
+#define HINFC504_DMA_PARA_OOB_ECC_EN BIT(5)
+
+#define HINFC_VERSION 0x74
+#define HINFC504_LOG_READ_ADDR 0x7C
+#define HINFC504_LOG_READ_LEN 0x80
+
+#define HINFC504_NANDINFO_LEN 0x10
+
+struct hinfc_host {
+ struct nand_chip chip;
+ struct mtd_info mtd;
+ struct device *dev;
+ void __iomem *iobase;
+ void __iomem *mmio;
+ struct completion cmd_complete;
+ unsigned int offset;
+ unsigned int command;
+ int chipselect;
+ unsigned int addr_cycle;
+ u32 addr_value[2];
+ u32 cache_addr_value[2];
+ char *buffer;
+ dma_addr_t dma_buffer;
+ dma_addr_t dma_oob;
+ int version;
+ unsigned int irq_status; /* interrupt status */
+};
+
+static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg)
+{
+ return readl(host->iobase + reg);
+}
+
+static inline void hinfc_write(struct hinfc_host *host, unsigned int value,
+ unsigned int reg)
+{
+ writel(value, host->iobase + reg);
+}
+
+static void wait_controller_finished(struct hinfc_host *host)
+{
+ unsigned long timeout = jiffies + HINFC504_NFC_TIMEOUT;
+ int val;
+
+ while (time_before(jiffies, timeout)) {
+ val = hinfc_read(host, HINFC504_STATUS);
+ if (host->command == NAND_CMD_ERASE2) {
+ /* erase: poll until the NFC reports ready */
+ while (!(val & HINFC504_READY)) {
+ usleep_range(500, 1000);
+ val = hinfc_read(host, HINFC504_STATUS);
+ }
+ return;
+ }
+
+ if (val & HINFC504_READY)
+ return;
+ }
+
+ /* command wait timed out */
+ dev_err(host->dev, "Timeout waiting for NAND controller to finish command\n");
+}
+
+static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev)
+{
+ struct mtd_info *mtd = &host->mtd;
+ struct nand_chip *chip = mtd->priv;
+ unsigned long val;
+ int ret;
+
+ hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
+ hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
+
+ if (chip->ecc.mode == NAND_ECC_NONE) {
+ hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
+ << HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
+
+ hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
+ | HINFC504_DMA_PARA_OOB_RW_EN, HINFC504_DMA_PARA);
+ } else {
+ if (host->command == NAND_CMD_READOOB)
+ hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN
+ | HINFC504_DMA_PARA_OOB_EDC_EN
+ | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
+ else
+ hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
+ | HINFC504_DMA_PARA_OOB_RW_EN
+ | HINFC504_DMA_PARA_DATA_EDC_EN
+ | HINFC504_DMA_PARA_OOB_EDC_EN
+ | HINFC504_DMA_PARA_DATA_ECC_EN
+ | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
+
+ }
+
+ val = (HINFC504_DMA_CTRL_DMA_START | HINFC504_DMA_CTRL_BURST4_EN
+ | HINFC504_DMA_CTRL_BURST8_EN | HINFC504_DMA_CTRL_BURST16_EN
+ | HINFC504_DMA_CTRL_DATA_AREA_EN | HINFC504_DMA_CTRL_OOB_AREA_EN
+ | ((host->addr_cycle == 4 ? 1 : 0)
+ << HINFC504_DMA_CTRL_ADDR_NUM_SHIFT)
+ | ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK)
+ << HINFC504_DMA_CTRL_CS_SHIFT));
+
+ if (todev)
+ val |= HINFC504_DMA_CTRL_WE;
+
+ init_completion(&host->cmd_complete);
+
+ hinfc_write(host, val, HINFC504_DMA_CTRL);
+ ret = wait_for_completion_timeout(&host->cmd_complete,
+ HINFC504_NFC_DMA_TIMEOUT);
+
+ if (!ret) {
+ dev_err(host->dev, "DMA operation (irq) timed out!\n");
+ /* sanity check */
+ val = hinfc_read(host, HINFC504_DMA_CTRL);
+ if (!(val & HINFC504_DMA_CTRL_DMA_START))
+ dev_err(host->dev, "DMA already finished, but without an irq ACK!\n");
+ else
+ dev_err(host->dev, "DMA really timed out!\n");
+ }
+}
+
+static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host)
+{
+ host->addr_value[0] &= 0xffff0000;
+
+ hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+ hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
+ hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN,
+ HINFC504_CMD);
+
+ hisi_nfc_dma_transfer(host, 1);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+
+ if ((host->addr_value[0] == host->cache_addr_value[0]) &&
+ (host->addr_value[1] == host->cache_addr_value[1]))
+ return 0;
+
+ host->addr_value[0] &= 0xffff0000;
+
+ hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+ hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
+ hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0,
+ HINFC504_CMD);
+
+ hinfc_write(host, 0, HINFC504_LOG_READ_ADDR);
+ hinfc_write(host, mtd->writesize + mtd->oobsize,
+ HINFC504_LOG_READ_LEN);
+
+ hisi_nfc_dma_transfer(host, 0);
+
+ host->cache_addr_value[0] = host->addr_value[0];
+ host->cache_addr_value[1] = host->addr_value[1];
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_erase(struct hinfc_host *host)
+{
+ hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+ hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1,
+ HINFC504_CMD);
+
+ hinfc_write(host, HINFC504_OP_WAIT_READY_EN
+ | HINFC504_OP_CMD2_EN
+ | HINFC504_OP_CMD1_EN
+ | HINFC504_OP_ADDR_EN
+ | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT)
+ | ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK)
+ << HINFC504_OP_ADDR_CYCLE_SHIFT),
+ HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_readid(struct hinfc_host *host)
+{
+ hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
+ hinfc_write(host, NAND_CMD_READID, HINFC504_CMD);
+ hinfc_write(host, 0, HINFC504_ADDRL);
+
+ hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN
+ | HINFC504_OP_READ_DATA_EN
+ | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT)
+ | 1 << HINFC504_OP_ADDR_CYCLE_SHIFT, HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_status(struct hinfc_host *host)
+{
+ hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
+ hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD);
+ hinfc_write(host, HINFC504_OP_CMD1_EN
+ | HINFC504_OP_READ_DATA_EN
+ | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT),
+ HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect)
+{
+ hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD);
+
+ hinfc_write(host, HINFC504_OP_CMD1_EN
+ | ((chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT)
+ | HINFC504_OP_WAIT_READY_EN,
+ HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static void hisi_nfc_select_chip(struct mtd_info *mtd, int chipselect)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct hinfc_host *host = chip->priv;
+
+ if (chipselect < 0)
+ return;
+
+ host->chipselect = chipselect;
+}
+
+static uint8_t hisi_nfc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct hinfc_host *host = chip->priv;
+
+ if (host->command == NAND_CMD_STATUS)
+ return *(uint8_t *)(host->mmio);
+
+ host->offset++;
+
+ if (host->command == NAND_CMD_READID)
+ return *(uint8_t *)(host->mmio + host->offset - 1);
+
+ return *(uint8_t *)(host->buffer + host->offset - 1);
+}
+
+static u16 hisi_nfc_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct hinfc_host *host = chip->priv;
+
+ host->offset += 2;
+ return *(u16 *)(host->buffer + host->offset - 2);
+}
+
+static void
+hisi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct hinfc_host *host = chip->priv;
+
+ memcpy(host->buffer + host->offset, buf, len);
+ host->offset += len;
+}
+
+static void hisi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct hinfc_host *host = chip->priv;
+
+ memcpy(buf, host->buffer + host->offset, len);
+ host->offset += len;
+}
+
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct hinfc_host *host = chip->priv;
+ unsigned int command = host->command;
+
+ host->addr_cycle = 0;
+ host->addr_value[0] = 0;
+ host->addr_value[1] = 0;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+
+ host->addr_value[0] = column & 0xffff;
+ host->addr_cycle = 2;
+ }
+ if (page_addr != -1) {
+ host->addr_value[0] |= (page_addr & 0xffff)
+ << (host->addr_cycle * 8);
+ host->addr_cycle += 2;
+ /* One more address cycle for devices > 128MiB */
+ if (chip->chipsize > (128 << 20)) {
+ host->addr_cycle += 1;
+ if (host->command == NAND_CMD_ERASE1)
+ host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
+ else
+ host->addr_value[1] |= ((page_addr >> 16) & 0xff);
+ }
+ }
+}
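+
+/*
+ * For illustration (hypothetical values): addressing page 0x20000 of
+ * a device larger than 128MiB at column 0 yields addr_cycle = 5,
+ * addr_value[0] = 0 and addr_value[1] = 0x2 (the third row-address
+ * byte), i.e. the usual 2 column + 3 row cycle layout.
+ */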
+
+static void hisi_nfc_cmdfunc(struct mtd_info *mtd, unsigned command, int column,
+ int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct hinfc_host *host = chip->priv;
+ int is_cache_invalid = 1;
+ unsigned int flag = 0;
+
+ host->command = command;
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ if (command == NAND_CMD_READ0)
+ host->offset = column;
+ else
+ host->offset = column + mtd->writesize;
+
+ is_cache_invalid = 0;
+ set_addr(mtd, column, page_addr);
+ hisi_nfc_send_cmd_readstart(host);
+ break;
+
+ case NAND_CMD_SEQIN:
+ host->offset = column;
+ set_addr(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_ERASE1:
+ set_addr(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ hisi_nfc_send_cmd_pageprog(host);
+ break;
+
+ case NAND_CMD_ERASE2:
+ hisi_nfc_send_cmd_erase(host);
+ break;
+
+ case NAND_CMD_READID:
+ host->offset = column;
+ memset(host->mmio, 0, 0x10);
+ hisi_nfc_send_cmd_readid(host);
+ break;
+
+ case NAND_CMD_STATUS:
+ flag = hinfc_read(host, HINFC504_CON);
+ if (chip->ecc.mode == NAND_ECC_HW)
+ hinfc_write(host,
+ flag & ~(HINFC504_CON_ECCTYPE_MASK <<
+ HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
+
+ host->offset = 0;
+ memset(host->mmio, 0, 0x10);
+ hisi_nfc_send_cmd_status(host);
+ hinfc_write(host, flag, HINFC504_CON);
+ break;
+
+ case NAND_CMD_RESET:
+ hisi_nfc_send_cmd_reset(host, host->chipselect);
+ break;
+
+ default:
+ dev_err(host->dev, "Error: unsupported cmd (cmd=%x, col=%x, page=%x)\n",
+ command, column, page_addr);
+ }
+
+ if (is_cache_invalid) {
+ host->cache_addr_value[0] = ~0;
+ host->cache_addr_value[1] = ~0;
+ }
+}
+
+static irqreturn_t hinfc_irq_handle(int irq, void *devid)
+{
+ struct hinfc_host *host = devid;
+ unsigned int flag;
+
+ flag = hinfc_read(host, HINFC504_INTS);
+ /* store interrupts state */
+ host->irq_status |= flag;
+
+ if (flag & HINFC504_INTS_DMA) {
+ hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR);
+ complete(&host->cmd_complete);
+ } else if (flag & HINFC504_INTS_CE) {
+ hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR);
+ } else if (flag & HINFC504_INTS_UE) {
+ hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ struct hinfc_host *host = chip->priv;
+ int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
+ int stat_1, stat_2;
+
+ chip->read_buf(mtd, buf, mtd->writesize);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* errors which cannot be corrected by ECC */
+ if (host->irq_status & HINFC504_INTS_UE) {
+ mtd->ecc_stats.failed++;
+ } else if (host->irq_status & HINFC504_INTS_CE) {
+ /* TODO: add support for other ECC modes */
+ switch (chip->ecc.strength) {
+ case 16:
+ status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >>
+ HINFC504_ECC_16_BIT_SHIFT & 0x0fff;
+ stat_2 = status_ecc & 0x3f;
+ stat_1 = status_ecc >> 6 & 0x3f;
+ stat = stat_1 + stat_2;
+ stat_max = max_t(int, stat_1, stat_2);
+ }
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(int, max_bitflips, stat_max);
+ }
+ host->irq_status = 0;
+
+ return max_bitflips;
+}
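+
+/*
+ * For illustration: for strength 16 the 12-bit field read from
+ * HINFC504_ECC_STATUS above holds two 6-bit bitflip counters. With a
+ * hypothetical raw value of 0x083, stat_2 = 3 and stat_1 = 2, so five
+ * bits were corrected in total and max_bitflips becomes 3.
+ */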
+
+static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct hinfc_host *host = chip->priv;
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (host->irq_status & HINFC504_INTS_UE) {
+ host->irq_status = 0;
+ return -EBADMSG;
+ }
+
+ host->irq_status = 0;
+ return 0;
+}
+
+static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+ chip->write_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static void hisi_nfc_host_init(struct hinfc_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ unsigned int flag = 0;
+
+ host->version = hinfc_read(host, HINFC_VERSION);
+ host->addr_cycle = 0;
+ host->addr_value[0] = 0;
+ host->addr_value[1] = 0;
+ host->cache_addr_value[0] = ~0;
+ host->cache_addr_value[1] = ~0;
+ host->chipselect = 0;
+
+ /* Default configuration: 2K page size, no ECC; adjusted later in probe. */
+ flag = HINFC504_CON_OP_MODE_NORMAL | HINFC504_CON_READY_BUSY_SEL
+ | ((0x001 & HINFC504_CON_PAGESIZE_MASK)
+ << HINFC504_CON_PAGEISZE_SHIFT)
+ | ((0x0 & HINFC504_CON_ECCTYPE_MASK)
+ << HINFC504_CON_ECCTYPE_SHIFT)
+ | ((chip->options & NAND_BUSWIDTH_16) ?
+ HINFC504_CON_BUS_WIDTH : 0);
+ hinfc_write(host, flag, HINFC504_CON);
+
+ memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN);
+
+ hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
+ HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
+
+ /* enable DMA irq */
+ hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
+}
+
+static struct nand_ecclayout nand_ecc_2K_16bits = {
+ .oobavail = 6,
+ .oobfree = { {2, 6} },
+};
+
+static int hisi_nfc_ecc_probe(struct hinfc_host *host)
+{
+ unsigned int flag;
+ int size, strength, ecc_bits;
+ struct device *dev = host->dev;
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = &host->mtd;
+ struct device_node *np = host->dev->of_node;
+
+ size = of_get_nand_ecc_step_size(np);
+ strength = of_get_nand_ecc_strength(np);
+ if (size != 1024) {
+ dev_err(dev, "unsupported ecc size: %d\n", size);
+ return -EINVAL;
+ }
+
+ if ((strength != 8) && (strength != 16) &&
+ (strength != 24) && (strength != 40)) {
+ dev_err(dev, "ecc size and strength do not match\n");
+ return -EINVAL;
+ }
+
+ chip->ecc.size = size;
+ chip->ecc.strength = strength;
+
+ chip->ecc.read_page = hisi_nand_read_page_hwecc;
+ chip->ecc.read_oob = hisi_nand_read_oob;
+ chip->ecc.write_page = hisi_nand_write_page_hwecc;
+
+ switch (chip->ecc.strength) {
+ case 16:
+ ecc_bits = 6;
+ if (mtd->writesize == 2048)
+ chip->ecc.layout = &nand_ecc_2K_16bits;
+
+ /* TODO: add more page size support */
+ break;
+
+ /* TODO: add more ecc strength support */
+ default:
+ dev_err(dev, "unsupported ecc strength: %d\n", chip->ecc.strength);
+ return -EINVAL;
+ }
+
+ flag = hinfc_read(host, HINFC504_CON);
+ /* add ecc type configure */
+ flag |= ((ecc_bits & HINFC504_CON_ECCTYPE_MASK)
+ << HINFC504_CON_ECCTYPE_SHIFT);
+ hinfc_write(host, flag, HINFC504_CON);
+
+ /* enable ecc irq */
+ flag = hinfc_read(host, HINFC504_INTEN) & 0xfff;
+ hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE,
+ HINFC504_INTEN);
+
+ return 0;
+}
+
+static int hisi_nfc_probe(struct platform_device *pdev)
+{
+ int ret = 0, irq, buswidth, flag, max_chips = HINFC504_MAX_CHIP;
+ struct device *dev = &pdev->dev;
+ struct hinfc_host *host;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct resource *res;
+ struct device_node *np = dev->of_node;
+ struct mtd_part_parser_data ppdata;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+ host->dev = dev;
+
+ platform_set_drvdata(pdev, host);
+ chip = &host->chip;
+ mtd = &host->mtd;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no IRQ resource defined\n");
+ ret = -ENXIO;
+ goto err_res;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->iobase)) {
+ ret = PTR_ERR(host->iobase);
+ goto err_res;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ host->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->mmio)) {
+ ret = PTR_ERR(host->mmio);
+ dev_err(dev, "devm_ioremap_resource[1] failed\n");
+ goto err_res;
+ }
+
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+ mtd->name = "hisi_nand";
+ mtd->dev.parent = &pdev->dev;
+
+ chip->priv = host;
+ chip->cmdfunc = hisi_nfc_cmdfunc;
+ chip->select_chip = hisi_nfc_select_chip;
+ chip->read_byte = hisi_nfc_read_byte;
+ chip->read_word = hisi_nfc_read_word;
+ chip->write_buf = hisi_nfc_write_buf;
+ chip->read_buf = hisi_nfc_read_buf;
+ chip->chip_delay = HINFC504_CHIP_DELAY;
+
+ chip->ecc.mode = of_get_nand_ecc_mode(np);
+
+ buswidth = of_get_nand_bus_width(np);
+ if (buswidth == 16)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ hisi_nfc_host_init(host);
+
+ ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ\n");
+ goto err_res;
+ }
+
+ ret = nand_scan_ident(mtd, max_chips, NULL);
+ if (ret) {
+ ret = -ENODEV;
+ goto err_res;
+ }
+
+ host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
+ &host->dma_buffer, GFP_KERNEL);
+ if (!host->buffer) {
+ ret = -ENOMEM;
+ goto err_res;
+ }
+
+ host->dma_oob = host->dma_buffer + mtd->writesize;
+ memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
+
+ flag = hinfc_read(host, HINFC504_CON);
+ flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
+ switch (mtd->writesize) {
+ case 2048:
+ flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT); break;
+ /*
+ * TODO: add more pagesize support,
+ * default pagesize has been set in hisi_nfc_host_init
+ */
+ default:
+ dev_err(dev, "unsupported page size: only 2KB NAND flash is supported\n");
+ ret = -EINVAL;
+ goto err_res;
+ }
+ hinfc_write(host, flag, HINFC504_CON);
+
+ if (chip->ecc.mode == NAND_ECC_HW)
+ hisi_nfc_ecc_probe(host);
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ goto err_res;
+ }
+
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (ret) {
+ dev_err(dev, "Err MTD partition=%d\n", ret);
+ goto err_mtd;
+ }
+
+ return 0;
+
+err_mtd:
+ nand_release(mtd);
+err_res:
+ return ret;
+}
+
+static int hisi_nfc_remove(struct platform_device *pdev)
+{
+ struct hinfc_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hisi_nfc_suspend(struct device *dev)
+{
+ struct hinfc_host *host = dev_get_drvdata(dev);
+ unsigned long timeout = jiffies + HINFC504_NFC_PM_TIMEOUT;
+
+ while (time_before(jiffies, timeout)) {
+ if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) &&
+ (hinfc_read(host, HINFC504_DMA_CTRL) &
+ HINFC504_DMA_CTRL_DMA_START)) {
+ cond_resched();
+ return 0;
+ }
+ }
+
+ dev_err(host->dev, "nand controller suspend timeout.\n");
+
+ return -EAGAIN;
+}
+
+static int hisi_nfc_resume(struct device *dev)
+{
+ int cs;
+ struct hinfc_host *host = dev_get_drvdata(dev);
+ struct nand_chip *chip = &host->chip;
+
+ for (cs = 0; cs < chip->numchips; cs++)
+ hisi_nfc_send_cmd_reset(host, cs);
+ hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
+ HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
+
+ return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(hisi_nfc_pm_ops, hisi_nfc_suspend, hisi_nfc_resume);
+
+static const struct of_device_id nfc_id_table[] = {
+ { .compatible = "hisilicon,504-nfc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, nfc_id_table);
+
+static struct platform_driver hisi_nfc_driver = {
+ .driver = {
+ .name = "hisi_nand",
+ .of_match_table = nfc_id_table,
+ .pm = &hisi_nfc_pm_ops,
+ },
+ .probe = hisi_nfc_probe,
+ .remove = hisi_nfc_remove,
+};
+
+module_platform_driver(hisi_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhou Wang");
+MODULE_AUTHOR("Zhiyong Cai");
+MODULE_DESCRIPTION("Hisilicon Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
new file mode 100644
index 000000000..ebf2cce04
--- /dev/null
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SoC NAND controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/gpio.h>
+
+#include <asm/mach-jz4740/jz4740_nand.h>
+
+#define JZ_REG_NAND_CTRL 0x50
+#define JZ_REG_NAND_ECC_CTRL 0x100
+#define JZ_REG_NAND_DATA 0x104
+#define JZ_REG_NAND_PAR0 0x108
+#define JZ_REG_NAND_PAR1 0x10C
+#define JZ_REG_NAND_PAR2 0x110
+#define JZ_REG_NAND_IRQ_STAT 0x114
+#define JZ_REG_NAND_IRQ_CTRL 0x118
+#define JZ_REG_NAND_ERR(x) (0x11C + ((x) << 2))
+
+#define JZ_NAND_ECC_CTRL_PAR_READY BIT(4)
+#define JZ_NAND_ECC_CTRL_ENCODING BIT(3)
+#define JZ_NAND_ECC_CTRL_RS BIT(2)
+#define JZ_NAND_ECC_CTRL_RESET BIT(1)
+#define JZ_NAND_ECC_CTRL_ENABLE BIT(0)
+
+#define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29))
+#define JZ_NAND_STATUS_PAD_FINISH BIT(4)
+#define JZ_NAND_STATUS_DEC_FINISH BIT(3)
+#define JZ_NAND_STATUS_ENC_FINISH BIT(2)
+#define JZ_NAND_STATUS_UNCOR_ERROR BIT(1)
+#define JZ_NAND_STATUS_ERROR BIT(0)
+
+#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1)
+#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1)
+#define JZ_NAND_CTRL_ASSERT_CHIP_MASK 0xaa
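+
+/*
+ * For illustration: each bank occupies a bit pair in the control
+ * register; bank 0 is enabled via bit 0 and asserted via bit 1, bank
+ * 3 via bits 6 and 7. The 0xaa mask therefore covers exactly the four
+ * assert bits.
+ */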
+
+#define JZ_NAND_MEM_CMD_OFFSET 0x08000
+#define JZ_NAND_MEM_ADDR_OFFSET 0x10000
+
+struct jz_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ void __iomem *base;
+ struct resource *mem;
+
+ unsigned char banks[JZ_NAND_NUM_BANKS];
+ void __iomem *bank_base[JZ_NAND_NUM_BANKS];
+ struct resource *bank_mem[JZ_NAND_NUM_BANKS];
+
+ int selected_bank;
+
+ struct gpio_desc *busy_gpio;
+ bool is_reading;
+};
+
+static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct jz_nand, mtd);
+}
+
+static void jz_nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ struct nand_chip *chip = mtd->priv;
+ uint32_t ctrl;
+ int banknr;
+
+ ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
+ ctrl &= ~JZ_NAND_CTRL_ASSERT_CHIP_MASK;
+
+ if (chipnr == -1) {
+ banknr = -1;
+ } else {
+ banknr = nand->banks[chipnr] - 1;
+ chip->IO_ADDR_R = nand->bank_base[banknr];
+ chip->IO_ADDR_W = nand->bank_base[banknr];
+ }
+ writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+
+ nand->selected_bank = banknr;
+}
+
+static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ struct nand_chip *chip = mtd->priv;
+ uint32_t reg;
+ void __iomem *bank_base = nand->bank_base[nand->selected_bank];
+
+ BUG_ON(nand->selected_bank < 0);
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE));
+ if (ctrl & NAND_ALE)
+ bank_base += JZ_NAND_MEM_ADDR_OFFSET;
+ else if (ctrl & NAND_CLE)
+ bank_base += JZ_NAND_MEM_CMD_OFFSET;
+ chip->IO_ADDR_W = bank_base;
+
+ reg = readl(nand->base + JZ_REG_NAND_CTRL);
+ if (ctrl & NAND_NCE)
+ reg |= JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank);
+ else
+ reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank);
+ writel(reg, nand->base + JZ_REG_NAND_CTRL);
+ }
+ if (dat != NAND_CMD_NONE)
+ writeb(dat, chip->IO_ADDR_W);
+}
+
+static int jz_nand_dev_ready(struct mtd_info *mtd)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ return gpiod_get_value_cansleep(nand->busy_gpio);
+}
+
+static void jz_nand_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ uint32_t reg;
+
+ writel(0, nand->base + JZ_REG_NAND_IRQ_STAT);
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ reg |= JZ_NAND_ECC_CTRL_RESET;
+ reg |= JZ_NAND_ECC_CTRL_ENABLE;
+ reg |= JZ_NAND_ECC_CTRL_RS;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ reg &= ~JZ_NAND_ECC_CTRL_ENCODING;
+ nand->is_reading = true;
+ break;
+ case NAND_ECC_WRITE:
+ reg |= JZ_NAND_ECC_CTRL_ENCODING;
+ nand->is_reading = false;
+ break;
+ default:
+ break;
+ }
+
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static int jz_nand_calculate_ecc_rs(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ uint32_t reg, status;
+ int i;
+ unsigned int timeout = 1000;
+ static uint8_t empty_block_ecc[] = {0xcd, 0x9d, 0x90, 0x58, 0xf4,
+ 0x8b, 0xff, 0xb7, 0x6f};
+
+ if (nand->is_reading)
+ return 0;
+
+ do {
+ status = readl(nand->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -1;
+
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ for (i = 0; i < 9; ++i)
+ ecc_code[i] = readb(nand->base + JZ_REG_NAND_PAR0 + i);
+
+ /*
+ * If the written data is completely 0xff, we also want to write
+ * 0xff as the ECC, otherwise we will get in trouble when doing
+ * subpage writes.
+ */
+ if (memcmp(ecc_code, empty_block_ecc, 9) == 0)
+ memset(ecc_code, 0xff, 9);
+
+ return 0;
+}
+
+static void jz_nand_correct_data(uint8_t *dat, int index, int mask)
+{
+ int offset = index & 0x7;
+ uint16_t data;
+
+ index += (index >> 3);
+
+ data = dat[index];
+ data |= dat[index+1] << 8;
+
+ mask ^= (data >> offset) & 0x1ff;
+ data &= ~(0x1ff << offset);
+ data |= (mask << offset);
+
+ dat[index] = data & 0xff;
+ dat[index+1] = (data >> 8) & 0xff;
+}
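+
+/*
+ * For illustration: the RS decoder reports 9-bit symbol indices, so a
+ * symbol may straddle a byte boundary. For index = 10 the offset is
+ * 2 and the adjusted byte index is 10 + (10 >> 3) = 11, so the 9-bit
+ * error mask is XORed into bits [10:2] of the 16-bit window formed by
+ * dat[11] and dat[12].
+ */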
+
+static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ int i, error_count, index;
+ uint32_t reg, status, error;
+ uint32_t t;
+ unsigned int timeout = 1000;
+
+ t = read_ecc[0];
+
+ if (t == 0xff) {
+ for (i = 1; i < 9; ++i)
+ t &= read_ecc[i];
+
+ t &= dat[0];
+ t &= dat[nand->chip.ecc.size / 2];
+ t &= dat[nand->chip.ecc.size - 1];
+
+ if (t == 0xff) {
+ for (i = 1; i < nand->chip.ecc.size - 1; ++i)
+ t &= dat[i];
+ if (t == 0xff)
+ return 0;
+ }
+ }
+
+ for (i = 0; i < 9; ++i)
+ writeb(read_ecc[i], nand->base + JZ_REG_NAND_PAR0 + i);
+
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_PAR_READY;
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ do {
+ status = readl(nand->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -1;
+
+ reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+ if (status & JZ_NAND_STATUS_ERROR) {
+ if (status & JZ_NAND_STATUS_UNCOR_ERROR)
+ return -1;
+
+ error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;
+
+ for (i = 0; i < error_count; ++i) {
+ error = readl(nand->base + JZ_REG_NAND_ERR(i));
+ index = ((error >> 16) & 0x1ff) - 1;
+ if (index >= 0 && index < 512)
+ jz_nand_correct_data(dat, index, error & 0x1ff);
+ }
+
+ return error_count;
+ }
+
+ return 0;
+}
+
+static int jz_nand_ioremap_resource(struct platform_device *pdev,
+ const char *name, struct resource **res, void *__iomem *base)
+{
+ int ret;
+
+ *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!*res) {
+ dev_err(&pdev->dev, "Failed to get platform %s memory\n", name);
+ ret = -ENXIO;
+ goto err;
+ }
+
+ *res = request_mem_region((*res)->start, resource_size(*res),
+ pdev->name);
+ if (!*res) {
+ dev_err(&pdev->dev, "Failed to request %s memory region\n", name);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ *base = ioremap((*res)->start, resource_size(*res));
+ if (!*base) {
+ dev_err(&pdev->dev, "Failed to ioremap %s memory region\n", name);
+ ret = -EBUSY;
+ goto err_release_mem;
+ }
+
+ return 0;
+
+err_release_mem:
+ release_mem_region((*res)->start, resource_size(*res));
+err:
+ *res = NULL;
+ *base = NULL;
+ return ret;
+}
+
+static inline void jz_nand_iounmap_resource(struct resource *res,
+ void __iomem *base)
+{
+ iounmap(base);
+ release_mem_region(res->start, resource_size(res));
+}
+
+static int jz_nand_detect_bank(struct platform_device *pdev,
+ struct jz_nand *nand, unsigned char bank,
+ size_t chipnr, uint8_t *nand_maf_id,
+ uint8_t *nand_dev_id)
+{
+ int ret;
+ int gpio;
+ char gpio_name[9];
+ char res_name[6];
+ uint32_t ctrl;
+ struct mtd_info *mtd = &nand->mtd;
+ struct nand_chip *chip = &nand->chip;
+
+ /* Request GPIO port. */
+ gpio = JZ_GPIO_MEM_CS0 + bank - 1;
+ sprintf(gpio_name, "NAND CS%d", bank);
+ ret = gpio_request(gpio, gpio_name);
+ if (ret) {
+ dev_warn(&pdev->dev,
+ "Failed to request %s gpio %d: %d\n",
+ gpio_name, gpio, ret);
+ goto notfound_gpio;
+ }
+
+ /* Request I/O resource. */
+ sprintf(res_name, "bank%d", bank);
+ ret = jz_nand_ioremap_resource(pdev, res_name,
+ &nand->bank_mem[bank - 1],
+ &nand->bank_base[bank - 1]);
+ if (ret)
+ goto notfound_resource;
+
+ /* Enable chip in bank. */
+ jz_gpio_set_function(gpio, JZ_GPIO_FUNC_MEM_CS0);
+ ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
+ ctrl |= JZ_NAND_CTRL_ENABLE_CHIP(bank - 1);
+ writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+
+ if (chipnr == 0) {
+ /* Detect first chip. */
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ goto notfound_id;
+
+ /* Retrieve the IDs from the first chip. */
+ chip->select_chip(mtd, 0);
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ *nand_maf_id = chip->read_byte(mtd);
+ *nand_dev_id = chip->read_byte(mtd);
+ } else {
+ /* Detect additional chip. */
+ chip->select_chip(mtd, chipnr);
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ if (*nand_maf_id != chip->read_byte(mtd)
+ || *nand_dev_id != chip->read_byte(mtd)) {
+ ret = -ENODEV;
+ goto notfound_id;
+ }
+
+ /* Update size of the MTD. */
+ chip->numchips++;
+ mtd->size += chip->chipsize;
+ }
+
+ dev_info(&pdev->dev, "Found chip %i on bank %i\n", chipnr, bank);
+ return 0;
+
+notfound_id:
+ dev_info(&pdev->dev, "No chip found on bank %i\n", bank);
+ ctrl &= ~(JZ_NAND_CTRL_ENABLE_CHIP(bank - 1));
+ writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+ jz_gpio_set_function(gpio, JZ_GPIO_FUNC_NONE);
+ jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+ nand->bank_base[bank - 1]);
+notfound_resource:
+ gpio_free(gpio);
+notfound_gpio:
+ return ret;
+}
+
+static int jz_nand_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct jz_nand *nand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ size_t chipnr, bank_idx;
+ uint8_t nand_maf_id = 0, nand_dev_id = 0;
+
+ nand = kzalloc(sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
+ if (ret)
+ goto err_free;
+
+ nand->busy_gpio = devm_gpiod_get_optional(&pdev->dev, "busy", GPIOD_IN);
+ if (IS_ERR(nand->busy_gpio)) {
+ ret = PTR_ERR(nand->busy_gpio);
+ dev_err(&pdev->dev, "Failed to request busy gpio: %d\n", ret);
+ goto err_iounmap_mmio;
+ }
+
+ mtd = &nand->mtd;
+ chip = &nand->chip;
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+ mtd->name = "jz4740-nand";
+
+ chip->ecc.hwctl = jz_nand_hwctl;
+ chip->ecc.calculate = jz_nand_calculate_ecc_rs;
+ chip->ecc.correct = jz_nand_correct_ecc_rs;
+ chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 9;
+ chip->ecc.strength = 4;
+
+ if (pdata)
+ chip->ecc.layout = pdata->ecc_layout;
+
+ chip->chip_delay = 50;
+ chip->cmd_ctrl = jz_nand_cmd_ctrl;
+ chip->select_chip = jz_nand_select_chip;
+
+ if (nand->busy_gpio)
+ chip->dev_ready = jz_nand_dev_ready;
+
+ platform_set_drvdata(pdev, nand);
+
+ /* We are going to autodetect NAND chips in the banks specified in the
+ * platform data. Although nand_scan_ident() can detect multiple chips,
+ * it requires those chips to be numbered consecutively, which is not
+ * always the case for external memory banks. And a fixed chip-to-bank
+ * mapping is not practical either, since for example Dingoo units
+ * produced at different times have NAND chips in different banks.
+ */
+ chipnr = 0;
+ for (bank_idx = 0; bank_idx < JZ_NAND_NUM_BANKS; bank_idx++) {
+ unsigned char bank;
+
+ /* If there is no platform data, look for NAND in bank 1,
+ * which is the most likely bank since it is the only one
+ * that can be booted from.
+ */
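+ /* Note: bank_idx ^ 1 probes bank 1 first (0 ^ 1 == 1) and then
+ * yields 0 (1 ^ 1 == 0), which terminates the loop below, so
+ * only bank 1 is tried when there is no platform data.
+ */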
+ bank = pdata ? pdata->banks[bank_idx] : bank_idx ^ 1;
+ if (bank == 0)
+ break;
+ if (bank > JZ_NAND_NUM_BANKS) {
+ dev_warn(&pdev->dev,
+ "Skipping non-existing bank: %d\n", bank);
+ continue;
+ }
+ /* The detection routine will directly or indirectly call
+ * jz_nand_select_chip(), so nand->banks has to contain the
+ * bank we're checking.
+ */
+ nand->banks[chipnr] = bank;
+ if (jz_nand_detect_bank(pdev, nand, bank, chipnr,
+ &nand_maf_id, &nand_dev_id) == 0)
+ chipnr++;
+ else
+ nand->banks[chipnr] = 0;
+ }
+ if (chipnr == 0) {
+ dev_err(&pdev->dev, "No NAND chips found\n");
+ goto err_iounmap_mmio;
+ }
+
+ if (pdata && pdata->ident_callback) {
+ pdata->ident_callback(pdev, chip, &pdata->partitions,
+ &pdata->num_partitions);
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to scan NAND\n");
+ goto err_unclaim_banks;
+ }
+
+ ret = mtd_device_parse_register(mtd, NULL, NULL,
+ pdata ? pdata->partitions : NULL,
+ pdata ? pdata->num_partitions : 0);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add mtd device\n");
+ goto err_nand_release;
+ }
+
+ dev_info(&pdev->dev, "Successfully registered JZ4740 NAND driver\n");
+
+ return 0;
+
+err_nand_release:
+ nand_release(mtd);
+err_unclaim_banks:
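+ /* Release the GPIO and I/O region of every bank that was
+ * successfully claimed by jz_nand_detect_bank() above.
+ */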
+ while (chipnr--) {
+ unsigned char bank = nand->banks[chipnr];
+ gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
+ jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+ nand->bank_base[bank - 1]);
+ }
+ writel(0, nand->base + JZ_REG_NAND_CTRL);
+err_iounmap_mmio:
+ jz_nand_iounmap_resource(nand->mem, nand->base);
+err_free:
+ kfree(nand);
+ return ret;
+}
+
+static int jz_nand_remove(struct platform_device *pdev)
+{
+ struct jz_nand *nand = platform_get_drvdata(pdev);
+ size_t i;
+
+ nand_release(&nand->mtd);
+
+ /* Deassert and disable all chips */
+ writel(0, nand->base + JZ_REG_NAND_CTRL);
+
+ for (i = 0; i < JZ_NAND_NUM_BANKS; ++i) {
+ unsigned char bank = nand->banks[i];
+ if (bank != 0) {
+ jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+ nand->bank_base[bank - 1]);
+ gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
+ }
+ }
+
+ jz_nand_iounmap_resource(nand->mem, nand->base);
+
+ kfree(nand);
+
+ return 0;
+}
+
+static struct platform_driver jz_nand_driver = {
+ .probe = jz_nand_probe,
+ .remove = jz_nand_remove,
+ .driver = {
+ .name = "jz4740-nand",
+ },
+};
+
+module_platform_driver(jz_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("NAND controller driver for JZ4740 SoC");
+MODULE_ALIAS("platform:jz4740-nand");
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
new file mode 100644
index 000000000..79c3b7801
--- /dev/null
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -0,0 +1,889 @@
+/*
+ * Driver for NAND MLC Controller in LPC32xx
+ *
+ * Author: Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 WORK Microwave GmbH
+ * Copyright © 2011, 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * NAND Flash Controller Operation:
+ * - Read: Auto Decode
+ * - Write: Auto Encode
+ * - Tested Page Sizes: 2048, 4096
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/lpc32xx_mlc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+
+#define DRV_NAME "lpc32xx_mlc"
+
+/**********************************************************************
+* MLC NAND controller register offsets
+**********************************************************************/
+
+#define MLC_BUFF(x) (x + 0x00000)
+#define MLC_DATA(x) (x + 0x08000)
+#define MLC_CMD(x) (x + 0x10000)
+#define MLC_ADDR(x) (x + 0x10004)
+#define MLC_ECC_ENC_REG(x) (x + 0x10008)
+#define MLC_ECC_DEC_REG(x) (x + 0x1000C)
+#define MLC_ECC_AUTO_ENC_REG(x) (x + 0x10010)
+#define MLC_ECC_AUTO_DEC_REG(x) (x + 0x10014)
+#define MLC_RPR(x) (x + 0x10018)
+#define MLC_WPR(x) (x + 0x1001C)
+#define MLC_RUBP(x) (x + 0x10020)
+#define MLC_ROBP(x) (x + 0x10024)
+#define MLC_SW_WP_ADD_LOW(x) (x + 0x10028)
+#define MLC_SW_WP_ADD_HIG(x) (x + 0x1002C)
+#define MLC_ICR(x) (x + 0x10030)
+#define MLC_TIME_REG(x) (x + 0x10034)
+#define MLC_IRQ_MR(x) (x + 0x10038)
+#define MLC_IRQ_SR(x) (x + 0x1003C)
+#define MLC_LOCK_PR(x) (x + 0x10044)
+#define MLC_ISR(x) (x + 0x10048)
+#define MLC_CEH(x) (x + 0x1004C)
+
+/**********************************************************************
+* MLC_CMD bit definitions
+**********************************************************************/
+#define MLCCMD_RESET 0xFF
+
+/**********************************************************************
+* MLC_ICR bit definitions
+**********************************************************************/
+#define MLCICR_WPROT (1 << 3)
+#define MLCICR_LARGEBLOCK (1 << 2)
+#define MLCICR_LONGADDR (1 << 1)
+#define MLCICR_16BIT (1 << 0) /* unsupported by LPC32x0! */
+
+/**********************************************************************
+* MLC_TIME_REG bit definitions
+**********************************************************************/
+#define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24)
+#define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19)
+#define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16)
+#define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12)
+#define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8)
+#define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4)
+#define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0)
+
+/**********************************************************************
+* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
+**********************************************************************/
+#define MLCIRQ_NAND_READY (1 << 5)
+#define MLCIRQ_CONTROLLER_READY (1 << 4)
+#define MLCIRQ_DECODE_FAILURE (1 << 3)
+#define MLCIRQ_DECODE_ERROR (1 << 2)
+#define MLCIRQ_ECC_READY (1 << 1)
+#define MLCIRQ_WRPROT_FAULT (1 << 0)
+
+/**********************************************************************
+* MLC_LOCK_PR bit definitions
+**********************************************************************/
+#define MLCLOCKPR_MAGIC 0xA25E
+
+/**********************************************************************
+* MLC_ISR bit definitions
+**********************************************************************/
+#define MLCISR_DECODER_FAILURE (1 << 6)
+#define MLCISR_ERRORS ((1 << 4) | (1 << 5))
+#define MLCISR_ERRORS_DETECTED (1 << 3)
+#define MLCISR_ECC_READY (1 << 2)
+#define MLCISR_CONTROLLER_READY (1 << 1)
+#define MLCISR_NAND_READY (1 << 0)
+
+/**********************************************************************
+* MLC_CEH bit definitions
+**********************************************************************/
+#define MLCCEH_NORMAL (1 << 0)
+
+struct lpc32xx_nand_cfg_mlc {
+ uint32_t tcea_delay;
+ uint32_t busy_delay;
+ uint32_t nand_ta;
+ uint32_t rd_high;
+ uint32_t rd_low;
+ uint32_t wr_high;
+ uint32_t wr_low;
+ int wp_gpio;
+ struct mtd_partition *parts;
+ unsigned num_parts;
+};
+
+static struct nand_ecclayout lpc32xx_nand_oob = {
+ .eccbytes = 40,
+ .eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
+ .oobfree = {
+ { .offset = 0,
+ .length = 6, },
+ { .offset = 16,
+ .length = 6, },
+ { .offset = 32,
+ .length = 6, },
+ { .offset = 48,
+ .length = 6, },
+ },
+};
+
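+/*
+ * Absolute-page BBT descriptors: pages 524224 and 524160 appear to be
+ * the last two 64-page blocks of a 1 GiB large-page device (524288
+ * pages in total).
+ */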
+static struct nand_bbt_descr lpc32xx_nand_bbt = {
+ .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+ NAND_BBT_WRITE,
+ .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
+ .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+ NAND_BBT_WRITE,
+ .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+struct lpc32xx_nand_host {
+ struct nand_chip nand_chip;
+ struct lpc32xx_mlc_platform_data *pdata;
+ struct clk *clk;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ int irq;
+ struct lpc32xx_nand_cfg_mlc *ncfg;
+ struct completion comp_nand;
+ struct completion comp_controller;
+ uint32_t llptr;
+ /*
+ * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
+ */
+ dma_addr_t oob_buf_phy;
+ /*
+ * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
+ */
+ uint8_t *oob_buf;
+ /* Physical address of DMA base address */
+ dma_addr_t io_base_phy;
+
+ struct completion comp_dma;
+ struct dma_chan *dma_chan;
+ struct dma_slave_config dma_slave_config;
+ struct scatterlist sgl;
+ uint8_t *dma_buf;
+ uint8_t *dummy_buf;
+ int mlcsubpages; /* number of 512bytes-subpages */
+};
+
+/*
+ * Activate/Deactivate DMA Operation:
+ *
+ * Using the PL080 DMA controller for transferring the 512-byte subpages
+ * instead of doing readl() / writel() in a loop slows it down significantly.
+ * Measurements via getnstimeofday() of 512-byte subpage reads reveal:
+ *
+ * - readl() of 128 x 32 bits in a loop: ~20us
+ * - DMA read of 512 bytes (32 bit, 4...128 word bursts): ~60us
+ * - DMA read of 512 bytes (32 bit, no bursts): ~100us
+ *
+ * This applies to the transfer itself. In the DMA case, only the
+ * wait_for_completion() is measured (DMA setup _not_ included).
+ *
+ * Note that the 512-byte subpage transfer is done directly from/to a
+ * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
+ * 2048-byte page) is spent waiting for the NAND IRQ anyway, i.e. for the
+ * NAND controller to transfer data between its internal buffer and the
+ * NAND chip.
+ *
+ * Therefore, using the PL080 DMA is disabled by default, for now.
+ */
+static int use_dma;
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+ uint32_t clkrate, tmp;
+
+ /* Reset MLC controller */
+ writel(MLCCMD_RESET, MLC_CMD(host->io_base));
+ udelay(1000);
+
+ /* Get base clock for MLC block */
+ clkrate = clk_get_rate(host->clk);
+ if (clkrate == 0)
+ clkrate = 104000000;
+
+ /* Unlock MLC_ICR
+ * (among other registers; it will be locked again automatically) */
+ writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+ /* Configure MLC Controller: Large Block, 5 Byte Address */
+ tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
+ writel(tmp, MLC_ICR(host->io_base));
+
+ /* Unlock MLC_TIME_REG
+ * (among other registers; it will be locked again automatically) */
+ writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+ /* Compute clock setup values, see LPC and NAND manual */
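+ /* Each ncfg timing field appears to be a rate in Hz: dividing the
+ * bus clock by it yields the number of clock cycles for that phase
+ * (most terms add 1 to round up rather than undercut the requested
+ * timing).
+ */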
+ tmp = 0;
+ tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
+ tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
+ tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
+ tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
+ tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
+ tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
+ tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
+ writel(tmp, MLC_TIME_REG(host->io_base));
+
+ /* Enable IRQ for CONTROLLER_READY and NAND_READY */
+ writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
+ MLC_IRQ_MR(host->io_base));
+
+ /* Normal nCE operation: nCE controlled by controller */
+ writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct lpc32xx_nand_host *host = nand_chip->priv;
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writel(cmd, MLC_CMD(host->io_base));
+ else
+ writel(cmd, MLC_ADDR(host->io_base));
+ }
+}
+
+/*
+ * Read Device Ready (NAND device _and_ controller ready)
+ */
+static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct lpc32xx_nand_host *host = nand_chip->priv;
+
+ if ((readb(MLC_ISR(host->io_base)) &
+ (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
+ (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
+ return 1;
+
+ return 0;
+}
+
+static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+{
+ uint8_t sr;
+
+ /* Clear interrupt flag by reading status */
+ sr = readb(MLC_IRQ_SR(host->io_base));
+ if (sr & MLCIRQ_NAND_READY)
+ complete(&host->comp_nand);
+ if (sr & MLCIRQ_CONTROLLER_READY)
+ complete(&host->comp_controller);
+
+ return IRQ_HANDLED;
+}
+
+static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
+ goto exit;
+
+ wait_for_completion(&host->comp_nand);
+
+ while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
+ /* Seems to be delayed sometimes by controller */
+ dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
+ cpu_relax();
+ }
+
+exit:
+ return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
+ struct nand_chip *chip)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
+ goto exit;
+
+ wait_for_completion(&host->comp_controller);
+
+ while (!(readb(MLC_ISR(host->io_base)) &
+ MLCISR_CONTROLLER_READY)) {
+ dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
+ cpu_relax();
+ }
+
+exit:
+ return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ lpc32xx_waitfunc_nand(mtd, chip);
+ lpc32xx_waitfunc_controller(mtd, chip);
+
+ return NAND_STATUS_READY;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
+ enum dma_transfer_direction dir)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ struct dma_async_tx_descriptor *desc;
+ int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int res;
+
+ sg_init_one(&host->sgl, mem, len);
+
+ res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ if (res != 1) {
+ dev_err(mtd->dev.parent, "Failed to map sg list\n");
+ return -ENXIO;
+ }
+ desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+ flags);
+ if (!desc) {
+ dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+ goto out1;
+ }
+
+ init_completion(&host->comp_dma);
+ desc->callback = lpc32xx_dma_complete_func;
+ desc->callback_param = &host->comp_dma;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma_chan);
+
+ wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
+
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return 0;
+out1:
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return -ENXIO;
+}
+
+static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ int i, j;
+ uint8_t *oobbuf = chip->oob_poi;
+ uint32_t mlc_isr;
+ int res;
+ uint8_t *dma_buf;
+ bool dma_mapped;
+
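+ /* A lowmem buffer can be handed to the DMA engine directly; any
+ * other buffer is read into the driver's bounce buffer instead and
+ * copied to the caller after the transfer.
+ */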
+ if ((void *)buf <= high_memory) {
+ dma_buf = buf;
+ dma_mapped = true;
+ } else {
+ dma_buf = host->dma_buf;
+ dma_mapped = false;
+ }
+
+ /* Writing Command and Address */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ /* For all sub-pages */
+ for (i = 0; i < host->mlcsubpages; i++) {
+ /* Start Auto Decode Command */
+ writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
+
+ /* Wait for Controller Ready */
+ lpc32xx_waitfunc_controller(mtd, chip);
+
+ /* Check ECC Error status */
+ mlc_isr = readl(MLC_ISR(host->io_base));
+ if (mlc_isr & MLCISR_DECODER_FAILURE) {
+ mtd->ecc_stats.failed++;
+ dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
+ } else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
+ mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
+ }
+
+ /* Read 512 + 16 Bytes */
+ if (use_dma) {
+ res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+ DMA_DEV_TO_MEM);
+ if (res)
+ return res;
+ } else {
+ for (j = 0; j < (512 >> 2); j++) {
+ *((uint32_t *)(buf)) =
+ readl(MLC_BUFF(host->io_base));
+ buf += 4;
+ }
+ }
+ for (j = 0; j < (16 >> 2); j++) {
+ *((uint32_t *)(oobbuf)) =
+ readl(MLC_BUFF(host->io_base));
+ oobbuf += 4;
+ }
+ }
+
+ if (use_dma && !dma_mapped)
+ memcpy(buf, dma_buf, mtd->writesize);
+
+ return 0;
+}
+
+static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ const uint8_t *oobbuf = chip->oob_poi;
+ uint8_t *dma_buf = (uint8_t *)buf;
+ int res;
+ int i, j;
+
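+ /* For DMA, a buffer outside lowmem cannot be mapped directly, so
+ * its contents are first copied into the bounce buffer.
+ */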
+ if (use_dma && (void *)buf >= high_memory) {
+ dma_buf = host->dma_buf;
+ memcpy(dma_buf, buf, mtd->writesize);
+ }
+
+ for (i = 0; i < host->mlcsubpages; i++) {
+ /* Start Encode */
+ writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
+
+ /* Write 512 + 6 Bytes to Buffer */
+ if (use_dma) {
+ res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+ DMA_MEM_TO_DEV);
+ if (res)
+ return res;
+ } else {
+ for (j = 0; j < (512 >> 2); j++) {
+ writel(*((uint32_t *)(buf)),
+ MLC_BUFF(host->io_base));
+ buf += 4;
+ }
+ }
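+ /* Write the 6 free OOB bytes of this subpage; the other 10 bytes
+ * of each 16-byte OOB chunk are ECC, generated by the controller
+ * during Auto Encode below.
+ */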
+ writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
+ oobbuf += 4;
+ writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
+ oobbuf += 12;
+
+ /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
+ writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
+
+ /* Wait for Controller Ready */
+ lpc32xx_waitfunc_controller(mtd, chip);
+ }
+ return 0;
+}
+
+static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Read whole page - necessary with MLC controller! */
+ lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);
+
+ return 0;
+}
+
+static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ /* Intentionally a no-op: a separate OOB write would conflict with
+ * the automatic LPC MLC ECC decoder! */
+ return 0;
+}
+
+/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
+static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
+{
+ /* Always enabled! */
+}
+
+static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ dma_cap_mask_t mask;
+
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+ "nand-mlc");
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Set direction to a sensible value even if the dmaengine driver
+ * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
+ * driver criticizes it as "alien transfer direction".
+ */
+ host->dma_slave_config.direction = DMA_DEV_TO_MEM;
+ host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.src_maxburst = 128;
+ host->dma_slave_config.dst_maxburst = 128;
+ /* DMA controller does flow control: */
+ host->dma_slave_config.device_fc = false;
+ host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
+ host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
+ if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+ dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+ goto out1;
+ }
+
+ return 0;
+out1:
+ dma_release_channel(host->dma_chan);
+ return -ENXIO;
+}
+
+static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
+{
+ struct lpc32xx_nand_cfg_mlc *ncfg;
+ struct device_node *np = dev->of_node;
+
+ ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+ if (!ncfg)
+ return NULL;
+
+ of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
+ of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
+ of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
+ of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
+ of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
+ of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
+ of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
+
+ if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
+ !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
+ !ncfg->wr_low) {
+ dev_err(dev, "chip parameters not specified correctly\n");
+ return NULL;
+ }
+
+ ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+ return ncfg;
+}
+
+/*
+ * Probe for NAND controller
+ */
+static int lpc32xx_nand_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct resource *rc;
+ int res;
+ struct mtd_part_parser_data ppdata = {};
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
+
+ host->io_base_phy = rc->start;
+
+ mtd = &host->mtd;
+ nand_chip = &host->nand_chip;
+ if (pdev->dev.of_node)
+ host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+ if (!host->ncfg) {
+ dev_err(&pdev->dev,
+ "Missing or bad NAND config from device tree\n");
+ return -ENOENT;
+ }
+ if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (gpio_is_valid(host->ncfg->wp_gpio) &&
+ gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+ dev_err(&pdev->dev, "GPIO not available\n");
+ return -EBUSY;
+ }
+ lpc32xx_wp_disable(host);
+
+ host->pdata = dev_get_platdata(&pdev->dev);
+
+ nand_chip->priv = host; /* link the private data structures */
+ mtd->priv = nand_chip;
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+
+ /* Get NAND clock */
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "Clock initialization failure\n");
+ res = -ENOENT;
+ goto err_exit1;
+ }
+ clk_enable(host->clk);
+
+ nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ nand_chip->dev_ready = lpc32xx_nand_device_ready;
+ nand_chip->chip_delay = 25; /* us */
+ nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
+ nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);
+
+ /* Init NAND controller */
+ lpc32xx_nand_setup(host);
+
+ platform_set_drvdata(pdev, host);
+
+ /* Initialize function pointers */
+ nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
+ nand_chip->ecc.read_page_raw = lpc32xx_read_page;
+ nand_chip->ecc.read_page = lpc32xx_read_page;
+ nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
+ nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
+ nand_chip->ecc.write_oob = lpc32xx_write_oob;
+ nand_chip->ecc.read_oob = lpc32xx_read_oob;
+ nand_chip->ecc.strength = 4;
+ nand_chip->waitfunc = lpc32xx_waitfunc;
+
+ nand_chip->options = NAND_NO_SUBPAGE_WRITE;
+ nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+ nand_chip->bbt_td = &lpc32xx_nand_bbt;
+ nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
+
+ if (use_dma) {
+ res = lpc32xx_dma_setup(host);
+ if (res) {
+ res = -EIO;
+ goto err_exit2;
+ }
+ }
+
+ /*
+ * Scan to detect the presence of the device and to determine its
+ * type: small block or large block NAND device.
+ */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dma_buf) {
+ res = -ENOMEM;
+ goto err_exit3;
+ }
+
+ host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dummy_buf) {
+ res = -ENOMEM;
+ goto err_exit3;
+ }
+
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = mtd->writesize;
+ nand_chip->ecc.layout = &lpc32xx_nand_oob;
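+ /* One ECC subpage per 512 data bytes, e.g. 4 for a 2048-byte page */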
+ host->mlcsubpages = mtd->writesize / 512;
+
+ /* initially clear interrupt status */
+ readb(MLC_IRQ_SR(host->io_base));
+
+ init_completion(&host->comp_nand);
+ init_completion(&host->comp_controller);
+
+ host->irq = platform_get_irq(pdev, 0);
+ if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
+ dev_err(&pdev->dev, "failed to get platform irq\n");
+ res = -EINVAL;
+ goto err_exit3;
+ }
+
+ if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+ IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
+ dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ /*
+ * Fills out all the uninitialized function pointers with the
+ * defaults and scans for a bad block table if appropriate.
+ */
+ if (nand_scan_tail(mtd)) {
+ res = -ENXIO;
+ goto err_exit4;
+ }
+
+ mtd->name = DRV_NAME;
+
+ ppdata.of_node = pdev->dev.of_node;
+ res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
+ host->ncfg->num_parts);
+ if (!res)
+ return res;
+
+ nand_release(mtd);
+
+err_exit4:
+ free_irq(host->irq, host);
+err_exit3:
+ if (use_dma)
+ dma_release_channel(host->dma_chan);
+err_exit2:
+ clk_disable(host->clk);
+ clk_put(host->clk);
+err_exit1:
+ lpc32xx_wp_enable(host);
+ gpio_free(host->ncfg->wp_gpio);
+
+ return res;
+}
+
+/*
+ * Remove NAND device
+ */
+static int lpc32xx_nand_remove(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+ free_irq(host->irq, host);
+ if (use_dma)
+ dma_release_channel(host->dma_chan);
+
+ clk_disable(host->clk);
+ clk_put(host->clk);
+
+ lpc32xx_wp_enable(host);
+ gpio_free(host->ncfg->wp_gpio);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Re-enable NAND clock */
+ clk_enable(host->clk);
+
+ /* Fresh init of NAND controller */
+ lpc32xx_nand_setup(host);
+
+ /* Disable write protect */
+ lpc32xx_wp_disable(host);
+
+ return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Enable write protect for safety */
+ lpc32xx_wp_enable(host);
+
+ /* Disable clock */
+ clk_disable(host->clk);
+ return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+ { .compatible = "nxp,lpc3220-mlc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+ .probe = lpc32xx_nand_probe,
+ .remove = lpc32xx_nand_remove,
+ .resume = lpc32xx_nand_resume,
+ .suspend = lpc32xx_nand_suspend,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = lpc32xx_nand_match,
+ },
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
new file mode 100644
index 000000000..abfec1386
--- /dev/null
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -0,0 +1,1011 @@
+/*
+ * NXP LPC32XX NAND SLC driver
+ *
+ * Authors:
+ * Kevin Wells <kevin.wells@nxp.com>
+ * Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 NXP Semiconductors
+ * Copyright © 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/lpc32xx_slc.h>
+
+#define LPC32XX_MODNAME "lpc32xx-nand"
+
+/**********************************************************************
+* SLC NAND controller register offsets
+**********************************************************************/
+
+#define SLC_DATA(x) (x + 0x000)
+#define SLC_ADDR(x) (x + 0x004)
+#define SLC_CMD(x) (x + 0x008)
+#define SLC_STOP(x) (x + 0x00C)
+#define SLC_CTRL(x) (x + 0x010)
+#define SLC_CFG(x) (x + 0x014)
+#define SLC_STAT(x) (x + 0x018)
+#define SLC_INT_STAT(x) (x + 0x01C)
+#define SLC_IEN(x) (x + 0x020)
+#define SLC_ISR(x) (x + 0x024)
+#define SLC_ICR(x) (x + 0x028)
+#define SLC_TAC(x) (x + 0x02C)
+#define SLC_TC(x) (x + 0x030)
+#define SLC_ECC(x) (x + 0x034)
+#define SLC_DMA_DATA(x) (x + 0x038)
+
+/**********************************************************************
+* slc_ctrl register definitions
+**********************************************************************/
+#define SLCCTRL_SW_RESET (1 << 2) /* Reset the NAND controller bit */
+#define SLCCTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
+#define SLCCTRL_DMA_START (1 << 0) /* Start DMA channel bit */
+
+/**********************************************************************
+* slc_cfg register definitions
+**********************************************************************/
+#define SLCCFG_CE_LOW (1 << 5) /* Force CE low bit */
+#define SLCCFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
+#define SLCCFG_ECC_EN (1 << 3) /* ECC enable bit */
+#define SLCCFG_DMA_BURST (1 << 2) /* DMA burst bit */
+#define SLCCFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
+#define SLCCFG_WIDTH (1 << 0) /* External device width, 0=8bit */
+
+/**********************************************************************
+* slc_stat register definitions
+**********************************************************************/
+#define SLCSTAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
+#define SLCSTAT_SLC_FIFO (1 << 1) /* SLC FIFO has data bit */
+#define SLCSTAT_NAND_READY (1 << 0) /* NAND device is ready bit */
+
+/**********************************************************************
+* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
+**********************************************************************/
+#define SLCSTAT_INT_TC (1 << 1) /* Transfer count bit */
+#define SLCSTAT_INT_RDY_EN (1 << 0) /* Ready interrupt bit */
+
+/**********************************************************************
+* slc_tac register definitions
+**********************************************************************/
+/* Clock setting for RDY write sample wait time in 2*n clocks */
+#define SLCTAC_WDR(n) (((n) & 0xF) << 28)
+/* Write pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_WWIDTH(n) (((n) & 0xF) << 24)
+/* Write hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WHOLD(n) (((n) & 0xF) << 20)
+/* Write setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WSETUP(n) (((n) & 0xF) << 16)
+/* Clock setting for RDY read sample wait time in 2*n clocks */
+#define SLCTAC_RDR(n) (((n) & 0xF) << 12)
+/* Read pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_RWIDTH(n) (((n) & 0xF) << 8)
+/* Read hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RHOLD(n) (((n) & 0xF) << 4)
+/* Read setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RSETUP(n) (((n) & 0xF) << 0)
+
+/**********************************************************************
+* slc_ecc register definitions
+**********************************************************************/
+/* ECC line parity fetch macros */
+#define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF)
+#define SLCECC_TO_COLPAR(n) ((n) & 0x3F)
+
+/*
+ * DMA requires storage space for the DMA local buffer and the hardware ECC
+ * storage area. The DMA local buffer is only used when the caller's buffer
+ * cannot be handed to the DMA engine directly (e.g. it is not in lowmem).
+ */
+#define LPC32XX_DMA_DATA_SIZE 4096
+#define LPC32XX_ECC_SAVE_SIZE ((4096 / 256) * 4)
+
+/* Number of bytes used for ECC stored in NAND per 256 bytes */
+#define LPC32XX_SLC_DEV_ECC_BYTES 3
+
+/*
+ * If the NAND base clock frequency can't be fetched, this frequency will be
+ * used instead as the base. This rate is used to setup the timing registers
+ * used for NAND accesses.
+ */
+#define LPC32XX_DEF_BUS_RATE 133250000
+
+/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
+#define LPC32XX_DMA_TIMEOUT 100
+
+/*
+ * NAND ECC Layout for small page NAND devices
+ * Note: For large and huge page devices, the default layouts are used
+ */
+static struct nand_ecclayout lpc32xx_nand_oob_16 = {
+ .eccbytes = 6,
+ .eccpos = {10, 11, 12, 13, 14, 15},
+ .oobfree = {
+ { .offset = 0, .length = 4 },
+ { .offset = 6, .length = 4 },
+ },
+};
+
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+/*
+ * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
+ * Note: Large page devices use the default layout
+ */
+static struct nand_bbt_descr bbt_smallpage_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+/*
+ * NAND platform configuration structure
+ */
+struct lpc32xx_nand_cfg_slc {
+ uint32_t wdr_clks;
+ uint32_t wwidth;
+ uint32_t whold;
+ uint32_t wsetup;
+ uint32_t rdr_clks;
+ uint32_t rwidth;
+ uint32_t rhold;
+ uint32_t rsetup;
+ bool use_bbt;
+ int wp_gpio;
+ struct mtd_partition *parts;
+ unsigned num_parts;
+};
+
+struct lpc32xx_nand_host {
+ struct nand_chip nand_chip;
+ struct lpc32xx_slc_platform_data *pdata;
+ struct clk *clk;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ struct lpc32xx_nand_cfg_slc *ncfg;
+
+ struct completion comp;
+ struct dma_chan *dma_chan;
+ uint32_t dma_buf_len;
+ struct dma_slave_config dma_slave_config;
+ struct scatterlist sgl;
+
+ /*
+ * DMA and CPU addresses of ECC work area and data buffer
+ */
+ uint32_t *ecc_buf;
+ uint8_t *data_buf;
+ dma_addr_t io_base_dma;
+};
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+ uint32_t clkrate, tmp;
+
+ /* Reset SLC controller */
+ writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
+ udelay(1000);
+
+ /* Basic setup */
+ writel(0, SLC_CFG(host->io_base));
+ writel(0, SLC_IEN(host->io_base));
+ writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
+ SLC_ICR(host->io_base));
+
+ /* Get base clock for SLC block */
+ clkrate = clk_get_rate(host->clk);
+ if (clkrate == 0)
+ clkrate = LPC32XX_DEF_BUS_RATE;
+
+ /* Compute clock setup values */
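+ /* wdr_clks and rdr_clks are given directly in clocks; the width,
+ * hold and setup fields appear to be rates (Hz), so dividing the
+ * bus clock by them gives cycle counts, with "1 +" rounding up.
+ */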
+ tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
+ SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) |
+ SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) |
+ SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) |
+ SLCTAC_RDR(host->ncfg->rdr_clks) |
+ SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) |
+ SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) |
+ SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup));
+ writel(tmp, SLC_TAC(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ uint32_t tmp;
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Does CE state need to be changed? */
+ tmp = readl(SLC_CFG(host->io_base));
+ if (ctrl & NAND_NCE)
+ tmp |= SLCCFG_CE_LOW;
+ else
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CFG(host->io_base));
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writel(cmd, SLC_CMD(host->io_base));
+ else
+ writel(cmd, SLC_ADDR(host->io_base));
+ }
+}
+
+/*
+ * Read the Device Ready pin
+ */
+static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ int rdy = 0;
+
+ if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
+ rdy = 1;
+
+ return rdy;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+/*
+ * Prepares SLC for transfers with H/W ECC enabled
+ */
+static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
+{
+ /* Hardware ECC is enabled automatically in hardware as needed */
+}
+
+/*
+ * Calculates the ECC for the data
+ */
+static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ /*
+ * ECC is calculated automatically in hardware during syndrome read
+ * and write operations, so it doesn't need to be calculated here.
+ */
+ return 0;
+}
+
+/*
+ * Read a single byte from NAND device
+ */
+static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ return (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device read without ECC
+ */
+static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Direct device read with no ECC */
+ while (len-- > 0)
+ *buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device write without ECC
+ */
+static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Direct device write with no ECC */
+ while (len-- > 0)
+ writel((uint32_t)*buf++, SLC_DATA(host->io_base));
+}
+
+/*
+ * Read the OOB data from the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * Write the OOB data to the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ int status;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* Send command to program the OOB data */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
+ */
+static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
+{
+ int i;
+
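+ /* The controller returns one 24-bit ECC word per 256-byte chunk in
+ * the low bits of a 32-bit register; shift it up by two, invert it
+ * and store it MSB-first as the 3 ECC bytes kept in the spare area.
+ */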
+ for (i = 0; i < (count * 3); i += 3) {
+ uint32_t ce = ecc[i / 3];
+ ce = ~(ce << 2) & 0xFFFFFF;
+ spare[i + 2] = (uint8_t)(ce & 0xFF);
+ ce >>= 8;
+ spare[i + 1] = (uint8_t)(ce & 0xFF);
+ ce >>= 8;
+ spare[i] = (uint8_t)(ce & 0xFF);
+ }
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
+ void *mem, int len, enum dma_transfer_direction dir)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ struct dma_async_tx_descriptor *desc;
+ int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int res;
+
+ host->dma_slave_config.direction = dir;
+ host->dma_slave_config.src_addr = dma;
+ host->dma_slave_config.dst_addr = dma;
+ host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.src_maxburst = 4;
+ host->dma_slave_config.dst_maxburst = 4;
+ /* DMA controller does flow control: */
+ host->dma_slave_config.device_fc = false;
+ if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+ dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+ return -ENXIO;
+ }
+
+ sg_init_one(&host->sgl, mem, len);
+
+ res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ if (res != 1) {
+ dev_err(mtd->dev.parent, "Failed to map sg list\n");
+ return -ENXIO;
+ }
+ desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+ flags);
+ if (!desc) {
+ dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+ goto out1;
+ }
+
+ init_completion(&host->comp);
+ desc->callback = lpc32xx_dma_complete_func;
+ desc->callback_param = &host->comp;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma_chan);
+
+ wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
+
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+
+ return 0;
+out1:
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return -ENXIO;
+}
+
+/*
+ * DMA read/write transfers with ECC support
+ */
+static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
+ int read)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ int i, status = 0;
+ unsigned long timeout;
+ int res;
+ enum dma_transfer_direction dir =
+ read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+ uint8_t *dma_buf;
+ bool dma_mapped;
+
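+ /* Lowmem buffers are DMA-mapped in place; others go through the
+ * data_buf bounce buffer (copied in before a write, copied out
+ * after a read).
+ */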
+ if ((void *)buf <= high_memory) {
+ dma_buf = buf;
+ dma_mapped = true;
+ } else {
+ dma_buf = host->data_buf;
+ dma_mapped = false;
+ if (!read)
+ memcpy(host->data_buf, buf, mtd->writesize);
+ }
+
+ if (read) {
+ writel(readl(SLC_CFG(host->io_base)) |
+ SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+ SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
+ } else {
+ writel((readl(SLC_CFG(host->io_base)) |
+ SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
+ ~SLCCFG_DMA_DIR,
+ SLC_CFG(host->io_base));
+ }
+
+ /* Clear initial ECC */
+ writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
+
+ /* Transfer size is data area only */
+ writel(mtd->writesize, SLC_TC(host->io_base));
+
+ /* Start transfer in the NAND controller */
+ writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
+ SLC_CTRL(host->io_base));
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ /* Data */
+ res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
+ dma_buf + i * chip->ecc.size,
+ mtd->writesize / chip->ecc.steps, dir);
+ if (res)
+ return res;
+
+ /* Always _read_ ECC */
+ if (i == chip->ecc.steps - 1)
+ break;
+ if (!read) /* ECC availability delayed on write */
+ udelay(10);
+ res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
+ &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
+ if (res)
+ return res;
+ }
+
+ /*
+ * According to NXP, the DMA can be finished here, but the NAND
+ * controller may still have buffered data. After porting to using the
+ * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
+ * appears to be always true, according to tests. Keeping the check for
+ * safety reasons for now.
+ */
+ if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
+ dev_warn(mtd->dev.parent, "FIFO not empty!\n");
+ timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
+ while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
+ time_before(jiffies, timeout))
+ cpu_relax();
+ if (!time_before(jiffies, timeout)) {
+ dev_err(mtd->dev.parent, "FIFO held data too long\n");
+ status = -EIO;
+ }
+ }
+
+ /* Read last calculated ECC value */
+ if (!read)
+ udelay(10);
+ host->ecc_buf[chip->ecc.steps - 1] =
+ readl(SLC_ECC(host->io_base));
+
+ /* Flush DMA */
+ dmaengine_terminate_all(host->dma_chan);
+
+ if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
+ readl(SLC_TC(host->io_base))) {
+ /* Something is left in the FIFO, something is wrong */
+ dev_err(mtd->dev.parent, "DMA FIFO failure\n");
+ status = -EIO;
+ }
+
+ /* Stop DMA & HW ECC */
+ writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
+ SLC_CTRL(host->io_base));
+ writel(readl(SLC_CFG(host->io_base)) &
+ ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+ SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
+
+ if (!dma_mapped && read)
+ memcpy(buf, host->data_buf, mtd->writesize);
+
+ return status;
+}
+
+/*
+ * Read the data and OOB data from the device, use ECC correction with the
+ * data, disable ECC for the OOB data
+ */
+static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ int stat, i, status;
+ uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
+
+ /* Issue read command */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ /* Read data and oob, calculate ECC */
+ status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
+
+ /* Get OOB data */
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* Convert to stored ECC format */
+ lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
+
+ /* Pointer to ECC data retrieved from NAND spare area */
+ oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0];
+
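+ /* Correct each 256-byte step in software: compare the ECC read back
+ * from the spare area with the value the controller computed while
+ * the data was streamed.
+ */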
+ for (i = 0; i < chip->ecc.steps; i++) {
+ stat = chip->ecc.correct(mtd, buf, oobecc,
+ &tmpecc[i * chip->ecc.bytes]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+
+ buf += chip->ecc.size;
+ oobecc += chip->ecc.bytes;
+ }
+
+ return status;
+}
+
+/*
+ * Read the data and OOB data from the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ /* Issue read command */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ /* Raw reads can just use the FIFO interface */
+ chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, use ECC with the data,
+ * disable ECC for the OOB data
+ */
+static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0];
+ int error;
+
+ /* Write data, calculate ECC on outbound data */
+ error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
+ if (error)
+ return error;
+
+ /*
+ * The calculated ECC needs some manual work done to it before
+ * committing it to NAND. Process the calculated ECC and place
+ * the resultant values directly into the OOB buffer.
+ */
+ lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
+
+ /* Write ECC data to device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required)
+{
+ /* Raw writes can just use the FIFO interface */
+ chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ dma_cap_mask_t mask;
+
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+ "nand-slc");
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
+{
+ struct lpc32xx_nand_cfg_slc *ncfg;
+ struct device_node *np = dev->of_node;
+
+ ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+ if (!ncfg)
+ return NULL;
+
+ of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
+ of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
+ of_property_read_u32(np, "nxp,whold", &ncfg->whold);
+ of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
+ of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
+ of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
+ of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
+ of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
+
+ if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
+ !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
+ !ncfg->rhold || !ncfg->rsetup) {
+ dev_err(dev, "chip parameters not specified correctly\n");
+ return NULL;
+ }
+
+ ncfg->use_bbt = of_get_nand_on_flash_bbt(np);
+ ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+ return ncfg;
+}
+
+/*
+ * Probe for NAND controller
+ */
+static int lpc32xx_nand_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct resource *rc;
+ struct mtd_part_parser_data ppdata = {};
+ int res;
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (rc == NULL) {
+ dev_err(&pdev->dev, "No memory resource found for device\n");
+ return -EBUSY;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+ host->io_base_dma = rc->start;
+
+ host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
+
+ if (pdev->dev.of_node)
+ host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+ if (!host->ncfg) {
+ dev_err(&pdev->dev,
+ "Missing or bad NAND config from device tree\n");
+ return -ENOENT;
+ }
+ if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
+ host->ncfg->wp_gpio, "NAND WP")) {
+ dev_err(&pdev->dev, "GPIO not available\n");
+ return -EBUSY;
+ }
+ lpc32xx_wp_disable(host);
+
+ host->pdata = dev_get_platdata(&pdev->dev);
+
+ mtd = &host->mtd;
+ chip = &host->nand_chip;
+ chip->priv = host;
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+
+ /* Get NAND clock */
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "Clock failure\n");
+ res = -ENOENT;
+ goto err_exit1;
+ }
+ clk_enable(host->clk);
+
+ /* Set NAND IO addresses and command/ready functions */
+ chip->IO_ADDR_R = SLC_DATA(host->io_base);
+ chip->IO_ADDR_W = SLC_DATA(host->io_base);
+ chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ chip->dev_ready = lpc32xx_nand_device_ready;
+ chip->chip_delay = 20; /* 20us command delay time */
+
+ /* Init NAND controller */
+ lpc32xx_nand_setup(host);
+
+ platform_set_drvdata(pdev, host);
+
+ /* NAND callbacks for LPC32xx SLC hardware */
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->read_byte = lpc32xx_nand_read_byte;
+ chip->read_buf = lpc32xx_nand_read_buf;
+ chip->write_buf = lpc32xx_nand_write_buf;
+ chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
+ chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
+ chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
+ chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
+ chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
+ chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
+ chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.strength = 1;
+ chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
+
+ /*
+ * Allocate a large enough buffer for a single huge page plus
+ * extra space for the spare area and ECC storage area
+ */
+ host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
+ host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
+ GFP_KERNEL);
+ if (host->data_buf == NULL) {
+ res = -ENOMEM;
+ goto err_exit2;
+ }
+
+ res = lpc32xx_nand_dma_setup(host);
+ if (res) {
+ res = -EIO;
+ goto err_exit2;
+ }
+
+ /* Find NAND device */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ /* OOB and ECC CPU and DMA work areas */
+ host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
+
+ /*
+ * Small page FLASH has a unique OOB layout, but large and huge
+ * page FLASH use the standard layout. Small page FLASH uses a
+ * custom BBT marker layout.
+ */
+ if (mtd->writesize <= 512)
+ chip->ecc.layout = &lpc32xx_nand_oob_16;
+
+ /* These sizes remain the same regardless of page size */
+ chip->ecc.size = 256;
+ chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
+ chip->ecc.prepad = chip->ecc.postpad = 0;
+
+ /* Avoid extra scan if using BBT, setup BBT support */
+ if (host->ncfg->use_bbt) {
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ /*
+ * Use a custom BBT marker setup for small page FLASH that
+ * won't interfere with the ECC layout. Large and huge page
+ * FLASH use the standard layout.
+ */
+ if (mtd->writesize <= 512) {
+ chip->bbt_td = &bbt_smallpage_main_descr;
+ chip->bbt_md = &bbt_smallpage_mirror_descr;
+ }
+ }
+
+ /*
+ * Fills out all the uninitialized function pointers with the defaults
+ */
+ if (nand_scan_tail(mtd)) {
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ mtd->name = "nxp_lpc3220_slc";
+ ppdata.of_node = pdev->dev.of_node;
+ res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
+ host->ncfg->num_parts);
+ if (!res)
+ return res;
+
+ nand_release(mtd);
+
+err_exit3:
+ dma_release_channel(host->dma_chan);
+err_exit2:
+ clk_disable(host->clk);
+err_exit1:
+ lpc32xx_wp_enable(host);
+
+ return res;
+}
+
+/*
+ * Remove NAND device.
+ */
+static int lpc32xx_nand_remove(struct platform_device *pdev)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+ dma_release_channel(host->dma_chan);
+
+ /* Force CE high */
+ tmp = readl(SLC_CTRL(host->io_base));
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CTRL(host->io_base));
+
+ clk_disable(host->clk);
+ lpc32xx_wp_enable(host);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Re-enable NAND clock */
+ clk_enable(host->clk);
+
+ /* Fresh init of NAND controller */
+ lpc32xx_nand_setup(host);
+
+ /* Disable write protect */
+ lpc32xx_wp_disable(host);
+
+ return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Force CE high */
+ tmp = readl(SLC_CTRL(host->io_base));
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CTRL(host->io_base));
+
+ /* Enable write protect for safety */
+ lpc32xx_wp_enable(host);
+
+ /* Disable clock */
+ clk_disable(host->clk);
+
+ return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+ { .compatible = "nxp,lpc3220-slc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+ .probe = lpc32xx_nand_probe,
+ .remove = lpc32xx_nand_remove,
+ .resume = lpc32xx_nand_resume,
+ .suspend = lpc32xx_nand_suspend,
+ .driver = {
+ .name = LPC32XX_MODNAME,
+ .of_match_table = lpc32xx_nand_match,
+ },
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
new file mode 100644
index 000000000..1f12e5bfb
--- /dev/null
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * Based on the original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on the basis
+ * of drivers/mtd/nand/mxc_nand.c. Reworked and extended by
+ * Piotr Ziecik <kosmo@semihalf.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/mpc5121.h>
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS 8
+#define NFC_SPARE_LEN 0x40
+#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR 0x1E04
+#define NFC_FLASH_ADDR 0x1E06
+#define NFC_FLASH_CMD 0x1E08
+#define NFC_CONFIG 0x1E0A
+#define NFC_ECC_STATUS1 0x1E0C
+#define NFC_ECC_STATUS2 0x1E0E
+#define NFC_SPAS 0x1E10
+#define NFC_WRPROT 0x1E12
+#define NFC_NF_WRPRST 0x1E18
+#define NFC_CONFIG1 0x1E1A
+#define NFC_CONFIG2 0x1E1C
+#define NFC_UNLOCKSTART_BLK0 0x1E20
+#define NFC_UNLOCKEND_BLK0 0x1E22
+#define NFC_UNLOCKSTART_BLK1 0x1E24
+#define NFC_UNLOCKEND_BLK1 0x1E26
+#define NFC_UNLOCKSTART_BLK2 0x1E28
+#define NFC_UNLOCKEND_BLK2 0x1E2A
+#define NFC_UNLOCKSTART_BLK3 0x1E2C
+#define NFC_UNLOCKEND_BLK3 0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK (7 << 0)
+#define NFC_ACTIVE_CS_SHIFT 5
+#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED (1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT (1 << 0)
+#define NFC_FULL_PAGE_DMA (1 << 1)
+#define NFC_SPARE_ONLY (1 << 2)
+#define NFC_ECC_ENABLE (1 << 3)
+#define NFC_INT_MASK (1 << 4)
+#define NFC_BIG_ENDIAN (1 << 5)
+#define NFC_RESET (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_PPB_32 (0 << 9)
+#define NFC_PPB_64 (1 << 9)
+#define NFC_PPB_128 (2 << 9)
+#define NFC_PPB_256 (3 << 9)
+#define NFC_PPB_MASK (3 << 9)
+#define NFC_FULL_PAGE_INT (1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND (1 << 0)
+#define NFC_ADDRESS (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+#define NFC_CMD_FAIL (1 << 15)
+#define NFC_INT (1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT (1 << 0)
+#define NFC_WPC_LOCK (1 << 1)
+#define NFC_WPC_UNLOCK (1 << 2)
+
+#define DRV_NAME "mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
+#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
+
+struct mpc5121_nfc_prv {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ wait_queue_head_t irq_waitq;
+ uint column;
+ int spareonly;
+ void __iomem *csreg;
+ struct device *dev;
+};
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+ nfc_write(mtd, NFC_FLASH_ADDR, addr);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+ nfc_write(mtd, NFC_FLASH_CMD, cmd);
+ nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+ mpc5121_nfc_done(mtd);
+}
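+
+/*
+ * The four buffer-transfer helpers above (program page, read page,
+ * read ID, read status) all follow the same pattern: select RAM
+ * buffer 0 by clearing the RBA field, start the operation by writing
+ * its bit to NFC_CONFIG2, then block in mpc5121_nfc_done() until the
+ * controller raises NFC_INT.
+ */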
+
+/* NFC interrupt handler */
+static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
+{
+ struct mtd_info *mtd = data;
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ wake_up(&prv->irq_waitq);
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for operation complete */
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ int rv;
+
+ if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ rv = wait_event_timeout(prv->irq_waitq,
+ (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
+
+ if (!rv)
+ dev_warn(prv->dev,
+ "Timeout while waiting for interrupt.\n");
+ }
+
+ nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ u32 pagemask = chip->pagemask;
+
+ if (column != -1) {
+ mpc5121_nfc_send_addr(mtd, column);
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_addr(mtd, column >> 8);
+ }
+
+ if (page != -1) {
+ do {
+ mpc5121_nfc_send_addr(mtd, page & 0xFF);
+ page >>= 8;
+ pagemask >>= 8;
+ } while (pagemask);
+ }
+}
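+
+/*
+ * Illustrative example: for a 2048-byte-page device with
+ * chip->pagemask == 0xFFFFF, a full read at (column, page) issues two
+ * column cycles (bits 7:0, then 15:8) followed by three page cycles
+ * (bits 7:0, 15:8, 23:16), after which pagemask is exhausted and the
+ * loop above terminates.
+ */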
+
+/* Control chip select signals */
+static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ if (chip < 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+ return;
+ }
+
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+ NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+
+/* Init external chip select logic on ADS5121 board */
+static int ads5121_chipselect_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
+ if (dn) {
+ prv->csreg = of_iomap(dn, 0);
+ of_node_put(dn);
+ if (!prv->csreg)
+ return -ENOMEM;
+
+ /* CPLD Register 9 controls NAND /CE Lines */
+ prv->csreg += 9;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Control chip select signal on the ADS5121 board */
+static void ads5121_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ u8 v;
+
+ v = in_8(prv->csreg);
+ v |= 0x0F;
+
+ if (chip >= 0) {
+ mpc5121_nfc_select_chip(mtd, 0);
+ v &= ~(1 << chip);
+ } else
+ mpc5121_nfc_select_chip(mtd, -1);
+
+ out_8(prv->csreg, v);
+}
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles ready/busy signal internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
+ int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ prv->column = (column >= 0) ? column : 0;
+ prv->spareonly = 0;
+
+ switch (command) {
+ case NAND_CMD_PAGEPROG:
+ mpc5121_nfc_send_prog_page(mtd);
+ break;
+ /*
+ * NFC does not support sub-page reads and writes,
+ * so emulate them using full page transfers.
+ */
+ case NAND_CMD_READ0:
+ column = 0;
+ break;
+
+ case NAND_CMD_READ1:
+ prv->column += 256;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_READOOB:
+ prv->spareonly = 1;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_SEQIN:
+ mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
+ column = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_READID:
+ case NAND_CMD_STATUS:
+ break;
+
+ default:
+ return;
+ }
+
+ mpc5121_nfc_send_cmd(mtd, command);
+ mpc5121_nfc_addr_cycle(mtd, column, page);
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+ mpc5121_nfc_send_read_page(mtd);
+ break;
+
+ case NAND_CMD_READID:
+ mpc5121_nfc_send_read_id(mtd);
+ break;
+
+ case NAND_CMD_STATUS:
+ mpc5121_nfc_send_read_status(mtd);
+ if (chip->options & NAND_BUSWIDTH_16)
+ prv->column = 1;
+ else
+ prv->column = 0;
+ break;
+ }
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+ u8 *buffer, uint size, int wr)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ uint o, s, sbsize, blksize;
+
+ /*
+ * NAND spare area is available through NFC spare buffers.
+ * The NFC divides spare area into (page_size / 512) chunks.
+ * Each chunk is placed into separate spare memory area, using
+ * first (spare_size / num_of_chunks) bytes of the buffer.
+ *
+ * For NAND devices in which the spare area is not evenly divided
+ * by the number of chunks, the number of used bytes in each spare
+ * buffer is rounded down to the nearest even number of bytes, and
+ * all remaining bytes are added to the last used spare area.
+ *
+ * For more information read section 26.6.10 of MPC5121e
+ * Microcontroller Reference Manual, Rev. 3.
+ */
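+
+ /*
+  * Worked example (illustrative): a 4096-byte page has
+  * 4096 / 512 = 8 chunks; with a 218-byte spare area,
+  * sbsize = (218 / 8) & ~1 = 26 bytes per spare buffer, and the
+  * 218 - 8 * 26 = 10 leftover bytes all land in the last buffer.
+  */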
+
+ /* Calculate number of valid bytes in each spare buffer */
+ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
+
+ while (size) {
+ /* Calculate spare buffer number */
+ s = offset / sbsize;
+ if (s > NFC_SPARE_BUFFERS - 1)
+ s = NFC_SPARE_BUFFERS - 1;
+
+ /*
+ * Calculate offset to requested data block in selected spare
+ * buffer and its size.
+ */
+ o = offset - (s * sbsize);
+ blksize = min(sbsize - o, size);
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+ buffer, blksize);
+ else
+ memcpy_fromio(buffer,
+ prv->regs + NFC_SPARE_AREA(s) + o, blksize);
+
+ buffer += blksize;
+ offset += blksize;
+ size -= blksize;
+ }
+}
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
+ int wr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ uint c = prv->column;
+ uint l;
+
+ /* Handle spare area access */
+ if (prv->spareonly || c >= mtd->writesize) {
+ /* Calculate offset from beginning of spare area */
+ if (c >= mtd->writesize)
+ c -= mtd->writesize;
+
+ prv->column += len;
+ mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+ return;
+ }
+
+ /*
+ * Handle main area access - limit copy length to prevent
+ * crossing main/spare boundary.
+ */
+ l = min((uint)len, mtd->writesize - c);
+ prv->column += l;
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+ else
+ memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+ /* Handle crossing main/spare boundary */
+ if (l != len) {
+ buf += l;
+ len -= l;
+ mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+ }
+}
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
+{
+ u8 tmp;
+
+ mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/* Read word from NFC buffers */
+static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
+{
+ u16 tmp;
+
+ mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * The NFC is configured during reset on the basis of information
+ * stored in the Reset Config Word. There is no other way to set the
+ * NAND block size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct mpc512x_reset_module *rm;
+ struct device_node *rmnode;
+ uint rcw_pagesize = 0;
+ uint rcw_sparesize = 0;
+ uint rcw_width;
+ uint rcwh;
+ uint romloc, ps;
+ int ret = 0;
+
+ rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
+ if (!rmnode) {
+ dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
+ "node in device tree!\n");
+ return -ENODEV;
+ }
+
+ rm = of_iomap(rmnode, 0);
+ if (!rm) {
+ dev_err(prv->dev, "Error mapping reset module node!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ rcwh = in_be32(&rm->rcwhr);
+
+ /* Bit 6: NFC bus width */
+ rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+ /* Bit 7: NFC Page/Spare size */
+ ps = (rcwh >> 7) & 0x1;
+
+ /* Bits [22:21]: ROM Location */
+ romloc = (rcwh >> 21) & 0x3;
+
+ /* Decode RCW bits */
+ switch ((ps << 2) | romloc) {
+ case 0x00:
+ case 0x01:
+ rcw_pagesize = 512;
+ rcw_sparesize = 16;
+ break;
+ case 0x02:
+ case 0x03:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 128;
+ break;
+ case 0x04:
+ case 0x05:
+ rcw_pagesize = 2048;
+ rcw_sparesize = 64;
+ break;
+ case 0x06:
+ case 0x07:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 218;
+ break;
+ }
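+
+ /*
+  * Example: an RCWH with the PS bit (7) set and ROMLOC bits [22:21]
+  * equal to 0 decodes as index 0x04 above, i.e. a 2048-byte page
+  * with a 64-byte spare area.
+  */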
+
+ mtd->writesize = rcw_pagesize;
+ mtd->oobsize = rcw_sparesize;
+ if (rcw_width == 2)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ dev_notice(prv->dev,
+ "Configured for %u-bit NAND, page size %u with %u spare.\n",
+ rcw_width * 8, rcw_pagesize, rcw_sparesize);
+ iounmap(rm);
+out:
+ of_node_put(rmnode);
+ return ret;
+}
+
+/* Free driver resources */
+static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ if (prv->clk)
+ clk_disable_unprepare(prv->clk);
+
+ if (prv->csreg)
+ iounmap(prv->csreg);
+}
+
+static int mpc5121_nfc_probe(struct platform_device *op)
+{
+ struct device_node *rootnode, *dn = op->dev.of_node;
+ struct clk *clk;
+ struct device *dev = &op->dev;
+ struct mpc5121_nfc_prv *prv;
+ struct resource res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ unsigned long regs_paddr, regs_size;
+ const __be32 *chips_no;
+ int resettime = 0;
+ int retval = 0;
+ int rev, len;
+ struct mtd_part_parser_data ppdata;
+
+ /*
+ * Check SoC revision. This driver supports only NFC
+ * in MPC5121 revision 2 and MPC5123 revision 3.
+ */
+ rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+ if ((rev != 2) && (rev != 3)) {
+ dev_err(dev, "SoC revision %u is not supported!\n", rev);
+ return -ENXIO;
+ }
+
+ prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
+ if (!prv)
+ return -ENOMEM;
+
+ mtd = &prv->mtd;
+ chip = &prv->chip;
+
+ mtd->priv = chip;
+ chip->priv = prv;
+ prv->dev = dev;
+
+ /* Read NFC configuration from Reset Config Word */
+ retval = mpc5121_nfc_read_hw_config(mtd);
+ if (retval) {
+ dev_err(dev, "Unable to read NFC config!\n");
+ return retval;
+ }
+
+ prv->irq = irq_of_parse_and_map(dn, 0);
+ if (prv->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ chips_no = of_get_property(dn, "chips", &len);
+ if (!chips_no || len != sizeof(*chips_no)) {
+ dev_err(dev, "Invalid/missing 'chips' property!\n");
+ return -EINVAL;
+ }
+
+ regs_paddr = res.start;
+ regs_size = resource_size(&res);
+
+ if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
+ if (!prv->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mtd->name = "MPC5121 NAND";
+ ppdata.of_node = dn;
+ chip->dev_ready = mpc5121_nfc_dev_ready;
+ chip->cmdfunc = mpc5121_nfc_command;
+ chip->read_byte = mpc5121_nfc_read_byte;
+ chip->read_word = mpc5121_nfc_read_word;
+ chip->read_buf = mpc5121_nfc_read_buf;
+ chip->write_buf = mpc5121_nfc_write_buf;
+ chip->select_chip = mpc5121_nfc_select_chip;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Support external chip-select logic on ADS5121 board */
+ rootnode = of_find_node_by_path("/");
+ if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
+ retval = ads5121_chipselect_init(mtd);
+ if (retval) {
+ dev_err(dev, "Chipselect init error!\n");
+ of_node_put(rootnode);
+ return retval;
+ }
+
+ chip->select_chip = ads5121_select_chip;
+ }
+ of_node_put(rootnode);
+
+ /* Enable NFC clock */
+ clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Unable to acquire NFC clock!\n");
+ retval = PTR_ERR(clk);
+ goto error;
+ }
+ retval = clk_prepare_enable(clk);
+ if (retval) {
+ dev_err(dev, "Unable to enable NFC clock!\n");
+ goto error;
+ }
+ prv->clk = clk;
+
+ /* Reset NAND Flash controller */
+ nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+ while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+ if (resettime++ >= NFC_RESET_TIMEOUT) {
+ dev_err(dev, "Timeout while resetting NFC!\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
+ udelay(1);
+ }
+
+ /* Enable write to NFC memory */
+ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+ /* Enable write to all NAND pages */
+ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+ nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+ nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+ /*
+ * Setup NFC:
+ * - Big Endian transfers,
+ * - Interrupt after full page read/write.
+ */
+ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+ NFC_FULL_PAGE_INT);
+
+ /* Set spare area size */
+ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
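+ /*
+  * Note: judging from the shift above, NFC_SPAS takes the spare
+  * size in 16-bit words, so e.g. a 64-byte OOB is programmed as 32.
+  */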
+
+ init_waitqueue_head(&prv->irq_waitq);
+ retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
+ mtd);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ goto error;
+ }
+
+ /* Detect NAND chips */
+ if (nand_scan(mtd, be32_to_cpup(chips_no))) {
+ dev_err(dev, "NAND Flash not found !\n");
+ retval = -ENXIO;
+ goto error;
+ }
+
+ /* Set erase block size */
+ switch (mtd->erasesize / mtd->writesize) {
+ case 32:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+ break;
+
+ case 64:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+ break;
+
+ case 128:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+ break;
+
+ case 256:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported NAND flash!\n");
+ retval = -ENXIO;
+ goto error;
+ }
+
+ dev_set_drvdata(dev, mtd);
+
+ /* Register device in MTD */
+ retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (retval) {
+ dev_err(dev, "Error adding MTD device!\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ mpc5121_nfc_free(dev, mtd);
+ return retval;
+}
+
+static int mpc5121_nfc_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ nand_release(mtd);
+ mpc5121_nfc_free(dev, mtd);
+
+ return 0;
+}
+
+static struct of_device_id mpc5121_nfc_match[] = {
+ { .compatible = "fsl,mpc5121-nfc", },
+ {},
+};
+
+static struct platform_driver mpc5121_nfc_driver = {
+ .probe = mpc5121_nfc_probe,
+ .remove = mpc5121_nfc_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = mpc5121_nfc_match,
+ },
+};
+
+module_platform_driver(mpc5121_nfc_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
new file mode 100644
index 000000000..372e0e38f
--- /dev/null
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -0,0 +1,1649 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
+
+#include <asm/mach/flash.h>
+#include <linux/platform_data/mtd-mxc_nand.h>
+
+#define DRIVER_NAME "mxc_nand"
+
+/* Addresses for NFC registers */
+#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
+#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04)
+#define NFC_V1_V2_FLASH_ADDR (host->regs + 0x06)
+#define NFC_V1_V2_FLASH_CMD (host->regs + 0x08)
+#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
+#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
+#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
+#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
+#define NFC_V1_V2_WRPROT (host->regs + 0x12)
+#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
+#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
+#define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20)
+#define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24)
+#define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28)
+#define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c)
+#define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22)
+#define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26)
+#define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a)
+#define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e)
+#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
+#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
+#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
+
+#define NFC_V2_CONFIG1_ECC_MODE_4 (1 << 0)
+#define NFC_V1_V2_CONFIG1_SP_EN (1 << 2)
+#define NFC_V1_V2_CONFIG1_ECC_EN (1 << 3)
+#define NFC_V1_V2_CONFIG1_INT_MSK (1 << 4)
+#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
+#define NFC_V1_V2_CONFIG1_RST (1 << 6)
+#define NFC_V1_V2_CONFIG1_CE (1 << 7)
+#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
+#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
+#define NFC_V2_CONFIG1_FP_INT (1 << 11)
+
+#define NFC_V1_V2_CONFIG2_INT (1 << 15)
+
+/*
+ * Operation modes for the NFC. Valid for v1, v2 and v3
+ * type controllers.
+ */
+#define NFC_CMD (1 << 0)
+#define NFC_ADDR (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+
+#define NFC_V3_FLASH_CMD (host->regs_axi + 0x00)
+#define NFC_V3_FLASH_ADDR0 (host->regs_axi + 0x04)
+
+#define NFC_V3_CONFIG1 (host->regs_axi + 0x34)
+#define NFC_V3_CONFIG1_SP_EN (1 << 0)
+#define NFC_V3_CONFIG1_RBA(x) (((x) & 0x7) << 4)
+
+#define NFC_V3_ECC_STATUS_RESULT (host->regs_axi + 0x38)
+
+#define NFC_V3_LAUNCH (host->regs_axi + 0x40)
+
+#define NFC_V3_WRPROT (host->regs_ip + 0x0)
+#define NFC_V3_WRPROT_LOCK_TIGHT (1 << 0)
+#define NFC_V3_WRPROT_LOCK (1 << 1)
+#define NFC_V3_WRPROT_UNLOCK (1 << 2)
+#define NFC_V3_WRPROT_BLS_UNLOCK (2 << 6)
+
+#define NFC_V3_WRPROT_UNLOCK_BLK_ADD0 (host->regs_ip + 0x04)
+
+#define NFC_V3_CONFIG2 (host->regs_ip + 0x24)
+#define NFC_V3_CONFIG2_PS_512 (0 << 0)
+#define NFC_V3_CONFIG2_PS_2048 (1 << 0)
+#define NFC_V3_CONFIG2_PS_4096 (2 << 0)
+#define NFC_V3_CONFIG2_ONE_CYCLE (1 << 2)
+#define NFC_V3_CONFIG2_ECC_EN (1 << 3)
+#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
+#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
+#define NFC_V3_CONFIG2_PPB(x, shift) (((x) & 0x3) << shift)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
+#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
+#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
+#define NFC_V3_CONFIG2_SPAS(x) (((x) & 0xff) << 16)
+
+#define NFC_V3_CONFIG3 (host->regs_ip + 0x28)
+#define NFC_V3_CONFIG3_ADD_OP(x) (((x) & 0x3) << 0)
+#define NFC_V3_CONFIG3_FW8 (1 << 3)
+#define NFC_V3_CONFIG3_SBB(x) (((x) & 0x7) << 8)
+#define NFC_V3_CONFIG3_NUM_OF_DEVICES(x) (((x) & 0x7) << 12)
+#define NFC_V3_CONFIG3_RBB_MODE (1 << 15)
+#define NFC_V3_CONFIG3_NO_SDMA (1 << 20)
+
+#define NFC_V3_IPC (host->regs_ip + 0x2C)
+#define NFC_V3_IPC_CREQ (1 << 0)
+#define NFC_V3_IPC_INT (1 << 31)
+
+#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
+
+struct mxc_nand_host;
+
+struct mxc_nand_devtype_data {
+ void (*preset)(struct mtd_info *);
+ void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_page)(struct mtd_info *, unsigned int);
+ void (*send_read_id)(struct mxc_nand_host *);
+ uint16_t (*get_dev_status)(struct mxc_nand_host *);
+ int (*check_int)(struct mxc_nand_host *);
+ void (*irq_control)(struct mxc_nand_host *, int);
+ u32 (*get_ecc_status)(struct mxc_nand_host *);
+ struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
+ void (*select_chip)(struct mtd_info *mtd, int chip);
+ int (*correct_data)(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc);
+
+ /*
+ * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
+ * (CONFIG1:INT_MSK is set). To handle this the driver uses
+ * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
+ */
+ int irqpending_quirk;
+ int needs_ip;
+
+ size_t regs_offset;
+ size_t spare0_offset;
+ size_t axi_offset;
+
+ int spare_len;
+ int eccbytes;
+ int eccsize;
+ int ppb_shift;
+};
+
+struct mxc_nand_host {
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ struct device *dev;
+
+ void __iomem *spare0;
+ void __iomem *main_area0;
+
+ void __iomem *base;
+ void __iomem *regs;
+ void __iomem *regs_axi;
+ void __iomem *regs_ip;
+ int status_request;
+ struct clk *clk;
+ int clk_act;
+ int irq;
+ int eccsize;
+ int active_cs;
+
+ struct completion op_completion;
+
+ uint8_t *data_buf;
+ unsigned int buf_start;
+
+ const struct mxc_nand_devtype_data *devtype_data;
+ struct mxc_nand_platform_data pdata;
+};
+
+/* OOB placement block for use with hardware ecc generation */
+static struct nand_ecclayout nandv1_hw_eccoob_smallpage = {
+ .eccbytes = 5,
+ .eccpos = {6, 7, 8, 9, 10},
+ .oobfree = {{0, 5}, {12, 4}, }
+};
+
+static struct nand_ecclayout nandv1_hw_eccoob_largepage = {
+ .eccbytes = 20,
+ .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
+ 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
+ .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
+};
+
+/* OOB description for 512 byte pages with 16 byte OOB */
+static struct nand_ecclayout nandv2_hw_eccoob_smallpage = {
+ .eccbytes = 1 * 9,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15
+ },
+ .oobfree = {
+ {.offset = 0, .length = 5}
+ }
+};
+
+/* OOB description for 2048 byte pages with 64 byte OOB */
+static struct nand_ecclayout nandv2_hw_eccoob_largepage = {
+ .eccbytes = 4 * 9,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63
+ },
+ .oobfree = {
+ {.offset = 2, .length = 4},
+ {.offset = 16, .length = 7},
+ {.offset = 32, .length = 7},
+ {.offset = 48, .length = 7}
+ }
+};
+
+/* OOB description for 4096 byte pages with 128 byte OOB */
+static struct nand_ecclayout nandv2_hw_eccoob_4k = {
+ .eccbytes = 8 * 9,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ },
+ .oobfree = {
+ {.offset = 2, .length = 4},
+ {.offset = 16, .length = 7},
+ {.offset = 32, .length = 7},
+ {.offset = 48, .length = 7},
+ {.offset = 64, .length = 7},
+ {.offset = 80, .length = 7},
+ {.offset = 96, .length = 7},
+ {.offset = 112, .length = 7},
+ }
+};
+
+static const char * const part_probes[] = {
+ "cmdlinepart", "RedBoot", "ofpart", NULL };
+
+static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
+{
+ int i;
+ u32 *t = trg;
+ const __iomem u32 *s = src;
+
+ for (i = 0; i < (size >> 2); i++)
+ *t++ = __raw_readl(s++);
+}
+
+static inline void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+ /* __iowrite32_copy() takes a count of 32-bit words, so divide by 4 */
+ __iowrite32_copy(trg, src, size / 4);
+}
+
+static int check_int_v3(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_IPC);
+ if (!(tmp & NFC_V3_IPC_INT))
+ return 0;
+
+ tmp &= ~NFC_V3_IPC_INT;
+ writel(tmp, NFC_V3_IPC);
+
+ return 1;
+}
+
+static int check_int_v1_v2(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+ tmp = readw(NFC_V1_V2_CONFIG2);
+ if (!(tmp & NFC_V1_V2_CONFIG2_INT))
+ return 0;
+
+ if (!host->devtype_data->irqpending_quirk)
+ writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
+
+ return 1;
+}
+
+static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
+{
+ uint16_t tmp;
+
+ tmp = readw(NFC_V1_V2_CONFIG1);
+
+ if (activate)
+ tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
+ else
+ tmp |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ writew(tmp, NFC_V1_V2_CONFIG1);
+}
+
+static void irq_control_v3(struct mxc_nand_host *host, int activate)
+{
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_CONFIG2);
+
+ if (activate)
+ tmp &= ~NFC_V3_CONFIG2_INT_MSK;
+ else
+ tmp |= NFC_V3_CONFIG2_INT_MSK;
+
+ writel(tmp, NFC_V3_CONFIG2);
+}
+
+static void irq_control(struct mxc_nand_host *host, int activate)
+{
+ if (host->devtype_data->irqpending_quirk) {
+ if (activate)
+ enable_irq(host->irq);
+ else
+ disable_irq_nosync(host->irq);
+ } else {
+ host->devtype_data->irq_control(host, activate);
+ }
+}
+
+static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+{
+ return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+{
+ return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+{
+ return readl(NFC_V3_ECC_STATUS_RESULT);
+}
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+ struct mxc_nand_host *host = dev_id;
+
+ if (!host->devtype_data->check_int(host))
+ return IRQ_NONE;
+
+ irq_control(host, 0);
+
+ complete(&host->op_completion);
+
+ return IRQ_HANDLED;
+}
+
+/* This function polls the NANDFC to wait for the basic operation to
+ * complete by checking the INT bit of the config2 register.
+ */
+static int wait_op_done(struct mxc_nand_host *host, int useirq)
+{
+ int ret = 0;
+
+ /*
+ * If operation is already complete, don't bother to setup an irq or a
+ * loop.
+ */
+ if (host->devtype_data->check_int(host))
+ return 0;
+
+ if (useirq) {
+ unsigned long timeout;
+
+ reinit_completion(&host->op_completion);
+
+ irq_control(host, 1);
+
+ timeout = wait_for_completion_timeout(&host->op_completion, HZ);
+ if (!timeout && !host->devtype_data->check_int(host)) {
+ dev_dbg(host->dev, "timeout waiting for irq\n");
+ ret = -ETIMEDOUT;
+ }
+ } else {
+ int max_retries = 8000;
+ int done;
+
+ do {
+ udelay(1);
+
+ done = host->devtype_data->check_int(host);
+ if (done)
+ break;
+
+ } while (--max_retries);
+
+ if (!done) {
+ dev_dbg(host->dev, "timeout polling for completion\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ WARN_ONCE(ret < 0, "timeout! useirq=%d\n", useirq);
+
+ return ret;
+}
+
+static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+ /* fill command */
+ writel(cmd, NFC_V3_FLASH_CMD);
+
+ /* send out command */
+ writel(NFC_CMD, NFC_V3_LAUNCH);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+}
+
+/* This function issues the specified command to the NAND device and
+ * waits for completion. */
+static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+ pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+
+ writew(cmd, NFC_V1_V2_FLASH_CMD);
+ writew(NFC_CMD, NFC_V1_V2_CONFIG2);
+
+ if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
+ int max_retries = 100;
+ /* Reset completion is indicated by NFC_CONFIG2 being set to 0 */
+ while (max_retries-- > 0) {
+ if (readw(NFC_V1_V2_CONFIG2) == 0) {
+ break;
+ }
+ udelay(1);
+ }
+ if (max_retries < 0)
+ pr_debug("%s: RESET failed\n", __func__);
+ } else {
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+ }
+}
+
+static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+ /* fill address */
+ writel(addr, NFC_V3_FLASH_ADDR0);
+
+ /* send out address */
+ writel(NFC_ADDR, NFC_V3_LAUNCH);
+
+ wait_op_done(host, 0);
+}
+
+/* This function sends an address (or partial address) to the
+ * NAND device. The address is used to select the source/destination for
+ * a NAND command. */
+static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+ pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
+
+ writew(addr, NFC_V1_V2_FLASH_ADDR);
+ writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, islast);
+}
+
+static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_CONFIG1);
+ tmp &= ~(7 << 4);
+ writel(tmp, NFC_V3_CONFIG1);
+
+ /* transfer data from NFC ram to nand */
+ writel(ops, NFC_V3_LAUNCH);
+
+ wait_op_done(host, false);
+}
+
+static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ writew(ops, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+}
+
+static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ int bufs, i;
+
+ if (mtd->writesize > 512)
+ bufs = 4;
+ else
+ bufs = 1;
+
+ for (i = 0; i < bufs; i++) {
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
+
+ writew(ops, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+ }
+}
+
+static void send_read_id_v3(struct mxc_nand_host *host)
+{
+ /* Read ID into main buffer */
+ writel(NFC_ID, NFC_V3_LAUNCH);
+
+ wait_op_done(host, true);
+
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
+}
+
+/* Request the NANDFC to perform a read of the NAND device ID. */
+static void send_read_id_v1_v2(struct mxc_nand_host *host)
+{
+ /* NANDFC buffer 0 is used for device ID output */
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ writew(NFC_ID, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
+}
+
+static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
+{
+ writew(NFC_STATUS, NFC_V3_LAUNCH);
+ wait_op_done(host, true);
+
+ return readl(NFC_V3_CONFIG1) >> 16;
+}
+
+/* This function requests the NANDFC to perform a read of the
+ * NAND device status and returns the current status. */
+static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
+{
+ void __iomem *main_buf = host->main_area0;
+ uint32_t store;
+ uint16_t ret;
+
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ /*
+ * The device status is stored in main_area0. To
+ * prevent corruption of the buffer save the value
+ * and restore it afterwards.
+ */
+ store = readl(main_buf);
+
+ writew(NFC_STATUS, NFC_V1_V2_CONFIG2);
+ wait_op_done(host, true);
+
+ ret = readw(main_buf);
+
+ writel(store, main_buf);
+
+ return ret;
+}
+
+/* This function is used by the upper layer to check if the device is ready */
+static int mxc_nand_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles R/B internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ /*
+ * If HW ECC is enabled, we turn it on during init. There is
+ * no need to enable again here.
+ */
+}
+
+static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ /*
+ * 1-Bit errors are automatically corrected in HW. No need for
+ * additional correction. 2-Bit errors cannot be corrected by
+ * HW ECC, so we need to return failure
+ */
+ uint16_t ecc_status = get_ecc_status_v1(host);
+
+ if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
+ pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ u32 ecc_stat, err;
+ int no_subpages = 1;
+ int ret = 0;
+ u8 ecc_bit_mask, err_limit;
+
+ ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
+ err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
+
+ no_subpages = mtd->writesize >> 9;
+
+ ecc_stat = host->devtype_data->get_ecc_status(host);
+
+ do {
+ err = ecc_stat & ecc_bit_mask;
+ if (err > err_limit) {
+ printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
+ return -1;
+ } else {
+ ret += err;
+ }
+ ecc_stat >>= 4;
+ } while (--no_subpages);
+
+ pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+
+ return ret;
+}
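+
+/*
+ * Illustrative decode of the loop above: a 2048-byte page has four
+ * 512-byte subpages, so four 4-bit error counts are packed into the
+ * status word. With 4-bit ECC (mask 0x7, limit 4), a status of 0x0021
+ * means one corrected symbol in subpage 0 and two in subpage 1, and
+ * the function returns 3.
+ */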
+
+static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ return 0;
+}
+
+static u_char mxc_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint8_t ret;
+
+ /* Check for status request */
+ if (host->status_request)
+ return host->devtype_data->get_dev_status(host) & 0xFF;
+
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ /* only take the lower byte of each word */
+ ret = *(uint16_t *)(host->data_buf + host->buf_start);
+
+ host->buf_start += 2;
+ } else {
+ ret = *(uint8_t *)(host->data_buf + host->buf_start);
+ host->buf_start++;
+ }
+
+ pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
+ return ret;
+}
+
+static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t ret;
+
+ ret = *(uint16_t *)(host->data_buf + host->buf_start);
+ host->buf_start += 2;
+
+ return ret;
+}
+
+/* Write data of length len from buffer buf. The data to be written to
+ * the NAND Flash is first copied to the RAM buffer; the NFC then writes
+ * it to the NAND Flash during the Data Input operation. */
+static void mxc_nand_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ u16 col = host->buf_start;
+ int n = mtd->oobsize + mtd->writesize - col;
+
+ n = min(n, len);
+
+ memcpy(host->data_buf + col, buf, n);
+
+ host->buf_start += n;
+}
+
+/* Read the data buffer from the NAND Flash. To read data from the NAND
+ * Flash, the data output cycle is first initiated by the NFC, which copies
+ * the data to the RAM buffer. This data of length len is then copied to
+ * buffer buf.
+ */
+static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ u16 col = host->buf_start;
+ int n = mtd->oobsize + mtd->writesize - col;
+
+ n = min(n, len);
+
+ memcpy(buf, host->data_buf + col, n);
+
+ host->buf_start += n;
+}
+
+/* This function is used by the upper layer to select and
+ * deselect the NAND chip */
+static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ if (chip == -1) {
+ /* Disable the NFC clock */
+ if (host->clk_act) {
+ clk_disable_unprepare(host->clk);
+ host->clk_act = 0;
+ }
+ return;
+ }
+
+ if (!host->clk_act) {
+ /* Enable the NFC clock */
+ clk_prepare_enable(host->clk);
+ host->clk_act = 1;
+ }
+}
+
+static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ if (chip == -1) {
+ /* Disable the NFC clock */
+ if (host->clk_act) {
+ clk_disable_unprepare(host->clk);
+ host->clk_act = 0;
+ }
+ return;
+ }
+
+ if (!host->clk_act) {
+ /* Enable the NFC clock */
+ clk_prepare_enable(host->clk);
+ host->clk_act = 1;
+ }
+
+ host->active_cs = chip;
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+}
+
+/*
+ * Function to transfer data to/from spare area.
+ */
+static void copy_spare(struct mtd_info *mtd, bool bfrom)
+{
+ struct nand_chip *this = mtd->priv;
+ struct mxc_nand_host *host = this->priv;
+ u16 i, j;
+ u16 n = mtd->writesize >> 9;
+ u8 *d = host->data_buf + mtd->writesize;
+ u8 __iomem *s = host->spare0;
+ u16 t = host->devtype_data->spare_len;
+
+ j = (mtd->oobsize / n >> 1) << 1;
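+ /*
+  * e.g. writesize 2048 -> n = 4 sections; oobsize 64 gives
+  * j = (64 / 4) rounded down to an even value = 16 bytes per section.
+  */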
+
+ if (bfrom) {
+ for (i = 0; i < n - 1; i++)
+ memcpy32_fromio(d + i * j, s + i * t, j);
+
+ /* the last section */
+ memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
+ } else {
+ for (i = 0; i < n - 1; i++)
+ memcpy32_toio(&s[i * t], &d[i * j], j);
+
+ /* the last section */
+ memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+ }
+}
+
+/*
+ * MXC NANDFC can only perform full page+spare or spare-only read/write. When
+ * the upper layers perform a read/write buf operation, the saved column address
+ * is used to index into the full page. So usually this function is called with
+ * column == 0 (unless no column cycle is needed, indicated by column == -1)
+ */
+static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ /* Write out column address, if necessary */
+ if (column != -1) {
+ host->devtype_data->send_addr(host, column & 0xff,
+ page_addr == -1);
+ if (mtd->writesize > 512)
+ /* another col addr cycle for 2k page */
+ host->devtype_data->send_addr(host,
+ (column >> 8) & 0xff,
+ false);
+ }
+
+ /* Write out page address, if necessary */
+ if (page_addr != -1) {
+ /* paddr_0 - p_addr_7 */
+ host->devtype_data->send_addr(host, (page_addr & 0xff), false);
+
+ if (mtd->writesize > 512) {
+ if (mtd->size >= 0x10000000) {
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
+ } else
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
+ } else {
+ /* One more address cycle for higher density devices */
+ if (mtd->size >= 0x4000000) {
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
+ } else
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
+ }
+ }
+}
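+
+/*
+ * Example: a 2 KiB-page chip of 256 MiB (0x10000000) or more gets two
+ * column cycles plus three row cycles; smaller large-page chips get
+ * only two row cycles. For 512-byte-page chips the extra row cycle
+ * kicks in at 64 MiB (0x4000000) instead.
+ */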
+
+/*
+ * v2 and v3 type controllers can do 4bit or 8bit ecc depending
+ * on how much oob the nand chip has. For 8bit ecc we need at least
+ * 26 bytes of oob data per 512 byte block.
+ */
+static int get_eccsize(struct mtd_info *mtd)
+{
+ int oobbytes_per_512 = 0;
+
+ oobbytes_per_512 = mtd->oobsize * 512 / mtd->writesize;
+
+ if (oobbytes_per_512 < 26)
+ return 4;
+ else
+ return 8;
+}
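+
+/*
+ * Example: a 2048-byte page with 64 bytes OOB has 64 * 512 / 2048 = 16
+ * OOB bytes per 512-byte block, below the 26-byte threshold, so 4-bit
+ * ECC is selected; a 4096-byte page with 218 bytes OOB yields 27 and
+ * therefore 8-bit ECC.
+ */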
+
+static void preset_v1(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t config1 = 0;
+
+ if (nand_chip->ecc.mode == NAND_ECC_HW && mtd->writesize)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ if (!host->devtype_data->irqpending_quirk)
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ host->eccsize = 1;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+ /* Blocks to be unlocked */
+ writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v2(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t config1 = 0;
+
+ config1 |= NFC_V2_CONFIG1_FP_INT;
+
+ if (!host->devtype_data->irqpending_quirk)
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ if (mtd->writesize) {
+ uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+
+ if (nand_chip->ecc.mode == NAND_ECC_HW)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ host->eccsize = get_eccsize(mtd);
+ if (host->eccsize == 4)
+ config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+
+ config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
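+ /*
+  * The PPB field written above maps 32/64/128/256 pages per block
+  * to field values 0..3 via ffs(): e.g. 64 -> ffs() = 7 -> 7 - 6 = 1.
+  */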
+ } else {
+ host->eccsize = 1;
+ }
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+ /* Blocks to be unlocked */
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v3(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mxc_nand_host *host = chip->priv;
+ uint32_t config2, config3;
+ int i, addr_phases;
+
+ writel(NFC_V3_CONFIG1_RBA(0), NFC_V3_CONFIG1);
+ writel(NFC_V3_IPC_CREQ, NFC_V3_IPC);
+
+ /* Unlock the internal RAM Buffer */
+ writel(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK,
+ NFC_V3_WRPROT);
+
+ /* Blocks to be unlocked */
+ for (i = 0; i < NAND_MAX_CHIPS; i++)
+ writel(0x0 | (0xffff << 16),
+ NFC_V3_WRPROT_UNLOCK_BLK_ADD0 + (i << 2));
+
+ writel(0, NFC_V3_IPC);
+
+ config2 = NFC_V3_CONFIG2_ONE_CYCLE |
+ NFC_V3_CONFIG2_2CMD_PHASES |
+ NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
+ NFC_V3_CONFIG2_ST_CMD(0x70) |
+ NFC_V3_CONFIG2_INT_MSK |
+ NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
+
+ addr_phases = fls(chip->pagemask) >> 3;
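+ /*
+  * fls(pagemask) >> 3 yields the number of whole row-address bytes,
+  * e.g. pagemask 0xFFFF -> fls() = 16 -> 2 address phases.
+  */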
+
+ if (mtd->writesize == 2048) {
+ config2 |= NFC_V3_CONFIG2_PS_2048;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+ } else if (mtd->writesize == 4096) {
+ config2 |= NFC_V3_CONFIG2_PS_4096;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+ } else {
+ config2 |= NFC_V3_CONFIG2_PS_512;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases - 1);
+ }
+
+ if (mtd->writesize) {
+ if (chip->ecc.mode == NAND_ECC_HW)
+ config2 |= NFC_V3_CONFIG2_ECC_EN;
+
+ config2 |= NFC_V3_CONFIG2_PPB(
+ ffs(mtd->erasesize / mtd->writesize) - 6,
+ host->devtype_data->ppb_shift);
+ host->eccsize = get_eccsize(mtd);
+ if (host->eccsize == 8)
+ config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
+ }
+
+ writel(config2, NFC_V3_CONFIG2);
+
+ config3 = NFC_V3_CONFIG3_NUM_OF_DEVICES(0) |
+ NFC_V3_CONFIG3_NO_SDMA |
+ NFC_V3_CONFIG3_RBB_MODE |
+ NFC_V3_CONFIG3_SBB(6) | /* Reset default */
+ NFC_V3_CONFIG3_ADD_OP(0);
+
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ config3 |= NFC_V3_CONFIG3_FW8;
+
+ writel(config3, NFC_V3_CONFIG3);
+
+ writel(0, NFC_V3_DELAY_LINE);
+}
+
+/* Used by the upper layer to send commands to the NAND Flash for the
+ * various operations to be carried out on it */
+static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ command, column, page_addr);
+
+ /* Reset command state information */
+ host->status_request = false;
+
+ /* Command pre-processing step */
+ switch (command) {
+ case NAND_CMD_RESET:
+ host->devtype_data->preset(mtd);
+ host->devtype_data->send_cmd(host, command, false);
+ break;
+
+ case NAND_CMD_STATUS:
+ host->buf_start = 0;
+ host->status_request = true;
+
+ host->devtype_data->send_cmd(host, command, true);
+ WARN_ONCE(column != -1 || page_addr != -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ if (command == NAND_CMD_READ0)
+ host->buf_start = column;
+ else
+ host->buf_start = column + mtd->writesize;
+
+ command = NAND_CMD_READ0; /* only READ0 is valid */
+
+ host->devtype_data->send_cmd(host, command, false);
+ WARN_ONCE(column < 0,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
+ mxc_do_addr_cycle(mtd, 0, page_addr);
+
+ if (mtd->writesize > 512)
+ host->devtype_data->send_cmd(host,
+ NAND_CMD_READSTART, true);
+
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+
+ memcpy32_fromio(host->data_buf, host->main_area0,
+ mtd->writesize);
+ copy_spare(mtd, true);
+ break;
+
+ case NAND_CMD_SEQIN:
+ if (column >= mtd->writesize)
+ /* call ourself to read a page */
+ mxc_nand_command(mtd, NAND_CMD_READ0, 0, page_addr);
+
+ host->buf_start = column;
+
+ host->devtype_data->send_cmd(host, command, false);
+ WARN_ONCE(column < -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
+ mxc_do_addr_cycle(mtd, 0, page_addr);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
+ copy_spare(mtd, false);
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+ host->devtype_data->send_cmd(host, command, true);
+ WARN_ONCE(column != -1 || page_addr != -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_READID:
+ host->devtype_data->send_cmd(host, command, true);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ host->devtype_data->send_read_id(host);
+ host->buf_start = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ host->devtype_data->send_cmd(host, command, false);
+ WARN_ONCE(column != -1,
+ "Unexpected column value (cmd=%u, col=%d)\n",
+ command, column);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+
+ break;
+ case NAND_CMD_PARAM:
+ host->devtype_data->send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+ memcpy32_fromio(host->data_buf, host->main_area0, 512);
+ host->buf_start = 0;
+ break;
+ default:
+ WARN_ONCE(1, "Unimplemented command (cmd=%u)\n",
+ command);
+ break;
+ }
+}
+
+/*
+ * The generic flash bbt descriptors overlap with our ecc
+ * hardware, so define some i.MX specific ones.
+ */
+static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
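+
+/*
+ * Both descriptors keep the 4-byte pattern and the version byte in the
+ * first five OOB bytes, ahead of the ECC bytes (the eccpos arrays in
+ * the layouts above start at offset 6 or 7), so the BBT metadata never
+ * collides with the hardware ECC.
+ */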
+
+/* v1 + irqpending_quirk: i.MX21 */
+static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
+ .preset = preset_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v1,
+ .irqpending_quirk = 1,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
+static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
+ .preset = preset_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v1,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .axi_offset = 0,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v21: i.MX25, i.MX35 */
+static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
+ .preset = preset_v2,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v2,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v2,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_4k,
+ .select_chip = mxc_nand_select_chip_v2,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0x1e00,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0,
+ .spare_len = 64,
+ .eccbytes = 9,
+ .eccsize = 0,
+};
+
+/* v3.2a: i.MX51 */
+static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
+ .preset = preset_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+ .ppb_shift = 7,
+};
+
+/* v3.2b: i.MX53 */
+static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
+ .preset = preset_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+ .ppb_shift = 8,
+};
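+
+/*
+ * The differing ppb_shift values above reflect the pages-per-block field
+ * sitting at a different bit position in the v3.2a and v3.2b controller
+ * configuration.
+ */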
+
+static inline int is_imx21_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx21_nand_devtype_data;
+}
+
+static inline int is_imx27_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx27_nand_devtype_data;
+}
+
+static inline int is_imx25_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx25_nand_devtype_data;
+}
+
+static inline int is_imx51_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx51_nand_devtype_data;
+}
+
+static inline int is_imx53_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx53_nand_devtype_data;
+}
+
+static struct platform_device_id mxcnd_devtype[] = {
+ {
+ .name = "imx21-nand",
+ .driver_data = (kernel_ulong_t) &imx21_nand_devtype_data,
+ }, {
+ .name = "imx27-nand",
+ .driver_data = (kernel_ulong_t) &imx27_nand_devtype_data,
+ }, {
+ .name = "imx25-nand",
+ .driver_data = (kernel_ulong_t) &imx25_nand_devtype_data,
+ }, {
+ .name = "imx51-nand",
+ .driver_data = (kernel_ulong_t) &imx51_nand_devtype_data,
+ }, {
+ .name = "imx53-nand",
+ .driver_data = (kernel_ulong_t) &imx53_nand_devtype_data,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxcnd_devtype);
+
+#ifdef CONFIG_OF_MTD
+static const struct of_device_id mxcnd_dt_ids[] = {
+ {
+ .compatible = "fsl,imx21-nand",
+ .data = &imx21_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx27-nand",
+ .data = &imx27_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx25-nand",
+ .data = &imx25_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx51-nand",
+ .data = &imx51_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx53-nand",
+ .data = &imx53_nand_devtype_data,
+ },
+ { /* sentinel */ }
+};
+
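+/*
+ * Returns 0 on success, a negative error code on failure, and a positive
+ * value when there is no device tree node, in which case the caller falls
+ * back to platform data.
+ */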
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+ struct device_node *np = host->dev->of_node;
+ struct mxc_nand_platform_data *pdata = &host->pdata;
+ const struct of_device_id *of_id =
+ of_match_device(mxcnd_dt_ids, host->dev);
+ int buswidth;
+
+ if (!np)
+ return 1;
+
+ if (of_get_nand_ecc_mode(np) >= 0)
+ pdata->hw_ecc = 1;
+
+ pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ buswidth = of_get_nand_bus_width(np);
+ if (buswidth < 0)
+ return buswidth;
+
+ pdata->width = buswidth / 8;
+
+ host->devtype_data = of_id->data;
+
+ return 0;
+}
+#else
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+ return 1;
+}
+#endif
+
+static int mxcnd_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ struct mxc_nand_host *host;
+ struct resource *res;
+ int err = 0;
+
+ /* Allocate memory for MTD device structure and private data */
+ host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host),
+ GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ /* allocate a temporary buffer for the nand_scan_ident() */
+ host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
+ if (!host->data_buf)
+ return -ENOMEM;
+
+ host->dev = &pdev->dev;
+ /* structures must be linked */
+ this = &host->nand;
+ mtd = &host->mtd;
+ mtd->priv = this;
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+ mtd->name = DRIVER_NAME;
+
+ /* 5 us command delay time */
+ this->chip_delay = 5;
+
+ this->priv = host;
+ this->dev_ready = mxc_nand_dev_ready;
+ this->cmdfunc = mxc_nand_command;
+ this->read_byte = mxc_nand_read_byte;
+ this->read_word = mxc_nand_read_word;
+ this->write_buf = mxc_nand_write_buf;
+ this->read_buf = mxc_nand_read_buf;
+
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ err = mxcnd_probe_dt(host);
+ if (err > 0) {
+ struct mxc_nand_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ host->pdata = *pdata;
+ host->devtype_data = (struct mxc_nand_devtype_data *)
+ pdev->id_entry->driver_data;
+ } else {
+ err = -ENODEV;
+ }
+ }
+ if (err < 0)
+ return err;
+
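+ /*
+ * Controllers with a separate IP register block (needs_ip, i.e. v3)
+ * expose it as the first memory resource; the buffer area then
+ * follows as the second resource.
+ */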
+ if (host->devtype_data->needs_ip) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs_ip))
+ return PTR_ERR(host->regs_ip);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ }
+
+ host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
+
+ host->main_area0 = host->base;
+
+ if (host->devtype_data->regs_offset)
+ host->regs = host->base + host->devtype_data->regs_offset;
+ host->spare0 = host->base + host->devtype_data->spare0_offset;
+ if (host->devtype_data->axi_offset)
+ host->regs_axi = host->base + host->devtype_data->axi_offset;
+
+ this->ecc.bytes = host->devtype_data->eccbytes;
+ host->eccsize = host->devtype_data->eccsize;
+
+ this->select_chip = host->devtype_data->select_chip;
+ this->ecc.size = 512;
+ this->ecc.layout = host->devtype_data->ecclayout_512;
+
+ if (host->pdata.hw_ecc) {
+ this->ecc.calculate = mxc_nand_calculate_ecc;
+ this->ecc.hwctl = mxc_nand_enable_hwecc;
+ this->ecc.correct = host->devtype_data->correct_data;
+ this->ecc.mode = NAND_ECC_HW;
+ } else {
+ this->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ /* NAND bus width determines access functions used by upper layer */
+ if (host->pdata.width == 2)
+ this->options |= NAND_BUSWIDTH_16;
+
+ if (host->pdata.flash_bbt) {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ /* update flash based bbt */
+ this->bbt_options |= NAND_BBT_USE_FLASH;
+ }
+
+ init_completion(&host->op_completion);
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
+ return host->irq;
+
+ /*
+ * Use host->devtype_data->irq_control() here instead of irq_control()
+ * because we must not disable_irq_nosync without having requested the
+ * irq.
+ */
+ host->devtype_data->irq_control(host, 0);
+
+ err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
+ 0, DRIVER_NAME, host);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(host->clk);
+ if (err)
+ return err;
+ host->clk_act = 1;
+
+ /*
+ * Now that we "own" the interrupt make sure the interrupt mask bit is
+ * cleared on i.MX21. Otherwise we can't read the interrupt status bit
+ * on this machine.
+ */
+ if (host->devtype_data->irqpending_quirk) {
+ disable_irq_nosync(host->irq);
+ host->devtype_data->irq_control(host, 1);
+ }
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL)) {
+ err = -ENXIO;
+ goto escan;
+ }
+
+ /* allocate the right size buffer now */
+ devm_kfree(&pdev->dev, (void *)host->data_buf);
+ host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!host->data_buf) {
+ err = -ENOMEM;
+ goto escan;
+ }
+
+ /* Call preset again, with correct writesize this time */
+ host->devtype_data->preset(mtd);
+
+ if (mtd->writesize == 2048)
+ this->ecc.layout = host->devtype_data->ecclayout_2k;
+ else if (mtd->writesize == 4096)
+ this->ecc.layout = host->devtype_data->ecclayout_4k;
+
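+ /*
+ * The v1 controllers correct a single bit per 512-byte step; the
+ * v2/v3 controllers correct 4 or 8 bits per step, as reflected in
+ * host->eccsize after preset() has run.
+ */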
+ if (this->ecc.mode == NAND_ECC_HW) {
+ if (is_imx21_nfc(host) || is_imx27_nfc(host))
+ this->ecc.strength = 1;
+ else
+ this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
+ err = -ENXIO;
+ goto escan;
+ }
+
+ /* Register the partitions */
+ mtd_device_parse_register(mtd, part_probes,
+ &(struct mtd_part_parser_data){
+ .of_node = pdev->dev.of_node,
+ },
+ host->pdata.parts,
+ host->pdata.nr_parts);
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+escan:
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
+
+ return err;
+}
+
+static int mxcnd_remove(struct platform_device *pdev)
+{
+ struct mxc_nand_host *host = platform_get_drvdata(pdev);
+
+ nand_release(&host->mtd);
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static struct platform_driver mxcnd_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(mxcnd_dt_ids),
+ },
+ .id_table = mxcnd_devtype,
+ .probe = mxcnd_probe,
+ .remove = mxcnd_remove,
+};
+module_platform_driver(mxcnd_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
new file mode 100644
index 000000000..c2e1232cd
--- /dev/null
+++ b/drivers/mtd/nand/nand_base.c
@@ -0,0 +1,4286 @@
+/*
+ * drivers/mtd/nand.c
+ *
+ * Overview:
+ * This is the generic MTD driver for NAND flash devices. It should be
+ * capable of working with almost all NAND chips currently available.
+ *
+ * Additional technical information is available on
+ * http://www.linux-mtd.infradead.org/doc/nand.html
+ *
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * TODO:
+ * Enable cached programming for 2k page size chips
+ * Check, if mtd->ecctype should be set to MTD_ECC_HW
+ * if we have HW ECC support.
+ * BBT table is not serialized, has to be fixed
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/leds.h>
+#include <linux/io.h>
+#include <linux/mtd/partitions.h>
+
+/* Define default oob placement schemes for large and small page devices */
+static struct nand_ecclayout nand_oob_8 = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ {.offset = 3,
+ .length = 2},
+ {.offset = 6,
+ .length = 2} }
+};
+
+static struct nand_ecclayout nand_oob_16 = {
+ .eccbytes = 6,
+ .eccpos = {0, 1, 2, 3, 6, 7},
+ .oobfree = {
+ {.offset = 8,
+ .length = 8} }
+};
+
+static struct nand_ecclayout nand_oob_64 = {
+ .eccbytes = 24,
+ .eccpos = {
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = {
+ {.offset = 2,
+ .length = 38} }
+};
+
+static struct nand_ecclayout nand_oob_128 = {
+ .eccbytes = 48,
+ .eccpos = {
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127},
+ .oobfree = {
+ {.offset = 2,
+ .length = 78} }
+};
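+
+/*
+ * These defaults are keyed to the OOB size (8, 16, 64 or 128 bytes) and
+ * are used when a driver does not supply its own ECC layout.
+ */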
+
+static int nand_get_device(struct mtd_info *mtd, int new_state);
+
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops);
+
+/*
+ * For devices which display every fart in the system on a separate LED. Is
+ * compiled away when LED support is disabled.
+ */
+DEFINE_LED_TRIGGER(nand_led_trigger);
+
+static int check_offs_len(struct mtd_info *mtd,
+ loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: length not block aligned\n", __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_release_device - [GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Release chip lock and wake up anyone waiting on the device.
+ */
+static void nand_release_device(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ /* Release the controller and the chip */
+ spin_lock(&chip->controller->lock);
+ chip->controller->active = NULL;
+ chip->state = FL_READY;
+ wake_up(&chip->controller->wq);
+ spin_unlock(&chip->controller->lock);
+}
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 8bit buswidth
+ */
+static uint8_t nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return readb(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
+ */
+static uint8_t nand_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
+}
+
+/**
+ * nand_read_word - [DEFAULT] read one word from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth without endianness conversion.
+ */
+static u16 nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return readw(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @mtd: MTD device structure
+ * @chipnr: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ switch (chipnr) {
+ case -1:
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ chip->write_buf(mtd, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint16_t word = byte;
+
+ /*
+ * It's not entirely clear what should happen to I/O[15:8] when writing
+ * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+ *
+ * When the host supports a 16-bit bus width, only data is
+ * transferred at the 16-bit width. All address and command line
+ * transfers shall use only the lower 8-bits of the data bus. During
+ * command transfers, the host may place any value on the upper
+ * 8-bits of the data bus. During address transfers, the host shall
+ * set the upper 8-bits of the data bus to 00h.
+ *
+ * One user of the write_byte callback is nand_onfi_set_features. The
+ * four parameters are specified to be written to I/O[7:0], but this is
+ * neither an address nor a command transfer. Let's assume a 0 on the
+ * upper I/O lines is OK.
+ */
+ chip->write_buf(mtd, (uint8_t *)&word, 2);
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8bit buswidth.
+ */
+static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ iowrite8_rep(chip->IO_ADDR_W, buf, len);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8bit buswidth.
+ */
+static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ ioread8_rep(chip->IO_ADDR_R, buf, len);
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth.
+ */
+static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ u16 *p = (u16 *) buf;
+
+ iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth.
+ */
+static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ u16 *p = (u16 *) buf;
+
+ ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
+}
+
+/**
+ * nand_block_bad - [DEFAULT] Read bad block marker from the chip
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ *
+ * Check, if the block is bad.
+ */
+static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ int page, chipnr, res = 0, i = 0;
+ struct nand_chip *chip = mtd->priv;
+ u16 bad;
+
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ if (getchip) {
+ chipnr = (int)(ofs >> chip->chip_shift);
+
+ nand_get_device(mtd, FL_READING);
+
+ /* Select the NAND device */
+ chip->select_chip(mtd, chipnr);
+ }
+
+ do {
+ if (chip->options & NAND_BUSWIDTH_16) {
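+ /*
+ * 16-bit parts must be read at an even column; pick the
+ * high or low byte of the word depending on badblockpos
+ * parity (e.g. badblockpos = 5 reads column 4 and uses
+ * the high byte).
+ */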
+ chip->cmdfunc(mtd, NAND_CMD_READOOB,
+ chip->badblockpos & 0xFE, page);
+ bad = cpu_to_le16(chip->read_word(mtd));
+ if (chip->badblockpos & 0x1)
+ bad >>= 8;
+ else
+ bad &= 0xFF;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
+ page);
+ bad = chip->read_byte(mtd);
+ }
+
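+ /*
+ * A block is considered bad when fewer than badblockbits bits
+ * are set in the marker; in the common badblockbits == 8 case
+ * that means anything other than 0xFF.
+ */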
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+ ofs += mtd->writesize;
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ i++;
+ } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
+
+ if (getchip) {
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+ }
+
+ return res;
+}
+
+/**
+ * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This is the default implementation, which can be overridden by a hardware
+ * specific driver. It provides the details for writing a bad block marker to a
+ * block.
+ */
+static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
+ uint8_t buf[2] = { 0, 0 };
+ int ret = 0, res, i = 0;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.oobbuf = buf;
+ ops.ooboffs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ops.ooboffs &= ~0x01;
+ ops.len = ops.ooblen = 2;
+ } else {
+ ops.len = ops.ooblen = 1;
+ }
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ /* Write to first/last page(s) if necessary */
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+ do {
+ res = nand_do_write_oob(mtd, ofs, &ops);
+ if (!ret)
+ ret = res;
+
+ i++;
+ ofs += mtd->writesize;
+ } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+
+ return ret;
+}
+
+/**
+ * nand_block_markbad_lowlevel - mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This function performs the generic NAND bad block marking steps (i.e., bad
+ * block table(s) and/or marker(s)). We only allow the hardware driver to
+ * specify how to write bad block markers to OOB (chip->block_markbad).
+ *
+ * We try operations in the following order:
+ * (1) erase the affected block, to allow OOB marker to be written cleanly
+ * (2) write bad block marker to OOB area of affected block (unless flag
+ * NAND_BBT_NO_OOB_BBM is present)
+ * (3) update the BBT
+ * Note that we retain the first error encountered in (2) or (3), finish the
+ * remaining procedures, and return that first error at the end.
+ */
+static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ int res, ret = 0;
+
+ if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
+ struct erase_info einfo;
+
+ /* Attempt erase before marking OOB */
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = ofs;
+ einfo.len = 1ULL << chip->phys_erase_shift;
+ nand_erase_nand(mtd, &einfo, 0);
+
+ /* Write bad block marker to OOB */
+ nand_get_device(mtd, FL_WRITING);
+ ret = chip->block_markbad(mtd, ofs);
+ nand_release_device(mtd);
+ }
+
+ /* Mark block bad in BBT */
+ if (chip->bbt) {
+ res = nand_markbad_bbt(mtd, ofs);
+ if (!ret)
+ ret = res;
+ }
+
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @mtd: MTD device structure
+ *
+ * Check, if the device is write protected. The function expects, that the
+ * device is already selected.
+ */
+static int nand_check_wp(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ /* Broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
+ /* Check the WP bit */
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+}
+
+/**
+ * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * Check if the block is marked as reserved.
+ */
+static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (!chip->bbt)
+ return 0;
+ /* Return info from the table */
+ return nand_isreserved_bbt(mtd, ofs);
+}
+
+/**
+ * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ * @allowbbt: 1, if its allowed to access the bbt area
+ *
+ * Check, if the block is bad. Either by reading the bad block table or
+ * calling of the scan function.
+ */
+static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
+ int allowbbt)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (!chip->bbt)
+ return chip->block_bad(mtd, ofs, getchip);
+
+ /* Return info from the table */
+ return nand_isbad_bbt(mtd, ofs, allowbbt);
+}
+
+/**
+ * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout
+ *
+ * Helper function for nand_wait_ready used when needing to wait in interrupt
+ * context.
+ */
+static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+ struct nand_chip *chip = mtd->priv;
+ int i;
+
+ /* Wait for the device to get ready */
+ for (i = 0; i < timeo; i++) {
+ if (chip->dev_ready(mtd))
+ break;
+ touch_softlockup_watchdog();
+ mdelay(1);
+ }
+}
+
+/* Wait for the ready pin, after a command. The timeout is caught later. */
+void nand_wait_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ unsigned long timeo = jiffies + msecs_to_jiffies(20);
+
+ /* 400ms timeout for the panic path */
+ if (in_interrupt() || oops_in_progress)
+ return panic_nand_wait_ready(mtd, 400);
+
+ led_trigger_event(nand_led_trigger, LED_FULL);
+ /* Wait until command is processed or timeout occurs */
+ do {
+ if (chip->dev_ready(mtd))
+ break;
+ touch_softlockup_watchdog();
+ } while (time_before(jiffies, timeo));
+ led_trigger_event(nand_led_trigger, LED_OFF);
+}
+EXPORT_SYMBOL_GPL(nand_wait_ready);
+
+/**
+ * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout in ms
+ *
+ * Wait for status ready (i.e. command done) or timeout.
+ */
+static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+ register struct nand_chip *chip = mtd->priv;
+
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ if ((chip->read_byte(mtd) & NAND_STATUS_READY))
+ break;
+ touch_softlockup_watchdog();
+ } while (time_before(jiffies, timeo));
+}
+
+/**
+ * nand_command - [DEFAULT] Send command to NAND device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This function is used for small page devices
+ * (512 Bytes per page).
+ */
+static void nand_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd->priv;
+ int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
+
+ /* Write out the command to the device */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ chip->cmd_ctrl(mtd, readcmd, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ chip->cmd_ctrl(mtd, command, ctrl);
+
+ /* Address cycle, when necessary */
+ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->cmd_ctrl(mtd, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (page_addr != -1) {
+ chip->cmd_ctrl(mtd, page_addr, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
+ /* One more address cycle for devices > 32MiB */
+ if (chip->chipsize > (32 << 20))
+ chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
+ }
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; status and
+ * sequential in need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+ chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd,
+ NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(mtd, 250);
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(mtd);
+}
+
+/**
+ * nand_command_lp - [DEFAULT] Send command to NAND large page device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ */
+static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd->priv;
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Command latch cycle */
+ chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+
+ if (column != -1 || page_addr != -1) {
+ int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->cmd_ctrl(mtd, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->cmd_ctrl(mtd, column >> 8, ctrl);
+ }
+ if (page_addr != -1) {
+ chip->cmd_ctrl(mtd, page_addr, ctrl);
+ chip->cmd_ctrl(mtd, page_addr >> 8,
+ NAND_NCE | NAND_ALE);
+ /* One more address cycle for devices > 128MiB */
+ if (chip->chipsize > (128 << 20))
+ chip->cmd_ctrl(mtd, page_addr >> 16,
+ NAND_NCE | NAND_ALE);
+ }
+ }
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; status, sequential
+ * in and random in need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+ chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(mtd, 250);
+ return;
+
+ case NAND_CMD_RNDOUT:
+ /* No ready / busy check necessary */
+ chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ return;
+
+ case NAND_CMD_READ0:
+ chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay.
+ */
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(mtd);
+}
+
+/**
+ * panic_nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Used when in panic, no locks are taken.
+ */
+static void panic_nand_get_device(struct nand_chip *chip,
+ struct mtd_info *mtd, int new_state)
+{
+ /* Hardware controller shared among independent devices */
+ chip->controller->active = chip;
+ chip->state = new_state;
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int nand_get_device(struct mtd_info *mtd, int new_state)
+{
+ struct nand_chip *chip = mtd->priv;
+ spinlock_t *lock = &chip->controller->lock;
+ wait_queue_head_t *wq = &chip->controller->wq;
+ DECLARE_WAITQUEUE(wait, current);
+retry:
+ spin_lock(lock);
+
+ /* Hardware controller shared among independent devices */
+ if (!chip->controller->active)
+ chip->controller->active = chip;
+
+ if (chip->controller->active == chip && chip->state == FL_READY) {
+ chip->state = new_state;
+ spin_unlock(lock);
+ return 0;
+ }
+ if (new_state == FL_PM_SUSPENDED) {
+ if (chip->controller->active->state == FL_PM_SUSPENDED) {
+ chip->state = FL_PM_SUSPENDED;
+ spin_unlock(lock);
+ return 0;
+ }
+ }
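+ /* Controller is busy: sleep on its waitqueue and retry */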
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(wq, &wait);
+ spin_unlock(lock);
+ schedule();
+ remove_wait_queue(wq, &wait);
+ goto retry;
+}
+
+/**
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @timeo: timeout
+ *
+ * Wait for command done. This is a helper function for nand_wait used when
+ * we are in interrupt context. May happen when in panic and trying to write
+ * an oops through mtdoops.
+ */
+static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
+ unsigned long timeo)
+{
+ int i;
+ for (i = 0; i < timeo; i++) {
+ if (chip->dev_ready) {
+ if (chip->dev_ready(mtd))
+ break;
+ } else {
+ if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ break;
+ }
+ mdelay(1);
+ }
+}
+
+/**
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ *
+ * Wait for command done. This applies to erase and program only. Erase can
+ * take up to 400ms and program up to 20ms according to general NAND and
+ * SmartMedia specs.
+ */
+static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ int status, state = chip->state;
+ unsigned long timeo = (state == FL_ERASING ? 400 : 20);
+
+ led_trigger_event(nand_led_trigger, LED_FULL);
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in any
+ * case on any machine.
+ */
+ ndelay(100);
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+
+ if (in_interrupt() || oops_in_progress)
+ panic_nand_wait(mtd, chip, timeo);
+ else {
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ while (time_before(jiffies, timeo)) {
+ if (chip->dev_ready) {
+ if (chip->dev_ready(mtd))
+ break;
+ } else {
+ if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ break;
+ }
+ cond_resched();
+ }
+ }
+ led_trigger_event(nand_led_trigger, LED_OFF);
+
+ status = (int)chip->read_byte(mtd);
+ /* This can happen in case of a timeout or a buggy dev_ready */
+ WARN_ON(!(status & NAND_STATUS_READY));
+ return status;
+}
+
+/**
+ * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ * @invert: when = 0, unlock the range of blocks within the lower and
+ * upper boundary address
+ * when = 1, unlock the range of blocks outside the boundaries
+ * of the lower and upper boundary address
+ *
+ * Returns unlock status.
+ */
+static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len, int invert)
+{
+ int ret = 0;
+ int status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ /* Submit address of first page to unlock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+ /* Submit address of last page to unlock */
+ page = (ofs + len) >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
+ (page | invert) & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ /* See if device thinks it succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ *
+ * Returns unlock status.
+ */
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr;
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ return -EINVAL;
+
+ /* Align to last block address if size addresses end of the device */
+ if (ofs + len == mtd->size)
+ len -= mtd->erasesize;
+
+ nand_get_device(mtd, FL_UNLOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /*
+ * Reset the chip: if we want to check write protection through READ
+ * STATUS bit 7, the chip must be reset first, since some operations
+ * (e.g. erasing or programming a locked block) can also clear that bit.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0);
+
+out:
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_unlock);
+
+/**
+ * nand_lock - [REPLACEABLE] locks all blocks present in the device
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ *
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts do
+ * have this feature, but it only allows locking all blocks, not a specified
+ * range of blocks. For now, the 'lock' feature is implemented by means of
+ * 'unlock'.
+ *
+ * Returns lock status.
+ */
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr, status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ return -EINVAL;
+
+ nand_get_device(mtd, FL_LOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /*
+ * Reset the chip: if we want to check write protection through READ
+ * STATUS bit 7, the chip must be reset first, since some operations
+ * (e.g. erasing or programming a locked block) can also clear that bit.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ status = MTD_ERASE_FAILED;
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Submit address of first page to lock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ /* See if device thinks it succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ goto out;
+ }
+
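+ /*
+ * The LOCK command above locks the whole device; unlock the inverted
+ * range so that only the blocks outside [ofs, ofs + len] become
+ * unlocked again and the requested range stays locked.
+ */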
+ ret = __nand_unlock(mtd, ofs, len, 0x1);
+
+out:
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_lock);
+
+/**
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ chip->read_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/**
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * We need a special oob layout and handling even when OOB isn't used.
+ */
+static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ chip->read_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->read_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->read_buf(mtd, oob, size);
+
+ return 0;
+}
+
+/**
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ */
+static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
+
+ chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ * @page: page number to read
+ */
+static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
+ int page)
+{
+ int start_step, end_step, num_steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *p;
+ int data_col_addr, i, gaps = 0;
+ int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
+ int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ int index;
+ unsigned int max_bitflips = 0;
+
+ /* Column address within the page, aligned to ECC size (256 bytes) */
+ start_step = data_offs / chip->ecc.size;
+ end_step = (data_offs + readlen - 1) / chip->ecc.size;
+ num_steps = end_step - start_step + 1;
+ index = start_step * chip->ecc.bytes;
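+
+ /*
+ * Example: with ecc.size = 256, a 100-byte read at data_offs = 700
+ * spans steps 2 and 3, so 512 bytes starting at column 512 are read
+ * and two ECC codes are checked.
+ */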
+
+ /* Data size aligned to ECC ecc.size */
+ datafrag_len = num_steps * chip->ecc.size;
+ eccfrag_len = num_steps * chip->ecc.bytes;
+
+ data_col_addr = start_step * chip->ecc.size;
+ /* If the read is not page aligned, position the column address first */
+ if (data_col_addr != 0)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
+
+ p = bufpoi + data_col_addr;
+ chip->read_buf(mtd, p, datafrag_len);
+
+ /* Calculate ECC */
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+ chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+
+ /*
+ * Reading only the needed ECC bytes (positioned via eccpos) is faster
+ * than reading the whole OOB, but requires the ECC positions to be
+ * contiguous. Check that there are no gaps first.
+ */
+ for (i = 0; i < eccfrag_len - 1; i++) {
+ if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
+ gaps = 1;
+ break;
+ }
+ }
+ if (gaps) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ } else {
+ /*
+ * Send the command to read only the needed ECC bytes; take care
+ * of buswidth alignment in read_buf.
+ */
+ aligned_pos = eccpos[index] & ~(busw - 1);
+ aligned_len = eccfrag_len;
+ if (eccpos[index] & (busw - 1))
+ aligned_len++;
+ if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
+ aligned_len++;
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + aligned_pos, -1);
+ chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+ }
+
+ for (i = 0; i < eccfrag_len; i++)
+ chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
+
+ p = bufpoi + data_col_addr;
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p,
+ &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
+ */
+static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Hardware ECC for large page chips, which requires the OOB to be read
+ * first. For this ECC mode, the write_page method is re-used from ECC_HW.
+ * These methods read/write ECC from the OOB area, unlike the
+ * ECC_HW_SYNDROME support which, with multiple ECC steps, follows the
+ * "infix ECC" scheme and reads/writes ECC from the data area, overwriting
+ * the NAND manufacturer's bad block markings.
+ */
+static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ unsigned int max_bitflips = 0;
+
+ /* Read the OOB area first */
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ unsigned int max_bitflips = 0;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
+ chip->read_buf(mtd, oob, eccbytes);
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i)
+ chip->read_buf(mtd, oob, i);
+
+ return max_bitflips;
+}
+
+/**
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: nand chip structure
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
+ */
+static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
+ struct mtd_oob_ops *ops, size_t len)
+{
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(oob, chip->oob_poi + ops->ooboffs, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB: {
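+ /*
+ * Walk the free-byte regions of the ECC layout, skipping whole
+ * regions until the requested OOB offset has been consumed.
+ */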
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ uint32_t boffs = 0, roffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for (; free->length && len; free++, len -= bytes) {
+ /* Read request not from offset 0? */
+ if (unlikely(roffs)) {
+ if (roffs >= free->length) {
+ roffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + roffs;
+ bytes = min_t(size_t, len,
+ (free->length - roffs));
+ roffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(oob, chip->oob_poi + boffs, bytes);
+ oob += bytes;
+ }
+ return oob;
+ }
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @mtd: MTD device structure
+ * @retry_mode: the retry mode to use
+ *
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+ if (retry_mode >= chip->read_retries)
+ return -EINVAL;
+
+ if (!chip->setup_read_retry)
+ return -EOPNOTSUPP;
+
+ return chip->setup_read_retry(mtd, retry_mode);
+}
+
+/**
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob ops structure
+ *
+ * Internal function. Called with chip held.
+ */
+static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, realpage, col, bytes, aligned, oob_required;
+ struct nand_chip *chip = mtd->priv;
+ int ret = 0;
+ uint32_t readlen = ops->len;
+ uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
+ mtd->oobavail : mtd->oobsize;
+
+ uint8_t *bufpoi, *oob, *buf;
+ int use_bufpoi;
+ unsigned int max_bitflips = 0;
+ int retry_mode = 0;
+ bool ecc_fail = false;
+
+ chipnr = (int)(from >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ col = (int)(from & (mtd->writesize - 1));
+
+ buf = ops->datbuf;
+ oob = ops->oobbuf;
+ oob_required = oob ? 1 : 0;
+
+ while (1) {
+ unsigned int ecc_failures = mtd->ecc_stats.failed;
+
+ bytes = min(mtd->writesize - col, readlen);
+ aligned = (bytes == mtd->writesize);
+
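+ /*
+ * Unaligned reads always go through the bounce buffer; aligned
+ * reads do too when the controller cannot access the caller's
+ * buffer directly (NAND_USE_BOUNCE_BUFFER and !virt_addr_valid).
+ */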
+ if (!aligned)
+ use_bufpoi = 1;
+ else if (chip->options & NAND_USE_BOUNCE_BUFFER)
+ use_bufpoi = !virt_addr_valid(buf);
+ else
+ use_bufpoi = 0;
+
+ /* Is the current page in the buffer? */
+ if (realpage != chip->pagebuf || oob) {
+ bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
+
+ if (use_bufpoi && aligned)
+ pr_debug("%s: using read bounce buffer for buf@%p\n",
+ __func__, buf);
+
+read_retry:
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+
+ /*
+ * Now read the page into the buffer. Absent an error,
+ * the read methods return max bitflips per ecc step.
+ */
+ if (unlikely(ops->mode == MTD_OPS_RAW))
+ ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+ oob_required,
+ page);
+ else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
+ !oob)
+ ret = chip->ecc.read_subpage(mtd, chip,
+ col, bytes, bufpoi,
+ page);
+ else
+ ret = chip->ecc.read_page(mtd, chip, bufpoi,
+ oob_required, page);
+ if (ret < 0) {
+ if (use_bufpoi)
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ break;
+ }
+
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
+ /* Transfer not aligned data */
+ if (use_bufpoi) {
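+ /*
+ * Cache the page only for non-raw reads without OOB
+ * that had no new ECC failures on chips lacking
+ * subpage reads; otherwise invalidate the page cache.
+ */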
+ if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
+ !(mtd->ecc_stats.failed - ecc_failures) &&
+ (ops->mode != MTD_OPS_RAW)) {
+ chip->pagebuf = realpage;
+ chip->pagebuf_bitflips = ret;
+ } else {
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ }
+ memcpy(buf, chip->buffers->databuf + col, bytes);
+ }
+
+ if (unlikely(oob)) {
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip,
+ oob, ops, toread);
+ oobreadlen -= toread;
+ }
+ }
+
+ if (chip->options & NAND_NEED_READRDY) {
+ /* Apply delay or wait for ready/busy pin */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+
+ if (mtd->ecc_stats.failed - ecc_failures) {
+ if (retry_mode + 1 < chip->read_retries) {
+ retry_mode++;
+ ret = nand_setup_read_retry(mtd,
+ retry_mode);
+ if (ret < 0)
+ break;
+
+ /* Reset failures; retry */
+ mtd->ecc_stats.failed = ecc_failures;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_fail = true;
+ }
+ }
+
+ buf += bytes;
+ } else {
+ memcpy(buf, chip->buffers->databuf + col, bytes);
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagebuf_bitflips);
+ }
+
+ readlen -= bytes;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ ret = nand_setup_read_retry(mtd, 0);
+ if (ret < 0)
+ break;
+ retry_mode = 0;
+ }
+
+ if (!readlen)
+ break;
+
+ /* For subsequent reads align to page boundary */
+ col = 0;
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+ chip->select_chip(mtd, -1);
+
+ ops->retlen = ops->len - (size_t) readlen;
+ if (oob)
+ ops->oobretlen = ops->ooblen - oobreadlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (ecc_fail)
+ return -EBADMSG;
+
+ return max_bitflips;
+}
+
+/**
+ * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
+ *
+ * Get hold of the chip and call nand_do_read.
+ */
+static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ nand_get_device(mtd, FL_READING);
+ memset(&ops, 0, sizeof(ops));
+ ops.len = len;
+ ops.datbuf = buf;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ret = nand_do_read_ops(mtd, from, &ops);
+ *retlen = ops.retlen;
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/**
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
+ * with syndromes
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ int length = mtd->oobsize;
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size;
+ uint8_t *bufpoi = chip->oob_poi;
+ int i, toread, sndrnd = 0, pos;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (sndrnd) {
+ pos = eccsize + i * (eccsize + chunk);
+ if (mtd->writesize > 512)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
+ else
+ chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
+ } else
+ sndrnd = 1;
+ toread = min_t(int, length, chunk);
+ chip->read_buf(mtd, bufpoi, toread);
+ bufpoi += toread;
+ length -= toread;
+ }
+ if (length > 0)
+ chip->read_buf(mtd, bufpoi, length);
+
+ return 0;
+}
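+
+/*
+ * Worked example for the interleaved layout above (hypothetical geometry):
+ * with eccsize = 512, prepad = 0, eccbytes = 8 and postpad = 2, each chunk
+ * is 10 bytes and the OOB chunk of step i starts at column
+ * 512 + i * (512 + 10). Step 0 needs no positioning command because READ0
+ * already left the column pointer at chip->ecc.size; later steps reposition
+ * with Change Read Column (RNDOUT) on large-page devices.
+ */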
+
+/**
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ int status = 0;
+ const uint8_t *buf = chip->oob_poi;
+ int length = mtd->oobsize;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, buf, length);
+ /* Send command to program the OOB data */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/**
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ * with syndrome - only for large page flash
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size, length = mtd->oobsize;
+ int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
+ const uint8_t *bufpoi = chip->oob_poi;
+
+ /*
+ * data-ecc-data-ecc ... ecc-oob
+ * or
+ * data-pad-ecc-pad-data-pad .... ecc-pad-oob
+ */
+ if (!chip->ecc.prepad && !chip->ecc.postpad) {
+ pos = steps * (eccsize + chunk);
+ steps = 0;
+ } else
+ pos = eccsize;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+ for (i = 0; i < steps; i++) {
+ if (sndcmd) {
+ if (mtd->writesize <= 512) {
+ uint32_t fill = 0xFFFFFFFF;
+
+ len = eccsize;
+ while (len > 0) {
+ int num = min_t(int, len, 4);
+ chip->write_buf(mtd, (uint8_t *)&fill,
+ num);
+ len -= num;
+ }
+ } else {
+ pos = eccsize + i * (eccsize + chunk);
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
+ }
+ } else
+ sndcmd = 1;
+ len = min_t(int, length, chunk);
+ chip->write_buf(mtd, bufpoi, len);
+ bufpoi += len;
+ length -= len;
+ }
+ if (length > 0)
+ chip->write_buf(mtd, bufpoi, length);
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/**
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * NAND read out-of-band data from the spare area.
+ */
+static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int page, realpage, chipnr;
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_ecc_stats stats;
+ int readlen = ops->ooblen;
+ int len;
+ uint8_t *buf = ops->oobbuf;
+ int ret = 0;
+
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
+
+ stats = mtd->ecc_stats;
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ len = chip->ecc.layout->oobavail;
+ else
+ len = mtd->oobsize;
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start read outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size ||
+ ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
+ (from >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ chipnr = (int)(from >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ while (1) {
+ if (ops->mode == MTD_OPS_RAW)
+ ret = chip->ecc.read_oob_raw(mtd, chip, page);
+ else
+ ret = chip->ecc.read_oob(mtd, chip, page);
+
+ if (ret < 0)
+ break;
+
+ len = min(len, readlen);
+ buf = nand_transfer_oob(chip, buf, ops, len);
+
+ if (chip->options & NAND_NEED_READRDY) {
+ /* Apply delay or wait for ready/busy pin */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+
+ readlen -= len;
+ if (!readlen)
+ break;
+
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+		/* Check if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+ chip->select_chip(mtd, -1);
+
+ ops->oobretlen = ops->ooblen - readlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
+ * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read data and/or out-of-band data.
+ */
+static int nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int ret = -ENOTSUPP;
+
+ ops->retlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (ops->datbuf && (from + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ nand_get_device(mtd, FL_READING);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_read_oob(mtd, from, ops);
+ else
+ ret = nand_do_read_ops(mtd, from, ops);
+
+out:
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ chip->write_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/**
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * We need a special oob layout and handling even when ECC isn't checked.
+ */
+static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ chip->write_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->write_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->write_buf(mtd, oob, size);
+
+ return 0;
+}
+
+/**
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ /* Software ECC calculation */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ return chip->ecc.write_page_raw(mtd, chip, buf, 1);
+}
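+
+/*
+ * Worked numbers (assuming the default software Hamming parameters,
+ * ecc.size = 256 and ecc.bytes = 3): a 2048 byte page gives
+ * ecc.steps = 2048 / 256 = 8 and ecc.total = 8 * 3 = 24 ECC bytes, which
+ * are scattered into the OOB at the offsets listed in
+ * ecc.layout->eccpos[0..23] before the raw page write.
+ */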
+
+/**
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->write_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static int nand_write_subpage_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *buf,
+ int oob_required)
+{
+ uint8_t *oob_buf = chip->oob_poi;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t start_step = offset / ecc_size;
+ uint32_t end_step = (offset + data_len - 1) / ecc_size;
+ int oob_bytes = mtd->oobsize / ecc_steps;
+ int step, i;
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* configure controller for WRITE access */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* write data (untouched subpages already masked by 0xFF) */
+ chip->write_buf(mtd, buf, ecc_size);
+
+ /* mask ECC of un-touched subpages by padding 0xFF */
+ if ((step < start_step) || (step > end_step))
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ chip->ecc.calculate(mtd, buf, ecc_calc);
+
+ /* mask OOB of un-touched subpages by padding 0xFF */
+ /* if oob_required, preserve OOB metadata of written subpage */
+ if (!oob_required || (step < start_step) || (step > end_step))
+ memset(oob_buf, 0xff, oob_bytes);
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ oob_buf += oob_bytes;
+ }
+
+	/*
+	 * Copy the calculated ECC for the whole page to chip->oob_poi; this
+	 * includes the masked value (0xFF) for unwritten subpages.
+	 */
+ ecc_calc = chip->buffers->ecccalc;
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* write OOB buffer to NAND device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
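+
+/*
+ * Masking example (hypothetical request): for a 2048 byte page with
+ * ecc_size = 512, a write of data_len = 512 at offset = 1024 gives
+ * start_step = end_step = 1024 / 512 = 2, so steps 0, 1 and 3 get
+ * 0xFF-padded ECC and OOB while only step 2 receives real ECC bytes.
+ */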
+
+/**
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->write_buf(mtd, p, eccsize);
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.calculate(mtd, p, oob);
+ chip->write_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i)
+ chip->write_buf(mtd, oob, i);
+
+ return 0;
+}
+
+/**
+ * nand_write_page - [REPLACEABLE] write one page
+ * @mtd: MTD device structure
+ * @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
+ * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ * @cached: cached programming
+ * @raw: use _raw version of write_page
+ */
+static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
+{
+ int status, subpage;
+
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ chip->ecc.write_subpage)
+ subpage = offset || (data_len < mtd->writesize);
+ else
+ subpage = 0;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+ if (unlikely(raw))
+ status = chip->ecc.write_page_raw(mtd, chip, buf,
+ oob_required);
+ else if (subpage)
+ status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
+ buf, oob_required);
+ else
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+ if (status < 0)
+ return status;
+
+	/*
+	 * Cached programming is disabled for now. It is not clear whether it
+	 * is worth the trouble; the speed gain is not very impressive
+	 * (2.3 -> 2.6 MiB/s).
+	 */
+ cached = 0;
+
+ if (!cached || !NAND_HAS_CACHEPROG(chip)) {
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ /*
+ * See if operation failed and additional status checks are
+ * available.
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_WRITING, status,
+ page);
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
+
+ return 0;
+}
+
+/**
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @mtd: MTD device structure
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
+ */
+static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ /*
+ * Initialise to all 0xFF, to avoid the possibility of left over OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB: {
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ uint32_t boffs = 0, woffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for (; free->length && len; free++, len -= bytes) {
+ /* Write request not from offset 0? */
+ if (unlikely(woffs)) {
+ if (woffs >= free->length) {
+ woffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + woffs;
+ bytes = min_t(size_t, len,
+ (free->length - woffs));
+ woffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(chip->oob_poi + boffs, oob, bytes);
+ oob += bytes;
+ }
+ return oob;
+ }
+ default:
+ BUG();
+ }
+ return NULL;
+}
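+
+/*
+ * MTD_OPS_AUTO_OOB packing example (hypothetical layout): with oobfree =
+ * {{.offset = 2, .length = 6}, {.offset = 10, .length = 6}} and
+ * ops->ooboffs = 4, the loop above skips the first 4 free bytes, copies two
+ * client bytes to OOB offsets 6-7, then up to six more starting at offset
+ * 10; every byte outside the free regions stays 0xFF.
+ */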
+
+#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
+
+/**
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ * NAND write with ECC.
+ */
+static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, realpage, page, blockmask, column;
+ struct nand_chip *chip = mtd->priv;
+ uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
+ mtd->oobavail : mtd->oobsize;
+
+ uint8_t *oob = ops->oobbuf;
+ uint8_t *buf = ops->datbuf;
+ int ret;
+ int oob_required = oob ? 1 : 0;
+
+ ops->retlen = 0;
+ if (!writelen)
+ return 0;
+
+	/* Reject writes that are not page aligned */
+ if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
+ pr_notice("%s: attempt to write non page aligned data\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ column = to & (mtd->writesize - 1);
+
+ chipnr = (int)(to >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+	/* Check if it is write protected */
+ if (nand_check_wp(mtd)) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ realpage = (int)(to >> chip->page_shift);
+ page = realpage & chip->pagemask;
+ blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+
+ /* Invalidate the page cache, when we write to the cached page */
+ if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
+ ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
+ chip->pagebuf = -1;
+
+ /* Don't allow multipage oob writes with offset */
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ while (1) {
+ int bytes = mtd->writesize;
+ int cached = writelen > bytes && page != blockmask;
+ uint8_t *wbuf = buf;
+ int use_bufpoi;
+ int part_pagewr = (column || writelen < (mtd->writesize - 1));
+
+ if (part_pagewr)
+ use_bufpoi = 1;
+ else if (chip->options & NAND_USE_BOUNCE_BUFFER)
+ use_bufpoi = !virt_addr_valid(buf);
+ else
+ use_bufpoi = 0;
+
+		/* Partial page write, or need to use the bounce buffer? */
+ if (use_bufpoi) {
+ pr_debug("%s: using write bounce buffer for buf@%p\n",
+ __func__, buf);
+ cached = 0;
+ if (part_pagewr)
+ bytes = min_t(int, bytes - column, writelen);
+ chip->pagebuf = -1;
+ memset(chip->buffers->databuf, 0xff, mtd->writesize);
+ memcpy(&chip->buffers->databuf[column], buf, bytes);
+ wbuf = chip->buffers->databuf;
+ }
+
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ oob = nand_fill_oob(mtd, oob, len, ops);
+ oobwritelen -= len;
+ } else {
+ /* We still need to erase leftover OOB data */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+ }
+ ret = chip->write_page(mtd, chip, column, bytes, wbuf,
+ oob_required, page, cached,
+ (ops->mode == MTD_OPS_RAW));
+ if (ret)
+ break;
+
+ writelen -= bytes;
+ if (!writelen)
+ break;
+
+ column = 0;
+ buf += bytes;
+ realpage++;
+
+ page = realpage & chip->pagemask;
+		/* Check if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+
+ ops->retlen = ops->len - writelen;
+ if (unlikely(oob))
+ ops->oobretlen = ops->ooblen;
+
+err_out:
+ chip->select_chip(mtd, -1);
+ return ret;
+}
+
+/**
+ * panic_nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC. Used when performing writes in interrupt context, this
+ * may for example be called by mtdoops when writing an oops while in panic.
+ */
+static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
+ int ret;
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(mtd, chip, 400);
+
+ /* Grab the device */
+ panic_nand_get_device(chip, mtd, FL_WRITING);
+
+ memset(&ops, 0, sizeof(ops));
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ ret = nand_do_write_ops(mtd, to, &ops);
+
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/**
+ * nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC.
+ */
+static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ nand_get_device(mtd, FL_WRITING);
+ memset(&ops, 0, sizeof(ops));
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ret = nand_do_write_ops(mtd, to, &ops);
+ *retlen = ops.retlen;
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * nand_do_write_oob - [MTD Interface] NAND write out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * NAND write out-of-band.
+ */
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, status, len;
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)ops->ooblen);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ len = chip->ecc.layout->oobavail;
+ else
+ len = mtd->oobsize;
+
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + ops->ooblen) > len) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start write outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow write past end of device */
+ if (unlikely(to >= mtd->size ||
+ ops->ooboffs + ops->ooblen >
+ ((mtd->size >> chip->page_shift) -
+ (to >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ chipnr = (int)(to >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
+
+ /*
+ * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+ * of my DiskOnChip 2000 test units) will clear the whole data page too
+ * if we don't do this. I have no clue why, but I seem to have 'fixed'
+ * it in the doc2000 driver in August 1999. dwmw2.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+	/* Check if it is write protected */
+ if (nand_check_wp(mtd)) {
+ chip->select_chip(mtd, -1);
+ return -EROFS;
+ }
+
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page == chip->pagebuf)
+ chip->pagebuf = -1;
+
+ nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+
+ chip->select_chip(mtd, -1);
+
+ if (status)
+ return status;
+
+ ops->oobretlen = ops->ooblen;
+
+ return 0;
+}
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ret = -ENOTSUPP;
+
+ ops->retlen = 0;
+
+ /* Do not allow writes past end of device */
+ if (ops->datbuf && (to + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ nand_get_device(mtd, FL_WRITING);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_write_oob(mtd, to, ops);
+ else
+ ret = nand_do_write_ops(mtd, to, ops);
+
+out:
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * single_erase - [GENERIC] NAND standard block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
+ *
+ * Standard erase command for NAND chips. Returns NAND status.
+ */
+static int single_erase(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ /* Send commands to erase a block */
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ return chip->waitfunc(mtd, chip);
+}
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks.
+ */
+static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return nand_erase_nand(mtd, instr, 0);
+}
+
+/**
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one or more blocks.
+ */
+int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+ int allowbbt)
+{
+ int page, status, pages_per_block, ret, chipnr;
+ struct nand_chip *chip = mtd->priv;
+ loff_t len;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
+
+ if (check_offs_len(mtd, instr->addr, instr->len))
+ return -EINVAL;
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(mtd, FL_ERASING);
+
+ /* Shift to get first page */
+ page = (int)(instr->addr >> chip->page_shift);
+ chipnr = (int)(instr->addr >> chip->chip_shift);
+
+ /* Calculate pages in each block */
+ pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+
+ /* Select the NAND device */
+ chip->select_chip(mtd, chipnr);
+
+	/* Check if it is write protected */
+ if (nand_check_wp(mtd)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /* Loop through the pages */
+ len = instr->len;
+
+ instr->state = MTD_ERASING;
+
+ while (len) {
+ /* Check if we have a bad block, we do not erase bad blocks! */
+ if (nand_block_checkbad(mtd, ((loff_t) page) <<
+ chip->page_shift, 0, allowbbt)) {
+ pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+ __func__, page);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /*
+ * Invalidate the page cache, if we erase the block which
+ * contains the current cached page.
+ */
+ if (page <= chip->pagebuf && chip->pagebuf <
+ (page + pages_per_block))
+ chip->pagebuf = -1;
+
+ status = chip->erase(mtd, page & chip->pagemask);
+
+ /*
+ * See if operation failed and additional status checks are
+ * available
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_ERASING,
+ status, page);
+
+ /* See if block erase succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: failed erase, page 0x%08x\n",
+ __func__, page);
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr =
+ ((loff_t)page << chip->page_shift);
+ goto erase_exit;
+ }
+
+ /* Increment page address and decrement length */
+ len -= (1ULL << chip->phys_erase_shift);
+ page += pages_per_block;
+
+		/* Check if we cross a chip boundary */
+ if (len && !(page & chip->pagemask)) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+ instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+
+ /* Deselect and wake up anyone waiting on the device */
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+
+ /* Do call back function */
+ if (!ret)
+ mtd_erase_callback(instr);
+
+ /* Return more or less happy */
+ return ret;
+}
+
+/**
+ * nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait for chip ready function.
+ */
+static void nand_sync(struct mtd_info *mtd)
+{
+ pr_debug("%s: called\n", __func__);
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(mtd, FL_SYNCING);
+ /* Release it and go back */
+ nand_release_device(mtd);
+}
+
+/**
+ * nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ return nand_block_checkbad(mtd, offs, 1, 0);
+}
+
+/**
+ * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ ret = nand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return nand_block_markbad_lowlevel(mtd, ofs);
+}
+
+/**
+ * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ */
+static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+ int status;
+ int i;
+
+ if (!chip->onfi_version ||
+ !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, subfeature_param[i]);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ return 0;
+}
+
+/**
+ * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ */
+static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+ int i;
+
+ if (!chip->onfi_version ||
+ !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
+ return -EINVAL;
+
+ /* clear the sub feature parameters */
+ memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
+
+ chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ *subfeature_param++ = chip->read_byte(mtd);
+ return 0;
+}
+
+/**
+ * nand_suspend - [MTD Interface] Suspend the NAND flash
+ * @mtd: MTD device structure
+ */
+static int nand_suspend(struct mtd_info *mtd)
+{
+ return nand_get_device(mtd, FL_PM_SUSPENDED);
+}
+
+/**
+ * nand_resume - [MTD Interface] Resume the NAND flash
+ * @mtd: MTD device structure
+ */
+static void nand_resume(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (chip->state == FL_PM_SUSPENDED)
+ nand_release_device(mtd);
+ else
+ pr_err("%s called for a chip which is not in suspended state\n",
+ __func__);
+}
+
+/**
+ * nand_shutdown - [MTD Interface] Finish the current NAND operation and
+ * prevent further operations
+ * @mtd: MTD device structure
+ */
+static void nand_shutdown(struct mtd_info *mtd)
+{
+ nand_get_device(mtd, FL_SHUTDOWN);
+}
+
+/* Set default functions */
+static void nand_set_defaults(struct nand_chip *chip, int busw)
+{
+ /* check for proper chip_delay setup, set 20us if not */
+ if (!chip->chip_delay)
+ chip->chip_delay = 20;
+
+	/* check if a user-supplied command function was given */
+	if (chip->cmdfunc == NULL)
+		chip->cmdfunc = nand_command;
+
+	/* check if a user-supplied wait function was given */
+	if (chip->waitfunc == NULL)
+		chip->waitfunc = nand_wait;
+
+ if (!chip->select_chip)
+ chip->select_chip = nand_select_chip;
+
+ /* set for ONFI nand */
+ if (!chip->onfi_set_features)
+ chip->onfi_set_features = nand_onfi_set_features;
+ if (!chip->onfi_get_features)
+ chip->onfi_get_features = nand_onfi_get_features;
+
+ /* If called twice, pointers that depend on busw may need to be reset */
+ if (!chip->read_byte || chip->read_byte == nand_read_byte)
+ chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
+ if (!chip->read_word)
+ chip->read_word = nand_read_word;
+ if (!chip->block_bad)
+ chip->block_bad = nand_block_bad;
+ if (!chip->block_markbad)
+ chip->block_markbad = nand_default_block_markbad;
+ if (!chip->write_buf || chip->write_buf == nand_write_buf)
+ chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!chip->write_byte || chip->write_byte == nand_write_byte)
+ chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
+ if (!chip->read_buf || chip->read_buf == nand_read_buf)
+ chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
+ if (!chip->scan_bbt)
+ chip->scan_bbt = nand_default_bbt;
+
+ if (!chip->controller) {
+ chip->controller = &chip->hwcontrol;
+ spin_lock_init(&chip->controller->lock);
+ init_waitqueue_head(&chip->controller->wq);
+ }
+
+}
+
+/* Sanitize ONFI strings so we can safely print them */
+static void sanitize_string(uint8_t *s, size_t len)
+{
+ ssize_t i;
+
+ /* Null terminate */
+ s[len - 1] = 0;
+
+ /* Remove non printable chars */
+ for (i = 0; i < len - 1; i++) {
+ if (s[i] < ' ' || s[i] > 127)
+ s[i] = '?';
+ }
+
+ /* Remove trailing spaces */
+ strim(s);
+}
+
+static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 8;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+ }
+
+ return crc;
+}
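+
+/*
+ * This is CRC-16 with polynomial 0x8005 (x^16 + x^15 + x^2 + 1), seeded
+ * with ONFI_CRC_BASE as required by the ONFI spec. For a parameter page
+ * the CRC covers bytes 0-253 and is compared against the little-endian
+ * value stored in bytes 254-255, as in:
+ *
+ *	onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == le16_to_cpu(p->crc)
+ */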
+
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
+ struct nand_chip *chip, struct nand_onfi_params *p)
+{
+ struct onfi_ext_param_page *ep;
+ struct onfi_ext_section *s;
+ struct onfi_ext_ecc_info *ecc;
+ uint8_t *cursor;
+ int ret = -EINVAL;
+ int len;
+ int i;
+
+ len = le16_to_cpu(p->ext_param_page_length) * 16;
+ ep = kmalloc(len, GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ /* Send our own NAND_CMD_PARAM. */
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+
+ /* Use the Change Read Column command to skip the ONFI param pages. */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+			sizeof(*p) * p->num_of_param_pages, -1);
+
+ /* Read out the Extended Parameter Page. */
+ chip->read_buf(mtd, (uint8_t *)ep, len);
+ if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+ != le16_to_cpu(ep->crc))) {
+		pr_debug("CRC check failed.\n");
+ goto ext_out;
+ }
+
+	/*
+	 * Check the signature. We do not strictly follow the ONFI spec here;
+	 * this may change in the future.
+	 */
+ if (strncmp(ep->sig, "EPPS", 4)) {
+ pr_debug("The signature is invalid.\n");
+ goto ext_out;
+ }
+
+ /* find the ECC section. */
+ cursor = (uint8_t *)(ep + 1);
+ for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+ s = ep->sections + i;
+ if (s->type == ONFI_SECTION_TYPE_2)
+ break;
+ cursor += s->length * 16;
+ }
+ if (i == ONFI_EXT_SECTION_MAX) {
+		pr_debug("Could not find the ECC section.\n");
+ goto ext_out;
+ }
+
+ /* get the info we want. */
+ ecc = (struct onfi_ext_ecc_info *)cursor;
+
+ if (!ecc->codeword_size) {
+ pr_debug("Invalid codeword size\n");
+ goto ext_out;
+ }
+
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
+ ret = 0;
+
+ext_out:
+ kfree(ep);
+ return ret;
+}
+
+static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+ return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
+ feature);
+}
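+
+/*
+ * Retry flow sketch (see nand_do_read_ops() above): after an uncorrectable
+ * read, the retry mode counter is bumped and written to the chip through
+ * SET FEATURES at ONFI_FEATURE_ADDR_READ_RETRY, then the same page is
+ * re-read; mode 0 is restored once the page succeeds or all
+ * chip->read_retries modes are exhausted.
+ */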
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static void nand_onfi_detect_micron(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
+
+ if (le16_to_cpu(p->vendor_revision) < 1)
+ return;
+
+ chip->read_retries = micron->read_retry_options;
+ chip->setup_read_retry = nand_setup_read_retry_micron;
+}
+
+/*
+ * Check if the NAND chip is ONFI compliant; returns 1 if it is, 0 otherwise.
+ */
+static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ int *busw)
+{
+ struct nand_onfi_params *p = &chip->onfi_params;
+ int i, j;
+ int val;
+
+ /* Try ONFI for unknown chip or LP */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
+ if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
+ chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
+ return 0;
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < sizeof(*p); j++)
+ ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
+ le16_to_cpu(p->crc)) {
+ break;
+ }
+ }
+
+ if (i == 3) {
+ pr_err("Could not find valid ONFI parameter page; aborting\n");
+ return 0;
+ }
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 5))
+ chip->onfi_version = 23;
+ else if (val & (1 << 4))
+ chip->onfi_version = 22;
+ else if (val & (1 << 3))
+ chip->onfi_version = 21;
+ else if (val & (1 << 2))
+ chip->onfi_version = 20;
+ else if (val & (1 << 1))
+ chip->onfi_version = 10;
+
+ if (!chip->onfi_version) {
+ pr_info("unsupported ONFI version: %d\n", val);
+ return 0;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
+
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+ /*
+ * pages_per_block and blocks_per_lun may not be a power-of-2 size
+ * (don't ask me who thought of this...). MTD assumes that these
+ * dimensions will be power-of-2, so just truncate the remaining area.
+ */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+ /* See erasesize comment */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
+
+ if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
+ *busw = NAND_BUSWIDTH_16;
+ else
+ *busw = 0;
+
+ if (p->ecc_bits != 0xff) {
+ chip->ecc_strength_ds = p->ecc_bits;
+ chip->ecc_step_ds = 512;
+ } else if (chip->onfi_version >= 21 &&
+ (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+		/*
+		 * nand_flash_detect_ext_param_page() uses the Change Read
+		 * Column command, which may not be supported by the current
+		 * chip->cmdfunc, so try to update chip->cmdfunc now. We do
+		 * not replace a user supplied command function.
+		 */
+ if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+ chip->cmdfunc = nand_command_lp;
+
+ /* The Extended Parameter Page is supported since ONFI 2.1. */
+ if (nand_flash_detect_ext_param_page(mtd, chip, p))
+ pr_warn("Failed to detect ONFI extended param page\n");
+ } else {
+ pr_warn("Could not retrieve ONFI ECC requirements\n");
+ }
+
+ if (p->jedec_id == NAND_MFR_MICRON)
+ nand_onfi_detect_micron(chip, p);
+
+ return 1;
+}
+
+/*
+ * Check if the NAND chip is JEDEC compliant; returns 1 if it is, 0 otherwise.
+ */
+static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
+ int *busw)
+{
+ struct nand_jedec_params *p = &chip->jedec_params;
+ struct jedec_ecc_info *ecc;
+ int val;
+ int i, j;
+
+ /* Try JEDEC for unknown chip or LP */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
+ if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
+ chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
+ chip->read_byte(mtd) != 'C')
+ return 0;
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < sizeof(*p); j++)
+ ((uint8_t *)p)[j] = chip->read_byte(mtd);
+
+ if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
+ le16_to_cpu(p->crc))
+ break;
+ }
+
+ if (i == 3) {
+ pr_err("Could not find valid JEDEC parameter page; aborting\n");
+ return 0;
+ }
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 2))
+ chip->jedec_version = 10;
+ else if (val & (1 << 1))
+ chip->jedec_version = 1; /* vendor specific version */
+
+ if (!chip->jedec_version) {
+ pr_info("unsupported JEDEC version: %d\n", val);
+ return 0;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
+
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+	/* See the comment in nand_flash_detect_onfi(). */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+	/* See the comment in nand_flash_detect_onfi(). */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
+
+ if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
+ *busw = NAND_BUSWIDTH_16;
+ else
+ *busw = 0;
+
+ /* ECC info */
+ ecc = &p->ecc_info[0];
+
+ if (ecc->codeword_size >= 9) {
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
+ } else {
+ pr_warn("Invalid codeword size\n");
+ }
+
+ return 1;
+}
+
+/*
+ * nand_id_has_period - Check if an ID string has a given wraparound period
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ * @period: the period of repetition
+ *
+ * Check if an ID string is repeated within a given sequence of bytes at
+ * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
+ * if the repetition has a period of @period; otherwise, returns zero.
+ */
+static int nand_id_has_period(u8 *id_data, int arrlen, int period)
+{
+ int i, j;
+ for (i = 0; i < period; i++)
+ for (j = i + period; j < arrlen; j += period)
+ if (id_data[i] != id_data[j])
+ return 0;
+ return 1;
+}
+
+/*
+ * nand_id_len - Get the length of an ID string returned by CMD_READID
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ *
+ * Returns the length of the ID string, according to known wraparound/trailing
+ * zero patterns. If no pattern exists, returns the length of the array.
+ */
+static int nand_id_len(u8 *id_data, int arrlen)
+{
+ int last_nonzero, period;
+
+ /* Find last non-zero byte */
+ for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
+ if (id_data[last_nonzero])
+ break;
+
+ /* All zeros */
+ if (last_nonzero < 0)
+ return 0;
+
+ /* Calculate wraparound period */
+ for (period = 1; period < arrlen; period++)
+ if (nand_id_has_period(id_data, arrlen, period))
+ break;
+
+ /* There's a repeated pattern */
+ if (period < arrlen)
+ return period;
+
+ /* There are trailing zeros */
+ if (last_nonzero < arrlen - 1)
+ return last_nonzero + 1;
+
+ /* No pattern detected */
+ return arrlen;
+}
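+
+/*
+ * Examples (hypothetical ID bytes): a READID answer of
+ * { 0xec, 0xd3, 0x51, 0x95, 0x58, 0xec, 0xd3, 0x51 } wraps around with
+ * period 5, so the length is 5; an answer of
+ * { 0xec, 0xf1, 0x00, 0x95, 0x40, 0x00, 0x00, 0x00 } has no period but
+ * three trailing zeros, which also yields a length of 5.
+ */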
+
+/* Extract the number of bits per cell from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+ int bits;
+
+ bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+ bits >>= NAND_CI_CELLTYPE_SHIFT;
+ return bits + 1;
+}
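+
+/*
+ * Example (assuming the usual definitions NAND_CI_CELLTYPE_MSK = 0x0C and
+ * NAND_CI_CELLTYPE_SHIFT = 2): a 3rd ID byte of 0x55 (binary 01010101)
+ * carries cell-type bits 01b, i.e. 2 bits per cell (MLC), while 0x51
+ * carries 00b, i.e. 1 bit per cell (SLC).
+ */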
+
+/*
+ * Many new NAND chips share similar device ID codes, which represent the
+ * size of the chip. The rest of the parameters must be decoded according to
+ * generic or manufacturer-specific "extended ID" decoding patterns.
+ */
+static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 id_data[8], int *busw)
+{
+ int extid, id_len;
+ /* The 3rd id byte holds MLC / multichip data */
+ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ /* The 4th id byte is the important one */
+ extid = id_data[3];
+
+ id_len = nand_id_len(id_data, 8);
+
+ /*
+ * Field definitions are in the following datasheets:
+ * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
+ * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
+ * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
+ *
+ * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
+ * ID to decide what to do.
+ */
+ if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
+ !nand_is_slc(chip) && id_data[5] != 0x00) {
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
+ case 1:
+ mtd->oobsize = 128;
+ break;
+ case 2:
+ mtd->oobsize = 218;
+ break;
+ case 3:
+ mtd->oobsize = 400;
+ break;
+ case 4:
+ mtd->oobsize = 436;
+ break;
+ case 5:
+ mtd->oobsize = 512;
+ break;
+ case 6:
+ mtd->oobsize = 640;
+ break;
+ case 7:
+ default: /* Other cases are "reserved" (unknown) */
+ mtd->oobsize = 1024;
+ break;
+ }
+ extid >>= 2;
+ /* Calc blocksize */
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+ *busw = 0;
+ } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
+ !nand_is_slc(chip)) {
+ unsigned int tmp;
+
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
+ case 0:
+ mtd->oobsize = 128;
+ break;
+ case 1:
+ mtd->oobsize = 224;
+ break;
+ case 2:
+ mtd->oobsize = 448;
+ break;
+ case 3:
+ mtd->oobsize = 64;
+ break;
+ case 4:
+ mtd->oobsize = 32;
+ break;
+ case 5:
+ mtd->oobsize = 16;
+ break;
+ default:
+ mtd->oobsize = 640;
+ break;
+ }
+ extid >>= 2;
+ /* Calc blocksize */
+ tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
+ if (tmp < 0x03)
+ mtd->erasesize = (128 * 1024) << tmp;
+ else if (tmp == 0x03)
+ mtd->erasesize = 768 * 1024;
+ else
+ mtd->erasesize = (64 * 1024) << tmp;
+ *busw = 0;
+ } else {
+ /* Calc pagesize */
+ mtd->writesize = 1024 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (8 << (extid & 0x01)) *
+ (mtd->writesize >> 9);
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+
+ /*
+ * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
+ * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
+ * follows:
+ * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
+ * 110b -> 24nm
+ * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
+ */
+ if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
+ nand_is_slc(chip) &&
+ (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
+ !(id_data[4] & 0x80) /* !BENAND */) {
+ mtd->oobsize = 32 * mtd->writesize >> 9;
+ }
+
+ }
+}
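+
+/*
+ * Worked example for the final (generic) branch, using a hypothetical ID
+ * of ec f1 00 95: extid = 0x95 decodes to writesize = 1024 << 1 = 2048,
+ * oobsize = (8 << 1) * (2048 >> 9) = 64, erasesize = 64 KiB << 1 = 128 KiB,
+ * and an 8-bit bus, i.e. a typical 2K-page SLC part.
+ */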
+
+/*
+ * Old devices have chip data hardcoded in the device ID table. nand_decode_id
+ * decodes a matching ID table entry and assigns the MTD size parameters for
+ * the chip.
+ */
+static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
+ struct nand_flash_dev *type, u8 id_data[8],
+ int *busw)
+{
+ int maf_id = id_data[0];
+
+ mtd->erasesize = type->erasesize;
+ mtd->writesize = type->pagesize;
+ mtd->oobsize = mtd->writesize / 32;
+ *busw = type->options & NAND_BUSWIDTH_16;
+
+ /* All legacy ID NAND are small-page, SLC */
+ chip->bits_per_cell = 1;
+
+ /*
+ * Check for Spansion/AMD ID + repeating 5th, 6th byte since
+ * some Spansion chips have erasesize that conflicts with size
+ * listed in nand_ids table.
+ * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
+ */
+ if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
+ && id_data[6] == 0x00 && id_data[7] == 0x00
+ && mtd->writesize == 512) {
+ mtd->erasesize = 128 * 1024;
+ mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ }
+}
+
+/*
+ * Set the bad block marker/indicator (BBM/BBI) patterns according to some
+ * heuristic patterns using various detected parameters (e.g., manufacturer,
+ * page size, cell-type information).
+ */
+static void nand_decode_bbm_options(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 id_data[8])
+{
+ int maf_id = id_data[0];
+
+ /* Set the bad block position */
+ if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
+ chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ else
+ chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
+
+ /*
+ * Bad block marker is stored in the last page of each block on Samsung
+ * and Hynix MLC devices; stored in first two pages of each block on
+ * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
+ * AMD/Spansion, and Macronix. All others scan only the first page.
+ */
+ if (!nand_is_slc(chip) &&
+ (maf_id == NAND_MFR_SAMSUNG ||
+ maf_id == NAND_MFR_HYNIX))
+ chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+ else if ((nand_is_slc(chip) &&
+ (maf_id == NAND_MFR_SAMSUNG ||
+ maf_id == NAND_MFR_HYNIX ||
+ maf_id == NAND_MFR_TOSHIBA ||
+ maf_id == NAND_MFR_AMD ||
+ maf_id == NAND_MFR_MACRONIX)) ||
+ (mtd->writesize == 2048 &&
+ maf_id == NAND_MFR_MICRON))
+ chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+}
+
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+ return type->id_len;
+}
+
+static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
+ struct nand_flash_dev *type, u8 *id_data, int *busw)
+{
+ if (!strncmp(type->id, id_data, type->id_len)) {
+ mtd->writesize = type->pagesize;
+ mtd->erasesize = type->erasesize;
+ mtd->oobsize = type->oobsize;
+
+ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ chip->chipsize = (uint64_t)type->chipsize << 20;
+ chip->options |= type->options;
+ chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
+ chip->ecc_step_ds = NAND_ECC_STEP(type);
+ chip->onfi_timing_mode_default =
+ type->onfi_timing_mode_default;
+
+ *busw = type->options & NAND_BUSWIDTH_16;
+
+ if (!mtd->name)
+ mtd->name = type->name;
+
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Get the flash and manufacturer id and look up if the type is supported.
+ */
+static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int *maf_id, int *dev_id,
+ struct nand_flash_dev *type)
+{
+ int busw;
+ int i, maf_idx;
+ u8 id_data[8];
+
+ /* Select the device */
+ chip->select_chip(mtd, 0);
+
+ /*
+ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+ * after power-up.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Send the command for reading device ID */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+ *maf_id = chip->read_byte(mtd);
+ *dev_id = chip->read_byte(mtd);
+
+ /*
+	 * Try again to make sure, as on some systems bus-hold or other
+	 * interface concerns can cause random data that looks like a
+ * possibly credible NAND flash to appear. If the two results do
+ * not match, ignore the device completely.
+ */
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read entire ID string */
+ for (i = 0; i < 8; i++)
+ id_data[i] = chip->read_byte(mtd);
+
+ if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
+ pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
+ *maf_id, *dev_id, id_data[0], id_data[1]);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!type)
+ type = nand_flash_ids;
+
+ for (; type->name != NULL; type++) {
+ if (is_full_id_nand(type)) {
+ if (find_full_id_nand(mtd, chip, type, id_data, &busw))
+ goto ident_done;
+ } else if (*dev_id == type->dev_id) {
+ break;
+ }
+ }
+
+ chip->onfi_version = 0;
+ if (!type->name || !type->pagesize) {
+ /* Check if the chip is ONFI compliant */
+ if (nand_flash_detect_onfi(mtd, chip, &busw))
+ goto ident_done;
+
+ /* Check if the chip is JEDEC compliant */
+ if (nand_flash_detect_jedec(mtd, chip, &busw))
+ goto ident_done;
+ }
+
+ if (!type->name)
+ return ERR_PTR(-ENODEV);
+
+ if (!mtd->name)
+ mtd->name = type->name;
+
+ chip->chipsize = (uint64_t)type->chipsize << 20;
+
+ if (!type->pagesize && chip->init_size) {
+ /* Set the pagesize, oobsize, erasesize by the driver */
+ busw = chip->init_size(mtd, chip, id_data);
+ } else if (!type->pagesize) {
+ /* Decode parameters from extended ID */
+ nand_decode_ext_id(mtd, chip, id_data, &busw);
+ } else {
+ nand_decode_id(mtd, chip, type, id_data, &busw);
+ }
+ /* Get chip options */
+ chip->options |= type->options;
+
+ /*
+ * Check if chip is not a Samsung device. Do not clear the
+ * options for chips which do not have an extended id.
+ */
+ if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
+ chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+ident_done:
+
+ /* Try to identify manufacturer */
+ for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
+ if (nand_manuf_ids[maf_idx].id == *maf_id)
+ break;
+ }
+
+ if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(chip->options & NAND_BUSWIDTH_16);
+ chip->options |= busw;
+ nand_set_defaults(chip, busw);
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+		/*
+		 * Check if the buswidth is correct. Hardware drivers should
+		 * set the chip up correctly!
+		 */
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ *maf_id, *dev_id);
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
+		pr_warn("bus width %d instead of %d bits\n",
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+ busw ? 16 : 8);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nand_decode_bbm_options(mtd, chip, id_data);
+
+ /* Calculate the address shift from the page size */
+ chip->page_shift = ffs(mtd->writesize) - 1;
+ /* Convert chipsize to number of pages per chip -1 */
+ chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+
+ chip->bbt_erase_shift = chip->phys_erase_shift =
+ ffs(mtd->erasesize) - 1;
+ if (chip->chipsize & 0xffffffff)
+ chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
+ else {
+ chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
+ chip->chip_shift += 32 - 1;
+ }
+
+ chip->badblockbits = 8;
+ chip->erase = single_erase;
+
+ /* Do not replace user supplied command function! */
+ if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+ chip->cmdfunc = nand_command_lp;
+
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ *maf_id, *dev_id);
+
+ if (chip->onfi_version)
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ chip->onfi_params.model);
+ else if (chip->jedec_version)
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ chip->jedec_params.model);
+ else
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ type->name);
+
+ pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
+ (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+ mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
+ return type;
+}
+
+/**
+ * nand_scan_ident - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
+ *
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
+ *
+ * The mtd->owner field must be set to the module of the caller.
+ */
+int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
+{
+ int i, nand_maf_id, nand_dev_id;
+ struct nand_chip *chip = mtd->priv;
+ struct nand_flash_dev *type;
+
+ /* Set the default functions */
+ nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
+
+ /* Read the flash type */
+ type = nand_get_flash_type(mtd, chip, &nand_maf_id,
+ &nand_dev_id, table);
+
+ if (IS_ERR(type)) {
+ if (!(chip->options & NAND_SCAN_SILENT_NODEV))
+ pr_warn("No NAND device found\n");
+ chip->select_chip(mtd, -1);
+ return PTR_ERR(type);
+ }
+
+ chip->select_chip(mtd, -1);
+
+ /* Check for a chip array */
+ for (i = 1; i < maxchips; i++) {
+ chip->select_chip(mtd, i);
+ /* See comment in nand_get_flash_type for reset */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ /* Send the command for reading device ID */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ /* Read manufacturer and device IDs */
+ if (nand_maf_id != chip->read_byte(mtd) ||
+ nand_dev_id != chip->read_byte(mtd)) {
+ chip->select_chip(mtd, -1);
+ break;
+ }
+ chip->select_chip(mtd, -1);
+ }
+ if (i > 1)
+ pr_info("%d chips detected\n", i);
+
+ /* Store the number of chips and calc total size for mtd */
+ chip->numchips = i;
+ mtd->size = i * chip->chipsize;
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_scan_ident);
+
+/*
+ * Check if the chip configuration meets the datasheet requirements.
+ *
+ * If our configuration corrects A bits per B bytes and the minimum
+ * required correction level is X bits per Y bytes, then we must ensure
+ * both of the following are true:
+ *
+ * (1) A / B >= X / Y
+ * (2) A >= X
+ *
+ * Requirement (1) ensures we can correct for the required bitflip density.
+ * Requirement (2) ensures we can correct even when all bitflips are clumped
+ * in the same sector.
+ */
+static bool nand_ecc_strength_good(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int corr, ds_corr;
+
+ if (ecc->size == 0 || chip->ecc_step_ds == 0)
+ /* Not enough information */
+ return true;
+
+ /*
+ * Compute the number of correctable bits per page for both the
+ * configured ECC and the datasheet minimum, so that the correction
+ * densities can be compared.
+ */
+ corr = (mtd->writesize * ecc->strength) / ecc->size;
+ ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
+
+ return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
+}
+
+/**
+ * nand_scan_tail - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ *
+ * This is the second phase of the normal nand_scan() function. It fills out
+ * all the uninitialized function pointers with the defaults and scans for a
+ * bad block table if appropriate.
+ */
+int nand_scan_tail(struct mtd_info *mtd)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_buffers *nbuf;
+
+ /* New bad blocks should be marked in OOB, flash-based BBT, or both */
+ BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+ !(chip->bbt_options & NAND_BBT_USE_FLASH));
+
+ if (!(chip->options & NAND_OWN_BUFFERS)) {
+ nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
+ + mtd->oobsize * 3, GFP_KERNEL);
+ if (!nbuf)
+ return -ENOMEM;
+ nbuf->ecccalc = (uint8_t *)(nbuf + 1);
+ nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
+ nbuf->databuf = nbuf->ecccode + mtd->oobsize;
+
+ chip->buffers = nbuf;
+ } else {
+ if (!chip->buffers)
+ return -ENOMEM;
+ }
+
+ /* Set the internal oob buffer location, just after the page data */
+ chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+
+ /*
+ * If no default placement scheme is given, select an appropriate one.
+ */
+ if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
+ switch (mtd->oobsize) {
+ case 8:
+ ecc->layout = &nand_oob_8;
+ break;
+ case 16:
+ ecc->layout = &nand_oob_16;
+ break;
+ case 64:
+ ecc->layout = &nand_oob_64;
+ break;
+ case 128:
+ ecc->layout = &nand_oob_128;
+ break;
+ default:
+ pr_warn("No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ BUG();
+ }
+ }
+
+ if (!chip->write_page)
+ chip->write_page = nand_write_page;
+
+ /*
+ * Check the ECC mode; if 3-byte/512-byte hardware ECC is selected but
+ * the page size is only 256 bytes, fall back to software ECC.
+ */
+
+ switch (ecc->mode) {
+ case NAND_ECC_HW_OOB_FIRST:
+ /* Similar to NAND_ECC_HW, but a separate read_page handle */
+ if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
+ pr_warn("No ECC functions supplied; hardware ECC not possible\n");
+ BUG();
+ }
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc_oob_first;
+
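+ /* fall through - the remaining defaults are shared with NAND_ECC_HW */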
+ case NAND_ECC_HW:
+ /* Use standard hwecc read page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_hwecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->read_subpage)
+ ecc->read_subpage = nand_read_subpage;
+ if (!ecc->write_subpage)
+ ecc->write_subpage = nand_write_subpage_hwecc;
+
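+ /* fall through - the ecc.size/ecc.strength checks below apply here too */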
+ case NAND_ECC_HW_SYNDROME:
+ if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+ (!ecc->read_page ||
+ ecc->read_page == nand_read_page_hwecc ||
+ !ecc->write_page ||
+ ecc->write_page == nand_write_page_hwecc)) {
+ pr_warn("No ECC functions supplied; hardware ECC not possible\n");
+ BUG();
+ }
+ /* Use standard syndrome read/write page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_syndrome;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_syndrome;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw_syndrome;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw_syndrome;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_syndrome;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_syndrome;
+
+ if (mtd->writesize >= ecc->size) {
+ if (!ecc->strength) {
+ pr_warn("Driver must set ecc.strength when using hardware ECC\n");
+ BUG();
+ }
+ break;
+ }
+ pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
+ ecc->size, mtd->writesize);
+ ecc->mode = NAND_ECC_SOFT;
+
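+ /* fall through - continue with the software ECC setup */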
+ case NAND_ECC_SOFT:
+ ecc->calculate = nand_calculate_ecc;
+ ecc->correct = nand_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->size)
+ ecc->size = 256;
+ ecc->bytes = 3;
+ ecc->strength = 1;
+ break;
+
+ case NAND_ECC_SOFT_BCH:
+ if (!mtd_nand_has_bch()) {
+ pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ BUG();
+ }
+ ecc->calculate = nand_bch_calculate_ecc;
+ ecc->correct = nand_bch_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ /*
+ * Board driver should supply ecc.size and ecc.strength values
+ * to select how many bits are correctable. Otherwise, default
+ * to 4 bits for large page devices.
+ */
+ if (!ecc->size && (mtd->oobsize >= 64)) {
+ ecc->size = 512;
+ ecc->strength = 4;
+ }
+
+ /* See nand_bch_init() for details. */
+ ecc->bytes = DIV_ROUND_UP(
+ ecc->strength * fls(8 * ecc->size), 8);
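+ /*
+ * e.g. ecc->size = 512 and ecc->strength = 4: fls(4096) = 13,
+ * so ecc->bytes = DIV_ROUND_UP(4 * 13, 8) = 7.
+ */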
+ ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
+ &ecc->layout);
+ if (!ecc->priv) {
+ pr_warn("BCH ECC initialization failed!\n");
+ BUG();
+ }
+ break;
+
+ case NAND_ECC_NONE:
+ pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
+ ecc->read_page = nand_read_page_raw;
+ ecc->write_page = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->write_oob = nand_write_oob_std;
+ ecc->size = mtd->writesize;
+ ecc->bytes = 0;
+ ecc->strength = 0;
+ break;
+
+ default:
+ pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);
+ BUG();
+ }
+
+ /* For many systems, the standard OOB write also works for raw */
+ if (!ecc->read_oob_raw)
+ ecc->read_oob_raw = ecc->read_oob;
+ if (!ecc->write_oob_raw)
+ ecc->write_oob_raw = ecc->write_oob;
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area.
+ */
+ ecc->layout->oobavail = 0;
+ for (i = 0; i < ARRAY_SIZE(ecc->layout->oobfree)
+ && ecc->layout->oobfree[i].length; i++)
+ ecc->layout->oobavail += ecc->layout->oobfree[i].length;
+ mtd->oobavail = ecc->layout->oobavail;
+
+ /* ECC sanity check: warn if it's too weak */
+ if (!nand_ecc_strength_good(mtd))
+ pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+ mtd->name);
+
+ /*
+ * Set the number of read / write steps for one page depending on ECC
+ * mode.
+ */
+ ecc->steps = mtd->writesize / ecc->size;
+ if (ecc->steps * ecc->size != mtd->writesize) {
+ pr_warn("Invalid ECC parameters\n");
+ BUG();
+ }
+ ecc->total = ecc->steps * ecc->bytes;
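+ /*
+ * e.g. a 2048 byte page with ecc->size = 512 and ecc->bytes = 7
+ * gives ecc->steps = 4 and ecc->total = 28 ECC bytes per page.
+ */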
+
+ /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
+ switch (ecc->steps) {
+ case 2:
+ mtd->subpage_sft = 1;
+ break;
+ case 4:
+ case 8:
+ case 16:
+ mtd->subpage_sft = 2;
+ break;
+ }
+ }
+ chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
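+ /*
+ * e.g. 2048 byte pages with subpage_sft = 2 give a subpagesize
+ * of 2048 >> 2 = 512 bytes.
+ */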
+
+ /* Initialize state */
+ chip->state = FL_READY;
+
+ /* Invalidate the pagebuffer reference */
+ chip->pagebuf = -1;
+
+ /* Large page NAND with SOFT_ECC should support subpage reads */
+ switch (ecc->mode) {
+ case NAND_ECC_SOFT:
+ case NAND_ECC_SOFT_BCH:
+ if (chip->page_shift > 9)
+ chip->options |= NAND_SUBPAGE_READ;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Fill in remaining MTD driver data */
+ mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
+ mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
+ MTD_CAP_NANDFLASH;
+ mtd->_erase = nand_erase;
+ mtd->_point = NULL;
+ mtd->_unpoint = NULL;
+ mtd->_read = nand_read;
+ mtd->_write = nand_write;
+ mtd->_panic_write = panic_nand_write;
+ mtd->_read_oob = nand_read_oob;
+ mtd->_write_oob = nand_write_oob;
+ mtd->_sync = nand_sync;
+ mtd->_lock = NULL;
+ mtd->_unlock = NULL;
+ mtd->_suspend = nand_suspend;
+ mtd->_resume = nand_resume;
+ mtd->_reboot = nand_shutdown;
+ mtd->_block_isreserved = nand_block_isreserved;
+ mtd->_block_isbad = nand_block_isbad;
+ mtd->_block_markbad = nand_block_markbad;
+ mtd->writebufsize = mtd->writesize;
+
+ /* propagate ecc info to mtd_info */
+ mtd->ecclayout = ecc->layout;
+ mtd->ecc_strength = ecc->strength;
+ mtd->ecc_step_size = ecc->size;
+ /*
+ * Initialize bitflip_threshold to its default prior to the scan_bbt()
+ * call. scan_bbt() might invoke mtd_read(), so bitflip_threshold must
+ * already be set properly at that point.
+ */
+ if (!mtd->bitflip_threshold)
+ mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
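+ /* e.g. ecc_strength = 8 yields a default threshold of 6 bitflips. */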
+
+ /* Check if we should skip the bad block table scan */
+ if (chip->options & NAND_SKIP_BBTSCAN)
+ return 0;
+
+ /* Build bad block table */
+ return chip->scan_bbt(mtd);
+}
+EXPORT_SYMBOL(nand_scan_tail);
+
+/*
+ * is_module_text_address() isn't exported, and it's mostly a pointless
+ * test if this is a module _anyway_ -- they'd have to try _really_ hard
+ * to call us from in-kernel code if the core NAND support is modular.
+ */
+#ifdef MODULE
+#define caller_is_module() (1)
+#else
+#define caller_is_module() \
+ is_module_text_address((unsigned long)__builtin_return_address(0))
+#endif
+
+/**
+ * nand_scan - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ *
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+ * appropriate values. The mtd->owner field must be set to the module of the
+ * caller.
+ */
+int nand_scan(struct mtd_info *mtd, int maxchips)
+{
+ int ret;
+
+ /* Many callers got this wrong, so check for it for a while... */
+ if (!mtd->owner && caller_is_module()) {
+ pr_crit("%s called with NULL mtd->owner!\n", __func__);
+ BUG();
+ }
+
+ ret = nand_scan_ident(mtd, maxchips, NULL);
+ if (!ret)
+ ret = nand_scan_tail(mtd);
+ return ret;
+}
+EXPORT_SYMBOL(nand_scan);
+
+/**
+ * nand_release - [NAND Interface] Free resources held by the NAND device
+ * @mtd: MTD device structure
+ */
+void nand_release(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
+ nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
+
+ mtd_device_unregister(mtd);
+
+ /* Free bad block table memory */
+ kfree(chip->bbt);
+ if (!(chip->options & NAND_OWN_BUFFERS))
+ kfree(chip->buffers);
+
+ /* Free bad block descriptor memory */
+ if (chip->badblock_pattern && chip->badblock_pattern->options
+ & NAND_BBT_DYNAMICSTRUCT)
+ kfree(chip->badblock_pattern);
+}
+EXPORT_SYMBOL_GPL(nand_release);
+
+static int __init nand_base_init(void)
+{
+ led_trigger_register_simple("nand-disk", &nand_led_trigger);
+ return 0;
+}
+
+static void __exit nand_base_exit(void)
+{
+ led_trigger_unregister_simple(nand_led_trigger);
+}
+
+module_init(nand_base_init);
+module_exit(nand_base_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
new file mode 100644
index 000000000..9bb8453d2
--- /dev/null
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -0,0 +1,1375 @@
+/*
+ * drivers/mtd/nand_bbt.c
+ *
+ * Overview:
+ * Bad block table support for the NAND driver
+ *
+ * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description:
+ *
+ * When nand_scan_bbt is called, it tries to find the bad block table
+ * depending on the options in the BBT descriptor(s). If no flash based BBT
+ * (NAND_BBT_USE_FLASH) is specified, the device is scanned for factory
+ * marked good / bad blocks. This information is used to create a memory BBT.
+ * Whenever a new bad block is discovered, the "factory" information is
+ * updated on the device.
+ * If a flash based BBT is specified then the function first tries to find the
+ * BBT on flash. If a BBT is found then the contents are read and the memory
+ * based BBT is created. If a mirrored BBT is selected then the mirror is
+ * searched too and the versions are compared. If the mirror has a greater
+ * version number, then the mirror BBT is used to build the memory based BBT.
+ * If the tables are not versioned, then we "or" the bad block information.
+ * If one of the BBTs is out of date or does not exist it is (re)created.
+ * If no BBT exists at all then the device is scanned for factory marked
+ * good / bad blocks and the bad block tables are created.
+ *
+ * For manufacturer created BBTs like the one found on M-SYS DOC devices,
+ * the BBT is searched and read but never created.
+ *
+ * The auto generated bad block table is located in the last good blocks
+ * of the device. The table is mirrored, so it can be updated if necessary.
+ * The table is marked in the OOB area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date. If the NAND
+ * controller needs the complete OOB area for the ECC information then the
+ * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
+ * course): it moves the ident pattern and the version byte into the data area
+ * and the OOB area will remain untouched.
+ *
+ * The table uses 2 bits per block
+ * 11b: block is good
+ * 00b: block is factory marked bad
+ * 01b, 10b: block is marked bad due to wear
+ *
+ * The memory bad block table uses the following scheme:
+ * 00b: block is good
+ * 01b: block is marked bad due to wear
+ * 10b: block is reserved (to protect the bbt area)
+ * 11b: block is factory marked bad
+ *
+ * Multichip devices like DOC store the bad block info per floor.
+ *
+ * The following assumptions are made:
+ * - bbts start at a page boundary, if autolocated on a block boundary
+ * - the space necessary for a bbt in FLASH does not exceed a block boundary
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/bbm.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <linux/string.h>
+
+#define BBT_BLOCK_GOOD 0x00
+#define BBT_BLOCK_WORN 0x01
+#define BBT_BLOCK_RESERVED 0x02
+#define BBT_BLOCK_FACTORY_BAD 0x03
+
+#define BBT_ENTRY_MASK 0x03
+#define BBT_ENTRY_SHIFT 2
+
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
+
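+/*
+ * The BBT packs four 2-bit entries per byte. For block 5, for example,
+ * bbt_get_entry() reads chip->bbt[5 >> 2] = chip->bbt[1] and shifts it
+ * right by (5 & 0x03) * 2 = 2 bits before masking with BBT_ENTRY_MASK.
+ */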
+static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
+{
+ uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
+ entry >>= (block & BBT_ENTRY_MASK) * 2;
+ return entry & BBT_ENTRY_MASK;
+}
+
+static inline void bbt_mark_entry(struct nand_chip *chip, int block,
+ uint8_t mark)
+{
+ uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
+ chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
+}
+
+static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ if (memcmp(buf, td->pattern, td->len))
+ return -1;
+ return 0;
+}
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers.
+ */
+static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return check_pattern_no_oob(buf, td);
+
+ /* Compare the pattern */
+ if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. Same as check_pattern, but no optional empty
+ * check.
+ */
+static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ /* Compare the pattern */
+ if (memcmp(buf + td->offs, td->pattern, td->len))
+ return -1;
+ return 0;
+}
+
+/**
+ * add_marker_len - compute the length of the marker in data area
+ * @td: BBT descriptor used for computation
+ *
+ * The length will be 0 if the marker is located in OOB area.
+ */
+static u32 add_marker_len(struct nand_bbt_descr *td)
+{
+ u32 len;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ return 0;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+ return len;
+}
+
+/**
+ * read_bbt - [GENERIC] Read the bad block table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: block number offset in the table
+ *
+ * Read the bad block table starting from page.
+ */
+static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
+ struct nand_bbt_descr *td, int offs)
+{
+ int res, ret = 0, i, j, act = 0;
+ struct nand_chip *this = mtd->priv;
+ size_t retlen, len, totlen;
+ loff_t from;
+ int bits = td->options & NAND_BBT_NRBITS_MSK;
+ uint8_t msk = (uint8_t)((1 << bits) - 1);
+ u32 marker_len;
+ int reserved_block_code = td->reserved_block_code;
+
+ totlen = (num * bits) >> 3;
+ marker_len = add_marker_len(td);
+ from = ((loff_t)page) << this->page_shift;
+
+ while (totlen) {
+ len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+ if (marker_len) {
+ /*
+ * If the BBT marker is not in the OOB area, it sits at
+ * the start of the first page only, so skip it there.
+ */
+ len -= marker_len;
+ from += marker_len;
+ marker_len = 0;
+ }
+ res = mtd_read(mtd, from, len, &retlen, buf);
+ if (res < 0) {
+ if (mtd_is_eccerr(res)) {
+ pr_info("nand_bbt: ECC error in BBT at 0x%012llx\n",
+ from & ~mtd->writesize);
+ return res;
+ } else if (mtd_is_bitflip(res)) {
+ pr_info("nand_bbt: corrected error in BBT at 0x%012llx\n",
+ from & ~mtd->writesize);
+ ret = res;
+ } else {
+ pr_info("nand_bbt: error reading BBT\n");
+ return res;
+ }
+ }
+
+ /* Analyse data */
+ for (i = 0; i < len; i++) {
+ uint8_t dat = buf[i];
+ for (j = 0; j < 8; j += bits, act++) {
+ uint8_t tmp = (dat >> j) & msk;
+ if (tmp == msk)
+ continue;
+ if (reserved_block_code && (tmp == reserved_block_code)) {
+ pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_RESERVED);
+ mtd->ecc_stats.bbtblocks++;
+ continue;
+ }
+ /*
+ * Leave this at pr_info for now; once the code has matured we
+ * can move this message to pr_debug.
+ */
+ pr_info("nand_read_bbt: bad block at 0x%012llx\n",
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ /* Factory marked bad or worn out? */
+ if (tmp == 0)
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_FACTORY_BAD);
+ else
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_WORN);
+ mtd->ecc_stats.badblocks++;
+ }
+ }
+ totlen -= len;
+ from += len;
+ }
+ return ret;
+}
+
+/**
+ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips; applies only if
+ * NAND_BBT_PERCHIP option is set
+ *
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order.
+ */
+static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ int res = 0, i;
+
+ if (td->options & NAND_BBT_PERCHIP) {
+ int offs = 0;
+ for (i = 0; i < this->numchips; i++) {
+ if (chip == -1 || chip == i)
+ res = read_bbt(mtd, buf, td->pages[i],
+ this->chipsize >> this->bbt_erase_shift,
+ td, offs);
+ if (res)
+ return res;
+ offs += this->chipsize >> this->bbt_erase_shift;
+ }
+ } else {
+ res = read_bbt(mtd, buf, td->pages[0],
+ mtd->size >> this->bbt_erase_shift, td, 0);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+/* BBT marker is in the first page, no OOB */
+static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ struct nand_bbt_descr *td)
+{
+ size_t retlen;
+ size_t len;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+
+ return mtd_read(mtd, offs, len, &retlen, buf);
+}
+
+/**
+ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @offs: offset at which to scan
+ * @len: length of data region to read
+ *
+ * Scan read data from data+OOB. May traverse multiple pages, interleaving
+ * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
+ * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
+ */
+static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ size_t len)
+{
+ struct mtd_oob_ops ops;
+ int res, ret = 0;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+
+ while (len > 0) {
+ ops.datbuf = buf;
+ ops.len = min(len, (size_t)mtd->writesize);
+ ops.oobbuf = buf + ops.len;
+
+ res = mtd_read_oob(mtd, offs, &ops);
+ if (res) {
+ if (!mtd_is_bitflip_or_eccerr(res))
+ return res;
+ else if (mtd_is_eccerr(res) || !ret)
+ ret = res;
+ }
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
+ offs += mtd->writesize;
+ }
+ return ret;
+}
+
+static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ size_t len, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return scan_read_data(mtd, buf, offs, td);
+ else
+ return scan_read_oob(mtd, buf, offs, len);
+}
+
+/* Scan write data with oob to flash */
+static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
+ uint8_t *buf, uint8_t *oob)
+{
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.datbuf = buf;
+ ops.oobbuf = oob;
+ ops.len = len;
+
+ return mtd_write_oob(mtd, offs, &ops);
+}
+
+static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ u32 ver_offs = td->veroffs;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ ver_offs += mtd->writesize;
+ return ver_offs;
+}
+
+/**
+ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
+static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+ struct nand_chip *this = mtd->priv;
+
+ /* Read the primary version, if available */
+ if (td->options & NAND_BBT_VERSION) {
+ scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
+ mtd->writesize, td);
+ td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ td->pages[0], td->version[0]);
+ }
+
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
+ }
+}
+
+/* Scan a given block partially */
+static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+ loff_t offs, uint8_t *buf, int numpages)
+{
+ struct mtd_oob_ops ops;
+ int j, ret;
+
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ for (j = 0; j < numpages; j++) {
+ /*
+ * Read the full oob until read_oob is fixed to handle single
+ * byte reads for 16 bit buswidth.
+ */
+ ret = mtd_read_oob(mtd, offs, &ops);
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
+ return ret;
+
+ if (check_short_pattern(buf, bd))
+ return 1;
+
+ offs += mtd->writesize;
+ }
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
+ */
+static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, numblocks, numpages;
+ int startblock;
+ loff_t from;
+
+ pr_info("Scanning device for bad blocks\n");
+
+ if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ numpages = 2;
+ else
+ numpages = 1;
+
+ if (chip == -1) {
+ numblocks = mtd->size >> this->bbt_erase_shift;
+ startblock = 0;
+ from = 0;
+ } else {
+ if (chip >= this->numchips) {
+ pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
+ chip + 1, this->numchips);
+ return -EINVAL;
+ }
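+ /*
+ * Example: a 256 MiB chip (chipsize 0x10000000) takes the first
+ * branch, giving chip_shift = 28; an 8 GiB chip (0x200000000, low
+ * 32 bits zero) gets ffs(2) + 32 - 1 = 33.
+ */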
+ numblocks = this->chipsize >> this->bbt_erase_shift;
+ startblock = chip * numblocks;
+ numblocks += startblock;
+ from = (loff_t)startblock << this->bbt_erase_shift;
+ }
+
+ if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
+ from += mtd->erasesize - (mtd->writesize * numpages);
+
+ for (i = startblock; i < numblocks; i++) {
+ int ret;
+
+ BUG_ON(bd->options & NAND_BBT_NO_OOB);
+
+ ret = scan_block_fast(mtd, bd, from, buf, numpages);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i, (unsigned long long)from);
+ mtd->ecc_stats.badblocks++;
+ }
+
+ from += (1 << this->bbt_erase_shift);
+ }
+ return 0;
+}
+
+/**
+ * search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern. Search is
+ * performed either from the beginning up or from the end of the device
+ * downwards. The search always starts at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page in a block.
+ */
+static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, chips;
+ int startblock, block, dir;
+ int scanlen = mtd->writesize + mtd->oobsize;
+ int bbtblocks;
+ int blocktopage = this->bbt_erase_shift - this->page_shift;
+
+ /* Search direction top -> down? */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+ dir = -1;
+ } else {
+ startblock = 0;
+ dir = 1;
+ }
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ startblock &= bbtblocks - 1;
+ } else {
+ chips = 1;
+ bbtblocks = mtd->size >> this->bbt_erase_shift;
+ }
+
+ for (i = 0; i < chips; i++) {
+ /* Reset version information */
+ td->version[i] = 0;
+ td->pages[i] = -1;
+ /* Scan the maximum number of blocks */
+ for (block = 0; block < td->maxblocks; block++) {
+
+ int actblock = startblock + dir * block;
+ loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+
+ /* Read first page */
+ scan_read(mtd, buf, offs, mtd->writesize, td);
+ if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
+ td->pages[i] = actblock << blocktopage;
+ if (td->options & NAND_BBT_VERSION) {
+ offs = bbt_get_ver_offs(mtd, td);
+ td->version[i] = buf[offs];
+ }
+ break;
+ }
+ }
+ startblock += this->chipsize >> this->bbt_erase_shift;
+ }
+ /* Check if we found a bbt for each requested chip */
+ for (i = 0; i < chips; i++) {
+ if (td->pages[i] == -1)
+ pr_warn("Bad block table not found for chip %d\n", i);
+ else
+ pr_info("Bad block table found at page %d, version 0x%02X\n",
+ td->pages[i], td->version[i]);
+ }
+ return 0;
+}
+
+/**
+ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Search and read the bad block table(s).
+ */
+static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md)
+{
+ /* Search the primary table */
+ search_bbt(mtd, buf, td);
+
+ /* Search the mirror table */
+ if (md)
+ search_bbt(mtd, buf, md);
+}
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table.
+ */
+static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+ int chipsel)
+{
+ struct nand_chip *this = mtd->priv;
+ struct erase_info einfo;
+ int i, res, chip = 0;
+ int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+ int nrchips, pageoffs, ooboffs;
+ uint8_t msk[4];
+ uint8_t rcode = td->reserved_block_code;
+ size_t retlen, len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops;
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ if (!rcode)
+ rcode = 0xff;
+ /* Write bad block table per chip rather than per device? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ /* Full device write or specific chip? */
+ if (chipsel == -1) {
+ nrchips = this->numchips;
+ } else {
+ nrchips = chipsel + 1;
+ chip = chipsel;
+ }
+ } else {
+ numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ nrchips = 1;
+ }
+
+ /* Loop through the chips */
+ for (; chip < nrchips; chip++) {
+ /*
+ * If there was already a version of the table, reuse that page.
+ * This applies for absolute placement too, as we have the
+ * page nr. in td->pages.
+ */
+ if (td->pages[chip] != -1) {
+ page = td->pages[chip];
+ goto write;
+ }
+
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+ /* Check if the block is bad */
+ switch (bbt_get_entry(this, block)) {
+ case BBT_BLOCK_WORN:
+ case BBT_BLOCK_FACTORY_BAD:
+ continue;
+ }
+ page = block <<
+ (this->bbt_erase_shift - this->page_shift);
+ /* Check if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ goto write;
+ }
+ pr_err("No space left to write bad block table\n");
+ return -ENOSPC;
+ write:
+
+ /* Set up shift count and masks for the flash table */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ msk[2] = ~rcode;
+ switch (bits) {
+ case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x01;
+ break;
+ case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x03;
+ break;
+ case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
+ msk[3] = 0x0f;
+ break;
+ case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
+ msk[3] = 0xff;
+ break;
+ default: return -EINVAL;
+ }
+
+ to = ((loff_t)page) << this->page_shift;
+
+ /* Must we save the block contents? */
+ if (td->options & NAND_BBT_SAVECONTENT) {
+ /* Make it block aligned */
+ to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
+ len = 1 << this->bbt_erase_shift;
+ res = mtd_read(mtd, to, len, &retlen, buf);
+ if (res < 0) {
+ if (retlen != len) {
+ pr_info("nand_bbt: error reading block for writing the bad block table\n");
+ return res;
+ }
+ pr_warn("nand_bbt: ECC error while reading block for writing bad block table\n");
+ }
+ /* Read oob data */
+ ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ ops.oobbuf = &buf[len];
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
+ if (res < 0 || ops.oobretlen != ops.ooblen)
+ goto outerr;
+
+ /* Calc the byte offset in the buffer */
+ pageoffs = page - (int)(to >> this->page_shift);
+ offs = pageoffs << this->page_shift;
+ /* Preset the bbt area with 0xff */
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
+ ooboffs = len + (pageoffs * mtd->oobsize);
+
+ } else if (td->options & NAND_BBT_NO_OOB) {
+ ooboffs = 0;
+ offs = td->len;
+ /* The version byte */
+ if (td->options & NAND_BBT_VERSION)
+ offs++;
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ len += offs;
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len);
+ /* Pattern is located at the beginning of the first page */
+ memcpy(buf, td->pattern, td->len);
+ } else {
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len +
+ (len >> this->page_shift) * mtd->oobsize);
+ offs = 0;
+ ooboffs = len;
+ /* Pattern is located in the oob area of the first page */
+ memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+ }
+
+ if (td->options & NAND_BBT_VERSION)
+ buf[ooboffs + td->veroffs] = td->version[chip];
+
+ /* Walk through the memory table */
+ for (i = 0; i < numblocks; i++) {
+ uint8_t dat;
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ dat = bbt_get_entry(this, chip * numblocks + i);
+ /* Do not store the reserved bbt blocks! */
+ buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
+ }
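+
+ /*
+ * With the common 2-bit table (sft = 2) and buf preset to 0xff:
+ * a good block (msk[0] = 0x00) stays 11b, a worn block
+ * (msk[1] = 0x01) becomes 10b and a factory bad block
+ * (msk[3] = 0x03) becomes 00b, matching the flash table scheme
+ * described in the file header.
+ */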
+
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = to;
+ einfo.len = 1 << this->bbt_erase_shift;
+ res = nand_erase_nand(mtd, &einfo, 1);
+ if (res < 0)
+ goto outerr;
+
+ res = scan_write_bbt(mtd, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ? NULL :
+ &buf[len]);
+ if (res < 0)
+ goto outerr;
+
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
+
+ /* Mark it as used */
+ td->pages[chip] = page;
+ }
+ return 0;
+
+ outerr:
+ pr_warn("nand_bbt: error while writing bad block table %d\n", res);
+ return res;
+}
+
+/**
+ * nand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
+static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ return create_bbt(mtd, this->buffers->databuf, bd, -1);
+}
+
+/**
+ * check_create - [GENERIC] create and write bbt(s) if necessary
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version nr. of one table is less than the other.
+ */
+static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
+{
+ int i, chips, writeops, create, chipsel, res, res2;
+ struct nand_chip *this = mtd->priv;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+ struct nand_bbt_descr *rd, *rd2;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP)
+ chips = this->numchips;
+ else
+ chips = 1;
+
+ for (i = 0; i < chips; i++) {
+ writeops = 0;
+ create = 0;
+ rd = NULL;
+ rd2 = NULL;
+ res = res2 = 0;
+ /* Per chip or per device? */
+ chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
+ /* Mirrored table available? */
+ if (md) {
+ if (td->pages[i] == -1 && md->pages[i] == -1) {
+ create = 1;
+ writeops = 0x03;
+ } else if (td->pages[i] == -1) {
+ rd = md;
+ writeops = 0x01;
+ } else if (md->pages[i] == -1) {
+ rd = td;
+ writeops = 0x02;
+ } else if (td->version[i] == md->version[i]) {
+ rd = td;
+ if (!(td->options & NAND_BBT_VERSION))
+ rd2 = md;
+ } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
+ rd = td;
+ writeops = 0x02;
+ } else {
+ rd = md;
+ writeops = 0x01;
+ }
+ } else {
+ if (td->pages[i] == -1) {
+ create = 1;
+ writeops = 0x01;
+ } else {
+ rd = td;
+ }
+ }
+
+ if (create) {
+ /* Create the bad block table by scanning the device? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
+ create_bbt(mtd, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ }
+
+ /* Read back first? */
+ if (rd) {
+ res = read_abs_bbt(mtd, buf, rd, chipsel);
+ if (mtd_is_eccerr(res)) {
+ /* Mark table as invalid */
+ rd->pages[i] = -1;
+ rd->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+ /* If they weren't versioned, read both */
+ if (rd2) {
+ res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+ if (mtd_is_eccerr(res2)) {
+ /* Mark table as invalid */
+ rd2->pages[i] = -1;
+ rd2->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+
+ /* Scrub the flash table(s)? */
+ if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+ writeops = 0x03;
+
+ /* Update version numbers before writing */
+ if (md) {
+ td->version[i] = max(td->version[i], md->version[i]);
+ md->version[i] = td->version[i];
+ }
+
+ /* Write the bad block table to the device? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, td, md, chipsel);
+ if (res < 0)
+ return res;
+ }
+
+ /* Write the mirror bad block table to the device? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, md, td, chipsel);
+ if (res < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+/**
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02.
+ */
+static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ } else {
+ chips = 1;
+ nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ }
+
+ for (i = 0; i < chips; i++) {
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1)
+ continue;
+ block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if ((oldval != BBT_BLOCK_RESERVED) &&
+ td->reserved_block_code)
+ nand_update_bbt(mtd, (loff_t)block <<
+ this->bbt_erase_shift);
+ continue;
+ }
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK)
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ else
+ block = i * nrblocks;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if (oldval != BBT_BLOCK_RESERVED)
+ update = 1;
+ block++;
+ }
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
+ if (update && td->reserved_block_code)
+ nand_update_bbt(mtd, (loff_t)(block - 1) <<
+ this->bbt_erase_shift);
+ }
+}
+
+/**
+ * verify_bbt_descr - verify the bad block description
+ * @mtd: MTD device structure
+ * @bd: the table to verify
+ *
+ * This function performs a few sanity checks on the bad block description
+ * table.
+ */
+static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+ u32 pattern_len;
+ u32 bits;
+ u32 table_size;
+
+ if (!bd)
+ return;
+
+ pattern_len = bd->len;
+ bits = bd->options & NAND_BBT_NRBITS_MSK;
+
+ BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+ !(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!bits);
+
+ if (bd->options & NAND_BBT_VERSION)
+ pattern_len++;
+
+ if (bd->options & NAND_BBT_NO_OOB) {
+ BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
+ BUG_ON(bd->offs);
+ if (bd->options & NAND_BBT_VERSION)
+ BUG_ON(bd->veroffs != bd->len);
+ BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
+ }
+
+ if (bd->options & NAND_BBT_PERCHIP)
+ table_size = this->chipsize >> this->bbt_erase_shift;
+ else
+ table_size = mtd->size >> this->bbt_erase_shift;
+ table_size >>= 3;
+ table_size *= bits;
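+ /*
+ * e.g. 2048 blocks tracked at 2 bits per block give
+ * table_size = (2048 >> 3) * 2 = 512 bytes, which must fit in one
+ * erase block (checked below).
+ */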
+ if (bd->options & NAND_BBT_NO_OOB)
+ table_size += pattern_len;
+ BUG_ON(table_size > (1 << this->bbt_erase_shift));
+}
+
+/**
+ * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks if bad block table(s) are already available. If
+ * not it scans the device for manufacturer marked good / bad blocks and writes
+ * the bad block table(s) to the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_release function (which kfrees chip->bbt).
+ */
+int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+ int len, res = 0;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ len = mtd->size >> (this->bbt_erase_shift + 2);
+ /*
+ * Allocate memory (2 bits per block) and clear the memory bad block
+ * table.
+ */
+ this->bbt = kzalloc(len, GFP_KERNEL);
+ if (!this->bbt)
+ return -ENOMEM;
+
+ /*
+ * If no primary table descriptor is given, scan the device to build a
+ * memory based bad block table.
+ */
+ if (!td) {
+ res = nand_memory_bbt(mtd, bd);
+ if (res) {
+ pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
+ kfree(this->bbt);
+ this->bbt = NULL;
+ }
+ return res;
+ }
+ verify_bbt_descr(mtd, td);
+ verify_bbt_descr(mtd, md);
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = vmalloc(len);
+ if (!buf) {
+ kfree(this->bbt);
+ this->bbt = NULL;
+ return -ENOMEM;
+ }
+
+ /* Is the bbt at a given page? */
+ if (td->options & NAND_BBT_ABSPAGE) {
+ read_abs_bbts(mtd, buf, td, md);
+ } else {
+ /* Search the bad block table using a pattern in oob */
+ search_read_bbts(mtd, buf, td, md);
+ }
+
+ res = check_create(mtd, buf, bd);
+
+ /* Prevent the bbt regions from erasing / writing */
+ mark_bbt_region(mtd, td);
+ if (md)
+ mark_bbt_region(mtd, md);
+
+ vfree(buf);
+ return res;
+}
+
+/**
+ * nand_update_bbt - update bad block table(s)
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s).
+ */
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd->priv;
+ int len, res = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chip = (int)(offs >> this->chip_shift);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
+ res = write_bbt(mtd, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, md, td, chipsel);
+ }
+
+ out:
+ kfree(buf);
+ return res;
+}
+
+/*
+ * Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+/* Generic flash bbt descriptors */
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+static struct nand_bbt_descr bbt_main_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
+/**
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
+ *
+ * This function allocates and initializes a nand_bbt_descr for BBM detection
+ * based on the properties of @this. The new descriptor is stored in
+ * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
+ * passed to this function.
+ */
+static int nand_create_badblock_pattern(struct nand_chip *this)
+{
+ struct nand_bbt_descr *bd;
+ if (this->badblock_pattern) {
+ pr_warn("Bad block pattern already allocated; not replacing\n");
+ return -EINVAL;
+ }
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return -ENOMEM;
+ bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
+ bd->offs = this->badblockpos;
+ bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ bd->pattern = scan_ff_pattern;
+ bd->options |= NAND_BBT_DYNAMICSTRUCT;
+ this->badblock_pattern = bd;
+ return 0;
+}
+
+/**
+ * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
+ * @mtd: MTD device structure
+ *
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function.
+ */
+int nand_default_bbt(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ int ret;
+
+ /* Is a flash based bad block table requested? */
+ if (this->bbt_options & NAND_BBT_USE_FLASH) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ if (this->bbt_options & NAND_BBT_NO_OOB) {
+ this->bbt_td = &bbt_main_no_oob_descr;
+ this->bbt_md = &bbt_mirror_no_oob_descr;
+ } else {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ }
+ } else {
+ this->bbt_td = NULL;
+ this->bbt_md = NULL;
+ }
+
+ if (!this->badblock_pattern) {
+ ret = nand_create_badblock_pattern(this);
+ if (ret)
+ return ret;
+ }
+
+ return nand_scan_bbt(mtd, this->badblock_pattern);
+}
+
+/**
+ * nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ */
+int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd->priv;
+ int block;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+ return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
+}
+
+/**
+ * nand_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
+int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+ struct nand_chip *this = mtd->priv;
+ int block, res;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+ res = bbt_get_entry(this, block);
+
+ pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ (unsigned int)offs, block, res);
+
+ switch (res) {
+ case BBT_BLOCK_GOOD:
+ return 0;
+ case BBT_BLOCK_WORN:
+ return 1;
+ case BBT_BLOCK_RESERVED:
+ return allowbbt ? 0 : 1;
+ }
+ return 1;
+}
+
+/**
+ * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
+ * @mtd: MTD device structure
+ * @offs: offset of the bad block
+ */
+int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd->priv;
+ int block, ret = 0;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+
+ /* Mark bad block in memory */
+ bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+ /* Update flash-based bad block table */
+ if (this->bbt_options & NAND_BBT_USE_FLASH)
+ ret = nand_update_bbt(mtd, offs);
+
+ return ret;
+}
+
+EXPORT_SYMBOL(nand_scan_bbt);
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
new file mode 100644
index 000000000..3803e0bba
--- /dev/null
+++ b/drivers/mtd/nand/nand_bch.c
@@ -0,0 +1,243 @@
+/*
+ * This file provides ECC correction for more than 1 bit per block of data,
+ * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
+ *
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this file; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/bch.h>
+
+/**
+ * struct nand_bch_control - private NAND BCH control structure
+ * @bch: BCH control structure
+ * @ecclayout: private ecc layout for this BCH configuration
+ * @errloc: error location array
+ * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_bch_control {
+ struct bch_control *bch;
+ struct nand_ecclayout ecclayout;
+ unsigned int *errloc;
+ unsigned char *eccmask;
+};
+
+/**
+ * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
+ * @mtd: MTD block structure
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+ unsigned char *code)
+{
+ const struct nand_chip *chip = mtd->priv;
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int i;
+
+ memset(code, 0, chip->ecc.bytes);
+ encode_bch(nbc->bch, buf, chip->ecc.size, code);
+
+ /* apply mask so that an erased page is a valid codeword */
+ for (i = 0; i < chip->ecc.bytes; i++)
+ code[i] ^= nbc->eccmask[i];
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_bch_calculate_ecc);
+
+/**
+ * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct bit errors for a data byte block
+ */
+int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ const struct nand_chip *chip = mtd->priv;
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int *errloc = nbc->errloc;
+ int i, count;
+
+ count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
+ NULL, errloc);
+ if (count > 0) {
+ for (i = 0; i < count; i++) {
+ if (errloc[i] < (chip->ecc.size*8))
+ /* error is located in data, correct it */
+ buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
+ /* else error in ecc, no action needed */
+
+ pr_debug("%s: corrected bitflip %u\n", __func__,
+ errloc[i]);
+ }
+ } else if (count < 0) {
+ printk(KERN_ERR "ecc unrecoverable error\n");
+ count = -1;
+ }
+ return count;
+}
+EXPORT_SYMBOL(nand_bch_correct_data);
+
+/**
+ * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
+ * @mtd: MTD block structure
+ * @eccsize: ecc block size in bytes
+ * @eccbytes: ecc length in bytes
+ * @ecclayout: output default layout
+ *
+ * Returns:
+ * a pointer to a new NAND BCH control structure, or NULL upon failure
+ *
+ * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
+ * are used to compute BCH parameters m (Galois field order) and t (error
+ * correction capability). @eccbytes should be equal to the number of bytes
+ * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
+ *
+ * Example: to configure 4 bit correction per 512 bytes, you should pass
+ * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
+ * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
+ */
+struct nand_bch_control *
+nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
+ struct nand_ecclayout **ecclayout)
+{
+ unsigned int m, t, eccsteps, i;
+ struct nand_ecclayout *layout;
+ struct nand_bch_control *nbc = NULL;
+ unsigned char *erased_page;
+
+ if (!eccsize || !eccbytes) {
+ printk(KERN_WARNING "ecc parameters not supplied\n");
+ goto fail;
+ }
+
+ m = fls(1+8*eccsize);
+ t = (eccbytes*8)/m;
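+ /*
+ * e.g. eccsize = 512 and eccbytes = 7: m = fls(4097) = 13 and
+ * t = 56 / 13 = 4, i.e. 4 bit correction per 512 byte block.
+ */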
+
+ nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
+ if (!nbc)
+ goto fail;
+
+ nbc->bch = init_bch(m, t, 0);
+ if (!nbc->bch)
+ goto fail;
+
+ /* verify that eccbytes has the expected value */
+ if (nbc->bch->ecc_bytes != eccbytes) {
+ printk(KERN_WARNING "invalid eccbytes %u, should be %u\n",
+ eccbytes, nbc->bch->ecc_bytes);
+ goto fail;
+ }
+
+ eccsteps = mtd->writesize/eccsize;
+
+ /* if no ecc placement scheme was provided, build one */
+ if (!*ecclayout) {
+
+ /* handle large page devices only */
+ if (mtd->oobsize < 64) {
+ printk(KERN_WARNING "must provide an oob scheme for "
+ "oobsize %d\n", mtd->oobsize);
+ goto fail;
+ }
+
+ layout = &nbc->ecclayout;
+ layout->eccbytes = eccsteps*eccbytes;
+
+ /* reserve 2 bytes for bad block marker */
+ if (layout->eccbytes+2 > mtd->oobsize) {
+ printk(KERN_WARNING "no suitable oob scheme available "
+ "for oobsize %d eccbytes %u\n", mtd->oobsize,
+ eccbytes);
+ goto fail;
+ }
+ /* put ecc bytes at oob tail */
+ for (i = 0; i < layout->eccbytes; i++)
+ layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
+
+ *ecclayout = layout;
+ }
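+
+ /*
+ * e.g. oobsize = 64 with eccsteps = 4 and eccbytes = 7: the 28
+ * ECC bytes land at oob positions 36..63 and oobfree[0] covers
+ * offset 2, length 34.
+ */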
+
+ /* sanity checks */
+ if (8*(eccsize+eccbytes) >= (1 << m)) {
+ printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
+ goto fail;
+ }
+ if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
+ printk(KERN_WARNING "invalid ecc layout\n");
+ goto fail;
+ }
+
+ nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
+ nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
+ if (!nbc->eccmask || !nbc->errloc)
+ goto fail;
+ /*
+ * compute and store the inverted ecc of an erased ecc block
+ */
+ erased_page = kmalloc(eccsize, GFP_KERNEL);
+ if (!erased_page)
+ goto fail;
+
+ memset(erased_page, 0xff, eccsize);
+ memset(nbc->eccmask, 0, eccbytes);
+ encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
+ kfree(erased_page);
+
+ for (i = 0; i < eccbytes; i++)
+ nbc->eccmask[i] ^= 0xff;
+
+ return nbc;
+fail:
+ nand_bch_free(nbc);
+ return NULL;
+}
+EXPORT_SYMBOL(nand_bch_init);
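+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a board
+ * driver selecting software BCH typically wires these helpers into its
+ * nand_chip ECC hooks before nand_scan_tail(). The 512/7 parameters
+ * mirror the 4-bit-per-512-bytes example in the nand_bch_init()
+ * kerneldoc above; nand_bch_calculate_ecc() is the companion helper
+ * defined earlier in this file.
+ *
+ *	chip->ecc.mode      = NAND_ECC_SOFT_BCH;
+ *	chip->ecc.size      = 512;
+ *	chip->ecc.bytes     = 7;
+ *	chip->ecc.calculate = nand_bch_calculate_ecc;
+ *	chip->ecc.correct   = nand_bch_correct_data;
+ *	chip->ecc.priv      = nand_bch_init(mtd, chip->ecc.size,
+ *					    chip->ecc.bytes,
+ *					    &chip->ecc.layout);
+ *	if (!chip->ecc.priv)
+ *		return -EINVAL;
+ */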
+
+/**
+ * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
+ * @nbc: NAND BCH control structure
+ */
+void nand_bch_free(struct nand_bch_control *nbc)
+{
+ if (nbc) {
+ free_bch(nbc->bch);
+ kfree(nbc->errloc);
+ kfree(nbc->eccmask);
+ kfree(nbc);
+ }
+}
+EXPORT_SYMBOL(nand_bch_free);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
+MODULE_DESCRIPTION("NAND software BCH ECC support");
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
new file mode 100644
index 000000000..97c4c0216
--- /dev/null
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -0,0 +1,533 @@
+/*
+ * This file contains an ECC algorithm that detects and corrects 1 bit
+ * errors in a 256 byte block of data.
+ *
+ * drivers/mtd/nand/nand_ecc.c
+ *
+ * Copyright © 2008 Koninklijke Philips Electronics NV.
+ * Author: Frans Meulenbroeks
+ *
+ * Completely replaces the previous ECC implementation which was written by:
+ * Steven J. Hill (sjhill@realitydiluted.com)
+ * Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Information on how this algorithm works and how it was developed
+ * can be found in Documentation/mtd/nand_ecc.txt
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this file; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+/*
+ * The STANDALONE macro is useful when running the code outside the kernel
+ * e.g. when running the code in a testbed or a benchmark program.
+ * When STANDALONE is used, the module related macros are commented out
+ * as well as the linux include files.
+ * Instead a private definition of mtd_info is given to satisfy the compiler
+ * (the code does not use mtd_info, so the code does not care)
+ */
+#ifndef STANDALONE
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/byteorder.h>
+#else
+#include <stdint.h>
+#include <stdio.h>	/* for the pr_err -> printf fallback below */
+struct mtd_info;
+#define EXPORT_SYMBOL(x) /* x */
+
+#define MODULE_LICENSE(x) /* x */
+#define MODULE_AUTHOR(x) /* x */
+#define MODULE_DESCRIPTION(x) /* x */
+
+#define pr_err printf
+#endif
+
+/*
+ * invparity is a 256 byte table that contains the odd parity
+ * for each byte. So if the number of bits in a byte is even,
+ * the array element is 1, and when the number of bits is odd
+ * the array element is 0.
+ */
+static const char invparity[256] = {
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
+};
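+
+/*
+ * Example (illustrative): 0x03 has two set bits (even parity), so
+ * invparity[0x03] == 1, while 0x07 has three set bits (odd parity),
+ * so invparity[0x07] == 0.
+ */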
+
+/*
+ * bitsperbyte contains the number of bits per byte
+ * this is only used for testing and repairing parity
+ * (a precalculated value slightly improves performance)
+ */
+static const char bitsperbyte[256] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
+
+/*
+ * addressbits is a lookup table to filter out the bits from the xor-ed
+ * ECC data that identify the faulty location.
+ * this is only used for repairing parity
+ * see the comments in nand_correct_data for more details
+ */
+static const char addressbits[256] = {
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
+};
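+
+/*
+ * Example (illustrative): in the xor-ed syndrome of a single bit
+ * error every parity pair is either '01' or '10', and the upper bit
+ * of each pair carries one address bit. addressbits[] compacts those
+ * upper bits: addressbits[0xaa] == 0x0f (all four pairs are '10'),
+ * addressbits[0x02] == 0x01 (only the lowest pair is '10').
+ */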
+
+/**
+ * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ * block
+ * @buf: input buffer with raw data
+ * @eccsize: data bytes per ECC step (256 or 512)
+ * @code: output buffer with ECC
+ */
+void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
+ unsigned char *code)
+{
+ int i;
+ const uint32_t *bp = (uint32_t *)buf;
+ /* 256 or 512 bytes/ecc */
+ const uint32_t eccsize_mult = eccsize >> 8;
+ uint32_t cur; /* current value in buffer */
+ /* rp0..rp15..rp17 are the various accumulated parities (per byte) */
+ uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+ uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
+ uint32_t uninitialized_var(rp17); /* to make compiler happy */
+ uint32_t par; /* the cumulative parity for all data */
+ uint32_t tmppar; /* the cumulative parity for this iteration;
+ for rp12, rp14 and rp16 at the end of the
+ loop */
+
+ par = 0;
+ rp4 = 0;
+ rp6 = 0;
+ rp8 = 0;
+ rp10 = 0;
+ rp12 = 0;
+ rp14 = 0;
+ rp16 = 0;
+
+ /*
+ * The loop is unrolled a number of times;
+ * This avoids if statements to decide on which rp value to update
+ * Also we process the data by longwords.
+ * Note: passing unaligned data might give a performance penalty.
+ * It is assumed that the buffers are aligned.
+ * tmppar is the cumulative sum of this iteration.
+ * needed for calculating rp12, rp14, rp16 and par
+ * also used as a performance improvement for rp6, rp8 and rp10
+ */
+ for (i = 0; i < eccsize_mult << 2; i++) {
+ cur = *bp++;
+ tmppar = cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= tmppar;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= tmppar;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp10 ^= tmppar;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= cur;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+
+ par ^= tmppar;
+ if ((i & 0x1) == 0)
+ rp12 ^= tmppar;
+ if ((i & 0x2) == 0)
+ rp14 ^= tmppar;
+ if (eccsize_mult == 2 && (i & 0x4) == 0)
+ rp16 ^= tmppar;
+ }
+
+ /*
+ * handle the fact that we use longword operations
+ * we'll bring rp4..rp14..rp16 back to single byte entities by
+ * shifting and xoring: first fold the upper and lower 16 bits,
+ * then the upper and lower 8 bits.
+ */
+ rp4 ^= (rp4 >> 16);
+ rp4 ^= (rp4 >> 8);
+ rp4 &= 0xff;
+ rp6 ^= (rp6 >> 16);
+ rp6 ^= (rp6 >> 8);
+ rp6 &= 0xff;
+ rp8 ^= (rp8 >> 16);
+ rp8 ^= (rp8 >> 8);
+ rp8 &= 0xff;
+ rp10 ^= (rp10 >> 16);
+ rp10 ^= (rp10 >> 8);
+ rp10 &= 0xff;
+ rp12 ^= (rp12 >> 16);
+ rp12 ^= (rp12 >> 8);
+ rp12 &= 0xff;
+ rp14 ^= (rp14 >> 16);
+ rp14 ^= (rp14 >> 8);
+ rp14 &= 0xff;
+ if (eccsize_mult == 2) {
+ rp16 ^= (rp16 >> 16);
+ rp16 ^= (rp16 >> 8);
+ rp16 &= 0xff;
+ }
+
+ /*
+ * we also need to calculate the row parity for rp0..rp3
+ * This is present in par, because par is now
+ * rp3 rp3 rp2 rp2 in little endian and
+ * rp2 rp2 rp3 rp3 in big endian
+ * as well as
+ * rp1 rp0 rp1 rp0 in little endian and
+ * rp0 rp1 rp0 rp1 in big endian
+ * First calculate rp2 and rp3
+ */
+#ifdef __BIG_ENDIAN
+ rp2 = (par >> 16);
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
+ rp3 = par & 0xffff;
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
+#else
+ rp3 = (par >> 16);
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
+ rp2 = par & 0xffff;
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
+#endif
+
+ /* reduce par to 16 bits then calculate rp1 and rp0 */
+ par ^= (par >> 16);
+#ifdef __BIG_ENDIAN
+ rp0 = (par >> 8) & 0xff;
+ rp1 = (par & 0xff);
+#else
+ rp1 = (par >> 8) & 0xff;
+ rp0 = (par & 0xff);
+#endif
+
+ /* finally reduce par to 8 bits */
+ par ^= (par >> 8);
+ par &= 0xff;
+
+ /*
+ * and calculate rp5..rp15..rp17
+ * note that par = rp4 ^ rp5 and due to the commutative property
+ * of the ^ operator we can say:
+ * rp5 = (par ^ rp4);
+ * The & 0xff seems superfluous, but benchmarking showed that
+ * leaving it out gives slightly worse results, probably because
+ * of the way the pipeline in the Pentium is organized.
+ */
+ rp5 = (par ^ rp4) & 0xff;
+ rp7 = (par ^ rp6) & 0xff;
+ rp9 = (par ^ rp8) & 0xff;
+ rp11 = (par ^ rp10) & 0xff;
+ rp13 = (par ^ rp12) & 0xff;
+ rp15 = (par ^ rp14) & 0xff;
+ if (eccsize_mult == 2)
+ rp17 = (par ^ rp16) & 0xff;
+
+ /*
+ * Finally calculate the ECC bits.
+ * Again here it might seem that there are performance optimisations
+ * possible, but benchmarks showed that on the system this was
+ * developed on, the code below is the fastest
+ */
+#ifdef CONFIG_MTD_NAND_ECC_SMC
+ code[0] =
+ (invparity[rp7] << 7) |
+ (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) |
+ (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) |
+ (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) |
+ (invparity[rp0]);
+ code[1] =
+ (invparity[rp15] << 7) |
+ (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) |
+ (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) |
+ (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) |
+ (invparity[rp8]);
+#else
+ code[1] =
+ (invparity[rp7] << 7) |
+ (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) |
+ (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) |
+ (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) |
+ (invparity[rp0]);
+ code[0] =
+ (invparity[rp15] << 7) |
+ (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) |
+ (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) |
+ (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) |
+ (invparity[rp8]);
+#endif
+ if (eccsize_mult == 1)
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ 3;
+ else
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ (invparity[rp17] << 1) |
+ (invparity[rp16] << 0);
+}
+EXPORT_SYMBOL(__nand_calculate_ecc);
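+
+/*
+ * Sanity example (illustrative): for an all-0xff 256-byte block every
+ * column and row parity is even, so each invparity[] lookup yields 1
+ * and the three ECC bytes come out as 0xff 0xff 0xff. This is why a
+ * freshly erased page with an erased OOB area still passes ECC.
+ */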
+
+/**
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ * block
+ * @mtd: MTD block structure
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+ unsigned char *code)
+{
+ __nand_calculate_ecc(buf,
+ ((struct nand_chip *)mtd->priv)->ecc.size, code);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_calculate_ecc);
+
+/**
+ * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ * @eccsize: data bytes per ECC step (256 or 512)
+ *
+ * Detect and correct a 1 bit error for eccsize byte block
+ */
+int __nand_correct_data(unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc,
+ unsigned int eccsize)
+{
+ unsigned char b0, b1, b2, bit_addr;
+ unsigned int byte_addr;
+ /* 256 or 512 bytes/ecc */
+ const uint32_t eccsize_mult = eccsize >> 8;
+
+ /*
+ * b0 to b2 indicate which bit is faulty (if any)
+ * we might need the xor result more than once,
+ * so keep them in a local var
+ */
+#ifdef CONFIG_MTD_NAND_ECC_SMC
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+#else
+ b0 = read_ecc[1] ^ calc_ecc[1];
+ b1 = read_ecc[0] ^ calc_ecc[0];
+#endif
+ b2 = read_ecc[2] ^ calc_ecc[2];
+
+ /* check if there are any bitfaults */
+
+ /* repeated if statements are slightly more efficient than switch ... */
+ /* ordered in order of likelihood */
+
+ if ((b0 | b1 | b2) == 0)
+ return 0; /* no error */
+
+ if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
+ (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
+ ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
+ (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
+ /* single bit error */
+ /*
+ * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
+ * byte, cp 5/3/1 indicate the faulty bit.
+ * A lookup table (called addressbits) is used to filter
+ * the bits from the byte they are in.
+ * A marginal optimisation is possible by having three
+ * different lookup tables.
+ * One as we have now (for b0), one for b2
+ * (that would avoid the >> 1), and one for b1 (with all values
+ * << 4). However it was felt that introducing two more tables
+ * hardly justifies the gain.
+ *
+ * The b2 shift is there to get rid of the lowest two bits.
+ * We could also do addressbits[b2] >> 1 but for the
+ * performance it does not make any difference
+ */
+ if (eccsize_mult == 1)
+ byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+ else
+ byte_addr = (addressbits[b2 & 0x3] << 8) +
+ (addressbits[b1] << 4) + addressbits[b0];
+ bit_addr = addressbits[b2 >> 2];
+ /* flip the bit */
+ buf[byte_addr] ^= (1 << bit_addr);
+ return 1;
+
+ }
+ /* count nr of bits; use table lookup, faster than calculating it */
+ if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
+ return 1; /* error in ECC data; no action needed */
+
+ pr_err("%s: uncorrectable ECC error\n", __func__);
+ return -1;
+}
+EXPORT_SYMBOL(__nand_correct_data);
+
+/**
+ * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct a 1 bit error for 256/512 byte block
+ */
+int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ return __nand_correct_data(buf, read_ecc, calc_ecc,
+ ((struct nand_chip *)mtd->priv)->ecc.size);
+}
+EXPORT_SYMBOL(nand_correct_data);
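+
+/*
+ * Round-trip sketch (illustrative; compiles against the STANDALONE
+ * fallbacks at the top of this file, plus string.h for memset):
+ *
+ *	unsigned char data[256], stored[3], calc[3];
+ *
+ *	memset(data, 0xff, sizeof(data));
+ *	__nand_calculate_ecc(data, 256, stored);  // ECC at "write" time
+ *	data[20] ^= 0x20;                         // inject a single bitflip
+ *	__nand_calculate_ecc(data, 256, calc);    // ECC at "read" time
+ *	// returns 1 and restores bit 5 of data[20]
+ *	__nand_correct_data(data, stored, calc, 256);
+ */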
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
+MODULE_DESCRIPTION("Generic NAND ECC support");
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
new file mode 100644
index 000000000..dd620c19c
--- /dev/null
+++ b/drivers/mtd/nand/nand_ids.c
@@ -0,0 +1,190 @@
+/*
+ * drivers/mtd/nand/nand_ids.c
+ *
+ * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/sizes.h>
+
+#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
+
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
+
+/*
+ * The chip ID list:
+ * name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
+struct nand_flash_dev nand_flash_ids[] = {
+ /*
+ * Some incompatible NAND chips share device IDs and so must be
+ * listed by full ID. We list them first so that we can easily identify
+ * the most specific match.
+ */
+ {"TC58NVG2S0F 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG3S0F 8G 3.3V 8-bit",
+ { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+ SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG5D2 32G 3.3V 8-bit",
+ { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+ SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"TC58NVG6D2 64G 3.3V 8-bit",
+ { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+ SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"SDTNRGAMA 64G 3.3V 8-bit",
+ { .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} },
+ SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
+ {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
+ { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
+ SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K),
+ 4 },
+
+ LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
+
+ /*
+ * These are the new chips with large page size. Their page size and
+ * eraseblock size are determined from the extended ID bytes.
+ */
+
+ /* 512 Megabit */
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16),
+
+ /* 1 Gigabit */
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
+
+ /* 2 Gigabit */
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
+
+ /* 4 Gigabit */
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
+
+ /* 8 Gigabit */
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
+
+ /* 16 Gigabit */
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
+
+ /* 32 Gigabit */
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
+
+ /* 64 Gigabit */
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
+
+ /* 128 Gigabit */
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
+
+ /* 256 Gigabit */
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
+
+ /* 512 Gigabit */
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
+
+ {NULL}
+};
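+
+/*
+ * Example (illustrative): a chip answering "read ID" with manufacturer
+ * 0x98 (Toshiba) and device ID 0xdc is first checked against the
+ * full-ID entries at the top of the table (matching the TC58NVG2S0F
+ * line only if all eight bytes agree); otherwise the 0xdc
+ * EXTENDED_ID_NAND entry applies and the page and eraseblock sizes
+ * are decoded from the extended ID bytes.
+ */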
+
+/* Manufacturer IDs */
+struct nand_manufacturers nand_manuf_ids[] = {
+ {NAND_MFR_TOSHIBA, "Toshiba"},
+ {NAND_MFR_SAMSUNG, "Samsung"},
+ {NAND_MFR_FUJITSU, "Fujitsu"},
+ {NAND_MFR_NATIONAL, "National"},
+ {NAND_MFR_RENESAS, "Renesas"},
+ {NAND_MFR_STMICRO, "ST Micro"},
+ {NAND_MFR_HYNIX, "Hynix"},
+ {NAND_MFR_MICRON, "Micron"},
+ {NAND_MFR_AMD, "AMD/Spansion"},
+ {NAND_MFR_MACRONIX, "Macronix"},
+ {NAND_MFR_EON, "Eon"},
+ {NAND_MFR_SANDISK, "SanDisk"},
+ {NAND_MFR_INTEL, "Intel"},
+ {NAND_MFR_ATO, "ATO"},
+ {0x0, "Unknown"}
+};
+
+EXPORT_SYMBOL(nand_manuf_ids);
+EXPORT_SYMBOL(nand_flash_ids);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Nand device & manufacturer IDs");
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
new file mode 100644
index 000000000..e81470a8a
--- /dev/null
+++ b/drivers/mtd/nand/nand_timings.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/mtd/nand.h>
+
+static const struct nand_sdr_timings onfi_sdr_timings[] = {
+ /* Mode 0 */
+ {
+ .tADL_min = 200000,
+ .tALH_min = 20000,
+ .tALS_min = 50000,
+ .tAR_min = 25000,
+ .tCEA_max = 100000,
+ .tCEH_min = 20000,
+ .tCH_min = 20000,
+ .tCHZ_max = 100000,
+ .tCLH_min = 20000,
+ .tCLR_min = 20000,
+ .tCLS_min = 50000,
+ .tCOH_min = 0,
+ .tCS_min = 70000,
+ .tDH_min = 20000,
+ .tDS_min = 40000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 10000,
+ .tITC_max = 1000000,
+ .tRC_min = 100000,
+ .tREA_max = 40000,
+ .tREH_min = 30000,
+ .tRHOH_min = 0,
+ .tRHW_min = 200000,
+ .tRHZ_max = 200000,
+ .tRLOH_min = 0,
+ .tRP_min = 50000,
+ .tRR_min = 40000,
+ .tRST_max = 250000000000ULL,
+ .tWB_max = 200000,
+ .tWC_min = 100000,
+ .tWH_min = 30000,
+ .tWHR_min = 120000,
+ .tWP_min = 50000,
+ .tWW_min = 100000,
+ },
+ /* Mode 1 */
+ {
+ .tADL_min = 100000,
+ .tALH_min = 10000,
+ .tALS_min = 25000,
+ .tAR_min = 10000,
+ .tCEA_max = 45000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 25000,
+ .tCOH_min = 15000,
+ .tCS_min = 35000,
+ .tDH_min = 10000,
+ .tDS_min = 20000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 50000,
+ .tREA_max = 30000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 25000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 45000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 25000,
+ .tWW_min = 100000,
+ },
+ /* Mode 2 */
+ {
+ .tADL_min = 100000,
+ .tALH_min = 10000,
+ .tALS_min = 15000,
+ .tAR_min = 10000,
+ .tCEA_max = 30000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 15000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 15000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 35000,
+ .tREA_max = 25000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 17000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 35000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 17000,
+ .tWW_min = 100000,
+ },
+ /* Mode 3 */
+ {
+ .tADL_min = 100000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 30000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 15000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 30000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 15000,
+ .tWW_min = 100000,
+ },
+ /* Mode 4 */
+ {
+ .tADL_min = 70000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 20000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 25000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 12000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 25000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 12000,
+ .tWW_min = 100000,
+ },
+ /* Mode 5 */
+ {
+ .tADL_min = 70000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 5000,
+ .tDS_min = 7000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 20000,
+ .tREA_max = 16000,
+ .tREH_min = 7000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 10000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 20000,
+ .tWH_min = 7000,
+ .tWHR_min = 80000,
+ .tWP_min = 10000,
+ .tWW_min = 100000,
+ },
+};
+
+/**
+ * onfi_async_timing_mode_to_sdr_timings - [NAND Interface] Retrieve NAND
+ * timings according to the given ONFI timing mode
+ * @mode: ONFI timing mode
+ */
+const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
+{
+ if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings))
+ return ERR_PTR(-EINVAL);
+
+ return &onfi_sdr_timings[mode];
+}
+EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
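+
+/*
+ * Usage sketch (illustrative): a controller driver maps the ONFI
+ * timing mode advertised by the chip to picosecond constraints and
+ * must handle the ERR_PTR() return for out-of-range modes:
+ *
+ *	const struct nand_sdr_timings *timings;
+ *
+ *	timings = onfi_async_timing_mode_to_sdr_timings(mode);
+ *	if (IS_ERR(timings))
+ *		return PTR_ERR(timings);
+ *	// e.g. derive the write-pulse width from timings->tWP_min
+ */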
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
new file mode 100644
index 000000000..f2324271b
--- /dev/null
+++ b/drivers/mtd/nand/nandsim.c
@@ -0,0 +1,2425 @@
+/*
+ * NAND flash simulator.
+ *
+ * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ *
+ * Note: NS means "NAND Simulator".
+ * Note: Input means input TO flash chip, output means output FROM chip.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ * Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+/* Default simulator parameters values */
+#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
+#define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
+#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
+#define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF /* No byte */
+#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
+#endif
+
+#ifndef CONFIG_NANDSIM_ACCESS_DELAY
+#define CONFIG_NANDSIM_ACCESS_DELAY 25
+#endif
+#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
+#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
+#endif
+#ifndef CONFIG_NANDSIM_ERASE_DELAY
+#define CONFIG_NANDSIM_ERASE_DELAY 2
+#endif
+#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
+#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
+#endif
+#ifndef CONFIG_NANDSIM_INPUT_CYCLE
+#define CONFIG_NANDSIM_INPUT_CYCLE 50
+#endif
+#ifndef CONFIG_NANDSIM_BUS_WIDTH
+#define CONFIG_NANDSIM_BUS_WIDTH 8
+#endif
+#ifndef CONFIG_NANDSIM_DO_DELAYS
+#define CONFIG_NANDSIM_DO_DELAYS 0
+#endif
+#ifndef CONFIG_NANDSIM_LOG
+#define CONFIG_NANDSIM_LOG 0
+#endif
+#ifndef CONFIG_NANDSIM_DBG
+#define CONFIG_NANDSIM_DBG 0
+#endif
+#ifndef CONFIG_NANDSIM_MAX_PARTS
+#define CONFIG_NANDSIM_MAX_PARTS 32
+#endif
+
+static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
+static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
+static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
+static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
+static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
+static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
+static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
+static uint log = CONFIG_NANDSIM_LOG;
+static uint dbg = CONFIG_NANDSIM_DBG;
+static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
+static unsigned int parts_num;
+static char *badblocks = NULL;
+static char *weakblocks = NULL;
+static char *weakpages = NULL;
+static unsigned int bitflips = 0;
+static char *gravepages = NULL;
+static unsigned int overridesize = 0;
+static char *cache_file = NULL;
+static unsigned int bbt;
+static unsigned int bch;
+static u_char id_bytes[8] = {
+ [0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
+ [1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
+ [2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
+ [3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
+ [4 ... 7] = 0xFF,
+};
+
+module_param_array(id_bytes, byte, NULL, 0400);
+module_param_named(first_id_byte, id_bytes[0], byte, 0400);
+module_param_named(second_id_byte, id_bytes[1], byte, 0400);
+module_param_named(third_id_byte, id_bytes[2], byte, 0400);
+module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
+module_param(access_delay, uint, 0400);
+module_param(programm_delay, uint, 0400);
+module_param(erase_delay, uint, 0400);
+module_param(output_cycle, uint, 0400);
+module_param(input_cycle, uint, 0400);
+module_param(bus_width, uint, 0400);
+module_param(do_delays, uint, 0400);
+module_param(log, uint, 0400);
+module_param(dbg, uint, 0400);
+module_param_array(parts, ulong, &parts_num, 0400);
+module_param(badblocks, charp, 0400);
+module_param(weakblocks, charp, 0400);
+module_param(weakpages, charp, 0400);
+module_param(bitflips, uint, 0400);
+module_param(gravepages, charp, 0400);
+module_param(overridesize, uint, 0400);
+module_param(cache_file, charp, 0400);
+module_param(bbt, uint, 0400);
+module_param(bch, uint, 0400);
+
+MODULE_PARM_DESC(id_bytes, "The ID bytes returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
+MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
+MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command (obsolete)");
+MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
+MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
+MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
+MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
+MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
+MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
+MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
+MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
+MODULE_PARM_DESC(log, "Perform logging if not zero");
+MODULE_PARM_DESC(dbg, "Output debug information if not zero");
+MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
+/* Page and erase block positions for the following parameters are independent of any partitions */
+MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
+MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
+ " separated by commas e.g. 113:2 means eb 113"
+ " can be erased only twice before failing");
+MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
+ " separated by commas e.g. 1401:2 means page 1401"
+ " can be written only twice before failing");
+MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
+MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
+ " separated by commas e.g. 1401:2 means page 1401"
+ " can be read only twice before failing");
+MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
+ "The size is specified in erase blocks, as the exponent of a power of two,"
+ " e.g. 5 means a size of 32 erase blocks");
+MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
+MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
+MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
+ "be correctable in 512-byte blocks");
+
+/* The largest possible page size */
+#define NS_LARGEST_PAGE_SIZE 4096
+
+/* The prefix for simulator output */
+#define NS_OUTPUT_PREFIX "[nandsim]"
+
+/* Simulator's output macros (logging, debugging, warning, error) */
+#define NS_LOG(args...) \
+ do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
+#define NS_DBG(args...) \
+ do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
+#define NS_WARN(args...) \
+ do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
+#define NS_ERR(args...) \
+ do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
+#define NS_INFO(args...) \
+ do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
+
+/* Busy-wait delay macros (microseconds, milliseconds) */
+#define NS_UDELAY(us) \
+ do { if (do_delays) udelay(us); } while(0)
+#define NS_MDELAY(us) \
+ do { if (do_delays) mdelay(us); } while(0)
+
+/* Is the nandsim structure initialized? */
+#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
+
+/* Good operation completion status */
+#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
+
+/* Operation failed completion status */
+#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
+
+/* Calculate the page offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET(ns) \
+ (((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
+
+/* Calculate the OOB offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
+
+/* After a command is input, the simulator goes to one of the following states */
+#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
+#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
+#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
+#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
+#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
+#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
+#define STATE_CMD_STATUS 0x00000007 /* read status */
+#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
+#define STATE_CMD_READID 0x0000000A /* read ID */
+#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
+#define STATE_CMD_RESET 0x0000000C /* reset */
+#define STATE_CMD_RNDOUT 0x0000000D /* random output command */
+#define STATE_CMD_RNDOUTSTART 0x0000000E /* random output start command */
+#define STATE_CMD_MASK 0x0000000F /* command states mask */
+
+/* After an address is input, the simulator goes to one of these states */
+#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
+#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
+#define STATE_ADDR_COLUMN 0x00000030 /* column address was accepted */
+#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
+#define STATE_ADDR_MASK 0x00000070 /* address states mask */
+
+/* During data input/output the simulator is in these states */
+#define STATE_DATAIN 0x00000100 /* waiting for data input */
+#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
+
+#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
+#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
+#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
+#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
+
+/* Previous operation is done, ready to accept new requests */
+#define STATE_READY 0x00000000
+
+/* This state is used to mark that the next state isn't known yet */
+#define STATE_UNKNOWN 0x10000000
+
+/* Simulator's actions bit masks */
+#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
+#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
+#define ACTION_SECERASE 0x00300000 /* erase sector */
+#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to the address */
+#define ACTION_HALFOFF 0x00500000 /* add half of the page size to the address */
+#define ACTION_OOBOFF 0x00600000 /* add the OOB offset to the address */
+#define ACTION_MASK 0x00700000 /* action mask */
+
+#define NS_OPER_NUM 13 /* Number of operations supported by the simulator */
+#define NS_OPER_STATES 6 /* Maximum number of states in operation */
+
+#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
+#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
+#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
+#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
+#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
+#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
+#define OPT_SMALLPAGE (OPT_PAGE512) /* 512-byte page chips */
+
+/* Remove action bits from state */
+#define NS_STATE(x) ((x) & ~ACTION_MASK)
+
+/*
+ * Maximum previous states which need to be saved. Currently saving is
+ * only needed for a page program operation preceded by a read command
+ * (which is only valid for 512-byte pages).
+ */
+#define NS_MAX_PREVSTATES 1
+
+/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
+#define NS_MAX_HELD_PAGES 16
+
+struct nandsim_debug_info {
+ struct dentry *dfs_root;
+ struct dentry *dfs_wear_report;
+};
+
+/*
+ * A union to represent flash memory contents and flash buffer.
+ */
+union ns_mem {
+ u_char *byte; /* for byte access */
+ uint16_t *word; /* for 16-bit word access */
+};
+
+/*
+ * The structure which describes all the internal simulator data.
+ */
+struct nandsim {
+ struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
+ unsigned int nbparts;
+
+ uint busw; /* flash chip bus width (8 or 16) */
+ u_char ids[8]; /* chip's ID bytes */
+ uint32_t options; /* chip's characteristic bits */
+ uint32_t state; /* current chip state */
+ uint32_t nxstate; /* next expected state */
+
+ uint32_t *op; /* current operation, NULL if the operation isn't known yet */
+ uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
+ uint16_t npstates; /* number of previous states saved */
+ uint16_t stateidx; /* current state index */
+
+ /* The simulated NAND flash pages array */
+ union ns_mem *pages;
+
+ /* Slab allocator for nand pages */
+ struct kmem_cache *nand_pages_slab;
+
+ /* Internal buffer of page + OOB size bytes */
+ union ns_mem buf;
+
+ /* NAND flash "geometry" */
+ struct {
+ uint64_t totsz; /* total flash size, bytes */
+ uint32_t secsz; /* flash sector (erase block) size, bytes */
+ uint pgsz; /* NAND flash page size, bytes */
+ uint oobsz; /* page OOB area size, bytes */
+ uint64_t totszoob; /* total flash size including OOB, bytes */
+ uint pgszoob; /* page size including OOB, bytes */
+ uint secszoob; /* sector size including OOB, bytes */
+ uint pgnum; /* total number of pages */
+ uint pgsec; /* number of pages per sector */
+ uint secshift; /* number of bits in sector size */
+ uint pgshift; /* number of bits in page size */
+ uint pgaddrbytes; /* bytes per page address */
+ uint secaddrbytes; /* bytes per sector address */
+ uint idbytes; /* the number of ID bytes that this chip outputs */
+ } geom;
+
+ /* NAND flash internal registers */
+ struct {
+ unsigned command; /* the command register */
+ u_char status; /* the status register */
+ uint row; /* the page number */
+ uint column; /* the offset within page */
+ uint count; /* internal counter */
+ uint num; /* number of bytes which must be processed */
+ uint off; /* fixed page offset */
+ } regs;
+
+ /* NAND flash lines state */
+ struct {
+ int ce; /* chip Enable */
+ int cle; /* command Latch Enable */
+ int ale; /* address Latch Enable */
+ int wp; /* write Protect */
+ } lines;
+
+ /* Fields needed when using a cache file */
+ struct file *cfile; /* Open file */
+ unsigned long *pages_written; /* Which pages have been written */
+ void *file_buf;
+ struct page *held_pages[NS_MAX_HELD_PAGES];
+ int held_cnt;
+
+ struct nandsim_debug_info dbg;
+};
+
+/*
+ * Operations array. To perform any operation the simulator must pass
+ * through the corresponding chain of states.
+ */
+static struct nandsim_operations {
+ uint32_t reqopts; /* options which are required to perform the operation */
+ uint32_t states[NS_OPER_STATES]; /* operation's states */
+} ops[NS_OPER_NUM] = {
+ /* Read page + OOB from the beginning */
+ {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Read page + OOB from the second half */
+ {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Read OOB */
+ {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Program page starting from the beginning */
+ {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
+ STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program page starting from the beginning */
+ {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program page starting from the second half */
+ {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program OOB */
+ {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Erase sector */
+ {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
+ /* Read status */
+ {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
+ /* Read ID */
+ {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
+ /* Large page devices read page */
+ {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Large page devices random page read */
+ {OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+};
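+
+/*
+ * Example walk-through (illustrative): a large-page read matches the
+ * second-to-last entry above. The host sends command 0x00 (READ0),
+ * the full page address, then 0x30 (READSTART); ACTION_CPY copies the
+ * addressed page into the internal buffer and the chip sits in
+ * STATE_DATAOUT until the host has clocked the data out, after which
+ * the operation completes in STATE_READY.
+ */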
+
+struct weak_block {
+ struct list_head list;
+ unsigned int erase_block_no;
+ unsigned int max_erases;
+ unsigned int erases_done;
+};
+
+static LIST_HEAD(weak_blocks);
+
+struct weak_page {
+ struct list_head list;
+ unsigned int page_no;
+ unsigned int max_writes;
+ unsigned int writes_done;
+};
+
+static LIST_HEAD(weak_pages);
+
+struct grave_page {
+ struct list_head list;
+ unsigned int page_no;
+ unsigned int max_reads;
+ unsigned int reads_done;
+};
+
+static LIST_HEAD(grave_pages);
+
+static unsigned long *erase_block_wear = NULL;
+static unsigned int wear_eb_count = 0;
+static unsigned long total_wear = 0;
+
+/* MTD structure for NAND controller */
+static struct mtd_info *nsmtd;
+
+static int nandsim_debugfs_show(struct seq_file *m, void *private)
+{
+ unsigned long wmin = -1, wmax = 0, avg;
+ unsigned long deciles[10], decile_max[10], tot = 0;
+ unsigned int i;
+
+ /* Calc wear stats */
+ for (i = 0; i < wear_eb_count; ++i) {
+ unsigned long wear = erase_block_wear[i];
+ if (wear < wmin)
+ wmin = wear;
+ if (wear > wmax)
+ wmax = wear;
+ tot += wear;
+ }
+
+ for (i = 0; i < 9; ++i) {
+ deciles[i] = 0;
+ decile_max[i] = (wmax * (i + 1) + 5) / 10;
+ }
+ deciles[9] = 0;
+ decile_max[9] = wmax;
+ for (i = 0; i < wear_eb_count; ++i) {
+ int d;
+ unsigned long wear = erase_block_wear[i];
+ for (d = 0; d < 10; ++d)
+ if (wear <= decile_max[d]) {
+ deciles[d] += 1;
+ break;
+ }
+ }
+ avg = tot / wear_eb_count;
+
+ /* Output wear report */
+ seq_printf(m, "Total numbers of erases: %lu\n", tot);
+ seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
+ seq_printf(m, "Average number of erases: %lu\n", avg);
+ seq_printf(m, "Maximum number of erases: %lu\n", wmax);
+ seq_printf(m, "Minimum number of erases: %lu\n", wmin);
+ for (i = 0; i < 10; ++i) {
+ unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+ if (from > decile_max[i])
+ continue;
+ seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
+ from,
+ decile_max[i],
+ deciles[i]);
+ }
+
+ return 0;
+}
+
+static int nandsim_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nandsim_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations dfs_fops = {
+ .open = nandsim_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * nandsim_debugfs_create - initialize debugfs
+ * @dev: nandsim device description object
+ *
+ * This function creates all debugfs files for the nandsim device @dev. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+static int nandsim_debugfs_create(struct nandsim *dev)
+{
+ struct nandsim_debug_info *dbg = &dev->dbg;
+ struct dentry *dent;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ dent = debugfs_create_dir("nandsim", NULL);
+ if (IS_ERR_OR_NULL(dent)) {
+ int err = dent ? -ENODEV : PTR_ERR(dent);
+
+ NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
+ err);
+ return err;
+ }
+ dbg->dfs_root = dent;
+
+ dent = debugfs_create_file("wear_report", S_IRUSR,
+ dbg->dfs_root, dev, &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dbg->dfs_wear_report = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(dbg->dfs_root);
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ return err;
+}
+
+/**
+ * nandsim_debugfs_remove - destroy all debugfs files
+ * @ns: nandsim device description object
+ */
+static void nandsim_debugfs_remove(struct nandsim *ns)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ns->dbg.dfs_root);
+}
+
+/*
+ * Allocate the array of page pointers and create a slab cache for the
+ * page contents, then initialize the array with NULL pointers.
+ *
+ * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
+ */
+static int alloc_device(struct nandsim *ns)
+{
+ struct file *cfile;
+ int i, err;
+
+ if (cache_file) {
+ cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
+ if (IS_ERR(cfile))
+ return PTR_ERR(cfile);
+ if (!(cfile->f_mode & FMODE_CAN_READ)) {
+ NS_ERR("alloc_device: cache file not readable\n");
+ err = -EINVAL;
+ goto err_close;
+ }
+ if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
+ NS_ERR("alloc_device: cache file not writeable\n");
+ err = -EINVAL;
+ goto err_close;
+ }
+ ns->pages_written = vzalloc(BITS_TO_LONGS(ns->geom.pgnum) *
+ sizeof(unsigned long));
+ if (!ns->pages_written) {
+ NS_ERR("alloc_device: unable to allocate pages written array\n");
+ err = -ENOMEM;
+ goto err_close;
+ }
+ ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+ if (!ns->file_buf) {
+ NS_ERR("alloc_device: unable to allocate file buf\n");
+ err = -ENOMEM;
+ goto err_free;
+ }
+ ns->cfile = cfile;
+ return 0;
+ }
+
+ ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
+ if (!ns->pages) {
+ NS_ERR("alloc_device: unable to allocate page array\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < ns->geom.pgnum; i++) {
+ ns->pages[i].byte = NULL;
+ }
+ ns->nand_pages_slab = kmem_cache_create("nandsim",
+ ns->geom.pgszoob, 0, 0, NULL);
+ if (!ns->nand_pages_slab) {
+ NS_ERR("cache_create: unable to create kmem_cache\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+
+err_free:
+ vfree(ns->pages_written);
+err_close:
+ filp_close(cfile, NULL);
+ return err;
+}
+
+/*
+ * Free any allocated pages, and free the array of page pointers.
+ */
+static void free_device(struct nandsim *ns)
+{
+ int i;
+
+ if (ns->cfile) {
+ kfree(ns->file_buf);
+ vfree(ns->pages_written);
+ filp_close(ns->cfile, NULL);
+ return;
+ }
+
+ if (ns->pages) {
+ for (i = 0; i < ns->geom.pgnum; i++) {
+ if (ns->pages[i].byte)
+ kmem_cache_free(ns->nand_pages_slab,
+ ns->pages[i].byte);
+ }
+ kmem_cache_destroy(ns->nand_pages_slab);
+ vfree(ns->pages);
+ }
+}
+
+static char *get_partition_name(int i)
+{
+ return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
+}
+
+/*
+ * Initialize the nandsim structure.
+ *
+ * RETURNS: 0 if success, -ERRNO if failure.
+ */
+static int init_nandsim(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct nandsim *ns = chip->priv;
+ int i, ret = 0;
+ uint64_t remains;
+ uint64_t next_offset;
+
+ if (NS_IS_INITIALIZED(ns)) {
+ NS_ERR("init_nandsim: nandsim is already initialized\n");
+ return -EIO;
+ }
+
+ /* Force mtd to not do delays */
+ chip->chip_delay = 0;
+
+ /* Initialize the NAND flash parameters */
+ ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
+ ns->geom.totsz = mtd->size;
+ ns->geom.pgsz = mtd->writesize;
+ ns->geom.oobsz = mtd->oobsize;
+ ns->geom.secsz = mtd->erasesize;
+ ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
+ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
+ ns->geom.secshift = ffs(ns->geom.secsz) - 1;
+ ns->geom.pgshift = chip->page_shift;
+ ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
+ ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
+ ns->options = 0;
+
+ if (ns->geom.pgsz == 512) {
+ ns->options |= OPT_PAGE512;
+ if (ns->busw == 8)
+ ns->options |= OPT_PAGE512_8BIT;
+ } else if (ns->geom.pgsz == 2048) {
+ ns->options |= OPT_PAGE2048;
+ } else if (ns->geom.pgsz == 4096) {
+ ns->options |= OPT_PAGE4096;
+ } else {
+ NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
+ return -EIO;
+ }
+
+ if (ns->options & OPT_SMALLPAGE) {
+ if (ns->geom.totsz <= (32 << 20)) {
+ ns->geom.pgaddrbytes = 3;
+ ns->geom.secaddrbytes = 2;
+ } else {
+ ns->geom.pgaddrbytes = 4;
+ ns->geom.secaddrbytes = 3;
+ }
+ } else {
+ if (ns->geom.totsz <= (128 << 20)) {
+ ns->geom.pgaddrbytes = 4;
+ ns->geom.secaddrbytes = 2;
+ } else {
+ ns->geom.pgaddrbytes = 5;
+ ns->geom.secaddrbytes = 3;
+ }
+ }
+
+ /* Fill the partition_info structure */
+ if (parts_num > ARRAY_SIZE(ns->partitions)) {
+ NS_ERR("too many partitions.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ remains = ns->geom.totsz;
+ next_offset = 0;
+ for (i = 0; i < parts_num; ++i) {
+ uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
+
+ if (!part_sz || part_sz > remains) {
+ NS_ERR("bad partition size.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ ns->partitions[i].name = get_partition_name(i);
+ ns->partitions[i].offset = next_offset;
+ ns->partitions[i].size = part_sz;
+ next_offset += ns->partitions[i].size;
+ remains -= ns->partitions[i].size;
+ }
+ ns->nbparts = parts_num;
+ if (remains) {
+ if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
+ NS_ERR("too many partitions.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ ns->partitions[i].name = get_partition_name(i);
+ ns->partitions[i].offset = next_offset;
+ ns->partitions[i].size = remains;
+ ns->nbparts += 1;
+ }
+
+ if (ns->busw == 16)
+ NS_WARN("16-bit flashes support wasn't tested\n");
+
+ printk("flash size: %llu MiB\n",
+ (unsigned long long)ns->geom.totsz >> 20);
+ printk("page size: %u bytes\n", ns->geom.pgsz);
+ printk("OOB area size: %u bytes\n", ns->geom.oobsz);
+ printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
+ printk("pages number: %u\n", ns->geom.pgnum);
+ printk("pages per sector: %u\n", ns->geom.pgsec);
+ printk("bus width: %u\n", ns->busw);
+ printk("bits in sector size: %u\n", ns->geom.secshift);
+ printk("bits in page size: %u\n", ns->geom.pgshift);
+ printk("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1);
+ printk("flash size with OOB: %llu KiB\n",
+ (unsigned long long)ns->geom.totszoob >> 10);
+ printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
+ printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
+ printk("options: %#x\n", ns->options);
+
+ if ((ret = alloc_device(ns)) != 0)
+ goto error;
+
+ /* Allocate / initialize the internal buffer */
+ ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+ if (!ns->buf.byte) {
+ NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
+ ns->geom.pgszoob);
+ ret = -ENOMEM;
+ goto error;
+ }
+ memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
+
+ return 0;
+
+error:
+ free_device(ns);
+
+ return ret;
+}
+
+/*
+ * Free the nandsim structure.
+ */
+static void free_nandsim(struct nandsim *ns)
+{
+ kfree(ns->buf.byte);
+ free_device(ns);
+
+ return;
+}
+
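+/*
+ * Parse the 'badblocks' module parameter: a comma-separated list of
+ * erase block numbers to mark bad, e.g. badblocks="5,10,15".
+ */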
+static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
+{
+ char *w;
+ int zero_ok;
+ unsigned int erase_block_no;
+ loff_t offset;
+
+ if (!badblocks)
+ return 0;
+ w = badblocks;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ erase_block_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !erase_block_no) {
+ NS_ERR("invalid badblocks.\n");
+ return -EINVAL;
+ }
+ offset = (loff_t)erase_block_no * ns->geom.secsz;
+ if (mtd_block_markbad(mtd, offset)) {
+ NS_ERR("invalid badblocks.\n");
+ return -EINVAL;
+ }
+ if (*w == ',')
+ w += 1;
+ } while (*w);
+ return 0;
+}
+
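+/*
+ * Parse the 'weakblocks' module parameter: comma-separated
+ * "<erase block>[:<max erases>]" entries, e.g. weakblocks="113:2,256".
+ * A listed block starts failing erases after <max erases> cycles
+ * (3 if omitted).
+ */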
+static int parse_weakblocks(void)
+{
+ char *w;
+ int zero_ok;
+ unsigned int erase_block_no;
+ unsigned int max_erases;
+ struct weak_block *wb;
+
+ if (!weakblocks)
+ return 0;
+ w = weakblocks;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ erase_block_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !erase_block_no) {
+ NS_ERR("invalid weakblocks.\n");
+ return -EINVAL;
+ }
+ max_erases = 3;
+ if (*w == ':') {
+ w += 1;
+ max_erases = simple_strtoul(w, &w, 0);
+ }
+ if (*w == ',')
+ w += 1;
+ wb = kzalloc(sizeof(*wb), GFP_KERNEL);
+ if (!wb) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ wb->erase_block_no = erase_block_no;
+ wb->max_erases = max_erases;
+ list_add(&wb->list, &weak_blocks);
+ } while (*w);
+ return 0;
+}
+
+static int erase_error(unsigned int erase_block_no)
+{
+ struct weak_block *wb;
+
+ list_for_each_entry(wb, &weak_blocks, list)
+ if (wb->erase_block_no == erase_block_no) {
+ if (wb->erases_done >= wb->max_erases)
+ return 1;
+ wb->erases_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
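+/*
+ * Parse the 'weakpages' module parameter: comma-separated
+ * "<page>[:<max writes>]" entries; writes to a listed page start to
+ * fail after <max writes> successful writes (default 3).
+ */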
+static int parse_weakpages(void)
+{
+ char *w;
+ int zero_ok;
+ unsigned int page_no;
+ unsigned int max_writes;
+ struct weak_page *wp;
+
+ if (!weakpages)
+ return 0;
+ w = weakpages;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ page_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !page_no) {
+ NS_ERR("invalid weakpagess.\n");
+ return -EINVAL;
+ }
+ max_writes = 3;
+ if (*w == ':') {
+ w += 1;
+ max_writes = simple_strtoul(w, &w, 0);
+ }
+ if (*w == ',')
+ w += 1;
+ wp = kzalloc(sizeof(*wp), GFP_KERNEL);
+ if (!wp) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ wp->page_no = page_no;
+ wp->max_writes = max_writes;
+ list_add(&wp->list, &weak_pages);
+ } while (*w);
+ return 0;
+}
+
+static int write_error(unsigned int page_no)
+{
+ struct weak_page *wp;
+
+ list_for_each_entry(wp, &weak_pages, list)
+ if (wp->page_no == page_no) {
+ if (wp->writes_done >= wp->max_writes)
+ return 1;
+ wp->writes_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
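+/*
+ * Parse the 'gravepages' module parameter: comma-separated
+ * "<page>[:<max reads>]" entries; reads of a listed page start to
+ * fail after <max reads> successful reads (default 3).
+ */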
+static int parse_gravepages(void)
+{
+ char *g;
+ int zero_ok;
+ unsigned int page_no;
+ unsigned int max_reads;
+ struct grave_page *gp;
+
+ if (!gravepages)
+ return 0;
+ g = gravepages;
+ do {
+ zero_ok = (*g == '0' ? 1 : 0);
+ page_no = simple_strtoul(g, &g, 0);
+ if (!zero_ok && !page_no) {
+ NS_ERR("invalid gravepagess.\n");
+ return -EINVAL;
+ }
+ max_reads = 3;
+ if (*g == ':') {
+ g += 1;
+ max_reads = simple_strtoul(g, &g, 0);
+ }
+ if (*g == ',')
+ g += 1;
+ gp = kzalloc(sizeof(*gp), GFP_KERNEL);
+ if (!gp) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ gp->page_no = page_no;
+ gp->max_reads = max_reads;
+ list_add(&gp->list, &grave_pages);
+ } while (*g);
+ return 0;
+}
+
+static int read_error(unsigned int page_no)
+{
+ struct grave_page *gp;
+
+ list_for_each_entry(gp, &grave_pages, list)
+ if (gp->page_no == page_no) {
+ if (gp->reads_done >= gp->max_reads)
+ return 1;
+ gp->reads_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
+static void free_lists(void)
+{
+ struct list_head *pos, *n;
+ list_for_each_safe(pos, n, &weak_blocks) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_block, list));
+ }
+ list_for_each_safe(pos, n, &weak_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_page, list));
+ }
+ list_for_each_safe(pos, n, &grave_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct grave_page, list));
+ }
+ kfree(erase_block_wear);
+}
+
+static int setup_wear_reporting(struct mtd_info *mtd)
+{
+ size_t mem;
+
+ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
+ mem = wear_eb_count * sizeof(unsigned long);
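+ /* Check that the multiplication above did not overflow */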
+ if (mem / sizeof(unsigned long) != wear_eb_count) {
+ NS_ERR("Too many erase blocks for wear reporting\n");
+ return -ENOMEM;
+ }
+ erase_block_wear = kzalloc(mem, GFP_KERNEL);
+ if (!erase_block_wear) {
+ NS_ERR("Too many erase blocks for wear reporting\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void update_wear(unsigned int erase_block_no)
+{
+ if (!erase_block_wear)
+ return;
+ total_wear += 1;
+ /*
+ * TODO: Notify this through a debugfs entry,
+ * instead of showing an error message.
+ */
+ if (total_wear == 0)
+ NS_ERR("Erase counter total overflow\n");
+ erase_block_wear[erase_block_no] += 1;
+ if (erase_block_wear[erase_block_no] == 0)
+ NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
+}
+
+/*
+ * Returns the string representation of the given state.
+ */
+static char *get_state_name(uint32_t state)
+{
+ switch (NS_STATE(state)) {
+ case STATE_CMD_READ0:
+ return "STATE_CMD_READ0";
+ case STATE_CMD_READ1:
+ return "STATE_CMD_READ1";
+ case STATE_CMD_PAGEPROG:
+ return "STATE_CMD_PAGEPROG";
+ case STATE_CMD_READOOB:
+ return "STATE_CMD_READOOB";
+ case STATE_CMD_READSTART:
+ return "STATE_CMD_READSTART";
+ case STATE_CMD_ERASE1:
+ return "STATE_CMD_ERASE1";
+ case STATE_CMD_STATUS:
+ return "STATE_CMD_STATUS";
+ case STATE_CMD_SEQIN:
+ return "STATE_CMD_SEQIN";
+ case STATE_CMD_READID:
+ return "STATE_CMD_READID";
+ case STATE_CMD_ERASE2:
+ return "STATE_CMD_ERASE2";
+ case STATE_CMD_RESET:
+ return "STATE_CMD_RESET";
+ case STATE_CMD_RNDOUT:
+ return "STATE_CMD_RNDOUT";
+ case STATE_CMD_RNDOUTSTART:
+ return "STATE_CMD_RNDOUTSTART";
+ case STATE_ADDR_PAGE:
+ return "STATE_ADDR_PAGE";
+ case STATE_ADDR_SEC:
+ return "STATE_ADDR_SEC";
+ case STATE_ADDR_ZERO:
+ return "STATE_ADDR_ZERO";
+ case STATE_ADDR_COLUMN:
+ return "STATE_ADDR_COLUMN";
+ case STATE_DATAIN:
+ return "STATE_DATAIN";
+ case STATE_DATAOUT:
+ return "STATE_DATAOUT";
+ case STATE_DATAOUT_ID:
+ return "STATE_DATAOUT_ID";
+ case STATE_DATAOUT_STATUS:
+ return "STATE_DATAOUT_STATUS";
+ case STATE_READY:
+ return "STATE_READY";
+ case STATE_UNKNOWN:
+ return "STATE_UNKNOWN";
+ }
+
+ NS_ERR("get_state_name: unknown state, BUG\n");
+ return NULL;
+}
+
+/*
+ * Check if command is valid.
+ *
+ * RETURNS: 1 if the command is unknown, 0 if it is valid.
+ */
+static int check_command(int cmd)
+{
+ switch (cmd) {
+
+ case NAND_CMD_READ0:
+ case NAND_CMD_READ1:
+ case NAND_CMD_READSTART:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_READOOB:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_READID:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_RESET:
+ case NAND_CMD_RNDOUT:
+ case NAND_CMD_RNDOUTSTART:
+ return 0;
+
+ default:
+ return 1;
+ }
+}
+
+/*
+ * Returns the state corresponding to the accepted command.
+ */
+static uint32_t get_state_by_command(unsigned command)
+{
+ switch (command) {
+ case NAND_CMD_READ0:
+ return STATE_CMD_READ0;
+ case NAND_CMD_READ1:
+ return STATE_CMD_READ1;
+ case NAND_CMD_PAGEPROG:
+ return STATE_CMD_PAGEPROG;
+ case NAND_CMD_READSTART:
+ return STATE_CMD_READSTART;
+ case NAND_CMD_READOOB:
+ return STATE_CMD_READOOB;
+ case NAND_CMD_ERASE1:
+ return STATE_CMD_ERASE1;
+ case NAND_CMD_STATUS:
+ return STATE_CMD_STATUS;
+ case NAND_CMD_SEQIN:
+ return STATE_CMD_SEQIN;
+ case NAND_CMD_READID:
+ return STATE_CMD_READID;
+ case NAND_CMD_ERASE2:
+ return STATE_CMD_ERASE2;
+ case NAND_CMD_RESET:
+ return STATE_CMD_RESET;
+ case NAND_CMD_RNDOUT:
+ return STATE_CMD_RNDOUT;
+ case NAND_CMD_RNDOUTSTART:
+ return STATE_CMD_RNDOUTSTART;
+ }
+
+ NS_ERR("get_state_by_command: unknown command, BUG\n");
+ return 0;
+}
+
+/*
+ * Move an address byte to the corresponding internal register.
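+ * For example, with pgaddrbytes = 4 and secaddrbytes = 2, the first
+ * two address bytes fill the column register and the last two fill
+ * the row register.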
+ */
+static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
+{
+ uint byte = (uint)bt;
+
+ if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
+ ns->regs.column |= (byte << 8 * ns->regs.count);
+ else {
+ ns->regs.row |= (byte << 8 * (ns->regs.count -
+ ns->geom.pgaddrbytes +
+ ns->geom.secaddrbytes));
+ }
+
+ return;
+}
+
+/*
+ * Switch to STATE_READY state.
+ */
+static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
+{
+ NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
+
+ ns->state = STATE_READY;
+ ns->nxstate = STATE_UNKNOWN;
+ ns->op = NULL;
+ ns->npstates = 0;
+ ns->stateidx = 0;
+ ns->regs.num = 0;
+ ns->regs.count = 0;
+ ns->regs.off = 0;
+ ns->regs.row = 0;
+ ns->regs.column = 0;
+ ns->regs.status = status;
+}
+
+/*
+ * If the operation isn't known yet, try to find it in the global array
+ * of supported operations.
+ *
+ * The operation can be unknown for the following reasons.
+ * 1. A new command was accepted and this is the first call to find the
+ *    corresponding states chain. In this case ns->npstates = 0.
+ * 2. There are several operations which begin with the same command(s)
+ *    (for example, the program-from-the-second-half and
+ *    read-from-the-second-half operations both begin with the READ1
+ *    command). In this case the ns->pstates[] array contains the
+ *    previous states.
+ *
+ * Thus, the function tries to find operation containing the following
+ * states (if the 'flag' parameter is 0):
+ * ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
+ *
+ * If exactly one matching operation is found, it is accepted
+ * (ns->op, ns->state and ns->nxstate are initialized, ns->npstates is
+ * zeroed).
+ *
+ * If there are several matches, the current state is pushed to the
+ * ns->pstates.
+ *
+ * The operation can be unknown only while commands are input to the chip.
+ * As soon as address input begins, the operation must be known.
+ * In such a situation the function is called with 'flag' != 0, and the
+ * operation is searched for using the following pattern:
+ * ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
+ *
+ * It is supposed that this pattern must either match one operation or
+ * none. There can't be ambiguity in that case.
+ *
+ * If no matches are found, the function does the following:
+ * 1. if there are saved states present, try to ignore them and search
+ *    again using only the last command. If nothing is found, switch
+ *    to the STATE_READY state.
+ * 2. if there are no saved states, switch to the STATE_READY state.
+ *
+ * RETURNS: -2 - no matched operations found.
+ * -1 - several matches.
+ * 0 - operation is found.
+ */
+static int find_operation(struct nandsim *ns, uint32_t flag)
+{
+ int opsfound = 0;
+ int i, j, idx = 0;
+
+ for (i = 0; i < NS_OPER_NUM; i++) {
+
+ int found = 1;
+
+ if (!(ns->options & ops[i].reqopts))
+ /* Ignore operations we can't perform */
+ continue;
+
+ if (flag) {
+ if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
+ continue;
+ } else {
+ if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
+ continue;
+ }
+
+ for (j = 0; j < ns->npstates; j++)
+ if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
+ && (ns->options & ops[i].reqopts)) {
+ found = 0;
+ break;
+ }
+
+ if (found) {
+ idx = i;
+ opsfound += 1;
+ }
+ }
+
+ if (opsfound == 1) {
+ /* Exact match */
+ ns->op = &ops[idx].states[0];
+ if (flag) {
+ /*
+ * In this case find_operation was called when address
+ * input had just begun, but the address is not fully
+ * input yet. So the current state must not be one of
+ * the STATE_ADDR_* states; the STATE_ADDR_* state must
+ * be the next state (ns->nxstate) instead.
+ */
+ ns->stateidx = ns->npstates - 1;
+ } else {
+ ns->stateidx = ns->npstates;
+ }
+ ns->npstates = 0;
+ ns->state = ns->op[ns->stateidx];
+ ns->nxstate = ns->op[ns->stateidx + 1];
+ NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
+ idx, get_state_name(ns->state), get_state_name(ns->nxstate));
+ return 0;
+ }
+
+ if (opsfound == 0) {
+ /* Nothing was found. Try to ignore previous commands (if any) and search again */
+ if (ns->npstates != 0) {
+ NS_DBG("find_operation: no operation found, try again with state %s\n",
+ get_state_name(ns->state));
+ ns->npstates = 0;
+ return find_operation(ns, 0);
+
+ }
+ NS_DBG("find_operation: no operations found\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return -2;
+ }
+
+ if (flag) {
+ /* This shouldn't happen */
+ NS_DBG("find_operation: BUG, operation must be known if address is input\n");
+ return -2;
+ }
+
+ NS_DBG("find_operation: there is still ambiguity\n");
+
+ ns->pstates[ns->npstates++] = ns->state;
+
+ return -1;
+}
+
+static void put_pages(struct nandsim *ns)
+{
+ int i;
+
+ for (i = 0; i < ns->held_cnt; i++)
+ page_cache_release(ns->held_pages[i]);
+}
+
+/* Get page cache pages in advance to provide NOFS memory allocation */
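+/*
+ * Holding references to the pages up front, together with PF_MEMALLOC
+ * (see set_memalloc()), means the kernel_read()/kernel_write() calls in
+ * read_file()/write_file() need not allocate memory, so reclaim cannot
+ * recurse into the filesystem sitting on top of nandsim and deadlock
+ * (the same scenario described in the GFP_NOFS comment in prog_page()).
+ */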
+static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
+{
+ pgoff_t index, start_index, end_index;
+ struct page *page;
+ struct address_space *mapping = file->f_mapping;
+
+ start_index = pos >> PAGE_CACHE_SHIFT;
+ end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+ if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
+ return -EINVAL;
+ ns->held_cnt = 0;
+ for (index = start_index; index <= end_index; index++) {
+ page = find_get_page(mapping, index);
+ if (page == NULL) {
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ if (page == NULL) {
+ write_inode_now(mapping->host, 1);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ }
+ if (page == NULL) {
+ put_pages(ns);
+ return -ENOMEM;
+ }
+ unlock_page(page);
+ }
+ ns->held_pages[ns->held_cnt++] = page;
+ }
+ return 0;
+}
+
+static int set_memalloc(void)
+{
+ if (current->flags & PF_MEMALLOC)
+ return 0;
+ current->flags |= PF_MEMALLOC;
+ return 1;
+}
+
+static void clear_memalloc(int memalloc)
+{
+ if (memalloc)
+ current->flags &= ~PF_MEMALLOC;
+}
+
+static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
+{
+ ssize_t tx;
+ int err, memalloc;
+
+ err = get_pages(ns, file, count, pos);
+ if (err)
+ return err;
+ memalloc = set_memalloc();
+ tx = kernel_read(file, pos, buf, count);
+ clear_memalloc(memalloc);
+ put_pages(ns);
+ return tx;
+}
+
+static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
+{
+ ssize_t tx;
+ int err, memalloc;
+
+ err = get_pages(ns, file, count, pos);
+ if (err)
+ return err;
+ memalloc = set_memalloc();
+ tx = kernel_write(file, buf, count, pos);
+ clear_memalloc(memalloc);
+ put_pages(ns);
+ return tx;
+}
+
+/*
+ * Returns a pointer to the current page.
+ */
+static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
+{
+ return &(ns->pages[ns->regs.row]);
+}
+
+/*
+ * Returns a pointer to the current byte within the current page.
+ */
+static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
+{
+ return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
+}
+
+static int do_read_error(struct nandsim *ns, int num)
+{
+ unsigned int page_no = ns->regs.row;
+
+ if (read_error(page_no)) {
+ prandom_bytes(ns->buf.byte, num);
+ NS_WARN("simulating read error in page %u\n", page_no);
+ return 1;
+ }
+ return 0;
+}
+
+static void do_bit_flips(struct nandsim *ns, int num)
+{
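+ /* (1 << 22) out of the 2^32 prandom_u32() range gives a ~1/1024 chance of flipping bits in this read */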
+ if (bitflips && prandom_u32() < (1 << 22)) {
+ int flips = 1;
+ if (bitflips > 1)
+ flips = (prandom_u32() % (int) bitflips) + 1;
+ while (flips--) {
+ int pos = prandom_u32() % (num * 8);
+ ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
+ NS_WARN("read_page: flipping bit %d in page %d "
+ "reading from %d ecc: corrected=%u failed=%u\n",
+ pos, ns->regs.row, ns->regs.column + ns->regs.off,
+ nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
+ }
+ }
+}
+
+/*
+ * Fill the NAND buffer with data read from the specified page.
+ */
+static void read_page(struct nandsim *ns, int num)
+{
+ union ns_mem *mypage;
+
+ if (ns->cfile) {
+ if (!test_bit(ns->regs.row, ns->pages_written)) {
+ NS_DBG("read_page: page %d not written\n", ns->regs.row);
+ memset(ns->buf.byte, 0xFF, num);
+ } else {
+ loff_t pos;
+ ssize_t tx;
+
+ NS_DBG("read_page: page %d written, reading from %d\n",
+ ns->regs.row, ns->regs.column + ns->regs.off);
+ if (do_read_error(ns, num))
+ return;
+ pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+ tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
+ if (tx != num) {
+ NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return;
+ }
+ do_bit_flips(ns, num);
+ }
+ return;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ if (mypage->byte == NULL) {
+ NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
+ memset(ns->buf.byte, 0xFF, num);
+ } else {
+ NS_DBG("read_page: page %d allocated, reading from %d\n",
+ ns->regs.row, ns->regs.column + ns->regs.off);
+ if (do_read_error(ns, num))
+ return;
+ memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
+ do_bit_flips(ns, num);
+ }
+}
+
+/*
+ * Erase all pages in the specified sector.
+ */
+static void erase_sector(struct nandsim *ns)
+{
+ union ns_mem *mypage;
+ int i;
+
+ if (ns->cfile) {
+ for (i = 0; i < ns->geom.pgsec; i++)
+ if (__test_and_clear_bit(ns->regs.row + i,
+ ns->pages_written)) {
+ NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
+ }
+ return;
+ }
+
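+ /* Erase is modelled by freeing the pages: unallocated pages read back as all 0xFF (see read_page()) */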
+ mypage = NS_GET_PAGE(ns);
+ for (i = 0; i < ns->geom.pgsec; i++) {
+ if (mypage->byte != NULL) {
+ NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
+ kmem_cache_free(ns->nand_pages_slab, mypage->byte);
+ mypage->byte = NULL;
+ }
+ mypage++;
+ }
+}
+
+/*
+ * Program the specified page with the contents from the NAND buffer.
+ */
+static int prog_page(struct nandsim *ns, int num)
+{
+ int i;
+ union ns_mem *mypage;
+ u_char *pg_off;
+
+ if (ns->cfile) {
+ loff_t off;
+ ssize_t tx;
+ int all;
+
+ NS_DBG("prog_page: writing page %d\n", ns->regs.row);
+ pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
+ off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+ if (!test_bit(ns->regs.row, ns->pages_written)) {
+ all = 1;
+ memset(ns->file_buf, 0xff, ns->geom.pgszoob);
+ } else {
+ all = 0;
+ tx = read_file(ns, ns->cfile, pg_off, num, off);
+ if (tx != num) {
+ NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ }
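+ /* NAND programming can only clear bits (1 -> 0), so AND the new data into the old */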
+ for (i = 0; i < num; i++)
+ pg_off[i] &= ns->buf.byte[i];
+ if (all) {
+ loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+ tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
+ if (tx != ns->geom.pgszoob) {
+ NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ __set_bit(ns->regs.row, ns->pages_written);
+ } else {
+ tx = write_file(ns, ns->cfile, pg_off, num, off);
+ if (tx != num) {
+ NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ if (mypage->byte == NULL) {
+ NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
+ /*
+ * We allocate memory with GFP_NOFS because a flash FS may
+ * utilize this. If it is holding an FS lock, then gets here,
+ * then kernel memory alloc runs writeback which goes to the FS
+ * again and deadlocks. This was seen in practice.
+ */
+ mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
+ if (mypage->byte == NULL) {
+ NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
+ return -1;
+ }
+ memset(mypage->byte, 0xFF, ns->geom.pgszoob);
+ }
+
+ pg_off = NS_PAGE_BYTE_OFF(ns);
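+ /* As in the cache-file path above, programming can only clear bits */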
+ for (i = 0; i < num; i++)
+ pg_off[i] &= ns->buf.byte[i];
+
+ return 0;
+}
+
+/*
+ * If state has any action bit, perform this action.
+ *
+ * RETURNS: 0 on success, -1 on error.
+ */
+static int do_state_action(struct nandsim *ns, uint32_t action)
+{
+ int num;
+ int busdiv = ns->busw == 8 ? 1 : 2;
+ unsigned int erase_block_no, page_no;
+
+ action &= ACTION_MASK;
+
+ /* Check that page address input is correct */
+ if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
+ NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
+ return -1;
+ }
+
+ switch (action) {
+
+ case ACTION_CPY:
+ /*
+ * Copy page data to the internal buffer.
+ */
+
+ /* The column must lie within the page, OOB included */
+ if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
+ NS_ERR("do_state_action: column number is too large\n");
+ break;
+ }
+ num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ read_page(ns, num);
+
+ NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
+ num, NS_RAW_OFFSET(ns) + ns->regs.off);
+
+ if (ns->regs.off == 0)
+ NS_LOG("read page %d\n", ns->regs.row);
+ else if (ns->regs.off < ns->geom.pgsz)
+ NS_LOG("read page %d (second half)\n", ns->regs.row);
+ else
+ NS_LOG("read OOB of page %d\n", ns->regs.row);
+
+ NS_UDELAY(access_delay);
+ NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+ break;
+
+ case ACTION_SECERASE:
+ /*
+ * Erase sector.
+ */
+
+ if (ns->lines.wp) {
+ NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
+ return -1;
+ }
+
+ if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
+ || (ns->regs.row & ~(ns->geom.secsz - 1))) {
+ NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
+ return -1;
+ }
+
+ ns->regs.row = (ns->regs.row <<
+ 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
+ ns->regs.column = 0;
+
+ erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
+
+ NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
+ ns->regs.row, NS_RAW_OFFSET(ns));
+ NS_LOG("erase sector %u\n", erase_block_no);
+
+ erase_sector(ns);
+
+ NS_MDELAY(erase_delay);
+
+ if (erase_block_wear)
+ update_wear(erase_block_no);
+
+ if (erase_error(erase_block_no)) {
+ NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
+ return -1;
+ }
+
+ break;
+
+ case ACTION_PRGPAGE:
+ /*
+ * Program page - move internal buffer data to the page.
+ */
+
+ if (ns->lines.wp) {
+ NS_WARN("do_state_action: device is write-protected, programm\n");
+ return -1;
+ }
+
+ num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ if (num != ns->regs.count) {
+ NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
+ ns->regs.count, num);
+ return -1;
+ }
+
+ if (prog_page(ns, num) == -1)
+ return -1;
+
+ page_no = ns->regs.row;
+
+ NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
+ num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
+ NS_LOG("programm page %d\n", ns->regs.row);
+
+ NS_UDELAY(programm_delay);
+ NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+ if (write_error(page_no)) {
+ NS_WARN("simulating write failure in page %u\n", page_no);
+ return -1;
+ }
+
+ break;
+
+ case ACTION_ZEROOFF:
+ NS_DBG("do_state_action: set internal offset to 0\n");
+ ns->regs.off = 0;
+ break;
+
+ case ACTION_HALFOFF:
+ if (!(ns->options & OPT_PAGE512_8BIT)) {
+ NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
+ "byte page size 8x chips\n");
+ return -1;
+ }
+ NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
+ ns->regs.off = ns->geom.pgsz/2;
+ break;
+
+ case ACTION_OOBOFF:
+ NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
+ ns->regs.off = ns->geom.pgsz;
+ break;
+
+ default:
+ NS_DBG("do_state_action: BUG! unknown action\n");
+ }
+
+ return 0;
+}
+
+/*
+ * Switch simulator's state.
+ */
+static void switch_state(struct nandsim *ns)
+{
+ if (ns->op) {
+ /*
+ * The current operation has already been identified.
+ * Just follow the states chain.
+ */
+
+ ns->stateidx += 1;
+ ns->state = ns->nxstate;
+ ns->nxstate = ns->op[ns->stateidx + 1];
+
+ NS_DBG("switch_state: operation is known, switch to the next state, "
+ "state: %s, nxstate: %s\n",
+ get_state_name(ns->state), get_state_name(ns->nxstate));
+
+ /* See whether we need to do some action */
+ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ } else {
+ /*
+ * We don't yet know which operation we perform.
+ * Try to identify it.
+ */
+
+ /*
+ * The only event causing the switch_state function to
+ * be called with a yet unknown operation is a new command.
+ */
+ ns->state = get_state_by_command(ns->regs.command);
+
+ NS_DBG("switch_state: operation is unknown, try to find it\n");
+
+ if (find_operation(ns, 0) != 0)
+ return;
+
+ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+ }
+
+ /* For 16x devices column means the page offset in words */
+ if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
+ NS_DBG("switch_state: double the column number for 16x device\n");
+ ns->regs.column <<= 1;
+ }
+
+ if (NS_STATE(ns->nxstate) == STATE_READY) {
+ /*
+ * The current state is the last. Return to STATE_READY
+ */
+
+ u_char status = NS_STATUS_OK(ns);
+
+ /* In case of data states, see if all bytes were input/output */
+ if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
+ && ns->regs.count != ns->regs.num) {
+ NS_WARN("switch_state: not all bytes were processed, %d left\n",
+ ns->regs.num - ns->regs.count);
+ status = NS_STATUS_FAILED(ns);
+ }
+
+ NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
+
+ switch_to_ready_state(ns, status);
+
+ return;
+ } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
+ /*
+ * If the next state is data input/output, switch to it now
+ */
+
+ ns->state = ns->nxstate;
+ ns->nxstate = ns->op[++ns->stateidx + 1];
+ ns->regs.num = ns->regs.count = 0;
+
+ NS_DBG("switch_state: the next state is data I/O, switch, "
+ "state: %s, nxstate: %s\n",
+ get_state_name(ns->state), get_state_name(ns->nxstate));
+
+ /*
+ * Set the internal register to the count of bytes which
+ * are expected to be input or output
+ */
+ switch (NS_STATE(ns->state)) {
+ case STATE_DATAIN:
+ case STATE_DATAOUT:
+ ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ break;
+
+ case STATE_DATAOUT_ID:
+ ns->regs.num = ns->geom.idbytes;
+ break;
+
+ case STATE_DATAOUT_STATUS:
+ ns->regs.count = ns->regs.num = 0;
+ break;
+
+ default:
+ NS_ERR("switch_state: BUG! unknown data state\n");
+ }
+
+ } else if (ns->nxstate & STATE_ADDR_MASK) {
+ /*
+ * If the next state is address input, set the internal
+ * register to the number of expected address bytes
+ */
+
+ ns->regs.count = 0;
+
+ switch (NS_STATE(ns->nxstate)) {
+ case STATE_ADDR_PAGE:
+ ns->regs.num = ns->geom.pgaddrbytes;
+
+ break;
+ case STATE_ADDR_SEC:
+ ns->regs.num = ns->geom.secaddrbytes;
+ break;
+
+ case STATE_ADDR_ZERO:
+ ns->regs.num = 1;
+ break;
+
+ case STATE_ADDR_COLUMN:
+ /* Column address is always 2 bytes */
+ ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
+ break;
+
+ default:
+ NS_ERR("switch_state: BUG! unknown address state\n");
+ }
+ } else {
+ /*
+ * Just reset internal counters.
+ */
+
+ ns->regs.num = 0;
+ ns->regs.count = 0;
+ }
+}
+
+static u_char ns_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
+ u_char outb = 0x00;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
+ return outb;
+ }
+ if (ns->lines.ale || ns->lines.cle) {
+ NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
+ return outb;
+ }
+ if (!(ns->state & STATE_DATAOUT_MASK)) {
+ NS_WARN("read_byte: unexpected data output cycle, state is %s "
+ "return %#x\n", get_state_name(ns->state), (uint)outb);
+ return outb;
+ }
+
+ /* Status register may be read as many times as it is wanted */
+ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
+ NS_DBG("read_byte: return %#x status\n", ns->regs.status);
+ return ns->regs.status;
+ }
+
+ /* Check if there is any data in the internal buffer which may be read */
+ if (ns->regs.count == ns->regs.num) {
+ NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
+ return outb;
+ }
+
+ switch (NS_STATE(ns->state)) {
+ case STATE_DATAOUT:
+ if (ns->busw == 8) {
+ outb = ns->buf.byte[ns->regs.count];
+ ns->regs.count += 1;
+ } else {
+ outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
+ ns->regs.count += 2;
+ }
+ break;
+ case STATE_DATAOUT_ID:
+ NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
+ outb = ns->ids[ns->regs.count];
+ ns->regs.count += 1;
+ break;
+ default:
+ BUG();
+ }
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("read_byte: all bytes were read\n");
+
+ if (NS_STATE(ns->nxstate) == STATE_READY)
+ switch_state(ns);
+ }
+
+ return outb;
+}
+
+static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("write_byte: chip is disabled, ignore write\n");
+ return;
+ }
+ if (ns->lines.ale && ns->lines.cle) {
+ NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
+ return;
+ }
+
+ if (ns->lines.cle == 1) {
+ /*
+ * The byte written is a command.
+ */
+
+ if (byte == NAND_CMD_RESET) {
+ NS_LOG("reset chip\n");
+ switch_to_ready_state(ns, NS_STATUS_OK(ns));
+ return;
+ }
+
+ /* Check that the command byte is correct */
+ if (check_command(byte)) {
+ NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
+ return;
+ }
+
+ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
+ || NS_STATE(ns->state) == STATE_DATAOUT) {
+ int row = ns->regs.row;
+
+ switch_state(ns);
+ if (byte == NAND_CMD_RNDOUT)
+ ns->regs.row = row;
+ }
+
+ /* Check if chip is expecting command */
+ if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
+ /* Do not warn if only 2 id bytes are read */
+ if (!(ns->regs.command == NAND_CMD_READID &&
+ NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
+ /*
+ * We are in a situation where something other than a command
+ * was expected, but a command was input. In this case ignore
+ * previous command(s)/state(s) and accept the last one.
+ */
+ NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
+ "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+ }
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ }
+
+ NS_DBG("command byte corresponding to %s state accepted\n",
+ get_state_name(get_state_by_command(byte)));
+ ns->regs.command = byte;
+ switch_state(ns);
+
+ } else if (ns->lines.ale == 1) {
+ /*
+ * The byte written is an address.
+ */
+
+ if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
+
+ NS_DBG("write_byte: operation isn't known yet, identify it\n");
+
+ if (find_operation(ns, 1) < 0)
+ return;
+
+ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ ns->regs.count = 0;
+ switch (NS_STATE(ns->nxstate)) {
+ case STATE_ADDR_PAGE:
+ ns->regs.num = ns->geom.pgaddrbytes;
+ break;
+ case STATE_ADDR_SEC:
+ ns->regs.num = ns->geom.secaddrbytes;
+ break;
+ case STATE_ADDR_ZERO:
+ ns->regs.num = 1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ /* Check that chip is expecting address */
+ if (!(ns->nxstate & STATE_ADDR_MASK)) {
+ NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
+ "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if this byte is expected */
+ if (ns->regs.count == ns->regs.num) {
+ NS_ERR("write_byte: no more address bytes expected\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ accept_addr_byte(ns, byte);
+
+ ns->regs.count += 1;
+
+ NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
+ (uint)byte, ns->regs.count, ns->regs.num);
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
+ switch_state(ns);
+ }
+
+ } else {
+ /*
+ * The byte written is input data.
+ */
+
+ /* Check that chip is expecting data input */
+ if (!(ns->state & STATE_DATAIN_MASK)) {
+ NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
+ "switch to %s\n", (uint)byte,
+ get_state_name(ns->state), get_state_name(STATE_READY));
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if this byte is expected */
+ if (ns->regs.count == ns->regs.num) {
+ NS_WARN("write_byte: %u input bytes has already been accepted, ignore write\n",
+ ns->regs.num);
+ return;
+ }
+
+ if (ns->busw == 8) {
+ ns->buf.byte[ns->regs.count] = byte;
+ ns->regs.count += 1;
+ } else {
+ ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
+ ns->regs.count += 2;
+ }
+ }
+
+ return;
+}
+
+static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
+{
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
+
+ ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
+ ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
+ ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
+
+ if (cmd != NAND_CMD_NONE)
+ ns_nand_write_byte(mtd, cmd);
+}
+
+static int ns_device_ready(struct mtd_info *mtd)
+{
+ NS_DBG("device_ready\n");
+ return 1;
+}
+
+static uint16_t ns_nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+
+ NS_DBG("read_word\n");
+
+ return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
+}
+
+static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
+
+ /* Check that chip is expecting data input */
+ if (!(ns->state & STATE_DATAIN_MASK)) {
+ NS_ERR("write_buf: data input isn't expected, state is %s, "
+ "switch to STATE_READY\n", get_state_name(ns->state));
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if these are expected bytes */
+ if (ns->regs.count + len > ns->regs.num) {
+ NS_ERR("write_buf: too many input bytes\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ memcpy(ns->buf.byte + ns->regs.count, buf, len);
+ ns->regs.count += len;
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
+ }
+}
+
+static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("read_buf: chip is disabled\n");
+ return;
+ }
+ if (ns->lines.ale || ns->lines.cle) {
+ NS_ERR("read_buf: ALE or CLE pin is high\n");
+ return;
+ }
+ if (!(ns->state & STATE_DATAOUT_MASK)) {
+ NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
+ get_state_name(ns->state));
+ return;
+ }
+
+ if (NS_STATE(ns->state) != STATE_DATAOUT) {
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
+
+ return;
+ }
+
+ /* Check if these are expected bytes */
+ if (ns->regs.count + len > ns->regs.num) {
+ NS_ERR("read_buf: too many bytes to read\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ memcpy(buf, ns->buf.byte + ns->regs.count, len);
+ ns->regs.count += len;
+
+ if (ns->regs.count == ns->regs.num) {
+ if (NS_STATE(ns->nxstate) == STATE_READY)
+ switch_state(ns);
+ }
+
+ return;
+}
+
+/*
+ * Module initialization function
+ */
+static int __init ns_init_module(void)
+{
+ struct nand_chip *chip;
+ struct nandsim *nand;
+ int retval = -ENOMEM, i;
+
+ if (bus_width != 8 && bus_width != 16) {
+ NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
+ return -EINVAL;
+ }
+
+ /* Allocate and initialize mtd_info, nand_chip and nandsim structures */
+ nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
+ + sizeof(struct nandsim), GFP_KERNEL);
+ if (!nsmtd) {
+ NS_ERR("unable to allocate core structures.\n");
+ return -ENOMEM;
+ }
+ chip = (struct nand_chip *)(nsmtd + 1);
+ nsmtd->priv = (void *)chip;
+ nand = (struct nandsim *)(chip + 1);
+ chip->priv = (void *)nand;
+
+ /*
+ * Register simulator's callbacks.
+ */
+ chip->cmd_ctrl = ns_hwcontrol;
+ chip->read_byte = ns_nand_read_byte;
+ chip->dev_ready = ns_device_ready;
+ chip->write_buf = ns_nand_write_buf;
+ chip->read_buf = ns_nand_read_buf;
+ chip->read_word = ns_nand_read_word;
+ chip->ecc.mode = NAND_ECC_SOFT;
+ /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
+ /* and 'badblocks' parameters to work */
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ switch (bbt) {
+ case 2:
+ chip->bbt_options |= NAND_BBT_NO_OOB;
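+ /* fall through */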
+ case 1:
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
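+ /* fall through */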
+ case 0:
+ break;
+ default:
+ NS_ERR("bbt has to be 0..2\n");
+ retval = -EINVAL;
+ goto error;
+ }
+ /*
+ * Perform minimum nandsim structure initialization to handle
+ * the initial ID read command correctly
+ */
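+ /*
+ * id_bytes entries left at 0xFF are treated as unused, so the
+ * number of trailing 0xFF pairs determines how many ID bytes the
+ * simulated chip reports.
+ */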
+ if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
+ nand->geom.idbytes = 8;
+ else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
+ nand->geom.idbytes = 6;
+ else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
+ nand->geom.idbytes = 4;
+ else
+ nand->geom.idbytes = 2;
+ nand->regs.status = NS_STATUS_OK(nand);
+ nand->nxstate = STATE_UNKNOWN;
+ nand->options |= OPT_PAGE512; /* temporary value */
+ memcpy(nand->ids, id_bytes, sizeof(nand->ids));
+ if (bus_width == 16) {
+ nand->busw = 16;
+ chip->options |= NAND_BUSWIDTH_16;
+ }
+
+ nsmtd->owner = THIS_MODULE;
+
+ if ((retval = parse_weakblocks()) != 0)
+ goto error;
+
+ if ((retval = parse_weakpages()) != 0)
+ goto error;
+
+ if ((retval = parse_gravepages()) != 0)
+ goto error;
+
+ retval = nand_scan_ident(nsmtd, 1, NULL);
+ if (retval) {
+ NS_ERR("cannot scan NAND Simulator device\n");
+ if (retval > 0)
+ retval = -ENXIO;
+ goto error;
+ }
+
+ if (bch) {
+ unsigned int eccsteps, eccbytes;
+ if (!mtd_nand_has_bch()) {
+ NS_ERR("BCH ECC support is disabled\n");
+ retval = -EINVAL;
+ goto error;
+ }
+ /* use 512-byte ecc blocks */
+ eccsteps = nsmtd->writesize/512;
+ eccbytes = (bch*13+7)/8;
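+ /* BCH over GF(2^13) needs 13 parity bits per bit of correction strength, rounded up to whole bytes */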
+ /* do not bother supporting small page devices */
+ if ((nsmtd->oobsize < 64) || !eccsteps) {
+ NS_ERR("bch not available on small page devices\n");
+ retval = -EINVAL;
+ goto error;
+ }
+ if ((eccbytes*eccsteps+2) > nsmtd->oobsize) {
+ NS_ERR("invalid bch value %u\n", bch);
+ retval = -EINVAL;
+ goto error;
+ }
+ chip->ecc.mode = NAND_ECC_SOFT_BCH;
+ chip->ecc.size = 512;
+ chip->ecc.strength = bch;
+ chip->ecc.bytes = eccbytes;
+ NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
+ }
+
+ retval = nand_scan_tail(nsmtd);
+ if (retval) {
+ NS_ERR("can't register NAND Simulator\n");
+ if (retval > 0)
+ retval = -ENXIO;
+ goto error;
+ }
+
+ if (overridesize) {
+ uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+ if (new_size >> overridesize != nsmtd->erasesize) {
+ NS_ERR("overridesize is too big\n");
+ retval = -EINVAL;
+ goto err_exit;
+ }
+ /* N.B. This relies on nand_scan not doing anything with the size before we change it */
+ nsmtd->size = new_size;
+ chip->chipsize = new_size;
+ chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
+ chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+ }
+
+ if ((retval = setup_wear_reporting(nsmtd)) != 0)
+ goto err_exit;
+
+ if ((retval = nandsim_debugfs_create(nand)) != 0)
+ goto err_exit;
+
+ if ((retval = init_nandsim(nsmtd)) != 0)
+ goto err_exit;
+
+ if ((retval = chip->scan_bbt(nsmtd)) != 0)
+ goto err_exit;
+
+ if ((retval = parse_badblocks(nand, nsmtd)) != 0)
+ goto err_exit;
+
+ /* Register NAND partitions */
+ retval = mtd_device_register(nsmtd, &nand->partitions[0],
+ nand->nbparts);
+ if (retval != 0)
+ goto err_exit;
+
+ return 0;
+
+err_exit:
+ free_nandsim(nand);
+ nand_release(nsmtd);
+ for (i = 0; i < ARRAY_SIZE(nand->partitions); ++i)
+ kfree(nand->partitions[i].name);
+error:
+ kfree(nsmtd);
+ free_lists();
+
+ return retval;
+}
+
+module_init(ns_init_module);
+
+/*
+ * Module clean-up function
+ */
+static void __exit ns_cleanup_module(void)
+{
+ struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
+ int i;
+
+ nandsim_debugfs_remove(ns);
+ free_nandsim(ns); /* Free nandsim private resources */
+ nand_release(nsmtd); /* Unregister driver */
+ for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+ kfree(ns->partitions[i].name);
+ kfree(nsmtd); /* Free other structures */
+ free_lists();
+}
+
+module_exit(ns_cleanup_module);
+
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Artem B. Bityuckiy");
+MODULE_DESCRIPTION ("The NAND flash simulator");
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
new file mode 100644
index 000000000..3187c6b92
--- /dev/null
+++ b/drivers/mtd/nand/ndfc.c
@@ -0,0 +1,290 @@
+/*
+ * drivers/mtd/nand/ndfc.c
+ *
+ * Overview:
+ * Platform independent driver for NDFC (NanD Flash Controller)
+ * integrated into EP440 cores
+ *
+ * Ported to an OF platform driver by Sean MacLennan
+ *
+ * The NDFC supports multiple chips, but this driver only supports a
+ * single chip since I do not have access to any boards with
+ * multiple chips.
+ *
+ * Author: Thomas Gleixner
+ *
+ * Copyright 2006 IBM
+ * Copyright 2008 PIKA Technologies
+ * Sean MacLennan <smaclennan@pikatech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/ndfc.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+
+#define NDFC_MAX_CS 4
+
+struct ndfc_controller {
+ struct platform_device *ofdev;
+ void __iomem *ndfcbase;
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int chip_select;
+ struct nand_hw_control ndfc_control;
+};
+
+static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
+
+static void ndfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ uint32_t ccr;
+ struct nand_chip *nchip = mtd->priv;
+ struct ndfc_controller *ndfc = nchip->priv;
+
+ ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
+ if (chip >= 0) {
+ ccr &= ~NDFC_CCR_BS_MASK;
+ ccr |= NDFC_CCR_BS(chip + ndfc->chip_select);
+ } else
+ ccr |= NDFC_CCR_RESET_CE;
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+}
+
+static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD);
+ else
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
+}
+
+static int ndfc_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+
+ return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
+}
+
+static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ uint32_t ccr;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+
+ ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
+ ccr |= NDFC_CCR_RESET_ECC;
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+ wmb();
+}
+
+static int ndfc_calculate_ecc(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+ uint32_t ecc;
+ uint8_t *p = (uint8_t *)&ecc;
+
+ wmb();
+ ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
+ /* The NDFC uses Smart Media (SMC) byte order */
+ ecc_code[0] = p[1];
+ ecc_code[1] = p[2];
+ ecc_code[2] = p[3];
+
+ return 0;
+}
+
+/*
+ * Speedups for buffer read/write/verify
+ *
+ * NDFC allows 32bit read/write of data. So we can speed up the buffer
+ * functions. No further checking, as nand_base will always read/write
+ * page aligned.
+ */
+static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+ uint32_t *p = (uint32_t *) buf;
+
+ for (; len > 0; len -= 4)
+ *p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
+}
+
+static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
+ uint32_t *p = (uint32_t *) buf;
+
+ for (; len > 0; len -= 4)
+ out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
+}
+
+/*
+ * Initialize chip structure
+ */
+static int ndfc_chip_init(struct ndfc_controller *ndfc,
+ struct device_node *node)
+{
+ struct device_node *flash_np;
+ struct nand_chip *chip = &ndfc->chip;
+ struct mtd_part_parser_data ppdata;
+ int ret;
+
+ chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
+ chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
+ chip->cmd_ctrl = ndfc_hwcontrol;
+ chip->dev_ready = ndfc_ready;
+ chip->select_chip = ndfc_select_chip;
+ chip->chip_delay = 50;
+ chip->controller = &ndfc->ndfc_control;
+ chip->read_buf = ndfc_read_buf;
+ chip->write_buf = ndfc_write_buf;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.hwctl = ndfc_enable_hwecc;
+ chip->ecc.calculate = ndfc_calculate_ecc;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ chip->priv = ndfc;
+
+ ndfc->mtd.priv = chip;
+ ndfc->mtd.owner = THIS_MODULE;
+
+ flash_np = of_get_next_child(node, NULL);
+ if (!flash_np)
+ return -ENODEV;
+
+ ppdata.of_node = flash_np;
+ ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
+ dev_name(&ndfc->ofdev->dev), flash_np->name);
+ if (!ndfc->mtd.name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = nand_scan(&ndfc->mtd, 1);
+ if (ret)
+ goto err;
+
+ ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0);
+
+err:
+ of_node_put(flash_np);
+ if (ret)
+ kfree(ndfc->mtd.name);
+ return ret;
+}
+
+static int ndfc_probe(struct platform_device *ofdev)
+{
+ struct ndfc_controller *ndfc;
+ const __be32 *reg;
+ u32 ccr;
+ u32 cs;
+ int err, len;
+
+ /* Read the reg property to get the chip select */
+ reg = of_get_property(ofdev->dev.of_node, "reg", &len);
+ if (reg == NULL || len != 12) {
+ dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
+ return -ENOENT;
+ }
+
+ cs = be32_to_cpu(reg[0]);
+ if (cs >= NDFC_MAX_CS) {
+ dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
+ return -EINVAL;
+ }
+
+ ndfc = &ndfc_ctrl[cs];
+ ndfc->chip_select = cs;
+
+ spin_lock_init(&ndfc->ndfc_control.lock);
+ init_waitqueue_head(&ndfc->ndfc_control.wq);
+ ndfc->ofdev = ofdev;
+ dev_set_drvdata(&ofdev->dev, ndfc);
+
+ ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
+ if (!ndfc->ndfcbase) {
+ dev_err(&ofdev->dev, "failed to get memory\n");
+ return -EIO;
+ }
+
+ ccr = NDFC_CCR_BS(ndfc->chip_select);
+
+ /* It is ok if ccr does not exist - just default to 0 */
+ reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
+ if (reg)
+ ccr |= be32_to_cpup(reg);
+
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+
+ /* Set the bank settings if given */
+ reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
+ if (reg) {
+ int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
+ out_be32(ndfc->ndfcbase + offset, be32_to_cpup(reg));
+ }
+
+ err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
+ if (err) {
+ iounmap(ndfc->ndfcbase);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ndfc_remove(struct platform_device *ofdev)
+{
+ struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
+
+ nand_release(&ndfc->mtd);
+ kfree(ndfc->mtd.name);
+
+ return 0;
+}
+
+static const struct of_device_id ndfc_match[] = {
+ { .compatible = "ibm,ndfc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ndfc_match);
+
+static struct platform_driver ndfc_driver = {
+ .driver = {
+ .name = "ndfc",
+ .of_match_table = ndfc_match,
+ },
+ .probe = ndfc_probe,
+ .remove = ndfc_remove,
+};
+
+module_platform_driver(ndfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("OF Platform driver for NDFC");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
new file mode 100644
index 000000000..e58c644dd
--- /dev/null
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright © 2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#define REG_FMICSR 0x00
+#define REG_SMCSR 0xa0
+#define REG_SMISR 0xac
+#define REG_SMCMD 0xb0
+#define REG_SMADDR 0xb4
+#define REG_SMDATA 0xb8
+
+#define RESET_FMI 0x01
+#define NAND_EN 0x08
+#define READYBUSY (0x01 << 18)
+
+#define SWRST 0x01
+#define PSIZE (0x01 << 3)
+#define DMARWEN (0x03 << 1)
+#define BUSWID (0x01 << 4)
+#define ECC4EN (0x01 << 5)
+#define WP (0x01 << 24)
+#define NANDCS (0x01 << 25)
+#define ENDADDR (0x01 << 31)
+
+#define read_data_reg(dev) \
+ __raw_readl((dev)->reg + REG_SMDATA)
+
+#define write_data_reg(dev, val) \
+ __raw_writel((val), (dev)->reg + REG_SMDATA)
+
+#define write_cmd_reg(dev, val) \
+ __raw_writel((val), (dev)->reg + REG_SMCMD)
+
+#define write_addr_reg(dev, val) \
+ __raw_writel((val), (dev)->reg + REG_SMADDR)
+
+struct nuc900_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ void __iomem *reg;
+ struct clk *clk;
+ spinlock_t lock;
+};
+
+static const struct mtd_partition partitions[] = {
+ {
+ .name = "NAND FS 0",
+ .offset = 0,
+ .size = 8 * 1024 * 1024
+ },
+ {
+ .name = "NAND FS 1",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL
+ }
+};
+
+static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
+{
+ unsigned char ret;
+ struct nuc900_nand *nand;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ ret = (unsigned char)read_data_reg(nand);
+
+ return ret;
+}
+
+static void nuc900_nand_read_buf(struct mtd_info *mtd,
+ unsigned char *buf, int len)
+{
+ int i;
+ struct nuc900_nand *nand;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ for (i = 0; i < len; i++)
+ buf[i] = (unsigned char)read_data_reg(nand);
+}
+
+static void nuc900_nand_write_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
+{
+ int i;
+ struct nuc900_nand *nand;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ for (i = 0; i < len; i++)
+ write_data_reg(nand, buf[i]);
+}
+
+static int nuc900_check_rb(struct nuc900_nand *nand)
+{
+ unsigned int val;
+ spin_lock(&nand->lock);
+ val = __raw_readl(nand->reg + REG_SMISR);
+ val &= READYBUSY;
+ spin_unlock(&nand->lock);
+
+ return val;
+}
+
+static int nuc900_nand_devready(struct mtd_info *mtd)
+{
+ struct nuc900_nand *nand;
+ int ready;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ ready = (nuc900_check_rb(nand)) ? 1 : 0;
+ return ready;
+}
+
+static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd->priv;
+ struct nuc900_nand *nand;
+
+ nand = container_of(mtd, struct nuc900_nand, mtd);
+
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ write_cmd_reg(nand, command & 0xff);
+
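+ /* The ENDADDR bit marks the last address cycle of a sequence */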
+ if (column != -1 || page_addr != -1) {
+
+ if (column != -1) {
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ write_addr_reg(nand, column);
+ write_addr_reg(nand, column >> 8 | ENDADDR);
+ }
+ if (page_addr != -1) {
+ write_addr_reg(nand, page_addr);
+
+ if (chip->chipsize > (128 << 20)) {
+ write_addr_reg(nand, page_addr >> 8);
+ write_addr_reg(nand, page_addr >> 16 | ENDADDR);
+ } else {
+ write_addr_reg(nand, page_addr >> 8 | ENDADDR);
+ }
+ }
+ }
+
+ switch (command) {
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+
+ write_cmd_reg(nand, NAND_CMD_STATUS);
+ write_cmd_reg(nand, command);
+
+ while (!nuc900_check_rb(nand))
+ ;
+
+ return;
+
+ case NAND_CMD_RNDOUT:
+ write_cmd_reg(nand, NAND_CMD_RNDOUTSTART);
+ return;
+
+ case NAND_CMD_READ0:
+
+ write_cmd_reg(nand, NAND_CMD_READSTART);
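+ /* fall through - wait for the device to become ready below */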
+ default:
+
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+
+ while (!chip->dev_ready(mtd))
+ ;
+}
+
+
+static void nuc900_nand_enable(struct nuc900_nand *nand)
+{
+ unsigned int val;
+ spin_lock(&nand->lock);
+ __raw_writel(RESET_FMI, (nand->reg + REG_FMICSR));
+
+ val = __raw_readl(nand->reg + REG_FMICSR);
+
+ if (!(val & NAND_EN))
+ __raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);
+
+ val = __raw_readl(nand->reg + REG_SMCSR);
+
+ val &= ~(SWRST|PSIZE|DMARWEN|BUSWID|ECC4EN|NANDCS);
+ val |= WP;
+
+ __raw_writel(val, nand->reg + REG_SMCSR);
+
+ spin_unlock(&nand->lock);
+}
+
+static int nuc900_nand_probe(struct platform_device *pdev)
+{
+ struct nuc900_nand *nuc900_nand;
+ struct nand_chip *chip;
+ struct resource *res;
+
+ nuc900_nand = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_nand),
+ GFP_KERNEL);
+ if (!nuc900_nand)
+ return -ENOMEM;
+ chip = &(nuc900_nand->chip);
+
+ nuc900_nand->mtd.priv = chip;
+ nuc900_nand->mtd.owner = THIS_MODULE;
+ spin_lock_init(&nuc900_nand->lock);
+
+ nuc900_nand->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nuc900_nand->clk))
+ return -ENOENT;
+ clk_enable(nuc900_nand->clk);
+
+ chip->cmdfunc = nuc900_nand_command_lp;
+ chip->dev_ready = nuc900_nand_devready;
+ chip->read_byte = nuc900_nand_read_byte;
+ chip->write_buf = nuc900_nand_write_buf;
+ chip->read_buf = nuc900_nand_read_buf;
+ chip->chip_delay = 50;
+ chip->options = 0;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nuc900_nand->reg))
+ return PTR_ERR(nuc900_nand->reg);
+
+ nuc900_nand_enable(nuc900_nand);
+
+ if (nand_scan(&(nuc900_nand->mtd), 1))
+ return -ENXIO;
+
+ mtd_device_register(&(nuc900_nand->mtd), partitions,
+ ARRAY_SIZE(partitions));
+
+ platform_set_drvdata(pdev, nuc900_nand);
+
+ return 0;
+}
+
+static int nuc900_nand_remove(struct platform_device *pdev)
+{
+ struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
+
+ nand_release(&nuc900_nand->mtd);
+ clk_disable(nuc900_nand->clk);
+
+ return 0;
+}
+
+static struct platform_driver nuc900_nand_driver = {
+ .probe = nuc900_nand_probe,
+ .remove = nuc900_nand_remove,
+ .driver = {
+ .name = "nuc900-fmi",
+ },
+};
+
+module_platform_driver(nuc900_nand_driver);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("w90p910/NUC9xx NAND driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-fmi");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
new file mode 100644
index 000000000..60fa89939
--- /dev/null
+++ b/drivers/mtd/nand/omap2.c
@@ -0,0 +1,2086 @@
+/*
+ * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
+ * Copyright © 2004 Micron Technology Inc.
+ * Copyright © 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/mtd/nand_bch.h>
+#include <linux/platform_data/elm.h>
+
+#include <linux/platform_data/mtd-nand-omap2.h>
+
+#define DRIVER_NAME "omap2-nand"
+#define OMAP_NAND_TIMEOUT_MS 5000
+
+#define NAND_Ecc_P1e (1 << 0)
+#define NAND_Ecc_P2e (1 << 1)
+#define NAND_Ecc_P4e (1 << 2)
+#define NAND_Ecc_P8e (1 << 3)
+#define NAND_Ecc_P16e (1 << 4)
+#define NAND_Ecc_P32e (1 << 5)
+#define NAND_Ecc_P64e (1 << 6)
+#define NAND_Ecc_P128e (1 << 7)
+#define NAND_Ecc_P256e (1 << 8)
+#define NAND_Ecc_P512e (1 << 9)
+#define NAND_Ecc_P1024e (1 << 10)
+#define NAND_Ecc_P2048e (1 << 11)
+
+#define NAND_Ecc_P1o (1 << 16)
+#define NAND_Ecc_P2o (1 << 17)
+#define NAND_Ecc_P4o (1 << 18)
+#define NAND_Ecc_P8o (1 << 19)
+#define NAND_Ecc_P16o (1 << 20)
+#define NAND_Ecc_P32o (1 << 21)
+#define NAND_Ecc_P64o (1 << 22)
+#define NAND_Ecc_P128o (1 << 23)
+#define NAND_Ecc_P256o (1 << 24)
+#define NAND_Ecc_P512o (1 << 25)
+#define NAND_Ecc_P1024o (1 << 26)
+#define NAND_Ecc_P2048o (1 << 27)
+
+#define TF(value) (value ? 1 : 0)
+
+#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
+#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
+#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
+#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
+#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
+#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
+#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
+#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
+
+#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
+#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
+#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
+#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
+
+#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
+#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
+#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
+#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
+#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
+#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
+#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
+#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
+
+#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
+#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
+#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
+#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
+
+#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
+#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+
+#define PREFETCH_CONFIG1_CS_SHIFT 24
+#define ECC_CONFIG_CS_SHIFT 1
+#define CS_MASK 0x7
+#define ENABLE_PREFETCH (0x1 << 7)
+#define DMA_MPU_MODE_SHIFT 2
+#define ECCSIZE0_SHIFT 12
+#define ECCSIZE1_SHIFT 22
+#define ECC1RESULTSIZE 0x1
+#define ECCCLEAR 0x100
+#define ECC1 0x1
+#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
+#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
+#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
+#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
+#define STATUS_BUFF_EMPTY 0x00000001
+
+#define OMAP24XX_DMA_GPMC 4
+
+#define SECTOR_BYTES 512
+/* 4-bit padding to make byte aligned, 56 = 52 + 4 */
+#define BCH4_BIT_PAD 4
+
+/* GPMC ecc engine settings for read */
+#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
+#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
+#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
+#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
+#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */
+
+/* GPMC ecc engine settings for write */
+#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
+#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
+#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
+
+#define BADBLOCK_MARKER_LENGTH 2
+
+static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
+ 0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
+ 0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
+ 0x07, 0x0e};
+static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
+ 0xac, 0x6b, 0xff, 0x99, 0x7b};
+static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
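+
+/*
+ * These vectors are the ECC syndromes computed over a fully erased
+ * (all-0xFF) sector; omap_elm_correct_data() compares calc_ecc against
+ * them to recognise erased pages before involving the ELM.
+ */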
+
+/* Shared among all NAND instances to synchronize access to the ECC Engine */
+static struct nand_hw_control omap_gpmc_controller = {
+ .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
+};
+
+struct omap_nand_info {
+ struct omap_nand_platform_data *pdata;
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ struct platform_device *pdev;
+
+ int gpmc_cs;
+ unsigned long phys_base;
+ enum omap_ecc ecc_opt;
+ struct completion comp;
+ struct dma_chan *dma;
+ int gpmc_irq_fifo;
+ int gpmc_irq_count;
+ enum {
+ OMAP_NAND_IO_READ = 0, /* read */
+ OMAP_NAND_IO_WRITE, /* write */
+ } iomode;
+ u_char *buf;
+ int buf_len;
+ struct gpmc_nand_regs reg;
+ /* generated at runtime depending on ECC algorithm and layout selected */
+ struct nand_ecclayout oobinfo;
+ /* fields specific for BCHx_HW ECC scheme */
+ struct device *elm_dev;
+ struct device_node *of_node;
+};
+
+/**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read (0) or post write (1) mode
+ * @info: NAND device structure
+ */
+static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+ unsigned int u32_count, int is_write, struct omap_nand_info *info)
+{
+ u32 val;
+
+ if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+ return -1;
+
+ if (readl(info->reg.gpmc_prefetch_control))
+ return -EBUSY;
+
+ /* Set the amount of bytes to be prefetched */
+ writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+ /* Set dma/mpu mode, the prefetch read / post write and
+	 * enable the engine. Set which cs has requested it.
+ */
+ val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+ PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+ (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
+ writel(val, info->reg.gpmc_prefetch_config1);
+
+ /* Start the prefetch engine */
+ writel(0x1, info->reg.gpmc_prefetch_control);
+
+ return 0;
+}
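+
+/*
+ * Worked example (illustrative values): a polled read on cs 0 with the
+ * maximum FIFO threshold, omap_prefetch_enable(0, 0x40, 0, len, 0, info),
+ * programs PREFETCH_CONFIG1 with
+ * (0 << 24) | (0x40 << 8) | (0x1 << 7) | (0 << 2) | 0 = 0x4080.
+ */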
+
+/**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+{
+ u32 config1;
+
+ /* check if the same module/cs is trying to reset */
+ config1 = readl(info->reg.gpmc_prefetch_config1);
+ if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+ return -EINVAL;
+
+ /* Stop the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_control);
+
+ /* Reset/disable the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_config1);
+
+ return 0;
+}
+
+/**
+ * omap_hwcontrol - hardware specific access to control-lines
+ * @mtd: MTD device structure
+ * @cmd: command to device
+ * @ctrl:
+ * NAND_NCE: bit 0 -> don't care
+ * NAND_CLE: bit 1 -> Command Latch
+ * NAND_ALE: bit 2 -> Address Latch
+ *
+ * NOTE: boards may use different bits for these!!
+ */
+static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writeb(cmd, info->reg.gpmc_nand_command);
+
+ else if (ctrl & NAND_ALE)
+ writeb(cmd, info->reg.gpmc_nand_address);
+
+ else /* NAND_NCE */
+ writeb(cmd, info->reg.gpmc_nand_data);
+ }
+}
+
+/**
+ * omap_read_buf8 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+
+ ioread8_rep(nand->IO_ADDR_R, buf, len);
+}
+
+/**
+ * omap_write_buf8 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ u_char *p = (u_char *)buf;
+ u32 status = 0;
+
+ while (len--) {
+ iowrite8(*p++, info->nand.IO_ADDR_W);
+ /* wait until buffer is available for write */
+ do {
+ status = readl(info->reg.gpmc_status) &
+ STATUS_BUFF_EMPTY;
+ } while (!status);
+ }
+}
+
+/**
+ * omap_read_buf16 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+
+ ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
+}
+
+/**
+ * omap_write_buf16 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ u16 *p = (u16 *) buf;
+ u32 status = 0;
+ /* FIXME try bursts of writesw() or DMA ... */
+ len >>= 1;
+
+ while (len--) {
+ iowrite16(*p++, info->nand.IO_ADDR_W);
+ /* wait until buffer is available for write */
+ do {
+ status = readl(info->reg.gpmc_status) &
+ STATUS_BUFF_EMPTY;
+ } while (!status);
+ }
+}
+
+/**
+ * omap_read_buf_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ uint32_t r_count = 0;
+ int ret = 0;
+ u32 *p = (u32 *)buf;
+
+ /* take care of subpage reads */
+ if (len % 4) {
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len % 4);
+ else
+ omap_read_buf8(mtd, buf, len % 4);
+ p = (u32 *) (buf + len % 4);
+ len -= len % 4;
+ }
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, (u_char *)p, len);
+ else
+ omap_read_buf8(mtd, (u_char *)p, len);
+ } else {
+ do {
+ r_count = readl(info->reg.gpmc_prefetch_status);
+ r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
+ r_count = r_count >> 2;
+ ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
+ p += r_count;
+ len -= r_count << 2;
+ } while (len);
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ }
+}
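+
+/*
+ * For example, a prefetch FIFO count of 64 bytes above yields
+ * r_count = 64 >> 2 = 16, so sixteen 32-bit reads drain the FIFO and
+ * len decreases by 16 << 2 = 64 on that pass of the loop.
+ */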
+
+/**
+ * omap_write_buf_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ uint32_t w_count = 0;
+ int i = 0, ret = 0;
+ u16 *p = (u16 *)buf;
+ unsigned long tim, limit;
+ u32 val;
+
+ /* take care of subpage writes */
+ if (len % 2 != 0) {
+ writeb(*buf, info->nand.IO_ADDR_W);
+ p = (u16 *)(buf + 1);
+ len--;
+ }
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, (u_char *)p, len);
+ else
+ omap_write_buf8(mtd, (u_char *)p, len);
+ } else {
+ while (len) {
+ w_count = readl(info->reg.gpmc_prefetch_status);
+ w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
+ w_count = w_count >> 1;
+ for (i = 0; (i < w_count) && len; i++, len -= 2)
+ iowrite16(*p++, info->nand.IO_ADDR_W);
+ }
+		/* wait for data to be flushed out before resetting the prefetch */
+ tim = 0;
+ limit = (loops_per_jiffy *
+ msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ do {
+ cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ }
+}
+
+/*
+ * omap_nand_dma_callback: callback on the completion of dma transfer
+ * @data: pointer to completion data structure
+ */
+static void omap_nand_dma_callback(void *data)
+{
+ complete((struct completion *) data);
+}
+
+/*
+ * omap_nand_dma_transfer: configure and start dma transfer
+ * @mtd: MTD device structure
+ * @addr: virtual address in RAM of source/destination
+ * @len: number of data bytes to be transferred
+ * @is_write: flag for read/write operation
+ */
+static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
+ unsigned int len, int is_write)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ struct dma_async_tx_descriptor *tx;
+ enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE;
+ struct scatterlist sg;
+ unsigned long tim, limit;
+ unsigned n;
+ int ret;
+ u32 val;
+
+ if (addr >= high_memory) {
+ struct page *p1;
+
+ if (((size_t)addr & PAGE_MASK) !=
+ ((size_t)(addr + len - 1) & PAGE_MASK))
+ goto out_copy;
+ p1 = vmalloc_to_page(addr);
+ if (!p1)
+ goto out_copy;
+ addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
+ }
+
+ sg_init_one(&sg, addr, len);
+ n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+ if (n == 0) {
+ dev_err(&info->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n", len);
+ goto out_copy;
+ }
+
+ tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+ is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto out_copy_unmap;
+
+ tx->callback = omap_nand_dma_callback;
+ tx->callback_param = &info->comp;
+ dmaengine_submit(tx);
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy_unmap;
+
+ init_completion(&info->comp);
+ dma_async_issue_pending(info->dma);
+
+ /* setup and start DMA using dma_addr */
+ wait_for_completion(&info->comp);
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+
+ do {
+ cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+ return 0;
+
+out_copy_unmap:
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
+ : omap_write_buf16(mtd, (u_char *) addr, len);
+ else
+ is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
+ : omap_write_buf8(mtd, (u_char *) addr, len);
+ return 0;
+}
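+
+/*
+ * The high_memory test above handles vmalloc'd buffers: a buffer that
+ * straddles a page boundary cannot be assumed physically contiguous, so
+ * such transfers (and unmappable pages) fall back to the CPU copy path.
+ */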
+
+/**
+ * omap_read_buf_dma_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store date
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ if (len <= mtd->oobsize)
+ omap_read_buf_pref(mtd, buf, len);
+ else
+ /* start transfer in DMA mode */
+ omap_nand_dma_transfer(mtd, buf, len, 0x0);
+}
+
+/**
+ * omap_write_buf_dma_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_dma_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ if (len <= mtd->oobsize)
+ omap_write_buf_pref(mtd, buf, len);
+ else
+ /* start transfer in DMA mode */
+ omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
+}
+
+/*
+ * omap_nand_irq - GPMC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+ struct omap_nand_info *info = (struct omap_nand_info *) dev;
+ u32 bytes;
+
+ bytes = readl(info->reg.gpmc_prefetch_status);
+ bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
+	bytes = bytes & 0xFFFC; /* I/O in multiples of 4 bytes */
+ if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+ if (this_irq == info->gpmc_irq_count)
+ goto done;
+
+ if (info->buf_len && (info->buf_len < bytes))
+ bytes = info->buf_len;
+ else if (!info->buf_len)
+ bytes = 0;
+ iowrite32_rep(info->nand.IO_ADDR_W,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+ info->buf_len -= bytes;
+
+ } else {
+ ioread32_rep(info->nand.IO_ADDR_R,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+
+ if (this_irq == info->gpmc_irq_count)
+ goto done;
+ }
+
+ return IRQ_HANDLED;
+
+done:
+ complete(&info->comp);
+
+ disable_irq_nosync(info->gpmc_irq_fifo);
+ disable_irq_nosync(info->gpmc_irq_count);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+
+ if (len <= mtd->oobsize) {
+ omap_read_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_READ;
+ info->buf = buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
+
+ /* waiting for read to complete */
+ wait_for_completion(&info->comp);
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len);
+ else
+ omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+ unsigned long tim, limit;
+ u32 val;
+
+ if (len <= mtd->oobsize) {
+ omap_write_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_WRITE;
+ info->buf = (u_char *) buf;
+ init_completion(&info->comp);
+
+	/* configure and start prefetch transfer with fifo threshold = 24 */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
+
+ /* waiting for write to complete */
+ wait_for_completion(&info->comp);
+
+	/* wait for data to be flushed out before resetting the prefetch */
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ do {
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ cpu_relax();
+ } while (val && (tim++ < limit));
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, buf, len);
+ else
+ omap_write_buf8(mtd, buf, len);
+}
+
+/**
+ * gen_true_ecc - This function will generate the true ECC value
+ * @ecc_buf: buffer to store ecc code
+ *
+ * This generated true ECC value can be used when correcting
+ * data read from NAND flash memory core
+ */
+static void gen_true_ecc(u8 *ecc_buf)
+{
+ u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
+ ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
+
+ ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
+ P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
+ ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
+ P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
+ ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
+ P1e(tmp) | P2048o(tmp) | P2048e(tmp));
+}
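+
+/*
+ * Layout of 'tmp' above, matching the NAND_Ecc_* definitions: bits 0-7
+ * hold P1e..P128e, bits 8-11 hold P256e..P2048e, bits 16-23 hold
+ * P1o..P128o and bits 24-27 hold P256o..P2048o.
+ */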
+
+/**
+ * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
+ * @ecc_data1: ecc code from nand spare area
+ * @ecc_data2: ecc code from hardware register obtained from hardware ecc
+ * @page_data: page data
+ *
+ * This function compares two ECC's and indicates if there is an error.
+ * If the error can be corrected it will be corrected to the buffer.
+ * If there is no error, %0 is returned. If there is an error but it
+ * was corrected, %1 is returned. Otherwise, %-1 is returned.
+ */
+static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
+ u8 *ecc_data2, /* read from register */
+ u8 *page_data)
+{
+ uint i;
+ u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
+ u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
+ u8 ecc_bit[24];
+ u8 ecc_sum = 0;
+ u8 find_bit = 0;
+ uint find_byte = 0;
+ int isEccFF;
+
+ isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
+
+ gen_true_ecc(ecc_data1);
+ gen_true_ecc(ecc_data2);
+
+ for (i = 0; i <= 2; i++) {
+ *(ecc_data1 + i) = ~(*(ecc_data1 + i));
+ *(ecc_data2 + i) = ~(*(ecc_data2 + i));
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp0_bit[i] = *ecc_data1 % 2;
+ *ecc_data1 = *ecc_data1 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp1_bit[i] = *(ecc_data1 + 1) % 2;
+ *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp2_bit[i] = *(ecc_data1 + 2) % 2;
+ *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp0_bit[i] = *ecc_data2 % 2;
+ *ecc_data2 = *ecc_data2 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp1_bit[i] = *(ecc_data2 + 1) % 2;
+ *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp2_bit[i] = *(ecc_data2 + 2) % 2;
+ *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
+ }
+
+ for (i = 0; i < 6; i++)
+ ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
+
+ ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
+ ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
+
+ for (i = 0; i < 24; i++)
+ ecc_sum += ecc_bit[i];
+
+ switch (ecc_sum) {
+ case 0:
+ /* Not reached because this function is not called if
+ * ECC values are equal
+ */
+ return 0;
+
+ case 1:
+ /* Uncorrectable error */
+ pr_debug("ECC UNCORRECTED_ERROR 1\n");
+ return -1;
+
+ case 11:
+		/* Uncorrectable error */
+ pr_debug("ECC UNCORRECTED_ERROR B\n");
+ return -1;
+
+ case 12:
+ /* Correctable error */
+ find_byte = (ecc_bit[23] << 8) +
+ (ecc_bit[21] << 7) +
+ (ecc_bit[19] << 6) +
+ (ecc_bit[17] << 5) +
+ (ecc_bit[15] << 4) +
+ (ecc_bit[13] << 3) +
+ (ecc_bit[11] << 2) +
+ (ecc_bit[9] << 1) +
+ ecc_bit[7];
+
+ find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
+
+ pr_debug("Correcting single bit ECC error at offset: "
+ "%d, bit: %d\n", find_byte, find_bit);
+
+ page_data[find_byte] ^= (1 << find_bit);
+
+ return 1;
+ default:
+ if (isEccFF) {
+ if (ecc_data2[0] == 0 &&
+ ecc_data2[1] == 0 &&
+ ecc_data2[2] == 0)
+ return 0;
+ }
+ pr_debug("UNCORRECTED_ERROR default\n");
+ return -1;
+ }
+}
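+
+/*
+ * Background: 24 syndrome bits are XOR-compared above. A syndrome with
+ * exactly half the bits set (ecc_sum == 12) identifies a single-bit
+ * data error, which is why only that case is repaired in place.
+ */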
+
+/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * Compares the ECC read from the NAND spare area with the ECC register
+ * values and, if they mismatch, calls 'omap_compare_ecc' for error
+ * detection and correction. If there are no errors, %0 is returned. If
+ * there were errors and all of the errors were corrected, the number of
+ * corrected errors is returned. If uncorrectable errors exist, %-1 is
+ * returned.
+ */
+static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int blockCnt = 0, i = 0, ret = 0;
+ int stat = 0;
+
+ /* Ex NAND_ECC_HW12_2048 */
+ if ((info->nand.ecc.mode == NAND_ECC_HW) &&
+ (info->nand.ecc.size == 2048))
+ blockCnt = 4;
+ else
+ blockCnt = 1;
+
+ for (i = 0; i < blockCnt; i++) {
+ if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+ ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+ if (ret < 0)
+ return ret;
+ /* keep track of the number of corrected errors */
+ stat += ret;
+ }
+ read_ecc += 3;
+ calc_ecc += 3;
+ dat += 512;
+ }
+ return stat;
+}
+
+/**
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Using non-inverted ECC can be considered ugly since writing a blank
+ * page, i.e. padding, will clear the ECC bytes. This is no problem as
+ * long as nobody is trying to write data on the seemingly unused page.
+ * Reading
+ * an erased page will produce an ECC mismatch between generated and read
+ * ECC bytes that has to be dealt with separately.
+ */
+static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ u32 val;
+
+ val = readl(info->reg.gpmc_ecc_config);
+ if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
+ return -EINVAL;
+
+ /* read ecc result */
+ val = readl(info->reg.gpmc_ecc1_result);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+ return 0;
+}
+
+/**
+ * omap_enable_hwecc - This function enables the hardware ecc functionality
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ u32 val;
+
+ /* clear ecc and enable bits */
+ val = ECCCLEAR | ECC1;
+ writel(val, info->reg.gpmc_ecc_control);
+
+ /* program ecc and result sizes */
+ val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+ ECC1RESULTSIZE);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+ break;
+ case NAND_ECC_READSYN:
+ writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+ break;
+ default:
+ dev_info(&info->pdev->dev,
+ "error: unrecognized Mode[%d]!\n", mode);
+ break;
+ }
+
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ writel(val, info->reg.gpmc_ecc_config);
+}
+
+/**
+ * omap_wait - wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND Chip structure
+ *
+ * The wait function is called during program and erase operations. Given
+ * the way it is called from the MTD layer, we should wait until the NAND
+ * chip is ready after the program/erase operation has completed.
+ *
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs.
+ */
+static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long timeo = jiffies;
+ int status, state = this->state;
+
+ if (state == FL_ERASING)
+ timeo += msecs_to_jiffies(400);
+ else
+ timeo += msecs_to_jiffies(20);
+
+ writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
+ while (time_before(jiffies, timeo)) {
+ status = readb(info->reg.gpmc_nand_data);
+ if (status & NAND_STATUS_READY)
+ break;
+ cond_resched();
+ }
+
+ status = readb(info->reg.gpmc_nand_data);
+ return status;
+}
+
+/**
+ * omap_dev_ready - calls the platform specific dev_ready function
+ * @mtd: MTD device structure
+ */
+static int omap_dev_ready(struct mtd_info *mtd)
+{
+ unsigned int val = 0;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+
+ val = readl(info->reg.gpmc_status);
+
+	return (val & 0x100) == 0x100;
+}
+
+/**
+ * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ *
+ * When using BCH with SW correction (i.e. no ELM), sector size is set
+ * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
+ * for both reading and writing with:
+ * eccsize0 = 0 (no additional protected byte in spare area)
+ * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
+ */
+static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
+{
+ unsigned int bch_type;
+ unsigned int dev_width, nsectors;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ enum omap_ecc ecc_opt = info->ecc_opt;
+ struct nand_chip *chip = mtd->priv;
+ u32 val, wr_mode;
+ unsigned int ecc_size1, ecc_size0;
+
+ /* GPMC configurations for calculating ECC */
+ switch (ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ bch_type = 0;
+ nsectors = 1;
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_type = 0;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_1;
+ ecc_size0 = BCH4R_ECC_SIZE0;
+ ecc_size1 = BCH4R_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ bch_type = 1;
+ nsectors = 1;
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_type = 1;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_1;
+ ecc_size0 = BCH8R_ECC_SIZE0;
+ ecc_size1 = BCH8R_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ bch_type = 0x2;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = 0x01;
+ ecc_size0 = 52; /* ECC bits in nibbles per sector */
+ ecc_size1 = 0; /* non-ECC bits in nibbles per sector */
+ } else {
+ wr_mode = 0x01;
+ ecc_size0 = 0; /* extra bits in nibbles per sector */
+ ecc_size1 = 52; /* OOB bits in nibbles per sector */
+ }
+ break;
+ default:
+ return;
+ }
+
+ writel(ECC1, info->reg.gpmc_ecc_control);
+
+ /* Configure ecc size for BCH */
+ val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
+ /* BCH configuration */
+ val = ((1 << 16) | /* enable BCH */
+ (bch_type << 12) | /* BCH4/BCH8/BCH16 */
+ (wr_mode << 8) | /* wrap mode */
+ (dev_width << 7) | /* bus width */
+ (((nsectors-1) & 0x7) << 4) | /* number of sectors */
+ (info->gpmc_cs << 1) | /* ECC CS */
+ (0x1)); /* enable ECC */
+
+ writel(val, info->reg.gpmc_ecc_config);
+
+ /* Clear ecc and enable bits */
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+}
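+
+/*
+ * Worked example (illustrative values): a BCH8 write on a 2048-byte
+ * page (4 sectors) over an 8-bit bus on cs 0 programs ECC_CONFIG with
+ * (1 << 16) | (1 << 12) | (6 << 8) | (0 << 7) | (3 << 4) | (0 << 1) |
+ * 0x1 = 0x11631.
+ */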
+
+static u8 bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
+static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
+ 0x97, 0x79, 0xe5, 0x24, 0xb5};
+
+/**
+ * omap_calculate_ecc_bch - Generate BCH ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_calc: Buffer to store the calculated ECC bytes
+ *
+ * Supports calculating BCH4/8/16 ECC vectors for the page
+ */
+static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ struct gpmc_nand_regs *gpmc_regs = &info->reg;
+ u8 *ecc_code;
+ unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+ u32 val;
+ int i, j;
+
+ nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+ for (i = 0; i < nsectors; i++) {
+ ecc_code = ecc_calc;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ break;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(gpmc_regs->gpmc_bch_result6[i]);
+ ecc_code[0] = ((val >> 8) & 0xFF);
+ ecc_code[1] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result5[i]);
+ ecc_code[2] = ((val >> 24) & 0xFF);
+ ecc_code[3] = ((val >> 16) & 0xFF);
+ ecc_code[4] = ((val >> 8) & 0xFF);
+ ecc_code[5] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result4[i]);
+ ecc_code[6] = ((val >> 24) & 0xFF);
+ ecc_code[7] = ((val >> 16) & 0xFF);
+ ecc_code[8] = ((val >> 8) & 0xFF);
+ ecc_code[9] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result3[i]);
+ ecc_code[10] = ((val >> 24) & 0xFF);
+ ecc_code[11] = ((val >> 16) & 0xFF);
+ ecc_code[12] = ((val >> 8) & 0xFF);
+ ecc_code[13] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result2[i]);
+ ecc_code[14] = ((val >> 24) & 0xFF);
+ ecc_code[15] = ((val >> 16) & 0xFF);
+ ecc_code[16] = ((val >> 8) & 0xFF);
+ ecc_code[17] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result1[i]);
+ ecc_code[18] = ((val >> 24) & 0xFF);
+ ecc_code[19] = ((val >> 16) & 0xFF);
+ ecc_code[20] = ((val >> 8) & 0xFF);
+ ecc_code[21] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result0[i]);
+ ecc_code[22] = ((val >> 24) & 0xFF);
+ ecc_code[23] = ((val >> 16) & 0xFF);
+ ecc_code[24] = ((val >> 8) & 0xFF);
+ ecc_code[25] = ((val >> 0) & 0xFF);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch4_polynomial[j];
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Set 8th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch8_polynomial[j];
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* Set 14th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ecc_calc += eccbytes;
+ }
+
+ return 0;
+}
+
+/**
+ * erased_sector_bitflips - count bit flips
+ * @data: data sector buffer
+ * @oob: oob buffer
+ * @info: omap_nand_info
+ *
+ * Check whether the number of bit flips in an erased page falls below
+ * the correctable level. If it does, report the page as erased with
+ * correctable bit flips, else report the page as uncorrectable.
+ */
+static int erased_sector_bitflips(u_char *data, u_char *oob,
+ struct omap_nand_info *info)
+{
+ int flip_bits = 0, i;
+
+ for (i = 0; i < info->nand.ecc.size; i++) {
+ flip_bits += hweight8(~data[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
+ flip_bits += hweight8(~oob[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ /*
+	 * The bit flips fall within the correctable level.
+	 * Fill the data area with 0xFF.
+ */
+ if (flip_bits) {
+ memset(data, 0xFF, info->nand.ecc.size);
+ memset(oob, 0xFF, info->nand.ecc.bytes);
+ }
+
+ return flip_bits;
+}
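+
+/*
+ * For example, with the BCH8 scheme (ecc.strength == 8) a sector whose
+ * 512 data bytes and first 13 ECC bytes contain at most eight 0-bits in
+ * total is treated as erased and is rewritten as all 0xFF in the buffers.
+ */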
+
+/**
+ * omap_elm_correct_data - corrects page data area in case error reported
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * The calculated ECC vector is reported as zero for non-error pages.
+ * For a non-zero ECC vector, first filter out erased pages, and then
+ * process the data via the ELM to detect bit-flips.
+ */
+static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_ecc_ctrl *ecc = &info->nand.ecc;
+ int eccsteps = info->nand.ecc.steps;
+ int i , j, stat = 0;
+ int eccflag, actual_eccbytes;
+ struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
+ u_char *ecc_vec = calc_ecc;
+ u_char *spare_ecc = read_ecc;
+ u_char *erased_ecc_vec;
+ u_char *buf;
+ int bitflip_count;
+ bool is_error_reported = false;
+ u32 bit_pos, byte_pos, error_max, pos;
+ int err;
+
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* omit 7th ECC byte reserved for ROM code compatibility */
+ actual_eccbytes = ecc->bytes - 1;
+ erased_ecc_vec = bch4_vector;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* omit 14th ECC byte reserved for ROM code compatibility */
+ actual_eccbytes = ecc->bytes - 1;
+ erased_ecc_vec = bch8_vector;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ actual_eccbytes = ecc->bytes;
+ erased_ecc_vec = bch16_vector;
+ break;
+ default:
+ dev_err(&info->pdev->dev, "invalid driver configuration\n");
+ return -EINVAL;
+ }
+
+ /* Initialize elm error vector to zero */
+ memset(err_vec, 0, sizeof(err_vec));
+
+ for (i = 0; i < eccsteps ; i++) {
+ eccflag = 0; /* initialize eccflag */
+
+ /*
+		 * Check whether any error is reported;
+		 * in case of an error, a non-zero ECC is reported.
+ */
+ for (j = 0; j < actual_eccbytes; j++) {
+ if (calc_ecc[j] != 0) {
+ eccflag = 1; /* non zero ecc, error present */
+ break;
+ }
+ }
+
+ if (eccflag == 1) {
+ if (memcmp(calc_ecc, erased_ecc_vec,
+ actual_eccbytes) == 0) {
+ /*
+				 * calc_ecc[] matches the ECC pattern of an
+				 * all-0xff (erased) page, so this is
+				 * definitely an erased page
+ */
+ } else {
+ buf = &data[info->nand.ecc.size * i];
+ /*
+ * count number of 0-bits in read_buf.
+ * This check can be removed once a similar
+ * check is introduced in generic NAND driver
+ */
+ bitflip_count = erased_sector_bitflips(
+ buf, read_ecc, info);
+ if (bitflip_count) {
+ /*
+ * number of 0-bits within ECC limits
+ * So this may be an erased-page
+ */
+ stat += bitflip_count;
+ } else {
+ /*
+ * Too many 0-bits. It may be a
+ * - programmed-page, OR
+ * - erased-page with many bit-flips
+ * So this page requires check by ELM
+ */
+ err_vec[i].error_reported = true;
+ is_error_reported = true;
+ }
+ }
+ }
+
+ /* Update the ecc vector */
+ calc_ecc += ecc->bytes;
+ read_ecc += ecc->bytes;
+ }
+
+ /* Check if any error reported */
+ if (!is_error_reported)
+ return stat;
+
+ /* Decode BCH error using ELM module */
+ elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+
+ err = 0;
+ for (i = 0; i < eccsteps; i++) {
+ if (err_vec[i].error_uncorrectable) {
+ dev_err(&info->pdev->dev,
+ "uncorrectable bit-flips found\n");
+ err = -EBADMSG;
+ } else if (err_vec[i].error_reported) {
+ for (j = 0; j < err_vec[i].error_count; j++) {
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Add 4 bits to take care of padding */
+ pos = err_vec[i].error_loc[j] +
+ BCH4_BIT_PAD;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ case OMAP_ECC_BCH16_CODE_HW:
+ pos = err_vec[i].error_loc[j];
+ break;
+ default:
+ return -EINVAL;
+ }
+ error_max = (ecc->size + actual_eccbytes) * 8;
+ /* Calculate bit position of error */
+ bit_pos = pos % 8;
+
+ /* Calculate byte position of error */
+ byte_pos = (error_max - pos - 1) / 8;
+
+ if (pos < error_max) {
+ if (byte_pos < 512) {
+ pr_debug("bitflip@dat[%d]=%x\n",
+ byte_pos, data[byte_pos]);
+ data[byte_pos] ^= 1 << bit_pos;
+ } else {
+ pr_debug("bitflip@oob[%d]=%x\n",
+ (byte_pos - 512),
+ spare_ecc[byte_pos - 512]);
+ spare_ecc[byte_pos - 512] ^=
+ 1 << bit_pos;
+ }
+ } else {
+ dev_err(&info->pdev->dev,
+ "invalid bit-flip @ %d:%d\n",
+ byte_pos, bit_pos);
+ err = -EBADMSG;
+ }
+ }
+ }
+
+ /* Update number of correctable errors */
+ stat += err_vec[i].error_count;
+
+ /* Update page data with sector size */
+ data += ecc->size;
+ spare_ecc += ecc->bytes;
+ }
+
+ return (err) ? err : stat;
+}
+
+/**
+ * omap_write_page_bch - BCH ecc based write page function for entire page
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * Custom write page method evolved to support multi-sector writing in one shot
+ */
+static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int i;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ /* Update ecc vector from GPMC result registers */
+ chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* Write ecc vector to OOB area */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/**
+ * omap_read_page_bch - BCH ecc based page read function for entire page
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * For the BCH ECC scheme, the GPMC is used for syndrome calculation and
+ * the ELM module for error correction.
+ * This custom method evolved to support ELM error correction and
+ * multi-sector reading. On a read, the page data area is read along with
+ * the OOB data with the ECC engine enabled; the ECC vector is updated
+ * after the OOB data has been read. For non-error pages the ECC vector
+ * is reported as zero.
+ */
+static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *oob = &chip->oob_poi[eccpos[0]];
+ uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
+ int stat;
+ unsigned int max_bitflips = 0;
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+ /* Read data */
+ chip->read_buf(mtd, buf, mtd->writesize);
+
+ /* Read oob bytes */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
+ chip->read_buf(mtd, oob, chip->ecc.total);
+
+ /* Calculate ecc bytes */
+ chip->ecc.calculate(mtd, buf, ecc_calc);
+
+ memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);
+
+ stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ return max_bitflips;
+}
+
+/**
+ * is_elm_present - checks for presence of ELM module by scanning DT nodes
+ * @info: NAND device structure containing platform data
+ * @elm_node: ELM device tree node from the platform data
+ */
+static bool is_elm_present(struct omap_nand_info *info,
+ struct device_node *elm_node)
+{
+ struct platform_device *pdev;
+
+ /* check whether elm-id is passed via DT */
+ if (!elm_node) {
+ dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
+ return false;
+ }
+ pdev = of_find_device_by_node(elm_node);
+ /* check whether ELM device is registered */
+ if (!pdev) {
+ dev_err(&info->pdev->dev, "ELM device not found\n");
+ return false;
+ }
+ /* ELM module available, now configure it */
+ info->elm_dev = &pdev->dev;
+ return true;
+}
+
+static bool omap2_nand_ecc_check(struct omap_nand_info *info,
+ struct omap_nand_platform_data *pdata)
+{
+ bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
+
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ ecc_needs_omap_bch = false;
+ ecc_needs_bch = true;
+ ecc_needs_elm = false;
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ case OMAP_ECC_BCH16_CODE_HW:
+ ecc_needs_omap_bch = true;
+ ecc_needs_bch = false;
+ ecc_needs_elm = true;
+ break;
+ default:
+ ecc_needs_omap_bch = false;
+ ecc_needs_bch = false;
+ ecc_needs_elm = false;
+ break;
+ }
+
+ if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_BCH)) {
+ dev_err(&info->pdev->dev,
+ "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ return false;
+ }
+ if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
+ dev_err(&info->pdev->dev,
+ "CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ return false;
+ }
+ if (ecc_needs_elm && !is_elm_present(info, pdata->elm_of_node)) {
+ dev_err(&info->pdev->dev, "ELM not available\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct omap_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct nand_ecclayout *ecclayout;
+ int err;
+ int i;
+ dma_cap_mask_t mask;
+ unsigned sig;
+ unsigned oob_index;
+ struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, info);
+
+ info->pdev = pdev;
+ info->gpmc_cs = pdata->cs;
+ info->reg = pdata->reg;
+ info->of_node = pdata->of_node;
+ info->ecc_opt = pdata->ecc_opt;
+ mtd = &info->mtd;
+ mtd->priv = &info->nand;
+ mtd->name = dev_name(&pdev->dev);
+ mtd->owner = THIS_MODULE;
+ nand_chip = &info->nand;
+ nand_chip->ecc.priv = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nand_chip->IO_ADDR_R))
+ return PTR_ERR(nand_chip->IO_ADDR_R);
+
+ info->phys_base = res->start;
+
+ nand_chip->controller = &omap_gpmc_controller;
+
+ nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
+ nand_chip->cmd_ctrl = omap_hwcontrol;
+
+ /*
+	 * If the RDY/BSY line is connected to the OMAP then use the omap ready
+	 * function and the generic nand_wait function, which reads the status
+	 * register after monitoring the RDY/BSY line. Otherwise use a standard
+	 * chip delay, which is slightly more than tR (AC Timing) of the NAND
+	 * device, and read the status register until you get a failure or
+	 * success.
+ */
+ if (pdata->dev_ready) {
+ nand_chip->dev_ready = omap_dev_ready;
+ nand_chip->chip_delay = 0;
+ } else {
+ nand_chip->waitfunc = omap_wait;
+ nand_chip->chip_delay = 50;
+ }
+
+ if (pdata->flash_bbt)
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+ else
+ nand_chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* scan NAND device connected to chip controller */
+ nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16;
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ dev_err(&info->pdev->dev, "scan failed, may be bus-width mismatch\n");
+ err = -ENXIO;
+ goto return_error;
+ }
+
+ /* re-populate low-level callbacks based on xfer modes */
+ switch (pdata->xfer_type) {
+ case NAND_OMAP_PREFETCH_POLLED:
+ nand_chip->read_buf = omap_read_buf_pref;
+ nand_chip->write_buf = omap_write_buf_pref;
+ break;
+
+ case NAND_OMAP_POLLED:
+ /* Use nand_base defaults for {read,write}_buf */
+ break;
+
+ case NAND_OMAP_PREFETCH_DMA:
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ sig = OMAP24XX_DMA_GPMC;
+ info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!info->dma) {
+ dev_err(&pdev->dev, "DMA engine request failed\n");
+ err = -ENXIO;
+ goto return_error;
+ } else {
+ struct dma_slave_config cfg;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = info->phys_base;
+ cfg.dst_addr = info->phys_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 16;
+ cfg.dst_maxburst = 16;
+ err = dmaengine_slave_config(info->dma, &cfg);
+ if (err) {
+ dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+ err);
+ goto return_error;
+ }
+ nand_chip->read_buf = omap_read_buf_dma_pref;
+ nand_chip->write_buf = omap_write_buf_dma_pref;
+ }
+ break;
+
+ case NAND_OMAP_PREFETCH_IRQ:
+ info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+ if (info->gpmc_irq_fifo <= 0) {
+ dev_err(&pdev->dev, "error getting fifo irq\n");
+ err = -ENODEV;
+ goto return_error;
+ }
+ err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-fifo", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ info->gpmc_irq_fifo, err);
+ info->gpmc_irq_fifo = 0;
+ goto return_error;
+ }
+
+ info->gpmc_irq_count = platform_get_irq(pdev, 1);
+ if (info->gpmc_irq_count <= 0) {
+ dev_err(&pdev->dev, "error getting count irq\n");
+ err = -ENODEV;
+ goto return_error;
+ }
+ err = devm_request_irq(&pdev->dev, info->gpmc_irq_count,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-count", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ info->gpmc_irq_count, err);
+ info->gpmc_irq_count = 0;
+ goto return_error;
+ }
+
+ nand_chip->read_buf = omap_read_buf_irq_pref;
+ nand_chip->write_buf = omap_write_buf_irq_pref;
+
+ break;
+
+ default:
+ dev_err(&pdev->dev,
+ "xfer_type(%d) not supported!\n", pdata->xfer_type);
+ err = -EINVAL;
+ goto return_error;
+ }
+
+ if (!omap2_nand_ecc_check(info, pdata)) {
+ err = -EINVAL;
+ goto return_error;
+ }
+
+ /* populate MTD interface based on ECC scheme */
+ ecclayout = &info->oobinfo;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_HAM1_CODE_SW:
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ break;
+
+ case OMAP_ECC_HAM1_CODE_HW:
+ pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.bytes = 3;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.strength = 1;
+ nand_chip->ecc.calculate = omap_calculate_ecc;
+ nand_chip->ecc.hwctl = omap_enable_hwecc;
+ nand_chip->ecc.correct = omap_correct_data;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ if (nand_chip->options & NAND_BUSWIDTH_16)
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ else
+ oob_index = 1;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* no reserved-marker in ecclayout for this ecc-scheme */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ break;
+
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 7;
+ nand_chip->ecc.strength = 4;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = nand_bch_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ /* software bch library is used for locating errors */
+ nand_chip->ecc.priv = nand_bch_init(mtd,
+ nand_chip->ecc.size,
+ nand_chip->ecc.bytes,
+ &ecclayout);
+ if (!nand_chip->ecc.priv) {
+ dev_err(&info->pdev->dev, "unable to use BCH library\n");
+ err = -EINVAL;
+ goto return_error;
+ }
+ break;
+
+ case OMAP_ECC_BCH4_CODE_HW:
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+		/* 8th ECC byte is kept reserved for ROM-code compatibility */
+ nand_chip->ecc.bytes = 7 + 1;
+ nand_chip->ecc.strength = 4;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+
+ err = elm_config(info->elm_dev, BCH4_ECC,
+ info->mtd.writesize / nand_chip->ecc.size,
+ nand_chip->ecc.size, nand_chip->ecc.bytes);
+ if (err < 0)
+ goto return_error;
+ break;
+
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 13;
+ nand_chip->ecc.strength = 8;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = nand_bch_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ /* software bch library is used for locating errors */
+ nand_chip->ecc.priv = nand_bch_init(mtd,
+ nand_chip->ecc.size,
+ nand_chip->ecc.bytes,
+ &ecclayout);
+ if (!nand_chip->ecc.priv) {
+ dev_err(&info->pdev->dev, "unable to use BCH library\n");
+ err = -EINVAL;
+ goto return_error;
+ }
+ break;
+
+ case OMAP_ECC_BCH8_CODE_HW:
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+		/* 14th byte is kept reserved for ROM-code compatibility */
+ nand_chip->ecc.bytes = 13 + 1;
+ nand_chip->ecc.strength = 8;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+
+ err = elm_config(info->elm_dev, BCH8_ECC,
+ info->mtd.writesize / nand_chip->ecc.size,
+ nand_chip->ecc.size, nand_chip->ecc.bytes);
+ if (err < 0)
+ goto return_error;
+
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ break;
+
+ case OMAP_ECC_BCH16_CODE_HW:
+		pr_info("nand: using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 26;
+ nand_chip->ecc.strength = 16;
+ nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
+ nand_chip->ecc.correct = omap_elm_correct_data;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.read_page = omap_read_page_bch;
+ nand_chip->ecc.write_page = omap_write_page_bch;
+
+ err = elm_config(info->elm_dev, BCH16_ECC,
+ info->mtd.writesize / nand_chip->ecc.size,
+ nand_chip->ecc.size, nand_chip->ecc.bytes);
+ if (err < 0)
+ goto return_error;
+
+ /* define ECC layout */
+ ecclayout->eccbytes = nand_chip->ecc.bytes *
+ (mtd->writesize /
+ nand_chip->ecc.size);
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ break;
+ default:
+ dev_err(&info->pdev->dev, "invalid or unsupported ECC scheme\n");
+ err = -EINVAL;
+ goto return_error;
+ }
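+	/*
+	 * Worked example (illustration only, assuming BADBLOCK_MARKER_LENGTH
+	 * is 2): for OMAP_ECC_BCH8_CODE_HW on a 2KiB-page device with 64
+	 * bytes of OOB, ecc.bytes = 14 gives
+	 * ecclayout->eccbytes = 14 * (2048 / 512) = 56, eccpos[] spanning
+	 * OOB bytes 2..57, oobfree->offset = 58 and
+	 * oobfree->length = 64 - 58 = 6.
+	 */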
+
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW)
+ goto scan_tail;
+
+	/* all OOB bytes from oobfree->offset till the end of OOB are free */
+ ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
+ /* check if NAND device's OOB is enough to store ECC signatures */
+ if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
+		dev_err(&info->pdev->dev,
+			"not enough OOB bytes: required = %d, available = %d\n",
+			ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH,
+			mtd->oobsize);
+ err = -EINVAL;
+ goto return_error;
+ }
+ nand_chip->ecc.layout = ecclayout;
+
+scan_tail:
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
+ err = -ENXIO;
+ goto return_error;
+ }
+
+ ppdata.of_node = pdata->of_node;
+ mtd_device_parse_register(mtd, NULL, &ppdata, pdata->parts,
+ pdata->nr_parts);
+
+ platform_set_drvdata(pdev, mtd);
+
+ return 0;
+
+return_error:
+ if (info->dma)
+ dma_release_channel(info->dma);
+ if (nand_chip->ecc.priv) {
+ nand_bch_free(nand_chip->ecc.priv);
+ nand_chip->ecc.priv = NULL;
+ }
+ return err;
+}
+
+static int omap_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nand_chip = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ if (nand_chip->ecc.priv) {
+ nand_bch_free(nand_chip->ecc.priv);
+ nand_chip->ecc.priv = NULL;
+ }
+ if (info->dma)
+ dma_release_channel(info->dma);
+ nand_release(mtd);
+ return 0;
+}
+
+static struct platform_driver omap_nand_driver = {
+ .probe = omap_nand_probe,
+ .remove = omap_nand_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(omap_nand_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/omap_elm.c b/drivers/mtd/nand/omap_elm.c
new file mode 100644
index 000000000..376bfe191
--- /dev/null
+++ b/drivers/mtd/nand/omap_elm.c
@@ -0,0 +1,578 @@
+/*
+ * Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DRIVER_NAME "omap-elm"
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/elm.h>
+
+#define ELM_SYSCONFIG 0x010
+#define ELM_IRQSTATUS 0x018
+#define ELM_IRQENABLE 0x01c
+#define ELM_LOCATION_CONFIG 0x020
+#define ELM_PAGE_CTRL 0x080
+#define ELM_SYNDROME_FRAGMENT_0 0x400
+#define ELM_SYNDROME_FRAGMENT_1 0x404
+#define ELM_SYNDROME_FRAGMENT_2 0x408
+#define ELM_SYNDROME_FRAGMENT_3 0x40c
+#define ELM_SYNDROME_FRAGMENT_4 0x410
+#define ELM_SYNDROME_FRAGMENT_5 0x414
+#define ELM_SYNDROME_FRAGMENT_6 0x418
+#define ELM_LOCATION_STATUS 0x800
+#define ELM_ERROR_LOCATION_0 0x880
+
+/* ELM Interrupt Status Register */
+#define INTR_STATUS_PAGE_VALID BIT(8)
+
+/* ELM Interrupt Enable Register */
+#define INTR_EN_PAGE_MASK BIT(8)
+
+/* ELM Location Configuration Register */
+#define ECC_BCH_LEVEL_MASK 0x3
+
+/* ELM syndrome */
+#define ELM_SYNDROME_VALID BIT(16)
+
+/* ELM_LOCATION_STATUS Register */
+#define ECC_CORRECTABLE_MASK BIT(8)
+#define ECC_NB_ERRORS_MASK 0x1f
+
+/* ELM_ERROR_LOCATION_0-15 Registers */
+#define ECC_ERROR_LOCATION_MASK 0x1fff
+
+#define ELM_ECC_SIZE 0x7ff
+
+#define SYNDROME_FRAGMENT_REG_SIZE 0x40
+#define ERROR_LOCATION_SIZE 0x100
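+
+/*
+ * Illustration: the per-vector register banks are strided, so for
+ * syndrome vector i, fragment j lives at
+ * ELM_SYNDROME_FRAGMENT_j + i * SYNDROME_FRAGMENT_REG_SIZE and its
+ * status/location registers at ELM_LOCATION_STATUS /
+ * ELM_ERROR_LOCATION_0 + i * ERROR_LOCATION_SIZE. For example,
+ * vector 1's fragment 0 sits at 0x400 + 0x40 = 0x440.
+ */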
+
+struct elm_registers {
+ u32 elm_irqenable;
+ u32 elm_sysconfig;
+ u32 elm_location_config;
+ u32 elm_page_ctrl;
+ u32 elm_syndrome_fragment_6[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_5[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_4[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_3[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_2[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_1[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_0[ERROR_VECTOR_MAX];
+};
+
+struct elm_info {
+ struct device *dev;
+ void __iomem *elm_base;
+ struct completion elm_completion;
+ struct list_head list;
+ enum bch_ecc bch_type;
+ struct elm_registers elm_regs;
+ int ecc_steps;
+ int ecc_syndrome_size;
+};
+
+static LIST_HEAD(elm_devices);
+
+static void elm_write_reg(struct elm_info *info, int offset, u32 val)
+{
+ writel(val, info->elm_base + offset);
+}
+
+static u32 elm_read_reg(struct elm_info *info, int offset)
+{
+ return readl(info->elm_base + offset);
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @dev:	ELM device
+ * @bch_type:	type of BCH ecc
+ * @ecc_steps:	number of ECC steps (syndromes) per page
+ * @ecc_step_size:	size of an ECC step in bytes
+ * @ecc_syndrome_size:	size of one ECC syndrome in bytes
+ */
+int elm_config(struct device *dev, enum bch_ecc bch_type,
+ int ecc_steps, int ecc_step_size, int ecc_syndrome_size)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_get_drvdata(dev);
+
+ if (!info) {
+ dev_err(dev, "Unable to configure elm - device not probed?\n");
+ return -EPROBE_DEFER;
+ }
+ /* ELM cannot detect ECC errors for chunks > 1KB */
+ if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) {
+ dev_err(dev, "unsupported config ecc-size=%d\n", ecc_step_size);
+ return -EINVAL;
+ }
+	/* ELM supports processing at most ERROR_VECTOR_MAX (8) syndromes */
+ if (ecc_steps > ERROR_VECTOR_MAX) {
+ dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps);
+ return -EINVAL;
+ }
+
+ reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
+ info->bch_type = bch_type;
+ info->ecc_steps = ecc_steps;
+ info->ecc_syndrome_size = ecc_syndrome_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(elm_config);
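+
+/*
+ * Usage sketch (mirrors the omap2 NAND driver above; illustration, not
+ * part of this API): for a BCH8 scheme with 512-byte steps and 14
+ * syndrome bytes per step, a caller would do something like:
+ *
+ *	err = elm_config(info->elm_dev, BCH8_ECC,
+ *			 mtd->writesize / 512, 512, 14);
+ *	if (err < 0)
+ *		goto out;
+ */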
+
+/**
+ * elm_configure_page_mode - Enable/Disable page mode
+ * @info: elm info
+ * @index: index number of syndrome fragment vector
+ * @enable: enable/disable flag for page mode
+ *
+ * Enable page mode for syndrome fragment index
+ */
+static void elm_configure_page_mode(struct elm_info *info, int index,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
+ if (enable)
+ reg_val |= BIT(index); /* enable page mode */
+ else
+ reg_val &= ~BIT(index); /* disable page mode */
+
+ elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
+}
+
+/**
+ * elm_load_syndrome - Load ELM syndrome reg
+ * @info: elm info
+ * @err_vec: elm error vectors
+ * @ecc: buffer with calculated ecc
+ *
+ * Load syndrome fragment registers with calculated ecc in reverse order.
+ */
+static void elm_load_syndrome(struct elm_info *info,
+ struct elm_errorvec *err_vec, u8 *ecc)
+{
+ int i, offset;
+ u32 val;
+
+ for (i = 0; i < info->ecc_steps; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ elm_configure_page_mode(info, i, true);
+ offset = ELM_SYNDROME_FRAGMENT_0 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ switch (info->bch_type) {
+ case BCH8_ECC:
+ /* syndrome fragment 0 = ecc[9-12B] */
+ val = cpu_to_be32(*(u32 *) &ecc[9]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[5-8B] */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[5]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 2 = ecc[1-4B] */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[1]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 3 = ecc[0B] */
+ offset += 4;
+ val = ecc[0];
+ elm_write_reg(info, offset, val);
+ break;
+ case BCH4_ECC:
+ /* syndrome fragment 0 = ecc[20-52b] bits */
+ val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+ ((ecc[2] & 0xf) << 28);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[0-20b] bits */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+ elm_write_reg(info, offset, val);
+ break;
+ case BCH16_ECC:
+ val = cpu_to_be32(*(u32 *) &ecc[22]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[18]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[14]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[10]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[6]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[2]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
+ elm_write_reg(info, offset, val);
+ break;
+ default:
+ pr_err("invalid config bch_type\n");
+ }
+ }
+
+ /* Update ecc pointer with ecc byte size */
+ ecc += info->ecc_syndrome_size;
+ }
+}
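+
+/*
+ * Illustration of the BCH8 load order above: for syndrome bytes
+ * ecc[0..12] (byte 13 is the reserved pad), fragment 0 takes
+ * ecc[9..12], fragment 1 takes ecc[5..8], fragment 2 takes ecc[1..4]
+ * and fragment 3 takes ecc[0], i.e. the syndrome is fed to the ELM
+ * most-significant bytes first.
+ */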
+
+/**
+ * elm_start_processing - start elm syndrome processing
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * Set syndrome valid bit for syndrome fragment registers for which
+ * elm syndrome fragment registers are loaded. This enables elm module
+ * to start processing syndrome vectors.
+ */
+static void elm_start_processing(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, offset;
+ u32 reg_val;
+
+	/*
+	 * Set the syndrome valid bit so that the ELM module processes
+	 * the vectors for which an error was reported.
+	 */
+ for (i = 0; i < info->ecc_steps; i++) {
+ if (err_vec[i].error_reported) {
+ offset = ELM_SYNDROME_FRAGMENT_6 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+ reg_val |= ELM_SYNDROME_VALID;
+ elm_write_reg(info, offset, reg_val);
+ }
+ }
+}
+
+/**
+ * elm_error_correction - locate correctable error position
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * On completion of processing by the ELM module, the error location
+ * status register is updated with correctable/uncorrectable error
+ * information. For correctable errors, the error count is read from
+ * the ELM location status register and the positions from the ELM
+ * error location registers.
+ */
+static void elm_error_correction(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, j, errors = 0;
+ int offset;
+ u32 reg_val;
+
+ for (i = 0; i < info->ecc_steps; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+
+ /* Check correctable error or not */
+ if (reg_val & ECC_CORRECTABLE_MASK) {
+ offset = ELM_ERROR_LOCATION_0 +
+ ERROR_LOCATION_SIZE * i;
+
+ /* Read count of correctable errors */
+ err_vec[i].error_count = reg_val &
+ ECC_NB_ERRORS_MASK;
+
+ /* Update the error locations in error vector */
+ for (j = 0; j < err_vec[i].error_count; j++) {
+
+ reg_val = elm_read_reg(info, offset);
+ err_vec[i].error_loc[j] = reg_val &
+ ECC_ERROR_LOCATION_MASK;
+
+ /* Update error location register */
+ offset += 4;
+ }
+
+ errors += err_vec[i].error_count;
+ } else {
+ err_vec[i].error_uncorrectable = true;
+ }
+
+ /* Clearing interrupts for processed error vectors */
+ elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
+
+ /* Disable page mode */
+ elm_configure_page_mode(info, i, false);
+ }
+ }
+}
+
+/**
+ * elm_decode_bch_error_page - Locate error position
+ * @dev: device pointer
+ * @ecc_calc: calculated ECC bytes from GPMC
+ * @err_vec: elm error vectors
+ *
+ * Called with one or more error-reported vectors; the caller must set
+ * err_vec[].error_reported for each vector that has an error.
+ */
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ u32 reg_val;
+
+ /* Enable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+ elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
+ elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
+
+ /* Load valid ecc byte to syndrome fragment register */
+ elm_load_syndrome(info, err_vec, ecc_calc);
+
+ /* Enable syndrome processing for which syndrome fragment is updated */
+ elm_start_processing(info, err_vec);
+
+ /* Wait for ELM module to finish locating error correction */
+ wait_for_completion(&info->elm_completion);
+
+ /* Disable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQENABLE);
+ elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
+ elm_error_correction(info, err_vec);
+}
+EXPORT_SYMBOL(elm_decode_bch_error_page);
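+
+/*
+ * Caller sketch (illustration only; flip_bit() is a hypothetical
+ * helper that flips the reported bit in the data buffer):
+ *
+ *	struct elm_errorvec err_vec[ERROR_VECTOR_MAX] = { };
+ *
+ *	err_vec[step].error_reported = true;
+ *	elm_decode_bch_error_page(elm_dev, ecc_calc, err_vec);
+ *	for (j = 0; j < err_vec[step].error_count; j++)
+ *		flip_bit(data, err_vec[step].error_loc[j]);
+ */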
+
+static irqreturn_t elm_isr(int this_irq, void *dev_id)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_id;
+
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+
+ /* All error vectors processed */
+ if (reg_val & INTR_STATUS_PAGE_VALID) {
+ elm_write_reg(info, ELM_IRQSTATUS,
+ reg_val & INTR_STATUS_PAGE_VALID);
+ complete(&info->elm_completion);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int elm_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res, *irq;
+ struct elm_info *info;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource defined\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->elm_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->elm_base))
+ return PTR_ERR(info->elm_base);
+
+ ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
+ pdev->name, info);
+ if (ret) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", irq->start);
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ if (pm_runtime_get_sync(&pdev->dev) < 0) {
+ ret = -EINVAL;
+ pm_runtime_disable(&pdev->dev);
+ dev_err(&pdev->dev, "can't enable clock\n");
+ return ret;
+ }
+
+ init_completion(&info->elm_completion);
+ INIT_LIST_HEAD(&info->list);
+ list_add(&info->list, &elm_devices);
+ platform_set_drvdata(pdev, info);
+ return ret;
+}
+
+static int elm_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * elm_context_save
+ * saves the ELM configuration so it can be restored after the hardware
+ * is powered down
+ */
+static int elm_context_save(struct elm_info *info)
+{
+ struct elm_registers *regs = &info->elm_regs;
+ enum bch_ecc bch_type = info->bch_type;
+ u32 offset = 0, i;
+
+ regs->elm_irqenable = elm_read_reg(info, ELM_IRQENABLE);
+ regs->elm_sysconfig = elm_read_reg(info, ELM_SYSCONFIG);
+ regs->elm_location_config = elm_read_reg(info, ELM_LOCATION_CONFIG);
+ regs->elm_page_ctrl = elm_read_reg(info, ELM_PAGE_CTRL);
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+ switch (bch_type) {
+ case BCH16_ECC:
+ regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_6 + offset);
+ regs->elm_syndrome_fragment_5[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_5 + offset);
+ regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_4 + offset);
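+			/* fall through */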
+ case BCH8_ECC:
+ regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_3 + offset);
+ regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_2 + offset);
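+			/* fall through */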
+ case BCH4_ECC:
+ regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_1 + offset);
+ regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_0 + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+		/*
+		 * The ELM SYNDROME_VALID bit in SYNDROME_FRAGMENT_6[] needs
+		 * to be saved for all BCH schemes.
+		 */
+ regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_6 + offset);
+ }
+ return 0;
+}
+
+/**
+ * elm_context_restore
+ * writes the configuration saved during power-down back into the ELM
+ * registers
+ */
+static int elm_context_restore(struct elm_info *info)
+{
+ struct elm_registers *regs = &info->elm_regs;
+ enum bch_ecc bch_type = info->bch_type;
+ u32 offset = 0, i;
+
+ elm_write_reg(info, ELM_IRQENABLE, regs->elm_irqenable);
+ elm_write_reg(info, ELM_SYSCONFIG, regs->elm_sysconfig);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, regs->elm_location_config);
+ elm_write_reg(info, ELM_PAGE_CTRL, regs->elm_page_ctrl);
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+ switch (bch_type) {
+ case BCH16_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+ regs->elm_syndrome_fragment_6[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset,
+ regs->elm_syndrome_fragment_5[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
+ regs->elm_syndrome_fragment_4[i]);
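+			/* fall through */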
+ case BCH8_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
+ regs->elm_syndrome_fragment_3[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
+ regs->elm_syndrome_fragment_2[i]);
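+			/* fall through */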
+ case BCH4_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
+ regs->elm_syndrome_fragment_1[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
+ regs->elm_syndrome_fragment_0[i]);
+ break;
+ default:
+ return -EINVAL;
+ }
+		/* ELM_SYNDROME_VALID bit must be set last to trigger the FSM */
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+ regs->elm_syndrome_fragment_6[i] &
+ ELM_SYNDROME_VALID);
+ }
+ return 0;
+}
+
+static int elm_suspend(struct device *dev)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ elm_context_save(info);
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int elm_resume(struct device *dev)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ pm_runtime_get_sync(dev);
+ elm_context_restore(info);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id elm_of_match[] = {
+ { .compatible = "ti,am3352-elm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, elm_of_match);
+#endif
+
+static struct platform_driver elm_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(elm_of_match),
+ .pm = &elm_pm_ops,
+ },
+ .probe = elm_probe,
+ .remove = elm_remove,
+};
+
+module_platform_driver(elm_driver);
+
+MODULE_DESCRIPTION("ELM driver for BCH error correction");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_ALIAS("platform: elm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
new file mode 100644
index 000000000..c3c6d305c
--- /dev/null
+++ b/drivers/mtd/nand/orion_nand.c
@@ -0,0 +1,219 @@
+/*
+ * drivers/mtd/nand/orion_nand.c
+ *
+ * NAND support for Marvell Orion SoC platforms
+ *
+ * Tzachi Perelstein <tzachi@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <asm/sizes.h>
+#include <linux/platform_data/mtd-orion_nand.h>
+
+static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *nc = mtd->priv;
+ struct orion_nand_data *board = nc->priv;
+ u32 offs;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ offs = (1 << board->cle);
+ else if (ctrl & NAND_ALE)
+ offs = (1 << board->ale);
+ else
+ return;
+
+ if (nc->options & NAND_BUSWIDTH_16)
+ offs <<= 1;
+
+ writeb(cmd, nc->IO_ADDR_W + offs);
+}
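+
+/*
+ * Worked example: with the DT defaults below (cle = 0, ale = 1), a
+ * command byte is written to io_base + (1 << 0) = io_base + 1 and an
+ * address byte to io_base + (1 << 1) = io_base + 2; on a 16-bit bus
+ * both offsets are doubled.
+ */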
+
+static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *io_base = chip->IO_ADDR_R;
+ uint64_t *buf64;
+ int i = 0;
+
+ while (len && (unsigned long)buf & 7) {
+ *buf++ = readb(io_base);
+ len--;
+ }
+ buf64 = (uint64_t *)buf;
+ while (i < len/8) {
+ /*
+ * Since GCC has no proper constraint (PR 43518)
+ * force x variable to r2/r3 registers as ldrd instruction
+ * requires first register to be even.
+ */
+ register uint64_t x asm ("r2");
+
+ asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
+ buf64[i++] = x;
+ }
+ i *= 8;
+ while (i < len)
+ buf[i++] = readb(io_base);
+}
+
+static int __init orion_nand_probe(struct platform_device *pdev)
+{
+ struct mtd_info *mtd;
+ struct mtd_part_parser_data ppdata = {};
+ struct nand_chip *nc;
+ struct orion_nand_data *board;
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *io_base;
+ int ret = 0;
+ u32 val = 0;
+
+ nc = devm_kzalloc(&pdev->dev,
+ sizeof(struct nand_chip) + sizeof(struct mtd_info),
+ GFP_KERNEL);
+ if (!nc)
+ return -ENOMEM;
+ mtd = (struct mtd_info *)(nc + 1);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ io_base = devm_ioremap_resource(&pdev->dev, res);
+
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ if (pdev->dev.of_node) {
+ board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
+ GFP_KERNEL);
+ if (!board)
+ return -ENOMEM;
+ if (!of_property_read_u32(pdev->dev.of_node, "cle", &val))
+ board->cle = (u8)val;
+ else
+ board->cle = 0;
+ if (!of_property_read_u32(pdev->dev.of_node, "ale", &val))
+ board->ale = (u8)val;
+ else
+ board->ale = 1;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "bank-width", &val))
+ board->width = (u8)val * 8;
+ else
+ board->width = 8;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "chip-delay", &val))
+ board->chip_delay = (u8)val;
+ } else {
+ board = dev_get_platdata(&pdev->dev);
+ }
+
+ mtd->priv = nc;
+ mtd->owner = THIS_MODULE;
+
+ nc->priv = board;
+ nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
+ nc->cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->read_buf = orion_nand_read_buf;
+ nc->ecc.mode = NAND_ECC_SOFT;
+
+ if (board->chip_delay)
+ nc->chip_delay = board->chip_delay;
+
+ WARN(board->width > 16,
+ "%d bit bus width out of range",
+ board->width);
+
+ if (board->width == 16)
+ nc->options |= NAND_BUSWIDTH_16;
+
+ if (board->dev_ready)
+ nc->dev_ready = board->dev_ready;
+
+ platform_set_drvdata(pdev, mtd);
+
+	/*
+	 * Not all platforms can gate the clock, so it is not an error if
+	 * the clock does not exist.
+	 */
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+
+ if (nand_scan(mtd, 1)) {
+ ret = -ENXIO;
+ goto no_dev;
+ }
+
+ mtd->name = "orion_nand";
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata,
+ board->parts, board->nr_parts);
+ if (ret) {
+ nand_release(mtd);
+ goto no_dev;
+ }
+
+ return 0;
+
+no_dev:
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
+
+ return ret;
+}
+
+static int orion_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct clk *clk;
+
+ nand_release(mtd);
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id orion_nand_of_match_table[] = {
+ { .compatible = "marvell,orion-nand", },
+ {},
+};
+#endif
+
+static struct platform_driver orion_nand_driver = {
+ .remove = orion_nand_remove,
+ .driver = {
+ .name = "orion_nand",
+ .of_match_table = of_match_ptr(orion_nand_of_match_table),
+ },
+};
+
+module_platform_driver_probe(orion_nand_driver, orion_nand_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_DESCRIPTION("NAND glue for Orion platforms");
+MODULE_ALIAS("platform:orion_nand");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
new file mode 100644
index 000000000..66c345b42
--- /dev/null
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip NAND flash interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#undef DEBUG
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+
+#define LBICTRL_LPCCTL_NR 0x00004000
+#define CLE_PIN_CTL 15
+#define ALE_PIN_CTL 14
+
+static unsigned int lpcctl;
+static struct mtd_info *pasemi_nand_mtd;
+static const char driver_name[] = "pasemi-nand";
+
+static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ while (len > 0x800) {
+ memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_fromio(buf, chip->IO_ADDR_R, len);
+}
+
+static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ while (len > 0x800) {
+ memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_toio(chip->IO_ADDR_R, buf, len);
+}
+
+static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
+ else
+ out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
+
+ /* Push out posted writes */
+ eieio();
+ inl(lpcctl);
+}
+
+int pasemi_device_ready(struct mtd_info *mtd)
+{
+ return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
+}
+
+static int pasemi_nand_probe(struct platform_device *ofdev)
+{
+ struct pci_dev *pdev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct resource res;
+ struct nand_chip *chip;
+ int err = 0;
+
+ err = of_address_to_resource(np, 0, &res);
+
+ if (err)
+ return -EINVAL;
+
+ /* We only support one device at the moment */
+ if (pasemi_nand_mtd)
+ return -ENODEV;
+
+ pr_debug("pasemi_nand at %pR\n", &res);
+
+ /* Allocate memory for MTD device structure and private data */
+ pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip), GFP_KERNEL);
+ if (!pasemi_nand_mtd) {
+ printk(KERN_WARNING
+ "Unable to allocate PASEMI NAND MTD device structure\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Get pointer to private data */
+ chip = (struct nand_chip *)&pasemi_nand_mtd[1];
+
+ /* Link the private data with the MTD structure */
+ pasemi_nand_mtd->priv = chip;
+ pasemi_nand_mtd->owner = THIS_MODULE;
+
+ chip->IO_ADDR_R = of_iomap(np, 0);
+ chip->IO_ADDR_W = chip->IO_ADDR_R;
+
+ if (!chip->IO_ADDR_R) {
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
+ if (!pdev) {
+ err = -ENODEV;
+ goto out_ior;
+ }
+
+ lpcctl = pci_resource_start(pdev, 0);
+ pci_dev_put(pdev);
+
+ if (!request_region(lpcctl, 4, driver_name)) {
+ err = -EBUSY;
+ goto out_ior;
+ }
+
+ chip->cmd_ctrl = pasemi_hwcontrol;
+ chip->dev_ready = pasemi_device_ready;
+ chip->read_buf = pasemi_read_buf;
+ chip->write_buf = pasemi_write_buf;
+ chip->chip_delay = 0;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Enable the following for a flash based bad block table */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(pasemi_nand_mtd, 1)) {
+ err = -ENXIO;
+ goto out_lpc;
+ }
+
+ if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
+ printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
+ err = -ENODEV;
+ goto out_lpc;
+ }
+
+ printk(KERN_INFO "PA Semi NAND flash at %08llx, control at I/O %x\n",
+ res.start, lpcctl);
+
+ return 0;
+
+ out_lpc:
+ release_region(lpcctl, 4);
+ out_ior:
+ iounmap(chip->IO_ADDR_R);
+ out_mtd:
+ kfree(pasemi_nand_mtd);
+ out:
+ return err;
+}
+
+static int pasemi_nand_remove(struct platform_device *ofdev)
+{
+ struct nand_chip *chip;
+
+ if (!pasemi_nand_mtd)
+ return 0;
+
+ chip = pasemi_nand_mtd->priv;
+
+ /* Release resources, unregister device */
+ nand_release(pasemi_nand_mtd);
+
+ release_region(lpcctl, 4);
+
+ iounmap(chip->IO_ADDR_R);
+
+ /* Free the MTD device structure */
+ kfree(pasemi_nand_mtd);
+
+ pasemi_nand_mtd = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id pasemi_nand_match[] =
+{
+ {
+ .compatible = "pasemi,localbus-nand",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, pasemi_nand_match);
+
+static struct platform_driver pasemi_nand_driver =
+{
+ .driver = {
+ .name = driver_name,
+ .of_match_table = pasemi_nand_match,
+ },
+ .probe = pasemi_nand_probe,
+ .remove = pasemi_nand_remove,
+};
+
+module_platform_driver(pasemi_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
new file mode 100644
index 000000000..4535c263f
--- /dev/null
+++ b/drivers/mtd/nand/plat_nand.c
@@ -0,0 +1,150 @@
+/*
+ * Generic NAND driver
+ *
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+struct plat_nand_data {
+ struct nand_chip chip;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+};
+
+static const char *part_probe_types[] = { "cmdlinepart", NULL };
+
+/*
+ * Probe for the NAND device.
+ */
+static int plat_nand_probe(struct platform_device *pdev)
+{
+ struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mtd_part_parser_data ppdata;
+ struct plat_nand_data *data;
+ struct resource *res;
+ const char **part_types;
+ int err = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_nand_data is missing\n");
+ return -EINVAL;
+ }
+
+ if (pdata->chip.nr_chips < 1) {
+ dev_err(&pdev->dev, "invalid number of chips specified\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->io_base))
+ return PTR_ERR(data->io_base);
+
+	/* store the driver data itself, not the address of the local pointer */
+	data->chip.priv = data;
+ data->mtd.priv = &data->chip;
+ data->mtd.owner = THIS_MODULE;
+ data->mtd.name = dev_name(&pdev->dev);
+
+ data->chip.IO_ADDR_R = data->io_base;
+ data->chip.IO_ADDR_W = data->io_base;
+ data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
+ data->chip.dev_ready = pdata->ctrl.dev_ready;
+ data->chip.select_chip = pdata->ctrl.select_chip;
+ data->chip.write_buf = pdata->ctrl.write_buf;
+ data->chip.read_buf = pdata->ctrl.read_buf;
+ data->chip.read_byte = pdata->ctrl.read_byte;
+ data->chip.chip_delay = pdata->chip.chip_delay;
+ data->chip.options |= pdata->chip.options;
+ data->chip.bbt_options |= pdata->chip.bbt_options;
+
+ data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
+ data->chip.ecc.layout = pdata->chip.ecclayout;
+ data->chip.ecc.mode = NAND_ECC_SOFT;
+
+ platform_set_drvdata(pdev, data);
+
+ /* Handle any platform specific setup */
+ if (pdata->ctrl.probe) {
+ err = pdata->ctrl.probe(pdev);
+ if (err)
+ goto out;
+ }
+
+ /* Scan to find existence of the device */
+ if (nand_scan(&data->mtd, pdata->chip.nr_chips)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ part_types = pdata->chip.part_probe_types ? : part_probe_types;
+
+ ppdata.of_node = pdev->dev.of_node;
+ err = mtd_device_parse_register(&data->mtd, part_types, &ppdata,
+ pdata->chip.partitions,
+ pdata->chip.nr_partitions);
+
+ if (!err)
+ return err;
+
+ nand_release(&data->mtd);
+out:
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
+ return err;
+}
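+
+/*
+ * Board-file sketch (illustration only; board_nand_cmd_ctrl() and
+ * board_nand_dev_ready() are hypothetical board-specific callbacks):
+ *
+ *	static struct platform_nand_data board_nand_data = {
+ *		.chip = {
+ *			.nr_chips	= 1,
+ *			.chip_delay	= 30,
+ *		},
+ *		.ctrl = {
+ *			.cmd_ctrl	= board_nand_cmd_ctrl,
+ *			.dev_ready	= board_nand_dev_ready,
+ *		},
+ *	};
+ */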
+
+/*
+ * Remove a NAND device.
+ */
+static int plat_nand_remove(struct platform_device *pdev)
+{
+ struct plat_nand_data *data = platform_get_drvdata(pdev);
+ struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+
+ nand_release(&data->mtd);
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id plat_nand_match[] = {
+ { .compatible = "gen_nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, plat_nand_match);
+
+static struct platform_driver plat_nand_driver = {
+ .probe = plat_nand_probe,
+ .remove = plat_nand_remove,
+ .driver = {
+ .name = "gen_nand",
+ .of_match_table = plat_nand_match,
+ },
+};
+
+module_platform_driver(plat_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Wool");
+MODULE_DESCRIPTION("Simple generic NAND driver");
+MODULE_ALIAS("platform:gen_nand");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
new file mode 100644
index 000000000..a4615fcc3
--- /dev/null
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -0,0 +1,1940 @@
+/*
+ * drivers/mtd/nand/pxa3xx_nand.c
+ *
+ * Copyright © 2005 Intel Corporation
+ * Copyright © 2006 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
+
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
+#define ARCH_HAS_DMA
+#endif
+
+#ifdef ARCH_HAS_DMA
+#include <mach/dma.h>
+#endif
+
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
+
+#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
+#define NAND_STOP_DELAY msecs_to_jiffies(40)
+#define PAGE_CHUNK_SIZE (2048)
+
+/*
+ * Define a buffer size for the initial command that detects the flash device:
+ * STATUS, READID and PARAM. The largest of these is the PARAM command,
+ * needing 256 bytes.
+ */
+#define INIT_BUFFER_SIZE 256
+
+/* registers and bit definitions */
+#define NDCR (0x00) /* Control register */
+#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
+#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
+#define NDSR (0x14) /* Status Register */
+#define NDPCR (0x18) /* Page Count Register */
+#define NDBDR0 (0x1C) /* Bad Block Register 0 */
+#define NDBDR1 (0x20) /* Bad Block Register 1 */
+#define NDECCCTRL (0x28) /* ECC control */
+#define NDDB (0x40) /* Data Buffer */
+#define NDCB0 (0x48) /* Command Buffer0 */
+#define NDCB1 (0x4C) /* Command Buffer1 */
+#define NDCB2 (0x50) /* Command Buffer2 */
+
+#define NDCR_SPARE_EN (0x1 << 31)
+#define NDCR_ECC_EN (0x1 << 30)
+#define NDCR_DMA_EN (0x1 << 29)
+#define NDCR_ND_RUN (0x1 << 28)
+#define NDCR_DWIDTH_C (0x1 << 27)
+#define NDCR_DWIDTH_M (0x1 << 26)
+#define NDCR_PAGE_SZ (0x1 << 24)
+#define NDCR_NCSX (0x1 << 23)
+#define NDCR_ND_MODE (0x3 << 21)
+#define NDCR_NAND_MODE (0x0)
+#define NDCR_CLR_PG_CNT (0x1 << 20)
+#define NDCR_STOP_ON_UNCOR (0x1 << 19)
+#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
+#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
+
+#define NDCR_RA_START (0x1 << 15)
+#define NDCR_PG_PER_BLK (0x1 << 14)
+#define NDCR_ND_ARB_EN (0x1 << 12)
+#define NDCR_INT_MASK (0xFFF)
+
+#define NDSR_MASK (0xfff)
+#define NDSR_ERR_CNT_OFF (16)
+#define NDSR_ERR_CNT_MASK (0x1f)
+#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
+#define NDSR_RDY (0x1 << 12)
+#define NDSR_FLASH_RDY (0x1 << 11)
+#define NDSR_CS0_PAGED (0x1 << 10)
+#define NDSR_CS1_PAGED (0x1 << 9)
+#define NDSR_CS0_CMDD (0x1 << 8)
+#define NDSR_CS1_CMDD (0x1 << 7)
+#define NDSR_CS0_BBD (0x1 << 6)
+#define NDSR_CS1_BBD (0x1 << 5)
+#define NDSR_UNCORERR (0x1 << 4)
+#define NDSR_CORERR (0x1 << 3)
+#define NDSR_WRDREQ (0x1 << 2)
+#define NDSR_RDDREQ (0x1 << 1)
+#define NDSR_WRCMDREQ (0x1)
+
+#define NDCB0_LEN_OVRD (0x1 << 28)
+#define NDCB0_ST_ROW_EN (0x1 << 26)
+#define NDCB0_AUTO_RS (0x1 << 25)
+#define NDCB0_CSEL (0x1 << 24)
+#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
+#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
+#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
+#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
+#define NDCB0_NC (0x1 << 20)
+#define NDCB0_DBC (0x1 << 19)
+#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
+#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
+#define NDCB0_CMD2_MASK (0xff << 8)
+#define NDCB0_CMD1_MASK (0xff)
+#define NDCB0_ADDR_CYC_SHIFT (16)
+
+#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
+#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
+#define EXT_CMD_TYPE_READ 4 /* Read */
+#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
+#define EXT_CMD_TYPE_FINAL 3 /* Final command */
+#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
+#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
+
+/* macros for registers read/write */
+#define nand_writel(info, off, val) \
+ writel_relaxed((val), (info)->mmio_base + (off))
+
+#define nand_readl(info, off) \
+ readl_relaxed((info)->mmio_base + (off))
+
+/* error code and state */
+enum {
+ ERR_NONE = 0,
+ ERR_DMABUSERR = -1,
+ ERR_SENDCMD = -2,
+ ERR_UNCORERR = -3,
+ ERR_BBERR = -4,
+ ERR_CORERR = -5,
+};
+
+enum {
+ STATE_IDLE = 0,
+ STATE_PREPARED,
+ STATE_CMD_HANDLE,
+ STATE_DMA_READING,
+ STATE_DMA_WRITING,
+ STATE_DMA_DONE,
+ STATE_PIO_READING,
+ STATE_PIO_WRITING,
+ STATE_CMD_DONE,
+ STATE_READY,
+};
+
+enum pxa3xx_nand_variant {
+ PXA3XX_NAND_VARIANT_PXA,
+ PXA3XX_NAND_VARIANT_ARMADA370,
+};
+
+struct pxa3xx_nand_host {
+ struct nand_chip chip;
+ struct mtd_info *mtd;
+ void *info_data;
+
+ /* page size of attached chip */
+ int use_ecc;
+ int cs;
+
+ /* calculated from pxa3xx_nand_flash data */
+ unsigned int col_addr_cycles;
+ unsigned int row_addr_cycles;
+ size_t read_id_bytes;
+
+};
+
+struct pxa3xx_nand_info {
+ struct nand_hw_control controller;
+ struct platform_device *pdev;
+
+ struct clk *clk;
+ void __iomem *mmio_base;
+ unsigned long mmio_phys;
+ struct completion cmd_complete, dev_ready;
+
+ unsigned int buf_start;
+ unsigned int buf_count;
+ unsigned int buf_size;
+ unsigned int data_buff_pos;
+ unsigned int oob_buff_pos;
+
+ /* DMA information */
+ int drcmr_dat;
+ int drcmr_cmd;
+
+ unsigned char *data_buff;
+ unsigned char *oob_buff;
+ dma_addr_t data_buff_phys;
+ int data_dma_ch;
+ struct pxa_dma_desc *data_desc;
+ dma_addr_t data_desc_addr;
+
+ struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
+ unsigned int state;
+
+ /*
+ * This driver supports NFCv1 (as found in PXA SoC)
+ * and NFCv2 (as found in Armada 370/XP SoC).
+ */
+ enum pxa3xx_nand_variant variant;
+
+ int cs;
+ int use_ecc; /* use HW ECC ? */
+ int ecc_bch; /* using BCH ECC? */
+ int use_dma; /* use DMA ? */
+ int use_spare; /* use spare ? */
+ int need_wait;
+
+ unsigned int data_size; /* data to be read from FIFO */
+ unsigned int chunk_size; /* split commands chunk size */
+ unsigned int oob_size;
+ unsigned int spare_size;
+ unsigned int ecc_size;
+ unsigned int ecc_err_cnt;
+ unsigned int max_bitflips;
+ int retcode;
+
+ /* cached register value */
+ uint32_t reg_ndcr;
+ uint32_t ndtr0cs0;
+ uint32_t ndtr1cs0;
+
+ /* generated NDCBx register values */
+ uint32_t ndcb0;
+ uint32_t ndcb1;
+ uint32_t ndcb2;
+ uint32_t ndcb3;
+};
+
+static bool use_dma = 1;
+module_param(use_dma, bool, 0444);
+MODULE_PARM_DESC(use_dma, "enable DMA for data transfer to/from the NAND HW");
+
+static struct pxa3xx_nand_timing timing[] = {
+ { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
+ { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
+ { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
+ { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
+};
+
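+/*
+ * Columns below (matching struct pxa3xx_nand_flash; inferred from the
+ * initializers, not documented here): name, chip id, pages per block,
+ * page size, flash bus width, DFC bus width, number of blocks, timing.
+ */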
+static struct pxa3xx_nand_flash builtin_flash_types[] = {
+{ "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
+{ "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
+{ "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
+{ "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
+{ "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
+{ "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
+{ "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
+{ "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
+{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
+};
+
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+
+static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
+ .eccbytes = 32,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { {2, 30} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
+ .eccbytes = 64,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127},
+ /* Bootrom looks in bytes 0 & 5 for bad blocks */
+ .oobfree = { {6, 26}, { 64, 32} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
+ .eccbytes = 128,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { }
+};
+
+/* Default flash type setting, used only for flash detection */
+#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
+
+#define NDTR0_tCH(c) (min((c), 7) << 19)
+#define NDTR0_tCS(c) (min((c), 7) << 16)
+#define NDTR0_tWH(c) (min((c), 7) << 11)
+#define NDTR0_tWP(c) (min((c), 7) << 8)
+#define NDTR0_tRH(c) (min((c), 7) << 3)
+#define NDTR0_tRP(c) (min((c), 7) << 0)
+
+#define NDTR1_tR(c) (min((c), 65535) << 16)
+#define NDTR1_tWHR(c) (min((c), 15) << 4)
+#define NDTR1_tAR(c) (min((c), 15) << 0)
+
+/* convert nano-seconds to nand flash controller clock cycles */
+#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
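+
+/*
+ * Worked example: ns2cycle(25, 104000000) = 25 * (104000000 / 1000000)
+ * / 1000 = 25 * 104 / 1000 = 2 cycles; the integer division rounds
+ * down.
+ */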
+
+static const struct of_device_id pxa3xx_nand_dt_ids[] = {
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_PXA,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static enum pxa3xx_nand_variant
+pxa3xx_nand_get_variant(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+ if (!of_id)
+ return PXA3XX_NAND_VARIANT_PXA;
+ return (enum pxa3xx_nand_variant)of_id->data;
+}
+
+static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
+ const struct pxa3xx_nand_timing *t)
+{
+ struct pxa3xx_nand_info *info = host->info_data;
+ unsigned long nand_clk = clk_get_rate(info->clk);
+ uint32_t ndtr0, ndtr1;
+
+ ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
+ NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
+ NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
+ NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
+ NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
+ NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
+
+ ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
+ NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
+ NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
+
+ info->ndtr0cs0 = ndtr0;
+ info->ndtr1cs0 = ndtr1;
+ nand_writel(info, NDTR0CS0, ndtr0);
+ nand_writel(info, NDTR1CS0, ndtr1);
+}
+
+/*
+ * Set the data and OOB size, depending on the selected
+ * spare and ECC configuration.
+ * Only applicable to READ0, READOOB and PAGEPROG commands.
+ */
+static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
+ struct mtd_info *mtd)
+{
+ int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
+
+ info->data_size = mtd->writesize;
+ if (!oob_enable)
+ return;
+
+ info->oob_size = info->spare_size;
+ if (!info->use_ecc)
+ info->oob_size += info->ecc_size;
+}
+
+/*
+ * NOTE: ND_RUN must be set first and the command buffer written
+ * afterwards, otherwise the controller does not start. All interrupts
+ * are enabled at the same time, and pxa3xx_nand_irq() handles the logic.
+ */
+static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
+{
+ uint32_t ndcr;
+
+ ndcr = info->reg_ndcr;
+
+ if (info->use_ecc) {
+ ndcr |= NDCR_ECC_EN;
+ if (info->ecc_bch)
+ nand_writel(info, NDECCCTRL, 0x1);
+ } else {
+ ndcr &= ~NDCR_ECC_EN;
+ if (info->ecc_bch)
+ nand_writel(info, NDECCCTRL, 0x0);
+ }
+
+ if (info->use_dma)
+ ndcr |= NDCR_DMA_EN;
+ else
+ ndcr &= ~NDCR_DMA_EN;
+
+ if (info->use_spare)
+ ndcr |= NDCR_SPARE_EN;
+ else
+ ndcr &= ~NDCR_SPARE_EN;
+
+ ndcr |= NDCR_ND_RUN;
+
+ /* clear status bits and run */
+ nand_writel(info, NDCR, 0);
+ nand_writel(info, NDSR, NDSR_MASK);
+ nand_writel(info, NDCR, ndcr);
+}
+
+static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
+{
+ uint32_t ndcr;
+ int timeout = NAND_STOP_DELAY;
+
+	/* wait for the RUN bit in NDCR to become 0 */
+ ndcr = nand_readl(info, NDCR);
+ while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
+ ndcr = nand_readl(info, NDCR);
+ udelay(1);
+ }
+
+ if (timeout <= 0) {
+ ndcr &= ~NDCR_ND_RUN;
+ nand_writel(info, NDCR, ndcr);
+ }
+ /* clear status bits */
+ nand_writel(info, NDSR, NDSR_MASK);
+}
+
+static void __maybe_unused
+enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+{
+ uint32_t ndcr;
+
+ ndcr = nand_readl(info, NDCR);
+ nand_writel(info, NDCR, ndcr & ~int_mask);
+}
+
+static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+{
+ uint32_t ndcr;
+
+ ndcr = nand_readl(info, NDCR);
+ nand_writel(info, NDCR, ndcr | int_mask);
+}
+
+static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
+{
+ if (info->ecc_bch) {
+ int timeout;
+
+ /*
+ * According to the datasheet, when reading from NDDB
+		 * with BCH enabled, after each 32-byte read, we have to
+		 * make sure that the NDSR.RDDREQ bit is set.
+		 *
+		 * Drain the FIFO, eight 32-bit reads at a time, and skip
+ * the polling on the last read.
+ */
+ while (len > 8) {
+ __raw_readsl(info->mmio_base + NDDB, data, 8);
+
+ for (timeout = 0;
+ !(nand_readl(info, NDSR) & NDSR_RDDREQ);
+ timeout++) {
+ if (timeout >= 5) {
+ dev_err(&info->pdev->dev,
+ "Timeout on RDDREQ while draining the FIFO\n");
+ return;
+ }
+
+ mdelay(1);
+ }
+
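+			/* len counts 32-bit words: 8 words = 32 bytes drained */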
+ data += 32;
+ len -= 8;
+ }
+ }
+
+ __raw_readsl(info->mmio_base + NDDB, data, len);
+}
+
+static void handle_data_pio(struct pxa3xx_nand_info *info)
+{
+ unsigned int do_bytes = min(info->data_size, info->chunk_size);
+
+ switch (info->state) {
+ case STATE_PIO_WRITING:
+ __raw_writesl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(do_bytes, 4));
+
+ if (info->oob_size > 0)
+ __raw_writesl(info->mmio_base + NDDB,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->oob_size, 4));
+ break;
+ case STATE_PIO_READING:
+ drain_fifo(info,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(do_bytes, 4));
+
+ if (info->oob_size > 0)
+ drain_fifo(info,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->oob_size, 4));
+ break;
+ default:
+ dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
+ info->state);
+ BUG();
+ }
+
+ /* Update buffer pointers for multi-page read/write */
+ info->data_buff_pos += do_bytes;
+ info->oob_buff_pos += info->oob_size;
+ info->data_size -= do_bytes;
+}
+
+#ifdef ARCH_HAS_DMA
+static void start_data_dma(struct pxa3xx_nand_info *info)
+{
+ struct pxa_dma_desc *desc = info->data_desc;
+ int dma_len = ALIGN(info->data_size + info->oob_size, 32);
+
+ desc->ddadr = DDADR_STOP;
+ desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
+
+ switch (info->state) {
+ case STATE_DMA_WRITING:
+ desc->dsadr = info->data_buff_phys;
+ desc->dtadr = info->mmio_phys + NDDB;
+ desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
+ break;
+ case STATE_DMA_READING:
+ desc->dtadr = info->data_buff_phys;
+ desc->dsadr = info->mmio_phys + NDDB;
+ desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
+ break;
+ default:
+ dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
+ info->state);
+ BUG();
+ }
+
+ DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
+ DDADR(info->data_dma_ch) = info->data_desc_addr;
+ DCSR(info->data_dma_ch) |= DCSR_RUN;
+}
+
+static void pxa3xx_nand_data_dma_irq(int channel, void *data)
+{
+ struct pxa3xx_nand_info *info = data;
+ uint32_t dcsr;
+
+ dcsr = DCSR(channel);
+ DCSR(channel) = dcsr;
+
+ if (dcsr & DCSR_BUSERR) {
+ info->retcode = ERR_DMABUSERR;
+ }
+
+ info->state = STATE_DMA_DONE;
+ enable_int(info, NDCR_INT_MASK);
+ nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+}
+#else
+static void start_data_dma(struct pxa3xx_nand_info *info)
+{}
+#endif
+
+static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
+{
+ struct pxa3xx_nand_info *info = data;
+
+ handle_data_pio(info);
+
+ info->state = STATE_CMD_DONE;
+ nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
+{
+ struct pxa3xx_nand_info *info = devid;
+ unsigned int status, is_completed = 0, is_ready = 0;
+ unsigned int ready, cmd_done;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ if (info->cs == 0) {
+ ready = NDSR_FLASH_RDY;
+ cmd_done = NDSR_CS0_CMDD;
+ } else {
+ ready = NDSR_RDY;
+ cmd_done = NDSR_CS1_CMDD;
+ }
+
+ status = nand_readl(info, NDSR);
+
+ if (status & NDSR_UNCORERR)
+ info->retcode = ERR_UNCORERR;
+ if (status & NDSR_CORERR) {
+ info->retcode = ERR_CORERR;
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+ info->ecc_bch)
+ info->ecc_err_cnt = NDSR_ERR_CNT(status);
+ else
+ info->ecc_err_cnt = 1;
+
+ /*
+ * Each chunk composing a page is corrected independently,
+		 * and we need to store the maximum number of corrected bitflips
+ * to return it to the MTD layer in ecc.read_page().
+ */
+ info->max_bitflips = max_t(unsigned int,
+ info->max_bitflips,
+ info->ecc_err_cnt);
+ }
+ if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
+		/* use DMA to transfer the data, if enabled */
+ if (info->use_dma) {
+ disable_int(info, NDCR_INT_MASK);
+ info->state = (status & NDSR_RDDREQ) ?
+ STATE_DMA_READING : STATE_DMA_WRITING;
+ start_data_dma(info);
+ goto NORMAL_IRQ_EXIT;
+ } else {
+ info->state = (status & NDSR_RDDREQ) ?
+ STATE_PIO_READING : STATE_PIO_WRITING;
+ ret = IRQ_WAKE_THREAD;
+ goto NORMAL_IRQ_EXIT;
+ }
+ }
+ if (status & cmd_done) {
+ info->state = STATE_CMD_DONE;
+ is_completed = 1;
+ }
+ if (status & ready) {
+ info->state = STATE_READY;
+ is_ready = 1;
+ }
+
+ if (status & NDSR_WRCMDREQ) {
+ nand_writel(info, NDSR, NDSR_WRCMDREQ);
+ status &= ~NDSR_WRCMDREQ;
+ info->state = STATE_CMD_HANDLE;
+
+ /*
+ * Command buffer registers NDCB{0-2} (and optionally NDCB3)
+		 * must be loaded by writing either 12 or 16 bytes
+		 * directly to NDCB0, four bytes at a time.
+ *
+ * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
+ * but each NDCBx register can be read.
+ */
+ nand_writel(info, NDCB0, info->ndcb0);
+ nand_writel(info, NDCB0, info->ndcb1);
+ nand_writel(info, NDCB0, info->ndcb2);
+
+ /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ nand_writel(info, NDCB0, info->ndcb3);
+ }
+
+ /* clear NDSR to let the controller exit the IRQ */
+ nand_writel(info, NDSR, status);
+ if (is_completed)
+ complete(&info->cmd_complete);
+ if (is_ready)
+ complete(&info->dev_ready);
+NORMAL_IRQ_EXIT:
+ return ret;
+}
+
+static inline int is_buf_blank(uint8_t *buf, size_t len)
+{
+ for (; len > 0; len--)
+ if (*buf++ != 0xff)
+ return 0;
+ return 1;
+}
+
+static void set_command_address(struct pxa3xx_nand_info *info,
+ unsigned int page_size, uint16_t column, int page_addr)
+{
+ /* small page addr setting */
+ if (page_size < PAGE_CHUNK_SIZE) {
+ info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
+ | (column & 0xFF);
+
+ info->ndcb2 = 0;
+ } else {
+ info->ndcb1 = ((page_addr & 0xFFFF) << 16)
+ | (column & 0xFFFF);
+
+ if (page_addr & 0xFF0000)
+ info->ndcb2 = (page_addr & 0xFF0000) >> 16;
+ else
+ info->ndcb2 = 0;
+ }
+}
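+
+/*
+ * Worked example: for a large (>= 2KiB) page with column 0 and
+ * page_addr 0x12345, ndcb1 = (0x2345 << 16) | 0 = 0x23450000 and
+ * ndcb2 = (0x12345 & 0xFF0000) >> 16 = 0x01.
+ */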
+
+static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+{
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ struct mtd_info *mtd = host->mtd;
+
+	/* reset data and oob column pointers to handle data */
+ info->buf_start = 0;
+ info->buf_count = 0;
+ info->oob_size = 0;
+ info->data_buff_pos = 0;
+ info->oob_buff_pos = 0;
+ info->use_ecc = 0;
+ info->use_spare = 1;
+ info->retcode = ERR_NONE;
+ info->ecc_err_cnt = 0;
+ info->ndcb3 = 0;
+ info->need_wait = 0;
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_PAGEPROG:
+ info->use_ecc = 1;
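+		/* fall through */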
+ case NAND_CMD_READOOB:
+ pxa3xx_set_datasize(info, mtd);
+ break;
+ case NAND_CMD_PARAM:
+ info->use_spare = 0;
+ break;
+ default:
+ info->ndcb1 = 0;
+ info->ndcb2 = 0;
+ break;
+ }
+
+ /*
+ * If we are about to issue a read command, or about to set
+ * the write address, then clean the data buffer.
+ */
+ if (command == NAND_CMD_READ0 ||
+ command == NAND_CMD_READOOB ||
+ command == NAND_CMD_SEQIN) {
+
+ info->buf_count = mtd->writesize + mtd->oobsize;
+ memset(info->data_buff, 0xFF, info->buf_count);
+ }
+
+}
+
+static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
+ int ext_cmd_type, uint16_t column, int page_addr)
+{
+ int addr_cycle, exec_cmd;
+ struct pxa3xx_nand_host *host;
+ struct mtd_info *mtd;
+
+ host = info->host[info->cs];
+ mtd = host->mtd;
+ addr_cycle = 0;
+ exec_cmd = 1;
+
+ if (info->cs != 0)
+ info->ndcb0 = NDCB0_CSEL;
+ else
+ info->ndcb0 = 0;
+
+ if (command == NAND_CMD_SEQIN)
+ exec_cmd = 0;
+
+ addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
+ + host->col_addr_cycles);
+
+ switch (command) {
+ case NAND_CMD_READOOB:
+ case NAND_CMD_READ0:
+ info->buf_start = column;
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | addr_cycle
+ | NAND_CMD_READ0;
+
+ if (command == NAND_CMD_READOOB)
+ info->buf_start += mtd->writesize;
+
+ /*
+ * Multiple page read needs an 'extended command type' field,
+ * which is either naked-read or last-read according to the
+ * state.
+ */
+ if (mtd->writesize == PAGE_CHUNK_SIZE) {
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
+ } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->chunk_size +
+ info->oob_size;
+ }
+
+ set_command_address(info, mtd->writesize, column, page_addr);
+ break;
+
+ case NAND_CMD_SEQIN:
+
+ info->buf_start = column;
+ set_command_address(info, mtd->writesize, 0, page_addr);
+
+ /*
+ * Multiple page programming needs to execute the initial
+ * SEQIN command that sets the page address.
+ */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | addr_cycle
+ | command;
+ /* No data transfer in this case */
+ info->data_size = 0;
+ exec_cmd = 1;
+ }
+ break;
+
+ case NAND_CMD_PAGEPROG:
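+		/* A fully blank (all 0xff) page is not worth programming */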
+ if (is_buf_blank(info->data_buff,
+ (mtd->writesize + mtd->oobsize))) {
+ exec_cmd = 0;
+ break;
+ }
+
+ /* Second command setting for large pages */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ /*
+ * Multiple page write uses the 'extended command'
+ * field. This can be used to issue a command dispatch
+ * or a naked-write depending on the current stage.
+ */
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->chunk_size +
+ info->oob_size;
+
+ /*
+ * This is the command dispatch that completes a chunked
+ * page program operation.
+ */
+ if (info->data_size == 0) {
+ info->ndcb0 = NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | command;
+ info->ndcb1 = 0;
+ info->ndcb2 = 0;
+ info->ndcb3 = 0;
+ }
+ } else {
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_AUTO_RS
+ | NDCB0_ST_ROW_EN
+ | NDCB0_DBC
+ | (NAND_CMD_PAGEPROG << 8)
+ | NAND_CMD_SEQIN
+ | addr_cycle;
+ }
+ break;
+
+ case NAND_CMD_PARAM:
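+		/* ONFI parameter page read: 256 bytes with length override */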
+ info->buf_count = 256;
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | NDCB0_ADDR_CYC(1)
+ | NDCB0_LEN_OVRD
+ | command;
+ info->ndcb1 = (column & 0xFF);
+ info->ndcb3 = 256;
+ info->data_size = 256;
+ break;
+
+ case NAND_CMD_READID:
+ info->buf_count = host->read_id_bytes;
+ info->ndcb0 |= NDCB0_CMD_TYPE(3)
+ | NDCB0_ADDR_CYC(1)
+ | command;
+ info->ndcb1 = (column & 0xFF);
+
+ info->data_size = 8;
+ break;
+ case NAND_CMD_STATUS:
+ info->buf_count = 1;
+ info->ndcb0 |= NDCB0_CMD_TYPE(4)
+ | NDCB0_ADDR_CYC(1)
+ | command;
+
+ info->data_size = 8;
+ break;
+
+ case NAND_CMD_ERASE1:
+ info->ndcb0 |= NDCB0_CMD_TYPE(2)
+ | NDCB0_AUTO_RS
+ | NDCB0_ADDR_CYC(3)
+ | NDCB0_DBC
+ | (NAND_CMD_ERASE2 << 8)
+ | NAND_CMD_ERASE1;
+ info->ndcb1 = page_addr;
+ info->ndcb2 = 0;
+
+ break;
+ case NAND_CMD_RESET:
+ info->ndcb0 |= NDCB0_CMD_TYPE(5)
+ | command;
+
+ break;
+
+ case NAND_CMD_ERASE2:
+ exec_cmd = 0;
+ break;
+
+ default:
+ exec_cmd = 0;
+		dev_err(&info->pdev->dev, "unsupported command %x\n",
+ command);
+ break;
+ }
+
+ return exec_cmd;
+}
+
+static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int exec_cmd;
+
+ /*
+	 * if this is an x16 device, then convert the input
+ * "byte" address into a "word" address appropriate
+ * for indexing a word-oriented device
+ */
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
+ column /= 2;
+
+ /*
+	 * Different NAND chips may be hooked to different chip
+	 * selects; if the chip select has changed, reload the
+	 * timing registers.
+ */
+ if (info->cs != host->cs) {
+ info->cs = host->cs;
+ nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+ }
+
+ prepare_start_command(info, command);
+
+ info->state = STATE_PREPARED;
+ exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
+
+ if (exec_cmd) {
+ init_completion(&info->cmd_complete);
+ init_completion(&info->dev_ready);
+ info->need_wait = 1;
+ pxa3xx_nand_start(info);
+
+ if (!wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT)) {
+			dev_err(&info->pdev->dev, "Wait timed out!!!\n");
+ /* Stop State Machine for next command cycle */
+ pxa3xx_nand_stop(info);
+ }
+ }
+ info->state = STATE_IDLE;
+}
+
+static void nand_cmdfunc_extended(struct mtd_info *mtd,
+ const unsigned command,
+ int column, int page_addr)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int exec_cmd, ext_cmd_type;
+
+ /*
+	 * if this is an x16 device, then convert the input
+ * "byte" address into a "word" address appropriate
+ * for indexing a word-oriented device
+ */
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
+ column /= 2;
+
+ /*
+	 * Different NAND chips may be hooked to different chip
+	 * selects; if the chip select has changed, reload the
+	 * timing registers.
+ */
+ if (info->cs != host->cs) {
+ info->cs = host->cs;
+ nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+ }
+
+ /* Select the extended command for the first command */
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ ext_cmd_type = EXT_CMD_TYPE_MONO;
+ break;
+ case NAND_CMD_SEQIN:
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ break;
+ case NAND_CMD_PAGEPROG:
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+ break;
+ default:
+ ext_cmd_type = 0;
+ break;
+ }
+
+ prepare_start_command(info, command);
+
+ /*
+ * Prepare the "is ready" completion before starting a command
+	 * transaction sequence. If the command is not executed, the
+	 * completion is completed right away, see below.
+ *
+ * We can do that inside the loop because the command variable
+	 * is invariant and thus so is exec_cmd.
+ */
+ info->need_wait = 1;
+ init_completion(&info->dev_ready);
+ do {
+ info->state = STATE_PREPARED;
+ exec_cmd = prepare_set_command(info, command, ext_cmd_type,
+ column, page_addr);
+ if (!exec_cmd) {
+ info->need_wait = 0;
+ complete(&info->dev_ready);
+ break;
+ }
+
+ init_completion(&info->cmd_complete);
+ pxa3xx_nand_start(info);
+
+ if (!wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT)) {
+			dev_err(&info->pdev->dev, "Wait timed out!!!\n");
+ /* Stop State Machine for next command cycle */
+ pxa3xx_nand_stop(info);
+ break;
+ }
+
+ /* Check if the sequence is complete */
+ if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+ break;
+
+ /*
+		 * After a split program command sequence has issued
+ * the command dispatch, the command sequence is complete.
+ */
+ if (info->data_size == 0 &&
+ command == NAND_CMD_PAGEPROG &&
+ ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
+ break;
+
+ if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
+ /* Last read: issue a 'last naked read' */
+ if (info->data_size == info->chunk_size)
+ ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
+ else
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+
+ /*
+		 * If a split program command has no more data to transfer,
+		 * the command dispatch must be issued to complete the sequence.
+ */
+ } else if (command == NAND_CMD_PAGEPROG &&
+ info->data_size == 0) {
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ }
+ } while (1);
+
+ info->state = STATE_IDLE;
+}
+
+static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+
+ chip->read_buf(mtd, buf, mtd->writesize);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (info->retcode == ERR_CORERR && info->use_ecc) {
+ mtd->ecc_stats.corrected += info->ecc_err_cnt;
+
+ } else if (info->retcode == ERR_UNCORERR) {
+ /*
+		 * For a blank page (all 0xff) the HW calculates its ECC as
+		 * 0, which differs from the ECC information stored in the
+		 * OOB area, so ignore such uncorrectable errors.
+ */
+ if (is_buf_blank(buf, mtd->writesize))
+ info->retcode = ERR_NONE;
+ else
+ mtd->ecc_stats.failed++;
+ }
+
+ return info->max_bitflips;
+}
+
+static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ char retval = 0xFF;
+
+ if (info->buf_start < info->buf_count)
+		/* Has a new command just been sent? */
+ retval = info->data_buff[info->buf_start++];
+
+ return retval;
+}
+
+static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ u16 retval = 0xFFFF;
+
+ if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
+ retval = *((u16 *)(info->data_buff+info->buf_start));
+ info->buf_start += 2;
+ }
+ return retval;
+}
+
+static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+ memcpy(buf, info->data_buff + info->buf_start, real_len);
+ info->buf_start += real_len;
+}
+
+static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+ memcpy(info->data_buff + info->buf_start, buf, real_len);
+ info->buf_start += real_len;
+}
+
+static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ return;
+}
+
+static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+
+ if (info->need_wait) {
+ info->need_wait = 0;
+ if (!wait_for_completion_timeout(&info->dev_ready,
+ CHIP_DELAY_TIMEOUT)) {
+			dev_err(&info->pdev->dev, "Ready timed out!!!\n");
+ return NAND_STATUS_FAIL;
+ }
+ }
+
+ /* pxa3xx_nand_send_command has waited for command complete */
+ if (this->state == FL_WRITING || this->state == FL_ERASING) {
+ if (info->retcode == ERR_NONE)
+ return 0;
+ else
+ return NAND_STATUS_FAIL;
+ }
+
+ return NAND_STATUS_READY;
+}
+
+static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
+ const struct pxa3xx_nand_flash *f)
+{
+ struct platform_device *pdev = info->pdev;
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ uint32_t ndcr = 0x0; /* enable all interrupts */
+
+ if (f->page_size != 2048 && f->page_size != 512) {
+		dev_err(&pdev->dev, "Only 2048 and 512 byte page sizes are supported\n");
+ return -EINVAL;
+ }
+
+ if (f->flash_width != 16 && f->flash_width != 8) {
+		dev_err(&pdev->dev, "Only 8 bit and 16 bit bus widths are supported!\n");
+ return -EINVAL;
+ }
+
+ /* calculate flash information */
+ host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
+
+ /* calculate addressing information */
+ host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
+
+ if (f->num_blocks * f->page_per_block > 65536)
+ host->row_addr_cycles = 3;
+ else
+ host->row_addr_cycles = 2;
+
+ ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
+ ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
+ ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
+ ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
+ ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
+ ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
+
+ ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
+ ndcr |= NDCR_SPARE_EN; /* enable spare by default */
+
+ info->reg_ndcr = ndcr;
+
+ pxa3xx_nand_set_timing(host, f->timing);
+ return 0;
+}
+
+static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
+{
+ /*
+	 * Chip select 0 is hard-coded here because keep_config is not
+	 * supported when more than one chip is attached to the controller.
+ */
+ struct pxa3xx_nand_host *host = info->host[0];
+ uint32_t ndcr = nand_readl(info, NDCR);
+
+ if (ndcr & NDCR_PAGE_SZ) {
+ /* Controller's FIFO size */
+ info->chunk_size = 2048;
+ host->read_id_bytes = 4;
+ } else {
+ info->chunk_size = 512;
+ host->read_id_bytes = 2;
+ }
+
+	/* Keep the NDCR value, with the interrupt bits masked off */
+ info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
+ info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
+ info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
+ return 0;
+}
+
+#ifdef ARCH_HAS_DMA
+static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
+{
+ struct platform_device *pdev = info->pdev;
+ int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
+
+ if (use_dma == 0) {
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+ if (info->data_buff == NULL)
+ return -ENOMEM;
+ return 0;
+ }
+
+ info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
+ &info->data_buff_phys, GFP_KERNEL);
+ if (info->data_buff == NULL) {
+ dev_err(&pdev->dev, "failed to allocate dma buffer\n");
+ return -ENOMEM;
+ }
+
+ info->data_desc = (void *)info->data_buff + data_desc_offset;
+ info->data_desc_addr = info->data_buff_phys + data_desc_offset;
+
+ info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
+ pxa3xx_nand_data_dma_irq, info);
+ if (info->data_dma_ch < 0) {
+ dev_err(&pdev->dev, "failed to request data dma\n");
+ dma_free_coherent(&pdev->dev, info->buf_size,
+ info->data_buff, info->data_buff_phys);
+ return info->data_dma_ch;
+ }
+
+ /*
+ * Now that DMA buffers are allocated we turn on
+ * DMA proper for I/O operations.
+ */
+ info->use_dma = 1;
+ return 0;
+}
+
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+ struct platform_device *pdev = info->pdev;
+ if (info->use_dma) {
+ pxa_free_dma(info->data_dma_ch);
+ dma_free_coherent(&pdev->dev, info->buf_size,
+ info->data_buff, info->data_buff_phys);
+ } else {
+ kfree(info->data_buff);
+ }
+}
+#else
+static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
+{
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+ if (info->data_buff == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+ kfree(info->data_buff);
+}
+#endif
+
+static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int ret;
+
+ mtd = info->host[info->cs]->mtd;
+ chip = mtd->priv;
+
+	/* make a first attempt using the default timing */
+ ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
+ if (ret)
+ return ret;
+
+ chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
+ ret = chip->waitfunc(mtd, chip);
+ if (ret & NAND_STATUS_FAIL)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int pxa_ecc_init(struct pxa3xx_nand_info *info,
+ struct nand_ecc_ctrl *ecc,
+ int strength, int ecc_stepsize, int page_size)
+{
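+	/*
+	 * Summary of the configurations handled below (chunk, spare and
+	 * ecc sizes are per chunk, in bytes):
+	 *
+	 *   strength  step  page   chunk  spare  ecc
+	 *      1      512   2048   2048    40    24
+	 *      1      512    512    512     8     8
+	 *      4      512   2048   2048    32    32   (BCH)
+	 *      4      512   4096   2048    32    32   (BCH)
+	 *      8      512   4096   1024     0    32   (BCH)
+	 */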
+ if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+ info->chunk_size = 2048;
+ info->spare_size = 40;
+ info->ecc_size = 24;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = 512;
+ ecc->strength = 1;
+
+ } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+ info->chunk_size = 512;
+ info->spare_size = 8;
+ info->ecc_size = 8;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = 512;
+ ecc->strength = 1;
+
+ /*
+ * Required ECC: 4-bit correction per 512 bytes
+ * Select: 16-bit correction per 2048 bytes
+ */
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
+ info->ecc_bch = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_2KB_bch4bit;
+ ecc->strength = 16;
+
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_4KB_bch4bit;
+ ecc->strength = 16;
+
+ /*
+ * Required ECC: 8-bit correction per 512 bytes
+ * Select: 16-bit correction per 1024 bytes
+ */
+ } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
+ info->chunk_size = 1024;
+ info->spare_size = 0;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_4KB_bch8bit;
+ ecc->strength = 16;
+ } else {
+ dev_err(&info->pdev->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ strength, page_size);
+ return -ENODEV;
+ }
+
+ dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
+ ecc->strength, ecc->size);
+ return 0;
+}
+
+static int pxa3xx_nand_scan(struct mtd_info *mtd)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ struct platform_device *pdev = info->pdev;
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
+ const struct pxa3xx_nand_flash *f = NULL;
+ struct nand_chip *chip = mtd->priv;
+ uint32_t id = -1;
+ uint64_t chipsize;
+ int i, ret, num;
+ uint16_t ecc_strength, ecc_step;
+
+ if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
+ goto KEEP_CONFIG;
+
+ ret = pxa3xx_nand_sensing(info);
+ if (ret) {
+ dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
+ info->cs);
+
+ return ret;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
+ id = *((uint16_t *)(info->data_buff));
+	if (id != 0) {
+		dev_info(&info->pdev->dev, "Detected a flash id %x\n", id);
+	} else {
+ dev_warn(&info->pdev->dev,
+			 "Read out ID 0, the timing setup is probably wrong!\n");
+
+ return -EINVAL;
+ }
+
+ num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
+ for (i = 0; i < num; i++) {
+ if (i < pdata->num_flash)
+ f = pdata->flash + i;
+ else
+ f = &builtin_flash_types[i - pdata->num_flash + 1];
+
+ /* find the chip in default list */
+ if (f->chip_id == id)
+ break;
+ }
+
+ if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
+ dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
+
+ return -EINVAL;
+ }
+
+ ret = pxa3xx_nand_config_flash(info, f);
+ if (ret) {
+ dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
+ return ret;
+ }
+
+ memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
+
+ pxa3xx_flash_ids[0].name = f->name;
+ pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
+ pxa3xx_flash_ids[0].pagesize = f->page_size;
+ chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
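+	/* nand_flash_dev.chipsize is expressed in MiB, hence the shift */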
+ pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
+ pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
+ if (f->flash_width == 16)
+ pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
+ pxa3xx_flash_ids[1].name = NULL;
+ def = pxa3xx_flash_ids;
+KEEP_CONFIG:
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ /* Device detection must be done with ECC disabled */
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ nand_writel(info, NDECCCTRL, 0x0);
+
+ if (nand_scan_ident(mtd, 1, def))
+ return -ENODEV;
+
+ if (pdata->flash_bbt) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_USE_FLASH |
+ NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /*
+	 * If the page size is bigger than the FIFO size, let's check
+	 * we are given the right variant and then switch to the extended
+	 * (aka split) command handling.
+ */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+ chip->cmdfunc = nand_cmdfunc_extended;
+ } else {
+ dev_err(&info->pdev->dev,
+ "unsupported page size on this variant\n");
+ return -ENODEV;
+ }
+ }
+
+ if (pdata->ecc_strength && pdata->ecc_step_size) {
+ ecc_strength = pdata->ecc_strength;
+ ecc_step = pdata->ecc_step_size;
+ } else {
+ ecc_strength = chip->ecc_strength_ds;
+ ecc_step = chip->ecc_step_ds;
+ }
+
+ /* Set default ECC strength requirements on non-ONFI devices */
+ if (ecc_strength < 1 && ecc_step < 1) {
+ ecc_strength = 1;
+ ecc_step = 512;
+ }
+
+ ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
+ ecc_step, mtd->writesize);
+ if (ret)
+ return ret;
+
+ /* calculate addressing information */
+ if (mtd->writesize >= 2048)
+ host->col_addr_cycles = 2;
+ else
+ host->col_addr_cycles = 1;
+
+ /* release the initial buffer */
+ kfree(info->data_buff);
+
+ /* allocate the real data + oob buffer */
+ info->buf_size = mtd->writesize + mtd->oobsize;
+ ret = pxa3xx_nand_init_buff(info);
+ if (ret)
+ return ret;
+ info->oob_buff = info->data_buff + mtd->writesize;
+
+ if ((mtd->size >> chip->page_shift) > 65536)
+ host->row_addr_cycles = 3;
+ else
+ host->row_addr_cycles = 2;
+ return nand_scan_tail(mtd);
+}
+
+static int alloc_nand_resource(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ struct pxa3xx_nand_info *info;
+ struct pxa3xx_nand_host *host;
+ struct nand_chip *chip = NULL;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int ret, irq, cs;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata->num_cs <= 0)
+ return -ENODEV;
+ info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
+ sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pdev = pdev;
+ info->variant = pxa3xx_nand_get_variant(pdev);
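+	/*
+	 * The allocation above packs the pxa3xx_nand_info plus one
+	 * (mtd_info + pxa3xx_nand_host) pair per chip select into a
+	 * single block; the loop below carves it into per-CS pointers.
+	 * The host is obtained by casting the chip pointer, so the
+	 * nand_chip is expected to be the first member of
+	 * pxa3xx_nand_host.
+	 */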
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+		mtd = (struct mtd_info *)((void *)&info[1] +
+		(sizeof(*mtd) + sizeof(*host)) * cs);
+ chip = (struct nand_chip *)(&mtd[1]);
+ host = (struct pxa3xx_nand_host *)chip;
+ info->host[cs] = host;
+ host->mtd = mtd;
+ host->cs = cs;
+ host->info_data = info;
+ mtd->priv = host;
+ mtd->owner = THIS_MODULE;
+
+ chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
+ chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
+ chip->controller = &info->controller;
+ chip->waitfunc = pxa3xx_nand_waitfunc;
+ chip->select_chip = pxa3xx_nand_select_chip;
+ chip->read_word = pxa3xx_nand_read_word;
+ chip->read_byte = pxa3xx_nand_read_byte;
+ chip->read_buf = pxa3xx_nand_read_buf;
+ chip->write_buf = pxa3xx_nand_write_buf;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->cmdfunc = nand_cmdfunc;
+ }
+
+ spin_lock_init(&chip->controller->lock);
+ init_waitqueue_head(&chip->controller->wq);
+ info->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed to get nand clock\n");
+ return PTR_ERR(info->clk);
+ }
+ ret = clk_prepare_enable(info->clk);
+ if (ret < 0)
+ return ret;
+
+ if (use_dma) {
+ /*
+ * This is a dirty hack to make this driver work from
+ * devicetree bindings. It can be removed once we have
+		 * a proper DMA controller framework for DT.
+ */
+ if (pdev->dev.of_node &&
+ of_machine_is_compatible("marvell,pxa3xx")) {
+ info->drcmr_dat = 97;
+ info->drcmr_cmd = 99;
+ } else {
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev,
+ "no resource defined for data DMA\n");
+ ret = -ENXIO;
+ goto fail_disable_clk;
+ }
+ info->drcmr_dat = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r == NULL) {
+ dev_err(&pdev->dev,
+ "no resource defined for cmd DMA\n");
+ ret = -ENXIO;
+ goto fail_disable_clk;
+ }
+ info->drcmr_cmd = r->start;
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ resource defined\n");
+ ret = -ENXIO;
+ goto fail_disable_clk;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(info->mmio_base)) {
+ ret = PTR_ERR(info->mmio_base);
+ goto fail_disable_clk;
+ }
+ info->mmio_phys = r->start;
+
+ /* Allocate a buffer to allow flash detection */
+ info->buf_size = INIT_BUFFER_SIZE;
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+ if (info->data_buff == NULL) {
+ ret = -ENOMEM;
+ goto fail_disable_clk;
+ }
+
+	/* start with all interrupts disabled */
+ disable_int(info, NDSR_MASK);
+
+ ret = request_threaded_irq(irq, pxa3xx_nand_irq,
+ pxa3xx_nand_irq_thread, IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto fail_free_buf;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+
+fail_free_buf:
+ free_irq(irq, info);
+ kfree(info->data_buff);
+fail_disable_clk:
+ clk_disable_unprepare(info->clk);
+ return ret;
+}
+
+static int pxa3xx_nand_remove(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
+ struct pxa3xx_nand_platform_data *pdata;
+ int irq, cs;
+
+ if (!info)
+ return 0;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0)
+ free_irq(irq, info);
+ pxa3xx_nand_free_buff(info);
+
+ clk_disable_unprepare(info->clk);
+
+ for (cs = 0; cs < pdata->num_cs; cs++)
+ nand_release(info->host[cs]->mtd);
+ return 0;
+}
+
+static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+
+ if (!of_id)
+ return 0;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
+ pdata->enable_arbiter = 1;
+ if (of_get_property(np, "marvell,nand-keep-config", NULL))
+ pdata->keep_config = 1;
+ of_property_read_u32(np, "num-cs", &pdata->num_cs);
+ pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ pdata->ecc_strength = of_get_nand_ecc_strength(np);
+ if (pdata->ecc_strength < 0)
+ pdata->ecc_strength = 0;
+
+ pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
+ if (pdata->ecc_step_size < 0)
+ pdata->ecc_step_size = 0;
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+}
+
+static int pxa3xx_nand_probe(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_part_parser_data ppdata = {};
+ struct pxa3xx_nand_info *info;
+ int ret, cs, probe_success;
+
+#ifndef ARCH_HAS_DMA
+ if (use_dma) {
+ use_dma = 0;
+ dev_warn(&pdev->dev,
+ "This platform can't do DMA on this device\n");
+ }
+#endif
+ ret = pxa3xx_nand_probe_dt(pdev);
+ if (ret)
+ return ret;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return -ENODEV;
+ }
+
+ ret = alloc_nand_resource(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "alloc nand resource failed\n");
+ return ret;
+ }
+
+ info = platform_get_drvdata(pdev);
+ probe_success = 0;
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ struct mtd_info *mtd = info->host[cs]->mtd;
+
+ /*
+		 * The mtd name matches the one used in the 'mtdparts' kernel
+		 * parameter. This name cannot be changed, otherwise the
+		 * user's mtd partition configuration would break.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ info->cs = cs;
+ ret = pxa3xx_nand_scan(mtd);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
+ cs);
+ continue;
+ }
+
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(mtd, NULL,
+ &ppdata, pdata->parts[cs],
+ pdata->nr_parts[cs]);
+ if (!ret)
+ probe_success = 1;
+ }
+
+ if (!probe_success) {
+ pxa3xx_nand_remove(pdev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
+ struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ int cs;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (info->state) {
+ dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
+ return -EAGAIN;
+ }
+
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = info->host[cs]->mtd;
+ mtd_suspend(mtd);
+ }
+
+ return 0;
+}
+
+static int pxa3xx_nand_resume(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
+ struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_info *mtd;
+ int cs;
+
+ pdata = dev_get_platdata(&pdev->dev);
+	/* We don't want to handle interrupts outside of an mtd call */
+ disable_int(info, NDCR_INT_MASK);
+
+ /*
+	 * Directly set the chip select to an invalid value, so
+	 * that the driver reloads the timing for the current chip
+	 * select at the beginning of cmdfunc.
+ */
+ info->cs = 0xff;
+
+ /*
+	 * As the spec says, NDSR is updated to 0x1800 when the
+	 * nand_clk is disabled and re-enabled.
+	 * To prevent this from corrupting the driver's state machine,
+	 * clear all status bits before resuming.
+ */
+ nand_writel(info, NDSR, NDSR_MASK);
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ mtd = info->host[cs]->mtd;
+ mtd_resume(mtd);
+ }
+
+ return 0;
+}
+#else
+#define pxa3xx_nand_suspend NULL
+#define pxa3xx_nand_resume NULL
+#endif
+
+static struct platform_driver pxa3xx_nand_driver = {
+ .driver = {
+ .name = "pxa3xx-nand",
+ .of_match_table = pxa3xx_nand_dt_ids,
+ },
+ .probe = pxa3xx_nand_probe,
+ .remove = pxa3xx_nand_remove,
+ .suspend = pxa3xx_nand_suspend,
+ .resume = pxa3xx_nand_resume,
+};
+
+module_platform_driver(pxa3xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
new file mode 100644
index 000000000..baea83f4d
--- /dev/null
+++ b/drivers/mtd/nand/r852.c
@@ -0,0 +1,1085 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include "sm_common.h"
+#include "r852.h"
+
+
+static bool r852_enable_dma = true;
+module_param(r852_enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* read register */
+static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
+{
+ uint8_t reg = readb(dev->mmio + address);
+ return reg;
+}
+
+/* write register */
+static inline void r852_write_reg(struct r852_device *dev,
+ int address, uint8_t value)
+{
+ writeb(value, dev->mmio + address);
+ mmiowb();
+}
+
+
+/* read dword sized register */
+static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
+{
+ uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
+ return reg;
+}
+
+/* write dword sized register */
+static inline void r852_write_reg_dword(struct r852_device *dev,
+ int address, uint32_t value)
+{
+ writel(cpu_to_le32(value), dev->mmio + address);
+ mmiowb();
+}
+
+/* returns pointer to our private structure */
+static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ return chip->priv;
+}
+
+
+/* check if controller supports dma */
+static void r852_dma_test(struct r852_device *dev)
+{
+ dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
+ (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
+
+ if (!dev->dma_usable)
+ message("Non dma capable device detected, dma disabled");
+
+ if (!r852_enable_dma) {
+ message("disabling dma on user request");
+ dev->dma_usable = 0;
+ }
+}
+
+/*
+ * Enable dma. Enables either the first or the second stage of the DMA.
+ * Expects dev->dma_dir and dev->dma_state to be set.
+ */
+static void r852_dma_enable(struct r852_device *dev)
+{
+ uint8_t dma_reg, dma_irq_reg;
+
+ /* Set up dma settings */
+ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
+ dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
+
+ if (dev->dma_dir)
+ dma_reg |= R852_DMA_READ;
+
+ if (dev->dma_state == DMA_INTERNAL) {
+ dma_reg |= R852_DMA_INTERNAL;
+ /* Precaution to make sure HW doesn't write */
+ /* to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ } else {
+ dma_reg |= R852_DMA_MEMORY;
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_dma_addr));
+ }
+
+ /* Precaution: make sure write reached the device */
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
+
+ /* Set dma irq */
+ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ dma_irq_reg |
+ R852_DMA_IRQ_INTERNAL |
+ R852_DMA_IRQ_ERROR |
+ R852_DMA_IRQ_MEMORY);
+}
+
+/*
+ * Disable dma, called from the interrupt handler, which specifies
+ * success of the operation via 'error' argument
+ */
+static void r852_dma_done(struct r852_device *dev, int error)
+{
+ WARN_ON(dev->dma_stage == 0);
+
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
+ r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
+
+ /* Precaution to make sure HW doesn't write to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ dev->dma_error = error;
+ dev->dma_stage = 0;
+
+ if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
+ pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
+ dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+}
+
+/*
+ * Wait until the DMA is done; this covers both phases of the transfer.
+ */
+static int r852_dma_wait(struct r852_device *dev)
+{
+ long timeout = wait_for_completion_timeout(&dev->dma_done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ dbg("timeout waiting for DMA interrupt");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Read/write one sector (512 bytes) using dma. Only whole sectors
+ * can be transferred this way.
+ */
+static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
+{
+ int bounce = 0;
+ unsigned long flags;
+ int error;
+
+ dev->dma_error = 0;
+
+ /* Set dma direction */
+ dev->dma_dir = do_read;
+ dev->dma_stage = 1;
+ reinit_completion(&dev->dma_done);
+
+ dbg_verbose("doing dma %s ", do_read ? "read" : "write");
+
+	/* Set the initial dma state: for reads, first fill the on-board
+	   buffer from the device; for writes, first fill the buffer from
+	   memory */
+ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
+
+	/* if the incoming buffer is not 512-byte aligned, use the bounce buffer */
+ if ((unsigned long)buf & (R852_DMA_LEN-1))
+ bounce = 1;
+
+ if (!bounce) {
+ dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
+ R852_DMA_LEN,
+ (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+
+ if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
+ bounce = 1;
+ }
+
+ if (bounce) {
+ dbg_verbose("dma: using bounce buffer");
+ dev->phys_dma_addr = dev->phys_bounce_buffer;
+ if (!do_read)
+ memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
+ }
+
+ /* Enable DMA */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ r852_dma_enable(dev);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* Wait till complete */
+ error = r852_dma_wait(dev);
+
+ if (error) {
+ r852_dma_done(dev, error);
+ return;
+ }
+
+ if (do_read && bounce)
+ memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
+}
+
+/*
+ * Program data lines of the nand chip to send data to it
+ */
+static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ /* Don't allow any access to hardware if we suspect card removal */
+ if (dev->card_unstable)
+ return;
+
+	/* Special case for a whole sector write */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, (uint8_t *)buf, 0);
+ return;
+ }
+
+	/* write DWORD chunks - faster */
+ while (len >= 4) {
+ reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
+ r852_write_reg_dword(dev, R852_DATALINE, reg);
+ buf += 4;
+		len -= 4;
+	}
+
+ /* write rest */
+ while (len > 0) {
+ r852_write_reg(dev, R852_DATALINE, *buf++);
+ len--;
+ }
+}
+
+/*
+ * Read data lines of the nand chip to retrieve data
+ */
+static void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ if (dev->card_unstable) {
+		/* since we can't signal an error here, at least return
+		   a predictable buffer */
+ memset(buf, 0, len);
+ return;
+ }
+
+ /* special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, buf, 1);
+ return;
+ }
+
+ /* read in dword sized chunks */
+ while (len >= 4) {
+
+ reg = r852_read_reg_dword(dev, R852_DATALINE);
+ *buf++ = reg & 0xFF;
+ *buf++ = (reg >> 8) & 0xFF;
+ *buf++ = (reg >> 16) & 0xFF;
+ *buf++ = (reg >> 24) & 0xFF;
+ len -= 4;
+ }
+
+	/* read the rest byte by byte */
+ while (len--)
+ *buf++ = r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Read one byte from nand chip
+ */
+static uint8_t r852_read_byte(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* Same problem as in r852_read_buf.... */
+ if (dev->card_unstable)
+ return 0;
+
+ return r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Control several chip lines & send commands
+ */
+static void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+
+ dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
+ R852_CTL_ON | R852_CTL_CARDENABLE);
+
+ if (ctrl & NAND_ALE)
+ dev->ctlreg |= R852_CTL_DATA;
+
+ if (ctrl & NAND_CLE)
+ dev->ctlreg |= R852_CTL_COMMAND;
+
+ if (ctrl & NAND_NCE)
+ dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
+ else
+ dev->ctlreg &= ~R852_CTL_WRITE;
+
+		/* when a write is started, enable write access */
+ if (dat == NAND_CMD_ERASE1)
+ dev->ctlreg |= R852_CTL_WRITE;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
+ to set write mode */
+ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
+ dev->ctlreg |= R852_CTL_WRITE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ if (dat != NAND_CMD_NONE)
+ r852_write_reg(dev, R852_DATALINE, dat);
+}
+
+/*
+ * Wait till the card is ready.
+ * Based on nand_wait, but also returns an error on DMA errors.
+ */
+static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct r852_device *dev = chip->priv;
+
+ unsigned long timeout;
+ int status;
+
+ timeout = jiffies + (chip->state == FL_ERASING ?
+ msecs_to_jiffies(400) : msecs_to_jiffies(20));
+
+ while (time_before(jiffies, timeout))
+ if (chip->dev_ready(mtd))
+ break;
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ status = (int)chip->read_byte(mtd);
+
+	/* Unfortunately, there is no way to report a detailed error status... */
+ if (dev->dma_error) {
+ status |= NAND_STATUS_FAIL;
+ dev->dma_error = 0;
+ }
+ return status;
+}
+
+/*
+ * Check if card is ready
+ */
+
+static int r852_ready(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
+}
+
+
+/*
+ * Set ECC engine mode
+ */
+
+static void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ /* enable ecc generation/check*/
+ dev->ctlreg |= R852_CTL_ECC_ENABLE;
+
+ /* flush ecc buffer */
+ r852_write_reg(dev, R852_CTL,
+ dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return;
+
+ case NAND_ECC_READSYN:
+ /* disable ecc generation */
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+}
+
+/*
+ * Calculate ECC, only used for writes
+ */
+
+static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ struct sm_oob *oob = (struct sm_oob *)ecc_code;
+ uint32_t ecc1, ecc2;
+
+ if (dev->card_unstable)
+ return 0;
+
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
+ ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
+
+ oob->ecc1[0] = (ecc1) & 0xFF;
+ oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
+ oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
+
+ oob->ecc2[0] = (ecc2) & 0xFF;
+ oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
+ oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return 0;
+}
+
+/*
+ * Correct the data using ECC; the hw did almost everything for us
+ */
+
+static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ uint16_t ecc_reg;
+ uint8_t ecc_status, err_byte;
+ int i, error = 0;
+
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return 0;
+
+ if (dev->dma_error) {
+ dev->dma_error = 0;
+ return -1;
+ }
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+ ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+
+ for (i = 0 ; i <= 1 ; i++) {
+
+ ecc_status = (ecc_reg >> 8) & 0xFF;
+
+ /* ecc uncorrectable error */
+ if (ecc_status & R852_ECC_FAIL) {
+ dbg("ecc: unrecoverable error, in half %d", i);
+ error = -1;
+ goto exit;
+ }
+
+ /* correctable error */
+ if (ecc_status & R852_ECC_CORRECTABLE) {
+
+ err_byte = ecc_reg & 0xFF;
+ dbg("ecc: recoverable error, "
+ "in half %d, byte %d, bit %d", i,
+ err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
+
+ dat[err_byte] ^=
+ 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
+ error++;
+ }
+
+ dat += 256;
+ ecc_reg >>= 16;
+ }
+exit:
+ return error;
+}
+
+/*
+ * This is a copy of nand_read_oob_std.
+ * nand_read_oob_syndrome assumes we can send a column address - we can't.
+ */
+static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/*
+ * Start the nand engine
+ */
+
+static void r852_engine_enable(struct r852_device *dev)
+{
+ if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ } else {
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ }
+ msleep(300);
+ r852_write_reg(dev, R852_CTL, 0);
+}
+
+
+/*
+ * Stop the nand engine
+ */
+
+static void r852_engine_disable(struct r852_device *dev)
+{
+ r852_write_reg_dword(dev, R852_HW, 0);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
+}
+
+/*
+ * Test if card is present
+ */
+
+static void r852_card_update_present(struct r852_device *dev)
+{
+ unsigned long flags;
+ uint8_t reg;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ reg = r852_read_reg(dev, R852_CARD_STA);
+ dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Update card detection IRQ state according to current card state
+ * which is read in r852_card_update_present
+ */
+static void r852_update_card_detect(struct r852_device *dev)
+{
+ int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ dev->card_unstable = 0;
+
+ card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
+ card_detect_reg |= R852_CARD_IRQ_GENABLE;
+
+ card_detect_reg |= dev->card_detected ?
+ R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
+
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
+}
+
+static ssize_t r852_media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
+ struct r852_device *dev = r852_get_dev(mtd);
+ char *data = dev->sm ? "smartmedia" : "xd";
+
+ strcpy(buf, data);
+ return strlen(data);
+}
+
+static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+
+
+/* Detect properties of card in slot */
+static void r852_update_media_status(struct r852_device *dev)
+{
+ uint8_t reg;
+ unsigned long flags;
+ int readonly;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (!dev->card_detected) {
+ message("card removed");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+		return;
+ }
+
+ readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
+ reg = r852_read_reg(dev, R852_DMA_CAP);
+ dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
+
+ message("detected %s %s card in slot",
+ dev->sm ? "SmartMedia" : "xD",
+ readonly ? "readonly" : "writeable");
+
+ dev->readonly = readonly;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Register the nand device
+ * Called when the card is detected
+ */
+static int r852_register_nand_device(struct r852_device *dev)
+{
+ dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+
+ if (!dev->mtd)
+ goto error1;
+
+ WARN_ON(dev->card_registred);
+
+ dev->mtd->owner = THIS_MODULE;
+ dev->mtd->priv = dev->chip;
+ dev->mtd->dev.parent = &dev->pci_dev->dev;
+
+ if (dev->readonly)
+ dev->chip->options |= NAND_ROM;
+
+ r852_engine_enable(dev);
+
+ if (sm_register_device(dev->mtd, dev->sm))
+ goto error2;
+
+ if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
+ message("can't create media type sysfs attribute");
+
+ dev->card_registred = 1;
+ return 0;
+error2:
+ kfree(dev->mtd);
+error1:
+ /* Force card redetect */
+ dev->card_detected = 0;
+ return -1;
+}
+
+/*
+ * Unregister the card
+ */
+
+static void r852_unregister_nand_device(struct r852_device *dev)
+{
+ if (!dev->card_registred)
+ return;
+
+ device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
+ nand_release(dev->mtd);
+ r852_engine_disable(dev);
+ dev->card_registred = 0;
+ kfree(dev->mtd);
+ dev->mtd = NULL;
+}
+
+/* Card state updater */
+static void r852_card_detect_work(struct work_struct *work)
+{
+ struct r852_device *dev =
+ container_of(work, struct r852_device, card_detect_work.work);
+
+ r852_card_update_present(dev);
+ r852_update_card_detect(dev);
+ dev->card_unstable = 0;
+
+ /* False alarm */
+ if (dev->card_detected == dev->card_registred)
+ goto exit;
+
+ /* Read media properties */
+ r852_update_media_status(dev);
+
+ /* Register the card */
+ if (dev->card_detected)
+ r852_register_nand_device(dev);
+ else
+ r852_unregister_nand_device(dev);
+exit:
+ r852_update_card_detect(dev);
+}
+
+/* Ack + disable IRQ generation */
+static void r852_disable_irqs(struct r852_device *dev)
+{
+ uint8_t reg;
+ reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
+
+ reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ reg & ~R852_DMA_IRQ_MASK);
+
+ r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
+}
+
+/* Interrupt handler */
+static irqreturn_t r852_irq(int irq, void *data)
+{
+ struct r852_device *dev = (struct r852_device *)data;
+
+ uint8_t card_status, dma_status;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+
+ /* handle card detection interrupts first */
+ card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
+ r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
+
+ if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
+
+ ret = IRQ_HANDLED;
+ dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
+
+		/* we shouldn't receive any interrupts while we wait for
+		   the card to settle */
+ WARN_ON(dev->card_unstable);
+
+ /* disable irqs while card is unstable */
+		/* this will time out an active DMA, but better that than garbage */
+ r852_disable_irqs(dev);
+
+ if (dev->card_unstable)
+ goto out;
+
+		/* let the card state settle a bit, and then do the work */
+ dev->card_unstable = 1;
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(100));
+ goto out;
+ }
+
+
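+	/*
+	 * DMA is a two-stage dance: device <-> internal hw buffer and
+	 * internal hw buffer <-> memory. Each completed stage raises an
+	 * interrupt; at stage 2 the engine is re-armed for the second
+	 * half, and at stage 3 the whole transfer is signalled complete.
+	 */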
+ /* Handle dma interrupts */
+ dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
+
+ if (dma_status & R852_DMA_IRQ_MASK) {
+
+ ret = IRQ_HANDLED;
+
+ if (dma_status & R852_DMA_IRQ_ERROR) {
+ dbg("received dma error IRQ");
+ r852_dma_done(dev, -EIO);
+ complete(&dev->dma_done);
+ goto out;
+ }
+
+ /* received DMA interrupt out of nowhere? */
+ WARN_ON_ONCE(dev->dma_stage == 0);
+
+ if (dev->dma_stage == 0)
+ goto out;
+
+ /* done device access */
+ if (dev->dma_state == DMA_INTERNAL &&
+ (dma_status & R852_DMA_IRQ_INTERNAL)) {
+
+ dev->dma_state = DMA_MEMORY;
+ dev->dma_stage++;
+ }
+
+ /* done memory DMA */
+ if (dev->dma_state == DMA_MEMORY &&
+ (dma_status & R852_DMA_IRQ_MEMORY)) {
+ dev->dma_state = DMA_INTERNAL;
+ dev->dma_stage++;
+ }
+
+ /* Enable 2nd half of dma dance */
+ if (dev->dma_stage == 2)
+ r852_dma_enable(dev);
+
+ /* Operation done */
+ if (dev->dma_stage == 3) {
+ r852_dma_done(dev, 0);
+ complete(&dev->dma_done);
+ }
+ goto out;
+ }
+
+ /* Handle unknown interrupts */
+ if (dma_status)
+ dbg("bad dma IRQ status = %x", dma_status);
+
+ if (card_status & ~R852_CARD_STA_CD)
+ dbg("strange card status = %x", card_status);
+
+out:
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ret;
+}
+
+static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ int error;
+ struct nand_chip *chip;
+ struct r852_device *dev;
+
+ /* pci initialization */
+ error = pci_enable_device(pci_dev);
+
+ if (error)
+ goto error1;
+
+ pci_set_master(pci_dev);
+
+ error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (error)
+ goto error2;
+
+ error = pci_request_regions(pci_dev, DRV_NAME);
+
+ if (error)
+ goto error3;
+
+ error = -ENOMEM;
+
+ /* init nand chip, but register it only on card insert */
+ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+
+ if (!chip)
+ goto error4;
+
+ /* commands */
+ chip->cmd_ctrl = r852_cmdctl;
+ chip->waitfunc = r852_wait;
+ chip->dev_ready = r852_ready;
+
+ /* I/O */
+ chip->read_byte = r852_read_byte;
+ chip->read_buf = r852_read_buf;
+ chip->write_buf = r852_write_buf;
+
+ /* ecc */
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.size = R852_DMA_LEN;
+ chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.strength = 2;
+ chip->ecc.hwctl = r852_ecc_hwctl;
+ chip->ecc.calculate = r852_ecc_calculate;
+ chip->ecc.correct = r852_ecc_correct;
+
+ /* TODO: hack */
+ chip->ecc.read_oob = r852_read_oob;
+
+ /* init our device structure */
+ dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
+
+ if (!dev)
+ goto error5;
+
+ chip->priv = dev;
+ dev->chip = chip;
+ dev->pci_dev = pci_dev;
+ pci_set_drvdata(pci_dev, dev);
+
+ dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
+ &dev->phys_bounce_buffer);
+
+ if (!dev->bounce_buffer)
+ goto error6;
+
+
+ error = -ENODEV;
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+
+ if (!dev->mmio)
+ goto error7;
+
+ error = -ENOMEM;
+ dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+
+ if (!dev->tmp_buffer)
+ goto error8;
+
+ init_completion(&dev->dma_done);
+
+ dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
+
+ if (!dev->card_workqueue)
+ goto error9;
+
+ INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
+
+	/* shut down everything - precaution */
+ r852_engine_disable(dev);
+ r852_disable_irqs(dev);
+
+ r852_dma_test(dev);
+
+ dev->irq = pci_dev->irq;
+ spin_lock_init(&dev->irqlock);
+
+ dev->card_detected = 0;
+ r852_card_update_present(dev);
+
+	/* register the irq handler */
+ error = -ENODEV;
+ if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
+ DRV_NAME, dev))
+ goto error10;
+
+ /* kick initial present test */
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 0);
+
+
+ printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
+ return 0;
+
+error10:
+ destroy_workqueue(dev->card_workqueue);
+error9:
+ kfree(dev->tmp_buffer);
+error8:
+ pci_iounmap(pci_dev, dev->mmio);
+error7:
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+error6:
+ kfree(dev);
+error5:
+ kfree(chip);
+error4:
+ pci_release_regions(pci_dev);
+error3:
+error2:
+ pci_disable_device(pci_dev);
+error1:
+ return error;
+}
+
+static void r852_remove(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ /* Stop detect workqueue -
+ we are going to unregister the device anyway*/
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ destroy_workqueue(dev->card_workqueue);
+
+ /* Unregister the device, this might make more IO */
+ r852_unregister_nand_device(dev);
+
+ /* Stop interrupts */
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, dev);
+
+ /* Cleanup */
+ kfree(dev->tmp_buffer);
+ pci_iounmap(pci_dev, dev->mmio);
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+
+ kfree(dev->chip);
+ kfree(dev);
+
+ /* Shutdown the PCI device */
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+static void r852_shutdown(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ pci_disable_device(pci_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int r852_suspend(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+
+ if (dev->ctlreg & R852_CTL_CARDENABLE)
+ return -EBUSY;
+
+ /* First make sure the detect work is gone */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+
+ /* Turn off the interrupts and stop the device */
+ r852_disable_irqs(dev);
+ r852_engine_disable(dev);
+
+	/* If the card was pulled out just during the suspend, which is
+	   very unlikely, we will remove it on resume; it's too late now
+	   anyway... */
+ dev->card_unstable = 0;
+ return 0;
+}
+
+static int r852_resume(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+
+ r852_disable_irqs(dev);
+ r852_card_update_present(dev);
+ r852_engine_disable(dev);
+
+
+ /* If card status changed, just do the work */
+ if (dev->card_detected != dev->card_registred) {
+ dbg("card was %s during low power state",
+ dev->card_detected ? "added" : "removed");
+
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(1000));
+ return 0;
+ }
+
+ /* Otherwise, initialize the card */
+ if (dev->card_registred) {
+ r852_engine_enable(dev);
+ dev->chip->select_chip(dev->mtd, 0);
+ dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
+ dev->chip->select_chip(dev->mtd, -1);
+ }
+
+ /* Program card detection IRQ */
+ r852_update_card_detect(dev);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id r852_pci_id_tbl[] = {
+
+ { PCI_VDEVICE(RICOH, 0x0852), },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
+
+static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+
+static struct pci_driver r852_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = r852_pci_id_tbl,
+ .probe = r852_probe,
+ .remove = r852_remove,
+ .shutdown = r852_shutdown,
+ .driver.pm = &r852_pm_ops,
+};
+
+module_pci_driver(r852_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
new file mode 100644
index 000000000..e6a21d9d2
--- /dev/null
+++ b/drivers/mtd/nand/r852.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mtd/nand.h>
+#include <linux/spinlock.h>
+
+
+/* nand interface + ecc
+   A byte write/read does one cycle on the nand data lines.
+   A dword write/read does 4 cycles.
+   If R852_CTL_ECC_ACCESS is set in R852_CTL, a dword read returns the
+   results of the ecc correction, provided a DMA read was done before.
+   If a write was done, two dword reads return the generated ecc
+   checksums.
+*/
+#define R852_DATALINE 0x00
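+
+/* Illustrative sketch only (not a verbatim driver excerpt): per the note
+ * above, fetching ecc results amounts to setting R852_CTL_ECC_ACCESS in
+ * R852_CTL and then doing dword reads, e.g.
+ *	readl(dev->mmio + R852_DATALINE);
+ * done twice after a write to collect both generated checksums. */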
+
+/* control register */
+#define R852_CTL 0x04
+#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
+#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
+#define R852_CTL_ON 0x04 /* only seems to control the HD LED, */
+ /* but has to be set at start...*/
+#define R852_CTL_RESET 0x08 /* unknown, set only once at start*/
+#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
+#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
+#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
+#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
+
+/* card detection status */
+#define R852_CARD_STA 0x05
+
+#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
+#define R852_CARD_STA_RO 0x02 /* card is readonly */
+#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
+#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
+#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
+
+/* card detection irq status & enable*/
+#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
+#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
+
+#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
+#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
+#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
+#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
+#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
+#define R852_CARD_IRQ_MASK 0x1D
+
+
+
+/* hardware enable */
+#define R852_HW 0x08
+#define R852_HW_ENABLED 0x01 /* hw enabled */
+#define R852_HW_UNKNOWN 0x80
+
+
+/* dma capabilities */
+#define R852_DMA_CAP 0x09
+#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
+ /* hw is smartmedia */
+#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
+#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
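+
+/* Illustrative decode: a R852_DMA_CAP value of 0xE0 has
+ * R852_SMBIT | R852_DMA1 | R852_DMA2 set, i.e. a smartmedia reader
+ * that supports DMA per the bit rules above. */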
+
+
+/* physical DMA address - 32 bit value*/
+#define R852_DMA_ADDR 0x0C
+
+
+/* dma settings */
+#define R852_DMA_SETTINGS 0x10
+#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
+#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+
+/* dma IRQ status */
+#define R852_DMA_IRQ_STA 0x14
+
+/* dma IRQ enable */
+#define R852_DMA_IRQ_ENABLE 0x18
+
+#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_IRQ_ERROR 0x02 /* an error occurred */
+#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
+
+
+/* ECC syndrome format - a read from reg #0 returns two copies of these for
+ each half of the page.
+ The first byte is the error byte location; the second, the bit location
+ plus flags */
+#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
+#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
+#define R852_ECC_CORRECTABLE 0x20 /* a correctable error exists */
+#define R852_ECC_FAIL 0x40 /* uncorrectable error detected */
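+
+/* Worked example: a syndrome byte pair of 0x1A 0x25 would mean a
+ * correctable error (0x25 & R852_ECC_CORRECTABLE) in byte 0x1A,
+ * at bit 5 (0x25 & R852_ECC_ERR_BIT_MSK). */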
+
+#define R852_DMA_LEN 512
+
+#define DMA_INTERNAL 0
+#define DMA_MEMORY 1
+
+struct r852_device {
+ void __iomem *mmio; /* mmio */
+ struct mtd_info *mtd; /* mtd backpointer */
+ struct nand_chip *chip; /* nand chip backpointer */
+ struct pci_dev *pci_dev; /* pci backpointer */
+
+ /* dma area */
+ dma_addr_t phys_dma_addr; /* bus address of buffer*/
+ struct completion dma_done; /* data transfer done */
+
+ dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */
+ uint8_t *bounce_buffer; /* virtual address of bounce buffer */
+
+ int dma_dir; /* 1 = read, 0 = write */
+ int dma_stage; /* 0 - idle, 1 - first step,
+ 2 - second step */
+
+ int dma_state; /* 0 = internal, 1 = memory */
+ int dma_error; /* dma errors */
+ int dma_usable; /* is it possible to use dma */
+
+ /* card status area */
+ struct delayed_work card_detect_work;
+ struct workqueue_struct *card_workqueue;
+ int card_registred; /* card registered with mtd */
+ int card_detected; /* card detected in slot */
+ int card_unstable; /* whether a card is inserted
+ is not known yet */
+ int readonly; /* card is readonly */
+ int sm; /* Is card smartmedia */
+
+ /* interrupt handling */
+ spinlock_t irqlock; /* IRQ protecting lock */
+ int irq; /* irq num */
+ /* misc */
+ void *tmp_buffer; /* temporary buffer */
+ uint8_t ctlreg; /* cached contents of control reg */
+};
+
+#define DRV_NAME "r852"
+
+
+#define dbg(format, ...) \
+ if (debug) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+
+#define message(format, ...) \
+ printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
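+
+/* Usage sketch (the "debug" variable these macros reference is assumed to
+ * be a module parameter defined in r852.c):
+ *	dbg("card %s", present ? "inserted" : "removed");
+ * prints "r852: card inserted" at KERN_DEBUG whenever debug is non-zero. */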
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
new file mode 100644
index 000000000..0e02be47c
--- /dev/null
+++ b/drivers/mtd/nand/s3c2410.c
@@ -0,0 +1,1142 @@
+/* linux/drivers/mtd/nand/s3c2410.c
+ *
+ * Copyright © 2004-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Samsung S3C2410/S3C2440/S3C2412 NAND driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#define pr_fmt(fmt) "nand-s3c2410: " fmt
+
+#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/platform_data/mtd-nand-s3c2410.h>
+
+#define S3C2410_NFREG(x) (x)
+
+#define S3C2410_NFCONF S3C2410_NFREG(0x00)
+#define S3C2410_NFCMD S3C2410_NFREG(0x04)
+#define S3C2410_NFADDR S3C2410_NFREG(0x08)
+#define S3C2410_NFDATA S3C2410_NFREG(0x0C)
+#define S3C2410_NFSTAT S3C2410_NFREG(0x10)
+#define S3C2410_NFECC S3C2410_NFREG(0x14)
+#define S3C2440_NFCONT S3C2410_NFREG(0x04)
+#define S3C2440_NFCMD S3C2410_NFREG(0x08)
+#define S3C2440_NFADDR S3C2410_NFREG(0x0C)
+#define S3C2440_NFDATA S3C2410_NFREG(0x10)
+#define S3C2440_NFSTAT S3C2410_NFREG(0x20)
+#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
+#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
+#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
+#define S3C2410_NFCONF_EN (1<<15)
+#define S3C2410_NFCONF_INITECC (1<<12)
+#define S3C2410_NFCONF_nFCE (1<<11)
+#define S3C2410_NFCONF_TACLS(x) ((x)<<8)
+#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4)
+#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0)
+#define S3C2410_NFSTAT_BUSY (1<<0)
+#define S3C2440_NFCONF_TACLS(x) ((x)<<12)
+#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8)
+#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4)
+#define S3C2440_NFCONT_INITECC (1<<4)
+#define S3C2440_NFCONT_nFCE (1<<1)
+#define S3C2440_NFCONT_ENABLE (1<<0)
+#define S3C2440_NFSTAT_READY (1<<0)
+#define S3C2412_NFCONF_NANDBOOT (1<<31)
+#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
+#define S3C2412_NFCONT_nFCE0 (1<<1)
+#define S3C2412_NFSTAT_READY (1<<0)
+
+/* new oob placement block for use with hardware ecc generation
+ */
+
+static struct nand_ecclayout nand_hw_eccoob = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {{8, 8}}
+};
+
+/* controller and mtd information */
+
+struct s3c2410_nand_info;
+
+/**
+ * struct s3c2410_nand_mtd - driver MTD structure
+ * @mtd: The MTD instance to pass to the MTD layer.
+ * @chip: The NAND chip information.
+ * @set: The platform information supplied for this set of NAND chips.
+ * @info: Link back to the hardware information.
+ * @scan_res: The result from calling nand_scan_ident().
+*/
+struct s3c2410_nand_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct s3c2410_nand_set *set;
+ struct s3c2410_nand_info *info;
+ int scan_res;
+};
+
+enum s3c_cpu_type {
+ TYPE_S3C2410,
+ TYPE_S3C2412,
+ TYPE_S3C2440,
+};
+
+enum s3c_nand_clk_state {
+ CLOCK_DISABLE = 0,
+ CLOCK_ENABLE,
+ CLOCK_SUSPEND,
+};
+
+/* overview of the s3c2410 nand state */
+
+/**
+ * struct s3c2410_nand_info - NAND controller state.
+ * @mtds: An array of MTD instances on this controller.
+ * @platform: The platform data for this board.
+ * @device: The platform device we bound to.
+ * @clk: The clock resource for this controller.
+ * @regs: The area mapped for the hardware registers.
+ * @sel_reg: Pointer to the register controlling the NAND selection.
+ * @sel_bit: The bit in @sel_reg to select the NAND chip.
+ * @mtd_count: The number of MTDs created from this controller.
+ * @save_sel: The contents of @sel_reg to be saved over suspend.
+ * @clk_rate: The clock rate from @clk.
+ * @clk_state: The current clock state.
+ * @cpu_type: The exact type of this controller.
+ */
+struct s3c2410_nand_info {
+ /* mtd info */
+ struct nand_hw_control controller;
+ struct s3c2410_nand_mtd *mtds;
+ struct s3c2410_platform_nand *platform;
+
+ /* device info */
+ struct device *device;
+ struct clk *clk;
+ void __iomem *regs;
+ void __iomem *sel_reg;
+ int sel_bit;
+ int mtd_count;
+ unsigned long save_sel;
+ unsigned long clk_rate;
+ enum s3c_nand_clk_state clk_state;
+
+ enum s3c_cpu_type cpu_type;
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
+};
+
+/* conversion functions */
+
+static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct s3c2410_nand_mtd, mtd);
+}
+
+static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
+{
+ return s3c2410_nand_mtd_toours(mtd)->info;
+}
+
+static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
+{
+ return platform_get_drvdata(dev);
+}
+
+static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
+{
+ return dev_get_platdata(&dev->dev);
+}
+
+static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
+{
+#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
+ * @info: The controller instance.
+ * @new_state: State to which clock should be set.
+ */
+static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
+ enum s3c_nand_clk_state new_state)
+{
+ if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
+ return;
+
+ if (info->clk_state == CLOCK_ENABLE) {
+ if (new_state != CLOCK_ENABLE)
+ clk_disable_unprepare(info->clk);
+ } else {
+ if (new_state == CLOCK_ENABLE)
+ clk_prepare_enable(info->clk);
+ }
+
+ info->clk_state = new_state;
+}
+
+/* timing calculations */
+
+#define NS_IN_KHZ 1000000
+
+/**
+ * s3c_nand_calc_rate - calculate timing data.
+ * @wanted: The cycle time in nanoseconds.
+ * @clk: The clock rate in kHz.
+ * @max: The maximum divider value.
+ *
+ * Calculate the timing value from the given parameters.
+ */
+static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
+{
+ int result;
+
+ result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
+
+ pr_debug("result %d from %ld, %d\n", result, clk, wanted);
+
+ if (result > max) {
+ pr_err("%d ns is too big for current clock rate %ld\n",
+ wanted, clk);
+ return -1;
+ }
+
+ if (result < 1)
+ result = 1;
+
+ return result;
+}
+
+#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
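+
+/* Worked example: with a 66 MHz (66000 kHz) clock, a wanted cycle time of
+ * 40ns gives s3c_nand_calc_rate(40, 66000, 8) ==
+ * DIV_ROUND_UP(40 * 66000, 1000000) == 3 ticks, and
+ * to_ns(3, 66000) == 45ns for the timing actually programmed. */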
+
+/* controller setup */
+
+/**
+ * s3c2410_nand_setrate - setup controller timing information.
+ * @info: The controller instance.
+ *
+ * Given the information supplied by the platform, calculate and set
+ * the necessary timing registers in the hardware to generate the
+ * necessary timing cycles to the hardware.
+ */
+static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
+{
+ struct s3c2410_platform_nand *plat = info->platform;
+ int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
+ int tacls, twrph0, twrph1;
+ unsigned long clkrate = clk_get_rate(info->clk);
+ unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
+ unsigned long flags;
+
+ /* calculate the timing information for the controller */
+
+ info->clk_rate = clkrate;
+ clkrate /= 1000; /* turn clock into kHz for ease of use */
+
+ if (plat != NULL) {
+ tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
+ twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
+ twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
+ } else {
+ /* default timings */
+ tacls = tacls_max;
+ twrph0 = 8;
+ twrph1 = 8;
+ }
+
+ if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
+ dev_err(info->device, "cannot get suitable timings\n");
+ return -EINVAL;
+ }
+
+ dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
+ tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
+ twrph1, to_ns(twrph1, clkrate));
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ mask = (S3C2410_NFCONF_TACLS(3) |
+ S3C2410_NFCONF_TWRPH0(7) |
+ S3C2410_NFCONF_TWRPH1(7));
+ set = S3C2410_NFCONF_EN;
+ set |= S3C2410_NFCONF_TACLS(tacls - 1);
+ set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
+ set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
+ mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
+ S3C2440_NFCONF_TWRPH0(7) |
+ S3C2440_NFCONF_TWRPH1(7));
+
+ set = S3C2440_NFCONF_TACLS(tacls - 1);
+ set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
+ set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
+ break;
+
+ default:
+ BUG();
+ }
+
+ local_irq_save(flags);
+
+ cfg = readl(info->regs + S3C2410_NFCONF);
+ cfg &= ~mask;
+ cfg |= set;
+ writel(cfg, info->regs + S3C2410_NFCONF);
+
+ local_irq_restore(flags);
+
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
+ return 0;
+}
+
+/**
+ * s3c2410_nand_inithw - basic hardware initialisation
+ * @info: The hardware state.
+ *
+ * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
+ * to setup the hardware access speeds and set the controller to be enabled.
+*/
+static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
+{
+ int ret;
+
+ ret = s3c2410_nand_setrate(info);
+ if (ret < 0)
+ return ret;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ default:
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
+ /* enable the controller and de-assert nFCE */
+
+ writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
+ }
+
+ return 0;
+}
+
+/**
+ * s3c2410_nand_select_chip - select the given nand chip
+ * @mtd: The MTD instance for this chip.
+ * @chip: The chip number.
+ *
+ * This is called by the MTD layer to either select a given chip for the
+ * @mtd instance, or to indicate that the access has finished and the
+ * chip can be de-selected.
+ *
+ * The routine ensures that the nFCE line is correctly setup, and any
+ * platform specific selection code is called to route nFCE to the specific
+ * chip.
+ */
+static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct s3c2410_nand_info *info;
+ struct s3c2410_nand_mtd *nmtd;
+ struct nand_chip *this = mtd->priv;
+ unsigned long cur;
+
+ nmtd = this->priv;
+ info = nmtd->info;
+
+ if (chip != -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+
+ cur = readl(info->sel_reg);
+
+ if (chip == -1) {
+ cur |= info->sel_bit;
+ } else {
+ if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
+ dev_err(info->device, "invalid chip %d\n", chip);
+ return;
+ }
+
+ if (info->platform != NULL) {
+ if (info->platform->select_chip != NULL)
+ (info->platform->select_chip) (nmtd->set, chip);
+ }
+
+ cur &= ~info->sel_bit;
+ }
+
+ writel(cur, info->sel_reg);
+
+ if (chip == -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+}
+
+/* s3c2410_nand_hwcontrol
+ *
+ * Issue command and address cycles to the chip
+*/
+
+static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, info->regs + S3C2410_NFCMD);
+ else
+ writeb(cmd, info->regs + S3C2410_NFADDR);
+}
+
+/* command and control functions */
+
+static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, info->regs + S3C2440_NFCMD);
+ else
+ writeb(cmd, info->regs + S3C2440_NFADDR);
+}
+
+/* s3c2410_nand_devready()
+ *
+ * returns 0 if the nand is busy, 1 if it is ready
+*/
+
+static int s3c2410_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
+}
+
+static int s3c2440_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
+}
+
+static int s3c2412_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
+}
+
+/* ECC handling functions */
+
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
+static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned int diff0, diff1, diff2;
+ unsigned int bit, byte;
+
+ pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc);
+
+ diff0 = read_ecc[0] ^ calc_ecc[0];
+ diff1 = read_ecc[1] ^ calc_ecc[1];
+ diff2 = read_ecc[2] ^ calc_ecc[2];
+
+ pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
+ __func__, 3, read_ecc, 3, calc_ecc,
+ diff0, diff1, diff2);
+
+ if (diff0 == 0 && diff1 == 0 && diff2 == 0)
+ return 0; /* ECC is ok */
+
+ /* sometimes people do not think about using the ECC, so check
+ * to see if we have a 0xff,0xff,0xff read ECC and then ignore
+ * the error, on the assumption that this is an un-ECCed page.
+ */
+ if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
+ && info->platform->ignore_unset_ecc)
+ return 0;
+
+ /* Can we correct this ECC (i.e. one row and one column change)?
+ * Note: this is similar to the 256-byte ECC scheme on smartmedia */
+
+ if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 &&
+ ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 &&
+ ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
+ /* calculate the bit position of the error */
+
+ bit = ((diff2 >> 3) & 1) |
+ ((diff2 >> 4) & 2) |
+ ((diff2 >> 5) & 4);
+
+ /* calculate the byte position of the error */
+
+ byte = ((diff2 << 7) & 0x100) |
+ ((diff1 << 0) & 0x80) |
+ ((diff1 << 1) & 0x40) |
+ ((diff1 << 2) & 0x20) |
+ ((diff1 << 3) & 0x10) |
+ ((diff0 >> 4) & 0x08) |
+ ((diff0 >> 3) & 0x04) |
+ ((diff0 >> 2) & 0x02) |
+ ((diff0 >> 1) & 0x01);
+
+ dev_dbg(info->device, "correcting error bit %d, byte %d\n",
+ bit, byte);
+
+ dat[byte] ^= (1 << bit);
+ return 1;
+ }
+
+ /* if there is only one bit difference in the ECC, then
+ * only a single row or column parity bit has changed, which
+ * means the error is most probably in the ECC itself */
+
+ diff0 |= (diff1 << 8);
+ diff0 |= (diff2 << 16);
+
+ if ((diff0 & ~(1<<fls(diff0))) == 0)
+ return 1;
+
+ return -1;
+}
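+
+/* Note on the 0x55 tests above (standard Hamming-style ECC layout): the
+ * ECC stores each parity bit next to its complement, so a single-bit data
+ * error flips exactly one bit of every pair; diff ^ (diff >> 1) then has
+ * the low bit of every pair set, which is what the "& 0x55" compare
+ * verifies. */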
+
+/* ECC functions
+ *
+ * These allow the s3c2410 and s3c2440 to use the controller's ECC
+ * generator block to checksum the data as it passes through.
+*/
+
+static void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ctrl;
+
+ ctrl = readl(info->regs + S3C2410_NFCONF);
+ ctrl |= S3C2410_NFCONF_INITECC;
+ writel(ctrl, info->regs + S3C2410_NFCONF);
+}
+
+static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ctrl;
+
+ ctrl = readl(info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
+ info->regs + S3C2440_NFCONT);
+}
+
+static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ctrl;
+
+ ctrl = readl(info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
+}
+
+static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
+ ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
+ ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
+
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
+
+ return 0;
+}
+
+static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
+
+ ecc_code[0] = ecc;
+ ecc_code[1] = ecc >> 8;
+ ecc_code[2] = ecc >> 16;
+
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
+
+ return 0;
+}
+
+static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
+
+ ecc_code[0] = ecc;
+ ecc_code[1] = ecc >> 8;
+ ecc_code[2] = ecc >> 16;
+
+ pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
+
+ return 0;
+}
+#endif
+
+/* override the standard functions for a little more speed. We can
+ * use block reads/writes to move the data buffers to/from the controller.
+*/
+
+static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ readsb(this->IO_ADDR_R, buf, len);
+}
+
+static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup if we've got less than a word to do */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--)
+ *buf++ = readb(info->regs + S3C2440_NFDATA);
+ }
+}
+
+static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
+{
+ struct nand_chip *this = mtd->priv;
+ writesb(this->IO_ADDR_W, buf, len);
+}
+
+static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup any fractional write */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--, buf++)
+ writeb(*buf, info->regs + S3C2440_NFDATA);
+ }
+}
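+
+/* Example: a 7-byte buffer goes out as one 32-bit FIFO write (7 >> 2 == 1)
+ * followed by three single-byte writes for the 7 & 3 tail; the read path
+ * above uses the same word/tail split. */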
+
+/* cpufreq driver support */
+
+#ifdef CONFIG_CPU_FREQ
+
+static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct s3c2410_nand_info *info;
+ unsigned long newclk;
+
+ info = container_of(nb, struct s3c2410_nand_info, freq_transition);
+ newclk = clk_get_rate(info->clk);
+
+ if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) ||
+ (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) {
+ s3c2410_nand_setrate(info);
+ }
+
+ return 0;
+}
+
+static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
+{
+ info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition;
+
+ return cpufreq_register_notifier(&info->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+{
+ cpufreq_unregister_notifier(&info->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
+{
+ return 0;
+}
+
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+{
+}
+#endif
+
+/* device management functions */
+
+static int s3c24xx_nand_remove(struct platform_device *pdev)
+{
+ struct s3c2410_nand_info *info = to_nand_info(pdev);
+
+ if (info == NULL)
+ return 0;
+
+ s3c2410_nand_cpufreq_deregister(info);
+
+ /* Release all our mtds and their partitions, then go through
+ * freeing the resources used
+ */
+
+ if (info->mtds != NULL) {
+ struct s3c2410_nand_mtd *ptr = info->mtds;
+ int mtdno;
+
+ for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
+ pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
+ nand_release(&ptr->mtd);
+ }
+ }
+
+ /* free the common resources */
+
+ if (!IS_ERR(info->clk))
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
+
+ return 0;
+}
+
+static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *mtd,
+ struct s3c2410_nand_set *set)
+{
+ if (set) {
+ mtd->mtd.name = set->name;
+
+ return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
+ set->partitions, set->nr_partitions);
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * s3c2410_nand_init_chip - initialise a single instance of a chip
+ * @info: The base NAND controller the chip is on.
+ * @nmtd: The new controller MTD instance to fill in.
+ * @set: The information passed from the board specific platform data.
+ *
+ * Initialise the given @nmtd from the information in @info and @set. This
+ * readies the structure for use with the MTD layer functions by ensuring
+ * all pointers are setup and the necessary control routines selected.
+ */
+static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *nmtd,
+ struct s3c2410_nand_set *set)
+{
+ struct nand_chip *chip = &nmtd->chip;
+ void __iomem *regs = info->regs;
+
+ chip->write_buf = s3c2410_nand_write_buf;
+ chip->read_buf = s3c2410_nand_read_buf;
+ chip->select_chip = s3c2410_nand_select_chip;
+ chip->chip_delay = 50;
+ chip->priv = nmtd;
+ chip->options = set->options;
+ chip->controller = &info->controller;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->IO_ADDR_W = regs + S3C2410_NFDATA;
+ info->sel_reg = regs + S3C2410_NFCONF;
+ info->sel_bit = S3C2410_NFCONF_nFCE;
+ chip->cmd_ctrl = s3c2410_nand_hwcontrol;
+ chip->dev_ready = s3c2410_nand_devready;
+ break;
+
+ case TYPE_S3C2440:
+ chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2440_NFCONT_nFCE;
+ chip->cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->dev_ready = s3c2440_nand_devready;
+ chip->read_buf = s3c2440_nand_read_buf;
+ chip->write_buf = s3c2440_nand_write_buf;
+ break;
+
+ case TYPE_S3C2412:
+ chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2412_NFCONT_nFCE0;
+ chip->cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->dev_ready = s3c2412_nand_devready;
+
+ if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
+ dev_info(info->device, "System booted from NAND\n");
+
+ break;
+ }
+
+ chip->IO_ADDR_R = chip->IO_ADDR_W;
+
+ nmtd->info = info;
+ nmtd->mtd.priv = chip;
+ nmtd->mtd.owner = THIS_MODULE;
+ nmtd->set = set;
+
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.strength = 1;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2412:
+ chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
+ }
+#else
+ chip->ecc.mode = NAND_ECC_SOFT;
+#endif
+
+ if (set->ecc_layout != NULL)
+ chip->ecc.layout = set->ecc_layout;
+
+ if (set->disable_ecc)
+ chip->ecc.mode = NAND_ECC_NONE;
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_NONE:
+ dev_info(info->device, "NAND ECC disabled\n");
+ break;
+ case NAND_ECC_SOFT:
+ dev_info(info->device, "NAND soft ECC\n");
+ break;
+ case NAND_ECC_HW:
+ dev_info(info->device, "NAND hardware ECC\n");
+ break;
+ default:
+ dev_info(info->device, "NAND ECC UNKNOWN\n");
+ break;
+ }
+
+ /* If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND, and also skip the
+ * full NAND scan that can take 1/2s or so. Little things... */
+ if (set->flash_bbt) {
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->options |= NAND_SKIP_BBTSCAN;
+ }
+}
+
+/**
+ * s3c2410_nand_update_chip - post probe update
+ * @info: The controller instance.
+ * @nmtd: The driver version of the MTD instance.
+ *
+ * This routine is called after the chip probe has successfully completed
+ * and the relevant per-chip information updated. This call ensures that
+ * we update the internal state accordingly.
+ *
+ * The internal state is currently limited to the ECC state information.
+*/
+static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *nmtd)
+{
+ struct nand_chip *chip = &nmtd->chip;
+
+ dev_dbg(info->device, "chip %p => page shift %d\n",
+ chip, chip->page_shift);
+
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return;
+
+ /* change the behaviour depending on whether we are using
+ * the large or small page nand device */
+
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.layout = &nand_hw_eccoob;
+ }
+}
+
+/* s3c24xx_nand_probe
+ *
+ * called by the device layer when it finds a device matching
+ * one our driver can handle. This code checks to see if
+ * it can allocate all necessary resources, then calls the
+ * nand layer to look for devices.
+*/
+static int s3c24xx_nand_probe(struct platform_device *pdev)
+{
+ struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
+ enum s3c_cpu_type cpu_type;
+ struct s3c2410_nand_info *info;
+ struct s3c2410_nand_mtd *nmtd;
+ struct s3c2410_nand_set *sets;
+ struct resource *res;
+ int err = 0;
+ int size;
+ int nr_sets;
+ int setno;
+
+ cpu_type = platform_get_device_id(pdev)->driver_data;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ /* get the clock source and enable it */
+
+ info->clk = devm_clk_get(&pdev->dev, "nand");
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+
+ /* allocate and map the resource */
+
+ /* currently we assume we have the one resource */
+ res = pdev->resource;
+ size = resource_size(res);
+
+ info->device = &pdev->dev;
+ info->platform = plat;
+ info->cpu_type = cpu_type;
+
+ info->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->regs)) {
+ err = PTR_ERR(info->regs);
+ goto exit_error;
+ }
+
+ dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
+
+ /* initialise the hardware */
+
+ err = s3c2410_nand_inithw(info);
+ if (err != 0)
+ goto exit_error;
+
+ sets = (plat != NULL) ? plat->sets : NULL;
+ nr_sets = (plat != NULL) ? plat->nr_sets : 1;
+
+ info->mtd_count = nr_sets;
+
+ /* allocate our information */
+
+ size = nr_sets * sizeof(*info->mtds);
+ info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (info->mtds == NULL) {
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ /* initialise all possible chips */
+
+ nmtd = info->mtds;
+
+ for (setno = 0; setno < nr_sets; setno++, nmtd++) {
+ pr_debug("initialising set %d (%p, info %p)\n",
+ setno, nmtd, info);
+
+ s3c2410_nand_init_chip(info, nmtd, sets);
+
+ nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
+ (sets) ? sets->nr_chips : 1,
+ NULL);
+
+ if (nmtd->scan_res == 0) {
+ s3c2410_nand_update_chip(info, nmtd);
+ nand_scan_tail(&nmtd->mtd);
+ s3c2410_nand_add_partition(info, nmtd, sets);
+ }
+
+ if (sets != NULL)
+ sets++;
+ }
+
+ err = s3c2410_nand_cpufreq_register(info);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init cpufreq support\n");
+ goto exit_error;
+ }
+
+ if (allow_clk_suspend(info)) {
+ dev_info(&pdev->dev, "clock idle support enabled\n");
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+ }
+
+ return 0;
+
+ exit_error:
+ s3c24xx_nand_remove(pdev);
+
+ if (err == 0)
+ err = -EINVAL;
+ return err;
+}
+
+/* PM Support */
+#ifdef CONFIG_PM
+
+static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+
+ if (info) {
+ info->save_sel = readl(info->sel_reg);
+
+ /* For the moment, we must ensure nFCE is high during
+ * the time we are suspended. This really should be
+ * handled by suspending the MTDs we are using, but
+ * that is currently not the case. */
+
+ writel(info->save_sel | info->sel_bit, info->sel_reg);
+
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
+ }
+
+ return 0;
+}
+
+static int s3c24xx_nand_resume(struct platform_device *dev)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+ unsigned long sel;
+
+ if (info) {
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+ s3c2410_nand_inithw(info);
+
+ /* Restore the state of the nFCE line. */
+
+ sel = readl(info->sel_reg);
+ sel &= ~info->sel_bit;
+ sel |= info->save_sel & info->sel_bit;
+ writel(sel, info->sel_reg);
+
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+ }
+
+ return 0;
+}
+
+#else
+#define s3c24xx_nand_suspend NULL
+#define s3c24xx_nand_resume NULL
+#endif
+
+/* driver device registration */
+
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-nand",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-nand",
+ .driver_data = TYPE_S3C2440,
+ }, {
+ .name = "s3c2412-nand",
+ .driver_data = TYPE_S3C2412,
+ }, {
+ .name = "s3c6400-nand",
+ .driver_data = TYPE_S3C2412, /* compatible with 2412 */
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+
+static struct platform_driver s3c24xx_nand_driver = {
+ .probe = s3c24xx_nand_probe,
+ .remove = s3c24xx_nand_remove,
+ .suspend = s3c24xx_nand_suspend,
+ .resume = s3c24xx_nand_resume,
+ .id_table = s3c24xx_driver_ids,
+ .driver = {
+ .name = "s3c24xx-nand",
+ },
+};
+
+module_platform_driver(s3c24xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
new file mode 100644
index 000000000..c3ce81c1a
--- /dev/null
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -0,0 +1,1200 @@
+/*
+ * SuperH FLCTL nand controller
+ *
+ * Copyright (c) 2008 Renesas Solutions Corp.
+ * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
+ *
+ * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sh_flctl.h>
+
+static struct nand_ecclayout flctl_4secc_oob_16 = {
+ .eccbytes = 10,
+ .eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+ .oobfree = {
+ {.offset = 12,
+ .length = 4} },
+};
+
+static struct nand_ecclayout flctl_4secc_oob_64 = {
+ .eccbytes = 4 * 10,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
+ .oobfree = {
+ {.offset = 2, .length = 4},
+ {.offset = 16, .length = 6},
+ {.offset = 32, .length = 6},
+ {.offset = 48, .length = 6} },
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr flctl_4secc_smallpage = {
+ .options = NAND_BBT_SCAN2NDPAGE,
+ .offs = 11,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
+
+static struct nand_bbt_descr flctl_4secc_largepage = {
+ .options = NAND_BBT_SCAN2NDPAGE,
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern,
+};
+
+static void empty_fifo(struct sh_flctl *flctl)
+{
+ writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
+ writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+}
+
+static void start_translation(struct sh_flctl *flctl)
+{
+ writeb(TRSTRT, FLTRCR(flctl));
+}
+
+static void timeout_error(struct sh_flctl *flctl, const char *str)
+{
+ dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
+}
+
+static void wait_completion(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ if (readb(FLTRCR(flctl)) & TREND) {
+ writeb(0x0, FLTRCR(flctl));
+ return;
+ }
+ udelay(1);
+ }
+
+ timeout_error(flctl, __func__);
+ writeb(0x0, FLTRCR(flctl));
+}
+
+static void flctl_dma_complete(void *param)
+{
+ struct sh_flctl *flctl = param;
+
+ complete(&flctl->dma_complete);
+}
+
+static void flctl_release_dma(struct sh_flctl *flctl)
+{
+ if (flctl->chan_fifo0_rx) {
+ dma_release_channel(flctl->chan_fifo0_rx);
+ flctl->chan_fifo0_rx = NULL;
+ }
+ if (flctl->chan_fifo0_tx) {
+ dma_release_channel(flctl->chan_fifo0_tx);
+ flctl->chan_fifo0_tx = NULL;
+ }
+}
+
+static void flctl_setup_dma(struct sh_flctl *flctl)
+{
+ dma_cap_mask_t mask;
+ struct dma_slave_config cfg;
+ struct platform_device *pdev = flctl->pdev;
+ struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ if (!pdata)
+ return;
+
+ if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
+ return;
+
+ /* We can only either use DMA for both Tx and Rx or not use it at all */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(uintptr_t)pdata->slave_id_fifo0_tx);
+ dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
+ flctl->chan_fifo0_tx);
+
+ if (!flctl->chan_fifo0_tx)
+ return;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(uintptr_t)pdata->slave_id_fifo0_rx);
+ dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
+ flctl->chan_fifo0_rx);
+
+ if (!flctl->chan_fifo0_rx)
+ goto err;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.dst_addr = 0;
+ cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
+ ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ init_completion(&flctl->dma_complete);
+
+ return;
+
+err:
+ flctl_release_dma(flctl);
+}
+
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t addr = 0;
+
+ if (column == -1) {
+ addr = page_addr; /* ERASE1 */
+ } else if (page_addr != -1) {
+ /* SEQIN, READ0, etc.. */
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ if (flctl->page_size) {
+ addr = column & 0x0FFF;
+ addr |= (page_addr & 0xff) << 16;
+ addr |= ((page_addr >> 8) & 0xff) << 24;
+ /* bigger than 128MB */
+ if (flctl->rw_ADRCNT == ADRCNT2_E) {
+ uint32_t addr2;
+ addr2 = (page_addr >> 16) & 0xff;
+ writel(addr2, FLADR2(flctl));
+ }
+ } else {
+ addr = column;
+ addr |= (page_addr & 0xff) << 8;
+ addr |= ((page_addr >> 8) & 0xff) << 16;
+ addr |= ((page_addr >> 16) & 0xff) << 24;
+ }
+ }
+ writel(addr, FLADR(flctl));
+}
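+
+/* Worked example (large page, <= 128MB so no FLADR2 write): column 0x123
+ * of page 0x4567 is encoded as
+ * FLADR = 0x123 | (0x67 << 16) | (0x45 << 24) = 0x45670123. */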
+
+static void wait_rfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ uint32_t val;
+ /* check FIFO */
+ val = readl(FLDTCNTR(flctl)) >> 16;
+ if (val & 0xFF)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static void wait_wfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t len, timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ /* check FIFO */
+ len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
+ if (len >= 4)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static enum flctl_ecc_res_t wait_recfifo_ready
+ (struct sh_flctl *flctl, int sector_number)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+ void __iomem *ecc_reg[4];
+ int i;
+ int state = FL_SUCCESS;
+ uint32_t data, size;
+
+ /*
+ * First this loop checks FLDTCNTR to see if we are ready to read out
+ * the oob data. This is the case if either all went fine without errors,
+ * or the bottom part of the loop corrected the errors or marked them as
+ * uncorrectable and the controller was given time to push the data into
+ * the FIFO.
+ */
+ while (timeout--) {
+ /* check if all is ok and we can read out the OOB */
+ size = readl(FLDTCNTR(flctl)) >> 24;
+ if ((size & 0xFF) == 4)
+ return state;
+
+ /* check if a correction code has been calculated */
+ if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
+ /*
+ * either we wait for the fifo to be filled or a
+ * correction pattern is being generated
+ */
+ udelay(1);
+ continue;
+ }
+
+ /* check for an uncorrectable error */
+ if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
+ /* check if we face a non-empty page */
+ for (i = 0; i < 512; i++) {
+ if (flctl->done_buff[i] != 0xff) {
+ state = FL_ERROR; /* can't correct */
+ break;
+ }
+ }
+
+ if (state == FL_SUCCESS)
+ dev_dbg(&flctl->pdev->dev,
+ "reading empty sector %d, ecc error ignored\n",
+ sector_number);
+
+ writel(0, FL4ECCCR(flctl));
+ continue;
+ }
+
+ /* start error correction */
+ ecc_reg[0] = FL4ECCRESULT0(flctl);
+ ecc_reg[1] = FL4ECCRESULT1(flctl);
+ ecc_reg[2] = FL4ECCRESULT2(flctl);
+ ecc_reg[3] = FL4ECCRESULT3(flctl);
+
+ for (i = 0; i < 3; i++) {
+ uint8_t org;
+ unsigned int index;
+
+ data = readl(ecc_reg[i]);
+
+ if (flctl->page_size)
+ index = (512 * sector_number) +
+ (data >> 16);
+ else
+ index = data >> 16;
+
+ org = flctl->done_buff[index];
+ flctl->done_buff[index] = org ^ (data & 0xFF);
+ }
+ state = FL_REPAIRABLE;
+ writel(0, FL4ECCCR(flctl));
+ }
+
+ timeout_error(flctl, __func__);
+ return FL_TIMEOUT; /* timeout */
+}
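+
+/* As the correction loop above uses it, each FL4ECCRESULTn word packs the
+ * failing byte offset in bits [31:16] and an XOR repair pattern in bits
+ * [7:0]; org ^ pattern rewrites the corrected byte in done_buff. */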
+
+static void wait_wecfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+ uint32_t len;
+
+ while (timeout--) {
+ /* check FLECFIFO */
+ len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
+ if (len >= 4)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
+ int len, enum dma_data_direction dir)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan;
+ enum dma_transfer_direction tr_dir;
+ dma_addr_t dma_addr;
+ dma_cookie_t cookie = -EINVAL;
+ uint32_t reg;
+ int ret;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = flctl->chan_fifo0_rx;
+ tr_dir = DMA_DEV_TO_MEM;
+ } else {
+ chan = flctl->chan_fifo0_tx;
+ tr_dir = DMA_MEM_TO_DEV;
+ }
+
+ dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+
+ if (dma_addr)
+ desc = dmaengine_prep_slave_single(chan, dma_addr, len,
+ tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (desc) {
+ reg = readl(FLINTDMACR(flctl));
+ reg |= DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ desc->callback = flctl_dma_complete;
+ desc->callback_param = flctl;
+ cookie = dmaengine_submit(desc);
+
+ dma_async_issue_pending(chan);
+ } else {
+ /* DMA failed, fall back to PIO */
+ flctl_release_dma(flctl);
+ dev_warn(&flctl->pdev->dev,
+ "DMA failed, falling back to PIO\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&flctl->dma_complete,
+ msecs_to_jiffies(3000));
+
+ if (ret <= 0) {
+ dmaengine_terminate_all(chan);
+ dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+ }
+
+out:
+ reg = readl(FLINTDMACR(flctl));
+ reg &= ~DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ dma_unmap_single(chan->device->dev, dma_addr, len, dir);
+
+ /* ret > 0 is success */
+ return ret;
+}
+
+static void read_datareg(struct sh_flctl *flctl, int offset)
+{
+ unsigned long data;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ wait_completion(flctl);
+
+ data = readl(FLDATAR(flctl));
+ *buf = le32_to_cpu(data);
+}
+
+static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_rx && rlen >= 32 &&
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
+ goto convert; /* DMA success */
+
+ /* do polling transfer */
+ for (i = 0; i < len_4align; i++) {
+ wait_rfifo_ready(flctl);
+ buf[i] = readl(FLDTFIFO(flctl));
+ }
+
+convert:
+ for (i = 0; i < len_4align; i++)
+ buf[i] = be32_to_cpu(buf[i]);
+}
+
+static enum flctl_ecc_res_t read_ecfiforeg
+ (struct sh_flctl *flctl, uint8_t *buff, int sector)
+{
+ int i;
+ enum flctl_ecc_res_t res;
+ unsigned long *ecc_buf = (unsigned long *)buff;
+
+ res = wait_recfifo_ready(flctl, sector);
+
+ if (res != FL_ERROR) {
+ for (i = 0; i < 4; i++) {
+ ecc_buf[i] = readl(FLECFIFO(flctl));
+ ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+ }
+ }
+
+ return res;
+}
+
+static void write_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+ for (i = 0; i < len_4align; i++) {
+ wait_wfifo_ready(flctl);
+ writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
+ }
+}
+
+static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+
+ for (i = 0; i < len_4align; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_tx && rlen >= 32 &&
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
+ return; /* DMA success */
+
+ /* do polling transfer */
+ for (i = 0; i < len_4align; i++) {
+ wait_wecfifo_ready(flctl);
+ writel(buf[i], FLECFIFO(flctl));
+ }
+}
+
+static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
+ uint32_t flcmdcr_val, addr_len_bytes = 0;
+
+ /* Set the SNAND bit if the page size is 2048 bytes */
+ if (flctl->page_size)
+ flcmncr_val |= SNAND_E;
+ else
+ flcmncr_val &= ~SNAND_E;
+
+ /* default FLCMDCR val */
+ flcmdcr_val = DOCMD1_E | DOADR_E;
+
+ /* Set for FLCMDCR */
+ switch (cmd) {
+ case NAND_CMD_ERASE1:
+ addr_len_bytes = flctl->erase_ADRCNT;
+ flcmdcr_val |= DOCMD2_E;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ case NAND_CMD_RNDOUT:
+ addr_len_bytes = flctl->rw_ADRCNT;
+ flcmdcr_val |= CDSRC_E;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ flcmncr_val |= SEL_16BIT;
+ break;
+ case NAND_CMD_SEQIN:
+ /* This is the case where cmd is READ0, READ1 or READOOB */
+ flcmdcr_val &= ~DOADR_E; /* ONLY execute 1st cmd */
+ break;
+ case NAND_CMD_PAGEPROG:
+ addr_len_bytes = flctl->rw_ADRCNT;
+ flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ flcmncr_val |= SEL_16BIT;
+ break;
+ case NAND_CMD_READID:
+ flcmncr_val &= ~SNAND_E;
+ flcmdcr_val |= CDSRC_E;
+ addr_len_bytes = ADRCNT_1;
+ break;
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RESET:
+ flcmncr_val &= ~SNAND_E;
+ flcmdcr_val &= ~(DOADR_E | DOSR_E);
+ break;
+ default:
+ break;
+ }
+
+ /* Set address bytes parameter */
+ flcmdcr_val |= addr_len_bytes;
+
+ /* Now actually write */
+ writel(flcmncr_val, FLCMNCR(flctl));
+ writel(flcmdcr_val, FLCMDCR(flctl));
+ writel(flcmcdr_val, FLCMCDR(flctl));
+}
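+
+/* Example pairing, as used by the callers below: a large-page read passes
+ * flcmcdr_val = (NAND_CMD_READSTART << 8) | NAND_CMD_READ0, i.e. both
+ * halves of the two-cycle 0x00/0x30 read sequence in one register. */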
+
+static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ chip->read_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int sector, page_sectors;
+ enum flctl_ecc_res_t ecc_result;
+
+ page_sectors = flctl->page_size ? 4 : 1;
+
+ set_cmd_regs(mtd, NAND_CMD_READ0,
+ (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
+ FLCMNCR(flctl));
+ writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+ writel(page_addr << 2, FLADR(flctl));
+
+ empty_fifo(flctl);
+ start_translation(flctl);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ read_fiforeg(flctl, 512, 512 * sector);
+
+ ecc_result = read_ecfiforeg(flctl,
+ &flctl->done_buff[mtd->writesize + 16 * sector],
+ sector);
+
+ switch (ecc_result) {
+ case FL_REPAIRABLE:
+ dev_info(&flctl->pdev->dev,
+ "applied ecc on page 0x%x\n", page_addr);
+ flctl->mtd.ecc_stats.corrected++;
+ break;
+ case FL_ERROR:
+ dev_warn(&flctl->pdev->dev,
+ "page 0x%x contains corrupted data\n",
+ page_addr);
+ flctl->mtd.ecc_stats.failed++;
+ break;
+ default:
+ ;
+ }
+ }
+
+ wait_completion(flctl);
+
+ writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
+ FLCMNCR(flctl));
+}
+
+static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_sectors = flctl->page_size ? 4 : 1;
+ int i;
+
+ set_cmd_regs(mtd, NAND_CMD_READ0,
+ (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+ empty_fifo(flctl);
+
+ for (i = 0; i < page_sectors; i++) {
+ set_addr(mtd, (512 + 16) * i + 512, page_addr);
+ writel(16, FLDTCNTR(flctl));
+
+ start_translation(flctl);
+ read_fiforeg(flctl, 16, 16 * i);
+ wait_completion(flctl);
+ }
+}
+
+static void execmd_write_page_sector(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_addr = flctl->seqin_page_addr;
+ int sector, page_sectors;
+
+ page_sectors = flctl->page_size ? 4 : 1;
+
+ set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+ (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+ empty_fifo(flctl);
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+ writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+ writel(page_addr << 2, FLADR(flctl));
+ start_translation(flctl);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ write_fiforeg(flctl, 512, 512 * sector);
+ write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
+ }
+
+ wait_completion(flctl);
+ writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
+}
+
+static void execmd_write_oob(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_addr = flctl->seqin_page_addr;
+ int sector, page_sectors;
+
+ page_sectors = flctl->page_size ? 4 : 1;
+
+ set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+ (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ empty_fifo(flctl);
+ set_addr(mtd, sector * 528 + 512, page_addr);
+ writel(16, FLDTCNTR(flctl)); /* set read size */
+
+ start_translation(flctl);
+ write_fiforeg(flctl, 16, 16 * sector);
+ wait_completion(flctl);
+ }
+}
+
+static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t read_cmd = 0;
+
+ pm_runtime_get_sync(&flctl->pdev->dev);
+
+ flctl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ flctl->index = 0;
+
+ switch (command) {
+ case NAND_CMD_READ1:
+ case NAND_CMD_READ0:
+ if (flctl->hwecc) {
+ /* read page with hwecc */
+ execmd_read_page_sector(mtd, page_addr);
+ break;
+ }
+ if (flctl->page_size)
+ set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+ | command);
+ else
+ set_cmd_regs(mtd, command, command);
+
+ set_addr(mtd, 0, page_addr);
+
+ flctl->read_bytes = mtd->writesize + mtd->oobsize;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ flctl->index += column;
+ goto read_normal_exit;
+
+ case NAND_CMD_READOOB:
+ if (flctl->hwecc) {
+ /* read page with hwecc */
+ execmd_read_oob(mtd, page_addr);
+ break;
+ }
+
+ if (flctl->page_size) {
+ set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+ | NAND_CMD_READ0);
+ set_addr(mtd, mtd->writesize, page_addr);
+ } else {
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, 0, page_addr);
+ }
+ flctl->read_bytes = mtd->oobsize;
+ goto read_normal_exit;
+
+ case NAND_CMD_RNDOUT:
+ if (flctl->hwecc)
+ break;
+
+ if (flctl->page_size)
+ set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
+ | command);
+ else
+ set_cmd_regs(mtd, command, command);
+
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
+ goto read_normal_exit;
+
+ case NAND_CMD_READID:
+ set_cmd_regs(mtd, command, command);
+
+ /* READID is always performed using an 8-bit bus */
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column <<= 1;
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = 8;
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
+ start_translation(flctl);
+ read_fiforeg(flctl, flctl->read_bytes, 0);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_ERASE1:
+ flctl->erase1_page_addr = page_addr;
+ break;
+
+ case NAND_CMD_ERASE2:
+ set_cmd_regs(mtd, NAND_CMD_ERASE1,
+ (command << 8) | NAND_CMD_ERASE1);
+ set_addr(mtd, -1, flctl->erase1_page_addr);
+ start_translation(flctl);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_SEQIN:
+ if (!flctl->page_size) {
+ /* output read command */
+ if (column >= mtd->writesize) {
+ column -= mtd->writesize;
+ read_cmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ read_cmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ read_cmd = NAND_CMD_READ1;
+ }
+ }
+ flctl->seqin_column = column;
+ flctl->seqin_page_addr = page_addr;
+ flctl->seqin_read_cmd = read_cmd;
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ empty_fifo(flctl);
+ if (!flctl->page_size) {
+ set_cmd_regs(mtd, NAND_CMD_SEQIN,
+ flctl->seqin_read_cmd);
+ set_addr(mtd, -1, -1);
+ writel(0, FLDTCNTR(flctl)); /* set 0 size */
+ start_translation(flctl);
+ wait_completion(flctl);
+ }
+ if (flctl->hwecc) {
+ /* write page with hwecc */
+ if (flctl->seqin_column == mtd->writesize)
+ execmd_write_oob(mtd);
+ else if (!flctl->seqin_column)
+ execmd_write_page_sector(mtd);
+ else
+ printk(KERN_ERR "Invalid address !?\n");
+ break;
+ }
+ set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
+ set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
+ writel(flctl->index, FLDTCNTR(flctl)); /* set write size */
+ start_translation(flctl);
+ write_fiforeg(flctl, flctl->index, 0);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_STATUS:
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, -1, -1);
+
+ flctl->read_bytes = 1;
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ start_translation(flctl);
+ read_datareg(flctl, 0); /* read and end */
+ break;
+
+ case NAND_CMD_RESET:
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, -1, -1);
+
+ writel(0, FLDTCNTR(flctl)); /* set 0 size */
+ start_translation(flctl);
+ wait_completion(flctl);
+ break;
+
+ default:
+ break;
+ }
+ goto runtime_exit;
+
+read_normal_exit:
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
+ start_translation(flctl);
+ read_fiforeg(flctl, flctl->read_bytes, 0);
+ wait_completion(flctl);
+runtime_exit:
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ return;
+}
+
+static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int ret;
+
+ switch (chipnr) {
+ case -1:
+ flctl->flcmncr_base &= ~CE0_ENABLE;
+
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(flctl->flcmncr_base, FLCMNCR(flctl));
+
+ if (flctl->qos_request) {
+ dev_pm_qos_remove_request(&flctl->pm_qos);
+ flctl->qos_request = 0;
+ }
+
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ break;
+ case 0:
+ flctl->flcmncr_base |= CE0_ENABLE;
+
+ if (!flctl->qos_request) {
+ ret = dev_pm_qos_add_request(&flctl->pdev->dev,
+ &flctl->pm_qos,
+ DEV_PM_QOS_RESUME_LATENCY,
+ 100);
+ if (ret < 0)
+ dev_err(&flctl->pdev->dev,
+ "PM QoS request failed: %d\n", ret);
+ flctl->qos_request = 1;
+ }
+
+ if (flctl->holden) {
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(HOLDEN, FLHOLDCR(flctl));
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ }
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+
+ memcpy(&flctl->done_buff[flctl->index], buf, len);
+ flctl->index += len;
+}
+
+static uint8_t flctl_read_byte(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint8_t data;
+
+ data = flctl->done_buff[flctl->index];
+ flctl->index++;
+ return data;
+}
+
+static uint16_t flctl_read_word(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
+
+ flctl->index += 2;
+ return *buf;
+}
+
+static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+
+ memcpy(buf, &flctl->done_buff[flctl->index], len);
+ flctl->index += len;
+}
+
+static int flctl_chip_init_tail(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ struct nand_chip *chip = &flctl->chip;
+
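+ /*
+ * The address cycle counts below scale with chip size; erase needs
+ * fewer cycles than read/write because it carries only the row
+ * address.
+ */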
+ if (mtd->writesize == 512) {
+ flctl->page_size = 0;
+ if (chip->chipsize > (32 << 20)) {
+ /* bigger than 32MB */
+ flctl->rw_ADRCNT = ADRCNT_4;
+ flctl->erase_ADRCNT = ADRCNT_3;
+ } else if (chip->chipsize > (2 << 16)) {
+ /* bigger than 128KB */
+ flctl->rw_ADRCNT = ADRCNT_3;
+ flctl->erase_ADRCNT = ADRCNT_2;
+ } else {
+ flctl->rw_ADRCNT = ADRCNT_2;
+ flctl->erase_ADRCNT = ADRCNT_1;
+ }
+ } else {
+ flctl->page_size = 1;
+ if (chip->chipsize > (128 << 20)) {
+ /* bigger than 128MB */
+ flctl->rw_ADRCNT = ADRCNT2_E;
+ flctl->erase_ADRCNT = ADRCNT_3;
+ } else if (chip->chipsize > (8 << 16)) {
+ /* bigger than 512KB */
+ flctl->rw_ADRCNT = ADRCNT_4;
+ flctl->erase_ADRCNT = ADRCNT_2;
+ } else {
+ flctl->rw_ADRCNT = ADRCNT_3;
+ flctl->erase_ADRCNT = ADRCNT_1;
+ }
+ }
+
+ if (flctl->hwecc) {
+ if (mtd->writesize == 512) {
+ chip->ecc.layout = &flctl_4secc_oob_16;
+ chip->badblock_pattern = &flctl_4secc_smallpage;
+ } else {
+ chip->ecc.layout = &flctl_4secc_oob_64;
+ chip->badblock_pattern = &flctl_4secc_largepage;
+ }
+
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 10;
+ chip->ecc.strength = 4;
+ chip->ecc.read_page = flctl_read_page_hwecc;
+ chip->ecc.write_page = flctl_write_page_hwecc;
+ chip->ecc.mode = NAND_ECC_HW;
+
+ /* 4 symbols ECC enabled */
+ flctl->flcmncr_base |= _4ECCEN;
+ } else {
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ return 0;
+}
+
+static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
+{
+ struct sh_flctl *flctl = dev_id;
+
+ dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
+ writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+
+ return IRQ_HANDLED;
+}
+
+struct flctl_soc_config {
+ unsigned long flcmncr_val;
+ unsigned has_hwecc:1;
+ unsigned use_holden:1;
+};
+
+static struct flctl_soc_config flctl_sh7372_config = {
+ .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
+ .has_hwecc = 1,
+ .use_holden = 1,
+};
+
+static const struct of_device_id of_flctl_match[] = {
+ { .compatible = "renesas,shmobile-flctl-sh7372",
+ .data = &flctl_sh7372_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_flctl_match);
+
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+ const struct of_device_id *match;
+ struct flctl_soc_config *config;
+ struct sh_flctl_platform_data *pdata;
+ struct device_node *dn = dev->of_node;
+ int ret;
+
+ match = of_match_device(of_flctl_match, dev);
+ if (match)
+ config = (struct flctl_soc_config *)match->data;
+ else {
+ dev_err(dev, "%s: no OF configuration attached\n", __func__);
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ /* set SoC specific options */
+ pdata->flcmncr_val = config->flcmncr_val;
+ pdata->has_hwecc = config->has_hwecc;
+ pdata->use_holden = config->use_holden;
+
+ /* parse user defined options */
+ ret = of_get_nand_bus_width(dn);
+ if (ret == 16)
+ pdata->flcmncr_val |= SEL_16BIT;
+ else if (ret != 8) {
+ dev_err(dev, "%s: invalid bus width\n", __func__);
+ return NULL;
+ }
+
+ return pdata;
+}
+
+static int flctl_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct sh_flctl *flctl;
+ struct mtd_info *flctl_mtd;
+ struct nand_chip *nand;
+ struct sh_flctl_platform_data *pdata;
+ int ret;
+ int irq;
+ struct mtd_part_parser_data ppdata = {};
+
+ flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
+ if (!flctl)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(flctl->reg))
+ return PTR_ERR(flctl->reg);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get flste irq data\n");
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
+ "flste", flctl);
+ if (ret) {
+ dev_err(&pdev->dev, "request interrupt failed.\n");
+ return ret;
+ }
+
+ if (pdev->dev.of_node)
+ pdata = flctl_parse_dt(&pdev->dev);
+ else
+ pdata = dev_get_platdata(&pdev->dev);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no setup data defined\n");
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, flctl);
+ flctl_mtd = &flctl->mtd;
+ nand = &flctl->chip;
+ flctl_mtd->priv = nand;
+ flctl->pdev = pdev;
+ flctl->hwecc = pdata->has_hwecc;
+ flctl->holden = pdata->use_holden;
+ flctl->flcmncr_base = pdata->flcmncr_val;
+ flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
+
+ /* Set address of hardware control function */
+ /* 20 us command delay time */
+ nand->chip_delay = 20;
+
+ nand->read_byte = flctl_read_byte;
+ nand->write_buf = flctl_write_buf;
+ nand->read_buf = flctl_read_buf;
+ nand->select_chip = flctl_select_chip;
+ nand->cmdfunc = flctl_cmdfunc;
+
+ if (pdata->flcmncr_val & SEL_16BIT) {
+ nand->options |= NAND_BUSWIDTH_16;
+ nand->read_word = flctl_read_word;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ flctl_setup_dma(flctl);
+
+ ret = nand_scan_ident(flctl_mtd, 1, NULL);
+ if (ret)
+ goto err_chip;
+
+ ret = flctl_chip_init_tail(flctl_mtd);
+ if (ret)
+ goto err_chip;
+
+ ret = nand_scan_tail(flctl_mtd);
+ if (ret)
+ goto err_chip;
+
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(flctl_mtd, NULL, &ppdata, pdata->parts,
+ pdata->nr_parts);
+ if (ret)
+ goto err_chip;
+
+ return 0;
+
+err_chip:
+ flctl_release_dma(flctl);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int flctl_remove(struct platform_device *pdev)
+{
+ struct sh_flctl *flctl = platform_get_drvdata(pdev);
+
+ flctl_release_dma(flctl);
+ nand_release(&flctl->mtd);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver flctl_driver = {
+ .remove = flctl_remove,
+ .driver = {
+ .name = "sh_flctl",
+ .of_match_table = of_match_ptr(of_flctl_match),
+ },
+};
+
+module_platform_driver_probe(flctl_driver, flctl_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("SuperH FLCTL driver");
+MODULE_ALIAS("platform:sh_flctl");
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
new file mode 100644
index 000000000..842c47a45
--- /dev/null
+++ b/drivers/mtd/nand/sharpsl.c
@@ -0,0 +1,232 @@
+/*
+ * drivers/mtd/nand/sharpsl.c
+ *
+ * Copyright (C) 2004 Richard Purdie
+ * Copyright (C) 2008 Dmitry Baryshkov
+ *
+ * Based on Sharp's NAND driver sharp_sl.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sharpsl.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+
+struct sharpsl_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+
+ void __iomem *io;
+};
+
+#define mtd_to_sharpsl(_mtd) container_of(_mtd, struct sharpsl_nand, mtd)
+
+/* register offset */
+#define ECCLPLB 0x00 /* line parity 7 - 0 bit */
+#define ECCLPUB 0x04 /* line parity 15 - 8 bit */
+#define ECCCP 0x08 /* column parity 5 - 0 bit */
+#define ECCCNTR 0x0C /* ECC byte counter */
+#define ECCCLRR 0x10 /* clear ECC */
+#define FLASHIO 0x14 /* Flash I/O */
+#define FLASHCTL 0x18 /* Flash Control */
+
+/* Flash control bit */
+#define FLRYBY (1 << 5)
+#define FLCE1 (1 << 4)
+#define FLWP (1 << 3)
+#define FLALE (1 << 2)
+#define FLCLE (1 << 1)
+#define FLCE0 (1 << 0)
+
+/*
+ * hardware specific access to control-lines
+ * ctrl:
+ * NAND_NCE: bit 0 -> ! bit 0 & 4
+ * NAND_CLE: bit 1 -> bit 1
+ * NAND_ALE: bit 2 -> bit 2
+ *
+ */
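+/*
+ * Worked example (illustrative): ctrl = NAND_NCE | NAND_CLE = 0x03 gives
+ * bits = 0x03 | (0x01 << 4) = 0x13, and 0x13 ^ 0x11 = 0x02, so FLCLE is
+ * driven high while FLCE0/FLCE1 stay low (the chip enables are active low).
+ */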
+static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ unsigned char bits = ctrl & 0x07;
+
+ bits |= (ctrl & 0x01) << 4;
+
+ bits ^= 0x11;
+
+ writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->IO_ADDR_W);
+}
+
+static int sharpsl_nand_dev_ready(struct mtd_info *mtd)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ return !!(readb(sharpsl->io + FLASHCTL) & FLRYBY);
+}
+
+static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ writeb(0, sharpsl->io + ECCCLRR);
+}
+
+static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
+ ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
+ ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
+ return readb(sharpsl->io + ECCCNTR) != 0;
+}
+
+/*
+ * Main initialization routine
+ */
+static int sharpsl_nand_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct resource *r;
+ int err = 0;
+ struct sharpsl_nand *sharpsl;
+ struct sharpsl_nand_platform_data *data = dev_get_platdata(&pdev->dev);
+
+ if (!data) {
+ dev_err(&pdev->dev, "no platform data!\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
+ if (!sharpsl)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no io memory resource defined!\n");
+ err = -ENODEV;
+ goto err_get_res;
+ }
+
+ /* map physical address */
+ sharpsl->io = ioremap(r->start, resource_size(r));
+ if (!sharpsl->io) {
+ dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&sharpsl->chip);
+
+ /* Link the private data with the MTD structure */
+ sharpsl->mtd.priv = this;
+ sharpsl->mtd.owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, sharpsl);
+
+ /*
+ * PXA initialize
+ */
+ writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = sharpsl->io + FLASHIO;
+ this->IO_ADDR_W = sharpsl->io + FLASHIO;
+ /* Set address of hardware control function */
+ this->cmd_ctrl = sharpsl_nand_hwcontrol;
+ this->dev_ready = sharpsl_nand_dev_ready;
+ /* 15 us command delay time */
+ this->chip_delay = 15;
+ /* set eccmode using hardware ECC */
+ this->ecc.mode = NAND_ECC_HW;
+ this->ecc.size = 256;
+ this->ecc.bytes = 3;
+ this->ecc.strength = 1;
+ this->badblock_pattern = data->badblock_pattern;
+ this->ecc.layout = data->ecc_layout;
+ this->ecc.hwctl = sharpsl_nand_enable_hwecc;
+ this->ecc.calculate = sharpsl_nand_calculate_ecc;
+ this->ecc.correct = nand_correct_data;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(&sharpsl->mtd, 1);
+ if (err)
+ goto err_scan;
+
+ /* Register the partitions */
+ sharpsl->mtd.name = "sharpsl-nand";
+
+ err = mtd_device_parse_register(&sharpsl->mtd, NULL, NULL,
+ data->partitions, data->nr_partitions);
+ if (err)
+ goto err_add;
+
+ /* Return happy */
+ return 0;
+
+err_add:
+ nand_release(&sharpsl->mtd);
+
+err_scan:
+ iounmap(sharpsl->io);
+err_ioremap:
+err_get_res:
+ kfree(sharpsl);
+ return err;
+}
+
+/*
+ * Clean up routine
+ */
+static int sharpsl_nand_remove(struct platform_device *pdev)
+{
+ struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
+
+ /* Release resources, unregister device */
+ nand_release(&sharpsl->mtd);
+
+ iounmap(sharpsl->io);
+
+ /* Free the MTD device structure */
+ kfree(sharpsl);
+
+ return 0;
+}
+
+static struct platform_driver sharpsl_nand_driver = {
+ .driver = {
+ .name = "sharpsl-nand",
+ },
+ .probe = sharpsl_nand_probe,
+ .remove = sharpsl_nand_remove,
+};
+
+module_platform_driver(sharpsl_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+MODULE_DESCRIPTION("Device specific logic for NAND flash on Sharp SL-C7xx Series");
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
new file mode 100644
index 000000000..e06b5e5d3
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/nand.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include "sm_common.h"
+
+static struct nand_ecclayout nand_oob_sm = {
+ .eccbytes = 6,
+ .eccpos = {8, 9, 10, 13, 14, 15},
+ .oobfree = {
+ {.offset = 0 , .length = 4}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ {.offset = 11, .length = 2} /* LBA2 */
+ }
+};
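+/* Resulting 16-byte OOB map (cf. struct sm_oob in sm_common.h): */
+/* 0-3 reserved, 4 data status, 5 block status, 6-7 LBA1, */
+/* 8-10 ECC, 11-12 LBA2, 13-15 ECC */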
+
+/* NOTE: This layout is not compatible with SmartMedia, */
+/* because the 256 byte devices have a page dependent oob layout. */
+/* However it does preserve the bad block markers. */
+/* If you use smftl, it will bypass this and work correctly. */
+/* If you do not, you break SmartMedia compliance anyway. */
+
+static struct nand_ecclayout nand_oob_sm_small = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ {.offset = 3 , .length = 2}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ }
+};
+
+
+static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_oob_ops ops;
+ struct sm_oob oob;
+ int ret;
+
+ memset(&oob, -1, SM_OOB_SIZE);
+ oob.block_status = 0x0F;
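+ /* 0x0F leaves only 4 of 8 bits set, so sm_block_valid() (>= 7) fails */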
+
+ /* As long as this function is called on erase block boundaries
+ it will work correctly for 256 byte nand */
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = (void *)&oob;
+ ops.datbuf = NULL;
+
+
+ ret = mtd_write_oob(mtd, ofs, &ops);
+ if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
+ printk(KERN_NOTICE
+ "sm_common: can't mark sector at %i as bad\n",
+ (int)ofs);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
+ LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 0xe5, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V", 0x71, 256, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
+ {NULL}
+};
+
+static struct nand_flash_dev nand_xd_flash_ids[] = {
+ LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
+ {NULL}
+};
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* Scan for card properties */
+ ret = nand_scan_ident(mtd, 1, smartmedia ?
+ nand_smartmedia_flash_ids : nand_xd_flash_ids);
+
+ if (ret)
+ return ret;
+
+ /* Bad block marker position */
+ chip->badblockpos = 0x05;
+ chip->badblockbits = 7;
+ chip->block_markbad = sm_block_markbad;
+
+ /* ECC layout */
+ if (mtd->writesize == SM_SECTOR_SIZE)
+ chip->ecc.layout = &nand_oob_sm;
+ else if (mtd->writesize == SM_SMALL_PAGE)
+ chip->ecc.layout = &nand_oob_sm_small;
+ else
+ return -ENODEV;
+
+ ret = nand_scan_tail(mtd);
+
+ if (ret)
+ return ret;
+
+ return mtd_device_register(mtd, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(sm_register_device);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
new file mode 100644
index 000000000..d3e028e58
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for SmartMedia/xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+
+/* Full oob structure as written on the flash */
+struct sm_oob {
+ uint32_t reserved;
+ uint8_t data_status;
+ uint8_t block_status;
+ uint8_t lba_copy1[2];
+ uint8_t ecc2[3];
+ uint8_t lba_copy2[2];
+ uint8_t ecc1[3];
+} __packed;
+
+
+/* one sector is always 512 bytes, but it can consist of two nand pages */
+#define SM_SECTOR_SIZE 512
+
+/* oob area is also 16 bytes, but might be from two pages */
+#define SM_OOB_SIZE 16
+
+/* This is the maximum zone size, and all devices that have more than one
+ zone have this size */
+#define SM_MAX_ZONE_SIZE 1024
+
+/* support for small page nand */
+#define SM_SMALL_PAGE 256
+#define SM_SMALL_OOB_SIZE 8
+
+
+extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+
+
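+/*
+ * The status bytes are nominally all-ones when good; counting set bits
+ * instead of comparing exact values tolerates a few OOB bitflips (a
+ * reading of the thresholds below, not a quote from the spec).
+ */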
+static inline int sm_sector_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->data_status) >= 5;
+}
+
+static inline int sm_block_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->block_status) >= 7;
+}
+
+static inline int sm_block_erased(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, sizeof(*oob)))
+ return 1;
+ return 0;
+}
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
new file mode 100644
index 000000000..d71062273
--- /dev/null
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -0,0 +1,253 @@
+/*
+ * drivers/mtd/nand/socrates_nand.c
+ *
+ * Copyright © 2008 Ilya Yanok, Emcraft Systems
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#define FPGA_NAND_CMD_MASK (0x7 << 28)
+#define FPGA_NAND_CMD_COMMAND (0x0 << 28)
+#define FPGA_NAND_CMD_ADDR (0x1 << 28)
+#define FPGA_NAND_CMD_READ (0x2 << 28)
+#define FPGA_NAND_CMD_WRITE (0x3 << 28)
+#define FPGA_NAND_BUSY (0x1 << 15)
+#define FPGA_NAND_ENABLE (0x1 << 31)
+#define FPGA_NAND_DATA_SHIFT 16
+
+struct socrates_nand_host {
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ struct device *dev;
+};
+
+/**
+ * socrates_nand_write_buf - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void socrates_nand_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ struct socrates_nand_host *host = this->priv;
+
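+ /* the FPGA moves one byte per 32-bit access, data in bits 23:16 */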
+ for (i = 0; i < len; i++) {
+ out_be32(host->io_base, FPGA_NAND_ENABLE |
+ FPGA_NAND_CMD_WRITE |
+ (buf[i] << FPGA_NAND_DATA_SHIFT));
+ }
+}
+
+/**
+ * socrates_nand_read_buf - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void socrates_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ struct socrates_nand_host *host = this->priv;
+ uint32_t val;
+
+ val = FPGA_NAND_ENABLE | FPGA_NAND_CMD_READ;
+
+ out_be32(host->io_base, val);
+ for (i = 0; i < len; i++) {
+ buf[i] = (in_be32(host->io_base) >>
+ FPGA_NAND_DATA_SHIFT) & 0xff;
+ }
+}
+
+/**
+ * socrates_nand_read_byte - read one byte from the chip
+ * @mtd: MTD device structure
+ */
+static uint8_t socrates_nand_read_byte(struct mtd_info *mtd)
+{
+ uint8_t byte;
+ socrates_nand_read_buf(mtd, &byte, sizeof(byte));
+ return byte;
+}
+
+/**
+ * socrates_nand_read_word - read one word from the chip
+ * @mtd: MTD device structure
+ */
+static uint16_t socrates_nand_read_word(struct mtd_info *mtd)
+{
+ uint16_t word;
+ socrates_nand_read_buf(mtd, (uint8_t *)&word, sizeof(word));
+ return word;
+}
+
+/*
+ * Hardware specific access to control-lines
+ */
+static void socrates_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct socrates_nand_host *host = nand_chip->priv;
+ uint32_t val;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ val = FPGA_NAND_CMD_COMMAND;
+ else
+ val = FPGA_NAND_CMD_ADDR;
+
+ if (ctrl & NAND_NCE)
+ val |= FPGA_NAND_ENABLE;
+
+ val |= (cmd & 0xff) << FPGA_NAND_DATA_SHIFT;
+
+ out_be32(host->io_base, val);
+}
+
+/*
+ * Read the Device Ready pin.
+ */
+static int socrates_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct socrates_nand_host *host = nand_chip->priv;
+
+ if (in_be32(host->io_base) & FPGA_NAND_BUSY)
+ return 0; /* busy */
+ return 1;
+}
+
+/*
+ * Probe for the NAND device.
+ */
+static int socrates_nand_probe(struct platform_device *ofdev)
+{
+ struct socrates_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int res;
+ struct mtd_part_parser_data ppdata;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->io_base = of_iomap(ofdev->dev.of_node, 0);
+ if (host->io_base == NULL) {
+ dev_err(&ofdev->dev, "ioremap failed\n");
+ return -EIO;
+ }
+
+ mtd = &host->mtd;
+ nand_chip = &host->nand_chip;
+ host->dev = &ofdev->dev;
+
+ nand_chip->priv = host; /* link the private data structures */
+ mtd->priv = nand_chip;
+ mtd->name = "socrates_nand";
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &ofdev->dev;
+ ppdata.of_node = ofdev->dev.of_node;
+
+ /* should never be accessed directly */
+ nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
+ nand_chip->IO_ADDR_W = (void *)0xdeadbeef;
+
+ nand_chip->cmd_ctrl = socrates_nand_cmd_ctrl;
+ nand_chip->read_byte = socrates_nand_read_byte;
+ nand_chip->read_word = socrates_nand_read_word;
+ nand_chip->write_buf = socrates_nand_write_buf;
+ nand_chip->read_buf = socrates_nand_read_buf;
+ nand_chip->dev_ready = socrates_nand_device_ready;
+
+ nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
+
+ /* TODO: I have no idea what real delay is. */
+ nand_chip->chip_delay = 20; /* 20us command delay time */
+
+ dev_set_drvdata(&ofdev->dev, host);
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ res = -ENXIO;
+ goto out;
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
+ res = -ENXIO;
+ goto out;
+ }
+
+ res = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (!res)
+ return res;
+
+ nand_release(mtd);
+
+out:
+ iounmap(host->io_base);
+ return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int socrates_nand_remove(struct platform_device *ofdev)
+{
+ struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+
+ iounmap(host->io_base);
+
+ return 0;
+}
+
+static const struct of_device_id socrates_nand_match[] = {
+ {
+ .compatible = "abb,socrates-nand",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, socrates_nand_match);
+
+static struct platform_driver socrates_nand_driver = {
+ .driver = {
+ .name = "socrates_nand",
+ .of_match_table = socrates_nand_match,
+ },
+ .probe = socrates_nand_probe,
+ .remove = socrates_nand_remove,
+};
+
+module_platform_driver(socrates_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ilya Yanok");
+MODULE_DESCRIPTION("NAND driver for Socrates board");
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
new file mode 100644
index 000000000..6f93b2990
--- /dev/null
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -0,0 +1,1430 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
+ *
+ * Derived from:
+ * https://github.com/yuq/sunxi-nfc-mtd
+ * Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
+ *
+ * https://github.com/hno/Allwinner-Info
+ * Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
+ *
+ * Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
+ * Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mtd.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#define NFC_REG_CTL 0x0000
+#define NFC_REG_ST 0x0004
+#define NFC_REG_INT 0x0008
+#define NFC_REG_TIMING_CTL 0x000C
+#define NFC_REG_TIMING_CFG 0x0010
+#define NFC_REG_ADDR_LOW 0x0014
+#define NFC_REG_ADDR_HIGH 0x0018
+#define NFC_REG_SECTOR_NUM 0x001C
+#define NFC_REG_CNT 0x0020
+#define NFC_REG_CMD 0x0024
+#define NFC_REG_RCMD_SET 0x0028
+#define NFC_REG_WCMD_SET 0x002C
+#define NFC_REG_IO_DATA 0x0030
+#define NFC_REG_ECC_CTL 0x0034
+#define NFC_REG_ECC_ST 0x0038
+#define NFC_REG_DEBUG 0x003C
+#define NFC_REG_ECC_CNT0 0x0040
+#define NFC_REG_ECC_CNT1 0x0044
+#define NFC_REG_ECC_CNT2 0x0048
+#define NFC_REG_ECC_CNT3 0x004c
+#define NFC_REG_USER_DATA_BASE 0x0050
+#define NFC_REG_SPARE_AREA 0x00A0
+#define NFC_RAM0_BASE 0x0400
+#define NFC_RAM1_BASE 0x0800
+
+/* define bit use in NFC_CTL */
+#define NFC_EN BIT(0)
+#define NFC_RESET BIT(1)
+#define NFC_BUS_WIDYH BIT(2)
+#define NFC_RB_SEL BIT(3)
+#define NFC_CE_SEL GENMASK(26, 24)
+#define NFC_CE_CTL BIT(6)
+#define NFC_CE_CTL1 BIT(7)
+#define NFC_PAGE_SIZE GENMASK(11, 8)
+#define NFC_SAM BIT(12)
+#define NFC_RAM_METHOD BIT(14)
+#define NFC_DEBUG_CTL BIT(31)
+
+/* define bit use in NFC_ST */
+#define NFC_RB_B2R BIT(0)
+#define NFC_CMD_INT_FLAG BIT(1)
+#define NFC_DMA_INT_FLAG BIT(2)
+#define NFC_CMD_FIFO_STATUS BIT(3)
+#define NFC_STA BIT(4)
+#define NFC_NATCH_INT_FLAG BIT(5)
+#define NFC_RB_STATE0 BIT(8)
+#define NFC_RB_STATE1 BIT(9)
+#define NFC_RB_STATE2 BIT(10)
+#define NFC_RB_STATE3 BIT(11)
+
+/* define bit use in NFC_INT */
+#define NFC_B2R_INT_ENABLE BIT(0)
+#define NFC_CMD_INT_ENABLE BIT(1)
+#define NFC_DMA_INT_ENABLE BIT(2)
+#define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \
+ NFC_CMD_INT_ENABLE | \
+ NFC_DMA_INT_ENABLE)
+
+/* define bit use in NFC_CMD */
+#define NFC_CMD_LOW_BYTE GENMASK(7, 0)
+#define NFC_CMD_HIGH_BYTE GENMASK(15, 8)
+#define NFC_ADR_NUM GENMASK(18, 16)
+#define NFC_SEND_ADR BIT(19)
+#define NFC_ACCESS_DIR BIT(20)
+#define NFC_DATA_TRANS BIT(21)
+#define NFC_SEND_CMD1 BIT(22)
+#define NFC_WAIT_FLAG BIT(23)
+#define NFC_SEND_CMD2 BIT(24)
+#define NFC_SEQ BIT(25)
+#define NFC_DATA_SWAP_METHOD BIT(26)
+#define NFC_ROW_AUTO_INC BIT(27)
+#define NFC_SEND_CMD3 BIT(28)
+#define NFC_SEND_CMD4 BIT(29)
+#define NFC_CMD_TYPE GENMASK(31, 30)
+
+/* define bit use in NFC_RCMD_SET */
+#define NFC_READ_CMD GENMASK(7, 0)
+#define NFC_RANDOM_READ_CMD0 GENMASK(15, 8)
+#define NFC_RANDOM_READ_CMD1 GENMASK(23, 16)
+
+/* define bit use in NFC_WCMD_SET */
+#define NFC_PROGRAM_CMD GENMASK(7, 0)
+#define NFC_RANDOM_WRITE_CMD GENMASK(15, 8)
+#define NFC_READ_CMD0 GENMASK(23, 16)
+#define NFC_READ_CMD1 GENMASK(31, 24)
+
+/* define bit use in NFC_ECC_CTL */
+#define NFC_ECC_EN BIT(0)
+#define NFC_ECC_PIPELINE BIT(3)
+#define NFC_ECC_EXCEPTION BIT(4)
+#define NFC_ECC_BLOCK_SIZE BIT(5)
+#define NFC_RANDOM_EN BIT(9)
+#define NFC_RANDOM_DIRECTION BIT(10)
+#define NFC_ECC_MODE_SHIFT 12
+#define NFC_ECC_MODE GENMASK(15, 12)
+#define NFC_RANDOM_SEED GENMASK(30, 16)
+
+#define NFC_DEFAULT_TIMEOUT_MS 1000
+
+#define NFC_SRAM_SIZE 1024
+
+#define NFC_MAX_CS 7
+
+/*
+ * Ready/Busy detection type: describes the Ready/Busy detection modes
+ *
+ * @RB_NONE: no external detection available, rely on STATUS command
+ * and software timeouts
+ * @RB_NATIVE: use sunxi NAND controller Ready/Busy support. The Ready/Busy
+ * pin of the NAND flash chip must be connected to one of the
+ * native NAND R/B pins (those which can be muxed to the NAND
+ * Controller)
+ * @RB_GPIO: use a simple GPIO to handle Ready/Busy status. The Ready/Busy
+ * pin of the NAND flash chip must be connected to a GPIO capable
+ * pin.
+ */
+enum sunxi_nand_rb_type {
+ RB_NONE,
+ RB_NATIVE,
+ RB_GPIO,
+};
+
+/*
+ * Ready/Busy structure: stores information related to Ready/Busy detection
+ *
+ * @type: the Ready/Busy detection mode
+ * @info: information related to the R/B detection mode. Either a gpio
+ * id or a native R/B id (those supported by the NAND controller).
+ */
+struct sunxi_nand_rb {
+ enum sunxi_nand_rb_type type;
+ union {
+ int gpio;
+ int nativeid;
+ } info;
+};
+
+/*
+ * Chip Select structure: stores information related to NAND Chip Select
+ *
+ * @cs: the NAND CS id used to communicate with a NAND Chip
+ * @rb: the Ready/Busy description
+ */
+struct sunxi_nand_chip_sel {
+ u8 cs;
+ struct sunxi_nand_rb rb;
+};
+
+/*
+ * sunxi HW ECC infos: stores information related to HW ECC support
+ *
+ * @mode: the sunxi ECC mode field deduced from ECC requirements
+ * @layout: the OOB layout depending on the ECC requirements and the
+ * selected ECC mode
+ */
+struct sunxi_nand_hw_ecc {
+ int mode;
+ struct nand_ecclayout layout;
+};
+
+/*
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @node: used to store NAND chips into a list
+ * @nand: base NAND chip structure
+ * @mtd: base MTD structure
+ * @clk_rate: clk_rate required for this NAND chip
+ * @selected: current active CS
+ * @nsels: number of CS lines required by the NAND chip
+ * @sels: array of CS lines descriptions
+ */
+struct sunxi_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ struct mtd_info mtd;
+ unsigned long clk_rate;
+ int selected;
+ int nsels;
+ struct sunxi_nand_chip_sel sels[0];
+};
+
+static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct sunxi_nand_chip, nand);
+}
+
+/*
+ * NAND Controller structure: stores sunxi NAND controller information
+ *
+ * @controller: base controller structure
+ * @dev: parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ahb_clk: NAND Controller AHB clock
+ * @mod_clk: NAND Controller mod clock
+ * @assigned_cs: bitmask describing already assigned CS lines
+ * @clk_rate: NAND controller current clock rate
+ * @chips: a list containing all the NAND chips attached to
+ * this NAND controller
+ * @complete: a completion object used to wait for NAND
+ * controller events
+ */
+struct sunxi_nfc {
+ struct nand_hw_control controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ahb_clk;
+ struct clk *mod_clk;
+ unsigned long assigned_cs;
+ unsigned long clk_rate;
+ struct list_head chips;
+ struct completion complete;
+};
+
+static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
+{
+ return container_of(ctrl, struct sunxi_nfc, controller);
+}
+
+static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
+{
+ struct sunxi_nfc *nfc = dev_id;
+ u32 st = readl(nfc->regs + NFC_REG_ST);
+ u32 ien = readl(nfc->regs + NFC_REG_INT);
+
+ if (!(ien & st))
+ return IRQ_NONE;
+
+ if ((ien & st) == ien)
+ complete(&nfc->complete);
+
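+ /* ack the events that fired and drop them from the enable mask */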
+ writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+ writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);
+
+ return IRQ_HANDLED;
+}
+
+static int sunxi_nfc_wait_int(struct sunxi_nfc *nfc, u32 flags,
+ unsigned int timeout_ms)
+{
+ init_completion(&nfc->complete);
+
+ writel(flags, nfc->regs + NFC_REG_INT);
+
+ if (!timeout_ms)
+ timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
+
+ if (!wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms))) {
+ dev_err(nfc->dev, "wait interrupt timedout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
+{
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS);
+
+ do {
+ if (!(readl(nfc->regs + NFC_REG_ST) & NFC_CMD_FIFO_STATUS))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
+ return -ETIMEDOUT;
+}
+
+static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
+{
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS);
+
+ writel(0, nfc->regs + NFC_REG_ECC_CTL);
+ writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
+
+ do {
+ if (!(readl(nfc->regs + NFC_REG_CTL) & NFC_RESET))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
+ return -ETIMEDOUT;
+}
+
+static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_rb *rb;
+ unsigned long timeo = (sunxi_nand->nand.state == FL_ERASING ? 400 : 20);
+ int ret;
+
+ if (sunxi_nand->selected < 0)
+ return 0;
+
+ rb = &sunxi_nand->sels[sunxi_nand->selected].rb;
+
+ switch (rb->type) {
+ case RB_NATIVE:
+ ret = !!(readl(nfc->regs + NFC_REG_ST) &
+ (NFC_RB_STATE0 << rb->info.nativeid));
+ if (ret)
+ break;
+
+ sunxi_nfc_wait_int(nfc, NFC_RB_B2R, timeo);
+ ret = !!(readl(nfc->regs + NFC_REG_ST) &
+ (NFC_RB_STATE0 << rb->info.nativeid));
+ break;
+ case RB_GPIO:
+ ret = gpio_get_value(rb->info.gpio);
+ break;
+ case RB_NONE:
+ default:
+ ret = 0;
+ dev_err(nfc->dev, "cannot check R/B NAND status!\n");
+ break;
+ }
+
+ return ret;
+}
+
+static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_chip_sel *sel;
+ u32 ctl;
+
+ if (chip > 0 && chip >= sunxi_nand->nsels)
+ return;
+
+ if (chip == sunxi_nand->selected)
+ return;
+
+ ctl = readl(nfc->regs + NFC_REG_CTL) &
+ ~(NFC_CE_SEL | NFC_RB_SEL | NFC_EN);
+
+ if (chip >= 0) {
+ sel = &sunxi_nand->sels[chip];
+
+ ctl |= (sel->cs << 24) | NFC_EN |
+ (((nand->page_shift - 10) & 0xf) << 8);
+ if (sel->rb.type == RB_NONE) {
+ nand->dev_ready = NULL;
+ } else {
+ nand->dev_ready = sunxi_nfc_dev_ready;
+ if (sel->rb.type == RB_NATIVE)
+ ctl |= (sel->rb.info.nativeid << 3);
+ }
+
+ writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
+
+ if (nfc->clk_rate != sunxi_nand->clk_rate) {
+ clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
+ nfc->clk_rate = sunxi_nand->clk_rate;
+ }
+ }
+
+ writel(ctl, nfc->regs + NFC_REG_CTL);
+
+ sunxi_nand->selected = chip;
+}
+
+static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
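+ /* data is staged through the controller's 1 KiB SRAM, so copy in chunks */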
+ while (len > offs) {
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ break;
+
+ if (buf)
+ memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
+ cnt);
+ offs += cnt;
+ }
+}
+
+static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
+ while (len > offs) {
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+ NFC_ACCESS_DIR;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ break;
+
+ offs += cnt;
+ }
+}
+
+static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
+{
+ uint8_t ret;
+
+ sunxi_nfc_read_buf(mtd, &ret, 1);
+
+ return ret;
+}
+
+static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
+ unsigned int ctrl)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ u32 tmp;
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ tmp = readl(nfc->regs + NFC_REG_CTL);
+ if (ctrl & NAND_NCE)
+ tmp |= NFC_CE_CTL;
+ else
+ tmp &= ~NFC_CE_CTL;
+ writel(tmp, nfc->regs + NFC_REG_CTL);
+ }
+
+ if (dat == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE) {
+ writel(NFC_SEND_CMD1 | dat, nfc->regs + NFC_REG_CMD);
+ } else {
+ writel(dat, nfc->regs + NFC_REG_ADDR_LOW);
+ writel(NFC_SEND_ADR, nfc->regs + NFC_REG_CMD);
+ }
+
+ sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+}
+
+static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_ecclayout *layout = ecc->layout;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ unsigned int max_bitflips = 0;
+ int offset;
+ int ret;
+ u32 tmp;
+ int i;
+ int cnt;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (i)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, i * ecc->size, -1);
+
+ offset = mtd->writesize + layout->eccpos[i * ecc->bytes] - 4;
+
+ chip->read_buf(mtd, NULL, ecc->size);
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf + (i * ecc->size),
+ nfc->regs + NFC_RAM0_BASE, ecc->size);
+
+ if (readl(nfc->regs + NFC_REG_ECC_ST) & 0x1) {
+ mtd->ecc_stats.failed++;
+ } else {
+ tmp = readl(nfc->regs + NFC_REG_ECC_CNT0) & 0xff;
+ mtd->ecc_stats.corrected += tmp;
+ max_bitflips = max_t(unsigned int, max_bitflips, tmp);
+ }
+
+ if (oob_required) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ offset -= mtd->writesize;
+ chip->read_buf(mtd, chip->oob_poi + offset,
+ ecc->bytes + 4);
+ }
+ }
+
+ if (oob_required) {
+ cnt = ecc->layout->oobfree[ecc->steps].length;
+ if (cnt > 0) {
+ offset = mtd->writesize +
+ ecc->layout->oobfree[ecc->steps].offset;
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ offset -= mtd->writesize;
+ chip->read_buf(mtd, chip->oob_poi + offset, cnt);
+ }
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~NFC_ECC_EN;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_ecclayout *layout = ecc->layout;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ int offset;
+ int ret;
+ u32 tmp;
+ int i;
+ int cnt;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (i)
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, i * ecc->size, -1);
+
+ chip->write_buf(mtd, buf + (i * ecc->size), ecc->size);
+
+ offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
+
+ /* Fill OOB data in */
+ if (oob_required) {
+ tmp = 0xffffffff;
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
+ 4);
+ } else {
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
+ chip->oob_poi + offset - mtd->writesize,
+ 4);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
+ (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_required) {
+ cnt = ecc->layout->oobfree[i].length;
+ if (cnt > 0) {
+ offset = mtd->writesize +
+ ecc->layout->oobfree[i].offset;
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+ offset -= mtd->writesize;
+ chip->write_buf(mtd, chip->oob_poi + offset, cnt);
+ }
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~NFC_ECC_EN;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ return 0;
+}
+
+static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ unsigned int max_bitflips = 0;
+ uint8_t *oob = chip->oob_poi;
+ int offset = 0;
+ int ret;
+ int cnt;
+ u32 tmp;
+ int i;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ chip->read_buf(mtd, NULL, ecc->size);
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, nfc->regs + NFC_RAM0_BASE, ecc->size);
+ buf += ecc->size;
+ offset += ecc->size;
+
+ if (readl(nfc->regs + NFC_REG_ECC_ST) & 0x1) {
+ mtd->ecc_stats.failed++;
+ } else {
+ tmp = readl(nfc->regs + NFC_REG_ECC_CNT0) & 0xff;
+ mtd->ecc_stats.corrected += tmp;
+ max_bitflips = max_t(unsigned int, max_bitflips, tmp);
+ }
+
+ if (oob_required) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ chip->read_buf(mtd, oob, ecc->bytes + ecc->prepad);
+ oob += ecc->bytes + ecc->prepad;
+ }
+
+ offset += ecc->bytes + ecc->prepad;
+ }
+
+ if (oob_required) {
+ cnt = mtd->oobsize - (oob - chip->oob_poi);
+ if (cnt > 0) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ chip->read_buf(mtd, oob, cnt);
+ }
+ }
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ uint8_t *oob = chip->oob_poi;
+ int offset = 0;
+ int ret;
+ int cnt;
+ u32 tmp;
+ int i;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ chip->write_buf(mtd, buf + (i * ecc->size), ecc->size);
+ offset += ecc->size;
+
+ /* Fill OOB data in */
+ if (oob_required) {
+ tmp = 0xffffffff;
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
+ 4);
+ } else {
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
+ 4);
+ }
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
+ (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+
+ offset += ecc->bytes + ecc->prepad;
+ oob += ecc->bytes + ecc->prepad;
+ }
+
+ if (oob_required) {
+ cnt = mtd->oobsize - (oob - chip->oob_poi);
+ if (cnt > 0) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+ chip->write_buf(mtd, oob, cnt);
+ }
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~NFC_ECC_EN;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ return 0;
+}
+
+static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
+ const struct nand_sdr_timings *timings)
+{
+ u32 min_clk_period = 0;
+
+ /* T1 <=> tCLS */
+ if (timings->tCLS_min > min_clk_period)
+ min_clk_period = timings->tCLS_min;
+
+ /* T2 <=> tCLH */
+ if (timings->tCLH_min > min_clk_period)
+ min_clk_period = timings->tCLH_min;
+
+ /* T3 <=> tCS */
+ if (timings->tCS_min > min_clk_period)
+ min_clk_period = timings->tCS_min;
+
+ /* T4 <=> tCH */
+ if (timings->tCH_min > min_clk_period)
+ min_clk_period = timings->tCH_min;
+
+ /* T5 <=> tWP */
+ if (timings->tWP_min > min_clk_period)
+ min_clk_period = timings->tWP_min;
+
+ /* T6 <=> tWH */
+ if (timings->tWH_min > min_clk_period)
+ min_clk_period = timings->tWH_min;
+
+ /* T7 <=> tALS */
+ if (timings->tALS_min > min_clk_period)
+ min_clk_period = timings->tALS_min;
+
+ /* T8 <=> tDS */
+ if (timings->tDS_min > min_clk_period)
+ min_clk_period = timings->tDS_min;
+
+ /* T9 <=> tDH */
+ if (timings->tDH_min > min_clk_period)
+ min_clk_period = timings->tDH_min;
+
+ /* T10 <=> tRR */
+ if (timings->tRR_min > (min_clk_period * 3))
+ min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
+
+ /* T11 <=> tALH */
+ if (timings->tALH_min > min_clk_period)
+ min_clk_period = timings->tALH_min;
+
+ /* T12 <=> tRP */
+ if (timings->tRP_min > min_clk_period)
+ min_clk_period = timings->tRP_min;
+
+ /* T13 <=> tREH */
+ if (timings->tREH_min > min_clk_period)
+ min_clk_period = timings->tREH_min;
+
+ /* T14 <=> tRC */
+ if (timings->tRC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
+
+ /* T15 <=> tWC */
+ if (timings->tWC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
+
+
+ /* Convert min_clk_period from picoseconds to nanoseconds */
+ min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
+
+ /*
+ * Convert min_clk_period into a clk frequency, then get the
+ * appropriate rate for the NAND controller IP given this formula
+ * (specified in the datasheet):
+ * nand clk_rate = 2 * min_clk_rate
+ */
+ chip->clk_rate = (2 * NSEC_PER_SEC) / min_clk_period;
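+ /* e.g. a 25 ns min period gives clk_rate = 2 * 40 MHz = 80 MHz */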
+
+ /* TODO: configure T16-T19 */
+
+ return 0;
+}
+
+static int sunxi_nand_chip_init_timings(struct sunxi_nand_chip *chip,
+ struct device_node *np)
+{
+ const struct nand_sdr_timings *timings;
+ int ret;
+ int mode;
+
+ mode = onfi_get_async_timing_mode(&chip->nand);
+ if (mode == ONFI_TIMING_MODE_UNKNOWN) {
+ mode = chip->nand.onfi_timing_mode_default;
+ } else {
+ uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
+
+ mode = fls(mode) - 1;
+ if (mode < 0)
+ mode = 0;
+
+ feature[0] = mode;
+ ret = chip->nand.onfi_set_features(&chip->mtd, &chip->nand,
+ ONFI_FEATURE_ADDR_TIMING_MODE,
+ feature);
+ if (ret)
+ return ret;
+ }
+
+ timings = onfi_async_timing_mode_to_sdr_timings(mode);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ return sunxi_nand_chip_set_timings(chip, timings);
+}
+
+static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_hw_ecc *data;
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int ret;
+ int i;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Add ECC info retrieval from DT */
+ for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ if (ecc->strength <= strengths[i])
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(strengths)) {
+ dev_err(nfc->dev, "unsupported strength\n");
+ ret = -ENOTSUPP;
+ goto err;
+ }
+
+ data->mode = i;
+
+ /* HW ECC always requests ECC bytes for 1024-byte blocks */
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
+
+ /* HW ECC always works with an even number of ECC bytes */
+ ecc->bytes = ALIGN(ecc->bytes, 2);
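+ /* e.g. strength 40: DIV_ROUND_UP(40 * 14, 8) = 70 bytes, already even */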
+
+ layout = &data->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ layout->eccbytes = (ecc->bytes * nsectors);
+
+ ecc->layout = layout;
+ ecc->priv = data;
+
+ return 0;
+
+err:
+ kfree(data);
+
+ return ret;
+}
+
+static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
+{
+ kfree(ecc->priv);
+}
+
+static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int i, j;
+ int ret;
+
+ ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+ layout = ecc->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ for (i = 0; i < nsectors; i++) {
+ if (i) {
+ layout->oobfree[i].offset =
+ layout->oobfree[i - 1].offset +
+ layout->oobfree[i - 1].length +
+ ecc->bytes;
+ layout->oobfree[i].length = 4;
+ } else {
+ /*
+ * The first 2 bytes are used for BB markers, hence we
+ * only have 2 bytes available in the first user data
+ * section.
+ */
+ layout->oobfree[i].length = 2;
+ layout->oobfree[i].offset = 2;
+ }
+
+ for (j = 0; j < ecc->bytes; j++)
+ layout->eccpos[(ecc->bytes * i) + j] =
+ layout->oobfree[i].offset +
+ layout->oobfree[i].length + j;
+ }
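+ /*
+ * Illustrative result, assuming a 2k+64 page with 1024-byte steps and
+ * strength 16 (ecc->bytes = 28, nsectors = 2): oobfree = {2,2} and
+ * {32,4}, with ECC bytes at offsets 4-31 and 36-63.
+ */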
+
+ if (mtd->oobsize > (ecc->bytes + 4) * nsectors) {
+ layout->oobfree[nsectors].offset =
+ layout->oobfree[nsectors - 1].offset +
+ layout->oobfree[nsectors - 1].length +
+ ecc->bytes;
+ layout->oobfree[nsectors].length = mtd->oobsize -
+ ((ecc->bytes + 4) * nsectors);
+ }
+
+ return 0;
+}
+
+static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int i;
+ int ret;
+
+ ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+
+ ecc->prepad = 4;
+ ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
+ ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
+
+ layout = ecc->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ for (i = 0; i < (ecc->bytes * nsectors); i++)
+ layout->eccpos[i] = i;
+
+ layout->oobfree[0].length = mtd->oobsize - i;
+ layout->oobfree[0].offset = i;
+
+ return 0;
+}
+
+static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
+{
+ switch (ecc->mode) {
+ case NAND_ECC_HW:
+ case NAND_ECC_HW_SYNDROME:
+ sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
+ break;
+ case NAND_ECC_NONE:
+ kfree(ecc->layout);
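+ /* fall through */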
+ default:
+ break;
+ }
+}
+
+static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ struct nand_chip *nand = mtd->priv;
+ int strength;
+ int blk_size;
+ int ret;
+
+ blk_size = of_get_nand_ecc_step_size(np);
+ strength = of_get_nand_ecc_strength(np);
+ if (blk_size > 0 && strength > 0) {
+ ecc->size = blk_size;
+ ecc->strength = strength;
+ } else {
+ ecc->size = nand->ecc_step_ds;
+ ecc->strength = nand->ecc_strength_ds;
+ }
+
+ if (!ecc->size || !ecc->strength)
+ return -EINVAL;
+
+ ecc->mode = NAND_ECC_HW;
+
+ ret = of_get_nand_ecc_mode(np);
+ if (ret >= 0)
+ ecc->mode = ret;
+
+ switch (ecc->mode) {
+ case NAND_ECC_SOFT_BCH:
+ break;
+ case NAND_ECC_HW:
+ ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_HW_SYNDROME:
+ ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_NONE:
+ ecc->layout = kzalloc(sizeof(*ecc->layout), GFP_KERNEL);
+ if (!ecc->layout)
+ return -ENOMEM;
+ ecc->layout->oobfree[0].length = mtd->oobsize;
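+ /* fall through */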
+ case NAND_ECC_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
+ struct device_node *np)
+{
+ const struct nand_sdr_timings *timings;
+ struct sunxi_nand_chip *chip;
+ struct mtd_part_parser_data ppdata;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ int nsels;
+ int ret;
+ int i;
+ u32 tmp;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -EINVAL;
+
+ nsels /= sizeof(u32);
+ if (!nsels) {
+ dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ chip = devm_kzalloc(dev,
+ sizeof(*chip) +
+ (nsels * sizeof(struct sunxi_nand_chip_sel)),
+ GFP_KERNEL);
+ if (!chip) {
+ dev_err(dev, "could not allocate chip\n");
+ return -ENOMEM;
+ }
+
+ chip->nsels = nsels;
+ chip->selected = -1;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (tmp > NFC_MAX_CS) {
+ dev_err(dev,
+ "invalid reg value: %u (max CS = 7)\n",
+ tmp);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", tmp);
+ return -EINVAL;
+ }
+
+ chip->sels[i].cs = tmp;
+
+ if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
+ tmp < 2) {
+ chip->sels[i].rb.type = RB_NATIVE;
+ chip->sels[i].rb.info.nativeid = tmp;
+ } else {
+ ret = of_get_named_gpio(np, "rb-gpios", i);
+ if (ret >= 0) {
+ tmp = ret;
+ chip->sels[i].rb.type = RB_GPIO;
+ chip->sels[i].rb.info.gpio = tmp;
+ ret = devm_gpio_request(dev, tmp, "nand-rb");
+ if (ret)
+ return ret;
+
+ ret = gpio_direction_input(tmp);
+ if (ret)
+ return ret;
+ } else {
+ chip->sels[i].rb.type = RB_NONE;
+ }
+ }
+ }
+
+ timings = onfi_async_timing_mode_to_sdr_timings(0);
+ if (IS_ERR(timings)) {
+ ret = PTR_ERR(timings);
+ dev_err(dev,
+ "could not retrieve timings for ONFI mode 0: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = sunxi_nand_chip_set_timings(chip, timings);
+ if (ret) {
+ dev_err(dev, "could not configure chip timings: %d\n", ret);
+ return ret;
+ }
+
+ nand = &chip->nand;
+ /* Default tR value specified in the ONFI spec (chapter 4.15.1) */
+ nand->chip_delay = 200;
+ nand->controller = &nfc->controller;
+ nand->select_chip = sunxi_nfc_select_chip;
+ nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
+ nand->read_buf = sunxi_nfc_read_buf;
+ nand->write_buf = sunxi_nfc_write_buf;
+ nand->read_byte = sunxi_nfc_read_byte;
+
+ if (of_get_nand_on_flash_bbt(np))
+ nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+
+ mtd = &chip->mtd;
+ mtd->dev.parent = dev;
+ mtd->priv = nand;
+ mtd->owner = THIS_MODULE;
+
+ ret = nand_scan_ident(mtd, nsels, NULL);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nand_chip_init_timings(chip, np);
+ if (ret) {
+ dev_err(dev, "could not configure chip timings: %d\n", ret);
+ return ret;
+ }
+
+ ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
+ if (ret) {
+ dev_err(dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_release(mtd);
+ return ret;
+ }
+
+ list_add_tail(&chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (nchips > 8) {
+ dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = sunxi_nand_chip_init(dev, nfc, nand_np);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
+{
+ struct sunxi_nand_chip *chip;
+
+ while (!list_empty(&nfc->chips)) {
+ chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
+ node);
+ nand_release(&chip->mtd);
+ sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+ }
+}
+
+static int sunxi_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct sunxi_nfc *nfc;
+ int irq;
+ int ret;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ spin_lock_init(&nfc->controller.lock);
+ init_waitqueue_head(&nfc->controller.wq);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to retrieve irq\n");
+ return irq;
+ }
+
+ nfc->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(nfc->ahb_clk)) {
+ dev_err(dev, "failed to retrieve ahb clk\n");
+ return PTR_ERR(nfc->ahb_clk);
+ }
+
+ ret = clk_prepare_enable(nfc->ahb_clk);
+ if (ret)
+ return ret;
+
+ nfc->mod_clk = devm_clk_get(dev, "mod");
+ if (IS_ERR(nfc->mod_clk)) {
+ dev_err(dev, "failed to retrieve mod clk\n");
+ ret = PTR_ERR(nfc->mod_clk);
+ goto out_ahb_clk_unprepare;
+ }
+
+ ret = clk_prepare_enable(nfc->mod_clk);
+ if (ret)
+ goto out_ahb_clk_unprepare;
+
+ ret = sunxi_nfc_rst(nfc);
+ if (ret)
+ goto out_mod_clk_unprepare;
+
+ writel(0, nfc->regs + NFC_REG_INT);
+ ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
+ 0, "sunxi-nand", nfc);
+ if (ret)
+ goto out_mod_clk_unprepare;
+
+ platform_set_drvdata(pdev, nfc);
+
+ /*
+ * TODO: replace these magic values with proper flags as soon as we
+ * know what they are encoding.
+ */
+ writel(0x100, nfc->regs + NFC_REG_TIMING_CTL);
+ writel(0x7ff, nfc->regs + NFC_REG_TIMING_CFG);
+
+ ret = sunxi_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init nand chips\n");
+ goto out_mod_clk_unprepare;
+ }
+
+ return 0;
+
+out_mod_clk_unprepare:
+ clk_disable_unprepare(nfc->mod_clk);
+out_ahb_clk_unprepare:
+ clk_disable_unprepare(nfc->ahb_clk);
+
+ return ret;
+}
+
+static int sunxi_nfc_remove(struct platform_device *pdev)
+{
+ struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
+
+ sunxi_nand_chips_cleanup(nfc);
+
+ return 0;
+}
+
+static const struct of_device_id sunxi_nfc_ids[] = {
+ { .compatible = "allwinner,sun4i-a10-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
+
+static struct platform_driver sunxi_nfc_driver = {
+ .driver = {
+ .name = "sunxi_nand",
+ .of_match_table = sunxi_nfc_ids,
+ },
+ .probe = sunxi_nfc_probe,
+ .remove = sunxi_nfc_remove,
+};
+module_platform_driver(sunxi_nfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Boris BREZILLON");
+MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
+MODULE_ALIAS("platform:sunxi_nand");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
new file mode 100644
index 000000000..fb8fd35fa
--- /dev/null
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -0,0 +1,508 @@
+/*
+ * Toshiba TMIO NAND flash controller driver
+ *
+ * Slightly murky pre-git history of the driver:
+ *
+ * Copyright (c) Ian Molton 2004, 2005, 2008
+ * Original work, independent of Sharp's code. Included hardware ECC support.
+ * Hard ECC did not work for writes in the early revisions.
+ * Copyright (c) Dirk Opfer 2005.
+ * Modifications developed from Sharp's code but
+ * NOT containing any of it, ported onto Ian's base.
+ * Copyright (c) Chris Humbert 2005
+ * Copyright (c) Dmitry Baryshkov 2008
+ * Minor fixes
+ *
+ * Parts copyright Sebastian Carlier
+ *
+ * This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+
+/*--------------------------------------------------------------------------*/
+
+/*
+ * NAND Flash Host Controller Configuration Register
+ */
+#define CCR_COMMAND 0x04 /* w Command */
+#define CCR_BASE 0x10 /* l NAND Flash Control Reg Base Addr */
+#define CCR_INTP 0x3d /* b Interrupt Pin */
+#define CCR_INTE 0x48 /* b Interrupt Enable */
+#define CCR_EC 0x4a /* b Event Control */
+#define CCR_ICC 0x4c /* b Internal Clock Control */
+#define CCR_ECCC 0x5b /* b ECC Control */
+#define CCR_NFTC 0x60 /* b NAND Flash Transaction Control */
+#define CCR_NFM 0x61 /* b NAND Flash Monitor */
+#define CCR_NFPSC 0x62 /* b NAND Flash Power Supply Control */
+#define CCR_NFDC 0x63 /* b NAND Flash Detect Control */
+
+/*
+ * NAND Flash Control Register
+ */
+#define FCR_DATA 0x00 /* bwl Data Register */
+#define FCR_MODE 0x04 /* b Mode Register */
+#define FCR_STATUS 0x05 /* b Status Register */
+#define FCR_ISR 0x06 /* b Interrupt Status Register */
+#define FCR_IMR 0x07 /* b Interrupt Mask Register */
+
+/* FCR_MODE Register Command List */
+#define FCR_MODE_DATA 0x94 /* Data Data_Mode */
+#define FCR_MODE_COMMAND 0x95 /* Data Command_Mode */
+#define FCR_MODE_ADDRESS 0x96 /* Data Address_Mode */
+
+#define FCR_MODE_HWECC_CALC 0xB4 /* HW-ECC Data */
+#define FCR_MODE_HWECC_RESULT 0xD4 /* HW-ECC Calc result Read_Mode */
+#define FCR_MODE_HWECC_RESET 0xF4 /* HW-ECC Reset */
+
+#define FCR_MODE_POWER_ON 0x0C /* Power Supply ON to SSFDC card */
+#define FCR_MODE_POWER_OFF 0x08 /* Power Supply OFF to SSFDC card */
+
+#define FCR_MODE_LED_OFF 0x00 /* LED OFF */
+#define FCR_MODE_LED_ON 0x04 /* LED ON */
+
+#define FCR_MODE_EJECT_ON 0x68 /* Ejection events active */
+#define FCR_MODE_EJECT_OFF 0x08 /* Ejection events ignored */
+
+#define FCR_MODE_LOCK 0x6C /* Lock_Mode. Eject Switch Invalid */
+#define FCR_MODE_UNLOCK 0x0C /* UnLock_Mode. Eject Switch is valid */
+
+#define FCR_MODE_CONTROLLER_ID 0x40 /* Controller ID Read */
+#define FCR_MODE_STANDBY 0x00 /* SSFDC card Changes Standby State */
+
+#define FCR_MODE_WE 0x80
+#define FCR_MODE_ECC1 0x40
+#define FCR_MODE_ECC0 0x20
+#define FCR_MODE_CE 0x10
+#define FCR_MODE_PCNT1 0x08
+#define FCR_MODE_PCNT0 0x04
+#define FCR_MODE_ALE 0x02
+#define FCR_MODE_CLE 0x01
+
+#define FCR_STATUS_BUSY 0x80
+
+/*--------------------------------------------------------------------------*/
+
+struct tmio_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+
+ struct platform_device *dev;
+
+ void __iomem *ccr;
+ void __iomem *fcr;
+ unsigned long fcr_base;
+
+ unsigned int irq;
+
+ /* for tmio_nand_read_byte */
+ u8 read;
+ unsigned read_good:1;
+};
+
+#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd)
+
+
+/*--------------------------------------------------------------------------*/
+
+static void tmio_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ u8 mode;
+
+ if (ctrl & NAND_NCE) {
+ mode = FCR_MODE_DATA;
+
+ if (ctrl & NAND_CLE)
+ mode |= FCR_MODE_CLE;
+ else
+ mode &= ~FCR_MODE_CLE;
+
+ if (ctrl & NAND_ALE)
+ mode |= FCR_MODE_ALE;
+ else
+ mode &= ~FCR_MODE_ALE;
+ } else {
+ mode = FCR_MODE_STANDBY;
+ }
+
+ tmio_iowrite8(mode, tmio->fcr + FCR_MODE);
+ tmio->read_good = 0;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ tmio_iowrite8(cmd, chip->IO_ADDR_W);
+}
+
+static int tmio_nand_dev_ready(struct mtd_info *mtd)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+ return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY);
+}
+
+static irqreturn_t tmio_irq(int irq, void *__tmio)
+{
+ struct tmio_nand *tmio = __tmio;
+ struct nand_chip *nand_chip = &tmio->chip;
+
+ /* disable RDYREQ interrupt */
+ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+
+ if (unlikely(!waitqueue_active(&nand_chip->controller->wq)))
+ dev_warn(&tmio->dev->dev, "spurious interrupt\n");
+
+ wake_up(&nand_chip->controller->wq);
+ return IRQ_HANDLED;
+}
+
+/*
+ * The TMIO core has a RDYREQ interrupt on the posedge of #SMRB.
+ * This interrupt is normally disabled, but for long operations like
+ * erase and write, we enable it to wake us up. The irq handler
+ * disables the interrupt.
+ */
+static int
+tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ long timeout;
+
+ /* enable RDYREQ interrupt */
+ tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
+ tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);
+
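+	/* erase takes far longer than a page program, hence the longer timeout */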
+ timeout = wait_event_timeout(nand_chip->controller->wq,
+ tmio_nand_dev_ready(mtd),
+ msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20));
+
+ if (unlikely(!tmio_nand_dev_ready(mtd))) {
+ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+ dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n",
+ nand_chip->state == FL_ERASING ? "erase" : "program",
+ nand_chip->state == FL_ERASING ? 400 : 20);
+
+ } else if (unlikely(!timeout)) {
+ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+ dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
+ }
+
+ nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ return nand_chip->read_byte(mtd);
+}
+
+/*
+ * The TMIO controller combines two 8-bit data bytes into one 16-bit
+ * word. This function separates them so nand_base.c works as expected,
+ * especially its NAND_CMD_READID routines.
+ *
+ * To prevent stale data from being read, tmio_nand_hwcontrol() clears
+ * tmio->read_good.
+ */
+static u_char tmio_nand_read_byte(struct mtd_info *mtd)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ unsigned int data;
+
+ if (tmio->read_good--)
+ return tmio->read;
+
+ data = tmio_ioread16(tmio->fcr + FCR_DATA);
+ tmio->read = data >> 8;
+ return data;
+}
+
+/*
+ * The TMIO controller converts an 8-bit NAND interface to a 16-bit
+ * bus interface, so all data reads and writes must be 16-bit wide.
+ * Thus, we implement 16-bit versions of the read and write buffer
+ * functions here.
+ */
+static void
+tmio_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+ tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
+}
+
+static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+ tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
+}
+
+static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+ tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE);
+ tmio_ioread8(tmio->fcr + FCR_DATA); /* dummy read */
+ tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE);
+}
+
+static int tmio_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ unsigned int ecc;
+
+ tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE);
+
+ ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+ ecc_code[1] = ecc; /* 000-255 LP7-0 */
+ ecc_code[0] = ecc >> 8; /* 000-255 LP15-8 */
+ ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+ ecc_code[2] = ecc; /* 000-255 CP5-0,11b */
+ ecc_code[4] = ecc >> 8; /* 256-511 LP7-0 */
+ ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+ ecc_code[3] = ecc; /* 256-511 LP15-8 */
+ ecc_code[5] = ecc >> 8; /* 256-511 CP5-0,11b */
+
+ tmio_iowrite8(FCR_MODE_DATA, tmio->fcr + FCR_MODE);
+ return 0;
+}
+
+static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ int r0, r1;
+
+ /* assume ecc.size = 512 and ecc.bytes = 6 */
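+	/* correct the two 256-byte halves independently, 3 ECC bytes each */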
+ r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
+ if (r0 < 0)
+ return r0;
+ r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256);
+ if (r1 < 0)
+ return r1;
+ return r0 + r1;
+}
+
+static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+ int ret;
+
+ if (cell->enable) {
+ ret = cell->enable(dev);
+ if (ret)
+ return ret;
+ }
+
+ /* (4Ch) CLKRUN Enable 1st spcrunc */
+ tmio_iowrite8(0x81, tmio->ccr + CCR_ICC);
+
+ /* (10h)BaseAddress 0x1000 spba.spba2 */
+ tmio_iowrite16(tmio->fcr_base, tmio->ccr + CCR_BASE);
+ tmio_iowrite16(tmio->fcr_base >> 16, tmio->ccr + CCR_BASE + 2);
+
+ /* (04h)Command Register I/O spcmd */
+ tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND);
+
+ /* (62h) Power Supply Control ssmpwc */
+ /* HardPowerOFF - SuspendOFF - PowerSupplyWait_4MS */
+ tmio_iowrite8(0x02, tmio->ccr + CCR_NFPSC);
+
+ /* (63h) Detect Control ssmdtc */
+ tmio_iowrite8(0x02, tmio->ccr + CCR_NFDC);
+
+ /* Interrupt status register clear sintst */
+ tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
+
+ /* After power supply, Media are reset smode */
+ tmio_iowrite8(FCR_MODE_POWER_ON, tmio->fcr + FCR_MODE);
+ tmio_iowrite8(FCR_MODE_COMMAND, tmio->fcr + FCR_MODE);
+ tmio_iowrite8(NAND_CMD_RESET, tmio->fcr + FCR_DATA);
+
+ /* Standby Mode smode */
+ tmio_iowrite8(FCR_MODE_STANDBY, tmio->fcr + FCR_MODE);
+
+ mdelay(5);
+
+ return 0;
+}
+
+static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+
+ tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
+ if (cell->disable)
+ cell->disable(dev);
+}
+
+static int tmio_probe(struct platform_device *dev)
+{
+ struct tmio_nand_data *data = dev_get_platdata(&dev->dev);
+ struct resource *fcr = platform_get_resource(dev,
+ IORESOURCE_MEM, 0);
+ struct resource *ccr = platform_get_resource(dev,
+ IORESOURCE_MEM, 1);
+ int irq = platform_get_irq(dev, 0);
+ struct tmio_nand *tmio;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int retval;
+
+ if (data == NULL)
+ dev_warn(&dev->dev, "NULL platform data!\n");
+
+ tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL);
+ if (!tmio)
+ return -ENOMEM;
+
+ tmio->dev = dev;
+
+ platform_set_drvdata(dev, tmio);
+ mtd = &tmio->mtd;
+ nand_chip = &tmio->chip;
+ mtd->priv = nand_chip;
+ mtd->name = "tmio-nand";
+
+ tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
+ if (!tmio->ccr)
+ return -EIO;
+
+ tmio->fcr_base = fcr->start & 0xfffff;
+ tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr));
+ if (!tmio->fcr)
+ return -EIO;
+
+ retval = tmio_hw_init(dev, tmio);
+ if (retval)
+ return retval;
+
+ /* Set address of NAND IO lines */
+ nand_chip->IO_ADDR_R = tmio->fcr;
+ nand_chip->IO_ADDR_W = tmio->fcr;
+
+ /* Set address of hardware control function */
+ nand_chip->cmd_ctrl = tmio_nand_hwcontrol;
+ nand_chip->dev_ready = tmio_nand_dev_ready;
+ nand_chip->read_byte = tmio_nand_read_byte;
+ nand_chip->write_buf = tmio_nand_write_buf;
+ nand_chip->read_buf = tmio_nand_read_buf;
+
+	/* configure for hardware ECC */
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 6;
+ nand_chip->ecc.strength = 2;
+ nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
+ nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
+ nand_chip->ecc.correct = tmio_nand_correct_data;
+
+ if (data)
+ nand_chip->badblock_pattern = data->badblock_pattern;
+
+ /* 15 us command delay time */
+ nand_chip->chip_delay = 15;
+
+ retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0,
+ dev_name(&dev->dev), tmio);
+ if (retval) {
+ dev_err(&dev->dev, "request_irq error %d\n", retval);
+ goto err_irq;
+ }
+
+ tmio->irq = irq;
+ nand_chip->waitfunc = tmio_nand_wait;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(mtd, 1)) {
+ retval = -ENODEV;
+ goto err_irq;
+ }
+ /* Register the partitions */
+ retval = mtd_device_parse_register(mtd, NULL, NULL,
+ data ? data->partition : NULL,
+ data ? data->num_partitions : 0);
+ if (!retval)
+ return retval;
+
+ nand_release(mtd);
+
+err_irq:
+ tmio_hw_stop(dev, tmio);
+ return retval;
+}
+
+static int tmio_remove(struct platform_device *dev)
+{
+ struct tmio_nand *tmio = platform_get_drvdata(dev);
+
+ nand_release(&tmio->mtd);
+ tmio_hw_stop(dev, tmio);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tmio_suspend(struct platform_device *dev, pm_message_t state)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+
+ if (cell->suspend)
+ cell->suspend(dev);
+
+ tmio_hw_stop(dev, platform_get_drvdata(dev));
+ return 0;
+}
+
+static int tmio_resume(struct platform_device *dev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+
+ /* FIXME - is this required or merely another attack of the broken
+ * SHARP platform? Looks suspicious.
+ */
+ tmio_hw_init(dev, platform_get_drvdata(dev));
+
+ if (cell->resume)
+ cell->resume(dev);
+
+ return 0;
+}
+#else
+#define tmio_suspend NULL
+#define tmio_resume NULL
+#endif
+
+static struct platform_driver tmio_driver = {
+ .driver.name = "tmio-nand",
+ .driver.owner = THIS_MODULE,
+ .probe = tmio_probe,
+ .remove = tmio_remove,
+ .suspend = tmio_suspend,
+ .resume = tmio_resume,
+};
+
+module_platform_driver(tmio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
+MODULE_DESCRIPTION("NAND flash driver on Toshiba Mobile IO controller");
+MODULE_ALIAS("platform:tmio-nand");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
new file mode 100644
index 000000000..9c0bc45e2
--- /dev/null
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -0,0 +1,427 @@
+/*
+ * TXx9 NAND flash memory controller driver
+ * Based on RBTX49xx patch from CELF patch archive.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2004-2007
+ * All Rights Reserved.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <asm/txx9/ndfmc.h>
+
+/* TXX9 NDFMC Registers */
+#define TXX9_NDFDTR 0x00
+#define TXX9_NDFMCR 0x04
+#define TXX9_NDFSR 0x08
+#define TXX9_NDFISR 0x0c
+#define TXX9_NDFIMR 0x10
+#define TXX9_NDFSPR 0x14
+#define TXX9_NDFRSTR 0x18 /* not TX4939 */
+
+/* NDFMCR : NDFMC Mode Control */
+#define TXX9_NDFMCR_WE 0x80
+#define TXX9_NDFMCR_ECC_ALL 0x60
+#define TXX9_NDFMCR_ECC_RESET 0x60
+#define TXX9_NDFMCR_ECC_READ 0x40
+#define TXX9_NDFMCR_ECC_ON 0x20
+#define TXX9_NDFMCR_ECC_OFF 0x00
+#define TXX9_NDFMCR_CE 0x10
+#define TXX9_NDFMCR_BSPRT 0x04 /* TX4925/TX4926 only */
+#define TXX9_NDFMCR_ALE 0x02
+#define TXX9_NDFMCR_CLE 0x01
+/* TX4939 only */
+#define TXX9_NDFMCR_X16 0x0400
+#define TXX9_NDFMCR_DMAREQ_MASK 0x0300
+#define TXX9_NDFMCR_DMAREQ_NODMA 0x0000
+#define TXX9_NDFMCR_DMAREQ_128 0x0100
+#define TXX9_NDFMCR_DMAREQ_256 0x0200
+#define TXX9_NDFMCR_DMAREQ_512 0x0300
+#define TXX9_NDFMCR_CS_MASK 0x0c
+#define TXX9_NDFMCR_CS(ch) ((ch) << 2)
+
+/* NDFMCR : NDFMC Status */
+#define TXX9_NDFSR_BUSY 0x80
+/* TX4939 only */
+#define TXX9_NDFSR_DMARUN 0x40
+
+/* NDFMCR : NDFMC Reset */
+#define TXX9_NDFRSTR_RST 0x01
+
+struct txx9ndfmc_priv {
+ struct platform_device *dev;
+ struct nand_chip chip;
+ struct mtd_info mtd;
+ int cs;
+ const char *mtdname;
+};
+
+#define MAX_TXX9NDFMC_DEV 4
+struct txx9ndfmc_drvdata {
+ struct mtd_info *mtds[MAX_TXX9NDFMC_DEV];
+ void __iomem *base;
+ unsigned char hold; /* in gbusclock */
+ unsigned char spw; /* in gbusclock */
+ struct nand_hw_control hw_control;
+};
+
+static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct txx9ndfmc_priv *txx9_priv = chip->priv;
+ return txx9_priv->dev;
+}
+
+static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
+{
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+
+ return drvdata->base + (reg << plat->shift);
+}
+
+static u32 txx9ndfmc_read(struct platform_device *dev, unsigned int reg)
+{
+ return __raw_readl(ndregaddr(dev, reg));
+}
+
+static void txx9ndfmc_write(struct platform_device *dev,
+ u32 val, unsigned int reg)
+{
+ __raw_writel(val, ndregaddr(dev, reg));
+}
+
+static uint8_t txx9ndfmc_read_byte(struct mtd_info *mtd)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+
+ return txx9ndfmc_read(dev, TXX9_NDFDTR);
+}
+
+static void txx9ndfmc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+ void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_WE, TXX9_NDFMCR);
+ while (len--)
+ __raw_writel(*buf++, ndfdtr);
+ txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
+}
+
+static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+ void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+
+ while (len--)
+ *buf++ = __raw_readl(ndfdtr);
+}
+
+static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct txx9ndfmc_priv *txx9_priv = chip->priv;
+ struct platform_device *dev = txx9_priv->dev;
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~(TXX9_NDFMCR_CLE | TXX9_NDFMCR_ALE | TXX9_NDFMCR_CE);
+ mcr |= ctrl & NAND_CLE ? TXX9_NDFMCR_CLE : 0;
+ mcr |= ctrl & NAND_ALE ? TXX9_NDFMCR_ALE : 0;
+ /* TXX9_NDFMCR_CE bit is 0:high 1:low */
+ mcr |= ctrl & NAND_NCE ? TXX9_NDFMCR_CE : 0;
+ if (txx9_priv->cs >= 0 && (ctrl & NAND_NCE)) {
+ mcr &= ~TXX9_NDFMCR_CS_MASK;
+ mcr |= TXX9_NDFMCR_CS(txx9_priv->cs);
+ }
+ txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
+ }
+ if (cmd != NAND_CMD_NONE)
+ txx9ndfmc_write(dev, cmd & 0xff, TXX9_NDFDTR);
+ if (plat->flags & NDFMC_PLAT_FLAG_DUMMYWRITE) {
+ /* dummy write to update external latch */
+ if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
+ txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
+ }
+ mmiowb();
+}
+
+static int txx9ndfmc_dev_ready(struct mtd_info *mtd)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+
+ return !(txx9ndfmc_read(dev, TXX9_NDFSR) & TXX9_NDFSR_BUSY);
+}
+
+static int txx9ndfmc_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+ struct nand_chip *chip = mtd->priv;
+ int eccbytes;
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~TXX9_NDFMCR_ECC_ALL;
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_READ, TXX9_NDFMCR);
+ for (eccbytes = chip->ecc.bytes; eccbytes > 0; eccbytes -= 3) {
+ ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code += 3;
+ }
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ return 0;
+}
+
+static int txx9ndfmc_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ struct nand_chip *chip = mtd->priv;
+ int eccsize;
+ int corrected = 0;
+ int stat;
+
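+	/* the hardware computes ECC over 256-byte sub-blocks, 3 bytes each */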
+ for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
+ stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
+ if (stat < 0)
+ return stat;
+ corrected += stat;
+ buf += 256;
+ read_ecc += 3;
+ calc_ecc += 3;
+ }
+ return corrected;
+}
+
+static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct platform_device *dev = mtd_to_platdev(mtd);
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~TXX9_NDFMCR_ECC_ALL;
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_RESET, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_ON, TXX9_NDFMCR);
+}
+
+static void txx9ndfmc_initialize(struct platform_device *dev)
+{
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ int tmout = 100;
+
+ if (plat->flags & NDFMC_PLAT_FLAG_NO_RSTR)
+ ; /* no NDFRSTR. Write to NDFSPR resets the NDFMC. */
+ else {
+ /* reset NDFMC */
+ txx9ndfmc_write(dev,
+ txx9ndfmc_read(dev, TXX9_NDFRSTR) |
+ TXX9_NDFRSTR_RST,
+ TXX9_NDFRSTR);
+ while (txx9ndfmc_read(dev, TXX9_NDFRSTR) & TXX9_NDFRSTR_RST) {
+ if (--tmout == 0) {
+ dev_err(&dev->dev, "reset failed.\n");
+ break;
+ }
+ udelay(1);
+ }
+ }
+ /* setup Hold Time, Strobe Pulse Width */
+ txx9ndfmc_write(dev, (drvdata->hold << 4) | drvdata->spw, TXX9_NDFSPR);
+ txx9ndfmc_write(dev,
+ (plat->flags & NDFMC_PLAT_FLAG_USE_BSPRT) ?
+ TXX9_NDFMCR_BSPRT : 0, TXX9_NDFMCR);
+}
+
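+/*
+ * Convert a delay in nanoseconds into GBUS clock cycles, rounding up
+ * (gbusclk is in Hz, so DIV_ROUND_UP(gbusclk, 1000) is the clock in kHz).
+ */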
+#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
+ DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
+
+static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (!ret) {
+ if (mtd->writesize >= 512) {
+ /* Hardware ECC 6 byte ECC per 512 Byte data */
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ }
+ ret = nand_scan_tail(mtd);
+ }
+ return ret;
+}
+
+static int __init txx9ndfmc_probe(struct platform_device *dev)
+{
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+ int hold, spw;
+ int i;
+ struct txx9ndfmc_drvdata *drvdata;
+ unsigned long gbusclk = plat->gbus_clock;
+ struct resource *res;
+
+ drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ drvdata->base = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ hold = plat->hold ?: 20; /* tDH */
+ spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
+
+ hold = TXX9NDFMC_NS_TO_CYC(gbusclk, hold);
+ spw = TXX9NDFMC_NS_TO_CYC(gbusclk, spw);
+ if (plat->flags & NDFMC_PLAT_FLAG_HOLDADD)
+ hold -= 2; /* actual hold time : (HOLD + 2) BUSCLK */
+ spw -= 1; /* actual wait time : (SPW + 1) BUSCLK */
+ hold = clamp(hold, 1, 15);
+ drvdata->hold = hold;
+ spw = clamp(spw, 1, 15);
+ drvdata->spw = spw;
+ dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
+ (gbusclk + 500000) / 1000000, hold, spw);
+
+ spin_lock_init(&drvdata->hw_control.lock);
+ init_waitqueue_head(&drvdata->hw_control.wq);
+
+ platform_set_drvdata(dev, drvdata);
+ txx9ndfmc_initialize(dev);
+
+ for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
+ struct txx9ndfmc_priv *txx9_priv;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+
+ if (!(plat->ch_mask & (1 << i)))
+ continue;
+ txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
+ GFP_KERNEL);
+ if (!txx9_priv)
+ continue;
+ chip = &txx9_priv->chip;
+ mtd = &txx9_priv->mtd;
+ mtd->owner = THIS_MODULE;
+
+ mtd->priv = chip;
+
+ chip->read_byte = txx9ndfmc_read_byte;
+ chip->read_buf = txx9ndfmc_read_buf;
+ chip->write_buf = txx9ndfmc_write_buf;
+ chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
+ chip->dev_ready = txx9ndfmc_dev_ready;
+ chip->ecc.calculate = txx9ndfmc_calculate_ecc;
+ chip->ecc.correct = txx9ndfmc_correct_data;
+ chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
+ chip->ecc.mode = NAND_ECC_HW;
+ /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ chip->chip_delay = 100;
+ chip->controller = &drvdata->hw_control;
+
+ chip->priv = txx9_priv;
+ txx9_priv->dev = dev;
+
+ if (plat->ch_mask != 1) {
+ txx9_priv->cs = i;
+ txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
+ dev_name(&dev->dev), i);
+ } else {
+ txx9_priv->cs = -1;
+ txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
+ GFP_KERNEL);
+ }
+ if (!txx9_priv->mtdname) {
+ kfree(txx9_priv);
+ dev_err(&dev->dev, "Unable to allocate MTD name.\n");
+ continue;
+ }
+ if (plat->wide_mask & (1 << i))
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (txx9ndfmc_nand_scan(mtd)) {
+ kfree(txx9_priv->mtdname);
+ kfree(txx9_priv);
+ continue;
+ }
+ mtd->name = txx9_priv->mtdname;
+
+ mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ drvdata->mtds[i] = mtd;
+ }
+
+ return 0;
+}
+
+static int __exit txx9ndfmc_remove(struct platform_device *dev)
+{
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ int i;
+
+ if (!drvdata)
+ return 0;
+ for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
+ struct mtd_info *mtd = drvdata->mtds[i];
+ struct nand_chip *chip;
+ struct txx9ndfmc_priv *txx9_priv;
+
+ if (!mtd)
+ continue;
+ chip = mtd->priv;
+ txx9_priv = chip->priv;
+
+ nand_release(mtd);
+ kfree(txx9_priv->mtdname);
+ kfree(txx9_priv);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int txx9ndfmc_resume(struct platform_device *dev)
+{
+ if (platform_get_drvdata(dev))
+ txx9ndfmc_initialize(dev);
+ return 0;
+}
+#else
+#define txx9ndfmc_resume NULL
+#endif
+
+static struct platform_driver txx9ndfmc_driver = {
+ .remove = __exit_p(txx9ndfmc_remove),
+ .resume = txx9ndfmc_resume,
+ .driver = {
+ .name = "txx9ndfmc",
+ },
+};
+
+module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
+MODULE_ALIAS("platform:txx9ndfmc");
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
new file mode 100644
index 000000000..3f81dc8f2
--- /dev/null
+++ b/drivers/mtd/nand/xway_nand.c
@@ -0,0 +1,201 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright © 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/mtd/nand.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+/* nand registers */
+#define EBU_ADDSEL1 0x24
+#define EBU_NAND_CON 0xB0
+#define EBU_NAND_WAIT 0xB4
+#define EBU_NAND_ECC0 0xB8
+#define EBU_NAND_ECC_AC 0xBC
+
+/* nand commands */
+#define NAND_CMD_ALE (1 << 2)
+#define NAND_CMD_CLE (1 << 3)
+#define NAND_CMD_CS (1 << 4)
+#define NAND_WRITE_CMD_RESET 0xff
+#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
+#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
+#define NAND_WRITE_DATA (NAND_CMD_CS)
+#define NAND_READ_DATA (NAND_CMD_CS)
+#define NAND_WAIT_WR_C (1 << 3)
+#define NAND_WAIT_RD (0x1)
+
+/* we need to tell the EBU to which address we mapped the NAND */
+#define ADDSEL1_MASK(x) (x << 4)
+#define ADDSEL1_REGEN 1
+
+/* we need to tell the EBU that we have NAND attached and set it up properly */
+#define BUSCON1_SETUP (1 << 22)
+#define BUSCON1_BCGEN_RES (0x3 << 12)
+#define BUSCON1_WAITWRC2 (2 << 8)
+#define BUSCON1_WAITRDC2 (2 << 6)
+#define BUSCON1_HOLDC1 (1 << 4)
+#define BUSCON1_RECOVC1 (1 << 2)
+#define BUSCON1_CMULT4 1
+
+#define NAND_CON_CE (1 << 20)
+#define NAND_CON_OUT_CS1 (1 << 10)
+#define NAND_CON_IN_CS1 (1 << 8)
+#define NAND_CON_PRE_P (1 << 7)
+#define NAND_CON_WP_P (1 << 6)
+#define NAND_CON_SE_P (1 << 5)
+#define NAND_CON_CS_P (1 << 4)
+#define NAND_CON_CSMUX (1 << 1)
+#define NAND_CON_NANDM 1
+
+static void xway_reset_chip(struct nand_chip *chip)
+{
+ unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
+ unsigned long flags;
+
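+	/*
+	 * CLE/ALE are encoded in the address bits: writing through an
+	 * address with NAND_WRITE_CMD set latches a command byte.
+	 */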
+ nandaddr &= ~NAND_WRITE_ADDR;
+ nandaddr |= NAND_WRITE_CMD;
+
+ /* finish with a reset */
+ spin_lock_irqsave(&ebu_lock, flags);
+ writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+static void xway_select_chip(struct mtd_info *mtd, int chip)
+{
+ switch (chip) {
+ case -1:
+ ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
+ ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+ break;
+ case 0:
+ ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
+ ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+ unsigned long flags;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
+ if (ctrl & NAND_CLE)
+ nandaddr |= NAND_WRITE_CMD;
+ else
+ nandaddr |= NAND_WRITE_ADDR;
+ this->IO_ADDR_W = (void __iomem *) nandaddr;
+ }
+
+ if (cmd != NAND_CMD_NONE) {
+ spin_lock_irqsave(&ebu_lock, flags);
+ writeb(cmd, this->IO_ADDR_W);
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+ spin_unlock_irqrestore(&ebu_lock, flags);
+ }
+}
+
+static int xway_dev_ready(struct mtd_info *mtd)
+{
+ return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
+}
+
+static unsigned char xway_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ return ret;
+}
+
+static int xway_nand_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this = platform_get_drvdata(pdev);
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+ const __be32 *cs = of_get_property(pdev->dev.of_node,
+ "lantiq,cs", NULL);
+ u32 cs_flag = 0;
+
+ /* load our CS from the DT. Either we find a valid 1 or default to 0 */
+ if (cs && (*cs == 1))
+ cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
+
+ /* setup the EBU to run in NAND mode on our base addr */
+ ltq_ebu_w32(CPHYSADDR(nandaddr)
+ | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
+
+ ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
+ | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
+ | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
+
+ ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
+ | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
+ | cs_flag, EBU_NAND_CON);
+
+ /* finish with a reset */
+ xway_reset_chip(this);
+
+ return 0;
+}
+
+/* allow users to override the partition in DT using the cmdline */
+static const char *part_probes[] = { "cmdlinepart", "ofpart", NULL };
+
+static struct platform_nand_data xway_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_delay = 30,
+ .part_probe_types = part_probes,
+ },
+ .ctrl = {
+ .probe = xway_nand_probe,
+ .cmd_ctrl = xway_cmd_ctrl,
+ .dev_ready = xway_dev_ready,
+ .select_chip = xway_select_chip,
+ .read_byte = xway_read_byte,
+ }
+};
+
+/*
+ * Try to find the node inside the DT. If it is available, attach our
+ * platform_nand_data.
+ */
+static int __init xway_register_nand(void)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+
+ node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
+ if (!node)
+ return -ENOENT;
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return -EINVAL;
+ pdev->dev.platform_data = &xway_nand_data;
+ of_node_put(node);
+ return 0;
+}
+
+subsys_initcall(xway_register_nand);
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
new file mode 100644
index 000000000..46f27de01
--- /dev/null
+++ b/drivers/mtd/nftlcore.c
@@ -0,0 +1,829 @@
+/*
+ * Linux driver for NAND Flash Translation Layer
+ *
+ * Copyright © 1999 Machine Vision Holdings, Inc.
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define PRERELEASE
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+
+#include <linux/kmod.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nftl.h>
+#include <linux/mtd/blktrans.h>
+
+/* maximum number of loops while examining the next block, to give us a
+   chance to detect consistency problems (they should never happen
+   because of the checks done during mounting) */
+
+#define MAX_LOOPS 10000
+
+
+static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct NFTLrecord *nftl;
+ unsigned long temp;
+
+ if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
+ return;
+ /* OK, this is moderately ugly. But probably safe. Alternatives? */
+ if (memcmp(mtd->name, "DiskOnChip", 10))
+ return;
+
+ pr_debug("NFTL: add_mtd for %s\n", mtd->name);
+
+ nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
+
+ if (!nftl)
+ return;
+
+ nftl->mbd.mtd = mtd;
+ nftl->mbd.devnum = -1;
+
+ nftl->mbd.tr = tr;
+
+ if (NFTL_mount(nftl) < 0) {
+ printk(KERN_WARNING "NFTL: could not mount device\n");
+ kfree(nftl);
+ return;
+ }
+
+ /* OK, it's a new one. Set up all the data structures. */
+
+ /* Calculate geometry */
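+	/*
+	 * Start from C=1024, H=16 and then adjust S, H and C in turn until
+	 * C * H * S covers the device as closely as possible.
+	 */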
+ nftl->cylinders = 1024;
+ nftl->heads = 16;
+
+ temp = nftl->cylinders * nftl->heads;
+ nftl->sectors = nftl->mbd.size / temp;
+ if (nftl->mbd.size % temp) {
+ nftl->sectors++;
+ temp = nftl->cylinders * nftl->sectors;
+ nftl->heads = nftl->mbd.size / temp;
+
+ if (nftl->mbd.size % temp) {
+ nftl->heads++;
+ temp = nftl->heads * nftl->sectors;
+ nftl->cylinders = nftl->mbd.size / temp;
+ }
+ }
+
+ if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) {
+		/*
+		 * Oh no, we don't have
+		 * mbd.size == heads * cylinders * sectors
+		 */
+ printk(KERN_WARNING "NFTL: cannot calculate a geometry to "
+ "match size of 0x%lx.\n", nftl->mbd.size);
+ printk(KERN_WARNING "NFTL: using C:%d H:%d S:%d "
+ "(== 0x%lx sects)\n",
+		       nftl->cylinders, nftl->heads, nftl->sectors,
+		       (long)nftl->cylinders * (long)nftl->heads *
+		       (long)nftl->sectors);
+ }
+
+ if (add_mtd_blktrans_dev(&nftl->mbd)) {
+ kfree(nftl->ReplUnitTable);
+ kfree(nftl->EUNtable);
+ kfree(nftl);
+ return;
+ }
+#ifdef PSYCHO_DEBUG
+ printk(KERN_INFO "NFTL: Found new nftl%c\n", nftl->mbd.devnum + 'a');
+#endif
+}
+
+static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct NFTLrecord *nftl = (void *)dev;
+
+ pr_debug("NFTL: remove_dev (i=%d)\n", dev->devnum);
+
+ del_mtd_blktrans_dev(dev);
+ kfree(nftl->ReplUnitTable);
+ kfree(nftl->EUNtable);
+}
+
+/*
+ * Read oob data from flash
+ */
+int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ loff_t mask = mtd->writesize - 1;
+ struct mtd_oob_ops ops;
+ int res;
+
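+	/* offs may point anywhere inside a page: split it into the page
+	   address and an offset into that page's OOB area */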
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs & mask;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ res = mtd_read_oob(mtd, offs & ~mask, &ops);
+ *retlen = ops.oobretlen;
+ return res;
+}
+
+/*
+ * Write oob data to flash
+ */
+int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ loff_t mask = mtd->writesize - 1;
+ struct mtd_oob_ops ops;
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs & mask;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
+ *retlen = ops.oobretlen;
+ return res;
+}
+
+#ifdef CONFIG_NFTL_RW
+
+/*
+ * Write data and oob to flash
+ */
+static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf, uint8_t *oob)
+{
+ loff_t mask = mtd->writesize - 1;
+ struct mtd_oob_ops ops;
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs & mask;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = oob;
+ ops.datbuf = buf;
+ ops.len = len;
+
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
+ *retlen = ops.retlen;
+ return res;
+}
+
+/* Actual NFTL access routines */
+/* NFTL_findfreeblock: Find a free Erase Unit on the NFTL partition. This
+ * function is used when a given Virtual Unit Chain needs a free block to be
+ * added to it.
+ */
+static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate)
+{
+ /* For a given Virtual Unit Chain: find or create a free block and
+ add it to the chain */
+ /* We're passed the number of the last EUN in the chain, to save us from
+ having to look it up again */
+ u16 pot = nftl->LastFreeEUN;
+ int silly = nftl->nb_blocks;
+
+ /* Normally, we force a fold to happen before we run out of free blocks completely */
+ if (!desperate && nftl->numfreeEUNs < 2) {
+ pr_debug("NFTL_findfreeblock: there are too few free EUNs\n");
+ return BLOCK_NIL;
+ }
+
+ /* Scan for a free block */
+ do {
+ if (nftl->ReplUnitTable[pot] == BLOCK_FREE) {
+ nftl->LastFreeEUN = pot;
+ nftl->numfreeEUNs--;
+ return pot;
+ }
+
+ /* This will probably point to the MediaHdr unit itself,
+ right at the beginning of the partition. But that unit
+ (and the backup unit too) should have the UCI set
+ up so that it's not selected for overwriting */
+ if (++pot > nftl->lastEUN)
+ pot = le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN);
+
+ if (!silly--) {
+ printk("Argh! No free blocks found! LastFreeEUN = %d, "
+ "FirstEUN = %d\n", nftl->LastFreeEUN,
+ le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
+ return BLOCK_NIL;
+ }
+ } while (pot != nftl->LastFreeEUN);
+
+ return BLOCK_NIL;
+}
+
+static u16 NFTL_foldchain(struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock)
+{
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ u16 BlockMap[MAX_SECTORS_PER_UNIT];
+ unsigned char BlockLastState[MAX_SECTORS_PER_UNIT];
+ unsigned char BlockFreeFound[MAX_SECTORS_PER_UNIT];
+ unsigned int thisEUN;
+ int block;
+ int silly;
+ unsigned int targetEUN;
+ struct nftl_oob oob;
+ int inplace = 1;
+ size_t retlen;
+
+ memset(BlockMap, 0xff, sizeof(BlockMap));
+ memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
+
+ thisEUN = nftl->EUNtable[thisVUC];
+
+ if (thisEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "Trying to fold non-existent "
+ "Virtual Unit Chain %d!\n", thisVUC);
+ return BLOCK_NIL;
+ }
+
+ /* Scan to find the Erase Unit which holds the actual data for each
+ 512-byte block within the Chain.
+ */
+ silly = MAX_LOOPS;
+ targetEUN = BLOCK_NIL;
+	while (thisEUN <= nftl->lastEUN) {
+ unsigned int status, foldmark;
+
+ targetEUN = thisEUN;
+		for (block = 0; block < nftl->EraseSize / 512; block++) {
+			nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) +
+				      (block * 512), 16, &retlen,
+				      (char *)&oob);
+ if (block == 2) {
+ foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
+ if (foldmark == FOLD_MARK_IN_PROGRESS) {
+ pr_debug("Write Inhibited on EUN %d\n", thisEUN);
+ inplace = 0;
+ } else {
+ /* There's no other reason not to do inplace,
+ except ones that come later. So we don't need
+ to preserve inplace */
+ inplace = 1;
+ }
+ }
+ status = oob.b.Status | oob.b.Status1;
+ BlockLastState[block] = status;
+
+ switch(status) {
+ case SECTOR_FREE:
+ BlockFreeFound[block] = 1;
+ break;
+
+ case SECTOR_USED:
+ if (!BlockFreeFound[block])
+ BlockMap[block] = thisEUN;
+ else
+ printk(KERN_WARNING
+ "SECTOR_USED found after SECTOR_FREE "
+ "in Virtual Unit Chain %d for block %d\n",
+ thisVUC, block);
+ break;
+ case SECTOR_DELETED:
+ if (!BlockFreeFound[block])
+ BlockMap[block] = BLOCK_NIL;
+ else
+ printk(KERN_WARNING
+ "SECTOR_DELETED found after SECTOR_FREE "
+ "in Virtual Unit Chain %d for block %d\n",
+ thisVUC, block);
+ break;
+
+ case SECTOR_IGNORE:
+ break;
+ default:
+ printk("Unknown status for block %d in EUN %d: %x\n",
+ block, thisEUN, status);
+ }
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%x\n",
+ thisVUC);
+ return BLOCK_NIL;
+ }
+
+ thisEUN = nftl->ReplUnitTable[thisEUN];
+ }
+
+ if (inplace) {
+ /* We're being asked to be a fold-in-place. Check
+ that all blocks which actually have data associated
+ with them (i.e. BlockMap[block] != BLOCK_NIL) are
+ either already present or SECTOR_FREE in the target
+ block. If not, we're going to have to fold out-of-place
+ anyway.
+ */
+ for (block = 0; block < nftl->EraseSize / 512 ; block++) {
+ if (BlockLastState[block] != SECTOR_FREE &&
+ BlockMap[block] != BLOCK_NIL &&
+ BlockMap[block] != targetEUN) {
+ pr_debug("Setting inplace to 0. VUC %d, "
+ "block %d was %x lastEUN, "
+ "and is in EUN %d (%s) %d\n",
+ thisVUC, block, BlockLastState[block],
+ BlockMap[block],
+ BlockMap[block]== targetEUN ? "==" : "!=",
+ targetEUN);
+ inplace = 0;
+ break;
+ }
+ }
+
+ if (pendingblock >= (thisVUC * (nftl->EraseSize / 512)) &&
+ pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) &&
+ BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] !=
+ SECTOR_FREE) {
+ pr_debug("Pending write not free in EUN %d. "
+ "Folding out of place.\n", targetEUN);
+ inplace = 0;
+ }
+ }
+
+ if (!inplace) {
+ pr_debug("Cannot fold Virtual Unit Chain %d in place. "
+ "Trying out-of-place\n", thisVUC);
+ /* We need to find a targetEUN to fold into. */
+ targetEUN = NFTL_findfreeblock(nftl, 1);
+ if (targetEUN == BLOCK_NIL) {
+ /* Ouch. Now we're screwed. We need to do a
+ fold-in-place of another chain to make room
+ for this one. We need a better way of selecting
+ which chain to fold, because makefreeblock will
+ only ask us to fold the same one again.
+ */
+ printk(KERN_WARNING
+ "NFTL_findfreeblock(desperate) returns 0xffff.\n");
+ return BLOCK_NIL;
+ }
+ } else {
+ /* We put a fold mark in the chain we are folding only if we
+ fold in place to help the mount check code. If we do not fold in
+ place, it is possible to find the valid chain by selecting the
+ longer one */
+ oob.u.c.FoldMark = oob.u.c.FoldMark1 = cpu_to_le16(FOLD_MARK_IN_PROGRESS);
+ oob.u.c.unused = 0xffffffff;
+ nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 2 * 512 + 8,
+ 8, &retlen, (char *)&oob.u);
+ }
+
+ /* OK. We now know the location of every block in the Virtual Unit Chain,
+ and the Erase Unit into which we are supposed to be copying.
+ Go for it.
+ */
+ pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN);
+ for (block = 0; block < nftl->EraseSize / 512 ; block++) {
+ unsigned char movebuf[512];
+ int ret;
+
+ /* If it's in the target EUN already, or if it's pending write, do nothing */
+ if (BlockMap[block] == targetEUN ||
+ (pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) {
+ continue;
+ }
+
+ /* copy only in non free block (free blocks can only
+ happen in case of media errors or deleted blocks) */
+ if (BlockMap[block] == BLOCK_NIL)
+ continue;
+
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
+ if (ret != -EIO)
+ printk("Error went away on retry.\n");
+ }
+ memset(&oob, 0xff, sizeof(struct nftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+ nftl_write(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) +
+ (block * 512), 512, &retlen, movebuf, (char *)&oob);
+ }
+
+ /* add the header so that it is now a valid chain */
+ oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
+ oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;
+
+ nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
+ 8, &retlen, (char *)&oob.u);
+
+ /* OK. We've moved the whole lot into the new block. Now we have to free the original blocks. */
+
+ /* At this point, we have two different chains for this Virtual Unit, and no way to tell
+ them apart. If we crash now, we get confused. However, both contain the same data, so we
+ shouldn't actually lose data in this case. It's just that when we load up on a medium which
+ has duplicate chains, we need to free one of the chains because it's not necessary any more.
+ */
+ thisEUN = nftl->EUNtable[thisVUC];
+ pr_debug("Want to erase\n");
+
+ /* For each block in the old chain (except the targetEUN of course),
+ free it and make it available for future use */
+ while (thisEUN <= nftl->lastEUN && thisEUN != targetEUN) {
+ unsigned int EUNtmp;
+
+ EUNtmp = nftl->ReplUnitTable[thisEUN];
+
+ if (NFTL_formatblock(nftl, thisEUN) < 0) {
+ /* could not erase : mark block as reserved
+ */
+ nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED;
+ } else {
+ /* correctly erased : mark it as free */
+ nftl->ReplUnitTable[thisEUN] = BLOCK_FREE;
+ nftl->numfreeEUNs++;
+ }
+ thisEUN = EUNtmp;
+ }
+
+ /* Make this the new start of chain for thisVUC */
+ nftl->ReplUnitTable[targetEUN] = BLOCK_NIL;
+ nftl->EUNtable[thisVUC] = targetEUN;
+
+ return targetEUN;
+}
+
+static u16 NFTL_makefreeblock(struct NFTLrecord *nftl, unsigned pendingblock)
+{
+ /* This is the part that needs some cleverness applied.
+ For now, I'm doing the minimum applicable to actually
+ get the thing to work.
+ Wear-levelling and other clever stuff needs to be implemented
+ and we also need to do some assessment of the results when
+ the system loses power half-way through the routine.
+ */
+ u16 LongestChain = 0;
+ u16 ChainLength = 0, thislen;
+ u16 chain, EUN;
+
+ for (chain = 0; chain < le32_to_cpu(nftl->MediaHdr.FormattedSize) / nftl->EraseSize; chain++) {
+ EUN = nftl->EUNtable[chain];
+ thislen = 0;
+
+ while (EUN <= nftl->lastEUN) {
+ thislen++;
+ //printk("VUC %d reaches len %d with EUN %d\n", chain, thislen, EUN);
+ EUN = nftl->ReplUnitTable[EUN] & 0x7fff;
+ if (thislen > 0xff00) {
+ printk("Endless loop in Virtual Chain %d: Unit %x\n",
+ chain, EUN);
+ }
+ if (thislen > 0xff10) {
+ /* Actually, don't return failure. Just ignore this chain and
+ get on with it. */
+ thislen = 0;
+ break;
+ }
+ }
+
+ if (thislen > ChainLength) {
+ //printk("New longest chain is %d with length %d\n", chain, thislen);
+ ChainLength = thislen;
+ LongestChain = chain;
+ }
+ }
+
+ if (ChainLength < 2) {
+ printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
+ "Failing request\n");
+ return BLOCK_NIL;
+ }
+
+ return NFTL_foldchain (nftl, LongestChain, pendingblock);
+}
+
+/* NFTL_findwriteunit: Return the unit number into which we can write
+ for this block. Make it available if it isn't already
+*/
+static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
+{
+ u16 lastEUN;
+ u16 thisVUC = block / (nftl->EraseSize / 512);
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ unsigned int writeEUN;
+	unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
+ size_t retlen;
+ int silly, silly2 = 3;
+ struct nftl_oob oob;
+
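+	/*
+	 * Each Virtual Unit Chain spans EraseSize/512 sectors: thisVUC is
+	 * the chain this block lives in, blockofs the block's byte offset
+	 * within any unit of that chain.
+	 */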
+ do {
+ /* Scan the media to find a unit in the VUC which has
+ a free space for the block in question.
+ */
+
+ /* This condition catches the 0x[7f]fff cases, as well as
+ being a sanity check for past-end-of-media access
+ */
+ lastEUN = BLOCK_NIL;
+ writeEUN = nftl->EUNtable[thisVUC];
+ silly = MAX_LOOPS;
+ while (writeEUN <= nftl->lastEUN) {
+ struct nftl_bci bci;
+ size_t retlen;
+ unsigned int status;
+
+ lastEUN = writeEUN;
+
+ nftl_read_oob(mtd,
+ (writeEUN * nftl->EraseSize) + blockofs,
+ 8, &retlen, (char *)&bci);
+
+ pr_debug("Status of block %d in EUN %d is %x\n",
+ block , writeEUN, le16_to_cpu(bci.Status));
+
+ status = bci.Status | bci.Status1;
+ switch(status) {
+ case SECTOR_FREE:
+ return writeEUN;
+
+ case SECTOR_DELETED:
+ case SECTOR_USED:
+ case SECTOR_IGNORE:
+ break;
+ default:
+ // Invalid block. Don't use it any more. Must implement.
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING
+ "Infinite loop in Virtual Unit Chain 0x%x\n",
+ thisVUC);
+ return BLOCK_NIL;
+ }
+
+ /* Skip to next block in chain */
+ writeEUN = nftl->ReplUnitTable[writeEUN];
+ }
+
+ /* OK. We didn't find one in the existing chain, or there
+ is no existing chain. */
+
+ /* Try to find an already-free block */
+ writeEUN = NFTL_findfreeblock(nftl, 0);
+
+ if (writeEUN == BLOCK_NIL) {
+ /* That didn't work - there were no free blocks just
+ waiting to be picked up. We're going to have to fold
+ a chain to make room.
+ */
+
+ /* First remember the start of this chain */
+ //u16 startEUN = nftl->EUNtable[thisVUC];
+
+ //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
+ writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);
+
+ if (writeEUN == BLOCK_NIL) {
+ /* OK, we accept that the above comment is
+ lying - there may have been free blocks
+ last time we called NFTL_findfreeblock(),
+ but they are reserved for when we're
+ desperate. Well, now we're desperate.
+ */
+ pr_debug("Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
+ writeEUN = NFTL_findfreeblock(nftl, 1);
+ }
+ if (writeEUN == BLOCK_NIL) {
+ /* Ouch. This should never happen - we should
+ always be able to make some room somehow.
+ If we get here, we've allocated more storage
+ space than actual media, or our makefreeblock
+ routine is missing something.
+ */
+ printk(KERN_WARNING "Cannot make free space.\n");
+ return BLOCK_NIL;
+ }
+ //printk("Restarting scan\n");
+ lastEUN = BLOCK_NIL;
+ continue;
+ }
+
+ /* We've found a free block. Insert it into the chain. */
+
+ if (lastEUN != BLOCK_NIL) {
+ thisVUC |= 0x8000; /* It's a replacement block */
+ } else {
+ /* The first block in a new chain */
+ nftl->EUNtable[thisVUC] = writeEUN;
+ }
+
+ /* set up the actual EUN we're writing into */
+ /* Both in our cache... */
+ nftl->ReplUnitTable[writeEUN] = BLOCK_NIL;
+
+ /* ... and on the flash itself */
+ nftl_read_oob(mtd, writeEUN * nftl->EraseSize + 8, 8,
+ &retlen, (char *)&oob.u);
+
+ oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
+
+ nftl_write_oob(mtd, writeEUN * nftl->EraseSize + 8, 8,
+ &retlen, (char *)&oob.u);
+
+ /* we link the new block into the chain only after the
+ block is ready. This avoids the case where the chain
+ could point to a free block */
+ if (lastEUN != BLOCK_NIL) {
+ /* Both in our cache... */
+ nftl->ReplUnitTable[lastEUN] = writeEUN;
+ /* ... and on the flash itself */
+ nftl_read_oob(mtd, (lastEUN * nftl->EraseSize) + 8,
+ 8, &retlen, (char *)&oob.u);
+
+ oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum
+ = cpu_to_le16(writeEUN);
+
+ nftl_write_oob(mtd, (lastEUN * nftl->EraseSize) + 8,
+ 8, &retlen, (char *)&oob.u);
+ }
+
+ return writeEUN;
+
+ } while (silly2--);
+
+ printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
+ thisVUC);
+ return BLOCK_NIL;
+}
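+
+/*
+ * Illustrative note (not from the original source): a worked example of the
+ * virtual-unit arithmetic used above, assuming a hypothetical EraseSize of
+ * 8192 bytes, i.e. 16 sectors of 512 bytes per Erase Unit:
+ *
+ *	block = 35
+ *	thisVUC  = 35 / 16           = 2     (third Virtual Unit Chain)
+ *	blockofs = (35 * 512) & 8191 = 1536  (sector 3 within each unit)
+ *
+ * Every Erase Unit in chain 2 keeps this sector at byte offset 1536; the
+ * scan above walks the chain until it finds a unit whose copy of that
+ * sector is still SECTOR_FREE.
+ */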
+
+static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
+{
+ struct NFTLrecord *nftl = (void *)mbd;
+ u16 writeEUN;
+ unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
+ size_t retlen;
+ struct nftl_oob oob;
+
+ writeEUN = NFTL_findwriteunit(nftl, block);
+
+ if (writeEUN == BLOCK_NIL) {
+ printk(KERN_WARNING
+ "NFTL_writeblock(): Cannot find block to write to\n");
+ /* If we _still_ haven't got a block to use, we're screwed */
+ return 1;
+ }
+
+ memset(&oob, 0xff, sizeof(struct nftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+ nftl_write(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs,
+ 512, &retlen, (char *)buffer, (char *)&oob);
+ return 0;
+}
+#endif /* CONFIG_NFTL_RW */
+
+static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
+{
+ struct NFTLrecord *nftl = (void *)mbd;
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ u16 lastgoodEUN;
+ u16 thisEUN = nftl->EUNtable[block / (nftl->EraseSize / 512)];
+ unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
+ unsigned int status;
+ int silly = MAX_LOOPS;
+ size_t retlen;
+ struct nftl_bci bci;
+
+ lastgoodEUN = BLOCK_NIL;
+
+ if (thisEUN != BLOCK_NIL) {
+ while (thisEUN < nftl->nb_blocks) {
+ if (nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) +
+ blockofs, 8, &retlen,
+ (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch (status) {
+ case SECTOR_FREE:
+ /* no modification of a sector should follow a free sector */
+ goto the_end;
+ case SECTOR_DELETED:
+ lastgoodEUN = BLOCK_NIL;
+ break;
+ case SECTOR_USED:
+ lastgoodEUN = thisEUN;
+ break;
+ case SECTOR_IGNORE:
+ break;
+ default:
+ printk("Unknown status for block %ld in EUN %d: %x\n",
+ block, thisEUN, status);
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%lx\n",
+ block / (nftl->EraseSize / 512));
+ return 1;
+ }
+ thisEUN = nftl->ReplUnitTable[thisEUN];
+ }
+ }
+
+ the_end:
+ if (lastgoodEUN == BLOCK_NIL) {
+ /* the requested block is not on the media, return all 0x00 */
+ memset(buffer, 0, 512);
+ } else {
+ loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
+ size_t retlen;
+ int res = mtd_read(mtd, ptr, 512, &retlen, buffer);
+
+ if (res < 0 && !mtd_is_bitflip(res))
+ return -EIO;
+ }
+ return 0;
+}
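+
+/*
+ * Illustrative note (not from the original source): suppose the chain for a
+ * VUC is EUN 5 -> 9 -> 12 and the status of the requested sector in those
+ * units is USED, DELETED and FREE respectively. The walk above first sets
+ * lastgoodEUN = 5, then resets it to BLOCK_NIL at the DELETED copy in EUN 9,
+ * and the FREE copy in EUN 12 ends the scan. The read therefore returns a
+ * block of zeroes: the most recent action on this sector was a delete.
+ */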
+
+static int nftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct NFTLrecord *nftl = (void *)dev;
+
+ geo->heads = nftl->heads;
+ geo->sectors = nftl->sectors;
+ geo->cylinders = nftl->cylinders;
+
+ return 0;
+}
+
+/****************************************************************************
+ *
+ * Module stuff
+ *
+ ****************************************************************************/
+
+
+static struct mtd_blktrans_ops nftl_tr = {
+ .name = "nftl",
+ .major = NFTL_MAJOR,
+ .part_bits = NFTL_PARTN_BITS,
+ .blksize = 512,
+ .getgeo = nftl_getgeo,
+ .readsect = nftl_readblock,
+#ifdef CONFIG_NFTL_RW
+ .writesect = nftl_writeblock,
+#endif
+ .add_mtd = nftl_add_mtd,
+ .remove_dev = nftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nftl(void)
+{
+ return register_mtd_blktrans(&nftl_tr);
+}
+
+static void __exit cleanup_nftl(void)
+{
+ deregister_mtd_blktrans(&nftl_tr);
+}
+
+module_init(init_nftl);
+module_exit(cleanup_nftl);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al.");
+MODULE_DESCRIPTION("Support code for NAND Flash Translation Layer, used on M-Systems DiskOnChip 2000 and Millennium");
+MODULE_ALIAS_BLOCKDEV_MAJOR(NFTL_MAJOR);
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
new file mode 100644
index 000000000..a5dfbfbeb
--- /dev/null
+++ b/drivers/mtd/nftlmount.c
@@ -0,0 +1,790 @@
+/*
+ * NFTL mount code with extensive checks
+ *
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright © 2000 Netgem S.A.
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <asm/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nftl.h>
+
+#define SECTORSIZE 512
+
+/* find_boot_record: Find the NFTL Media Header and its Spare copy, which contain the
+ * various device information of the NFTL partition and the Bad Unit Table. Update
+ * ReplUnitTable[] according to the Bad Unit Table. ReplUnitTable[] is used for
+ * management of Erase Units by the other routines in nftl.c and nftlmount.c.
+ */
+static int find_boot_record(struct NFTLrecord *nftl)
+{
+ struct nftl_uci1 h1;
+ unsigned int block, boot_record_count = 0;
+ size_t retlen;
+ u8 buf[SECTORSIZE];
+ struct NFTLMediaHeader *mh = &nftl->MediaHdr;
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ unsigned int i;
+
+ /* Assume logical EraseSize == physical erasesize for starting the scan.
+ We'll sort it out later if we find a MediaHeader which says otherwise */
+ /* Actually, we won't. The new DiskOnChip driver has already scanned
+ the MediaHeader and adjusted the virtual erasesize it presents in
+ the mtd device accordingly. We could even get rid of
+ nftl->EraseSize if there were any point in doing so. */
+ nftl->EraseSize = nftl->mbd.mtd->erasesize;
+ nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
+
+ nftl->MediaUnit = BLOCK_NIL;
+ nftl->SpareMediaUnit = BLOCK_NIL;
+
+ /* search for a valid boot record */
+ for (block = 0; block < nftl->nb_blocks; block++) {
+ int ret;
+
+ /* Check for the ANAND header first. Then we can whinge if it's found but
+ later checks fail */
+ ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
+ /* We ignore ret in case the ECC of the MediaHeader is invalid
+ (which is apparently acceptable) */
+ if (retlen != SECTORSIZE) {
+ static int warncount = 5;
+
+ if (warncount) {
+ printk(KERN_WARNING "Block read at 0x%x of mtd%d failed: %d\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
+ if (!--warncount)
+ printk(KERN_WARNING "Further failures for this block will not be printed\n");
+ }
+ continue;
+ }
+
+ if (retlen < 6 || memcmp(buf, "ANAND", 6)) {
+ /* ANAND\0 not found. Continue */
+#if 0
+ printk(KERN_DEBUG "ANAND header not found at 0x%x in mtd%d\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index);
+#endif
+ continue;
+ }
+
+ /* To be safer with BIOS, also use erase mark as discriminant */
+ ret = nftl_read_oob(mtd, block * nftl->EraseSize +
+ SECTORSIZE + 8, 8, &retlen,
+ (char *)&h1);
+ if (ret < 0) {
+ printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
+ continue;
+ }
+
+#if 0 /* Some people seem to have devices without ECC or erase marks
+ on the Media Header blocks. There are enough other sanity
+ checks in here that we can probably do without it.
+ */
+ if (le16_to_cpu(h1.EraseMark | h1.EraseMark1) != ERASE_MARK) {
+ printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but erase mark not present (0x%04x,0x%04x instead)\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index,
+ le16_to_cpu(h1.EraseMark), le16_to_cpu(h1.EraseMark1));
+ continue;
+ }
+
+ /* Finally reread to check ECC */
+ ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
+ if (ret < 0) {
+ printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
+ continue;
+ }
+
+ /* Paranoia. Check the ANAND header is still there after the ECC read */
+ if (memcmp(buf, "ANAND", 6)) {
+ printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but went away on reread!\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index);
+ printk(KERN_NOTICE "New data are: %02x %02x %02x %02x %02x %02x\n",
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
+ continue;
+ }
+#endif
+ /* OK, we like it. */
+
+ if (boot_record_count) {
+ /* We've already processed one. So we just check if
+ this one is the same as the first one we found */
+ if (memcmp(mh, buf, sizeof(struct NFTLMediaHeader))) {
+ printk(KERN_NOTICE "NFTL Media Headers at 0x%x and 0x%x disagree.\n",
+ nftl->MediaUnit * nftl->EraseSize, block * nftl->EraseSize);
+ /* if (debug) Print both side by side */
+ if (boot_record_count < 2) {
+ /* We haven't yet seen two real ones */
+ return -1;
+ }
+ continue;
+ }
+ if (boot_record_count == 1)
+ nftl->SpareMediaUnit = block;
+
+ /* Mark this boot record (NFTL MediaHeader) block as reserved */
+ nftl->ReplUnitTable[block] = BLOCK_RESERVED;
+
+
+ boot_record_count++;
+ continue;
+ }
+
+ /* This is the first we've seen. Copy the media header structure into place */
+ memcpy(mh, buf, sizeof(struct NFTLMediaHeader));
+
+ /* Do some sanity checks on it */
+#if 0
+The new DiskOnChip driver scans the MediaHeader itself, and presents a virtual
+erasesize based on UnitSizeFactor. So the erasesize we read from the mtd
+device is already correct.
+ if (mh->UnitSizeFactor == 0) {
+ printk(KERN_NOTICE "NFTL: UnitSizeFactor 0x00 detected. This violates the spec but we think we know what it means...\n");
+ } else if (mh->UnitSizeFactor < 0xfc) {
+ printk(KERN_NOTICE "Sorry, we don't support UnitSizeFactor 0x%02x\n",
+ mh->UnitSizeFactor);
+ return -1;
+ } else if (mh->UnitSizeFactor != 0xff) {
+ printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n",
+ mh->UnitSizeFactor);
+ nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor);
+ nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
+ }
+#endif
+ nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
+ if ((nftl->nb_boot_blocks + 2) >= nftl->nb_blocks) {
+ printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
+ printk(KERN_NOTICE "nb_boot_blocks (%d) + 2 > nb_blocks (%d)\n",
+ nftl->nb_boot_blocks, nftl->nb_blocks);
+ return -1;
+ }
+
+ nftl->numvunits = le32_to_cpu(mh->FormattedSize) / nftl->EraseSize;
+ if (nftl->numvunits > (nftl->nb_blocks - nftl->nb_boot_blocks - 2)) {
+ printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
+ printk(KERN_NOTICE "numvunits (%d) > nb_blocks (%d) - nb_boot_blocks(%d) - 2\n",
+ nftl->numvunits, nftl->nb_blocks, nftl->nb_boot_blocks);
+ return -1;
+ }
+
+ nftl->mbd.size = nftl->numvunits * (nftl->EraseSize / SECTORSIZE);
+
+ /* If we're not using the last sectors in the device for some reason,
+ reduce nb_blocks accordingly so we forget they're there */
+ nftl->nb_blocks = le16_to_cpu(mh->NumEraseUnits) + le16_to_cpu(mh->FirstPhysicalEUN);
+
+ /* XXX: will be suppressed */
+ nftl->lastEUN = nftl->nb_blocks - 1;
+
+ /* memory alloc */
+ nftl->EUNtable = kmalloc(nftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ if (!nftl->EUNtable) {
+ printk(KERN_NOTICE "NFTL: allocation of EUNtable failed\n");
+ return -ENOMEM;
+ }
+
+ nftl->ReplUnitTable = kmalloc(nftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ if (!nftl->ReplUnitTable) {
+ kfree(nftl->EUNtable);
+ printk(KERN_NOTICE "NFTL: allocation of ReplUnitTable failed\n");
+ return -ENOMEM;
+ }
+
+ /* mark the bios blocks (blocks before NFTL MediaHeader) as reserved */
+ for (i = 0; i < nftl->nb_boot_blocks; i++)
+ nftl->ReplUnitTable[i] = BLOCK_RESERVED;
+ /* mark all remaining blocks as potentially containing data */
+ for (; i < nftl->nb_blocks; i++) {
+ nftl->ReplUnitTable[i] = BLOCK_NOTEXPLORED;
+ }
+
+ /* Mark this boot record (NFTL MediaHeader) block as reserved */
+ nftl->ReplUnitTable[block] = BLOCK_RESERVED;
+
+ /* read the Bad Erase Unit Table and modify ReplUnitTable[] accordingly */
+ for (i = 0; i < nftl->nb_blocks; i++) {
+#if 0
+The new DiskOnChip driver already scanned the bad block table. Just query it.
+ if ((i & (SECTORSIZE - 1)) == 0) {
+ /* read one sector for every SECTORSIZE of blocks */
+ ret = mtd->read(nftl->mbd.mtd,
+ block * nftl->EraseSize + i +
+ SECTORSIZE, SECTORSIZE,
+ &retlen, buf);
+ if (ret < 0) {
+ printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
+ ret);
+ kfree(nftl->ReplUnitTable);
+ kfree(nftl->EUNtable);
+ return -1;
+ }
+ }
+ /* mark the Bad Erase Unit as RESERVED in ReplUnitTable */
+ if (buf[i & (SECTORSIZE - 1)] != 0xff)
+ nftl->ReplUnitTable[i] = BLOCK_RESERVED;
+#endif
+ if (mtd_block_isbad(nftl->mbd.mtd,
+ i * nftl->EraseSize))
+ nftl->ReplUnitTable[i] = BLOCK_RESERVED;
+ }
+
+ nftl->MediaUnit = block;
+ boot_record_count++;
+
+ } /* foreach (block) */
+
+ return boot_record_count ? 0 : -1;
+}
+
+static int memcmpb(void *a, int c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ if (c != ((unsigned char *)a)[i])
+ return 1;
+ }
+ return 0;
+}
+
+/* check_free_sectors: check that a free sector is actually FREE, i.e. all 0xff in both the data and OOB areas */
+static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len,
+ int check_oob)
+{
+ u8 buf[SECTORSIZE + nftl->mbd.mtd->oobsize];
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ size_t retlen;
+ int i;
+
+ for (i = 0; i < len; i += SECTORSIZE) {
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
+ return -1;
+ if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
+ return -1;
+
+ if (check_oob) {
+ if (nftl_read_oob(mtd, address, mtd->oobsize,
+ &retlen, &buf[SECTORSIZE]) < 0)
+ return -1;
+ if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
+ return -1;
+ }
+ address += SECTORSIZE;
+ }
+
+ return 0;
+}
+
+/* NFTL_formatblock: format an Erase Unit by erasing ALL Erase Zones in the Erase Unit
+ * and update the NFTL metadata. Each erase operation is checked with check_free_sectors().
+ *
+ * Return: 0 on success, -1 on error.
+ *
+ * ToDo: 1. Is it really necessary to run check_free_sectors() after erasing?
+ */
+int NFTL_formatblock(struct NFTLrecord *nftl, int block)
+{
+ size_t retlen;
+ unsigned int nb_erases, erase_mark;
+ struct nftl_uci1 uci;
+ struct erase_info *instr = &nftl->instr;
+ struct mtd_info *mtd = nftl->mbd.mtd;
+
+ /* Read the Unit Control Information #1 for Wear-Leveling */
+ if (nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8,
+ 8, &retlen, (char *)&uci) < 0)
+ goto default_uci1;
+
+ erase_mark = le16_to_cpu ((uci.EraseMark | uci.EraseMark1));
+ if (erase_mark != ERASE_MARK) {
+ default_uci1:
+ uci.EraseMark = cpu_to_le16(ERASE_MARK);
+ uci.EraseMark1 = cpu_to_le16(ERASE_MARK);
+ uci.WearInfo = cpu_to_le32(0);
+ }
+
+ memset(instr, 0, sizeof(struct erase_info));
+
+ /* XXX: use async erase interface, XXX: test return code */
+ instr->mtd = nftl->mbd.mtd;
+ instr->addr = block * nftl->EraseSize;
+ instr->len = nftl->EraseSize;
+ mtd_erase(mtd, instr);
+
+ if (instr->state == MTD_ERASE_FAILED) {
+ printk("Error while formatting block %d\n", block);
+ goto fail;
+ }
+
+ /* increase and write Wear-Leveling info */
+ nb_erases = le32_to_cpu(uci.WearInfo);
+ nb_erases++;
+
+ /* wrap (almost impossible with current flash) or free block */
+ if (nb_erases == 0)
+ nb_erases = 1;
+
+ /* check the "freeness" of Erase Unit before updating metadata
+ * FixMe: is this check really necessary ? since we have check the
+ * return code after the erase operation. */
+ if (check_free_sectors(nftl, instr->addr, nftl->EraseSize, 1) != 0)
+ goto fail;
+
+ uci.WearInfo = cpu_to_le32(nb_erases);
+ if (nftl_write_oob(mtd, block * nftl->EraseSize + SECTORSIZE +
+ 8, 8, &retlen, (char *)&uci) < 0)
+ goto fail;
+ return 0;
+fail:
+ /* could not format, update the bad block table (caller is responsible
+ for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
+ mtd_block_markbad(nftl->mbd.mtd, instr->addr);
+ return -1;
+}
+
+/* check_sectors_in_chain: Check that each sector of a Virtual Unit Chain is correct.
+ * Mark each incorrect sector as 'IGNORE'. This check is only done if the chain
+ * was being folded when NFTL was interrupted.
+ *
+ * The check_free_sectors() in this function is necessary: it is possible that,
+ * after the Data area has been written, the Block Control Information is not
+ * updated accordingly (due to a power failure or something), which leaves the
+ * block in an inconsistent state. So we have to check whether a block is really
+ * FREE in this case. */
+static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block)
+{
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ unsigned int block, i, status;
+ struct nftl_bci bci;
+ int sectors_per_block;
+ size_t retlen;
+
+ sectors_per_block = nftl->EraseSize / SECTORSIZE;
+ block = first_block;
+ for (;;) {
+ for (i = 0; i < sectors_per_block; i++) {
+ if (nftl_read_oob(mtd,
+ block * nftl->EraseSize + i * SECTORSIZE,
+ 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch(status) {
+ case SECTOR_FREE:
+ /* verify that the sector is really free. If not, mark
+ as ignore */
+ if (memcmpb(&bci, 0xff, 8) != 0 ||
+ check_free_sectors(nftl, block * nftl->EraseSize + i * SECTORSIZE,
+ SECTORSIZE, 0) != 0) {
+ printk("Incorrect free sector %d in block %d: "
+ "marking it as ignored\n",
+ i, block);
+
+ /* sector not free actually : mark it as SECTOR_IGNORE */
+ bci.Status = SECTOR_IGNORE;
+ bci.Status1 = SECTOR_IGNORE;
+ nftl_write_oob(mtd, block *
+ nftl->EraseSize +
+ i * SECTORSIZE, 8,
+ &retlen, (char *)&bci);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* proceed to next Erase Unit on the chain */
+ block = nftl->ReplUnitTable[block];
+ if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
+ printk("incorrect ReplUnitTable[] : %d\n", block);
+ if (block == BLOCK_NIL || block >= nftl->nb_blocks)
+ break;
+ }
+}
+
+/* calc_chain_length: Walk through a Virtual Unit Chain and compute its length */
+static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
+{
+ unsigned int length = 0, block = first_block;
+
+ for (;;) {
+ length++;
+ /* avoid infinite loops, although this is guaranteed not to
+ happen because of the previous checks */
+ if (length >= nftl->nb_blocks) {
+ printk("nftl: length too long %d !\n", length);
+ break;
+ }
+
+ block = nftl->ReplUnitTable[block];
+ if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
+ printk("incorrect ReplUnitTable[] : %d\n", block);
+ if (block == BLOCK_NIL || block >= nftl->nb_blocks)
+ break;
+ }
+ return length;
+}
+
+/* format_chain: Format an invalid Virtual Unit Chain. It frees all the Erase Units in
+ * the chain, i.e. all the units are disconnected.
+ *
+ * It is not strictly correct to begin with the first block of the chain, because
+ * if the code is interrupted we may see a valid chain again if some block inside
+ * it still carries a first_block flag. But is that really a problem?
+ *
+ * FixMe: Figure out what the last statement means. What happens if power fails
+ * while we are in the for (;;) loop formatting blocks?
+ */
+static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
+{
+ unsigned int block = first_block, block1;
+
+ printk("Formatting chain at block %d\n", first_block);
+
+ for (;;) {
+ block1 = nftl->ReplUnitTable[block];
+
+ printk("Formatting block %d\n", block);
+ if (NFTL_formatblock(nftl, block) < 0) {
+ /* cannot format !!!! Mark it as Bad Unit */
+ nftl->ReplUnitTable[block] = BLOCK_RESERVED;
+ } else {
+ nftl->ReplUnitTable[block] = BLOCK_FREE;
+ }
+
+ /* goto next block on the chain */
+ block = block1;
+
+ if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
+ printk("incorrect ReplUnitTable[] : %d\n", block);
+ if (block == BLOCK_NIL || block >= nftl->nb_blocks)
+ break;
+ }
+}
+
+/* check_and_mark_free_block: Verify that a block is free in the NFTL sense (valid erase mark) or
+ * totally free (only 0xff).
+ *
+ * Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should meet the
+ * following criteria:
+ * 1. */
+static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
+{
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ struct nftl_uci1 h1;
+ unsigned int erase_mark;
+ size_t retlen;
+
+ /* check erase mark. */
+ if (nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
+ &retlen, (char *)&h1) < 0)
+ return -1;
+
+ erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
+ if (erase_mark != ERASE_MARK) {
+ /* if there is no erase mark, the block must be totally free. This is
+ possible in two cases: an empty filesystem or an interrupted erase (very unlikely) */
+ if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0)
+ return -1;
+
+ /* free block : write erase mark */
+ h1.EraseMark = cpu_to_le16(ERASE_MARK);
+ h1.EraseMark1 = cpu_to_le16(ERASE_MARK);
+ h1.WearInfo = cpu_to_le32(0);
+ if (nftl_write_oob(mtd,
+ block * nftl->EraseSize + SECTORSIZE + 8, 8,
+ &retlen, (char *)&h1) < 0)
+ return -1;
+ } else {
+#if 0
+ /* if erase mark present, need to skip it when doing check */
+ for (i = 0; i < nftl->EraseSize; i += SECTORSIZE) {
+ /* check free sector */
+ if (check_free_sectors (nftl, block * nftl->EraseSize + i,
+ SECTORSIZE, 0) != 0)
+ return -1;
+
+ if (nftl_read_oob(mtd, block * nftl->EraseSize + i,
+ 16, &retlen, buf) < 0)
+ return -1;
+ if (i == SECTORSIZE) {
+ /* skip erase mark */
+ if (memcmpb(buf, 0xff, 8))
+ return -1;
+ } else {
+ if (memcmpb(buf, 0xff, 16))
+ return -1;
+ }
+ }
+#endif
+ }
+
+ return 0;
+}
+
+/* get_fold_mark: Read the fold mark from Unit Control Information #2. We use FOLD_MARK_IN_PROGRESS
+ * to indicate that we are in the middle of folding a Virtual Unit Chain. If UCI #2
+ * reads FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process was
+ * interrupted for some reason. A clean-up/check of the VUC is necessary in this case.
+ *
+ * WARNING: returns 0 on read error
+ */
+static int get_fold_mark(struct NFTLrecord *nftl, unsigned int block)
+{
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ struct nftl_uci2 uci;
+ size_t retlen;
+
+ if (nftl_read_oob(mtd, block * nftl->EraseSize + 2 * SECTORSIZE + 8,
+ 8, &retlen, (char *)&uci) < 0)
+ return 0;
+
+ return le16_to_cpu((uci.FoldMark | uci.FoldMark1));
+}
+
+int NFTL_mount(struct NFTLrecord *s)
+{
+ int i;
+ unsigned int first_logical_block, logical_block, rep_block, nb_erases, erase_mark;
+ unsigned int block, first_block, is_first_block;
+ int chain_length, do_format_chain;
+ struct nftl_uci0 h0;
+ struct nftl_uci1 h1;
+ struct mtd_info *mtd = s->mbd.mtd;
+ size_t retlen;
+
+ /* search for NFTL MediaHeader and Spare NFTL Media Header */
+ if (find_boot_record(s) < 0) {
+ printk("Could not find valid boot record\n");
+ return -1;
+ }
+
+ /* init the logical to physical table */
+ for (i = 0; i < s->nb_blocks; i++) {
+ s->EUNtable[i] = BLOCK_NIL;
+ }
+
+ /* first pass : explore each block chain */
+ first_logical_block = 0;
+ for (first_block = 0; first_block < s->nb_blocks; first_block++) {
+ /* if the block was not already explored, we can look at it */
+ if (s->ReplUnitTable[first_block] == BLOCK_NOTEXPLORED) {
+ block = first_block;
+ chain_length = 0;
+ do_format_chain = 0;
+
+ for (;;) {
+ /* read the block header. If error, we format the chain */
+ if (nftl_read_oob(mtd,
+ block * s->EraseSize + 8, 8,
+ &retlen, (char *)&h0) < 0 ||
+ nftl_read_oob(mtd,
+ block * s->EraseSize +
+ SECTORSIZE + 8, 8,
+ &retlen, (char *)&h1) < 0) {
+ s->ReplUnitTable[block] = BLOCK_NIL;
+ do_format_chain = 1;
+ break;
+ }
+
+ logical_block = le16_to_cpu ((h0.VirtUnitNum | h0.SpareVirtUnitNum));
+ rep_block = le16_to_cpu ((h0.ReplUnitNum | h0.SpareReplUnitNum));
+ nb_erases = le32_to_cpu (h1.WearInfo);
+ erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
+
+ is_first_block = !(logical_block >> 15);
+ logical_block = logical_block & 0x7fff;
+
+ /* invalid/free block test */
+ if (erase_mark != ERASE_MARK || logical_block >= s->nb_blocks) {
+ if (chain_length == 0) {
+ /* if not currently in a chain, we can handle it safely */
+ if (check_and_mark_free_block(s, block) < 0) {
+ /* not really free: format it */
+ printk("Formatting block %d\n", block);
+ if (NFTL_formatblock(s, block) < 0) {
+ /* could not format: reserve the block */
+ s->ReplUnitTable[block] = BLOCK_RESERVED;
+ } else {
+ s->ReplUnitTable[block] = BLOCK_FREE;
+ }
+ } else {
+ /* free block: mark it */
+ s->ReplUnitTable[block] = BLOCK_FREE;
+ }
+ /* directly examine the next block. */
+ goto examine_ReplUnitTable;
+ } else {
+ /* the block was in a chain: this is bad. We
+ must format the whole chain */
+ printk("Block %d: free but referenced in chain %d\n",
+ block, first_block);
+ s->ReplUnitTable[block] = BLOCK_NIL;
+ do_format_chain = 1;
+ break;
+ }
+ }
+
+ /* we accept only first blocks here */
+ if (chain_length == 0) {
+ /* this block is not the first block in a chain:
+ ignore it, it will be included in a chain
+ later, or marked as not explored */
+ if (!is_first_block)
+ goto examine_ReplUnitTable;
+ first_logical_block = logical_block;
+ } else {
+ if (logical_block != first_logical_block) {
+ printk("Block %d: incorrect logical block: %d expected: %d\n",
+ block, logical_block, first_logical_block);
+ /* the chain is incorrect : we must format it,
+ but we need to read it completely */
+ do_format_chain = 1;
+ }
+ if (is_first_block) {
+ /* we accept that a block is marked as first
+ block while being last block in a chain
+ only if the chain is being folded */
+ if (get_fold_mark(s, block) != FOLD_MARK_IN_PROGRESS ||
+ rep_block != 0xffff) {
+ printk("Block %d: incorrectly marked as first block in chain\n",
+ block);
+ /* the chain is incorrect : we must format it,
+ but we need to read it completely */
+ do_format_chain = 1;
+ } else {
+ printk("Block %d: folding in progress - ignoring first block flag\n",
+ block);
+ }
+ }
+ }
+ chain_length++;
+ if (rep_block == 0xffff) {
+ /* no more blocks after */
+ s->ReplUnitTable[block] = BLOCK_NIL;
+ break;
+ } else if (rep_block >= s->nb_blocks) {
+ printk("Block %d: referencing invalid block %d\n",
+ block, rep_block);
+ do_format_chain = 1;
+ s->ReplUnitTable[block] = BLOCK_NIL;
+ break;
+ } else if (s->ReplUnitTable[rep_block] != BLOCK_NOTEXPLORED) {
+ /* same problem as previous 'is_first_block' test:
+ we accept that the last block of a chain has
+ the first_block flag set if folding is in
+ progress. We handle here the case where the
+ last block appeared first */
+ if (s->ReplUnitTable[rep_block] == BLOCK_NIL &&
+ s->EUNtable[first_logical_block] == rep_block &&
+ get_fold_mark(s, first_block) == FOLD_MARK_IN_PROGRESS) {
+ /* EUNtable[] will be set after */
+ printk("Block %d: folding in progress - ignoring first block flag\n",
+ rep_block);
+ s->ReplUnitTable[block] = rep_block;
+ s->EUNtable[first_logical_block] = BLOCK_NIL;
+ } else {
+ printk("Block %d: referencing block %d already in another chain\n",
+ block, rep_block);
+ /* XXX: should handle correctly fold in progress chains */
+ do_format_chain = 1;
+ s->ReplUnitTable[block] = BLOCK_NIL;
+ }
+ break;
+ } else {
+ /* this is OK */
+ s->ReplUnitTable[block] = rep_block;
+ block = rep_block;
+ }
+ }
+
+ /* the chain was completely explored. Now we can decide
+ what to do with it */
+ if (do_format_chain) {
+ /* invalid chain : format it */
+ format_chain(s, first_block);
+ } else {
+ unsigned int first_block1, chain_to_format, chain_length1;
+ int fold_mark;
+
+ /* valid chain : get foldmark */
+ fold_mark = get_fold_mark(s, first_block);
+ if (fold_mark == 0) {
+ /* cannot get foldmark : format the chain */
+ printk("Could read foldmark at block %d\n", first_block);
+ format_chain(s, first_block);
+ } else {
+ if (fold_mark == FOLD_MARK_IN_PROGRESS)
+ check_sectors_in_chain(s, first_block);
+
+ /* now handle the case where we find two chains at the
+ same virtual address : we select the longer one,
+ because the shorter one is the one which was being
+ folded if the folding was not done in place */
+ first_block1 = s->EUNtable[first_logical_block];
+ if (first_block1 != BLOCK_NIL) {
+ /* XXX: what to do if same length ? */
+ chain_length1 = calc_chain_length(s, first_block1);
+ printk("Two chains at blocks %d (len=%d) and %d (len=%d)\n",
+ first_block1, chain_length1, first_block, chain_length);
+
+ if (chain_length >= chain_length1) {
+ chain_to_format = first_block1;
+ s->EUNtable[first_logical_block] = first_block;
+ } else {
+ chain_to_format = first_block;
+ }
+ format_chain(s, chain_to_format);
+ } else {
+ s->EUNtable[first_logical_block] = first_block;
+ }
+ }
+ }
+ }
+ examine_ReplUnitTable:;
+ }
+
+ /* second pass to format unreferenced blocks and init free block count */
+ s->numfreeEUNs = 0;
+ s->LastFreeEUN = le16_to_cpu(s->MediaHdr.FirstPhysicalEUN);
+
+ for (block = 0; block < s->nb_blocks; block++) {
+ if (s->ReplUnitTable[block] == BLOCK_NOTEXPLORED) {
+ printk("Unreferenced block %d, formatting it\n", block);
+ if (NFTL_formatblock(s, block) < 0)
+ s->ReplUnitTable[block] = BLOCK_RESERVED;
+ else
+ s->ReplUnitTable[block] = BLOCK_FREE;
+ }
+ if (s->ReplUnitTable[block] == BLOCK_FREE) {
+ s->numfreeEUNs++;
+ s->LastFreeEUN = block;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
new file mode 100644
index 000000000..aa26c32e1
--- /dev/null
+++ b/drivers/mtd/ofpart.c
@@ -0,0 +1,198 @@
+/*
+ * Flash partitions described by the OF (or flattened) device tree
+ *
+ * Copyright © 2006 MontaVista Software Inc.
+ * Author: Vitaly Wool <vwool@ru.mvista.com>
+ *
+ * Revised to handle newer style flash binding by:
+ * Copyright © 2007 David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/mtd/partitions.h>
+
+static bool node_has_compatible(struct device_node *pp)
+{
+ return of_get_property(pp, "compatible", NULL);
+}
+
+static int parse_ofpart_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct device_node *node;
+ const char *partname;
+ struct device_node *pp;
+ int nr_parts, i;
+
+
+ if (!data)
+ return 0;
+
+ node = data->of_node;
+ if (!node)
+ return 0;
+
+ /* First count the subnodes */
+ nr_parts = 0;
+ for_each_child_of_node(node, pp) {
+ if (node_has_compatible(pp))
+ continue;
+
+ nr_parts++;
+ }
+
+ if (nr_parts == 0)
+ return 0;
+
+ *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_child_of_node(node, pp) {
+ const __be32 *reg;
+ int len;
+ int a_cells, s_cells;
+
+ if (node_has_compatible(pp))
+ continue;
+
+ reg = of_get_property(pp, "reg", &len);
+ if (!reg) {
+ nr_parts--;
+ continue;
+ }
+
+ a_cells = of_n_addr_cells(pp);
+ s_cells = of_n_size_cells(pp);
+ (*pparts)[i].offset = of_read_number(reg, a_cells);
+ (*pparts)[i].size = of_read_number(reg + a_cells, s_cells);
+
+ partname = of_get_property(pp, "label", &len);
+ if (!partname)
+ partname = of_get_property(pp, "name", &len);
+ (*pparts)[i].name = partname;
+
+ if (of_get_property(pp, "read-only", &len))
+ (*pparts)[i].mask_flags |= MTD_WRITEABLE;
+
+ if (of_get_property(pp, "lock", &len))
+ (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
+
+ i++;
+ }
+
+ if (!i) {
+ of_node_put(pp);
+ pr_err("No valid partition found on %s\n", node->full_name);
+ kfree(*pparts);
+ *pparts = NULL;
+ return -EINVAL;
+ }
+
+ return nr_parts;
+}
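+
+/*
+ * Illustrative sketch (not from the original source): a device-tree fragment
+ * of the kind parse_ofpart_partitions() accepts. Node names, labels and
+ * offsets are made-up assumptions; what matters to the parser is that each
+ * subnode has a "reg" property and no "compatible" property:
+ *
+ *	flash@0 {
+ *		partition@0 {
+ *			label = "u-boot";
+ *			reg = <0x0 0x40000>;
+ *			read-only;
+ *		};
+ *		partition@40000 {
+ *			label = "rootfs";
+ *			reg = <0x40000 0x1c0000>;
+ *		};
+ *	};
+ */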
+
+static struct mtd_part_parser ofpart_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_ofpart_partitions,
+ .name = "ofpart",
+};
+
+static int parse_ofoldpart_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct device_node *dp;
+ int i, plen, nr_parts;
+ const struct {
+ __be32 offset, len;
+ } *part;
+ const char *names;
+
+ if (!data)
+ return 0;
+
+ dp = data->of_node;
+ if (!dp)
+ return 0;
+
+ part = of_get_property(dp, "partitions", &plen);
+ if (!part)
+ return 0; /* No partitions found */
+
+ pr_warning("Device tree uses obsolete partition map binding: %s\n",
+ dp->full_name);
+
+ nr_parts = plen / sizeof(part[0]);
+
+ *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ names = of_get_property(dp, "partition-names", &plen);
+
+ for (i = 0; i < nr_parts; i++) {
+ (*pparts)[i].offset = be32_to_cpu(part->offset);
+ (*pparts)[i].size = be32_to_cpu(part->len) & ~1;
+ /* bit 0 set signifies read only partition */
+ if (be32_to_cpu(part->len) & 1)
+ (*pparts)[i].mask_flags = MTD_WRITEABLE;
+
+ if (names && (plen > 0)) {
+ int len = strlen(names) + 1;
+
+ (*pparts)[i].name = names;
+ plen -= len;
+ names += len;
+ } else {
+ (*pparts)[i].name = "unnamed";
+ }
+
+ part++;
+ }
+
+ return nr_parts;
+}
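+
+/*
+ * Illustrative sketch (not from the original source): with the obsolete
+ * binding parsed above, partitions are bare (offset, length) cell pairs and
+ * bit 0 of the length marks a partition read-only. A hypothetical
+ * two-partition layout:
+ *
+ *	partitions = <0x00000000 0x00040001	(256KiB, read-only)
+ *		      0x00040000 0x001c0000>;	(1792KiB, writable)
+ *	partition-names = "boot", "data";
+ */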
+
+static struct mtd_part_parser ofoldpart_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_ofoldpart_partitions,
+ .name = "ofoldpart",
+};
+
+static int __init ofpart_parser_init(void)
+{
+ register_mtd_parser(&ofpart_parser);
+ register_mtd_parser(&ofoldpart_parser);
+ return 0;
+}
+
+static void __exit ofpart_parser_exit(void)
+{
+ deregister_mtd_parser(&ofpart_parser);
+ deregister_mtd_parser(&ofoldpart_parser);
+}
+
+module_init(ofpart_parser_init);
+module_exit(ofpart_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
+MODULE_AUTHOR("Vitaly Wool, David Gibson");
+/*
+ * When MTD core cannot find the requested parser, it tries to load the module
+ * with the same name. Since we provide the ofoldpart parser, we should have
+ * the corresponding alias.
+ */
+MODULE_ALIAS("ofoldpart");
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
new file mode 100644
index 000000000..dcae2f6a2
--- /dev/null
+++ b/drivers/mtd/onenand/Kconfig
@@ -0,0 +1,70 @@
+menuconfig MTD_ONENAND
+ tristate "OneNAND Device Support"
+ depends on MTD
+ depends on HAS_IOMEM
+ help
+ This enables support for accessing all types of OneNAND flash
+ devices. For further information see
+ <http://www.samsung.com/Products/Semiconductor/OneNAND/index.htm>
+
+if MTD_ONENAND
+
+config MTD_ONENAND_VERIFY_WRITE
+ bool "Verify OneNAND page writes"
+ help
+ This adds an extra check when data is written to the flash. The
+ OneNAND flash device internally checks only bits transitioning
+ from 1 to 0. There is a rare possibility that even though the
+ device thinks the write was successful, a bit could have been
+ flipped accidentally due to device wear or something else.
+
+config MTD_ONENAND_GENERIC
+ tristate "OneNAND Flash device via platform device driver"
+ help
+ Support for OneNAND flash via platform device driver.
+
+config MTD_ONENAND_OMAP2
+ tristate "OneNAND on OMAP2/OMAP3 support"
+ depends on ARCH_OMAP2 || ARCH_OMAP3
+ help
+ Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
+ via the GPMC memory controller.
+
+config MTD_ONENAND_SAMSUNG
+ tristate "OneNAND on Samsung SOC controller support"
+ depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4
+ help
+ Support for a OneNAND flash device connected to a Samsung SoC.
+ S3C64XX uses the command mapping method.
+ S5PC110/S5PC210 use the generic OneNAND method.
+
+config MTD_ONENAND_OTP
+ bool "OneNAND OTP Support"
+ help
+ One block of the NAND flash array memory is reserved as
+ a One-Time Programmable (OTP) block memory area.
+ Also, the first block of the NAND flash array can be used as OTP.
+
+ The OTP block can be read, programmed and locked using the same
+ operations as any other NAND flash array memory block.
+ The OTP block cannot be erased.
+
+ The OTP block is fully guaranteed to be a valid block.
+
+config MTD_ONENAND_2X_PROGRAM
+ bool "OneNAND 2X program support"
+ help
+ The 2X Program is an extension of the Program Operation.
+ Since the device is equipped with two DataRAMs and a two-plane NAND
+ flash memory array, these two components enable simultaneous programming
+ of 4KiB. Plane 1 has only even blocks such as block0, block2 and block4,
+ while Plane 2 has only odd blocks such as block1, block3 and block5.
+ So MTD regards the device as having a 4KiB page size and a 256KiB block size.
+
+ The following chips currently support it (KFXXX16Q2M):
+ Demux: KFG2G16Q2M, KFH4G16Q2M, KFW8G16Q2M,
+ Mux: KFM2G16Q2M, KFN4G16Q2M,
+
+ and more recent chips.
+
+endif # MTD_ONENAND
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
new file mode 100644
index 000000000..9d6540e8b
--- /dev/null
+++ b/drivers/mtd/onenand/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the OneNAND MTD
+#
+
+# Core functionality.
+obj-$(CONFIG_MTD_ONENAND) += onenand.o
+
+# Board specific.
+obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
+obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
+
+onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
new file mode 100644
index 000000000..32a216d31
--- /dev/null
+++ b/drivers/mtd/onenand/generic.c
@@ -0,0 +1,119 @@
+/*
+ * linux/drivers/mtd/onenand/generic.c
+ *
+ * Copyright (c) 2005 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the OneNAND flash for generic boards.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+
+/*
+ * Note: Driver name and platform data format have been updated!
+ *
+ * This version of the driver is named "onenand-flash" and takes struct
+ * onenand_platform_data as platform data. The old ARM-specific version
+ * with the name "onenand" used to take struct flash_platform_data.
+ */
+#define DRIVER_NAME "onenand-flash"
+
+struct onenand_info {
+ struct mtd_info mtd;
+ struct onenand_chip onenand;
+};
+
+static int generic_onenand_probe(struct platform_device *pdev)
+{
+ struct onenand_info *info;
+ struct onenand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct resource *res = pdev->resource;
+ unsigned long size = resource_size(res);
+ int err;
+
+ info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (!request_mem_region(res->start, size, dev_name(&pdev->dev))) {
+ err = -EBUSY;
+ goto out_free_info;
+ }
+
+ info->onenand.base = ioremap(res->start, size);
+ if (!info->onenand.base) {
+ err = -ENOMEM;
+ goto out_release_mem_region;
+ }
+
+ info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
+ info->onenand.irq = platform_get_irq(pdev, 0);
+
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.priv = &info->onenand;
+ info->mtd.owner = THIS_MODULE;
+
+ if (onenand_scan(&info->mtd, 1)) {
+ err = -ENXIO;
+ goto out_iounmap;
+ }
+
+ err = mtd_device_parse_register(&info->mtd, NULL, NULL,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
+
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+
+out_iounmap:
+ iounmap(info->onenand.base);
+out_release_mem_region:
+ release_mem_region(res->start, size);
+out_free_info:
+ kfree(info);
+
+ return err;
+}
+
+static int generic_onenand_remove(struct platform_device *pdev)
+{
+ struct onenand_info *info = platform_get_drvdata(pdev);
+ struct resource *res = pdev->resource;
+ unsigned long size = resource_size(res);
+
+ if (info) {
+ onenand_release(&info->mtd);
+ release_mem_region(res->start, size);
+ iounmap(info->onenand.base);
+ kfree(info);
+ }
+
+ return 0;
+}
+
+static struct platform_driver generic_onenand_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = generic_onenand_probe,
+ .remove = generic_onenand_remove,
+};
+
+module_platform_driver(generic_onenand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
new file mode 100644
index 000000000..646ddd6db
--- /dev/null
+++ b/drivers/mtd/onenand/omap2.c
@@ -0,0 +1,815 @@
+/*
+ * linux/drivers/mtd/onenand/omap2.c
+ *
+ * OneNAND driver for OMAP2 / OMAP3
+ *
+ * Copyright © 2005-2006 Nokia Corporation
+ *
+ * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
+ * IRQ and DMA support written by Timo Teras
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+
+#include <asm/mach/flash.h>
+#include <linux/platform_data/mtd-onenand-omap2.h>
+#include <asm/gpio.h>
+
+#include <linux/omap-dma.h>
+
+#define DRIVER_NAME "omap2-onenand"
+
+#define ONENAND_BUFRAM_SIZE (1024 * 5)
+
+struct omap2_onenand {
+ struct platform_device *pdev;
+ int gpmc_cs;
+ unsigned long phys_base;
+ unsigned int mem_size;
+ int gpio_irq;
+ struct mtd_info mtd;
+ struct onenand_chip onenand;
+ struct completion irq_done;
+ struct completion dma_done;
+ int dma_channel;
+ int freq;
+ int (*setup)(void __iomem *base, int *freq_ptr);
+ struct regulator *regulator;
+ u8 flags;
+};
+
+static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
+{
+ struct omap2_onenand *c = data;
+
+ complete(&c->dma_done);
+}
+
+static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
+{
+ struct omap2_onenand *c = dev_id;
+
+ complete(&c->irq_done);
+
+ return IRQ_HANDLED;
+}
+
+static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
+{
+ return readw(c->onenand.base + reg);
+}
+
+static inline void write_reg(struct omap2_onenand *c, unsigned short value,
+ int reg)
+{
+ writew(value, c->onenand.base + reg);
+}
+
+static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
+{
+ printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
+ msg, state, ctrl, intr);
+}
+
+static void wait_warn(char *msg, int state, unsigned int ctrl,
+ unsigned int intr)
+{
+ printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
+ "intr 0x%04x\n", msg, state, ctrl, intr);
+}
+
+static int omap2_onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ unsigned int intr = 0;
+ unsigned int ctrl, ctrl_mask;
+ unsigned long timeout;
+ u32 syscfg;
+
+ if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
+ state == FL_VERIFYING_ERASE) {
+ int i = 21;
+ unsigned int intr_flags = ONENAND_INT_MASTER;
+
+ switch (state) {
+ case FL_RESETING:
+ intr_flags |= ONENAND_INT_RESET;
+ break;
+ case FL_PREPARING_ERASE:
+ intr_flags |= ONENAND_INT_ERASE;
+ break;
+ case FL_VERIFYING_ERASE:
+ i = 101;
+ break;
+ }
+
+ while (--i) {
+ udelay(1);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ if (intr & ONENAND_INT_MASTER)
+ break;
+ }
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ wait_err("controller error", state, ctrl, intr);
+ return -EIO;
+ }
+ if ((intr & intr_flags) == intr_flags)
+ return 0;
+ /* Continue in wait for interrupt branch */
+ }
+
+ if (state != FL_READING) {
+ int result;
+
+ /* Turn interrupts on */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
+ syscfg |= ONENAND_SYS_CFG1_IOBE;
+ write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
+ if (c->flags & ONENAND_IN_OMAP34XX)
+ /* Add a delay to let GPIO settle */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ }
+
+ reinit_completion(&c->irq_done);
+ if (c->gpio_irq) {
+ result = gpio_get_value(c->gpio_irq);
+ if (result == -1) {
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ wait_err("gpio error", state, ctrl, intr);
+ return -EIO;
+ }
+ } else
+ result = 0;
+ if (result == 0) {
+ int retry_cnt = 0;
+retry:
+ result = wait_for_completion_timeout(&c->irq_done,
+ msecs_to_jiffies(20));
+ if (result == 0) {
+ /* Timeout after 20ms */
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ if (ctrl & ONENAND_CTRL_ONGO &&
+ !this->ongoing) {
+ /*
+ * The operation seems to be still going
+ * so give it some more time.
+ */
+ retry_cnt += 1;
+ if (retry_cnt < 3)
+ goto retry;
+ intr = read_reg(c,
+ ONENAND_REG_INTERRUPT);
+ wait_err("timeout", state, ctrl, intr);
+ return -EIO;
+ }
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ if ((intr & ONENAND_INT_MASTER) == 0)
+ wait_warn("timeout", state, ctrl, intr);
+ }
+ }
+ } else {
+ int retry_cnt = 0;
+
+ /* Turn interrupts off */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ syscfg &= ~ONENAND_SYS_CFG1_IOBE;
+ write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
+
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (1) {
+ if (time_before(jiffies, timeout)) {
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ if (intr & ONENAND_INT_MASTER)
+ break;
+ } else {
+ /* Timeout after 20ms */
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ if (ctrl & ONENAND_CTRL_ONGO) {
+ /*
+ * The operation seems to be still going
+ * so give it some more time.
+ */
+ retry_cnt += 1;
+ if (retry_cnt < 3) {
+ timeout = jiffies +
+ msecs_to_jiffies(20);
+ continue;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+
+ if (intr & ONENAND_INT_READ) {
+ int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);
+
+ if (ecc) {
+ unsigned int addr1, addr8;
+
+ addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
+ addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk(KERN_ERR "onenand_wait: ECC error = "
+ "0x%04x, addr1 %#x, addr8 %#x\n",
+ ecc, addr1, addr8);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ } else if (ecc & ONENAND_ECC_1BIT_ALL) {
+ printk(KERN_NOTICE "onenand_wait: correctable "
+ "ECC error = 0x%04x, addr1 %#x, "
+ "addr8 %#x\n", ecc, addr1, addr8);
+ mtd->ecc_stats.corrected++;
+ }
+ }
+ } else if (state == FL_READING) {
+ wait_err("timeout", state, ctrl, intr);
+ return -EIO;
+ }
+
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ wait_err("controller error", state, ctrl, intr);
+ if (ctrl & ONENAND_CTRL_LOCK)
+ printk(KERN_ERR "onenand_wait: "
+ "Device is write protected!!!\n");
+ return -EIO;
+ }
+
+ ctrl_mask = 0xFE9F;
+ if (this->ongoing)
+ ctrl_mask &= ~0x8000;
+
+ if (ctrl & ctrl_mask)
+ wait_warn("unexpected controller status", state, ctrl, intr);
+
+ return 0;
+}
+
+static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ if (area == ONENAND_DATARAM)
+ return this->writesize;
+ if (area == ONENAND_SPARERAM)
+ return mtd->oobsize;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
+
+static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset;
+ unsigned long timeout;
+ void *buf = (void *)buffer;
+ size_t xtra;
+ volatile unsigned *done;
+
+ bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+ if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
+ goto out_copy;
+
+ /* panic_write() may be in an interrupt context */
+ if (in_interrupt() || oops_in_progress)
+ goto out_copy;
+
+ if (buf >= high_memory) {
+ struct page *p1;
+
+ if (((size_t)buf & PAGE_MASK) !=
+ ((size_t)(buf + count - 1) & PAGE_MASK))
+ goto out_copy;
+ p1 = vmalloc_to_page(buf);
+ if (!p1)
+ goto out_copy;
+ buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
+ }
+
+ xtra = count & 3;
+ if (xtra) {
+ count -= xtra;
+ memcpy(buf + count, this->base + bram_offset + count, xtra);
+ }
+
+ dma_src = c->phys_base + bram_offset;
+ dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ dev_err(&c->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n",
+ count);
+ goto out_copy;
+ }
+
+ omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
+ count >> 2, 1, 0, 0, 0);
+ omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_src, 0, 0);
+ omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_dst, 0, 0);
+
+ reinit_completion(&c->dma_done);
+ omap_start_dma(c->dma_channel);
+
+ timeout = jiffies + msecs_to_jiffies(20);
+ done = &c->dma_done.done;
+ while (time_before(jiffies, timeout))
+ if (*done)
+ break;
+
+ dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+
+ if (!*done) {
+ dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
+ goto out_copy;
+ }
+
+ return 0;
+
+out_copy:
+ memcpy(buf, this->base + bram_offset, count);
+ return 0;
+}
+
+static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset;
+ unsigned long timeout;
+ void *buf = (void *)buffer;
+ volatile unsigned *done;
+
+ bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+ if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
+ goto out_copy;
+
+ /* panic_write() may be in an interrupt context */
+ if (in_interrupt() || oops_in_progress)
+ goto out_copy;
+
+ if (buf >= high_memory) {
+ struct page *p1;
+
+ if (((size_t)buf & PAGE_MASK) !=
+ ((size_t)(buf + count - 1) & PAGE_MASK))
+ goto out_copy;
+ p1 = vmalloc_to_page(buf);
+ if (!p1)
+ goto out_copy;
+ buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
+ }
+
+ dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
+ dma_dst = c->phys_base + bram_offset;
+ if (dma_mapping_error(&c->pdev->dev, dma_src)) {
+ dev_err(&c->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n",
+ count);
+ return -1;
+ }
+
+ omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
+ count >> 2, 1, 0, 0, 0);
+ omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_src, 0, 0);
+ omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_dst, 0, 0);
+
+ reinit_completion(&c->dma_done);
+ omap_start_dma(c->dma_channel);
+
+ timeout = jiffies + msecs_to_jiffies(20);
+ done = &c->dma_done.done;
+ while (time_before(jiffies, timeout))
+ if (*done)
+ break;
+
+ dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
+
+ if (!*done) {
+ dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
+ goto out_copy;
+ }
+
+ return 0;
+
+out_copy:
+ memcpy(this->base + bram_offset, buf, count);
+ return 0;
+}
+
+#else
+
+static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ return -ENOSYS;
+}
+
+static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
+{
+ return -ENOSYS;
+}
+
+#endif
+
+#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
+
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset;
+
+ bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+ /* DMA is not used. Revisit PM requirements before enabling it. */
+ if (1 || (c->dma_channel < 0) ||
+ ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
+ (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
+ memcpy(buffer, (__force void *)(this->base + bram_offset),
+ count);
+ return 0;
+ }
+
+ dma_src = c->phys_base + bram_offset;
+ dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ dev_err(&c->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n",
+ count);
+ return -1;
+ }
+
+ omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
+ count / 4, 1, 0, 0, 0);
+ omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_src, 0, 0);
+ omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_dst, 0, 0);
+
+ reinit_completion(&c->dma_done);
+ omap_start_dma(c->dma_channel);
+ wait_for_completion(&c->dma_done);
+
+ dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+
+ return 0;
+}
+
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset;
+
+ bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+ /* DMA is not used. Revisit PM requirements before enabling it. */
+ if (1 || (c->dma_channel < 0) ||
+ ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
+ (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
+ memcpy((__force void *)(this->base + bram_offset), buffer,
+ count);
+ return 0;
+ }
+
+ dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
+ DMA_TO_DEVICE);
+ dma_dst = c->phys_base + bram_offset;
+ if (dma_mapping_error(&c->pdev->dev, dma_src)) {
+ dev_err(&c->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n",
+ count);
+ return -1;
+ }
+
+ omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
+ count / 2, 1, 0, 0, 0);
+ omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_src, 0, 0);
+ omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_dst, 0, 0);
+
+ reinit_completion(&c->dma_done);
+ omap_start_dma(c->dma_channel);
+ wait_for_completion(&c->dma_done);
+
+ dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+#else
+
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ return -ENOSYS;
+}
+
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
+{
+ return -ENOSYS;
+}
+
+#endif
+
+static struct platform_driver omap2_onenand_driver;
+
+static void omap2_onenand_shutdown(struct platform_device *pdev)
+{
+ struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
+
+ /* With certain content in the buffer RAM, the OMAP boot ROM code
+ * can recognize the flash chip incorrectly. Zero it out before
+ * soft reset.
+ */
+ memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
+}
+
+static int omap2_onenand_enable(struct mtd_info *mtd)
+{
+ int ret;
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+ ret = regulator_enable(c->regulator);
+ if (ret != 0)
+ dev_err(&c->pdev->dev, "can't enable regulator\n");
+
+ return ret;
+}
+
+static int omap2_onenand_disable(struct mtd_info *mtd)
+{
+ int ret;
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+ ret = regulator_disable(c->regulator);
+ if (ret != 0)
+ dev_err(&c->pdev->dev, "can't disable regulator\n");
+
+ return ret;
+}
+
+static int omap2_onenand_probe(struct platform_device *pdev)
+{
+ struct omap_onenand_platform_data *pdata;
+ struct omap2_onenand *c;
+ struct onenand_chip *this;
+ int r;
+ struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ init_completion(&c->irq_done);
+ init_completion(&c->dma_done);
+ c->flags = pdata->flags;
+ c->gpmc_cs = pdata->cs;
+ c->gpio_irq = pdata->gpio_irq;
+ c->dma_channel = pdata->dma_channel;
+ if (c->dma_channel < 0) {
+ /* if -1, don't use DMA */
+ c->gpio_irq = 0;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ r = -EINVAL;
+ dev_err(&pdev->dev, "error getting memory resource\n");
+ goto err_kfree;
+ }
+
+ c->phys_base = res->start;
+ c->mem_size = resource_size(res);
+
+ if (request_mem_region(c->phys_base, c->mem_size,
+ pdev->dev.driver->name) == NULL) {
+ dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
+ c->phys_base, c->mem_size);
+ r = -EBUSY;
+ goto err_kfree;
+ }
+ c->onenand.base = ioremap(c->phys_base, c->mem_size);
+ if (c->onenand.base == NULL) {
+ r = -ENOMEM;
+ goto err_release_mem_region;
+ }
+
+ if (pdata->onenand_setup != NULL) {
+ r = pdata->onenand_setup(c->onenand.base, &c->freq);
+ if (r < 0) {
+ dev_err(&pdev->dev, "Onenand platform setup failed: "
+ "%d\n", r);
+ goto err_iounmap;
+ }
+ c->setup = pdata->onenand_setup;
+ }
+
+ if (c->gpio_irq) {
+ if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
+ dev_err(&pdev->dev, "Failed to request GPIO%d for "
+ "OneNAND\n", c->gpio_irq);
+ goto err_iounmap;
+ }
+ gpio_direction_input(c->gpio_irq);
+
+ if ((r = request_irq(gpio_to_irq(c->gpio_irq),
+ omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
+ pdev->dev.driver->name, c)) < 0)
+ goto err_release_gpio;
+ }
+
+ if (c->dma_channel >= 0) {
+ r = omap_request_dma(0, pdev->dev.driver->name,
+ omap2_onenand_dma_cb, (void *) c,
+ &c->dma_channel);
+ if (r == 0) {
+ omap_set_dma_write_mode(c->dma_channel,
+ OMAP_DMA_WRITE_NON_POSTED);
+ omap_set_dma_src_data_pack(c->dma_channel, 1);
+ omap_set_dma_src_burst_mode(c->dma_channel,
+ OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_dest_data_pack(c->dma_channel, 1);
+ omap_set_dma_dest_burst_mode(c->dma_channel,
+ OMAP_DMA_DATA_BURST_8);
+ } else {
+ dev_info(&pdev->dev,
+ "failed to allocate DMA for OneNAND, "
+ "using PIO instead\n");
+ c->dma_channel = -1;
+ }
+ }
+
+ dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
+ "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
+ c->onenand.base, c->freq);
+
+ c->pdev = pdev;
+ c->mtd.name = dev_name(&pdev->dev);
+ c->mtd.priv = &c->onenand;
+ c->mtd.owner = THIS_MODULE;
+
+ c->mtd.dev.parent = &pdev->dev;
+
+ this = &c->onenand;
+ if (c->dma_channel >= 0) {
+ this->wait = omap2_onenand_wait;
+ if (c->flags & ONENAND_IN_OMAP34XX) {
+ this->read_bufferram = omap3_onenand_read_bufferram;
+ this->write_bufferram = omap3_onenand_write_bufferram;
+ } else {
+ this->read_bufferram = omap2_onenand_read_bufferram;
+ this->write_bufferram = omap2_onenand_write_bufferram;
+ }
+ }
+
+ if (pdata->regulator_can_sleep) {
+ c->regulator = regulator_get(&pdev->dev, "vonenand");
+ if (IS_ERR(c->regulator)) {
+ dev_err(&pdev->dev, "Failed to get regulator\n");
+ r = PTR_ERR(c->regulator);
+ goto err_release_dma;
+ }
+ c->onenand.enable = omap2_onenand_enable;
+ c->onenand.disable = omap2_onenand_disable;
+ }
+
+ if (pdata->skip_initial_unlocking)
+ this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;
+
+ if ((r = onenand_scan(&c->mtd, 1)) < 0)
+ goto err_release_regulator;
+
+ ppdata.of_node = pdata->of_node;
+	r = mtd_device_parse_register(&c->mtd, NULL, &ppdata,
+				      pdata->parts, pdata->nr_parts);
+ if (r)
+ goto err_release_onenand;
+
+ platform_set_drvdata(pdev, c);
+
+ return 0;
+
+err_release_onenand:
+ onenand_release(&c->mtd);
+err_release_regulator:
+ regulator_put(c->regulator);
+err_release_dma:
+ if (c->dma_channel != -1)
+ omap_free_dma(c->dma_channel);
+ if (c->gpio_irq)
+ free_irq(gpio_to_irq(c->gpio_irq), c);
+err_release_gpio:
+ if (c->gpio_irq)
+ gpio_free(c->gpio_irq);
+err_iounmap:
+ iounmap(c->onenand.base);
+err_release_mem_region:
+ release_mem_region(c->phys_base, c->mem_size);
+err_kfree:
+ kfree(c);
+
+ return r;
+}
+
+static int omap2_onenand_remove(struct platform_device *pdev)
+{
+ struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
+
+ onenand_release(&c->mtd);
+ regulator_put(c->regulator);
+ if (c->dma_channel != -1)
+ omap_free_dma(c->dma_channel);
+ omap2_onenand_shutdown(pdev);
+ if (c->gpio_irq) {
+ free_irq(gpio_to_irq(c->gpio_irq), c);
+ gpio_free(c->gpio_irq);
+ }
+ iounmap(c->onenand.base);
+ release_mem_region(c->phys_base, c->mem_size);
+ kfree(c);
+
+ return 0;
+}
+
+static struct platform_driver omap2_onenand_driver = {
+ .probe = omap2_onenand_probe,
+ .remove = omap2_onenand_remove,
+ .shutdown = omap2_onenand_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(omap2_onenand_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
+MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
new file mode 100644
index 000000000..43b3392ff
--- /dev/null
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -0,0 +1,4141 @@
+/*
+ * linux/drivers/mtd/onenand/onenand_base.c
+ *
+ * Copyright © 2005-2009 Samsung Electronics
+ * Copyright © 2007 Nokia Corporation
+ *
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Credits:
+ * Adrian Hunter <ext-adrian.hunter@nokia.com>:
+ * auto-placement support, read-while load support, various fixes
+ *
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND support
+ * Amul Kumar Saha <amul.saha at samsung.com>
+ * OTP support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+
+/*
+ * Multiblock erase if number of blocks to erase is 2 or more.
+ * Maximum number of blocks for simultaneous erase is 64.
+ */
+#define MB_ERASE_MIN_BLK_COUNT 2
+#define MB_ERASE_MAX_BLK_COUNT 64
+
+/* Default Flex-OneNAND boundary and lock respectively */
+static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
+
+module_param_array(flex_bdry, int, NULL, 0400);
+MODULE_PARM_DESC(flex_bdry,	"SLC boundary information for Flex-OneNAND\n"
+				"Syntax: flex_bdry=DIE_BDRY,LOCK,...\n"
+				"DIE_BDRY: SLC boundary of the die\n"
+				"LOCK: locking information for the SLC boundary\n"
+				"    0 -> set boundary in unlocked status\n"
+				"    1 -> set boundary in locked status");
+
+/* Default OneNAND/Flex-OneNAND OTP options */
+static int otp;
+
+module_param(otp, int, 0400);
+MODULE_PARM_DESC(otp,	"Corresponding behaviour of OneNAND in OTP\n"
+			"Syntax: otp=LOCK_TYPE\n"
+			"LOCK_TYPE: keys issued, for specific OTP lock type\n"
+			"    0 -> default (no blocks locked)\n"
+			"    1 -> OTP block lock\n"
+			"    2 -> 1st block lock\n"
+			"    3 -> both OTP block and 1st block lock");
+
+/*
+ * flexonenand_oob_128 - oob info for Flex-OneNAND with 4KB page
+ * For now, we expose only 64 out of 80 ECC bytes
+ */
+static struct nand_ecclayout flexonenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 102, 103, 104, 105
+ },
+ .oobfree = {
+ {2, 4}, {18, 4}, {34, 4}, {50, 4},
+ {66, 4}, {82, 4}, {98, 4}, {114, 4}
+ }
+};
+
+/*
+ * onenand_oob_128 - oob info for OneNAND with 4KB page
+ *
+ * Based on specification:
+ * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
+ *
+ * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout)
+ *
+ * oobfree uses the spare area fields marked as
+ * "Managed by internal ECC logic for Logical Sector Number area"
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 119
+ },
+ .oobfree = {
+ {2, 3}, {18, 3}, {34, 3}, {50, 3},
+ {66, 3}, {82, 3}, {98, 3}, {114, 3}
+ }
+};
+
+/**
+ * onenand_oob_64 - oob info for large (2KB) page
+ */
+static struct nand_ecclayout onenand_oob_64 = {
+ .eccbytes = 20,
+ .eccpos = {
+ 8, 9, 10, 11, 12,
+ 24, 25, 26, 27, 28,
+ 40, 41, 42, 43, 44,
+ 56, 57, 58, 59, 60,
+ },
+ .oobfree = {
+ {2, 3}, {14, 2}, {18, 3}, {30, 2},
+ {34, 3}, {46, 2}, {50, 3}, {62, 2}
+ }
+};
+
+/**
+ * onenand_oob_32 - oob info for middle (1KB) page
+ */
+static struct nand_ecclayout onenand_oob_32 = {
+ .eccbytes = 10,
+ .eccpos = {
+ 8, 9, 10, 11, 12,
+ 24, 25, 26, 27, 28,
+ },
+ .oobfree = { {2, 3}, {14, 2}, {18, 3}, {30, 2} }
+};
+
+static const unsigned char ffchars[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 16 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 32 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
+};
+
+/**
+ * onenand_readw - [OneNAND Interface] Read OneNAND register
+ * @param addr address to read
+ *
+ * Read OneNAND register
+ */
+static unsigned short onenand_readw(void __iomem *addr)
+{
+ return readw(addr);
+}
+
+/**
+ * onenand_writew - [OneNAND Interface] Write OneNAND register with value
+ * @param value value to write
+ * @param addr address to write
+ *
+ * Write OneNAND register with value
+ */
+static void onenand_writew(unsigned short value, void __iomem *addr)
+{
+ writew(value, addr);
+}
+
+/**
+ * onenand_block_address - [DEFAULT] Get block address
+ * @param this onenand chip data structure
+ * @param block the block
+ * @return translated block address if DDP, otherwise same
+ *
+ * Setup Start Address 1 Register (F100h)
+ */
+static int onenand_block_address(struct onenand_chip *this, int block)
+{
+ /* Device Flash Core select, NAND Flash Block Address */
+ if (block & this->density_mask)
+ return ONENAND_DDP_CHIP1 | (block ^ this->density_mask);
+
+ return block;
+}
+
+/**
+ * onenand_bufferram_address - [DEFAULT] Get bufferram address
+ * @param this onenand chip data structure
+ * @param block the block
+ * @return set DBS value if DDP, otherwise 0
+ *
+ * Setup Start Address 2 Register (F101h) for DDP
+ */
+static int onenand_bufferram_address(struct onenand_chip *this, int block)
+{
+ /* Device BufferRAM Select */
+ if (block & this->density_mask)
+ return ONENAND_DDP_CHIP1;
+
+ return ONENAND_DDP_CHIP0;
+}
+
+/**
+ * onenand_page_address - [DEFAULT] Get page address
+ * @param page the page address
+ * @param sector the sector address
+ * @return combined page and sector address
+ *
+ * Setup Start Address 8 Register (F107h)
+ */
+static int onenand_page_address(int page, int sector)
+{
+ /* Flash Page Address, Flash Sector Address */
+ int fpa, fsa;
+
+ fpa = page & ONENAND_FPA_MASK;
+ fsa = sector & ONENAND_FSA_MASK;
+
+ return ((fpa << ONENAND_FPA_SHIFT) | fsa);
+}
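+/*
+ * Example (directly from the masks above): page 5, sector 2 yields
+ * ((5 & ONENAND_FPA_MASK) << ONENAND_FPA_SHIFT) | (2 & ONENAND_FSA_MASK).
+ */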
+
+/**
+ * onenand_buffer_address - [DEFAULT] Get buffer address
+ * @param dataram1 DataRAM index
+ * @param sectors the sector address
+ * @param count the number of sectors
+ * @return the start buffer value
+ *
+ * Setup Start Buffer Register (F200h)
+ */
+static int onenand_buffer_address(int dataram1, int sectors, int count)
+{
+ int bsa, bsc;
+
+ /* BufferRAM Sector Address */
+ bsa = sectors & ONENAND_BSA_MASK;
+
+ if (dataram1)
+ bsa |= ONENAND_BSA_DATARAM1; /* DataRAM1 */
+ else
+ bsa |= ONENAND_BSA_DATARAM0; /* DataRAM0 */
+
+ /* BufferRAM Sector Count */
+ bsc = count & ONENAND_BSC_MASK;
+
+ return ((bsa << ONENAND_BSA_SHIFT) | bsc);
+}
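+/*
+ * Example (directly from the fields above): DataRAM1, sector 0, count 2
+ * yields (ONENAND_BSA_DATARAM1 << ONENAND_BSA_SHIFT) | (2 & ONENAND_BSC_MASK).
+ */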
+
+/**
+ * flexonenand_block - [Flex-OneNAND] Return the block number for a given address
+ * @param this OneNAND device structure
+ * @param addr address for which the block number is needed
+ */
+static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
+{
+ unsigned boundary, blk, die = 0;
+
+ if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
+ die = 1;
+ addr -= this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+
+ blk = addr >> (this->erase_shift - 1);
+ if (blk > boundary)
+ blk = (blk + boundary + 1) >> 1;
+
+ blk += die ? this->density_mask : 0;
+ return blk;
+}
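+/*
+ * Note on the math above: Flex-OneNAND SLC blocks are half the size of
+ * MLC blocks, so the address is first counted in SLC-sized units
+ * (erase_shift - 1). Units up to the boundary map 1:1 onto SLC blocks;
+ * past it, each MLC block spans two units, hence (blk + boundary + 1) >> 1.
+ */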
+
+inline unsigned onenand_block(struct onenand_chip *this, loff_t addr)
+{
+ if (!FLEXONENAND(this))
+ return addr >> this->erase_shift;
+ return flexonenand_block(this, addr);
+}
+
+/**
+ * flexonenand_addr - Return address of the block
+ * @this: OneNAND device structure
+ * @block: Block number on Flex-OneNAND
+ *
+ * Return address of the block
+ */
+static loff_t flexonenand_addr(struct onenand_chip *this, int block)
+{
+ loff_t ofs = 0;
+ int die = 0, boundary;
+
+ if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
+ block -= this->density_mask;
+ die = 1;
+ ofs = this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+ ofs += (loff_t)block << (this->erase_shift - 1);
+ if (block > (boundary + 1))
+ ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
+ return ofs;
+}
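+/*
+ * flexonenand_addr() is the inverse of flexonenand_block(): blocks up to
+ * the boundary contribute one SLC-sized unit each, blocks beyond it two.
+ */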
+
+loff_t onenand_addr(struct onenand_chip *this, int block)
+{
+ if (!FLEXONENAND(this))
+ return (loff_t)block << this->erase_shift;
+ return flexonenand_addr(this, block);
+}
+EXPORT_SYMBOL(onenand_addr);
+
+/**
+ * onenand_get_density - [DEFAULT] Get OneNAND density
+ * @param dev_id OneNAND device ID
+ *
+ * Get OneNAND density from device ID
+ */
+static inline int onenand_get_density(int dev_id)
+{
+ int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ return (density & ONENAND_DEVICE_DENSITY_MASK);
+}
+
+/**
+ * flexonenand_region - [Flex-OneNAND] Return erase region of addr
+ * @param mtd MTD device structure
+ * @param addr address whose erase region needs to be identified
+ */
+int flexonenand_region(struct mtd_info *mtd, loff_t addr)
+{
+ int i;
+
+ for (i = 0; i < mtd->numeraseregions; i++)
+ if (addr < mtd->eraseregions[i].offset)
+ break;
+ return i - 1;
+}
+EXPORT_SYMBOL(flexonenand_region);
+
+/**
+ * onenand_command - [DEFAULT] Send command to OneNAND device
+ * @param mtd MTD device structure
+ * @param cmd the command to be sent
+ * @param addr offset to read from or write to
+ * @param len number of bytes to read or write
+ *
+ * Send command to OneNAND device. This function is used for middle/large page
+ * devices (1KB/2KB per page)
+ */
+static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int value, block, page;
+
+ /* Address translation */
+ switch (cmd) {
+ case ONENAND_CMD_UNLOCK:
+ case ONENAND_CMD_LOCK:
+ case ONENAND_CMD_LOCK_TIGHT:
+ case ONENAND_CMD_UNLOCK_ALL:
+ block = -1;
+ page = -1;
+ break;
+
+ case FLEXONENAND_CMD_PI_ACCESS:
+ /* addr contains die index */
+ block = addr * this->density_mask;
+ page = -1;
+ break;
+
+ case ONENAND_CMD_ERASE:
+ case ONENAND_CMD_MULTIBLOCK_ERASE:
+ case ONENAND_CMD_ERASE_VERIFY:
+ case ONENAND_CMD_BUFFERRAM:
+ case ONENAND_CMD_OTP_ACCESS:
+ block = onenand_block(this, addr);
+ page = -1;
+ break;
+
+ case FLEXONENAND_CMD_READ_PI:
+ cmd = ONENAND_CMD_READ;
+ block = addr * this->density_mask;
+ page = 0;
+ break;
+
+ default:
+ block = onenand_block(this, addr);
+ if (FLEXONENAND(this))
+			page = (int) (addr - onenand_addr(this, block))
+				>> this->page_shift;
+ else
+ page = (int) (addr >> this->page_shift);
+ if (ONENAND_IS_2PLANE(this)) {
+ /* Make the even block number */
+ block &= ~1;
+ /* Is it the odd plane? */
+ if (addr & this->writesize)
+ block++;
+ page >>= 1;
+ }
+ page &= this->page_mask;
+ break;
+ }
+
+ /* NOTE: The setting order of the registers is very important! */
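+	/*
+	 * The sequence below is: START_ADDRESS1/2 (block and DataRAM
+	 * select), START_ADDRESS8 (page/sector), START_BUFFER (BSA/BSC),
+	 * interrupt clear, and finally the command register.
+	 */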
+ if (cmd == ONENAND_CMD_BUFFERRAM) {
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+
+ if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this))
+ /* It is always BufferRAM0 */
+ ONENAND_SET_BUFFERRAM0(this);
+ else
+ /* Switch to the next data buffer */
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ return 0;
+ }
+
+ if (block != -1) {
+ /* Write 'DFS, FBA' of Flash */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
+
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ }
+
+ if (page != -1) {
+ /* Now we use page size operation */
+ int sectors = 0, count = 0;
+ int dataram;
+
+ switch (cmd) {
+ case FLEXONENAND_CMD_RECOVER_LSB:
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ if (ONENAND_IS_4KB_PAGE(this))
+ /* It is always BufferRAM0 */
+ dataram = ONENAND_SET_BUFFERRAM0(this);
+ else
+ dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
+ break;
+
+ default:
+ if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
+ cmd = ONENAND_CMD_2X_PROG;
+ dataram = ONENAND_CURRENT_BUFFERRAM(this);
+ break;
+ }
+
+ /* Write 'FPA, FSA' of Flash */
+ value = onenand_page_address(page, sectors);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS8);
+
+ /* Write 'BSA, BSC' of DataRAM */
+ value = onenand_buffer_address(dataram, sectors, count);
+ this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
+ }
+
+ /* Interrupt clear */
+ this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
+
+ /* Write command */
+ this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
+
+ return 0;
+}
+
+/**
+ * onenand_read_ecc - return ecc status
+ * @param this onenand chip structure
+ */
+static inline int onenand_read_ecc(struct onenand_chip *this)
+{
+ int ecc, i, result = 0;
+
+ if (!FLEXONENAND(this) && !ONENAND_IS_4KB_PAGE(this))
+ return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+
+ for (i = 0; i < 4; i++) {
+ ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i*2);
+ if (likely(!ecc))
+ continue;
+ if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
+ return ONENAND_ECC_2BIT_ALL;
+ else
+ result = ONENAND_ECC_1BIT_ALL;
+ }
+
+ return result;
+}
+
+/**
+ * onenand_wait - [DEFAULT] wait until the command is done
+ * @param mtd MTD device structure
+ * @param state state to select the max. timeout value
+ *
+ * Wait for command done. This applies to all OneNAND commands.
+ * Read can take up to 30us, erase up to 2ms and program up to 350us,
+ * according to general OneNAND specs.
+ */
+static int onenand_wait(struct mtd_info *mtd, int state)
+{
+	struct onenand_chip *this = mtd->priv;
+ unsigned long timeout;
+ unsigned int flags = ONENAND_INT_MASTER;
+ unsigned int interrupt = 0;
+ unsigned int ctrl;
+
+ /* The 20 msec is enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+
+ if (interrupt & flags)
+ break;
+
+ if (state != FL_READING && state != FL_PREPARING_ERASE)
+ cond_resched();
+ }
+ /* To get correct interrupt status in timeout case */
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+
+ ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+
+ /*
+	 * The spec says to check the controller status first, but to get
+	 * correct information in the power-off recovery (POR) test case,
+	 * the ECC status should be read first.
+ */
+ if (interrupt & ONENAND_INT_READ) {
+ int ecc = onenand_read_ecc(this);
+ if (ecc) {
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk(KERN_ERR "%s: ECC error = 0x%04x\n",
+ __func__, ecc);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ } else if (ecc & ONENAND_ECC_1BIT_ALL) {
+ printk(KERN_DEBUG "%s: correctable ECC error = 0x%04x\n",
+ __func__, ecc);
+ mtd->ecc_stats.corrected++;
+ }
+ }
+ } else if (state == FL_READING) {
+ printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
+ __func__, ctrl, interrupt);
+ return -EIO;
+ }
+
+ if (state == FL_PREPARING_ERASE && !(interrupt & ONENAND_INT_ERASE)) {
+ printk(KERN_ERR "%s: mb erase timeout! ctrl=0x%04x intr=0x%04x\n",
+ __func__, ctrl, interrupt);
+ return -EIO;
+ }
+
+ if (!(interrupt & ONENAND_INT_MASTER)) {
+ printk(KERN_ERR "%s: timeout! ctrl=0x%04x intr=0x%04x\n",
+ __func__, ctrl, interrupt);
+ return -EIO;
+ }
+
+	/* If there's a controller error, it's a real error */
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ printk(KERN_ERR "%s: controller error = 0x%04x\n",
+ __func__, ctrl);
+ if (ctrl & ONENAND_CTRL_LOCK)
+ printk(KERN_ERR "%s: it's locked error.\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * onenand_interrupt - [DEFAULT] onenand interrupt handler
+ * @param irq onenand interrupt number
+ * @param data interrupt data
+ *
+ * Signal command completion
+ */
+static irqreturn_t onenand_interrupt(int irq, void *data)
+{
+ struct onenand_chip *this = data;
+
+ /* To handle shared interrupt */
+ if (!this->complete.done)
+ complete(&this->complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * onenand_interrupt_wait - [DEFAULT] wait until the command is done
+ * @param mtd MTD device structure
+ * @param state state to select the max. timeout value
+ *
+ * Wait for command done.
+ */
+static int onenand_interrupt_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ wait_for_completion(&this->complete);
+
+ return onenand_wait(mtd, state);
+}
+
+/*
+ * onenand_try_interrupt_wait - [DEFAULT] try interrupt wait
+ * @param mtd MTD device structure
+ * @param state state to select the max. timeout value
+ *
+ * Try interrupt-based wait (used only once, to probe whether the IRQ works)
+ */
+static int onenand_try_interrupt_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned long remain, timeout;
+
+ /* We use interrupt wait first */
+ this->wait = onenand_interrupt_wait;
+
+ timeout = msecs_to_jiffies(100);
+ remain = wait_for_completion_timeout(&this->complete, timeout);
+ if (!remain) {
+ printk(KERN_INFO "OneNAND: There's no interrupt. "
+ "We use the normal wait\n");
+
+ /* Release the irq */
+ free_irq(this->irq, this);
+
+ this->wait = onenand_wait;
+ }
+
+ return onenand_wait(mtd, state);
+}
+
+/*
+ * onenand_setup_wait - [OneNAND Interface] setup onenand wait method
+ * @param mtd MTD device structure
+ *
+ * There are two methods to wait for OneNAND completion:
+ * 1. polling - read the interrupt status register
+ * 2. interrupt - use the kernel interrupt mechanism
+ */
+static void onenand_setup_wait(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int syscfg;
+
+ init_completion(&this->complete);
+
+ if (this->irq <= 0) {
+ this->wait = onenand_wait;
+ return;
+ }
+
+ if (request_irq(this->irq, &onenand_interrupt,
+ IRQF_SHARED, "onenand", this)) {
+ /* If we can't get irq, use the normal wait */
+ this->wait = onenand_wait;
+ return;
+ }
+
+ /* Enable interrupt */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ syscfg |= ONENAND_SYS_CFG1_IOBE;
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+
+ this->wait = onenand_try_interrupt_wait;
+}
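+/*
+ * The first interrupt-driven wait goes through onenand_try_interrupt_wait():
+ * if the IRQ fires, the driver keeps onenand_interrupt_wait(); on timeout it
+ * releases the IRQ and falls back to the polling onenand_wait().
+ */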
+
+/**
+ * onenand_bufferram_offset - [DEFAULT] BufferRAM offset
+ * @param mtd MTD data structure
+ * @param area BufferRAM area
+ * @return offset of the given area
+ *
+ * Return the BufferRAM offset for the given area
+ */
+static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ /* Note: the 'this->writesize' is a real page size */
+ if (area == ONENAND_DATARAM)
+ return this->writesize;
+ if (area == ONENAND_SPARERAM)
+ return mtd->oobsize;
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_read_bufferram - [OneNAND Interface] Read the bufferram area
+ * @param mtd MTD data structure
+ * @param area BufferRAM area
+ * @param buffer the databuffer to put/get data
+ * @param offset offset to read from or write to
+ * @param count number of bytes to read/write
+ *
+ * Read the BufferRAM area
+ */
+static int onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+
+ bufferram += onenand_bufferram_offset(mtd, area);
+
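+	/*
+	 * The interface is 16 bits wide, so an odd trailing byte cannot be
+	 * copied directly: read the word containing it, keep the low byte,
+	 * and let memcpy() handle the now even-sized remainder.
+	 */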
+ if (ONENAND_CHECK_BYTE_ACCESS(count)) {
+ unsigned short word;
+
+ /* Align with word(16-bit) size */
+ count--;
+
+ /* Read word and save byte */
+ word = this->read_word(bufferram + offset + count);
+ buffer[count] = (word & 0xff);
+ }
+
+ memcpy(buffer, bufferram + offset, count);
+
+ return 0;
+}
+
+/**
+ * onenand_sync_read_bufferram - [OneNAND Interface] Read the bufferram area with Sync. Burst mode
+ * @param mtd MTD data structure
+ * @param area BufferRAM area
+ * @param buffer the databuffer to put/get data
+ * @param offset offset to read from or write to
+ * @param count number of bytes to read/write
+ *
+ * Read the BufferRAM area with Sync. Burst Mode
+ */
+static int onenand_sync_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ this->mmcontrol(mtd, ONENAND_SYS_CFG1_SYNC_READ);
+
+ if (ONENAND_CHECK_BYTE_ACCESS(count)) {
+ unsigned short word;
+
+ /* Align with word(16-bit) size */
+ count--;
+
+ /* Read word and save byte */
+ word = this->read_word(bufferram + offset + count);
+ buffer[count] = (word & 0xff);
+ }
+
+ memcpy(buffer, bufferram + offset, count);
+
+ this->mmcontrol(mtd, 0);
+
+ return 0;
+}
+
+/**
+ * onenand_write_bufferram - [OneNAND Interface] Write the bufferram area
+ * @param mtd MTD data structure
+ * @param area BufferRAM area
+ * @param buffer the databuffer to put/get data
+ * @param offset offset to read from or write to
+ * @param count number of bytes to read/write
+ *
+ * Write the BufferRAM area
+ */
+static int onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ if (ONENAND_CHECK_BYTE_ACCESS(count)) {
+ unsigned short word;
+ int byte_offset;
+
+ /* Align with word(16-bit) size */
+ count--;
+
+ /* Calculate byte access offset */
+ byte_offset = offset + count;
+
+ /* Read word and save byte */
+ word = this->read_word(bufferram + byte_offset);
+ word = (word & ~0xff) | buffer[count];
+ this->write_word(word, bufferram + byte_offset);
+ }
+
+ memcpy(bufferram + offset, buffer, count);
+
+ return 0;
+}
+
+/**
+ * onenand_get_2x_blockpage - [GENERIC] Get blockpage at 2x program mode
+ * @param mtd MTD data structure
+ * @param addr address to check
+ * @return blockpage address
+ *
+ * Get blockpage address at 2x program mode
+ */
+static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage, block, page;
+
+ /* Calculate the even block number */
+ block = (int) (addr >> this->erase_shift) & ~1;
+ /* Is it the odd plane? */
+ if (addr & this->writesize)
+ block++;
+ page = (int) (addr >> (this->page_shift + 1)) & this->page_mask;
+ blockpage = (block << 7) | page;
+
+ return blockpage;
+}
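+/*
+ * The blockpage above packs the plane-adjusted block number over a 7-bit
+ * page index, i.e. up to 128 pages per block are representable.
+ */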
+
+/**
+ * onenand_check_bufferram - [GENERIC] Check BufferRAM information
+ * @param mtd MTD data structure
+ * @param addr address to check
+ * @return 1 if there is valid data, otherwise 0
+ *
+ * Check whether the BufferRAM contains the data we require
+ */
+static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage, found = 0;
+ unsigned int i;
+
+ if (ONENAND_IS_2PLANE(this))
+ blockpage = onenand_get_2x_blockpage(mtd, addr);
+ else
+ blockpage = (int) (addr >> this->page_shift);
+
+ /* Is there valid data? */
+ i = ONENAND_CURRENT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage)
+ found = 1;
+ else {
+ /* Check another BufferRAM */
+ i = ONENAND_NEXT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage) {
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ found = 1;
+ }
+ }
+
+ if (found && ONENAND_IS_DDP(this)) {
+ /* Select DataRAM for DDP */
+ int block = onenand_block(this, addr);
+ int value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ }
+
+ return found;
+}
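+/*
+ * The driver caches one blockpage tag per BufferRAM (two in total). A hit
+ * on the other BufferRAM switches the current selection to it; on DDP
+ * parts a hit also reprograms the DataRAM select register.
+ */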
+
+/**
+ * onenand_update_bufferram - [GENERIC] Update BufferRAM information
+ * @param mtd MTD data structure
+ * @param addr address to update
+ * @param valid valid flag
+ *
+ * Update BufferRAM information
+ */
+static void onenand_update_bufferram(struct mtd_info *mtd, loff_t addr,
+ int valid)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage;
+ unsigned int i;
+
+ if (ONENAND_IS_2PLANE(this))
+ blockpage = onenand_get_2x_blockpage(mtd, addr);
+ else
+ blockpage = (int) (addr >> this->page_shift);
+
+ /* Invalidate another BufferRAM */
+ i = ONENAND_NEXT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage)
+ this->bufferram[i].blockpage = -1;
+
+ /* Update BufferRAM */
+ i = ONENAND_CURRENT_BUFFERRAM(this);
+ if (valid)
+ this->bufferram[i].blockpage = blockpage;
+ else
+ this->bufferram[i].blockpage = -1;
+}
+
+/**
+ * onenand_invalidate_bufferram - [GENERIC] Invalidate BufferRAM information
+ * @param mtd MTD data structure
+ * @param addr start address to invalidate
+ * @param len length to invalidate
+ *
+ * Invalidate BufferRAM information
+ */
+static void onenand_invalidate_bufferram(struct mtd_info *mtd, loff_t addr,
+ unsigned int len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+ loff_t end_addr = addr + len;
+
+ /* Invalidate BufferRAM */
+ for (i = 0; i < MAX_BUFFERRAM; i++) {
+ loff_t buf_addr = this->bufferram[i].blockpage << this->page_shift;
+ if (buf_addr >= addr && buf_addr < end_addr)
+ this->bufferram[i].blockpage = -1;
+ }
+}
+
+/**
+ * onenand_get_device - [GENERIC] Get chip for selected access
+ * @param mtd MTD device structure
+ * @param new_state the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int onenand_get_device(struct mtd_info *mtd, int new_state)
+{
+ struct onenand_chip *this = mtd->priv;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * Grab the lock and see if the device is available
+ */
+ while (1) {
+ spin_lock(&this->chip_lock);
+ if (this->state == FL_READY) {
+ this->state = new_state;
+ spin_unlock(&this->chip_lock);
+ if (new_state != FL_PM_SUSPENDED && this->enable)
+ this->enable(mtd);
+ break;
+ }
+ if (new_state == FL_PM_SUSPENDED) {
+ spin_unlock(&this->chip_lock);
+ return (this->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN;
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&this->wq, &wait);
+ spin_unlock(&this->chip_lock);
+ schedule();
+ remove_wait_queue(&this->wq, &wait);
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_release_device - [GENERIC] release chip
+ * @param mtd MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device
+ */
+static void onenand_release_device(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (this->state != FL_PM_SUSPENDED && this->disable)
+ this->disable(mtd);
+ /* Release the chip */
+ spin_lock(&this->chip_lock);
+ this->state = FL_READY;
+ wake_up(&this->wq);
+ spin_unlock(&this->chip_lock);
+}
+
+/**
+ * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer
+ * @param mtd MTD device structure
+ * @param buf destination address
+ * @param column oob offset to read from
+ * @param thislen oob length to read
+ */
+static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int column,
+ int thislen)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct nand_oobfree *free;
+ int readcol = column;
+ int readend = column + thislen;
+ int lastgap = 0;
+ unsigned int i;
+ uint8_t *oob_buf = this->oob_buf;
+
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
+ if (readcol >= lastgap)
+ readcol += free->offset - lastgap;
+ if (readend >= lastgap)
+ readend += free->offset - lastgap;
+ lastgap = free->offset + free->length;
+ }
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
+ int free_end = free->offset + free->length;
+ if (free->offset < readend && free_end > readcol) {
+			int st = max_t(int, free->offset, readcol);
+			int ed = min_t(int, free_end, readend);
+ int n = ed - st;
+ memcpy(buf, oob_buf + st, n);
+ buf += n;
+ } else if (column == 0)
+ break;
+ }
+ return 0;
+}
+
+/**
+ * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
+ * @param mtd MTD device structure
+ * @param addr address to recover
+ * @param status return value from onenand_wait / onenand_bbt_wait
+ *
+ * MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has
+ * lower page address and MSB page has higher page address in paired pages.
+ * If power-off occurs during MSB page program, the paired LSB page data can
+ * become corrupt. LSB page recovery read is a way to read the LSB page even
+ * though its data are corrupted. When an uncorrectable error occurs as a
+ * result of an LSB page read after power up, issue an LSB page recovery read.
+ */
+static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+
+ /* Recovery is only for Flex-OneNAND */
+ if (!FLEXONENAND(this))
+ return status;
+
+ /* check if we failed due to uncorrectable error */
+ if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR)
+ return status;
+
+ /* check if address lies in MLC region */
+ i = flexonenand_region(mtd, addr);
+ if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
+ return status;
+
+ /* We are attempting to reread, so decrement stats.failed
+ * which was incremented by onenand_wait due to read failure
+ */
+ printk(KERN_INFO "%s: Attempting to recover from uncorrectable read\n",
+ __func__);
+ mtd->ecc_stats.failed--;
+
+ /* Issue the LSB page recovery command */
+ this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
+ return this->wait(mtd, FL_READING);
+}
+
+/**
+ * onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * MLC OneNAND / Flex-OneNAND has a 4KB page size and 4KB dataram,
+ * so read-while-load is not available.
+ */
+static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0;
+ int writesize = this->writesize;
+
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if (from + len > mtd->size) {
+ printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+ __func__);
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = min_t(int, writesize, len - read);
+
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (mtd_is_eccerr(ret))
+ ret = 0;
+ if (ret)
+ break;
+ }
+
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ read += thislen;
+ if (read == len)
+ break;
+
+ from += thislen;
+ buf += thislen;
+ }
+
+ /*
+	 * Return success if there were no ECC failures; else return -EBADMSG.
+	 * The fs driver will take care of it, because
+	 * retlen == desired len and result == -EBADMSG
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
+}
+
+/**
+ * onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * OneNAND read main and/or out-of-band data
+ */
+static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0, boundary = 0;
+ int writesize = this->writesize;
+
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if ((from + len) > mtd->size) {
+ printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+ __func__);
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ /* Read-while-load method */
+
+ /* Do first load to bufferRAM */
+ if (read < len) {
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+ ret = this->wait(mtd, FL_READING);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (mtd_is_eccerr(ret))
+ ret = 0;
+ }
+ }
+
+ thislen = min_t(int, writesize, len - read);
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ while (!ret) {
+ /* If there is more to load then start next load */
+ from += thislen;
+ if (read + thislen < len) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+ /*
+ * Chip boundary handling in DDP
+ * Now we issued chip 1 read and pointed chip 1
+ * bufferram so we have to point chip 0 bufferram.
+ */
+ if (ONENAND_IS_DDP(this) &&
+ unlikely(from == (this->chipsize >> 1))) {
+ this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
+ boundary = 1;
+ } else
+ boundary = 0;
+ ONENAND_SET_PREV_BUFFERRAM(this);
+ }
+ /* While load is going, read from last bufferRAM */
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+
+ /* Read oob area if needed */
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ /* See if we are done */
+ read += thislen;
+ if (read == len)
+ break;
+ /* Set up for next read from bufferRAM */
+ if (unlikely(boundary))
+ this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ buf += thislen;
+ thislen = min_t(int, writesize, len - read);
+ column = 0;
+ cond_resched();
+ /* Now wait for load */
+ ret = this->wait(mtd, FL_READING);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (mtd_is_eccerr(ret))
+ ret = 0;
+ }
+
+ /*
+	 * Return success if there were no ECC failures; else return -EBADMSG.
+	 * The fs driver will take care of it, because
+	 * retlen == desired len and result == -EBADMSG
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
+}
+
+/**
+ * onenand_read_oob_nolock - [MTD Interface] OneNAND read out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * OneNAND read out-of-band data from the spare area
+ */
+static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ int read = 0, thislen, column, oobsize;
+ size_t len = ops->ooblen;
+ unsigned int mode = ops->mode;
+ u_char *buf = ops->oobbuf;
+ int ret = 0, readcmd;
+
+ from += ops->ooboffs;
+
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
+
+ /* Initialize return length value */
+ ops->oobretlen = 0;
+
+ if (mode == MTD_OPS_AUTO_OOB)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ column = from & (mtd->oobsize - 1);
+
+ if (unlikely(column >= oobsize)) {
+ printk(KERN_ERR "%s: Attempted to start read outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size ||
+ column + len > ((mtd->size >> this->page_shift) -
+ (from >> this->page_shift)) * oobsize)) {
+ printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = oobsize - column;
+ thislen = min_t(int, thislen, len);
+
+ this->command(mtd, readcmd, from, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, from, 0);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
+ if (ret && !mtd_is_eccerr(ret)) {
+ printk(KERN_ERR "%s: read failed = 0x%x\n",
+ __func__, ret);
+ break;
+ }
+
+ if (mode == MTD_OPS_AUTO_OOB)
+ onenand_transfer_auto_oob(mtd, buf, column, thislen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
+
+ read += thislen;
+
+ if (read == len)
+ break;
+
+ buf += thislen;
+
+ /* Read more? */
+ if (read < len) {
+ /* Page size */
+ from += mtd->writesize;
+ column = 0;
+ }
+ }
+
+ ops->oobretlen = read;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return 0;
+}
+
+/**
+ * onenand_read - [MTD Interface] Read data from flash
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param len number of bytes to read
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put data
+ *
+ * Read with ECC
+ */
+static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .ooblen = 0,
+ .datbuf = buf,
+ .oobbuf = NULL,
+ };
+ int ret;
+
+ onenand_get_device(mtd, FL_READING);
+ ret = ONENAND_IS_4KB_PAGE(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
+ onenand_release_device(mtd);
+
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/**
+ * onenand_read_oob - [MTD Interface] Read main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * Read main and/or out-of-band
+ */
+static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ break;
+ case MTD_OPS_RAW:
+ /* Not implemented yet */
+ default:
+ return -EINVAL;
+ }
+
+ onenand_get_device(mtd, FL_READING);
+ if (ops->datbuf)
+ ret = ONENAND_IS_4KB_PAGE(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, ops) :
+ onenand_read_ops_nolock(mtd, from, ops);
+ else
+ ret = onenand_read_oob_nolock(mtd, from, ops);
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_bbt_wait - [DEFAULT] wait until the command is done
+ * @param mtd MTD device structure
+ * @param state state to select the max. timeout value
+ *
+ * Wait for command done.
+ */
+static int onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned long timeout;
+ unsigned int interrupt, ctrl, ecc, addr1, addr8;
+
+ /* The 20 msec is enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ if (interrupt & ONENAND_INT_MASTER)
+ break;
+ }
+ /* To get correct interrupt status in timeout case */
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+ addr1 = this->read_word(this->base + ONENAND_REG_START_ADDRESS1);
+ addr8 = this->read_word(this->base + ONENAND_REG_START_ADDRESS8);
+
+ if (interrupt & ONENAND_INT_READ) {
+ ecc = onenand_read_ecc(this);
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk(KERN_DEBUG "%s: ecc 0x%04x ctrl 0x%04x "
+ "intr 0x%04x addr1 %#x addr8 %#x\n",
+ __func__, ecc, ctrl, interrupt, addr1, addr8);
+ return ONENAND_BBT_READ_ECC_ERROR;
+ }
+ } else {
+ printk(KERN_ERR "%s: read timeout! ctrl 0x%04x "
+ "intr 0x%04x addr1 %#x addr8 %#x\n",
+ __func__, ctrl, interrupt, addr1, addr8);
+ return ONENAND_BBT_READ_FATAL_ERROR;
+ }
+
+ /* Initial bad block case: 0x2400 or 0x0400 */
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ printk(KERN_DEBUG "%s: ctrl 0x%04x intr 0x%04x addr1 %#x "
+ "addr8 %#x\n", __func__, ctrl, interrupt, addr1, addr8);
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_bbt_read_oob - [MTD Interface] OneNAND read out-of-band for bbt scan
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * OneNAND read out-of-band data from the spare area for bbt scan
+ */
+int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int read = 0, thislen, column;
+ int ret = 0, readcmd;
+ size_t len = ops->ooblen;
+ u_char *buf = ops->oobbuf;
+
+ pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from,
+ len);
+
+ /* Initialize return value */
+ ops->oobretlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (unlikely((from + len) > mtd->size)) {
+ printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+ __func__);
+ return ONENAND_BBT_READ_FATAL_ERROR;
+ }
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_READING);
+
+ column = from & (mtd->oobsize - 1);
+
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = mtd->oobsize - column;
+ thislen = min_t(int, thislen, len);
+
+ this->command(mtd, readcmd, from, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, from, 0);
+
+ ret = this->bbt_wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
+ if (ret)
+ break;
+
+ this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
+ read += thislen;
+ if (read == len)
+ break;
+
+ buf += thislen;
+
+ /* Read more? */
+ if (read < len) {
+			/* Advance by page size */
+ from += this->writesize;
+ column = 0;
+ }
+ }
+
+ /* Deselect and wake up anyone waiting on the device */
+ onenand_release_device(mtd);
+
+ ops->oobretlen = read;
+ return ret;
+}
+
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+/**
+ * onenand_verify_oob - [GENERIC] verify the oob contents after a write
+ * @param mtd MTD device structure
+ * @param buf the databuffer to verify
+ * @param to offset to read from
+ */
+static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
+{
+ struct onenand_chip *this = mtd->priv;
+ u_char *oob_buf = this->oob_buf;
+ int status, i, readcmd;
+
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ this->command(mtd, readcmd, to, mtd->oobsize);
+ onenand_update_bufferram(mtd, to, 0);
+ status = this->wait(mtd, FL_READING);
+ if (status)
+ return status;
+
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
+ for (i = 0; i < mtd->oobsize; i++)
+ if (buf[i] != 0xFF && buf[i] != oob_buf[i])
+ return -EBADMSG;
+
+ return 0;
+}
+
+/**
+ * onenand_verify - [GENERIC] verify the chip contents after a write
+ * @param mtd MTD device structure
+ * @param buf the databuffer to verify
+ * @param addr offset to read from
+ * @param len number of bytes to read and compare
+ */
+static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret = 0;
+ int thislen, column;
+
+ column = addr & (this->writesize - 1);
+
+ while (len != 0) {
+ thislen = min_t(int, this->writesize - column, len);
+
+ this->command(mtd, ONENAND_CMD_READ, addr, this->writesize);
+
+ onenand_update_bufferram(mtd, addr, 0);
+
+ ret = this->wait(mtd, FL_READING);
+ if (ret)
+ return ret;
+
+ onenand_update_bufferram(mtd, addr, 1);
+
+ this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize);
+
+ if (memcmp(buf, this->verify_buf + column, thislen))
+ return -EBADMSG;
+
+ len -= thislen;
+ buf += thislen;
+ addr += thislen;
+ column = 0;
+ }
+
+ return 0;
+}
+#else
+#define onenand_verify(...) (0)
+#define onenand_verify_oob(...) (0)
+#endif
+
+#define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0)
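+/*
+ * Example (sizes illustrative; subpagesize depends on the detected device):
+ * with 512-byte subpages, offset 0x200 is accepted while offset 0x201 is
+ * rejected by NOTALIGNED().
+ */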
+
+static void onenand_panic_wait(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int interrupt;
+ int i;
+
+ for (i = 0; i < 2000; i++) {
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ if (interrupt & ONENAND_INT_MASTER)
+ break;
+ udelay(10);
+ }
+}
+
+/**
+ * onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param len number of bytes to write
+ * @param retlen pointer to variable to store the number of written bytes
+ * @param buf the data to write
+ *
+ * Write with ECC
+ */
+static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct onenand_chip *this = mtd->priv;
+ int column, subpage;
+ int written = 0;
+
+ if (this->state == FL_PM_SUSPENDED)
+ return -EBUSY;
+
+ /* Wait for any existing operation to clear */
+ onenand_panic_wait(mtd);
+
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
+
+	/* Reject writes that are not page aligned */
+	if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
+		printk(KERN_ERR "%s: Attempt to write non-page-aligned data\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ column = to & (mtd->writesize - 1);
+
+	/* Loop until all data is written */
+ while (written < len) {
+ int thislen = min_t(int, mtd->writesize - column, len - written);
+ u_char *wbuf = (u_char *) buf;
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
+
+ /* Partial page write */
+ subpage = thislen < mtd->writesize;
+ if (subpage) {
+ memset(this->page_buf, 0xff, mtd->writesize);
+ memcpy(this->page_buf + column, buf, thislen);
+ wbuf = this->page_buf;
+ }
+
+ this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
+ this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize);
+
+ this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+
+ onenand_panic_wait(mtd);
+
+ /* In partial page write we don't update bufferram */
+ onenand_update_bufferram(mtd, to, !subpage);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, !subpage);
+ }
+
+ written += thislen;
+
+ if (written == len)
+ break;
+
+ column = 0;
+ to += thislen;
+ buf += thislen;
+ }
+
+ *retlen = written;
+ return 0;
+}
+
+/**
+ * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer
+ * @param mtd MTD device structure
+ * @param oob_buf oob buffer
+ * @param buf source address
+ * @param column oob offset to write to
+ * @param thislen oob length to write
+ */
+static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
+ const u_char *buf, int column, int thislen)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct nand_oobfree *free;
+ int writecol = column;
+ int writeend = column + thislen;
+ int lastgap = 0;
+ unsigned int i;
+
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
+ if (writecol >= lastgap)
+ writecol += free->offset - lastgap;
+ if (writeend >= lastgap)
+ writeend += free->offset - lastgap;
+ lastgap = free->offset + free->length;
+ }
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
+ int free_end = free->offset + free->length;
+ if (free->offset < writeend && free_end > writecol) {
+			int st = max_t(int, free->offset, writecol);
+			int ed = min_t(int, free_end, writeend);
+ int n = ed - st;
+ memcpy(oob_buf + st, buf, n);
+ buf += n;
+ } else if (column == 0)
+ break;
+ }
+ return 0;
+}
+
+/**
+ * onenand_write_ops_nolock - [OneNAND Interface] write main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param ops oob operation description structure
+ *
+ * Write main and/or oob with ECC
+ */
+static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int written = 0, column, thislen = 0, subpage = 0;
+ int prev = 0, prevlen = 0, prev_subpage = 0, first = 1;
+ int oobwritten = 0, oobcolumn, thisooblen, oobsize;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ const u_char *buf = ops->datbuf;
+ const u_char *oob = ops->oobbuf;
+ u_char *oobbuf;
+ int ret = 0, cmd;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
+
+ /* Initialize retlen, in case of early exit */
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+	/* Reject writes that are not page aligned */
+	if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
+		printk(KERN_ERR "%s: Attempt to write non-page-aligned data\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check zero length */
+ if (!len)
+ return 0;
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = to & (mtd->oobsize - 1);
+
+ column = to & (mtd->writesize - 1);
+
+ /* Loop until all data is written */
+ while (1) {
+ if (written < len) {
+ u_char *wbuf = (u_char *) buf;
+
+ thislen = min_t(int, mtd->writesize - column, len - written);
+ thisooblen = min_t(int, oobsize - oobcolumn, ooblen - oobwritten);
+
+ cond_resched();
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
+
+ /* Partial page write */
+ subpage = thislen < mtd->writesize;
+ if (subpage) {
+ memset(this->page_buf, 0xff, mtd->writesize);
+ memcpy(this->page_buf + column, buf, thislen);
+ wbuf = this->page_buf;
+ }
+
+ this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
+
+ if (oob) {
+ oobbuf = this->oob_buf;
+
+ /* We send a full oobsize buffer to the spare RAM
+ * to prevent partial (byte) accesses */
+ memset(oobbuf, 0xff, mtd->oobsize);
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
+ else
+ memcpy(oobbuf + oobcolumn, oob, thisooblen);
+
+ oobwritten += thisooblen;
+ oob += thisooblen;
+ oobcolumn = 0;
+ } else
+ oobbuf = (u_char *) ffchars;
+
+ this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+ } else
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ /*
+ * 2-plane, MLC, and Flex-OneNAND devices do not support
+ * the write-while-program feature.
+ */
+ if (!ONENAND_IS_2PLANE(this) && !ONENAND_IS_4KB_PAGE(this) && !first) {
+ ONENAND_SET_PREV_BUFFERRAM(this);
+
+ ret = this->wait(mtd, FL_WRITING);
+
+ /* In partial page write we don't update bufferram */
+ onenand_update_bufferram(mtd, prev, !ret && !prev_subpage);
+ if (ret) {
+ written -= prevlen;
+ printk(KERN_ERR "%s: write failed %d\n",
+ __func__, ret);
+ break;
+ }
+
+ if (written == len) {
+ /* Verify only happens when verify-write support is turned on */
+ ret = onenand_verify(mtd, buf - len, to - len, len);
+ if (ret)
+ printk(KERN_ERR "%s: verify failed %d\n",
+ __func__, ret);
+ break;
+ }
+
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ }
+
+ this->ongoing = 0;
+ cmd = ONENAND_CMD_PROG;
+
+ /* Exclude 1st OTP and OTP blocks for cache program feature */
+ if (ONENAND_IS_CACHE_PROGRAM(this) &&
+ likely(onenand_block(this, to) != 0) &&
+ ONENAND_IS_4KB_PAGE(this) &&
+ ((written + thislen) < len)) {
+ cmd = ONENAND_CMD_2X_CACHE_PROG;
+ this->ongoing = 1;
+ }
+
+ this->command(mtd, cmd, to, mtd->writesize);
+
+ /*
+ * 2-plane, MLC, and Flex-OneNAND devices wait here
+ */
+ if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this)) {
+ ret = this->wait(mtd, FL_WRITING);
+
+ /* In partial page write we don't update bufferram */
+ onenand_update_bufferram(mtd, to, !ret && !subpage);
+ if (ret) {
+ printk(KERN_ERR "%s: write failed %d\n",
+ __func__, ret);
+ break;
+ }
+
+ /* Verify only happens when verify-write support is turned on */
+ ret = onenand_verify(mtd, buf, to, thislen);
+ if (ret) {
+ printk(KERN_ERR "%s: verify failed %d\n",
+ __func__, ret);
+ break;
+ }
+
+ written += thislen;
+
+ if (written == len)
+ break;
+
+ } else
+ written += thislen;
+
+ column = 0;
+ prev_subpage = subpage;
+ prev = to;
+ prevlen = thislen;
+ to += thislen;
+ buf += thislen;
+ first = 0;
+ }
+
+ /* In error case, clear all bufferrams */
+ if (written != len)
+ onenand_invalidate_bufferram(mtd, 0, -1);
+
+ ops->retlen = written;
+ ops->oobretlen = oobwritten;
+
+ return ret;
+}
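+
+/*
+ * Pipelining note (editorial): on chips with write-while-program, the
+ * loop above loads page N+1 into the idle BufferRAM while page N is
+ * still programming, then waits on page N via ONENAND_SET_PREV_BUFFERRAM.
+ * 2-plane and 4KiB-page chips cannot overlap, so they wait right after
+ * issuing each program command.
+ */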
+
+
+/**
+ * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param ops oob operation description structure
+ *
+ * OneNAND write out-of-band
+ */
+static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int column, ret = 0, oobsize;
+ int written = 0, oobcmd;
+ u_char *oobbuf;
+ size_t len = ops->ooblen;
+ const u_char *buf = ops->oobbuf;
+ unsigned int mode = ops->mode;
+
+ to += ops->ooboffs;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
+
+ /* Initialize retlen, in case of early exit */
+ ops->oobretlen = 0;
+
+ if (mode == MTD_OPS_AUTO_OOB)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ column = to & (mtd->oobsize - 1);
+
+ if (unlikely(column >= oobsize)) {
+ printk(KERN_ERR "%s: Attempted to start write outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* For compatibility with NAND: Do not allow write past end of page */
+ if (unlikely(column + len > oobsize)) {
+ printk(KERN_ERR "%s: Attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow writes past end of device */
+ if (unlikely(to >= mtd->size ||
+ column + len > ((mtd->size >> this->page_shift) -
+ (to >> this->page_shift)) * oobsize)) {
+ printk(KERN_ERR "%s: Attempted to write past end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ oobbuf = this->oob_buf;
+
+ oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+
+ /* Loop until all data is written */
+ while (written < len) {
+ int thislen = min_t(int, oobsize, len - written);
+
+ cond_resched();
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize);
+
+ /* We send a full oobsize buffer to the spare RAM
+ * to prevent partial (byte) accesses */
+ memset(oobbuf, 0xff, mtd->oobsize);
+ if (mode == MTD_OPS_AUTO_OOB)
+ onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
+ else
+ memcpy(oobbuf + column, buf, thislen);
+ this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+
+ if (ONENAND_IS_4KB_PAGE(this)) {
+ /* Set main area of DataRAM to 0xff*/
+ memset(this->page_buf, 0xff, mtd->writesize);
+ this->write_bufferram(mtd, ONENAND_DATARAM,
+ this->page_buf, 0, mtd->writesize);
+ }
+
+ this->command(mtd, oobcmd, to, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, to, 0);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, 0);
+ }
+
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
+ break;
+ }
+
+ ret = onenand_verify_oob(mtd, oobbuf, to);
+ if (ret) {
+ printk(KERN_ERR "%s: verify failed %d\n",
+ __func__, ret);
+ break;
+ }
+
+ written += thislen;
+ if (written == len)
+ break;
+
+ to += mtd->writesize;
+ buf += thislen;
+ column = 0;
+ }
+
+ ops->oobretlen = written;
+
+ return ret;
+}
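+
+/*
+ * Usage sketch (editorial, hypothetical names): OOB writes arrive here
+ * from the MTD layer, e.g.
+ *
+ *	struct mtd_oob_ops ops = {
+ *		.mode = MTD_OPS_AUTO_OOB,
+ *		.ooblen = 8,
+ *		.oobbuf = my_oob_data,
+ *	};
+ *	err = mtd_write_oob(mtd, page_offset, &ops);
+ */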
+
+/**
+ * onenand_write - [MTD Interface] write buffer to FLASH
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param len number of bytes to write
+ * @param retlen pointer to variable to store the number of written bytes
+ * @param buf the data to write
+ *
+ * Write with ECC
+ */
+static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .ooblen = 0,
+ .datbuf = (u_char *) buf,
+ .oobbuf = NULL,
+ };
+ int ret;
+
+ onenand_get_device(mtd, FL_WRITING);
+ ret = onenand_write_ops_nolock(mtd, to, &ops);
+ onenand_release_device(mtd);
+
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/**
+ * onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @param mtd MTD device structure
+ * @param to offset to write
+ * @param ops oob operation description structure
+ */
+static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ret;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ break;
+ case MTD_OPS_RAW:
+ /* Not implemented yet */
+ default:
+ return -EINVAL;
+ }
+
+ onenand_get_device(mtd, FL_WRITING);
+ if (ops->datbuf)
+ ret = onenand_write_ops_nolock(mtd, to, ops);
+ else
+ ret = onenand_write_oob_nolock(mtd, to, ops);
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_block_isbad_nolock - [GENERIC] Check if a block is marked bad
+ * @param mtd MTD device structure
+ * @param ofs offset from device start
+ * @param allowbbt 1 if it is allowed to access the bbt area
+ *
+ * Check if the block is bad, either by reading the bad block table
+ * or by calling the scan function.
+ */
+static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allowbbt)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+
+ /* Return info from the table */
+ return bbm->isbad_bbt(mtd, ofs, allowbbt);
+}
+
+
+static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
+ struct erase_info *instr)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t addr = instr->addr;
+ int len = instr->len;
+ unsigned int block_size = (1 << this->erase_shift);
+ int ret = 0;
+
+ while (len) {
+ this->command(mtd, ONENAND_CMD_ERASE_VERIFY, addr, block_size);
+ ret = this->wait(mtd, FL_VERIFYING_ERASE);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed verify, block %d\n",
+ __func__, onenand_block(this, addr));
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = addr;
+ return -1;
+ }
+ len -= block_size;
+ addr += block_size;
+ }
+ return 0;
+}
+
+/**
+ * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase
+ * @param mtd MTD device structure
+ * @param instr erase instruction
+ * @param block_size erase block size
+ *
+ * Erase one or more blocks, up to 64 blocks at a time
+ */
+static int onenand_multiblock_erase(struct mtd_info *mtd,
+ struct erase_info *instr,
+ unsigned int block_size)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t addr = instr->addr;
+ int len = instr->len;
+ int eb_count = 0;
+ int ret = 0;
+ int bdry_block = 0;
+
+ instr->state = MTD_ERASING;
+
+ if (ONENAND_IS_DDP(this)) {
+ loff_t bdry_addr = this->chipsize >> 1;
+ if (addr < bdry_addr && (addr + len) > bdry_addr)
+ bdry_block = bdry_addr >> this->erase_shift;
+ }
+
+ /* Pre-check for bad blocks */
+ while (len) {
+ /* Check if we have a bad block, we do not erase bad blocks */
+ if (onenand_block_isbad_nolock(mtd, addr, 0)) {
+ printk(KERN_WARNING "%s: attempt to erase a bad block "
+ "at addr 0x%012llx\n",
+ __func__, (unsigned long long) addr);
+ instr->state = MTD_ERASE_FAILED;
+ return -EIO;
+ }
+ len -= block_size;
+ addr += block_size;
+ }
+
+ len = instr->len;
+ addr = instr->addr;
+
+ /* loop over batches of up to 64 erase blocks */
+ while (len) {
+ struct erase_info verify_instr = *instr;
+ int max_eb_count = MB_ERASE_MAX_BLK_COUNT;
+
+ verify_instr.addr = addr;
+ verify_instr.len = 0;
+
+ /* do not cross chip boundary */
+ if (bdry_block) {
+ int this_block = (addr >> this->erase_shift);
+
+ if (this_block < bdry_block) {
+ max_eb_count = min(max_eb_count,
+ (bdry_block - this_block));
+ }
+ }
+
+ eb_count = 0;
+
+ while (len > block_size && eb_count < (max_eb_count - 1)) {
+ this->command(mtd, ONENAND_CMD_MULTIBLOCK_ERASE,
+ addr, block_size);
+ onenand_invalidate_bufferram(mtd, addr, block_size);
+
+ ret = this->wait(mtd, FL_PREPARING_ERASE);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed multiblock erase, "
+ "block %d\n", __func__,
+ onenand_block(this, addr));
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ return -EIO;
+ }
+
+ len -= block_size;
+ addr += block_size;
+ eb_count++;
+ }
+
+ /* last block of 64-eb series */
+ cond_resched();
+ this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
+ onenand_invalidate_bufferram(mtd, addr, block_size);
+
+ ret = this->wait(mtd, FL_ERASING);
+ /* Check if it is write protected */
+ if (ret) {
+ printk(KERN_ERR "%s: Failed erase, block %d\n",
+ __func__, onenand_block(this, addr));
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ return -EIO;
+ }
+
+ len -= block_size;
+ addr += block_size;
+ eb_count++;
+
+ /* verify */
+ verify_instr.len = eb_count * block_size;
+ if (onenand_multiblock_erase_verify(mtd, &verify_instr)) {
+ instr->state = verify_instr.state;
+ instr->fail_addr = verify_instr.fail_addr;
+ return -EIO;
+ }
+
+ }
+ return 0;
+}
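+
+/*
+ * Batch shape (editorial, assuming MB_ERASE_MAX_BLK_COUNT is 64): a
+ * 128-block request issues 63 ONENAND_CMD_MULTIBLOCK_ERASE commands
+ * plus one closing ONENAND_CMD_ERASE, verifies those 64 blocks, then
+ * repeats for the remaining 64.
+ */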
+
+
+/**
+ * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase
+ * @param mtd MTD device structure
+ * @param instr erase instruction
+ * @param region erase region
+ * @param block_size erase block size
+ *
+ * Erase one or more blocks one block at a time
+ */
+static int onenand_block_by_block_erase(struct mtd_info *mtd,
+ struct erase_info *instr,
+ struct mtd_erase_region_info *region,
+ unsigned int block_size)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t addr = instr->addr;
+ int len = instr->len;
+ loff_t region_end = 0;
+ int ret = 0;
+
+ if (region) {
+ /* region is set for Flex-OneNAND */
+ region_end = region->offset + region->erasesize * region->numblocks;
+ }
+
+ instr->state = MTD_ERASING;
+
+ /* Loop through the blocks */
+ while (len) {
+ cond_resched();
+
+ /* Check if we have a bad block, we do not erase bad blocks */
+ if (onenand_block_isbad_nolock(mtd, addr, 0)) {
+ printk(KERN_WARNING "%s: attempt to erase a bad block "
+ "at addr 0x%012llx\n",
+ __func__, (unsigned long long) addr);
+ instr->state = MTD_ERASE_FAILED;
+ return -EIO;
+ }
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
+
+ onenand_invalidate_bufferram(mtd, addr, block_size);
+
+ ret = this->wait(mtd, FL_ERASING);
+ /* Check if it is write protected */
+ if (ret) {
+ printk(KERN_ERR "%s: Failed erase, block %d\n",
+ __func__, onenand_block(this, addr));
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = addr;
+ return -EIO;
+ }
+
+ len -= block_size;
+ addr += block_size;
+
+ if (region && addr == region_end) {
+ if (!len)
+ break;
+ region++;
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ if (len & (block_size - 1)) {
+ /* FIXME: This should be handled at MTD partitioning level. */
+ printk(KERN_ERR "%s: Unaligned address\n",
+ __func__);
+ return -EIO;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * onenand_erase - [MTD Interface] erase block(s)
+ * @param mtd MTD device structure
+ * @param instr erase instruction
+ *
+ * Erase one or more blocks
+ */
+static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int block_size;
+ loff_t addr = instr->addr;
+ loff_t len = instr->len;
+ int ret = 0;
+ struct mtd_erase_region_info *region = NULL;
+ loff_t region_offset = 0;
+
+ pr_debug("%s: start=0x%012llx, len=%llu\n", __func__,
+ (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
+
+ if (FLEXONENAND(this)) {
+ /* Find the eraseregion of this address */
+ int i = flexonenand_region(mtd, addr);
+
+ region = &mtd->eraseregions[i];
+ block_size = region->erasesize;
+
+ /* Start address within region must align on block boundary.
+ * Erase region's start offset is always block start address.
+ */
+ region_offset = region->offset;
+ } else
+ block_size = 1 << this->erase_shift;
+
+ /* Start address must align on block boundary */
+ if (unlikely((addr - region_offset) & (block_size - 1))) {
+ printk(KERN_ERR "%s: Unaligned address\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (unlikely(len & (block_size - 1))) {
+ printk(KERN_ERR "%s: Length not block aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_ERASING);
+
+ if (ONENAND_IS_4KB_PAGE(this) || region ||
+ instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
+ /* region is set for Flex-OneNAND (no mb erase) */
+ ret = onenand_block_by_block_erase(mtd, instr,
+ region, block_size);
+ } else {
+ ret = onenand_multiblock_erase(mtd, instr, block_size);
+ }
+
+ /* Deselect and wake up anyone waiting on the device */
+ onenand_release_device(mtd);
+
+ /* Do call back function */
+ if (!ret) {
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ }
+
+ return ret;
+}
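+
+/*
+ * Usage sketch (editorial): the MTD core reaches this through
+ * mtd->_erase, roughly
+ *
+ *	struct erase_info ei = {
+ *		.mtd = mtd,
+ *		.addr = 0,
+ *		.len = mtd->erasesize,
+ *	};
+ *	err = mtd_erase(mtd, &ei);
+ *
+ * addr and len must both be block aligned, as checked above.
+ */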
+
+/**
+ * onenand_sync - [MTD Interface] sync
+ * @param mtd MTD device structure
+ *
+ * Sync is actually a wait-for-chip-ready function
+ */
+static void onenand_sync(struct mtd_info *mtd)
+{
+ pr_debug("%s: called\n", __func__);
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_SYNCING);
+
+ /* Release it and go back */
+ onenand_release_device(mtd);
+}
+
+/**
+ * onenand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ *
+ * Check whether the block is bad
+ */
+static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ onenand_get_device(mtd, FL_READING);
+ ret = onenand_block_isbad_nolock(mtd, ofs, 0);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_default_block_markbad - [DEFAULT] mark a block bad
+ * @param mtd MTD device structure
+ * @param ofs offset from device start
+ *
+ * This is the default implementation, which can be overridden by
+ * a hardware specific driver.
+ */
+static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ u_char buf[2] = {0, 0};
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OPS_PLACE_OOB,
+ .ooblen = 2,
+ .oobbuf = buf,
+ .ooboffs = 0,
+ };
+ int block;
+
+ /* Get block number */
+ block = onenand_block(this, ofs);
+ if (bbm->bbt)
+ bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+ /* We write two bytes, so we don't have to mess with 16-bit access */
+ ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
+ /* FIXME : What to do when marking SLC block in partition
+ * with MLC erasesize? For now, it is not advisable to
+ * create partitions containing both SLC and MLC regions.
+ */
+ return onenand_write_oob_nolock(mtd, ofs, &ops);
+}
+
+/**
+ * onenand_block_markbad - [MTD Interface] Mark the block at the given offset as bad
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ *
+ * Mark the block as bad
+ */
+static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ ret = onenand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ onenand_get_device(mtd, FL_WRITING);
+ ret = mtd_block_markbad(mtd, ofs);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_do_lock_cmd - [OneNAND Interface] Lock or unlock block(s)
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ * @param len number of bytes to lock or unlock
+ * @param cmd lock or unlock command
+ *
+ * Lock or unlock one or more blocks
+ */
+static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, block, value, status;
+ int wp_status_mask;
+
+ start = onenand_block(this, ofs);
+ end = onenand_block(this, ofs + len) - 1;
+
+ if (cmd == ONENAND_CMD_LOCK)
+ wp_status_mask = ONENAND_WP_LS;
+ else
+ wp_status_mask = ONENAND_WP_US;
+
+ /* Continuous lock scheme */
+ if (this->options & ONENAND_HAS_CONT_LOCK) {
+ /* Set start block address */
+ this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Set end block address */
+ this->write_word(end, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
+ /* Write lock command */
+ this->command(mtd, cmd, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & wp_status_mask))
+ printk(KERN_ERR "%s: wp status = 0x%x\n",
+ __func__, status);
+
+ return 0;
+ }
+
+ /* Block lock scheme */
+ for (block = start; block < end + 1; block++) {
+ /* Set block address */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ /* Set start block address */
+ this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Write lock command */
+ this->command(mtd, cmd, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & wp_status_mask))
+ printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
+ __func__, block, status);
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_lock - [MTD Interface] Lock block(s)
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ * @param len number of bytes to lock
+ *
+ * Lock one or more blocks
+ */
+static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ onenand_get_device(mtd, FL_LOCKING);
+ ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_unlock - [MTD Interface] Unlock block(s)
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ * @param len number of bytes to unlock
+ *
+ * Unlock one or more blocks
+ */
+static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ onenand_get_device(mtd, FL_LOCKING);
+ ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_check_lock_status - [OneNAND Interface] Check lock status
+ * @param this onenand chip data structure
+ *
+ * Check lock status
+ */
+static int onenand_check_lock_status(struct onenand_chip *this)
+{
+ unsigned int value, block, status;
+ unsigned int end;
+
+ end = this->chipsize >> this->erase_shift;
+ for (block = 0; block < end; block++) {
+ /* Set block address */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ /* Set start block address */
+ this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & ONENAND_WP_US)) {
+ printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
+ __func__, block, status);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * onenand_unlock_all - [OneNAND Interface] unlock all blocks
+ * @param mtd MTD device structure
+ *
+ * Unlock all blocks
+ */
+static void onenand_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ loff_t len = mtd->size;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Set start block address */
+ this->write_word(0, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Don't check lock status */
+ if (this->options & ONENAND_SKIP_UNLOCK_CHECK)
+ return;
+
+ /* Check lock status */
+ if (onenand_check_lock_status(this))
+ return;
+
+ /* Workaround for all block unlock in DDP */
+ if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
+ /* All blocks on another chip */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+ }
+
+ onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+}
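+
+/*
+ * Editorial note: when ONENAND_HAS_UNLOCK_ALL is absent, or on DDP
+ * chips where the all-unlock command only reaches the first die, this
+ * falls back to per-block unlocking via onenand_do_lock_cmd().
+ */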
+
+#ifdef CONFIG_MTD_ONENAND_OTP
+
+/**
+ * onenand_otp_command - Send OTP specific command to OneNAND device
+ * @param mtd MTD device structure
+ * @param cmd the command to be sent
+ * @param addr offset to read from or write to
+ * @param len number of bytes to read or write
+ */
+static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int value, block, page;
+
+ /* Address translation */
+ switch (cmd) {
+ case ONENAND_CMD_OTP_ACCESS:
+ block = (int) (addr >> this->erase_shift);
+ page = -1;
+ break;
+
+ default:
+ block = (int) (addr >> this->erase_shift);
+ page = (int) (addr >> this->page_shift);
+
+ if (ONENAND_IS_2PLANE(this)) {
+ /* Make the block number even */
+ block &= ~1;
+ /* Is it the odd plane? */
+ if (addr & this->writesize)
+ block++;
+ page >>= 1;
+ }
+ page &= this->page_mask;
+ break;
+ }
+
+ if (block != -1) {
+ /* Write 'DFS, FBA' of Flash */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS1);
+ }
+
+ if (page != -1) {
+ /* Now we use page size operation */
+ int sectors = 4, count = 4;
+ int dataram;
+
+ switch (cmd) {
+ default:
+ if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
+ cmd = ONENAND_CMD_2X_PROG;
+ dataram = ONENAND_CURRENT_BUFFERRAM(this);
+ break;
+ }
+
+ /* Write 'FPA, FSA' of Flash */
+ value = onenand_page_address(page, sectors);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS8);
+
+ /* Write 'BSA, BSC' of DataRAM */
+ value = onenand_buffer_address(dataram, sectors, count);
+ this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
+ }
+
+ /* Interrupt clear */
+ this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
+
+ /* Write command */
+ this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
+
+ return 0;
+}
+
+/**
+ * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param ops oob operation description structure
+ *
+ * OneNAND write out-of-band only for OTP
+ */
+static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int column, ret = 0, oobsize;
+ int written = 0;
+ u_char *oobbuf;
+ size_t len = ops->ooblen;
+ const u_char *buf = ops->oobbuf;
+ int block, value, status;
+
+ to += ops->ooboffs;
+
+ /* Initialize retlen, in case of early exit */
+ ops->oobretlen = 0;
+
+ oobsize = mtd->oobsize;
+
+ column = to & (mtd->oobsize - 1);
+
+ oobbuf = this->oob_buf;
+
+ /* Loop until all data is written */
+ while (written < len) {
+ int thislen = min_t(int, oobsize, len - written);
+
+ cond_resched();
+
+ block = (int) (to >> this->erase_shift);
+ /*
+ * Write 'DFS, FBA' of Flash
+ * Add: F100h DQ=DFS, FBA
+ */
+
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS1);
+
+ /*
+ * Select DataRAM for DDP
+ * Add: F101h DQ=DBS
+ */
+
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS2);
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ /*
+ * Enter OTP access mode
+ */
+ this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+ this->wait(mtd, FL_OTPING);
+
+ /* We send a full oobsize buffer to the spare RAM
+ * to prevent partial (byte) accesses */
+ memcpy(oobbuf + column, buf, thislen);
+
+ /*
+ * Write Data into DataRAM
+ * Add: 8th Word
+ * in sector0/spare/page0
+ * DQ=XXFCh
+ */
+ this->write_bufferram(mtd, ONENAND_SPARERAM,
+ oobbuf, 0, mtd->oobsize);
+
+ onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
+ onenand_update_bufferram(mtd, to, 0);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, 0);
+ }
+
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
+ break;
+ }
+
+ /* Exit OTP access mode */
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETING);
+
+ status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+ status &= 0x60;
+
+ if (status == 0x60) {
+ printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+ printk(KERN_DEBUG "1st Block\tLOCKED\n");
+ printk(KERN_DEBUG "OTP Block\tLOCKED\n");
+ } else if (status == 0x20) {
+ printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+ printk(KERN_DEBUG "1st Block\tLOCKED\n");
+ printk(KERN_DEBUG "OTP Block\tUN-LOCKED\n");
+ } else if (status == 0x40) {
+ printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+ printk(KERN_DEBUG "1st Block\tUN-LOCKED\n");
+ printk(KERN_DEBUG "OTP Block\tLOCKED\n");
+ } else {
+ printk(KERN_DEBUG "Reboot to check\n");
+ }
+
+ written += thislen;
+ if (written == len)
+ break;
+
+ to += mtd->writesize;
+ buf += thislen;
+ column = 0;
+ }
+
+ ops->oobretlen = written;
+
+ return ret;
+}
+
+/* Internal OTP operation */
+typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+
+/**
+ * do_otp_read - [DEFAULT] Read OTP block area
+ * @param mtd MTD device structure
+ * @param from The offset to read
+ * @param len number of bytes to read
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put/get data
+ *
+ * Read OTP block area.
+ */
+static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .ooblen = 0,
+ .datbuf = buf,
+ .oobbuf = NULL,
+ };
+ int ret;
+
+ /* Enter OTP access mode */
+ this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+ this->wait(mtd, FL_OTPING);
+
+ ret = ONENAND_IS_4KB_PAGE(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
+
+ /* Exit OTP access mode */
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETING);
+
+ return ret;
+}
+
+/**
+ * do_otp_write - [DEFAULT] Write OTP block area
+ * @param mtd MTD device structure
+ * @param to The offset to write
+ * @param len number of bytes to write
+ * @param retlen pointer to variable to store the number of written bytes
+ * @param buf the databuffer to put/get data
+ *
+ * Write OTP block area.
+ */
+static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned char *pbuf = buf;
+ int ret;
+ struct mtd_oob_ops ops;
+
+ /* Force buffer page aligned */
+ if (len < mtd->writesize) {
+ memcpy(this->page_buf, buf, len);
+ memset(this->page_buf + len, 0xff, mtd->writesize - len);
+ pbuf = this->page_buf;
+ len = mtd->writesize;
+ }
+
+ /* Enter OTP access mode */
+ this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+ this->wait(mtd, FL_OTPING);
+
+ ops.len = len;
+ ops.ooblen = 0;
+ ops.datbuf = pbuf;
+ ops.oobbuf = NULL;
+ ret = onenand_write_ops_nolock(mtd, to, &ops);
+ *retlen = ops.retlen;
+
+ /* Exit OTP access mode */
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETING);
+
+ return ret;
+}
+
+/**
+ * do_otp_lock - [DEFAULT] Lock OTP block area
+ * @param mtd MTD device structure
+ * @param from The offset to lock
+ * @param len number of bytes to lock
+ * @param retlen pointer to variable to store the number of locked bytes
+ * @param buf the databuffer to put/get data
+ *
+ * Lock OTP block area.
+ */
+static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_oob_ops ops;
+ int ret;
+
+ if (FLEXONENAND(this)) {
+
+ /* Enter OTP access mode */
+ this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+ this->wait(mtd, FL_OTPING);
+ /*
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
+ */
+ ops.len = mtd->writesize;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
+ *retlen = ops.retlen;
+
+ /* Exit OTP access mode */
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETING);
+ } else {
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ret = onenand_otp_write_oob_nolock(mtd, from, &ops);
+ *retlen = ops.oobretlen;
+ }
+
+ return ret;
+}
+
+/**
+ * onenand_otp_walk - [DEFAULT] Handle OTP operation
+ * @param mtd MTD device structure
+ * @param from The offset to read/write
+ * @param len number of bytes to read/write
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put/get data
+ * @param action do given action
+ * @param mode specify user or factory area
+ *
+ * Handle OTP operation.
+ */
+static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf,
+ otp_op_t action, int mode)
+{
+ struct onenand_chip *this = mtd->priv;
+ int otp_pages;
+ int density;
+ int ret = 0;
+
+ *retlen = 0;
+
+ density = onenand_get_density(this->device_id);
+ if (density < ONENAND_DEVICE_DENSITY_512Mb)
+ otp_pages = 20;
+ else
+ otp_pages = 50;
+
+ if (mode == MTD_OTP_FACTORY) {
+ from += mtd->writesize * otp_pages;
+ otp_pages = ONENAND_PAGES_PER_BLOCK - otp_pages;
+ }
+
+ /* Check User/Factory boundary */
+ if (mode == MTD_OTP_USER) {
+ if (mtd->writesize * otp_pages < from + len)
+ return 0;
+ } else {
+ if (mtd->writesize * otp_pages < len)
+ return 0;
+ }
+
+ onenand_get_device(mtd, FL_OTPING);
+ while (len > 0 && otp_pages > 0) {
+ if (!action) { /* OTP Info functions */
+ struct otp_info *otpinfo;
+
+ len -= sizeof(struct otp_info);
+ if (len <= 0) {
+ ret = -ENOSPC;
+ break;
+ }
+
+ otpinfo = (struct otp_info *) buf;
+ otpinfo->start = from;
+ otpinfo->length = mtd->writesize;
+ otpinfo->locked = 0;
+
+ from += mtd->writesize;
+ buf += sizeof(struct otp_info);
+ *retlen += sizeof(struct otp_info);
+ } else {
+ size_t tmp_retlen;
+
+ ret = action(mtd, from, len, &tmp_retlen, buf);
+
+ buf += tmp_retlen;
+ len -= tmp_retlen;
+ *retlen += tmp_retlen;
+
+ if (ret)
+ break;
+ }
+ otp_pages--;
+ }
+ onenand_release_device(mtd);
+
+ return ret;
+}
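+
+/*
+ * Editorial note: with action == NULL the walk only enumerates OTP
+ * pages, emitting one struct otp_info per page; otherwise it applies
+ * do_otp_read/do_otp_write/do_otp_lock page by page, accumulating
+ * *retlen until len is exhausted or the OTP area ends.
+ */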
+
+/**
+ * onenand_get_fact_prot_info - [MTD Interface] Read factory OTP info
+ * @param mtd MTD device structure
+ * @param len number of bytes to read
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put/get data
+ *
+ * Read factory OTP info.
+ */
+static int onenand_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL,
+ MTD_OTP_FACTORY);
+}
+
+/**
+ * onenand_read_fact_prot_reg - [MTD Interface] Read factory OTP area
+ * @param mtd MTD device structure
+ * @param from The offset to read
+ * @param len number of bytes to read
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put/get data
+ *
+ * Read factory OTP area.
+ */
+static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_FACTORY);
+}
+
+/**
+ * onenand_get_user_prot_info - [MTD Interface] Read user OTP info
+ * @param mtd MTD device structure
+ * @param len number of bytes to read
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put/get data
+ *
+ * Read user OTP info.
+ */
+static int onenand_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL,
+ MTD_OTP_USER);
+}
+
+/**
+ * onenand_read_user_prot_reg - [MTD Interface] Read user OTP area
+ * @param mtd MTD device structure
+ * @param from The offset to read
+ * @param len number of bytes to read
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put/get data
+ *
+ * Read user OTP area.
+ */
+static int onenand_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_USER);
+}
+
+/**
+ * onenand_write_user_prot_reg - [MTD Interface] Write user OTP area
+ * @param mtd MTD device structure
+ * @param from The offset to write
+ * @param len number of bytes to write
+ * @param retlen pointer to variable to store the number of written bytes
+ * @param buf the databuffer to put/get data
+ *
+ * Write user OTP area.
+ */
+static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_write, MTD_OTP_USER);
+}
+
+/**
+ * onenand_lock_user_prot_reg - [MTD Interface] Lock user OTP area
+ * @param mtd MTD device structure
+ * @param from The offset to lock
+ * @param len number of bytes to lock
+ *
+ * Write lock mark on spare area in page 0 in OTP block
+ */
+static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
+ size_t retlen;
+ int ret;
+ unsigned int otp_lock_offset = ONENAND_OTP_LOCK_OFFSET;
+
+ memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
+ : mtd->oobsize);
+ /*
+ * Write lock mark to 8th word of sector0 of page0 of the spare0.
+ * We write 16 bytes of spare area instead of 2 bytes.
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
+ */
+
+ from = 0;
+ len = FLEXONENAND(this) ? mtd->writesize : 16;
+
+ /*
+ * Note: OTP lock operation
+ * OTP block : 0xXXFC XX 1111 1100
+ * 1st block : 0xXXF3 (if the chip supports it) XX 1111 0011
+ * Both : 0xXXF0 (if the chip supports it) XX 1111 0000
+ */
+ if (FLEXONENAND(this))
+ otp_lock_offset = FLEXONENAND_OTP_LOCK_OFFSET;
+
+ /* ONENAND_OTP_AREA | ONENAND_OTP_BLOCK0 | ONENAND_OTP_AREA_BLOCK0 */
+ if (otp == 1)
+ buf[otp_lock_offset] = 0xFC;
+ else if (otp == 2)
+ buf[otp_lock_offset] = 0xF3;
+ else if (otp == 3)
+ buf[otp_lock_offset] = 0xF0;
+ else if (otp != 0)
+ printk(KERN_DEBUG "[OneNAND] Invalid option selected for OTP\n");
+
+ ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
+
+ return ret ? : retlen;
+}
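+
+/*
+ * Editorial note: the otp variable tested above is a module parameter
+ * declared earlier in this file; loading with otp=1 locks only the OTP
+ * block, otp=2 only the 1st block, and otp=3 both, matching the
+ * 0xFC/0xF3/0xF0 marks written here.
+ */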
+
+#endif /* CONFIG_MTD_ONENAND_OTP */
+
+/**
+ * onenand_check_features - Check and set OneNAND features
+ * @param mtd MTD data structure
+ *
+ * Check and set OneNAND features
+ * - lock scheme
+ * - two plane
+ */
+static void onenand_check_features(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int density, process, numbufs;
+
+ /* Lock scheme depends on density and process */
+ density = onenand_get_density(this->device_id);
+ process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
+ numbufs = this->read_word(this->base + ONENAND_REG_NUM_BUFFERS) >> 8;
+
+ /* Lock scheme */
+ switch (density) {
+ case ONENAND_DEVICE_DENSITY_4Gb:
+ if (ONENAND_IS_DDP(this))
+ this->options |= ONENAND_HAS_2PLANE;
+ else if (numbufs == 1) {
+ this->options |= ONENAND_HAS_4KB_PAGE;
+ this->options |= ONENAND_HAS_CACHE_PROGRAM;
+ /*
+ * There are two different 4KiB-pagesize chips,
+ * and there is no way to tell them apart from the
+ * H/W config values.
+ *
+ * As a workaround, detect the correct NOP for each
+ * chip by checking the version ID.
+ *
+ * Known so far:
+ * KFM4G16Q4M has NOP 4 with version ID 0x0131
+ * KFM4G16Q5M has NOP 1 with version ID 0x013e
+ */
+ if ((this->version_id & 0xf) == 0xe)
+ this->options |= ONENAND_HAS_NOP_1;
+ }
+
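+ /* fall through - a 4Gb chip also gets the 2Gb and 1Gb settings */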
+ case ONENAND_DEVICE_DENSITY_2Gb:
+ /* 2Gb DDP does not have 2 plane */
+ if (!ONENAND_IS_DDP(this))
+ this->options |= ONENAND_HAS_2PLANE;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+
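+ /* fall through */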
+ case ONENAND_DEVICE_DENSITY_1Gb:
+ /* A-Die has all block unlock */
+ if (process)
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ break;
+
+ default:
+ /* Some OneNAND chips have the continuous lock scheme */
+ if (!process)
+ this->options |= ONENAND_HAS_CONT_LOCK;
+ break;
+ }
+
+ /* The MLC has 4KiB pagesize. */
+ if (ONENAND_IS_MLC(this))
+ this->options |= ONENAND_HAS_4KB_PAGE;
+
+ if (ONENAND_IS_4KB_PAGE(this))
+ this->options &= ~ONENAND_HAS_2PLANE;
+
+ if (FLEXONENAND(this)) {
+ this->options &= ~ONENAND_HAS_CONT_LOCK;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ }
+
+ if (this->options & ONENAND_HAS_CONT_LOCK)
+ printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
+ if (this->options & ONENAND_HAS_UNLOCK_ALL)
+ printk(KERN_DEBUG "Chip support all block unlock\n");
+ if (this->options & ONENAND_HAS_2PLANE)
+ printk(KERN_DEBUG "Chip has 2 plane\n");
+ if (this->options & ONENAND_HAS_4KB_PAGE)
+ printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
+ if (this->options & ONENAND_HAS_CACHE_PROGRAM)
+ printk(KERN_DEBUG "Chip has cache program feature\n");
+}
+
+/**
+ * onenand_print_device_info - Print device & version ID
+ * @param device device ID
+ * @param version version ID
+ *
+ * Print device & version ID
+ */
+static void onenand_print_device_info(int device, int version)
+{
+ int vcc, demuxed, ddp, density, flexonenand;
+
+ vcc = device & ONENAND_DEVICE_VCC_MASK;
+ demuxed = device & ONENAND_DEVICE_IS_DEMUX;
+ ddp = device & ONENAND_DEVICE_IS_DDP;
+ density = onenand_get_density(device);
+ flexonenand = device & DEVICE_IS_FLEXONENAND;
+ printk(KERN_INFO "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
+ demuxed ? "" : "Muxed ",
+ flexonenand ? "Flex-" : "",
+ ddp ? "(DDP)" : "",
+ (16 << density),
+ vcc ? "2.65/3.3" : "1.8",
+ device);
+ printk(KERN_INFO "OneNAND version = 0x%04x\n", version);
+}
+
+static const struct onenand_manufacturers onenand_manuf_ids[] = {
+ {ONENAND_MFR_SAMSUNG, "Samsung"},
+ {ONENAND_MFR_NUMONYX, "Numonyx"},
+};
+
+/**
+ * onenand_check_maf - Check manufacturer ID
+ * @param manuf manufacturer ID
+ *
+ * Check manufacturer ID
+ */
+static int onenand_check_maf(int manuf)
+{
+ int size = ARRAY_SIZE(onenand_manuf_ids);
+ char *name;
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (manuf == onenand_manuf_ids[i].id)
+ break;
+
+ if (i < size)
+ name = onenand_manuf_ids[i].name;
+ else
+ name = "Unknown";
+
+ printk(KERN_DEBUG "OneNAND Manufacturer: %s (0x%0x)\n", name, manuf);
+
+ return (i == size);
+}
+
+/**
+ * flexonenand_get_boundary - Reads the SLC boundary
+ * @param mtd MTD device structure
+ */
+static int flexonenand_get_boundary(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned die, bdry;
+ int syscfg, locked;
+
+ /* Disable ECC */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
+
+ for (die = 0; die < this->dies; die++) {
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ this->wait(mtd, FL_READING);
+
+ bdry = this->read_word(this->base + ONENAND_DATARAM);
+ if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
+ locked = 0;
+ else
+ locked = 1;
+ this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
+
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETING);
+
+ printk(KERN_INFO "Die %d boundary: %d%s\n", die,
+ this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
+ }
+
+ /* Enable ECC */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ return 0;
+}
+
+/**
+ * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
+ * boundary[], diesize[], mtd->size, mtd->erasesize
+ * @param mtd - MTD device structure
+ */
+static void flexonenand_get_size(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int die, i, eraseshift, density;
+ int blksperdie, maxbdry;
+ loff_t ofs;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+ maxbdry = blksperdie - 1;
+ eraseshift = this->erase_shift - 1;
+
+ mtd->numeraseregions = this->dies << 1;
+
+ /* This fills up the device boundary */
+ flexonenand_get_boundary(mtd);
+ die = ofs = 0;
+ i = -1;
+ for (; die < this->dies; die++) {
+ if (!die || this->boundary[die-1] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks =
+ this->boundary[die] + 1;
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift++;
+ } else {
+ mtd->numeraseregions -= 1;
+ mtd->eraseregions[i].numblocks +=
+ this->boundary[die] + 1;
+ ofs += (this->boundary[die] + 1) << (eraseshift - 1);
+ }
+ if (this->boundary[die] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks = maxbdry ^
+ this->boundary[die];
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift--;
+ } else
+ mtd->numeraseregions -= 1;
+ }
+
+ /* Expose MLC erase size except when all blocks are SLC */
+ mtd->erasesize = 1 << this->erase_shift;
+ if (mtd->numeraseregions == 1)
+ mtd->erasesize >>= 1;
+
+ printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
+ for (i = 0; i < mtd->numeraseregions; i++)
+ printk(KERN_INFO "[offset: 0x%08x, erasesize: 0x%05x,"
+ " numblocks: %04u]\n",
+ (unsigned int) mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+
+ for (die = 0, mtd->size = 0; die < this->dies; die++) {
+ this->diesize[die] = (loff_t)blksperdie << this->erase_shift;
+ this->diesize[die] -= (loff_t)(this->boundary[die] + 1)
+ << (this->erase_shift - 1);
+ mtd->size += this->diesize[die];
+ }
+}
+
+/**
+ * flexonenand_check_blocks_erased - Check if blocks are erased
+ * @param mtd_info - mtd info structure
+ * @param start - first erase block to check
+ * @param end - last erase block to check
+ *
+ * Converting an unerased block from MLC to SLC
+ * causes byte values to change. Since both data and its ECC
+ * have changed, reads on the block give uncorrectable error.
+ * This might lead to the block being detected as bad.
+ *
+ * Avoid this by ensuring that the block to be converted is
+ * erased.
+ */
+static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i, ret;
+ int block;
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OPS_PLACE_OOB,
+ .ooboffs = 0,
+ .ooblen = mtd->oobsize,
+ .datbuf = NULL,
+ .oobbuf = this->oob_buf,
+ };
+ loff_t addr;
+
+ printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
+
+ for (block = start; block <= end; block++) {
+ addr = flexonenand_addr(this, block);
+ if (onenand_block_isbad_nolock(mtd, addr, 0))
+ continue;
+
+ /*
+ * Since main area write results in ECC write to spare,
+ * it is sufficient to check only ECC bytes for change.
+ */
+ ret = onenand_read_oob_nolock(mtd, addr, &ops);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mtd->oobsize; i++)
+ if (this->oob_buf[i] != 0xff)
+ break;
+
+ if (i != mtd->oobsize) {
+ printk(KERN_WARNING "%s: Block %d not erased.\n",
+ __func__, block);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * flexonenand_set_boundary - Writes the SLC boundary
+ * @param mtd - mtd info structure
+ * @param die - die whose boundary is to be changed
+ * @param boundary - new boundary block number (-1 means no change)
+ * @param lock - non-zero locks the new boundary
+ */
+static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+ int boundary, int lock)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret, density, blksperdie, old, new, thisboundary;
+ loff_t addr;
+
+ /* Change only once for SDP Flex-OneNAND */
+ if (die && (!ONENAND_IS_DDP(this)))
+ return 0;
+
+ /* boundary value of -1 indicates no required change */
+ if (boundary < 0 || boundary == this->boundary[die])
+ return 0;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((16 << density) << 20) >> this->erase_shift;
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+
+ if (boundary >= blksperdie) {
+ printk(KERN_ERR "%s: Invalid boundary value. "
+ "Boundary not changed.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check if converting blocks are erased */
+ old = this->boundary[die] + (die * this->density_mask);
+ new = boundary + (die * this->density_mask);
+ ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
+ if (ret) {
+ printk(KERN_ERR "%s: Please erase blocks "
+ "before boundary change\n", __func__);
+ return ret;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ /* Check if the boundary is locked */
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ this->wait(mtd, FL_READING);
+
+ thisboundary = this->read_word(this->base + ONENAND_DATARAM);
+ if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
+ printk(KERN_ERR "%s: boundary locked\n", __func__);
+ ret = 1;
+ goto out;
+ }
+
+ printk(KERN_INFO "Changing die %d boundary: %d%s\n",
+ die, boundary, lock ? "(Locked)" : "(Unlocked)");
+
+ addr = die ? this->diesize[0] : 0;
+
+ boundary &= FLEXONENAND_PI_MASK;
+ boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
+ ret = this->wait(mtd, FL_ERASING);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed PI erase for Die %d\n",
+ __func__, die);
+ goto out;
+ }
+
+ this->write_word(boundary, this->base + ONENAND_DATARAM);
+ this->command(mtd, ONENAND_CMD_PROG, addr, 0);
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed PI write for Die %d\n",
+ __func__, die);
+ goto out;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
+ ret = this->wait(mtd, FL_WRITING);
+out:
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
+ this->wait(mtd, FL_RESETING);
+ if (!ret)
+ /* Recalculate device size on boundary change */
+ flexonenand_get_size(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_chip_probe - [OneNAND Interface] The generic chip probe
+ * @param mtd MTD device structure
+ *
+ * OneNAND detection method:
+ * Compare the values from command with ones from register
+ */
+static int onenand_chip_probe(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int bram_maf_id, bram_dev_id, maf_id, dev_id;
+ int syscfg;
+
+ /* Save system configuration 1 */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ /* Clear Sync. Burst Read mode to read BootRAM */
+ this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1);
+
+ /* Send the command for reading device ID from BootRAM */
+ this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
+
+ /* Read manufacturer and device IDs from BootRAM */
+ bram_maf_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x0);
+ bram_dev_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x2);
+
+ /* Reset OneNAND to read default register values */
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
+ /* Wait for reset to complete */
+ this->wait(mtd, FL_RESETING);
+
+ /* Restore system configuration 1 */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+
+ /* Check manufacturer ID */
+ if (onenand_check_maf(bram_maf_id))
+ return -ENXIO;
+
+ /* Read manufacturer and device IDs from Register */
+ maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+
+ /* Check OneNAND device */
+ if (maf_id != bram_maf_id || dev_id != bram_dev_id)
+ return -ENXIO;
+
+ return 0;
+}
+
+/**
+ * onenand_probe - [OneNAND Interface] Probe the OneNAND device
+ * @param mtd MTD device structure
+ */
+static int onenand_probe(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int dev_id, ver_id;
+ int density;
+ int ret;
+
+ ret = this->chip_probe(mtd);
+ if (ret)
+ return ret;
+
+ /* Device and version IDs from Register */
+ dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+ ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
+ this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
+
+ /* Flash device information */
+ onenand_print_device_info(dev_id, ver_id);
+ this->device_id = dev_id;
+ this->version_id = ver_id;
+
+ /* Check OneNAND features */
+ onenand_check_features(mtd);
+
+ density = onenand_get_density(dev_id);
+ if (FLEXONENAND(this)) {
+ this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
+ /* Maximum possible erase regions */
+ mtd->numeraseregions = this->dies << 1;
+ mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
+ * (this->dies << 1), GFP_KERNEL);
+ if (!mtd->eraseregions)
+ return -ENOMEM;
+ }
+
+ /*
+ * For Flex-OneNAND, chipsize represents maximum possible device size.
+ * mtd->size represents the actual device size.
+ */
+ this->chipsize = (16 << density) << 20;
+
+ /* OneNAND page size & block size */
+ /* The data buffer size is equal to page size */
+ mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
+ /* We use the full BufferRAM */
+ if (ONENAND_IS_4KB_PAGE(this))
+ mtd->writesize <<= 1;
+
+ mtd->oobsize = mtd->writesize >> 5;
+ /* Pages per block are always 64 in OneNAND */
+ mtd->erasesize = mtd->writesize << 6;
+ /*
+ * Flex-OneNAND SLC area has 64 pages per block.
+ * Flex-OneNAND MLC area has 128 pages per block.
+ * Expose MLC erase size to find erase_shift and page_mask.
+ */
+ if (FLEXONENAND(this))
+ mtd->erasesize <<= 1;
+
+ this->erase_shift = ffs(mtd->erasesize) - 1;
+ this->page_shift = ffs(mtd->writesize) - 1;
+ this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
+ /* Set density mask; it is used for DDP */
+ if (ONENAND_IS_DDP(this))
+ this->density_mask = this->chipsize >> (this->erase_shift + 1);
+ /* It's the real page size */
+ this->writesize = mtd->writesize;
+
+ /* REVISIT: Multichip handling */
+
+ if (FLEXONENAND(this))
+ flexonenand_get_size(mtd);
+ else
+ mtd->size = this->chipsize;
+
+ /*
+ * We emulate a 4KiB page and a 256KiB erase block size,
+ * but oobsize is still 64 bytes.
+ * This only takes effect when 2X program support is turned on;
+ * otherwise the 2-plane path is constant-false and the
+ * compiler discards it.
+ */
+ if (ONENAND_IS_2PLANE(this)) {
+ mtd->writesize <<= 1;
+ mtd->erasesize <<= 1;
+ }
+
+ return 0;
+}
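+
+/*
+ * Worked example (editorial): a 2KiB-page device gets
+ * oobsize = 2048 >> 5 = 64 bytes and erasesize = 2048 << 6 = 128KiB;
+ * a 4Gb density code (5) gives chipsize = (16 << 5) << 20 = 512MiB.
+ */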
+
+/**
+ * onenand_suspend - [MTD Interface] Suspend the OneNAND flash
+ * @param mtd MTD device structure
+ */
+static int onenand_suspend(struct mtd_info *mtd)
+{
+ return onenand_get_device(mtd, FL_PM_SUSPENDED);
+}
+
+/**
+ * onenand_resume - [MTD Interface] Resume the OneNAND flash
+ * @param mtd MTD device structure
+ */
+static void onenand_resume(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (this->state == FL_PM_SUSPENDED)
+ onenand_release_device(mtd);
+ else
+ printk(KERN_ERR "%s: resume() called for the chip which is not "
+ "in suspended state\n", __func__);
+}
+
+/**
+ * onenand_scan - [OneNAND Interface] Scan for the OneNAND device
+ * @param mtd MTD device structure
+ * @param maxchips Number of chips to scan for
+ *
+ * This fills in all uninitialized function pointers
+ * with the defaults.
+ * The flash ID is read and the mtd/chip structures are
+ * filled with the appropriate values.
+ */
+int onenand_scan(struct mtd_info *mtd, int maxchips)
+{
+ int i, ret;
+ struct onenand_chip *this = mtd->priv;
+
+ if (!this->read_word)
+ this->read_word = onenand_readw;
+ if (!this->write_word)
+ this->write_word = onenand_writew;
+
+ if (!this->command)
+ this->command = onenand_command;
+ if (!this->wait)
+ onenand_setup_wait(mtd);
+ if (!this->bbt_wait)
+ this->bbt_wait = onenand_bbt_wait;
+ if (!this->unlock_all)
+ this->unlock_all = onenand_unlock_all;
+
+ if (!this->chip_probe)
+ this->chip_probe = onenand_chip_probe;
+
+ if (!this->read_bufferram)
+ this->read_bufferram = onenand_read_bufferram;
+ if (!this->write_bufferram)
+ this->write_bufferram = onenand_write_bufferram;
+
+ if (!this->block_markbad)
+ this->block_markbad = onenand_default_block_markbad;
+ if (!this->scan_bbt)
+ this->scan_bbt = onenand_default_bbt;
+
+ if (onenand_probe(mtd))
+ return -ENXIO;
+
+ /* Set Sync. Burst Read after probing */
+ if (this->mmcontrol) {
+ printk(KERN_INFO "OneNAND Sync. Burst Read support\n");
+ this->read_bufferram = onenand_sync_read_bufferram;
+ }
+
+ /* Allocate buffers, if necessary */
+ if (!this->page_buf) {
+ this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!this->page_buf)
+ return -ENOMEM;
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!this->verify_buf) {
+ kfree(this->page_buf);
+ return -ENOMEM;
+ }
+#endif
+ this->options |= ONENAND_PAGEBUF_ALLOC;
+ }
+ if (!this->oob_buf) {
+ this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!this->oob_buf) {
+ if (this->options & ONENAND_PAGEBUF_ALLOC) {
+ this->options &= ~ONENAND_PAGEBUF_ALLOC;
+ kfree(this->page_buf);
+ }
+ return -ENOMEM;
+ }
+ this->options |= ONENAND_OOBBUF_ALLOC;
+ }
+
+ this->state = FL_READY;
+ init_waitqueue_head(&this->wq);
+ spin_lock_init(&this->chip_lock);
+
+ /*
+ * Allow subpage writes up to oobsize.
+ */
+ switch (mtd->oobsize) {
+ case 128:
+ if (FLEXONENAND(this)) {
+ this->ecclayout = &flexonenand_oob_128;
+ mtd->subpage_sft = 0;
+ } else {
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 2;
+ }
+ if (ONENAND_IS_NOP_1(this))
+ mtd->subpage_sft = 0;
+ break;
+ case 64:
+ this->ecclayout = &onenand_oob_64;
+ mtd->subpage_sft = 2;
+ break;
+
+ case 32:
+ this->ecclayout = &onenand_oob_32;
+ mtd->subpage_sft = 1;
+ break;
+
+ default:
+ printk(KERN_WARNING "%s: No OOB scheme defined for oobsize %d\n",
+ __func__, mtd->oobsize);
+ mtd->subpage_sft = 0;
+ /* To prevent kernel oops */
+ this->ecclayout = &onenand_oob_32;
+ break;
+ }
+
+ this->subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area
+ */
+ this->ecclayout->oobavail = 0;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES &&
+ this->ecclayout->oobfree[i].length; i++)
+ this->ecclayout->oobavail +=
+ this->ecclayout->oobfree[i].length;
+ mtd->oobavail = this->ecclayout->oobavail;
+
+ mtd->ecclayout = this->ecclayout;
+ mtd->ecc_strength = 1;
+
+ /* Fill in remaining MTD driver data */
+ mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->_erase = onenand_erase;
+ mtd->_point = NULL;
+ mtd->_unpoint = NULL;
+ mtd->_read = onenand_read;
+ mtd->_write = onenand_write;
+ mtd->_read_oob = onenand_read_oob;
+ mtd->_write_oob = onenand_write_oob;
+ mtd->_panic_write = onenand_panic_write;
+#ifdef CONFIG_MTD_ONENAND_OTP
+ mtd->_get_fact_prot_info = onenand_get_fact_prot_info;
+ mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg;
+ mtd->_get_user_prot_info = onenand_get_user_prot_info;
+ mtd->_read_user_prot_reg = onenand_read_user_prot_reg;
+ mtd->_write_user_prot_reg = onenand_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg;
+#endif
+ mtd->_sync = onenand_sync;
+ mtd->_lock = onenand_lock;
+ mtd->_unlock = onenand_unlock;
+ mtd->_suspend = onenand_suspend;
+ mtd->_resume = onenand_resume;
+ mtd->_block_isbad = onenand_block_isbad;
+ mtd->_block_markbad = onenand_block_markbad;
+ mtd->owner = THIS_MODULE;
+ mtd->writebufsize = mtd->writesize;
+
+ /* Unlock whole block */
+ if (!(this->options & ONENAND_SKIP_INITIAL_UNLOCKING))
+ this->unlock_all(mtd);
+
+ ret = this->scan_bbt(mtd);
+ if ((!FLEXONENAND(this)) || ret)
+ return ret;
+
+ /* Change Flex-OneNAND boundaries if required */
+ for (i = 0; i < MAX_DIES; i++)
+ flexonenand_set_boundary(mtd, i, flex_bdry[2 * i],
+ flex_bdry[(2 * i) + 1]);
+
+ return 0;
+}
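+
+/*
+ * Editor's note: a minimal usage sketch (not part of this file) of the
+ * scan/release pair, as a controller driver such as samsung.c below
+ * uses it. Allocate the mtd_info and onenand_chip together, point
+ * mtd->priv at the chip, map the controller window into this->base,
+ * then:
+ *
+ *	if (onenand_scan(mtd, 1))	// probe chip, fill default hooks
+ *		goto error;
+ *	mtd_device_register(mtd, NULL, 0);
+ *	...
+ *	onenand_release(mtd);		// undo everything on teardown
+ */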
+
+/**
+ * onenand_release - [OneNAND Interface] Free resources held by the OneNAND device
+ * @param mtd MTD device structure
+ */
+void onenand_release(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ /* Deregister partitions */
+ mtd_device_unregister(mtd);
+
+ /* Free bad block table memory, if allocated */
+ if (this->bbm) {
+ struct bbm_info *bbm = this->bbm;
+ kfree(bbm->bbt);
+ kfree(this->bbm);
+ }
+ /* Buffers allocated by onenand_scan */
+ if (this->options & ONENAND_PAGEBUF_ALLOC) {
+ kfree(this->page_buf);
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ kfree(this->verify_buf);
+#endif
+ }
+ if (this->options & ONENAND_OOBBUF_ALLOC)
+ kfree(this->oob_buf);
+ kfree(mtd->eraseregions);
+}
+
+EXPORT_SYMBOL_GPL(onenand_scan);
+EXPORT_SYMBOL_GPL(onenand_release);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("Generic OneNAND flash driver code");
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
new file mode 100644
index 000000000..08d0085f3
--- /dev/null
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -0,0 +1,252 @@
+/*
+ * linux/drivers/mtd/onenand/onenand_bbt.c
+ *
+ * Bad Block Table support for the OneNAND driver
+ *
+ * Copyright(c) 2005 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Derived from nand_bbt.c
+ *
+ * TODO:
+ * Split BBT core and chip specific BBT.
+ */
+
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/export.h>
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @param buf the buffer to search
+ * @param len the length of buffer to search
+ * @param paglen the pagelength
+ * @param td search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block
+ * tables and good / bad block identifiers. Same as check_pattern, but
+ * no optional empty check and the pattern is expected to start
+ * at offset 0.
+ *
+ */
+static int check_short_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+ int i;
+ uint8_t *p = buf;
+
+ /* Compare the pattern */
+ for (i = 0; i < td->len; i++) {
+ if (p[i] != td->pattern[i])
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @param mtd MTD device structure
+ * @param buf temporary buffer
+ * @param bd descriptor for the good/bad block search pattern
+ * @param chip create the table for a specific chip, -1 read all chips.
+ * Applies only if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device
+ * for the given good/bad block identify pattern
+ */
+static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int i, j, numblocks, len, scanlen;
+ int startblock;
+ loff_t from;
+ size_t readlen, ooblen;
+ struct mtd_oob_ops ops;
+ int rgn;
+
+ printk(KERN_INFO "Scanning device for bad blocks\n");
+
+ len = 2;
+
+ /* We only need to read a few bytes from the OOB area */
+ scanlen = ooblen = 0;
+ readlen = bd->len;
+
+ /* chip == -1 case only */
+ /* Note that numblocks is 2 * (real numblocks) here;
+ * see i += 2 below, as it makes the shifting and masking less painful
+ */
+ numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
+ startblock = 0;
+ from = 0;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooblen = readlen;
+ ops.oobbuf = buf;
+ ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
+
+ for (i = startblock; i < numblocks; ) {
+ int ret;
+
+ for (j = 0; j < len; j++) {
+ /* No need to read pages fully,
+ * just read required OOB bytes */
+ ret = onenand_bbt_read_oob(mtd,
+ from + j * this->writesize + bd->offs, &ops);
+
+ /* If it is an initial bad block, just ignore it;
+ * only a fatal read error aborts the scan */
+ if (ret == ONENAND_BBT_READ_FATAL_ERROR)
+ return -EIO;
+
+ if (ret || check_short_pattern(&buf[j * scanlen],
+ scanlen, this->writesize, bd)) {
+ bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
+ printk(KERN_INFO "OneNAND eraseblock %d is an "
+ "initial bad block\n", i >> 1);
+ mtd->ecc_stats.badblocks++;
+ break;
+ }
+ }
+ i += 2;
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, from);
+ from += mtd->eraseregions[rgn].erasesize;
+ } else
+ from += (1 << bbm->bbt_erase_shift);
+ }
+
+ return 0;
+}
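+
+/*
+ * Editor's note: a worked example of the table size, assuming a
+ * hypothetical 1GiB chip with 128KiB eraseblocks: 8192 blocks at two
+ * bits each need 16384 bits, i.e. a 2KiB table. This matches the
+ * chipsize >> (erase_shift + 2) allocation in onenand_scan_bbt() below.
+ */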
+
+
+/**
+ * onenand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @param mtd MTD device structure
+ * @param bd descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device
+ * for manufacturer / software marked good / bad blocks
+ */
+static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ return create_bbt(mtd, this->page_buf, bd, -1);
+}
+
+/**
+ * onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad
+ * @param mtd MTD device structure
+ * @param offs offset in the device
+ * @param allowbbt allow access to bad block table region
+ */
+static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int block;
+ uint8_t res;
+
+ /* Get block number * 2 */
+ block = (int) (onenand_block(this, offs) << 1);
+ res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+
+ pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ (unsigned int) offs, block >> 1, res);
+
+ switch ((int) res) {
+ case 0x00: return 0;
+ case 0x01: return 1;
+ case 0x02: return allowbbt ? 0 : 1;
+ }
+
+ return 1;
+}
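+
+/*
+ * Editor's note: the two-bit states as decoded above are, as far as
+ * this code shows: 00 good, 01 marked bad, 10 bad unless BBT access
+ * is allowed (allowbbt), and 11 (written by create_bbt() for initial
+ * bad blocks) always bad.
+ */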
+
+/**
+ * onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s)
+ * @param mtd MTD device structure
+ * @param bd descriptor for the good/bad block search pattern
+ *
+ * The function checks, if a bad block table(s) is/are already
+ * available. If not it scans the device for manufacturer
+ * marked good / bad blocks and writes the bad block table(s) to
+ * the selected place.
+ *
+ * The bad block table memory is allocated here. It is freed
+ * by the onenand_release function.
+ *
+ */
+int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int len, ret = 0;
+
+ len = this->chipsize >> (this->erase_shift + 2);
+ /* Allocate memory (2 bits per block) and clear the memory bad block table */
+ bbm->bbt = kzalloc(len, GFP_KERNEL);
+ if (!bbm->bbt)
+ return -ENOMEM;
+
+ /* Set the bad block position */
+ bbm->badblockpos = ONENAND_BADBLOCK_POS;
+
+ /* Set erase shift */
+ bbm->bbt_erase_shift = this->erase_shift;
+
+ if (!bbm->isbad_bbt)
+ bbm->isbad_bbt = onenand_isbad_bbt;
+
+ /* Scan the device to build a memory based bad block table */
+ if ((ret = onenand_memory_bbt(mtd, bd))) {
+ printk(KERN_ERR "onenand_scan_bbt: Can't scan flash and build the RAM-based BBT\n");
+ kfree(bbm->bbt);
+ bbm->bbt = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr largepage_memorybased = {
+ .options = 0,
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern,
+};
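+
+/*
+ * Editor's note: with this descriptor, create_bbt() checks the first
+ * two OOB bytes of the first two pages of every eraseblock; anything
+ * other than 0xff 0xff marks the block as an initial bad block.
+ */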
+
+/**
+ * onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device
+ * @param mtd MTD device structure
+ *
+ * This function selects the default bad block table
+ * support for the device and calls the onenand_scan_bbt function
+ */
+int onenand_default_bbt(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm;
+
+ this->bbm = kzalloc(sizeof(struct bbm_info), GFP_KERNEL);
+ if (!this->bbm)
+ return -ENOMEM;
+
+ bbm = this->bbm;
+
+ /* The 1KB page has the same configuration as the 2KB page */
+ if (!bbm->badblock_pattern)
+ bbm->badblock_pattern = &largepage_memorybased;
+
+ return onenand_scan_bbt(mtd, bbm->badblock_pattern);
+}
+
+EXPORT_SYMBOL(onenand_scan_bbt);
+EXPORT_SYMBOL(onenand_default_bbt);
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
new file mode 100644
index 000000000..19cfb97ad
--- /dev/null
+++ b/drivers/mtd/onenand/samsung.c
@@ -0,0 +1,1114 @@
+/*
+ * Samsung S3C64XX/S5PC1XX OneNAND driver
+ *
+ * Copyright © 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Implementation:
+ * S3C64XX: emulate the pseudo BufferRAM
+ * S5PC110: use DMA
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <asm/mach/flash.h>
+
+#include "samsung.h"
+
+enum soc_type {
+ TYPE_S3C6400,
+ TYPE_S3C6410,
+ TYPE_S5PC110,
+};
+
+#define ONENAND_ERASE_STATUS 0x00
+#define ONENAND_MULTI_ERASE_SET 0x01
+#define ONENAND_ERASE_START 0x03
+#define ONENAND_UNLOCK_START 0x08
+#define ONENAND_UNLOCK_END 0x09
+#define ONENAND_LOCK_START 0x0A
+#define ONENAND_LOCK_END 0x0B
+#define ONENAND_LOCK_TIGHT_START 0x0C
+#define ONENAND_LOCK_TIGHT_END 0x0D
+#define ONENAND_UNLOCK_ALL 0x0E
+#define ONENAND_OTP_ACCESS 0x12
+#define ONENAND_SPARE_ACCESS_ONLY 0x13
+#define ONENAND_MAIN_ACCESS_ONLY 0x14
+#define ONENAND_ERASE_VERIFY 0x15
+#define ONENAND_MAIN_SPARE_ACCESS 0x16
+#define ONENAND_PIPELINE_READ 0x4000
+
+#define MAP_00 (0x0)
+#define MAP_01 (0x1)
+#define MAP_10 (0x2)
+#define MAP_11 (0x3)
+
+#define S3C64XX_CMD_MAP_SHIFT 24
+
+#define S3C6400_FBA_SHIFT 10
+#define S3C6400_FPA_SHIFT 4
+#define S3C6400_FSA_SHIFT 2
+
+#define S3C6410_FBA_SHIFT 12
+#define S3C6410_FPA_SHIFT 6
+#define S3C6410_FSA_SHIFT 4
+
+/* S5PC110 specific definitions */
+#define S5PC110_DMA_SRC_ADDR 0x400
+#define S5PC110_DMA_SRC_CFG 0x404
+#define S5PC110_DMA_DST_ADDR 0x408
+#define S5PC110_DMA_DST_CFG 0x40C
+#define S5PC110_DMA_TRANS_SIZE 0x414
+#define S5PC110_DMA_TRANS_CMD 0x418
+#define S5PC110_DMA_TRANS_STATUS 0x41C
+#define S5PC110_DMA_TRANS_DIR 0x420
+#define S5PC110_INTC_DMA_CLR 0x1004
+#define S5PC110_INTC_ONENAND_CLR 0x1008
+#define S5PC110_INTC_DMA_MASK 0x1024
+#define S5PC110_INTC_ONENAND_MASK 0x1028
+#define S5PC110_INTC_DMA_PEND 0x1044
+#define S5PC110_INTC_ONENAND_PEND 0x1048
+#define S5PC110_INTC_DMA_STATUS 0x1064
+#define S5PC110_INTC_ONENAND_STATUS 0x1068
+
+#define S5PC110_INTC_DMA_TD (1 << 24)
+#define S5PC110_INTC_DMA_TE (1 << 16)
+
+#define S5PC110_DMA_CFG_SINGLE (0x0 << 16)
+#define S5PC110_DMA_CFG_4BURST (0x2 << 16)
+#define S5PC110_DMA_CFG_8BURST (0x3 << 16)
+#define S5PC110_DMA_CFG_16BURST (0x4 << 16)
+
+#define S5PC110_DMA_CFG_INC (0x0 << 8)
+#define S5PC110_DMA_CFG_CNT (0x1 << 8)
+
+#define S5PC110_DMA_CFG_8BIT (0x0 << 0)
+#define S5PC110_DMA_CFG_16BIT (0x1 << 0)
+#define S5PC110_DMA_CFG_32BIT (0x2 << 0)
+
+#define S5PC110_DMA_SRC_CFG_READ (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_16BIT)
+#define S5PC110_DMA_DST_CFG_READ (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_SRC_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_DST_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_16BIT)
+
+#define S5PC110_DMA_TRANS_CMD_TDC (0x1 << 18)
+#define S5PC110_DMA_TRANS_CMD_TEC (0x1 << 16)
+#define S5PC110_DMA_TRANS_CMD_TR (0x1 << 0)
+
+#define S5PC110_DMA_TRANS_STATUS_TD (0x1 << 18)
+#define S5PC110_DMA_TRANS_STATUS_TB (0x1 << 17)
+#define S5PC110_DMA_TRANS_STATUS_TE (0x1 << 16)
+
+#define S5PC110_DMA_DIR_READ 0x0
+#define S5PC110_DMA_DIR_WRITE 0x1
+
+struct s3c_onenand {
+ struct mtd_info *mtd;
+ struct platform_device *pdev;
+ enum soc_type type;
+ void __iomem *base;
+ struct resource *base_res;
+ void __iomem *ahb_addr;
+ struct resource *ahb_res;
+ int bootram_command;
+ void __iomem *page_buf;
+ void __iomem *oob_buf;
+ unsigned int (*mem_addr)(int fba, int fpa, int fsa);
+ unsigned int (*cmd_map)(unsigned int type, unsigned int val);
+ void __iomem *dma_addr;
+ struct resource *dma_res;
+ unsigned long phys_base;
+ struct completion complete;
+};
+
+#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
+#define CMD_MAP_01(dev, mem_addr) (dev->cmd_map(MAP_01, (mem_addr)))
+#define CMD_MAP_10(dev, mem_addr) (dev->cmd_map(MAP_10, (mem_addr)))
+#define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2)))
+
+static struct s3c_onenand *onenand;
+
+static inline int s3c_read_reg(int offset)
+{
+ return readl(onenand->base + offset);
+}
+
+static inline void s3c_write_reg(int value, int offset)
+{
+ writel(value, onenand->base + offset);
+}
+
+static inline int s3c_read_cmd(unsigned int cmd)
+{
+ return readl(onenand->ahb_addr + cmd);
+}
+
+static inline void s3c_write_cmd(int value, unsigned int cmd)
+{
+ writel(value, onenand->ahb_addr + cmd);
+}
+
+#ifdef SAMSUNG_DEBUG
+static void s3c_dump_reg(void)
+{
+ int i;
+
+ for (i = 0; i < 0x400; i += 0x40) {
+ printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (unsigned int) onenand->base + i,
+ s3c_read_reg(i), s3c_read_reg(i + 0x10),
+ s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
+ }
+}
+#endif
+
+static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
+{
+ return (type << S3C64XX_CMD_MAP_SHIFT) | val;
+}
+
+static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
+ (fsa << S3C6400_FSA_SHIFT);
+}
+
+static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
+ (fsa << S3C6410_FSA_SHIFT);
+}
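+
+/*
+ * Editor's note: an illustrative address composition using the S3C6410
+ * field layout above, for block (fba) 5, page (fpa) 3, sector (fsa) 0:
+ *
+ *	mem_addr = (5 << 12) | (3 << 6) | (0 << 4) = 0x50c0
+ *	CMD_MAP_01(onenand, mem_addr) = (MAP_01 << 24) | 0x50c0
+ *				      = 0x010050c0
+ */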
+
+static void s3c_onenand_reset(void)
+{
+ unsigned long timeout = 0x10000;
+ int stat;
+
+ s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+ while (1 && timeout--) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & RST_CMP)
+ break;
+ }
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ /* Clear interrupt */
+ s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
+ /* Clear the ECC status */
+ s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
+}
+
+static unsigned short s3c_onenand_readw(void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ int reg = addr - this->base;
+ int word_addr = reg >> 1;
+ int value;
+
+ /* Used during probing */
+ switch (reg) {
+ case ONENAND_REG_MANUFACTURER_ID:
+ return s3c_read_reg(MANUFACT_ID_OFFSET);
+ case ONENAND_REG_DEVICE_ID:
+ return s3c_read_reg(DEVICE_ID_OFFSET);
+ case ONENAND_REG_VERSION_ID:
+ return s3c_read_reg(FLASH_VER_ID_OFFSET);
+ case ONENAND_REG_DATA_BUFFER_SIZE:
+ return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
+ case ONENAND_REG_TECHNOLOGY:
+ return s3c_read_reg(TECH_OFFSET);
+ case ONENAND_REG_SYS_CFG1:
+ return s3c_read_reg(MEM_CFG_OFFSET);
+
+ /* Used when checking the unlock-all status */
+ case ONENAND_REG_CTRL_STATUS:
+ return 0;
+
+ case ONENAND_REG_WP_STATUS:
+ return ONENAND_WP_US;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
+ if (word_addr == 0)
+ return s3c_read_reg(MANUFACT_ID_OFFSET);
+ if (word_addr == 1)
+ return s3c_read_reg(DEVICE_ID_OFFSET);
+ if (word_addr == 2)
+ return s3c_read_reg(FLASH_VER_ID_OFFSET);
+ }
+
+ value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
+ dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+ word_addr, value);
+ return value;
+}
+
+static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int reg = addr - this->base;
+ unsigned int word_addr = reg >> 1;
+
+ /* Used during probing */
+ switch (reg) {
+ case ONENAND_REG_SYS_CFG1:
+ s3c_write_reg(value, MEM_CFG_OFFSET);
+ return;
+
+ case ONENAND_REG_START_ADDRESS1:
+ case ONENAND_REG_START_ADDRESS2:
+ return;
+
+ /* Lock/lock-tight/unlock/unlock_all */
+ case ONENAND_REG_START_BLOCK_ADDRESS:
+ return;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if ((unsigned int)addr < ONENAND_DATARAM) {
+ if (value == ONENAND_CMD_READID) {
+ onenand->bootram_command = 1;
+ return;
+ }
+ if (value == ONENAND_CMD_RESET) {
+ s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+ onenand->bootram_command = 0;
+ return;
+ }
+ }
+
+ dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+ word_addr, value);
+
+ s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
+}
+
+static int s3c_onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int flags = INT_ACT;
+ unsigned int stat, ecc;
+ unsigned long timeout;
+
+ switch (state) {
+ case FL_READING:
+ flags |= BLK_RW_CMP | LOAD_CMP;
+ break;
+ case FL_WRITING:
+ flags |= BLK_RW_CMP | PGM_CMP;
+ break;
+ case FL_ERASING:
+ flags |= BLK_RW_CMP | ERS_CMP;
+ break;
+ case FL_LOCKING:
+ flags |= BLK_RW_CMP;
+ break;
+ default:
+ break;
+ }
+
+ /* A 20 msec timeout is enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & flags)
+ break;
+
+ if (state != FL_READING)
+ cond_resched();
+ }
+ /* Re-read to get the correct interrupt status in the timeout case */
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ /*
+ * The spec says to check the controller status first, but to get
+ * correct information in the power-off-recovery (POR) test case,
+ * the ECC status must be read first.
+ */
+ if (stat & LOAD_CMP) {
+ ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
+ ecc);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ }
+ }
+
+ if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
+ dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
+ stat);
+ if (stat & LOCKED_BLK)
+ dev_info(dev, "%s: it's locked error = 0x%04x\n",
+ __func__, stat);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int *m, *s;
+ int fba, fpa, fsa = 0;
+ unsigned int mem_addr, cmd_map_01, cmd_map_10;
+ int i, mcount, scount;
+ int index;
+
+ fba = (int) (addr >> this->erase_shift);
+ fpa = (int) (addr >> this->page_shift);
+ fpa &= this->page_mask;
+
+ mem_addr = onenand->mem_addr(fba, fpa, fsa);
+ cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
+ cmd_map_10 = CMD_MAP_10(onenand, mem_addr);
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ case ONENAND_CMD_BUFFERRAM:
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ default:
+ break;
+ }
+
+ index = ONENAND_CURRENT_BUFFERRAM(this);
+
+ /*
+ * Emulate two BufferRAMs and access them through 4-byte pointers
+ */
+ m = (unsigned int *) onenand->page_buf;
+ s = (unsigned int *) onenand->oob_buf;
+
+ if (index) {
+ m += (this->writesize >> 2);
+ s += (mtd->oobsize >> 2);
+ }
+
+ mcount = mtd->writesize >> 2;
+ scount = mtd->oobsize >> 2;
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(cmd_map_01);
+ return 0;
+
+ case ONENAND_CMD_READOOB:
+ s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(cmd_map_01);
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ *s++ = s3c_read_cmd(cmd_map_01);
+
+ s3c_write_reg(0, TRANS_SPARE_OFFSET);
+ return 0;
+
+ case ONENAND_CMD_PROG:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(*m++, cmd_map_01);
+ return 0;
+
+ case ONENAND_CMD_PROGOOB:
+ s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+
+ /* Main - dummy write */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(0xffffffff, cmd_map_01);
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ s3c_write_cmd(*s++, cmd_map_01);
+
+ s3c_write_reg(0, TRANS_SPARE_OFFSET);
+ return 0;
+
+ case ONENAND_CMD_UNLOCK_ALL:
+ s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
+ return 0;
+
+ case ONENAND_CMD_ERASE:
+ s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
+ return 0;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+ int index = ONENAND_CURRENT_BUFFERRAM(this);
+ unsigned char *p;
+
+ if (area == ONENAND_DATARAM) {
+ p = (unsigned char *) onenand->page_buf;
+ if (index == 1)
+ p += this->writesize;
+ } else {
+ p = (unsigned char *) onenand->oob_buf;
+ if (index == 1)
+ p += mtd->oobsize;
+ }
+
+ return p;
+}
+
+static int onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(buffer, p + offset, count);
+ return 0;
+}
+
+static int onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(p + offset, buffer, count);
+ return 0;
+}
+
+static int (*s5pc110_dma_ops)(dma_addr_t dst, dma_addr_t src, size_t count, int direction);
+
+static int s5pc110_dma_poll(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status;
+ unsigned long timeout;
+
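+ /* Program the source/destination addresses and the per-direction
+ * burst configuration, then the size and direction, and finally
+ * kick the transfer with the TR bit. */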
+ writel(src, base + S5PC110_DMA_SRC_ADDR);
+ writel(dst, base + S5PC110_DMA_DST_ADDR);
+
+ if (direction == S5PC110_DMA_DIR_READ) {
+ writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
+ } else {
+ writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
+ }
+
+ writel(count, base + S5PC110_DMA_TRANS_SIZE);
+ writel(direction, base + S5PC110_DMA_TRANS_DIR);
+
+ writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+
+ /*
+ * The spec gives no exact timeout value.
+ * In practice a transfer takes under 1 msec,
+ * so 20 msecs are more than enough.
+ */
+ timeout = jiffies + msecs_to_jiffies(20);
+
+ do {
+ status = readl(base + S5PC110_DMA_TRANS_STATUS);
+ if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+ writel(S5PC110_DMA_TRANS_CMD_TEC,
+ base + S5PC110_DMA_TRANS_CMD);
+ return -EIO;
+ }
+ } while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
+ time_before(jiffies, timeout));
+
+ writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
+
+ return 0;
+}
+
+static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status, cmd = 0;
+
+ status = readl(base + S5PC110_INTC_DMA_STATUS);
+
+ if (likely(status & S5PC110_INTC_DMA_TD))
+ cmd = S5PC110_DMA_TRANS_CMD_TDC;
+
+ if (unlikely(status & S5PC110_INTC_DMA_TE))
+ cmd = S5PC110_DMA_TRANS_CMD_TEC;
+
+ writel(cmd, base + S5PC110_DMA_TRANS_CMD);
+ writel(status, base + S5PC110_INTC_DMA_CLR);
+
+ if (!onenand->complete.done)
+ complete(&onenand->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int s5pc110_dma_irq(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status;
+
+ status = readl(base + S5PC110_INTC_DMA_MASK);
+ if (status) {
+ status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
+ writel(status, base + S5PC110_INTC_DMA_MASK);
+ }
+
+ writel(src, base + S5PC110_DMA_SRC_ADDR);
+ writel(dst, base + S5PC110_DMA_DST_ADDR);
+
+ if (direction == S5PC110_DMA_DIR_READ) {
+ writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
+ } else {
+ writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
+ }
+
+ writel(count, base + S5PC110_DMA_TRANS_SIZE);
+ writel(direction, base + S5PC110_DMA_TRANS_DIR);
+
+ writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+
+ wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20));
+
+ return 0;
+}
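+
+/*
+ * Editor's note: this IRQ variant programs the engine exactly like
+ * s5pc110_dma_poll() above, but sleeps on the completion until
+ * s5pc110_onenand_irq() acknowledges TD/TE. The probe routine selects
+ * one of the two for s5pc110_dma_ops, depending on whether an IRQ
+ * resource is available.
+ */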
+
+static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *p;
+ void *buf = (void *) buffer;
+ dma_addr_t dma_src, dma_dst;
+ int err, ofs, page_dma = 0;
+ struct device *dev = &onenand->pdev->dev;
+
+ p = this->base + area;
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ if (area == ONENAND_DATARAM)
+ p += this->writesize;
+ else
+ p += mtd->oobsize;
+ }
+
+ if (offset & 3 || (size_t) buf & 3 ||
+ !onenand->dma_addr || count != mtd->writesize)
+ goto normal;
+
+ /* Handle vmalloc address */
+ if (buf >= high_memory) {
+ struct page *page;
+
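+ /* vmalloc buffers are only virtually contiguous: the DMA engine
+ * needs the underlying physical page, and a transfer crossing a
+ * page boundary cannot be mapped as a single page, so fall back
+ * to the memcpy path below in that case. */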
+ if (((size_t) buf & PAGE_MASK) !=
+ ((size_t) (buf + count - 1) & PAGE_MASK))
+ goto normal;
+ page = vmalloc_to_page(buf);
+ if (!page)
+ goto normal;
+
+ /* Page offset */
+ ofs = ((size_t) buf & ~PAGE_MASK);
+ page_dma = 1;
+
+ /* DMA routine */
+ dma_src = onenand->phys_base + (p - this->base);
+ dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
+ } else {
+ /* DMA routine */
+ dma_src = onenand->phys_base + (p - this->base);
+ dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
+ }
+ if (dma_mapping_error(dev, dma_dst)) {
+ dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);
+ goto normal;
+ }
+ err = s5pc110_dma_ops(dma_dst, dma_src,
+ count, S5PC110_DMA_DIR_READ);
+
+ if (page_dma)
+ dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
+ else
+ dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
+
+ if (!err)
+ return 0;
+
+normal:
+ if (count != mtd->writesize) {
+ /* Copy the bufferram to memory to prevent unaligned access */
+ memcpy(this->page_buf, p, mtd->writesize);
+ p = this->page_buf + offset;
+ }
+
+ memcpy(buffer, p, count);
+
+ return 0;
+}
+
+static int s5pc110_chip_probe(struct mtd_info *mtd)
+{
+ /* Nothing chip-specific to probe; just report success */
+ return 0;
+}
+
+static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ unsigned int flags = INT_ACT | LOAD_CMP;
+ unsigned int stat;
+ unsigned long timeout;
+
+ /* A 20 msec timeout is enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & flags)
+ break;
+ }
+ /* Re-read to get the correct interrupt status in the timeout case */
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ if (stat & LD_FAIL_ECC_ERR) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ if (stat & LOAD_CMP) {
+ int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int block, end;
+ int tmp;
+
+ end = this->chipsize >> this->erase_shift;
+
+ for (block = 0; block < end; block++) {
+ unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
+ tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
+
+ if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
+ dev_err(dev, "block %d is write-protected!\n", block);
+ s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
+ }
+ }
+}
+
+static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
+ size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, start_mem_addr, end_mem_addr;
+
+ start = ofs >> this->erase_shift;
+ start_mem_addr = onenand->mem_addr(start, 0, 0);
+ end = start + (len >> this->erase_shift) - 1;
+ end_mem_addr = onenand->mem_addr(end, 0, 0);
+
+ if (cmd == ONENAND_CMD_LOCK) {
+ s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
+ start_mem_addr));
+ s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
+ end_mem_addr));
+ } else {
+ s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
+ start_mem_addr));
+ s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
+ end_mem_addr));
+ }
+
+ this->wait(mtd, FL_LOCKING);
+}
+
+static void s3c_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ size_t len = this->chipsize;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* No need to check return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Workaround for all block unlock in DDP */
+ if (!ONENAND_IS_DDP(this)) {
+ s3c_onenand_check_lock_status(mtd);
+ return;
+ }
+
+ /* Unlock all blocks on the other chip */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+
+ s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+
+ s3c_onenand_check_lock_status(mtd);
+}
+
+static void s3c_onenand_setup(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ onenand->mtd = mtd;
+
+ if (onenand->type == TYPE_S3C6400) {
+ onenand->mem_addr = s3c6400_mem_addr;
+ onenand->cmd_map = s3c64xx_cmd_map;
+ } else if (onenand->type == TYPE_S3C6410) {
+ onenand->mem_addr = s3c6410_mem_addr;
+ onenand->cmd_map = s3c64xx_cmd_map;
+ } else if (onenand->type == TYPE_S5PC110) {
+ /* Use generic onenand functions */
+ this->read_bufferram = s5pc110_read_bufferram;
+ this->chip_probe = s5pc110_chip_probe;
+ return;
+ } else {
+ BUG();
+ }
+
+ this->read_word = s3c_onenand_readw;
+ this->write_word = s3c_onenand_writew;
+
+ this->wait = s3c_onenand_wait;
+ this->bbt_wait = s3c_onenand_bbt_wait;
+ this->unlock_all = s3c_unlock_all;
+ this->command = s3c_onenand_command;
+
+ this->read_bufferram = onenand_read_bufferram;
+ this->write_bufferram = onenand_write_bufferram;
+}
+
+static int s3c_onenand_probe(struct platform_device *pdev)
+{
+ struct onenand_platform_data *pdata;
+ struct onenand_chip *this;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int size, err;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ /* No need to check pdata; the platform data is optional */
+
+ size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
+ mtd = kzalloc(size, GFP_KERNEL);
+ if (!mtd)
+ return -ENOMEM;
+
+ onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
+ if (!onenand) {
+ err = -ENOMEM;
+ goto onenand_fail;
+ }
+
+ this = (struct onenand_chip *) &mtd[1];
+ mtd->priv = this;
+ mtd->dev.parent = &pdev->dev;
+ mtd->owner = THIS_MODULE;
+ onenand->pdev = pdev;
+ onenand->type = platform_get_device_id(pdev)->driver_data;
+
+ s3c_onenand_setup(mtd);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ err = -ENOENT;
+ goto resource_failed;
+ }
+
+ onenand->base_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->base_res) {
+ dev_err(&pdev->dev, "failed to request memory resource\n");
+ err = -EBUSY;
+ goto resource_failed;
+ }
+
+ onenand->base = ioremap(r->start, resource_size(r));
+ if (!onenand->base) {
+ dev_err(&pdev->dev, "failed to map memory resource\n");
+ err = -EFAULT;
+ goto ioremap_failed;
+ }
+ /* Set onenand_chip also */
+ this->base = onenand->base;
+
+ /* Rely on the runtime bad block check and skip the unlock check */
+ this->options |= ONENAND_SKIP_UNLOCK_CHECK;
+
+ if (onenand->type != TYPE_S5PC110) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ dev_err(&pdev->dev, "no buffer memory resource defined\n");
+ err = -ENOENT;
+ goto ahb_resource_failed;
+ }
+
+ onenand->ahb_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->ahb_res) {
+ dev_err(&pdev->dev, "failed to request buffer memory resource\n");
+ err = -EBUSY;
+ goto ahb_resource_failed;
+ }
+
+ onenand->ahb_addr = ioremap(r->start, resource_size(r));
+ if (!onenand->ahb_addr) {
+ dev_err(&pdev->dev, "failed to map buffer memory resource\n");
+ err = -EINVAL;
+ goto ahb_ioremap_failed;
+ }
+
+ /* Allocate 4KiB BufferRAM */
+ onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!onenand->page_buf) {
+ err = -ENOMEM;
+ goto page_buf_fail;
+ }
+
+ /* Allocate a 128-byte SpareRAM */
+ onenand->oob_buf = kzalloc(128, GFP_KERNEL);
+ if (!onenand->oob_buf) {
+ err = -ENOMEM;
+ goto oob_buf_fail;
+ }
+
+ /* S3C doesn't handle subpage write */
+ mtd->subpage_sft = 0;
+ this->subpagesize = mtd->writesize;
+
+ } else { /* S5PC110 */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ dev_err(&pdev->dev, "no dma memory resource defined\n");
+ err = -ENOENT;
+ goto dma_resource_failed;
+ }
+
+ onenand->dma_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->dma_res) {
+ dev_err(&pdev->dev, "failed to request dma memory resource\n");
+ err = -EBUSY;
+ goto dma_resource_failed;
+ }
+
+ onenand->dma_addr = ioremap(r->start, resource_size(r));
+ if (!onenand->dma_addr) {
+ dev_err(&pdev->dev, "failed to map dma memory resource\n");
+ err = -EINVAL;
+ goto dma_ioremap_failed;
+ }
+
+ onenand->phys_base = onenand->base_res->start;
+
+ s5pc110_dma_ops = s5pc110_dma_poll;
+ /* Interrupt support */
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (r) {
+ init_completion(&onenand->complete);
+ s5pc110_dma_ops = s5pc110_dma_irq;
+ err = request_irq(r->start, s5pc110_onenand_irq,
+ IRQF_SHARED, "onenand", &onenand);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ goto scan_failed;
+ }
+ }
+ }
+
+ if (onenand_scan(mtd, 1)) {
+ err = -EFAULT;
+ goto scan_failed;
+ }
+
+ if (onenand->type != TYPE_S5PC110) {
+ /* S3C doesn't handle subpage write */
+ mtd->subpage_sft = 0;
+ this->subpagesize = mtd->writesize;
+ }
+
+ if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
+ dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
+
+ err = mtd_device_parse_register(mtd, NULL, NULL,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
+
+ platform_set_drvdata(pdev, mtd);
+
+ return 0;
+
+scan_failed:
+ if (onenand->dma_addr)
+ iounmap(onenand->dma_addr);
+dma_ioremap_failed:
+ if (onenand->dma_res)
+ release_mem_region(onenand->dma_res->start,
+ resource_size(onenand->dma_res));
+ kfree(onenand->oob_buf);
+oob_buf_fail:
+ kfree(onenand->page_buf);
+page_buf_fail:
+ if (onenand->ahb_addr)
+ iounmap(onenand->ahb_addr);
+ahb_ioremap_failed:
+ if (onenand->ahb_res)
+ release_mem_region(onenand->ahb_res->start,
+ resource_size(onenand->ahb_res));
+dma_resource_failed:
+ahb_resource_failed:
+ iounmap(onenand->base);
+ioremap_failed:
+ if (onenand->base_res)
+ release_mem_region(onenand->base_res->start,
+ resource_size(onenand->base_res));
+resource_failed:
+ kfree(onenand);
+onenand_fail:
+ kfree(mtd);
+ return err;
+}
+
+static int s3c_onenand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+
+ onenand_release(mtd);
+ if (onenand->ahb_addr)
+ iounmap(onenand->ahb_addr);
+ if (onenand->ahb_res)
+ release_mem_region(onenand->ahb_res->start,
+ resource_size(onenand->ahb_res));
+ if (onenand->dma_addr)
+ iounmap(onenand->dma_addr);
+ if (onenand->dma_res)
+ release_mem_region(onenand->dma_res->start,
+ resource_size(onenand->dma_res));
+
+ iounmap(onenand->base);
+ release_mem_region(onenand->base_res->start,
+ resource_size(onenand->base_res));
+
+ kfree(onenand->oob_buf);
+ kfree(onenand->page_buf);
+ kfree(onenand);
+ kfree(mtd);
+ return 0;
+}
+
+static int s3c_pm_ops_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct onenand_chip *this = mtd->priv;
+
+ this->wait(mtd, FL_PM_SUSPENDED);
+ return 0;
+}
+
+static int s3c_pm_ops_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct onenand_chip *this = mtd->priv;
+
+ this->unlock_all(mtd);
+ return 0;
+}
+
+static const struct dev_pm_ops s3c_pm_ops = {
+ .suspend = s3c_pm_ops_suspend,
+ .resume = s3c_pm_ops_resume,
+};
+
+static struct platform_device_id s3c_onenand_driver_ids[] = {
+ {
+ .name = "s3c6400-onenand",
+ .driver_data = TYPE_S3C6400,
+ }, {
+ .name = "s3c6410-onenand",
+ .driver_data = TYPE_S3C6410,
+ }, {
+ .name = "s5pc110-onenand",
+ .driver_data = TYPE_S5PC110,
+ }, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);
+
+static struct platform_driver s3c_onenand_driver = {
+ .driver = {
+ .name = "samsung-onenand",
+ .pm = &s3c_pm_ops,
+ },
+ .id_table = s3c_onenand_driver_ids,
+ .probe = s3c_onenand_probe,
+ .remove = s3c_onenand_remove,
+};
+
+module_platform_driver(s3c_onenand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("Samsung OneNAND controller support");
diff --git a/drivers/mtd/onenand/samsung.h b/drivers/mtd/onenand/samsung.h
new file mode 100644
index 000000000..9016dc013
--- /dev/null
+++ b/drivers/mtd/onenand/samsung.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SAMSUNG_ONENAND_H__
+#define __SAMSUNG_ONENAND_H__
+
+/*
+ * OneNAND Controller
+ */
+#define MEM_CFG_OFFSET 0x0000
+#define BURST_LEN_OFFSET 0x0010
+#define MEM_RESET_OFFSET 0x0020
+#define INT_ERR_STAT_OFFSET 0x0030
+#define INT_ERR_MASK_OFFSET 0x0040
+#define INT_ERR_ACK_OFFSET 0x0050
+#define ECC_ERR_STAT_OFFSET 0x0060
+#define MANUFACT_ID_OFFSET 0x0070
+#define DEVICE_ID_OFFSET 0x0080
+#define DATA_BUF_SIZE_OFFSET 0x0090
+#define BOOT_BUF_SIZE_OFFSET 0x00A0
+#define BUF_AMOUNT_OFFSET 0x00B0
+#define TECH_OFFSET 0x00C0
+#define FBA_WIDTH_OFFSET 0x00D0
+#define FPA_WIDTH_OFFSET 0x00E0
+#define FSA_WIDTH_OFFSET 0x00F0
+#define TRANS_SPARE_OFFSET 0x0140
+#define DBS_DFS_WIDTH_OFFSET 0x0160
+#define INT_PIN_ENABLE_OFFSET 0x01A0
+#define ACC_CLOCK_OFFSET 0x01C0
+#define FLASH_VER_ID_OFFSET 0x01F0
+#define FLASH_AUX_CNTRL_OFFSET 0x0300 /* s3c64xx only */
+
+#define ONENAND_MEM_RESET_HOT 0x3
+#define ONENAND_MEM_RESET_COLD 0x2
+#define ONENAND_MEM_RESET_WARM 0x1
+
+#define CACHE_OP_ERR (1 << 13)
+#define RST_CMP (1 << 12)
+#define RDY_ACT (1 << 11)
+#define INT_ACT (1 << 10)
+#define UNSUP_CMD (1 << 9)
+#define LOCKED_BLK (1 << 8)
+#define BLK_RW_CMP (1 << 7)
+#define ERS_CMP (1 << 6)
+#define PGM_CMP (1 << 5)
+#define LOAD_CMP (1 << 4)
+#define ERS_FAIL (1 << 3)
+#define PGM_FAIL (1 << 2)
+#define INT_TO (1 << 1)
+#define LD_FAIL_ECC_ERR (1 << 0)
+
+#define TSRF (1 << 0)
+
+#endif
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
new file mode 100644
index 000000000..5da911ebd
--- /dev/null
+++ b/drivers/mtd/redboot.c
@@ -0,0 +1,317 @@
+/*
+ * Parse RedBoot-style Flash Image System (FIS) tables and
+ * produce a Linux partition array to match.
+ *
+ * Copyright © 2001 Red Hat UK Limited
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/module.h>
+
+struct fis_image_desc {
+ unsigned char name[16]; // Null terminated name
+ uint32_t flash_base; // Address within FLASH of image
+ uint32_t mem_base; // Address in memory where it executes
+ uint32_t size; // Length of image
+ uint32_t entry_point; // Execution entry point
+ uint32_t data_length; // Length of actual data
+ unsigned char _pad[256-(16+7*sizeof(uint32_t))];
+ uint32_t desc_cksum; // Checksum over image descriptor
+ uint32_t file_cksum; // Checksum over image data
+};
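+
+/*
+ * Editor's note: the _pad field sizes each descriptor to exactly
+ * 256 bytes, so e.g. a 64KiB eraseblock holds 256 slots; numslots in
+ * parse_redboot_partitions() below is derived the same way from the
+ * real eraseblock size.
+ */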
+
+struct fis_list {
+ struct fis_image_desc *img;
+ struct fis_list *next;
+};
+
+static int directory = CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK;
+module_param(directory, int, 0);
+
+static inline int redboot_checksum(struct fis_image_desc *img)
+{
+ /* RedBoot doesn't actually write the desc_cksum field yet AFAICT */
+ return 1;
+}
+
+static int parse_redboot_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ int nrparts = 0;
+ struct fis_image_desc *buf;
+ struct mtd_partition *parts;
+ struct fis_list *fl = NULL, *tmp_fl;
+ int ret, i;
+ size_t retlen;
+ char *names;
+ char *nullname;
+ int namelen = 0;
+ int nulllen = 0;
+ int numslots;
+ unsigned long offset;
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ static char nullstring[] = "unallocated";
+#endif
+
+ if ( directory < 0 ) {
+ offset = master->size + directory * master->erasesize;
+ while (mtd_block_isbad(master, offset)) {
+ if (!offset) {
+ nogood:
+ printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
+ return -EIO;
+ }
+ offset -= master->erasesize;
+ }
+ } else {
+ offset = directory * master->erasesize;
+ while (mtd_block_isbad(master, offset)) {
+ offset += master->erasesize;
+ if (offset == master->size)
+ goto nogood;
+ }
+ }
+ buf = vmalloc(master->erasesize);
+
+ if (!buf)
+ return -ENOMEM;
+
+ printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
+ master->name, offset);
+
+ ret = mtd_read(master, offset, master->erasesize, &retlen,
+ (void *)buf);
+
+ if (ret)
+ goto out;
+
+ if (retlen != master->erasesize) {
+ ret = -EIO;
+ goto out;
+ }
+
+ numslots = (master->erasesize / sizeof(struct fis_image_desc));
+ for (i = 0; i < numslots; i++) {
+ if (!memcmp(buf[i].name, "FIS directory", 14)) {
+ /* This is apparently the FIS directory entry for the
+ * FIS directory itself. The FIS directory size is
+ * one erase block; if the buf[i].size field is
+ * swab32(erasesize) then we know we are looking at
+ * a byte swapped FIS directory - swap all the entries!
+ * (NOTE: this is 'size' not 'data_length'; size is
+ * the full size of the entry.)
+ */
+
+ /* RedBoot can combine the FIS directory and
+ config partitions into a single eraseblock;
+ we assume wrong-endian if either the swapped
+ 'size' matches the eraseblock size precisely,
+ or if the swapped size actually fits in an
+ eraseblock while the unswapped size doesn't. */
+ if (swab32(buf[i].size) == master->erasesize ||
+ (buf[i].size > master->erasesize
+ && swab32(buf[i].size) < master->erasesize)) {
+ int j;
+ /* Update numslots based on actual FIS directory size */
+ numslots = swab32(buf[i].size) / sizeof (struct fis_image_desc);
+ for (j = 0; j < numslots; ++j) {
+
+ /* A single 0xff denotes a deleted entry.
+ * Two of them in a row is the end of the table.
+ */
+ if (buf[j].name[0] == 0xff) {
+ if (buf[j].name[1] == 0xff) {
+ break;
+ } else {
+ continue;
+ }
+ }
+
+ /* The 32-bit fields were written with the
+ * wrong byte order; name and pad have no byte order.
+ */
+ swab32s(&buf[j].flash_base);
+ swab32s(&buf[j].mem_base);
+ swab32s(&buf[j].size);
+ swab32s(&buf[j].entry_point);
+ swab32s(&buf[j].data_length);
+ swab32s(&buf[j].desc_cksum);
+ swab32s(&buf[j].file_cksum);
+ }
+ } else if (buf[i].size < master->erasesize) {
+ /* Update numslots based on actual FIS directory size */
+ numslots = buf[i].size / sizeof(struct fis_image_desc);
+ }
+ break;
+ }
+ }
+ if (i == numslots) {
+ /* Didn't find it */
+ printk(KERN_NOTICE "No RedBoot partition table detected in %s\n",
+ master->name);
+ ret = 0;
+ goto out;
+ }
+
+ for (i = 0; i < numslots; i++) {
+ struct fis_list *new_fl, **prev;
+
+ if (buf[i].name[0] == 0xff) {
+ if (buf[i].name[1] == 0xff) {
+ break;
+ } else {
+ continue;
+ }
+ }
+ if (!redboot_checksum(&buf[i]))
+ break;
+
+ new_fl = kmalloc(sizeof(struct fis_list), GFP_KERNEL);
+ namelen += strlen(buf[i].name)+1;
+ if (!new_fl) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ new_fl->img = &buf[i];
+ if (data && data->origin)
+ buf[i].flash_base -= data->origin;
+ else
+ buf[i].flash_base &= master->size-1;
+
+ /* I'm sure the JFFS2 code has done me permanent damage.
+ * I now think the following is _normal_: a sorted insert
+ * (by flash_base) through a pointer-to-pointer.
+ */
+ prev = &fl;
+ while (*prev && (*prev)->img->flash_base < new_fl->img->flash_base)
+ prev = &(*prev)->next;
+ new_fl->next = *prev;
+ *prev = new_fl;
+
+ nrparts++;
+ }
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (fl->img->flash_base) {
+ nrparts++;
+ nulllen = sizeof(nullstring);
+ }
+
+ for (tmp_fl = fl; tmp_fl->next; tmp_fl = tmp_fl->next) {
+ if (tmp_fl->img->flash_base + tmp_fl->img->size + master->erasesize <= tmp_fl->next->img->flash_base) {
+ nrparts++;
+ nulllen = sizeof(nullstring);
+ }
+ }
+#endif
+ parts = kzalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL);
+
+ if (!parts) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nullname = (char *)&parts[nrparts];
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (nulllen > 0) {
+ strcpy(nullname, nullstring);
+ }
+#endif
+ names = nullname + nulllen;
+
+ i=0;
+
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (fl->img->flash_base) {
+ parts[0].name = nullname;
+ parts[0].size = fl->img->flash_base;
+ parts[0].offset = 0;
+ i++;
+ }
+#endif
+ for ( ; i<nrparts; i++) {
+ parts[i].size = fl->img->size;
+ parts[i].offset = fl->img->flash_base;
+ parts[i].name = names;
+
+ strcpy(names, fl->img->name);
+#ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY
+ if (!memcmp(names, "RedBoot", 8) ||
+ !memcmp(names, "RedBoot config", 15) ||
+ !memcmp(names, "FIS directory", 14)) {
+ parts[i].mask_flags = MTD_WRITEABLE;
+ }
+#endif
+ names += strlen(names)+1;
+
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
+ i++;
+ parts[i].offset = parts[i-1].size + parts[i-1].offset;
+ parts[i].size = fl->next->img->flash_base - parts[i].offset;
+ parts[i].name = nullname;
+ }
+#endif
+ tmp_fl = fl;
+ fl = fl->next;
+ kfree(tmp_fl);
+ }
+ ret = nrparts;
+ *pparts = parts;
+ out:
+ while (fl) {
+ struct fis_list *old = fl;
+ fl = fl->next;
+ kfree(old);
+ }
+ vfree(buf);
+ return ret;
+}
+
+static struct mtd_part_parser redboot_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_redboot_partitions,
+ .name = "RedBoot",
+};
+
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("RedBoot");
+
+static int __init redboot_parser_init(void)
+{
+ register_mtd_parser(&redboot_parser);
+ return 0;
+}
+
+static void __exit redboot_parser_exit(void)
+{
+ deregister_mtd_parser(&redboot_parser);
+}
+
+module_init(redboot_parser_init);
+module_exit(redboot_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Parsing code for RedBoot Flash Image System (FIS) tables");
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
new file mode 100644
index 000000000..d1cbf26db
--- /dev/null
+++ b/drivers/mtd/rfd_ftl.c
@@ -0,0 +1,849 @@
+/*
+ * rfd_ftl.c -- resident flash disk (flash translation layer)
+ *
+ * Copyright © 2005 Sean Young <sean@mess.org>
+ *
+ * This type of flash translation layer (FTL) is used by the Embedded BIOS
+ * by General Software. It is known as the Resident Flash Disk (RFD), see:
+ *
+ * http://www.gensw.com/pages/prod/bios/rfd.htm
+ *
+ * based on ftl.c
+ */
+
+#include <linux/hdreg.h>
+#include <linux/init.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/mtd.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+#include <asm/types.h>
+
+static int block_size = 0;
+module_param(block_size, int, 0);
+MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
+
+#define PREFIX "rfd_ftl: "
+
+/* This major has been assigned by device@lanana.org */
+#ifndef RFD_FTL_MAJOR
+#define RFD_FTL_MAJOR 256
+#endif
+
+/* Maximum number of partitions in an FTL region */
+#define PART_BITS 4
+
+/* An erase unit should start with this value */
+#define RFD_MAGIC 0x9193
+
+/* the second value is 0xffff or 0xffc8; function unknown */
+
+/* the third value is always 0xffff, ignored */
+
+/* next is an array of mapping for each corresponding sector */
+#define HEADER_MAP_OFFSET 3
+#define SECTOR_DELETED 0x0000
+#define SECTOR_ZERO 0xfffe
+#define SECTOR_FREE 0xffff
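+
+/*
+ * Editor's note: putting the values above together, the per-unit header
+ * parsed by build_block_map() below is an array of little-endian u16s:
+ *
+ *	u16[0]		RFD_MAGIC
+ *	u16[1]		0xffff or 0xffc8, function unknown
+ *	u16[2]		always 0xffff, ignored
+ *	u16[3 + i]	logical sector stored in data sector i, or
+ *			SECTOR_DELETED / SECTOR_ZERO / SECTOR_FREE
+ */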
+
+#define SECTOR_SIZE 512
+
+#define SECTORS_PER_TRACK 63
+
+struct block {
+ enum {
+ BLOCK_OK,
+ BLOCK_ERASING,
+ BLOCK_ERASED,
+ BLOCK_UNUSED,
+ BLOCK_FAILED
+ } state;
+ int free_sectors;
+ int used_sectors;
+ int erases;
+ u_long offset;
+};
+
+struct partition {
+ struct mtd_blktrans_dev mbd;
+
+ u_int block_size; /* size of erase unit */
+ u_int total_blocks; /* number of erase units */
+ u_int header_sectors_per_block; /* header sectors in erase unit */
+ u_int data_sectors_per_block; /* data sectors in erase unit */
+ u_int sector_count; /* sectors in translated disk */
+ u_int header_size; /* bytes in header sector */
+ int reserved_block; /* block next up for reclaim */
+ int current_block; /* block to write to */
+ u16 *header_cache; /* cached header */
+
+ int is_reclaiming;
+ int cylinders;
+ int errors;
+ u_long *sector_map;
+ struct block *blocks;
+};
+
+static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
+
+static int build_block_map(struct partition *part, int block_no)
+{
+ struct block *block = &part->blocks[block_no];
+ int i;
+
+ block->offset = part->block_size * block_no;
+
+ if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
+ block->state = BLOCK_UNUSED;
+ return -ENOENT;
+ }
+
+ block->state = BLOCK_OK;
+
+ for (i=0; i<part->data_sectors_per_block; i++) {
+ u16 entry;
+
+ entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
+
+ if (entry == SECTOR_DELETED)
+ continue;
+
+ if (entry == SECTOR_FREE) {
+ block->free_sectors++;
+ continue;
+ }
+
+ if (entry == SECTOR_ZERO)
+ entry = 0;
+
+ if (entry >= part->sector_count) {
+ printk(KERN_WARNING PREFIX
+ "'%s': unit #%d: entry %d corrupt, "
+ "sector %d out of range\n",
+ part->mbd.mtd->name, block_no, i, entry);
+ continue;
+ }
+
+ if (part->sector_map[entry] != -1) {
+ printk(KERN_WARNING PREFIX
+ "'%s': more than one entry for sector %d\n",
+ part->mbd.mtd->name, entry);
+ part->errors = 1;
+ continue;
+ }
+
+ part->sector_map[entry] = block->offset +
+ (i + part->header_sectors_per_block) * SECTOR_SIZE;
+
+ block->used_sectors++;
+ }
+
+ if (block->free_sectors == part->data_sectors_per_block)
+ part->reserved_block = block_no;
+
+ return 0;
+}
+
+static int scan_header(struct partition *part)
+{
+ int sectors_per_block;
+ int i, rc = -ENOMEM;
+ int blocks_found;
+ size_t retlen;
+
+ sectors_per_block = part->block_size / SECTOR_SIZE;
+ part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
+
+ if (part->total_blocks < 2)
+ return -ENOENT;
+
+ /* each erase block has three bytes header, followed by the map */
+ part->header_sectors_per_block =
+ ((HEADER_MAP_OFFSET + sectors_per_block) *
+ sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
+
+ part->data_sectors_per_block = sectors_per_block -
+ part->header_sectors_per_block;
+
+ part->header_size = (HEADER_MAP_OFFSET +
+ part->data_sectors_per_block) * sizeof(u16);
+
+ part->cylinders = (part->data_sectors_per_block *
+ (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
+
+ part->sector_count = part->cylinders * SECTORS_PER_TRACK;
+
+ part->current_block = -1;
+ part->reserved_block = -1;
+ part->is_reclaiming = 0;
+
+ part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
+ if (!part->header_cache)
+ goto err;
+
+ part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
+ GFP_KERNEL);
+ if (!part->blocks)
+ goto err;
+
+ part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
+ if (!part->sector_map) {
+ printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
+ "sector map", part->mbd.mtd->name);
+ goto err;
+ }
+
+ for (i=0; i<part->sector_count; i++)
+ part->sector_map[i] = -1;
+
+ for (i=0, blocks_found=0; i<part->total_blocks; i++) {
+ rc = mtd_read(part->mbd.mtd, i * part->block_size,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
+
+ if (!rc && retlen != part->header_size)
+ rc = -EIO;
+
+ if (rc)
+ goto err;
+
+ if (!build_block_map(part, i))
+ blocks_found++;
+ }
+
+ if (blocks_found == 0) {
+ printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
+ part->mbd.mtd->name);
+ rc = -ENOENT;
+ goto err;
+ }
+
+ if (part->reserved_block == -1) {
+ printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
+ part->mbd.mtd->name);
+
+ part->errors = 1;
+ }
+
+ return 0;
+
+err:
+ vfree(part->sector_map);
+ kfree(part->header_cache);
+ kfree(part->blocks);
+
+ return rc;
+}
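+
+/*
+ * Editor's note: a worked example of the geometry above, assuming a
+ * hypothetical 64KiB erase unit: sectors_per_block = 128, the header
+ * needs (3 + 128) * 2 = 262 bytes and therefore one 512-byte sector,
+ * leaving 127 data sectors per unit and a 260-byte header_size.
+ */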
+
+static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
+{
+ struct partition *part = (struct partition*)dev;
+ u_long addr;
+ size_t retlen;
+ int rc;
+
+ if (sector >= part->sector_count)
+ return -EIO;
+
+ addr = part->sector_map[sector];
+ if (addr != -1) {
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
+ if (!rc && retlen != SECTOR_SIZE)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_WARNING PREFIX "error reading '%s' at "
+ "0x%lx\n", part->mbd.mtd->name, addr);
+ return rc;
+ }
+ } else
+ memset(buf, 0, SECTOR_SIZE);
+
+ return 0;
+}
+
+static void erase_callback(struct erase_info *erase)
+{
+ struct partition *part;
+ u16 magic;
+ int i, rc;
+ size_t retlen;
+
+ part = (struct partition*)erase->priv;
+
+ i = (u32)erase->addr / part->block_size;
+ if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
+ erase->addr > UINT_MAX) {
+ printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
+ "on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
+ return;
+ }
+
+ if (erase->state != MTD_ERASE_DONE) {
+ printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
+ "state %d\n", (unsigned long long)erase->addr,
+ part->mbd.mtd->name, erase->state);
+
+ part->blocks[i].state = BLOCK_FAILED;
+ part->blocks[i].free_sectors = 0;
+ part->blocks[i].used_sectors = 0;
+
+ kfree(erase);
+
+ return;
+ }
+
+ magic = cpu_to_le16(RFD_MAGIC);
+
+ part->blocks[i].state = BLOCK_ERASED;
+ part->blocks[i].free_sectors = part->data_sectors_per_block;
+ part->blocks[i].used_sectors = 0;
+ part->blocks[i].erases++;
+
+ rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic),
+ &retlen, (u_char *)&magic);
+
+ if (!rc && retlen != sizeof(magic))
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "'%s': unable to write RFD "
+ "header at 0x%lx\n",
+ part->mbd.mtd->name,
+ part->blocks[i].offset);
+ part->blocks[i].state = BLOCK_FAILED;
+ }
+ else
+ part->blocks[i].state = BLOCK_OK;
+
+ kfree(erase);
+}
+
+static int erase_block(struct partition *part, int block)
+{
+ struct erase_info *erase;
+ int rc = -ENOMEM;
+
+ erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+ if (!erase)
+ goto err;
+
+ erase->mtd = part->mbd.mtd;
+ erase->callback = erase_callback;
+ erase->addr = part->blocks[block].offset;
+ erase->len = part->block_size;
+ erase->priv = (u_long)part;
+
+ part->blocks[block].state = BLOCK_ERASING;
+ part->blocks[block].free_sectors = 0;
+
+ rc = mtd_erase(part->mbd.mtd, erase);
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
+ "failed\n", (unsigned long long)erase->addr,
+ (unsigned long long)erase->len, part->mbd.mtd->name);
+ kfree(erase);
+ }
+
+err:
+ return rc;
+}
+
+static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
+{
+ void *sector_data;
+ u16 *map;
+ size_t retlen;
+ int i, rc = -ENOMEM;
+
+ part->is_reclaiming = 1;
+
+ sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
+ if (!sector_data)
+ goto err3;
+
+ map = kmalloc(part->header_size, GFP_KERNEL);
+ if (!map)
+ goto err2;
+
+ rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
+ part->header_size, &retlen, (u_char *)map);
+
+ if (!rc && retlen != part->header_size)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error reading '%s' at "
+ "0x%lx\n", part->mbd.mtd->name,
+ part->blocks[block_no].offset);
+
+ goto err;
+ }
+
+ for (i=0; i<part->data_sectors_per_block; i++) {
+ u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
+ u_long addr;
+
+
+ if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
+ continue;
+
+ if (entry == SECTOR_ZERO)
+ entry = 0;
+
+ /* already warned about and ignored in build_block_map() */
+ if (entry >= part->sector_count)
+ continue;
+
+ addr = part->blocks[block_no].offset +
+ (i + part->header_sectors_per_block) * SECTOR_SIZE;
+
+ if (*old_sector == addr) {
+ *old_sector = -1;
+ if (!part->blocks[block_no].used_sectors--) {
+ rc = erase_block(part, block_no);
+ break;
+ }
+ continue;
+ }
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ sector_data);
+
+ if (!rc && retlen != SECTOR_SIZE)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "'%s': Unable to "
+ "read sector for relocation\n",
+ part->mbd.mtd->name);
+
+ goto err;
+ }
+
+ rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
+ entry, sector_data);
+
+ if (rc)
+ goto err;
+ }
+
+err:
+ kfree(map);
+err2:
+ kfree(sector_data);
+err3:
+ part->is_reclaiming = 0;
+
+ return rc;
+}
+
+static int reclaim_block(struct partition *part, u_long *old_sector)
+{
+ int block, best_block, score, old_sector_block;
+ int rc;
+
+ /* we have a race if sync doesn't exist */
+ mtd_sync(part->mbd.mtd);
+
+	score = 0x7fffffff; /* INT_MAX */
+ best_block = -1;
+ if (*old_sector != -1)
+ old_sector_block = *old_sector / part->block_size;
+ else
+ old_sector_block = -1;
+
+ for (block=0; block<part->total_blocks; block++) {
+ int this_score;
+
+ if (block == part->reserved_block)
+ continue;
+
+		/*
+		 * Postpone reclaiming if there is a free sector: waiting
+		 * until more sectors have been deleted is more efficient
+		 * (fewer live sectors have to be moved).
+		 */
+ if (part->blocks[block].free_sectors)
+ return 0;
+
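+		/* Score: live sectors that must be moved, plus the erase
+		   count so that wear is levelled; the lowest score wins */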
+ this_score = part->blocks[block].used_sectors;
+
+ if (block == old_sector_block)
+ this_score--;
+ else {
+ /* no point in moving a full block */
+ if (part->blocks[block].used_sectors ==
+ part->data_sectors_per_block)
+ continue;
+ }
+
+ this_score += part->blocks[block].erases;
+
+ if (this_score < score) {
+ best_block = block;
+ score = this_score;
+ }
+ }
+
+ if (best_block == -1)
+ return -ENOSPC;
+
+ part->current_block = -1;
+ part->reserved_block = best_block;
+
+ pr_debug("reclaim_block: reclaiming block #%d with %d used "
+ "%d free sectors\n", best_block,
+ part->blocks[best_block].used_sectors,
+ part->blocks[best_block].free_sectors);
+
+ if (part->blocks[best_block].used_sectors)
+ rc = move_block_contents(part, best_block, old_sector);
+ else
+ rc = erase_block(part, best_block);
+
+ return rc;
+}
+
+/*
+ * IMPROVE: It would be best to choose the block with the most deleted sectors,
+ * because if we fill that one up first it'll have the best chance of having
+ * the fewest live sectors at reclaim.
+ */
+static int find_free_block(struct partition *part)
+{
+ int block, stop;
+
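+	/* Start scanning at a pseudo-random block (derived from jiffies)
+	   when there is no current block, to spread wear */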
+ block = part->current_block == -1 ?
+ jiffies % part->total_blocks : part->current_block;
+ stop = block;
+
+ do {
+ if (part->blocks[block].free_sectors &&
+ block != part->reserved_block)
+ return block;
+
+ if (part->blocks[block].state == BLOCK_UNUSED)
+ erase_block(part, block);
+
+ if (++block >= part->total_blocks)
+ block = 0;
+
+ } while (block != stop);
+
+ return -1;
+}
+
+static int find_writable_block(struct partition *part, u_long *old_sector)
+{
+ int rc, block;
+ size_t retlen;
+
+ block = find_free_block(part);
+
+ if (block == -1) {
+ if (!part->is_reclaiming) {
+ rc = reclaim_block(part, old_sector);
+ if (rc)
+ goto err;
+
+ block = find_free_block(part);
+ }
+
+ if (block == -1) {
+ rc = -ENOSPC;
+ goto err;
+ }
+ }
+
+ rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
+
+ if (!rc && retlen != part->header_size)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "'%s': unable to read header at "
+ "0x%lx\n", part->mbd.mtd->name,
+ part->blocks[block].offset);
+ goto err;
+ }
+
+ part->current_block = block;
+
+err:
+ return rc;
+}
+
+static int mark_sector_deleted(struct partition *part, u_long old_addr)
+{
+ int block, offset, rc;
+ u_long addr;
+ size_t retlen;
+ u16 del = cpu_to_le16(SECTOR_DELETED);
+
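+	/* Locate this sector's map entry: 'offset' is the sector's index
+	   within the block's data area, and the map of u16 entries starts
+	   HEADER_MAP_OFFSET words into the erase unit */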
+ block = old_addr / part->block_size;
+ offset = (old_addr % part->block_size) / SECTOR_SIZE -
+ part->header_sectors_per_block;
+
+ addr = part->blocks[block].offset +
+ (HEADER_MAP_OFFSET + offset) * sizeof(u16);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
+ (u_char *)&del);
+
+ if (!rc && retlen != sizeof(del))
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error writing '%s' at "
+ "0x%lx\n", part->mbd.mtd->name, addr);
+ goto err;
+ }
+ if (block == part->current_block)
+ part->header_cache[offset + HEADER_MAP_OFFSET] = del;
+
+ part->blocks[block].used_sectors--;
+
+ if (!part->blocks[block].used_sectors &&
+ !part->blocks[block].free_sectors)
+ rc = erase_block(part, block);
+
+err:
+ return rc;
+}
+
+static int find_free_sector(const struct partition *part, const struct block *block)
+{
+ int i, stop;
+
+ i = stop = part->data_sectors_per_block - block->free_sectors;
+
+ do {
+ if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
+ == SECTOR_FREE)
+ return i;
+
+ if (++i == part->data_sectors_per_block)
+ i = 0;
+ }
+ while(i != stop);
+
+ return -1;
+}
+
+static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
+{
+ struct partition *part = (struct partition*)dev;
+ struct block *block;
+ u_long addr;
+ int i;
+ int rc;
+ size_t retlen;
+ u16 entry;
+
+ if (part->current_block == -1 ||
+ !part->blocks[part->current_block].free_sectors) {
+
+ rc = find_writable_block(part, old_addr);
+ if (rc)
+ goto err;
+ }
+
+ block = &part->blocks[part->current_block];
+
+ i = find_free_sector(part, block);
+
+ if (i < 0) {
+ rc = -ENOSPC;
+ goto err;
+ }
+
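+	/* The sector data is written before its map entry below, so a
+	   power loss presumably leaves the sector unmapped rather than
+	   mapped to garbage */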
+ addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
+ block->offset;
+ rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
+
+ if (!rc && retlen != SECTOR_SIZE)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
+ part->mbd.mtd->name, addr);
+ goto err;
+ }
+
+ part->sector_map[sector] = addr;
+
+ entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
+
+ part->header_cache[i + HEADER_MAP_OFFSET] = entry;
+
+ addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
+ (u_char *)&entry);
+
+ if (!rc && retlen != sizeof(entry))
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
+ part->mbd.mtd->name, addr);
+ goto err;
+ }
+ block->used_sectors++;
+ block->free_sectors--;
+
+err:
+ return rc;
+}
+
+static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
+{
+ struct partition *part = (struct partition*)dev;
+ u_long old_addr;
+ int i;
+ int rc = 0;
+
+ pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
+
+ if (part->reserved_block == -1) {
+ rc = -EACCES;
+ goto err;
+ }
+
+ if (sector >= part->sector_count) {
+ rc = -EIO;
+ goto err;
+ }
+
+ old_addr = part->sector_map[sector];
+
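+	/* An all-zero sector is never stored: its map entry is left
+	   unmapped and rfd_ftl_readsect() returns zeroes for it */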
+ for (i=0; i<SECTOR_SIZE; i++) {
+ if (!buf[i])
+ continue;
+
+ rc = do_writesect(dev, sector, buf, &old_addr);
+ if (rc)
+ goto err;
+ break;
+ }
+
+ if (i == SECTOR_SIZE)
+ part->sector_map[sector] = -1;
+
+ if (old_addr != -1)
+ rc = mark_sector_deleted(part, old_addr);
+
+err:
+ return rc;
+}
+
+static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct partition *part = (struct partition*)dev;
+
+ geo->heads = 1;
+ geo->sectors = SECTORS_PER_TRACK;
+ geo->cylinders = part->cylinders;
+
+ return 0;
+}
+
+static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct partition *part;
+
+ if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
+ return;
+
+ part = kzalloc(sizeof(struct partition), GFP_KERNEL);
+ if (!part)
+ return;
+
+ part->mbd.mtd = mtd;
+
+ if (block_size)
+ part->block_size = block_size;
+ else {
+ if (!mtd->erasesize) {
+			printk(KERN_WARNING PREFIX "please provide block_size\n");
+ goto out;
+ } else
+ part->block_size = mtd->erasesize;
+ }
+
+ if (scan_header(part) == 0) {
+ part->mbd.size = part->sector_count;
+ part->mbd.tr = tr;
+ part->mbd.devnum = -1;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ part->mbd.readonly = 1;
+ else if (part->errors) {
+ printk(KERN_WARNING PREFIX "'%s': errors found, "
+ "setting read-only\n", mtd->name);
+ part->mbd.readonly = 1;
+ }
+
+ printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
+ mtd->name, mtd->type, mtd->flags);
+
+ if (!add_mtd_blktrans_dev((void*)part))
+ return;
+ }
+out:
+ kfree(part);
+}
+
+static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct partition *part = (struct partition*)dev;
+ int i;
+
+ for (i=0; i<part->total_blocks; i++) {
+ pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
+ part->mbd.mtd->name, i, part->blocks[i].erases);
+ }
+
+ del_mtd_blktrans_dev(dev);
+ vfree(part->sector_map);
+ kfree(part->header_cache);
+ kfree(part->blocks);
+}
+
+static struct mtd_blktrans_ops rfd_ftl_tr = {
+ .name = "rfd",
+ .major = RFD_FTL_MAJOR,
+ .part_bits = PART_BITS,
+ .blksize = SECTOR_SIZE,
+
+ .readsect = rfd_ftl_readsect,
+ .writesect = rfd_ftl_writesect,
+ .getgeo = rfd_ftl_getgeo,
+ .add_mtd = rfd_ftl_add_mtd,
+ .remove_dev = rfd_ftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_rfd_ftl(void)
+{
+ return register_mtd_blktrans(&rfd_ftl_tr);
+}
+
+static void __exit cleanup_rfd_ftl(void)
+{
+ deregister_mtd_blktrans(&rfd_ftl_tr);
+}
+
+module_init(init_rfd_ftl);
+module_exit(cleanup_rfd_ftl);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
+ "used by General Software's Embedded BIOS");
+
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
new file mode 100644
index 000000000..c23184a47
--- /dev/null
+++ b/drivers/mtd/sm_ftl.c
@@ -0,0 +1,1297 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/hdreg.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/sysfs.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand_ecc.h>
+#include "nand/sm_common.h"
+#include "sm_ftl.h"
+
+
+
+static struct workqueue_struct *cache_flush_workqueue;
+
+static int cache_timeout = 1000;
+module_param(cache_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_timeout,
+	"Timeout (in ms) for cache flush (1000 ms default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+
+/* ------------------- sysfs attributes ---------------------------------- */
+struct sm_sysfs_attribute {
+ struct device_attribute dev_attr;
+ char *data;
+ int len;
+};
+
+static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(attr, struct sm_sysfs_attribute, dev_attr);
+
+ strncpy(buf, sm_attr->data, sm_attr->len);
+ return sm_attr->len;
+}
+
+
+#define NUM_ATTRIBUTES 1
+#define SM_CIS_VENDOR_OFFSET 0x59
+static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute_group *attr_group;
+ struct attribute **attributes;
+ struct sm_sysfs_attribute *vendor_attribute;
+ char *vendor;
+
+ vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
+ SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
+ if (!vendor)
+ goto error1;
+
+ /* Initialize sysfs attributes */
+ vendor_attribute =
+ kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
+ if (!vendor_attribute)
+ goto error2;
+
+ sysfs_attr_init(&vendor_attribute->dev_attr.attr);
+
+ vendor_attribute->data = vendor;
+ vendor_attribute->len = strlen(vendor);
+ vendor_attribute->dev_attr.attr.name = "vendor";
+ vendor_attribute->dev_attr.attr.mode = S_IRUGO;
+ vendor_attribute->dev_attr.show = sm_attr_show;
+
+
+ /* Create array of pointers to the attributes */
+ attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
+ GFP_KERNEL);
+ if (!attributes)
+ goto error3;
+ attributes[0] = &vendor_attribute->dev_attr.attr;
+
+ /* Finally create the attribute group */
+ attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (!attr_group)
+ goto error4;
+ attr_group->attrs = attributes;
+ return attr_group;
+error4:
+ kfree(attributes);
+error3:
+ kfree(vendor_attribute);
+error2:
+ kfree(vendor);
+error1:
+ return NULL;
+}
+
+static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute **attributes = ftl->disk_attributes->attrs;
+ int i;
+
+ for (i = 0; attributes[i] ; i++) {
+
+ struct device_attribute *dev_attr = container_of(attributes[i],
+ struct device_attribute, attr);
+
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(dev_attr,
+ struct sm_sysfs_attribute, dev_attr);
+
+ kfree(sm_attr->data);
+ kfree(sm_attr);
+ }
+
+ kfree(ftl->disk_attributes->attrs);
+ kfree(ftl->disk_attributes);
+}
+
+
+/* ----------------------- oob helpers -------------------------------------- */
+
+static int sm_get_lba(uint8_t *lba)
+{
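+	/* Encoding: lba[0] = 0 0 0 1 0 b9 b8 b7, lba[1] = b6..b0 P,
+	   i.e. a 10-bit LBA with fixed guard bits and an even-parity bit */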
+ /* check fixed bits */
+ if ((lba[0] & 0xF8) != 0x10)
+ return -2;
+
+ /* check parity - endianness doesn't matter */
+ if (hweight16(*(uint16_t *)lba) & 1)
+ return -2;
+
+ return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
+}
+
+
+/*
+ * Read LBA associated with block
+ * returns -1, if block is erased
+ * returns -2 if error happens
+ */
+static int sm_read_lba(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ uint16_t lba_test;
+ int lba;
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
+ return -1;
+
+	/* Now check whether the two copies of the LBA differ by more than one bit */
+ lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
+ if (lba_test && !is_power_of_2(lba_test))
+ return -2;
+
+ /* And read it */
+ lba = sm_get_lba(oob->lba_copy1);
+
+ if (lba == -2)
+ lba = sm_get_lba(oob->lba_copy2);
+
+ return lba;
+}
+
+static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
+{
+ uint8_t tmp[2];
+
+ WARN_ON(lba >= 1000);
+
+ tmp[0] = 0x10 | ((lba >> 7) & 0x07);
+ tmp[1] = (lba << 1) & 0xFF;
+
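+	/* Set the parity bit so the 16-bit pattern has even weight,
+	   matching the parity check in sm_get_lba() */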
+ if (hweight16(*(uint16_t *)tmp) & 0x01)
+ tmp[1] |= 1;
+
+ oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
+ oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
+}
+
+
+/* Make offset from parts */
+static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
+{
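+	/* Zones are laid out at a fixed SM_MAX_ZONE_SIZE block stride on
+	   the media, independent of the usable zone_size */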
+ WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
+ WARN_ON(zone < 0 || zone >= ftl->zone_count);
+ WARN_ON(block >= ftl->zone_size);
+ WARN_ON(boffset >= ftl->block_size);
+
+ if (block == -1)
+ return -1;
+
+ return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
+}
+
+/* Breaks offset into parts */
+static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
+ int *zone, int *block, int *boffset)
+{
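+	/* Logical offset = ((zone * max_lba) + block) * block_size + boffset;
+	   each do_div() returns the remainder and leaves the quotient
+	   in 'offset' */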
+ *boffset = do_div(offset, ftl->block_size);
+ *block = do_div(offset, ftl->max_lba);
+ *zone = offset >= ftl->zone_count ? -1 : offset;
+}
+
+/* ---------------------- low level IO ------------------------------------- */
+
+static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
+{
+ uint8_t ecc[3];
+
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
+ if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
+ return -EIO;
+
+ buffer += SM_SMALL_PAGE;
+
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
+ if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
+ return -EIO;
+ return 0;
+}
+
+/* Reads a sector + oob*/
+static int sm_read_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct mtd_oob_ops ops;
+ struct sm_oob tmp_oob;
+ int ret = -EIO;
+ int try = 0;
+
+	/* FTL can contain -1 entries that by default read back as all 0xFF */
+ if (block == -1) {
+ memset(buffer, 0xFF, SM_SECTOR_SIZE);
+ return 0;
+ }
+
+ /* User might not need the oob, but we do for data verification */
+ if (!oob)
+ oob = &tmp_oob;
+
+ ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+
+again:
+ if (try++) {
+ /* Avoid infinite recursion on CIS reads, sm_recheck_media
+ won't help anyway */
+ if (zone == 0 && block == ftl->cis_block && boffset ==
+ ftl->cis_boffset)
+ return ret;
+
+ /* Test if media is stable */
+ if (try == 3 || sm_recheck_media(ftl))
+ return ret;
+ }
+
+ /* Unfortunately, oob read will _always_ succeed,
+ despite card removal..... */
+ ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Test for unknown errors */
+ if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
+ dbg("read of block %d at zone %d, failed due to error (%d)",
+ block, zone, ret);
+ goto again;
+ }
+
+ /* Do a basic test on the oob, to guard against returned garbage */
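+	/* 'reserved' must read back as all ones, or all ones with a single
+	   bitflip (~reserved a power of two); anything else is rejected */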
+ if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
+ goto again;
+
+ /* This should never happen, unless there is a bug in the mtd driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ if (!buffer)
+ return 0;
+
+ /* Test if sector marked as bad */
+ if (!sm_sector_valid(oob)) {
+ dbg("read of block %d at zone %d, failed because it is marked"
+			" as bad", block, zone);
+ goto again;
+ }
+
+ /* Test ECC*/
+ if (mtd_is_eccerr(ret) ||
+ (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
+
+ dbg("read of block %d at zone %d, failed due to ECC error",
+ block, zone);
+ goto again;
+ }
+
+ return 0;
+}
+
+/* Writes a sector to media */
+static int sm_write_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_oob_ops ops;
+ struct mtd_info *mtd = ftl->trans->mtd;
+ int ret;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone == 0 && (block == ftl->cis_block || block == 0)) {
+ dbg("attempted to write the CIS!");
+ return -EIO;
+ }
+
+ if (ftl->unstable)
+ return -EIO;
+
+ ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+
+ ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Now we assume that hardware will catch write bitflip errors */
+
+ if (ret) {
+ dbg("write to block %d at zone %d, failed with error %d",
+ block, zone, ret);
+
+ sm_recheck_media(ftl);
+ return ret;
+ }
+
+ /* This should never happen, unless there is a bug in the driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ return 0;
+}
+
+/* ------------------------ block IO ------------------------------------- */
+
+/* Write a block using data and lba, and invalid sector bitmap */
+static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
+ int zone, int block, int lba,
+ unsigned long invalid_bitmap)
+{
+ struct sm_oob oob;
+ int boffset;
+ int retry = 0;
+
+ /* Initialize the oob with requested values */
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ sm_write_lba(&oob, lba);
+restart:
+ if (ftl->unstable)
+ return -EIO;
+
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ oob.data_status = 0xFF;
+
+ if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
+
+			sm_printk("sector %d of block at LBA %d of zone %d"
+				" couldn't be read, marking it as invalid",
+ boffset / SM_SECTOR_SIZE, lba, zone);
+
+ oob.data_status = 0;
+ }
+
+ if (ftl->smallpagenand) {
+ __nand_calculate_ecc(buf + boffset,
+ SM_SMALL_PAGE, oob.ecc1);
+
+ __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
+ SM_SMALL_PAGE, oob.ecc2);
+ }
+ if (!sm_write_sector(ftl, zone, block, boffset,
+ buf + boffset, &oob))
+ continue;
+
+ if (!retry) {
+
+			/* If the write fails, try to erase the block.
+			   This is safe, because we never write to blocks
+			   that contain valuable data.
+			   It is intended to repair blocks that are marked
+			   as erased but aren't fully erased. */
+
+ if (sm_erase_block(ftl, zone, block, 0))
+ return -EIO;
+
+ retry = 1;
+ goto restart;
+ } else {
+ sm_mark_block_bad(ftl, zone, block);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+
+/* Mark whole block at offset 'offs' as bad. */
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
+{
+ struct sm_oob oob;
+ int boffset;
+
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ oob.block_status = 0xF0;
+
+ if (ftl->unstable)
+ return;
+
+ if (sm_recheck_media(ftl))
+ return;
+
+ sm_printk("marking block %d of zone %d as bad", block, zone);
+
+ /* We aren't checking the return value, because we don't care */
+	/* This also fails on fake xD cards, but I guess these won't expose
+	   any bad blocks till they fail completely */
+ for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
+ sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
+}
+
+/*
+ * Erase a block within a zone
+ * If erase succeeds, it updates free block fifo, otherwise marks block as bad
+ */
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct erase_info erase;
+
+ erase.mtd = mtd;
+ erase.callback = sm_erase_callback;
+ erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
+ erase.len = ftl->block_size;
+ erase.priv = (u_long)ftl;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
+ sm_printk("attempted to erase the CIS!");
+ return -EIO;
+ }
+
+ if (mtd_erase(mtd, &erase)) {
+ sm_printk("erase of block %d in zone %d failed",
+ block, zone_num);
+ goto error;
+ }
+
+ if (erase.state == MTD_ERASE_PENDING)
+ wait_for_completion(&ftl->erase_completion);
+
+ if (erase.state != MTD_ERASE_DONE) {
+ sm_printk("erase of block %d in zone %d failed after wait",
+ block, zone_num);
+ goto error;
+ }
+
+ if (put_free)
+ kfifo_in(&zone->free_sectors,
+ (const unsigned char *)&block, sizeof(block));
+
+ return 0;
+error:
+ sm_mark_block_bad(ftl, zone_num, block);
+ return -EIO;
+}
+
+static void sm_erase_callback(struct erase_info *self)
+{
+ struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
+ complete(&ftl->erase_completion);
+}
+
+/* Thoroughly test that block is valid. */
+static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
+{
+ int boffset;
+ struct sm_oob oob;
+ int lbas[] = { -3, 0, 0, 0 };
+ int i = 0;
+ int test_lba;
+
+
+ /* First just check that block doesn't look fishy */
+	/* Only blocks that are valid or are sliced in two parts are
+	   accepted */
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ /* This shouldn't happen anyway */
+ if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
+ return -2;
+
+ test_lba = sm_read_lba(&oob);
+
+ if (lbas[i] != test_lba)
+ lbas[++i] = test_lba;
+
+ /* If we found three different LBAs, something is fishy */
+ if (i == 3)
+ return -EIO;
+ }
+
+ /* If the block is sliced (partially erased usually) erase it */
+ if (i == 2) {
+ sm_erase_block(ftl, zone, block, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* ----------------- media scanning --------------------------------- */
+static const struct chs_entry chs_table[] = {
+ { 1, 125, 4, 4 },
+ { 2, 125, 4, 8 },
+ { 4, 250, 4, 8 },
+ { 8, 250, 4, 16 },
+ { 16, 500, 4, 16 },
+ { 32, 500, 8, 16 },
+ { 64, 500, 8, 32 },
+ { 128, 500, 16, 32 },
+ { 256, 1000, 16, 32 },
+ { 512, 1015, 32, 63 },
+ { 1024, 985, 33, 63 },
+ { 2048, 985, 33, 63 },
+ { 0 },
+};
+
+
+static const uint8_t cis_signature[] = {
+ 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
+};
+/* Find out media parameters.
+ * This ideally should be based on the NAND id, but for now device size is enough */
+static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
+{
+ int i;
+ int size_in_megs = mtd->size / (1024 * 1024);
+
+ ftl->readonly = mtd->type == MTD_ROM;
+
+ /* Manual settings for very old devices */
+ ftl->zone_count = 1;
+ ftl->smallpagenand = 0;
+
+ switch (size_in_megs) {
+ case 1:
+ /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+
+ break;
+ case 2:
+ /* 2 MiB flash SmartMedia (256 byte pages)*/
+ if (mtd->writesize == SM_SMALL_PAGE) {
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+ /* 2 MiB rom SmartMedia */
+ } else {
+
+ if (!ftl->readonly)
+ return -ENODEV;
+
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+ break;
+ case 4:
+ /* 4 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ break;
+ case 8:
+ /* 8 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+
+	/* Minimum xD size is 16 MiB. Also, all xD cards have standard zone
+	   sizes. SmartMedia cards exist up to 128 MiB and have the same layout */
+ if (size_in_megs >= 16) {
+ ftl->zone_count = size_in_megs / 16;
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 32 * SM_SECTOR_SIZE;
+ }
+
+	/* Test for proper write, erase and oob sizes */
+ if (mtd->erasesize > ftl->block_size)
+ return -ENODEV;
+
+ if (mtd->writesize > SM_SECTOR_SIZE)
+ return -ENODEV;
+
+ if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
+ return -ENODEV;
+
+ if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
+ return -ENODEV;
+
+ /* We use OOB */
+ if (!mtd_has_oob(mtd))
+ return -ENODEV;
+
+ /* Find geometry information */
+ for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
+ if (chs_table[i].size == size_in_megs) {
+ ftl->cylinders = chs_table[i].cyl;
+ ftl->heads = chs_table[i].head;
+ ftl->sectors = chs_table[i].sec;
+ return 0;
+ }
+ }
+
+ sm_printk("media has unknown size : %dMiB", size_in_megs);
+ ftl->cylinders = 985;
+ ftl->heads = 33;
+ ftl->sectors = 63;
+ return 0;
+}
+
+/* Validate the CIS */
+static int sm_read_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+
+ if (sm_read_sector(ftl,
+ 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
+ return -EIO;
+
+ if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
+ return -EIO;
+
+ if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
+ cis_signature, sizeof(cis_signature))) {
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Scan the media for the CIS */
+static int sm_find_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+ int block, boffset;
+ int block_found = 0;
+ int cis_found = 0;
+
+ /* Search for first valid block */
+ for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
+
+ if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
+ continue;
+
+ if (!sm_block_valid(&oob))
+ continue;
+ block_found = 1;
+ break;
+ }
+
+ if (!block_found)
+ return -EIO;
+
+ /* Search for first valid sector in this block */
+ for (boffset = 0 ; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
+ continue;
+
+ if (!sm_sector_valid(&oob))
+ continue;
+ break;
+ }
+
+ if (boffset == ftl->block_size)
+ return -EIO;
+
+ ftl->cis_block = block;
+ ftl->cis_boffset = boffset;
+ ftl->cis_page_offset = 0;
+
+ cis_found = !sm_read_cis(ftl);
+
+ if (!cis_found) {
+ ftl->cis_page_offset = SM_SMALL_PAGE;
+ cis_found = !sm_read_cis(ftl);
+ }
+
+ if (cis_found) {
+ dbg("CIS block found at offset %x",
+ block * ftl->block_size +
+ boffset + ftl->cis_page_offset);
+ return 0;
+ }
+ return -EIO;
+}
+
+/* Basic test to determine if the underlying mtd device is functional */
+static int sm_recheck_media(struct sm_ftl *ftl)
+{
+ if (sm_read_cis(ftl)) {
+
+ if (!ftl->unstable) {
+ sm_printk("media unstable, not allowing writes");
+ ftl->unstable = 1;
+ }
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Initialize a FTL zone */
+static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct sm_oob oob;
+ uint16_t block;
+ int lba;
+ int i = 0;
+ int len;
+
+ dbg("initializing zone %d", zone_num);
+
+ /* Allocate memory for FTL table */
+ zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);
+
+ if (!zone->lba_to_phys_table)
+ return -ENOMEM;
+ memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
+
+
+ /* Allocate memory for free sectors FIFO */
+ if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
+ kfree(zone->lba_to_phys_table);
+ return -ENOMEM;
+ }
+
+ /* Now scan the zone */
+ for (block = 0 ; block < ftl->zone_size ; block++) {
+
+ /* Skip blocks till the CIS (including) */
+ if (zone_num == 0 && block <= ftl->cis_block)
+ continue;
+
+ /* Read the oob of first sector */
+ if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
+ return -EIO;
+
+ /* Test to see if block is erased. It is enough to test
+ first sector, because erase happens in one shot */
+ if (sm_block_erased(&oob)) {
+ kfifo_in(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ continue;
+ }
+
+ /* If block is marked as bad, skip it */
+		/* This assumes we can trust the first sector */
+		/* However, the way the block valid status is defined ensures
+		   a very low probability of failure here */
+ if (!sm_block_valid(&oob)) {
+ dbg("PH %04d <-> <marked bad>", block);
+ continue;
+ }
+
+
+ lba = sm_read_lba(&oob);
+
+ /* Invalid LBA means that block is damaged. */
+		/* We can try to erase it, or mark it as bad, but
+		   let's leave that to a recovery application */
+ if (lba == -2 || lba >= ftl->max_lba) {
+ dbg("PH %04d <-> LBA %04d(bad)", block, lba);
+ continue;
+ }
+
+
+ /* If there is no collision,
+ just put the sector in the FTL table */
+ if (zone->lba_to_phys_table[lba] < 0) {
+ dbg_verbose("PH %04d <-> LBA %04d", block, lba);
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+ sm_printk("collision"
+ " of LBA %d between blocks %d and %d in zone %d",
+ lba, zone->lba_to_phys_table[lba], block, zone_num);
+
+ /* Test that this block is valid*/
+ if (sm_check_block(ftl, zone_num, block))
+ continue;
+
+ /* Test now the old block */
+ if (sm_check_block(ftl, zone_num,
+ zone->lba_to_phys_table[lba])) {
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+		/* If both blocks are valid and share the same LBA, they hold
+		   different versions of the same data. It is not known which
+		   is more recent, thus just erase one of them
+		*/
+		sm_printk("both blocks are valid, erasing the latter");
+ sm_erase_block(ftl, zone_num, block, 1);
+ }
+
+ dbg("zone initialized");
+ zone->initialized = 1;
+
+	/* No free sectors means that the zone is heavily damaged; writes won't
+	   work, but it can still be (partially) read */
+ if (!kfifo_len(&zone->free_sectors)) {
+ sm_printk("no free blocks in zone %d", zone_num);
+ return 0;
+ }
+
+ /* Randomize first block we write to */
+ get_random_bytes(&i, 2);
+ i %= (kfifo_len(&zone->free_sectors) / 2);
+
+ while (i--) {
+ len = kfifo_out(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ WARN_ON(len != 2);
+ kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
+ }
+ return 0;
+}
+
+/* Get and automatically initialize an FTL mapping for one zone */
+static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone;
+ int error;
+
+ BUG_ON(zone_num >= ftl->zone_count);
+ zone = &ftl->zones[zone_num];
+
+ if (!zone->initialized) {
+ error = sm_init_zone(ftl, zone_num);
+
+ if (error)
+ return ERR_PTR(error);
+ }
+ return zone;
+}
+
+
+/* ----------------- cache handling ------------------------------------------*/
+
+/* Initialize the one block cache */
+static void sm_cache_init(struct sm_ftl *ftl)
+{
+ ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
+ ftl->cache_clean = 1;
+ ftl->cache_zone = -1;
+ ftl->cache_block = -1;
+ /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
+}
+
+/* Put sector in one block cache */
+static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
+ clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
+ ftl->cache_clean = 0;
+}
+
+/* Read a sector from the cache */
+static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ if (test_bit(boffset / SM_SECTOR_SIZE,
+ &ftl->cache_data_invalid_bitmap))
+ return -1;
+
+ memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
+ return 0;
+}
+
+/* Write the cache to hardware */
+static int sm_cache_flush(struct sm_ftl *ftl)
+{
+ struct ftl_zone *zone;
+
+ int sector_num;
+ uint16_t write_sector;
+ int zone_num = ftl->cache_zone;
+ int block_num;
+
+ if (ftl->cache_clean)
+ return 0;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(zone_num < 0);
+ zone = &ftl->zones[zone_num];
+ block_num = zone->lba_to_phys_table[ftl->cache_block];
+
+
+ /* Try to read all unread areas of the cache block*/
+ for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
+ ftl->block_size / SM_SECTOR_SIZE) {
+
+ if (!sm_read_sector(ftl,
+ zone_num, block_num, sector_num * SM_SECTOR_SIZE,
+ ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
+ clear_bit(sector_num,
+ &ftl->cache_data_invalid_bitmap);
+ }
+restart:
+
+ if (ftl->unstable)
+ return -EIO;
+
+	/* If there are no spare blocks, we could still continue by
+	   erasing/writing the current block, but for such worn-out media
+	   it isn't worth the trouble, or the danger */
+ if (kfifo_out(&zone->free_sectors,
+ (unsigned char *)&write_sector, 2) != 2) {
+ dbg("no free sectors for write!");
+ return -EIO;
+ }
+
+
+ if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
+ ftl->cache_block, ftl->cache_data_invalid_bitmap))
+ goto restart;
+
+ /* Update the FTL table */
+ zone->lba_to_phys_table[ftl->cache_block] = write_sector;
+
+	/* Write successful, so erase and free the old block */
+ if (block_num > 0)
+ sm_erase_block(ftl, zone_num, block_num, 1);
+
+ sm_cache_init(ftl);
+ return 0;
+}
+
+
+/* flush timer, runs a second after last write */
+static void sm_cache_flush_timer(unsigned long data)
+{
+ struct sm_ftl *ftl = (struct sm_ftl *)data;
+ queue_work(cache_flush_workqueue, &ftl->flush_work);
+}
+
+/* cache flush work, kicked by timer */
+static void sm_cache_flush_work(struct work_struct *work)
+{
+ struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
+ mutex_lock(&ftl->mutex);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return;
+}
+
+/* ---------------- outside interface -------------------------------------- */
+
+/* outside interface: read a sector */
+static int sm_read(struct mtd_blktrans_dev *dev,
+ unsigned long sect_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+ int error = 0, in_cache = 0;
+ int zone_num, block, boffset;
+
+ sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
+ mutex_lock(&ftl->mutex);
+
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+ /* Have to look at cache first */
+ if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
+ in_cache = 1;
+ if (!sm_cache_get(ftl, buf, boffset))
+ goto unlock;
+ }
+
+	/* Translate the block and return if it doesn't exist in the table */
+ block = zone->lba_to_phys_table[block];
+
+ if (block == -1) {
+ memset(buf, 0xFF, SM_SECTOR_SIZE);
+ goto unlock;
+ }
+
+ if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
+ error = -EIO;
+ goto unlock;
+ }
+
+ if (in_cache)
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: write a sector */
+static int sm_write(struct mtd_blktrans_dev *dev,
+ unsigned long sec_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+ int error = 0, zone_num, block, boffset;
+
+ BUG_ON(ftl->readonly);
+ sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
+
+	/* No need for the flush thread to run now */
+ del_timer(&ftl->timer);
+ mutex_lock(&ftl->mutex);
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+ /* If entry is not in cache, flush it */
+ if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
+
+ error = sm_cache_flush(ftl);
+ if (error)
+ goto unlock;
+
+ ftl->cache_block = block;
+ ftl->cache_zone = zone_num;
+ }
+
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: flush everything */
+static int sm_flush(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int retval;
+
+ mutex_lock(&ftl->mutex);
+ retval = sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return retval;
+}
+
+/* outside interface: device is released */
+static void sm_release(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+
+ mutex_lock(&ftl->mutex);
+ del_timer_sync(&ftl->timer);
+ cancel_work_sync(&ftl->flush_work);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+}
+
+/* outside interface: get geometry */
+static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct sm_ftl *ftl = dev->priv;
+ geo->heads = ftl->heads;
+ geo->sectors = ftl->sectors;
+ geo->cylinders = ftl->cylinders;
+ return 0;
+}
+
+/* external interface: main initialization function */
+static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtd_blktrans_dev *trans;
+ struct sm_ftl *ftl;
+
+ /* Allocate & initialize our private structure */
+ ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
+ if (!ftl)
+ goto error1;
+
+
+ mutex_init(&ftl->mutex);
+ setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+ INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
+ init_completion(&ftl->erase_completion);
+
+ /* Read media information */
+ if (sm_get_media_info(ftl, mtd)) {
+ dbg("found unsupported mtd device, aborting");
+ goto error2;
+ }
+
+
+ /* Allocate temporary CIS buffer for read retry support */
+ ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+ if (!ftl->cis_buffer)
+ goto error2;
+
+ /* Allocate zone array, it will be initialized on demand */
+ ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,
+ GFP_KERNEL);
+ if (!ftl->zones)
+ goto error3;
+
+ /* Allocate the cache*/
+ ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
+
+ if (!ftl->cache_data)
+ goto error4;
+
+ sm_cache_init(ftl);
+
+
+ /* Allocate upper layer structure and initialize it */
+ trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
+ if (!trans)
+ goto error5;
+
+ ftl->trans = trans;
+ trans->priv = ftl;
+
+ trans->tr = tr;
+ trans->mtd = mtd;
+ trans->devnum = -1;
+ trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
+ trans->readonly = ftl->readonly;
+
+ if (sm_find_cis(ftl)) {
+ dbg("CIS not found on mtd device, aborting");
+ goto error6;
+ }
+
+ ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
+ if (!ftl->disk_attributes)
+ goto error6;
+ trans->disk_attributes = ftl->disk_attributes;
+
+ sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
+ (int)(mtd->size / (1024 * 1024)), mtd->index);
+
+ dbg("FTL layout:");
+ dbg("%d zone(s), each consists of %d blocks (+%d spares)",
+ ftl->zone_count, ftl->max_lba,
+ ftl->zone_size - ftl->max_lba);
+ dbg("each block consists of %d bytes",
+ ftl->block_size);
+
+
+ /* Register device*/
+ if (add_mtd_blktrans_dev(trans)) {
+ dbg("error in mtdblktrans layer");
+ goto error6;
+ }
+ return;
+error6:
+ kfree(trans);
+error5:
+ kfree(ftl->cache_data);
+error4:
+ kfree(ftl->zones);
+error3:
+ kfree(ftl->cis_buffer);
+error2:
+ kfree(ftl);
+error1:
+ return;
+}
+
+/* main interface: device {surprise,} removal */
+static void sm_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int i;
+
+ del_mtd_blktrans_dev(dev);
+ ftl->trans = NULL;
+
+ for (i = 0 ; i < ftl->zone_count; i++) {
+
+ if (!ftl->zones[i].initialized)
+ continue;
+
+ kfree(ftl->zones[i].lba_to_phys_table);
+ kfifo_free(&ftl->zones[i].free_sectors);
+ }
+
+ sm_delete_sysfs_attributes(ftl);
+ kfree(ftl->cis_buffer);
+ kfree(ftl->zones);
+ kfree(ftl->cache_data);
+ kfree(ftl);
+}
+
+static struct mtd_blktrans_ops sm_ftl_ops = {
+ .name = "smblk",
+ .major = 0,
+ .part_bits = SM_FTL_PARTN_BITS,
+ .blksize = SM_SECTOR_SIZE,
+ .getgeo = sm_getgeo,
+
+ .add_mtd = sm_add_mtd,
+ .remove_dev = sm_remove_dev,
+
+ .readsect = sm_read,
+ .writesect = sm_write,
+
+ .flush = sm_flush,
+ .release = sm_release,
+
+ .owner = THIS_MODULE,
+};
+
+static __init int sm_module_init(void)
+{
+ int error = 0;
+
+ cache_flush_workqueue = create_freezable_workqueue("smflush");
+ if (!cache_flush_workqueue)
+ return -ENOMEM;
+
+ error = register_mtd_blktrans(&sm_ftl_ops);
+ if (error)
+ destroy_workqueue(cache_flush_workqueue);
+ return error;
+
+}
+
+static void __exit sm_module_exit(void)
+{
+ destroy_workqueue(cache_flush_workqueue);
+ deregister_mtd_blktrans(&sm_ftl_ops);
+}
+
+module_init(sm_module_init);
+module_exit(sm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
diff --git a/drivers/mtd/sm_ftl.h b/drivers/mtd/sm_ftl.h
new file mode 100644
index 000000000..43bb73007
--- /dev/null
+++ b/drivers/mtd/sm_ftl.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * Based loosely on ssfdc.c which is
+ * © 2005 Eptar srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/blktrans.h>
+#include <linux/kfifo.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+
+
+
+struct ftl_zone {
+ bool initialized;
+ int16_t *lba_to_phys_table; /* LBA to physical table */
+ struct kfifo free_sectors; /* queue of free sectors */
+};
+
+struct sm_ftl {
+ struct mtd_blktrans_dev *trans;
+
+ struct mutex mutex; /* protects the structure */
+ struct ftl_zone *zones; /* FTL tables for each zone */
+
+ /* Media information */
+ int block_size; /* block size in bytes */
+ int zone_size; /* zone size in blocks */
+ int zone_count; /* number of zones */
+ int max_lba; /* maximum lba in a zone */
+ int smallpagenand; /* 256 bytes/page nand */
+ bool readonly; /* is FS readonly */
+ bool unstable;
+ int cis_block; /* CIS block location */
+ int cis_boffset; /* CIS offset in the block */
+ int cis_page_offset; /* CIS offset in the page */
+ void *cis_buffer; /* tmp buffer for cis reads */
+
+ /* Cache */
+ int cache_block; /* block number of cached block */
+ int cache_zone; /* zone of cached block */
+ unsigned char *cache_data; /* cached block data */
+ long unsigned int cache_data_invalid_bitmap;
+ bool cache_clean;
+ struct work_struct flush_work;
+ struct timer_list timer;
+
+ /* Async erase stuff */
+ struct completion erase_completion;
+
+ /* Geometry stuff */
+ int heads;
+ int sectors;
+ int cylinders;
+
+ struct attribute_group *disk_attributes;
+};
+
+struct chs_entry {
+ unsigned long size;
+ unsigned short cyl;
+ unsigned char head;
+ unsigned char sec;
+};
+
+
+#define SM_FTL_PARTN_BITS 3
+
+#define sm_printk(format, ...) \
+ printk(KERN_WARNING "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+#define dbg(format, ...) \
+	do { \
+		if (debug) \
+			printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__); \
+	} while (0)
+
+#define dbg_verbose(format, ...) \
+	do { \
+		if (debug > 1) \
+			printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__); \
+	} while (0)
+
+
+static void sm_erase_callback(struct erase_info *self);
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free);
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);
+
+static int sm_recheck_media(struct sm_ftl *ftl);
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
new file mode 100644
index 000000000..64a4f0eda
--- /dev/null
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -0,0 +1,31 @@
+menuconfig MTD_SPI_NOR
+ tristate "SPI-NOR device support"
+ depends on MTD
+ help
+	  This is the framework for SPI NOR flash, which can be used by
+	  SPI controller drivers and by the SPI-NOR device driver.
+
+if MTD_SPI_NOR
+
+config MTD_SPI_NOR_USE_4K_SECTORS
+ bool "Use small 4096 B erase sectors"
+ default y
+ help
+ Many flash memories support erasing small (4096 B) sectors. Depending
+ on the usage this feature may provide performance gain in comparison
+ to erasing whole blocks (32/64 KiB).
+ Changing a small part of the flash's contents is usually faster with
+ small sectors. On the other hand erasing should be faster when using
+ 64 KiB block instead of 16 × 4 KiB sectors.
+
+ Please note that some tools/drivers/filesystems may not work with
+ 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
+
+config SPI_FSL_QUADSPI
+ tristate "Freescale Quad SPI controller"
+ depends on ARCH_MXC
+ help
+ This enables support for the Quad SPI controller in master mode.
+ We only connect the NOR to this controller now.
+
+endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
new file mode 100644
index 000000000..6a7ce1462
--- /dev/null
+++ b/drivers/mtd/spi-nor/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
+obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
new file mode 100644
index 000000000..5d5d36272
--- /dev/null
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -0,0 +1,1012 @@
+/*
+ * Freescale QuadSPI driver.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+
+/* The registers */
+#define QUADSPI_MCR 0x00
+#define QUADSPI_MCR_RESERVED_SHIFT 16
+#define QUADSPI_MCR_RESERVED_MASK (0xF << QUADSPI_MCR_RESERVED_SHIFT)
+#define QUADSPI_MCR_MDIS_SHIFT 14
+#define QUADSPI_MCR_MDIS_MASK (1 << QUADSPI_MCR_MDIS_SHIFT)
+#define QUADSPI_MCR_CLR_TXF_SHIFT 11
+#define QUADSPI_MCR_CLR_TXF_MASK (1 << QUADSPI_MCR_CLR_TXF_SHIFT)
+#define QUADSPI_MCR_CLR_RXF_SHIFT 10
+#define QUADSPI_MCR_CLR_RXF_MASK (1 << QUADSPI_MCR_CLR_RXF_SHIFT)
+#define QUADSPI_MCR_DDR_EN_SHIFT 7
+#define QUADSPI_MCR_DDR_EN_MASK (1 << QUADSPI_MCR_DDR_EN_SHIFT)
+#define QUADSPI_MCR_END_CFG_SHIFT 2
+#define QUADSPI_MCR_END_CFG_MASK (3 << QUADSPI_MCR_END_CFG_SHIFT)
+#define QUADSPI_MCR_SWRSTHD_SHIFT 1
+#define QUADSPI_MCR_SWRSTHD_MASK (1 << QUADSPI_MCR_SWRSTHD_SHIFT)
+#define QUADSPI_MCR_SWRSTSD_SHIFT 0
+#define QUADSPI_MCR_SWRSTSD_MASK (1 << QUADSPI_MCR_SWRSTSD_SHIFT)
+
+#define QUADSPI_IPCR 0x08
+#define QUADSPI_IPCR_SEQID_SHIFT 24
+#define QUADSPI_IPCR_SEQID_MASK (0xF << QUADSPI_IPCR_SEQID_SHIFT)
+
+#define QUADSPI_BUF0CR 0x10
+#define QUADSPI_BUF1CR 0x14
+#define QUADSPI_BUF2CR 0x18
+#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
+
+#define QUADSPI_BUF3CR 0x1c
+#define QUADSPI_BUF3CR_ALLMST_SHIFT 31
+#define QUADSPI_BUF3CR_ALLMST_MASK (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
+#define QUADSPI_BUF3CR_ADATSZ_SHIFT 8
+#define QUADSPI_BUF3CR_ADATSZ_MASK (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT)
+
+#define QUADSPI_BFGENCR 0x20
+#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16
+#define QUADSPI_BFGENCR_PAR_EN_MASK (1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT))
+#define QUADSPI_BFGENCR_SEQID_SHIFT 12
+#define QUADSPI_BFGENCR_SEQID_MASK (0xF << QUADSPI_BFGENCR_SEQID_SHIFT)
+
+#define QUADSPI_BUF0IND 0x30
+#define QUADSPI_BUF1IND 0x34
+#define QUADSPI_BUF2IND 0x38
+#define QUADSPI_SFAR 0x100
+
+#define QUADSPI_SMPR 0x108
+#define QUADSPI_SMPR_DDRSMP_SHIFT 16
+#define QUADSPI_SMPR_DDRSMP_MASK (7 << QUADSPI_SMPR_DDRSMP_SHIFT)
+#define QUADSPI_SMPR_FSDLY_SHIFT 6
+#define QUADSPI_SMPR_FSDLY_MASK (1 << QUADSPI_SMPR_FSDLY_SHIFT)
+#define QUADSPI_SMPR_FSPHS_SHIFT 5
+#define QUADSPI_SMPR_FSPHS_MASK (1 << QUADSPI_SMPR_FSPHS_SHIFT)
+#define QUADSPI_SMPR_HSENA_SHIFT 0
+#define QUADSPI_SMPR_HSENA_MASK (1 << QUADSPI_SMPR_HSENA_SHIFT)
+
+#define QUADSPI_RBSR 0x10c
+#define QUADSPI_RBSR_RDBFL_SHIFT 8
+#define QUADSPI_RBSR_RDBFL_MASK (0x3F << QUADSPI_RBSR_RDBFL_SHIFT)
+
+#define QUADSPI_RBCT 0x110
+#define QUADSPI_RBCT_WMRK_MASK 0x1F
+#define QUADSPI_RBCT_RXBRD_SHIFT 8
+#define QUADSPI_RBCT_RXBRD_USEIPS (0x1 << QUADSPI_RBCT_RXBRD_SHIFT)
+
+#define QUADSPI_TBSR 0x150
+#define QUADSPI_TBDR 0x154
+#define QUADSPI_SR 0x15c
+#define QUADSPI_SR_IP_ACC_SHIFT 1
+#define QUADSPI_SR_IP_ACC_MASK (0x1 << QUADSPI_SR_IP_ACC_SHIFT)
+#define QUADSPI_SR_AHB_ACC_SHIFT 2
+#define QUADSPI_SR_AHB_ACC_MASK (0x1 << QUADSPI_SR_AHB_ACC_SHIFT)
+
+#define QUADSPI_FR 0x160
+#define QUADSPI_FR_TFF_MASK 0x1
+
+#define QUADSPI_SFA1AD 0x180
+#define QUADSPI_SFA2AD 0x184
+#define QUADSPI_SFB1AD 0x188
+#define QUADSPI_SFB2AD 0x18c
+#define QUADSPI_RBDR 0x200
+
+#define QUADSPI_LUTKEY 0x300
+#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define QUADSPI_LCKCR 0x304
+#define QUADSPI_LCKER_LOCK 0x1
+#define QUADSPI_LCKER_UNLOCK 0x2
+
+#define QUADSPI_RSER 0x164
+#define QUADSPI_RSER_TFIE (0x1 << 0)
+
+#define QUADSPI_LUT_BASE 0x310
+
+/*
+ * The definition of the LUT register is shown below:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define OPRND0_SHIFT 0
+#define PAD0_SHIFT 8
+#define INSTR0_SHIFT 10
+#define OPRND1_SHIFT 16
+
+/* Instruction set for the LUT register. */
+#define LUT_STOP 0
+#define LUT_CMD 1
+#define LUT_ADDR 2
+#define LUT_DUMMY 3
+#define LUT_MODE 4
+#define LUT_MODE2 5
+#define LUT_MODE4 6
+#define LUT_READ 7
+#define LUT_WRITE 8
+#define LUT_JMP_ON_CS 9
+#define LUT_ADDR_DDR 10
+#define LUT_MODE_DDR 11
+#define LUT_MODE2_DDR 12
+#define LUT_MODE4_DDR 13
+#define LUT_READ_DDR 14
+#define LUT_WRITE_DDR 15
+#define LUT_DATA_LEARN 16
+
+/*
+ * The PAD definitions for LUT register.
+ *
+ * The pad stands for the number of IO lines used, out of IO[0:3].
+ * For example, a Quad read needs four IO lines, so you should
+ * set LUT_PAD4, which means we use four IO lines.
+ */
+#define LUT_PAD1 0
+#define LUT_PAD2 1
+#define LUT_PAD4 2
+
+/* Operands for the LUT register. */
+#define ADDR24BIT 0x18
+#define ADDR32BIT 0x20
+
+/* Macros for constructing the LUT register. */
+#define LUT0(ins, pad, opr) \
+ (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \
+ ((LUT_##ins) << INSTR0_SHIFT))
+
+#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT)
+
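+/*
+ * For example, LUT0(CMD, PAD1, SPINOR_OP_WREN) packs a single-line WREN
+ * command into the low half-word of a LUT entry, and OR-ing in
+ * LUT1(ADDR, PAD1, ADDR24BIT) adds a 24-bit single-line address phase
+ * in the high half-word, as in fsl_qspi_init_lut() below.
+ */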
+/* other macros for LUT register. */
+#define QUADSPI_LUT(x) (QUADSPI_LUT_BASE + (x) * 4)
+#define QUADSPI_LUT_NUM 64
+
+/* SEQID -- we can have 16 seqids at most. */
+#define SEQID_QUAD_READ 0
+#define SEQID_WREN 1
+#define SEQID_WRDI 2
+#define SEQID_RDSR 3
+#define SEQID_SE 4
+#define SEQID_CHIP_ERASE 5
+#define SEQID_PP 6
+#define SEQID_RDID 7
+#define SEQID_WRSR 8
+#define SEQID_RDCR 9
+#define SEQID_EN4B 10
+#define SEQID_BRWR 11
+
+enum fsl_qspi_devtype {
+ FSL_QUADSPI_VYBRID,
+ FSL_QUADSPI_IMX6SX,
+};
+
+struct fsl_qspi_devtype_data {
+ enum fsl_qspi_devtype devtype;
+ int rxfifo;
+ int txfifo;
+ int ahb_buf_size;
+};
+
+static struct fsl_qspi_devtype_data vybrid_data = {
+ .devtype = FSL_QUADSPI_VYBRID,
+ .rxfifo = 128,
+ .txfifo = 64,
+ .ahb_buf_size = 1024
+};
+
+static struct fsl_qspi_devtype_data imx6sx_data = {
+ .devtype = FSL_QUADSPI_IMX6SX,
+ .rxfifo = 128,
+ .txfifo = 512,
+ .ahb_buf_size = 1024
+};
+
+#define FSL_QSPI_MAX_CHIP 4
+struct fsl_qspi {
+ struct mtd_info mtd[FSL_QSPI_MAX_CHIP];
+ struct spi_nor nor[FSL_QSPI_MAX_CHIP];
+ void __iomem *iobase;
+ void __iomem *ahb_base; /* Used when read from AHB bus */
+ u32 memmap_phy;
+ struct clk *clk, *clk_en;
+ struct device *dev;
+ struct completion c;
+ struct fsl_qspi_devtype_data *devtype_data;
+ u32 nor_size;
+ u32 nor_num;
+ u32 clk_rate;
+ unsigned int chip_base_addr; /* We may support two chips. */
+ bool has_second_chip;
+};
+
+static inline int is_vybrid_qspi(struct fsl_qspi *q)
+{
+ return q->devtype_data->devtype == FSL_QUADSPI_VYBRID;
+}
+
+static inline int is_imx6sx_qspi(struct fsl_qspi *q)
+{
+ return q->devtype_data->devtype == FSL_QUADSPI_IMX6SX;
+}
+
+/*
+ * An IC bug makes us re-arrange the 32-bit data.
+ * Later chips, such as IMX6SLX, have fixed this bug.
+ */
+static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
+{
+ return is_vybrid_qspi(q) ? __swab32(a) : a;
+}
+
+static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
+{
+ writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ writel(QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+static inline void fsl_qspi_lock_lut(struct fsl_qspi *q)
+{
+ writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ writel(QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
+{
+ struct fsl_qspi *q = dev_id;
+ u32 reg;
+
+ /* clear interrupt */
+ reg = readl(q->iobase + QUADSPI_FR);
+ writel(reg, q->iobase + QUADSPI_FR);
+
+ if (reg & QUADSPI_FR_TFF_MASK)
+ complete(&q->c);
+
+ dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg);
+ return IRQ_HANDLED;
+}
+
+static void fsl_qspi_init_lut(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ int rxfifo = q->devtype_data->rxfifo;
+ u32 lut_base;
+ u8 cmd, addrlen, dummy;
+ int i;
+
+ fsl_qspi_unlock_lut(q);
+
+	/* Clear the entire LUT table */
+ for (i = 0; i < QUADSPI_LUT_NUM; i++)
+ writel(0, base + QUADSPI_LUT_BASE + i * 4);
+
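+	/*
+	 * Each sequence ID owns four consecutive LUT words (16 sequences x
+	 * 4 words = QUADSPI_LUT_NUM), so a sequence's first word lives at
+	 * LUT index SEQID * 4; SEQID_PP's entry, for instance, starts at
+	 * index 24.
+	 */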
+ /* Quad Read */
+ lut_base = SEQID_QUAD_READ * 4;
+
+ if (q->nor_size <= SZ_16M) {
+ cmd = SPINOR_OP_READ_1_1_4;
+ addrlen = ADDR24BIT;
+ dummy = 8;
+ } else {
+ /* use the 4-byte address */
+ cmd = SPINOR_OP_READ_1_1_4;
+ addrlen = ADDR32BIT;
+ dummy = 8;
+ }
+
+ writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+ writel(LUT0(DUMMY, PAD1, dummy) | LUT1(READ, PAD4, rxfifo),
+ base + QUADSPI_LUT(lut_base + 1));
+
+ /* Write enable */
+ lut_base = SEQID_WREN * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + QUADSPI_LUT(lut_base));
+
+ /* Page Program */
+ lut_base = SEQID_PP * 4;
+
+ if (q->nor_size <= SZ_16M) {
+ cmd = SPINOR_OP_PP;
+ addrlen = ADDR24BIT;
+ } else {
+ /* use the 4-byte address */
+ cmd = SPINOR_OP_PP;
+ addrlen = ADDR32BIT;
+ }
+
+ writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+ writel(LUT0(WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
+
+ /* Read Status */
+ lut_base = SEQID_RDSR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(READ, PAD1, 0x1),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Erase a sector */
+ lut_base = SEQID_SE * 4;
+
+ if (q->nor_size <= SZ_16M) {
+ cmd = SPINOR_OP_SE;
+ addrlen = ADDR24BIT;
+ } else {
+ /* use the 4-byte address */
+ cmd = SPINOR_OP_SE;
+ addrlen = ADDR32BIT;
+ }
+
+ writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Erase the whole chip */
+ lut_base = SEQID_CHIP_ERASE * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
+ base + QUADSPI_LUT(lut_base));
+
+ /* READ ID */
+ lut_base = SEQID_RDID * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(READ, PAD1, 0x8),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Write Register */
+ lut_base = SEQID_WRSR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(WRITE, PAD1, 0x2),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Read Configuration Register */
+ lut_base = SEQID_RDCR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(READ, PAD1, 0x1),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Write disable */
+ lut_base = SEQID_WRDI * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + QUADSPI_LUT(lut_base));
+
+ /* Enter 4 Byte Mode (Micron) */
+ lut_base = SEQID_EN4B * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + QUADSPI_LUT(lut_base));
+
+ /* Enter 4 Byte Mode (Spansion) */
+ lut_base = SEQID_BRWR * 4;
+ writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + QUADSPI_LUT(lut_base));
+
+ fsl_qspi_lock_lut(q);
+}
+
+/* Get the SEQID for the command */
+static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
+{
+ switch (cmd) {
+ case SPINOR_OP_READ_1_1_4:
+ return SEQID_QUAD_READ;
+ case SPINOR_OP_WREN:
+ return SEQID_WREN;
+ case SPINOR_OP_WRDI:
+ return SEQID_WRDI;
+ case SPINOR_OP_RDSR:
+ return SEQID_RDSR;
+ case SPINOR_OP_SE:
+ return SEQID_SE;
+ case SPINOR_OP_CHIP_ERASE:
+ return SEQID_CHIP_ERASE;
+ case SPINOR_OP_PP:
+ return SEQID_PP;
+ case SPINOR_OP_RDID:
+ return SEQID_RDID;
+ case SPINOR_OP_WRSR:
+ return SEQID_WRSR;
+ case SPINOR_OP_RDCR:
+ return SEQID_RDCR;
+ case SPINOR_OP_EN4B:
+ return SEQID_EN4B;
+ case SPINOR_OP_BRWR:
+ return SEQID_BRWR;
+ default:
+ dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
+ break;
+ }
+ return -EINVAL;
+}
+
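+/*
+ * Run one LUT sequence through the IP command interface. In outline (a
+ * plain-English reading of the code below, not an authoritative
+ * programming model): point QUADSPI_SFAR at the flash address, route RX
+ * data to the IP registers, wait for the controller to go idle, write
+ * the sequence ID and transfer length to QUADSPI_IPCR, then sleep until
+ * the transaction-finished interrupt completes &q->c.
+ */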
+static int
+fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
+{
+ void __iomem *base = q->iobase;
+ int seqid;
+ u32 reg, reg2;
+ int err;
+
+ init_completion(&q->c);
+ dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n",
+ q->chip_base_addr, addr, len, cmd);
+
+ /* save the reg */
+ reg = readl(base + QUADSPI_MCR);
+
+ writel(q->memmap_phy + q->chip_base_addr + addr, base + QUADSPI_SFAR);
+ writel(QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
+ base + QUADSPI_RBCT);
+ writel(reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
+
+ do {
+ reg2 = readl(base + QUADSPI_SR);
+ if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) {
+ udelay(1);
+ dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2);
+ continue;
+ }
+ break;
+ } while (1);
+
+ /* trigger the LUT now */
+ seqid = fsl_qspi_get_seqid(q, cmd);
+ writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR);
+
+ /* Wait for the interrupt. */
+ if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) {
+ dev_err(q->dev,
+ "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
+ cmd, addr, readl(base + QUADSPI_FR),
+ readl(base + QUADSPI_SR));
+ err = -ETIMEDOUT;
+ } else {
+ err = 0;
+ }
+
+ /* restore the MCR */
+ writel(reg, base + QUADSPI_MCR);
+
+ return err;
+}
+
+/* Read out the data from the QUADSPI_RBDR buffer registers. */
+static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
+{
+ u32 tmp;
+ int i = 0;
+
+ while (len > 0) {
+ tmp = readl(q->iobase + QUADSPI_RBDR + i * 4);
+ tmp = fsl_qspi_endian_xchg(q, tmp);
+ dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n",
+ q->chip_base_addr, tmp);
+
+ if (len >= 4) {
+ *((u32 *)rxbuf) = tmp;
+ rxbuf += 4;
+ } else {
+ memcpy(rxbuf, &tmp, len);
+ break;
+ }
+
+ len -= 4;
+ i++;
+ }
+}
+
+/*
+ * If we have changed the content of the flash by writing or erasing,
+ * we need to invalidate the AHB buffer. If we do not do so, we may read out
+ * the wrong data. The spec tells us reset the AHB domain and Serial Flash
+ * domain at the same time.
+ */
+static inline void fsl_qspi_invalid(struct fsl_qspi *q)
+{
+ u32 reg;
+
+ reg = readl(q->iobase + QUADSPI_MCR);
+ reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
+ writel(reg, q->iobase + QUADSPI_MCR);
+
+ /*
+ * The minimum delay : 1 AHB + 2 SFCK clocks.
+ * Delay 1 us is enough.
+ */
+ udelay(1);
+
+ reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
+ writel(reg, q->iobase + QUADSPI_MCR);
+}
+
+static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
+ u8 opcode, unsigned int to, u32 *txbuf,
+ unsigned count, size_t *retlen)
+{
+ int ret, i, j;
+ u32 tmp;
+
+ dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n",
+ q->chip_base_addr, to, count);
+
+	/* clear the TX FIFO. */
+	tmp = readl(q->iobase + QUADSPI_MCR);
+	writel(tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
+
+ /* fill the TX data to the FIFO */
+ for (j = 0, i = ((count + 3) / 4); j < i; j++) {
+ tmp = fsl_qspi_endian_xchg(q, *txbuf);
+ writel(tmp, q->iobase + QUADSPI_TBDR);
+ txbuf++;
+ }
+
+ /* Trigger it */
+ ret = fsl_qspi_runcmd(q, opcode, to, count);
+
+ if (ret == 0 && retlen)
+ *retlen += count;
+
+ return ret;
+}
+
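+/*
+ * Program the top addresses of the four chip selects. The controller
+ * carves the mapped window into equal nor_size slices; with 16 MiB
+ * parts, for example, A1 ends at memmap_phy + 16 MiB, A2 at + 32 MiB,
+ * B1 at + 48 MiB and B2 at + 64 MiB.
+ */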
+static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
+{
+ int nor_size = q->nor_size;
+ void __iomem *base = q->iobase;
+
+ writel(nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
+ writel(nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
+ writel(nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
+ writel(nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
+}
+
+/*
+ * There are two different ways to read out the data from the flash:
+ * the "IP Command Read" and the "AHB Command Read".
+ *
+ * The IC designers suggest we use the "AHB Command Read", which is
+ * faster than the "IP Command Read". (What's more, the "IP Command
+ * Read" is buggy on the Vybrid.)
+ *
+ * After we set up the registers for the "AHB Command Read", we can use
+ * memcpy() to read the data directly. A "missed" access to the buffer
+ * causes the controller to clear the buffer, and to use the sequence
+ * pointed to by QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
+ */
+static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ int seqid;
+
+	/* AHB configuration for access buffers 0/1/2. */
+ writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
+ writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
+ writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
+	/*
+	 * Set ADATSZ to the maximum AHB buffer size to improve the read
+	 * performance; the field is expressed in 8-byte units, hence the
+	 * division below.
+	 */
+ writel(QUADSPI_BUF3CR_ALLMST_MASK | ((q->devtype_data->ahb_buf_size / 8)
+ << QUADSPI_BUF3CR_ADATSZ_SHIFT), base + QUADSPI_BUF3CR);
+
+	/* We only use buffer 3. */
+ writel(0, base + QUADSPI_BUF0IND);
+ writel(0, base + QUADSPI_BUF1IND);
+ writel(0, base + QUADSPI_BUF2IND);
+
+ /* Set the default lut sequence for AHB Read. */
+ seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
+ writel(seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
+ q->iobase + QUADSPI_BFGENCR);
+}
+
+/* We use this function to do some basic init for spi_nor_scan(). */
+static int fsl_qspi_nor_setup(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ u32 reg;
+ int ret;
+
+	/* The default frequency; it is reprogrammed in fsl_qspi_nor_setup_last(). */
+ ret = clk_set_rate(q->clk, 66000000);
+ if (ret)
+ return ret;
+
+ /* Init the LUT table. */
+ fsl_qspi_init_lut(q);
+
+ /* Disable the module */
+ writel(QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
+ base + QUADSPI_MCR);
+
+ reg = readl(base + QUADSPI_SMPR);
+ writel(reg & ~(QUADSPI_SMPR_FSDLY_MASK
+ | QUADSPI_SMPR_FSPHS_MASK
+ | QUADSPI_SMPR_HSENA_MASK
+ | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
+
+ /* Enable the module */
+ writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
+ base + QUADSPI_MCR);
+
+ /* enable the interrupt */
+ writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
+
+ return 0;
+}
+
+static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
+{
+ unsigned long rate = q->clk_rate;
+ int ret;
+
+ if (is_imx6sx_qspi(q))
+ rate *= 4;
+
+ ret = clk_set_rate(q->clk, rate);
+ if (ret)
+ return ret;
+
+ /* Init the LUT table again. */
+ fsl_qspi_init_lut(q);
+
+ /* Init for AHB read */
+ fsl_qspi_init_abh_read(q);
+
+ return 0;
+}
+
+static struct of_device_id fsl_qspi_dt_ids[] = {
+ { .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
+
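+/*
+ * Each flash occupies a nor_size slice of the mapped window, so chip
+ * N's commands are offset by N * nor_size; with 16 MiB parts, for
+ * example, the second chip starts at chip_base_addr = 16 MiB.
+ */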
+static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor)
+{
+ q->chip_base_addr = q->nor_size * (nor - q->nor);
+}
+
+static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+ struct fsl_qspi *q = nor->priv;
+
+ ret = fsl_qspi_runcmd(q, opcode, 0, len);
+ if (ret)
+ return ret;
+
+ fsl_qspi_read_data(q, len, buf);
+ return 0;
+}
+
+static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
+ int write_enable)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ if (!buf) {
+ ret = fsl_qspi_runcmd(q, opcode, 0, 1);
+ if (ret)
+ return ret;
+
+ if (opcode == SPINOR_OP_CHIP_ERASE)
+ fsl_qspi_invalid(q);
+
+ } else if (len > 0) {
+ ret = fsl_qspi_nor_write(q, nor, opcode, 0,
+ (u32 *)buf, len, NULL);
+ } else {
+ dev_err(q->dev, "invalid cmd %d\n", opcode);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void fsl_qspi_write(struct spi_nor *nor, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct fsl_qspi *q = nor->priv;
+
+ fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
+ (u32 *)buf, len, retlen);
+
+	/* Invalidate the data in the AHB buffer. */
+ fsl_qspi_invalid(q);
+}
+
+static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct fsl_qspi *q = nor->priv;
+ u8 cmd = nor->read_opcode;
+
+	dev_dbg(q->dev, "cmd [%x], read from (0x%p, 0x%.8x, 0x%.8x), len:%zd\n",
+		cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len);
+
+ /* Read out the data directly from the AHB buffer.*/
+ memcpy(buf, q->ahb_base + q->chip_base_addr + from, len);
+
+ *retlen += len;
+ return 0;
+}
+
+static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
+ nor->mtd->erasesize / 1024, q->chip_base_addr, (u32)offs);
+
+ ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
+ if (ret)
+ return ret;
+
+ fsl_qspi_invalid(q);
+ return 0;
+}
+
+static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ ret = clk_enable(q->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(q->clk);
+ if (ret) {
+ clk_disable(q->clk_en);
+ return ret;
+ }
+
+ fsl_qspi_set_base_addr(q, nor);
+ return 0;
+}
+
+static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct fsl_qspi *q = nor->priv;
+
+ clk_disable(q->clk);
+ clk_disable(q->clk_en);
+}
+
+static int fsl_qspi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+ struct device *dev = &pdev->dev;
+ struct fsl_qspi *q;
+ struct resource *res;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ int ret, i = 0;
+ const struct of_device_id *of_id =
+ of_match_device(fsl_qspi_dt_ids, &pdev->dev);
+
+ q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ q->nor_num = of_get_child_count(dev->of_node);
+ if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
+ return -ENODEV;
+
+ /* find the resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
+ q->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->iobase))
+ return PTR_ERR(q->iobase);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "QuadSPI-memory");
+ q->ahb_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->ahb_base))
+ return PTR_ERR(q->ahb_base);
+
+ q->memmap_phy = res->start;
+
+ /* find the clocks */
+ q->clk_en = devm_clk_get(dev, "qspi_en");
+ if (IS_ERR(q->clk_en))
+ return PTR_ERR(q->clk_en);
+
+ q->clk = devm_clk_get(dev, "qspi");
+ if (IS_ERR(q->clk))
+ return PTR_ERR(q->clk);
+
+ ret = clk_prepare_enable(q->clk_en);
+ if (ret) {
+ dev_err(dev, "cannot enable the qspi_en clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(q->clk);
+ if (ret) {
+ dev_err(dev, "cannot enable the qspi clock: %d\n", ret);
+ goto clk_failed;
+ }
+
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to get the irq: %d\n", ret);
+ goto irq_failed;
+ }
+
+ ret = devm_request_irq(dev, ret,
+ fsl_qspi_irq_handler, 0, pdev->name, q);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto irq_failed;
+ }
+
+ q->dev = dev;
+ q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
+ platform_set_drvdata(pdev, q);
+
+ ret = fsl_qspi_nor_setup(q);
+ if (ret)
+ goto irq_failed;
+
+ if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
+ q->has_second_chip = true;
+
+ /* iterate the subnodes. */
+ for_each_available_child_of_node(dev->of_node, np) {
+ char modalias[40];
+
+ /* skip the holes */
+ if (!q->has_second_chip)
+ i *= 2;
+
+ nor = &q->nor[i];
+ mtd = &q->mtd[i];
+
+ nor->mtd = mtd;
+ nor->dev = dev;
+ nor->priv = q;
+ mtd->priv = nor;
+
+ /* fill the hooks */
+ nor->read_reg = fsl_qspi_read_reg;
+ nor->write_reg = fsl_qspi_write_reg;
+ nor->read = fsl_qspi_read;
+ nor->write = fsl_qspi_write;
+ nor->erase = fsl_qspi_erase;
+
+ nor->prepare = fsl_qspi_prep;
+ nor->unprepare = fsl_qspi_unprep;
+
+ ret = of_modalias_node(np, modalias, sizeof(modalias));
+ if (ret < 0)
+ goto irq_failed;
+
+ ret = of_property_read_u32(np, "spi-max-frequency",
+ &q->clk_rate);
+ if (ret < 0)
+ goto irq_failed;
+
+ /* set the chip address for READID */
+ fsl_qspi_set_base_addr(q, nor);
+
+ ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD);
+ if (ret)
+ goto irq_failed;
+
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (ret)
+ goto irq_failed;
+
+ /* Set the correct NOR size now. */
+ if (q->nor_size == 0) {
+ q->nor_size = mtd->size;
+
+			/* Map the SPI NOR to an accessible address */
+ fsl_qspi_set_map_addr(q);
+ }
+
+		/*
+		 * The TX FIFO is 64 bytes in the Vybrid, but a Page Program
+		 * may write up to 256 bytes at a time. The write works in
+		 * units of the TX FIFO, not in units of the SPI NOR's page
+		 * size.
+		 *
+		 * So shrink spi_nor->page_size if it is larger than the
+		 * TX FIFO.
+		 */
+ if (nor->page_size > q->devtype_data->txfifo)
+ nor->page_size = q->devtype_data->txfifo;
+
+ i++;
+ }
+
+	/* Finish the rest of the init. */
+ ret = fsl_qspi_nor_setup_last(q);
+ if (ret)
+ goto last_init_failed;
+
+ clk_disable(q->clk);
+ clk_disable(q->clk_en);
+ return 0;
+
+last_init_failed:
+ for (i = 0; i < q->nor_num; i++) {
+ /* skip the holes */
+ if (!q->has_second_chip)
+ i *= 2;
+ mtd_device_unregister(&q->mtd[i]);
+ }
+irq_failed:
+ clk_disable_unprepare(q->clk);
+clk_failed:
+ clk_disable_unprepare(q->clk_en);
+ return ret;
+}
+
+static int fsl_qspi_remove(struct platform_device *pdev)
+{
+ struct fsl_qspi *q = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < q->nor_num; i++) {
+ /* skip the holes */
+ if (!q->has_second_chip)
+ i *= 2;
+ mtd_device_unregister(&q->mtd[i]);
+ }
+
+ /* disable the hardware */
+ writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+ writel(0x0, q->iobase + QUADSPI_RSER);
+
+ clk_unprepare(q->clk);
+ clk_unprepare(q->clk_en);
+ return 0;
+}
+
+static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int fsl_qspi_resume(struct platform_device *pdev)
+{
+ struct fsl_qspi *q = platform_get_drvdata(pdev);
+
+ fsl_qspi_nor_setup(q);
+ fsl_qspi_set_map_addr(q);
+ fsl_qspi_nor_setup_last(q);
+
+ return 0;
+}
+
+static struct platform_driver fsl_qspi_driver = {
+ .driver = {
+ .name = "fsl-quadspi",
+ .bus = &platform_bus_type,
+ .of_match_table = fsl_qspi_dt_ids,
+ },
+ .probe = fsl_qspi_probe,
+ .remove = fsl_qspi_remove,
+ .suspend = fsl_qspi_suspend,
+ .resume = fsl_qspi_resume,
+};
+module_platform_driver(fsl_qspi_driver);
+
+MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
new file mode 100644
index 000000000..14a5d2325
--- /dev/null
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -0,0 +1,1225 @@
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/math64.h>
+
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
+
+/* Define max times to check status register before we give up. */
+#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
+
+#define SPI_NOR_MAX_ID_LEN 6
+
+struct flash_info {
+	/*
+	 * This array stores the ID bytes.
+	 * The first three bytes are the JEDEC ID.
+	 * JEDEC ID zero means "no ID" (mostly older chips).
+	 */
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+
+ /* The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+
+ u16 page_size;
+ u16 addr_width;
+
+ u16 flags;
+#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
+#define SST_WRITE 0x04 /* use SST byte programming */
+#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
+#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
+#define USE_FSR 0x80 /* use flag status register */
+};
+
+#define JEDEC_MFR(info) ((info)->id[0])
+
+static const struct spi_device_id *spi_nor_match_id(const char *name);
+
+/*
+ * Read the status register.
+ * Returns the status register value, or a negative errno on error.
+ */
+static int read_sr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
+ if (ret < 0) {
+ pr_err("error %d reading SR\n", (int) ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Read the flag status register.
+ * Returns the flag status register value, or a negative errno on error.
+ */
+static int read_fsr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
+ if (ret < 0) {
+ pr_err("error %d reading FSR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Read the configuration register.
+ * Returns the configuration register value, or a negative errno on error.
+ */
+static int read_cr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading CR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Dummy cycle calculation for different types of read.
+ * It can be used to support more commands with
+ * different dummy cycle requirements.
+ */
+static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
+{
+ switch (nor->flash_read) {
+ case SPI_NOR_FAST:
+ case SPI_NOR_DUAL:
+ case SPI_NOR_QUAD:
+ return 8;
+ case SPI_NOR_NORMAL:
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Write the status register (1 byte).
+ * Returns negative if an error occurred.
+ */
+static inline int write_sr(struct spi_nor *nor, u8 val)
+{
+ nor->cmd_buf[0] = val;
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
+}
+
+/*
+ * Set write enable latch with Write Enable command.
+ * Returns negative if error occurred.
+ */
+static inline int write_enable(struct spi_nor *nor)
+{
+ return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
+}
+
+/*
+ * Send the write disable instruction to the chip.
+ */
+static inline int write_disable(struct spi_nor *nor)
+{
+ return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0, 0);
+}
+
+static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+/* Enable/disable 4-byte addressing mode. */
+static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
+ int enable)
+{
+ int status;
+ bool need_wren = false;
+ u8 cmd;
+
+ switch (JEDEC_MFR(info)) {
+	case CFI_MFR_ST: /* Micron, actually */
+		/* Some Micron need WREN command; all will accept it */
+		need_wren = true;
+		/* fall through */
+ case CFI_MFR_MACRONIX:
+ case 0xEF /* winbond */:
+ if (need_wren)
+ write_enable(nor);
+
+ cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
+ status = nor->write_reg(nor, cmd, NULL, 0, 0);
+ if (need_wren)
+ write_disable(nor);
+
+ return status;
+ default:
+ /* Spansion style */
+ nor->cmd_buf[0] = enable << 7;
+ return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1, 0);
+ }
+}
+
+static inline int spi_nor_sr_ready(struct spi_nor *nor)
+{
+ int sr = read_sr(nor);
+ if (sr < 0)
+ return sr;
+ else
+ return !(sr & SR_WIP);
+}
+
+static inline int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+ int fsr = read_fsr(nor);
+ if (fsr < 0)
+ return fsr;
+ else
+ return fsr & FSR_READY;
+}
+
+static int spi_nor_ready(struct spi_nor *nor)
+{
+ int sr, fsr;
+ sr = spi_nor_sr_ready(nor);
+ if (sr < 0)
+ return sr;
+ fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+ if (fsr < 0)
+ return fsr;
+ return sr && fsr;
+}
+
+/*
+ * Service routine to poll the status register until the flash is ready,
+ * or a timeout occurs. Returns non-zero on error.
+ */
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ unsigned long deadline;
+ int timeout = 0, ret;
+
+ deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ ret = spi_nor_ready(nor);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+
+ cond_resched();
+ }
+
+ dev_err(nor->dev, "flash operation timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Erase the whole flash memory
+ *
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int erase_chip(struct spi_nor *nor)
+{
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10));
+
+ return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0, 0);
+}
+
+static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ int ret = 0;
+
+ mutex_lock(&nor->lock);
+
+ if (nor->prepare) {
+ ret = nor->prepare(nor, ops);
+ if (ret) {
+ dev_err(nor->dev, "failed in the preparation.\n");
+ mutex_unlock(&nor->lock);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ if (nor->unprepare)
+ nor->unprepare(nor, ops);
+ mutex_unlock(&nor->lock);
+}
+
+/*
+ * Erase an address range on the nor chip. The address range may extend
+ * across one or more erase sectors. Returns an error if there is a
+ * problem erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 addr, len;
+ uint32_t rem;
+ int ret;
+
+ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+ (long long)instr->len);
+
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+
+ addr = instr->addr;
+ len = instr->len;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
+ if (ret)
+ return ret;
+
+ /* whole-chip erase? */
+ if (len == mtd->size) {
+ write_enable(nor);
+
+ if (erase_chip(nor)) {
+ ret = -EIO;
+ goto erase_err;
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+
+ /* REVISIT in some cases we could speed up erasing large regions
+ * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
+ * to use "small sector erase", but that's not always optimal.
+ */
+
+ /* "sector"-at-a-time erase */
+ } else {
+ while (len) {
+ write_enable(nor);
+
+ if (nor->erase(nor, addr)) {
+ ret = -EIO;
+ goto erase_err;
+ }
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+ }
+ }
+
+ write_disable(nor);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return ret;
+
+erase_err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+ instr->state = MTD_ERASE_FAILED;
+ return ret;
+}
+
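+/*
+ * Lock part of the flash via the SR block-protect bits. As implied by
+ * the cascade below (a reading of the code, not a datasheet quote), on
+ * these ST/Micron-style parts BP2:BP0 select a protected area at the
+ * top of the chip that halves with each step: 0b111 protects the whole
+ * array, 0b110 the top half, 0b101 the top quarter, down to 0b001 for
+ * the top 1/64th. stm_lock() picks the smallest area that still covers
+ * everything from 'ofs' to the end of the chip.
+ */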
+static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = nor->mtd;
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int ret = 0;
+
+ status_old = read_sr(nor);
+
+ if (offset < mtd->size - (mtd->size / 2))
+ status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
+ else if (offset < mtd->size - (mtd->size / 4))
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+ else if (offset < mtd->size - (mtd->size / 8))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else if (offset < mtd->size - (mtd->size / 16))
+ status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
+ else if (offset < mtd->size - (mtd->size / 32))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset < mtd->size - (mtd->size / 64))
+ status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
+ else
+ status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) >
+ (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ }
+
+ return ret;
+}
+
+static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = nor->mtd;
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int ret = 0;
+
+ status_old = read_sr(nor);
+
+ if (offset+len > mtd->size - (mtd->size / 64))
+ status_new = status_old & ~(SR_BP2 | SR_BP1 | SR_BP0);
+ else if (offset+len > mtd->size - (mtd->size / 32))
+ status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
+ else if (offset+len > mtd->size - (mtd->size / 16))
+ status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
+ else if (offset+len > mtd->size - (mtd->size / 8))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset+len > mtd->size - (mtd->size / 4))
+ status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
+ else if (offset+len > mtd->size - (mtd->size / 2))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) <
+ (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ }
+
+ return ret;
+}
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_lock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
+ return ret;
+}
+
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_unlock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ return ret;
+}
+
+/* Used when the "_ext_id" is two bytes at most */
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags), \
+ })
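+
+/*
+ * For example (purely illustrative), INFO(0x1f6601, 0, 32 * 1024, 4,
+ * SECT_4K) describes a part with id = { 0x1f, 0x66, 0x01 }, id_len = 3
+ * (no extended ID bytes), 4 sectors of 32 KiB and the default 256-byte
+ * page.
+ */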
+
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 16) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = 6, \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags), \
+ })
+
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .addr_width = (_addr_width), \
+ .flags = (_flags), \
+ })
+
+/* NOTE: double check command sets and memory organization when you add
+ * more nor chips. This current list focuses on newer chips, which
+ * have been converging on command sets which include JEDEC ID.
+ */
+static const struct spi_device_id spi_nor_ids[] = {
+ /* Atmel -- some are (confusingly) marketed as "DataFlash" */
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
+
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+
+ /* EON -- en25xxx */
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, 0) },
+
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
+
+ /* Everspin */
+ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+
+ /* Fujitsu */
+ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
+
+ /* GigaDevice */
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+ { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },
+
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+
+ /* Macronix */
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
+
+ /* Micron */
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+
+ /* Spansion -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, 0) },
+
+ /* SST -- large erase sizes are "overlays", "sectors" are 4K */
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+
+ /* ST Microelectronics -- newer production may have feature updates */
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+ { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
+
+ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
+
+ /* Catalyst / On Semiconductor -- non-JEDEC */
+ { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { },
+};
+
+static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
+{
+ int tmp;
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ struct flash_info *info;
+
+ tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+ if (tmp < 0) {
+		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
+ return ERR_PTR(tmp);
+ }
+
+ for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
+ info = (void *)spi_nor_ids[tmp].driver_data;
+ if (info->id_len) {
+ if (!memcmp(info->id, id, info->id_len))
+ return &spi_nor_ids[tmp];
+ }
+ }
+	dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
+ id[0], id[1], id[2]);
+ return ERR_PTR(-ENODEV);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
+ if (ret)
+ return ret;
+
+ ret = nor->read(nor, from, len, retlen, buf);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
+ return ret;
+}
+
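+/*
+ * SST AAI word programming, as implemented below: byte-program a
+ * leading byte when 'to' is odd, stream the aligned middle of the
+ * buffer two bytes per AAI cycle, then byte-program a trailing byte
+ * when one is left over.
+ */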
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t actual;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ if (ret)
+ return ret;
+
+ write_enable(nor);
+
+ nor->sst_write_second = false;
+
+ actual = to % 2;
+ /* Start write from odd address. */
+ if (actual) {
+ nor->program_opcode = SPINOR_OP_BP;
+
+ /* write one byte. */
+ nor->write(nor, to, 1, retlen, buf);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+ }
+ to += actual;
+
+ /* Write out most of the data here. */
+ for (; actual < len - 1; actual += 2) {
+ nor->program_opcode = SPINOR_OP_AAI_WP;
+
+ /* write two bytes. */
+ nor->write(nor, to, 2, retlen, buf + actual);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+ to += 2;
+ nor->sst_write_second = true;
+ }
+ nor->sst_write_second = false;
+
+ write_disable(nor);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+
+ /* Write out trailing byte if it exists. */
+ if (actual != len) {
+ write_enable(nor);
+
+ nor->program_opcode = SPINOR_OP_BP;
+ nor->write(nor, to, 1, retlen, buf + actual);
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto time_out;
+ write_disable(nor);
+ }
+time_out:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ return ret;
+}
+
+/*
+ * Write an address range to the nor chip. Data must be written in
+ * nor->page_size chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
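+/*
+ * Worked example of the chunking below: writing 600 bytes to offset
+ * 0x80 on a 256-byte-page device programs 128 bytes (the rest of the
+ * first page), then 256 bytes, then the remaining 216 bytes, waiting
+ * for the chip to go ready between chunks.
+ */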
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 page_offset, page_size, i;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ if (ret)
+ return ret;
+
+ write_enable(nor);
+
+ page_offset = to & (nor->page_size - 1);
+
+ /* do all the bytes fit onto one page? */
+ if (page_offset + len <= nor->page_size) {
+ nor->write(nor, to, len, retlen, buf);
+ } else {
+ /* the size of data remaining on the first page */
+ page_size = nor->page_size - page_offset;
+ nor->write(nor, to, page_size, retlen, buf);
+
+ /* write everything in nor->page_size chunks */
+ for (i = page_size; i < len; i += page_size) {
+ page_size = len - i;
+ if (page_size > nor->page_size)
+ page_size = nor->page_size;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+
+ write_enable(nor);
+
+ nor->write(nor, to + i, page_size, retlen, buf + i);
+ }
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+write_err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ return ret;
+}
+
+static int macronix_quad_enable(struct spi_nor *nor)
+{
+ int ret, val;
+
+ val = read_sr(nor);
+ write_enable(nor);
+
+ nor->cmd_buf[0] = val | SR_QUAD_EN_MX;
+ nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
+
+ if (spi_nor_wait_till_ready(nor))
+ return 1;
+
+ ret = read_sr(nor);
+ if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+ dev_err(nor->dev, "Macronix Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Write the status register and configuration register with 2 bytes.
+ * The first byte is written to the status register, while the
+ * second byte is written to the configuration register.
+ * Return negative if an error occurred.
+ */
+static int write_sr_cr(struct spi_nor *nor, u16 val)
+{
+ nor->cmd_buf[0] = val & 0xff;
+ nor->cmd_buf[1] = (val >> 8);
+
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2, 0);
+}
+
+static int spansion_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+ int quad_en = CR_QUAD_EN_SPAN << 8;
+
+ write_enable(nor);
+
+ ret = write_sr_cr(nor, quad_en);
+ if (ret < 0) {
+ dev_err(nor->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ /* read back and check it */
+ ret = read_cr(nor);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_err(nor->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int micron_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading EVCR\n", ret);
+ return ret;
+ }
+
+ write_enable(nor);
+
+ /* set EVCR, enable quad I/O */
+ nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
+ ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1, 0);
+ if (ret < 0) {
+ dev_err(nor->dev, "error while writing EVCR register\n");
+ return ret;
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ /* read EVCR and check it */
+ ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading EVCR\n", ret);
+ return ret;
+ }
+ if (val & EVCR_QUAD_EN_MICRON) {
+ dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
+{
+ int status;
+
+ switch (JEDEC_MFR(info)) {
+ case CFI_MFR_MACRONIX:
+ status = macronix_quad_enable(nor);
+ if (status) {
+ dev_err(nor->dev, "Macronix quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ case CFI_MFR_ST:
+ status = micron_quad_enable(nor);
+ if (status) {
+ dev_err(nor->dev, "Micron quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ default:
+ status = spansion_quad_enable(nor);
+ if (status) {
+ dev_err(nor->dev, "Spansion quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ }
+}
+
+static int spi_nor_check(struct spi_nor *nor)
+{
+ if (!nor->dev || !nor->read || !nor->write ||
+ !nor->read_reg || !nor->write_reg || !nor->erase) {
+ pr_err("spi-nor: please fill all the necessary fields!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
+{
+ const struct spi_device_id *id = NULL;
+ struct flash_info *info;
+ struct device *dev = nor->dev;
+ struct mtd_info *mtd = nor->mtd;
+ struct device_node *np = dev->of_node;
+ int ret;
+ int i;
+
+ ret = spi_nor_check(nor);
+ if (ret)
+ return ret;
+
+ /* Try to auto-detect if chip name wasn't specified */
+ if (!name)
+ id = spi_nor_read_id(nor);
+ else
+ id = spi_nor_match_id(name);
+ if (IS_ERR_OR_NULL(id))
+ return -ENOENT;
+
+ info = (void *)id->driver_data;
+
+ /*
+ * If caller has specified name of flash model that can normally be
+ * detected using JEDEC, let's verify it.
+ */
+ if (name && info->id_len) {
+ const struct spi_device_id *jid;
+
+ jid = spi_nor_read_id(nor);
+ if (IS_ERR(jid)) {
+ return PTR_ERR(jid);
+ } else if (jid != id) {
+ /*
+ * JEDEC knows better, so overwrite platform ID. We
+ * can't trust partitions any longer, but we'll let
+ * mtd apply them anyway, since some partitions may be
+ * marked read-only, and we don't want to lose that
+ * information, even if it's not 100% accurate.
+ */
+ dev_warn(dev, "found %s, expected %s\n",
+ jid->name, id->name);
+ id = jid;
+ info = (void *)jid->driver_data;
+ }
+ }
+
+ mutex_init(&nor->lock);
+
+ /*
+ * Atmel, SST and Intel/Numonyx serial nor tend to power
+ * up with the software protection bits set
+ */
+
+ if (JEDEC_MFR(info) == CFI_MFR_ATMEL ||
+ JEDEC_MFR(info) == CFI_MFR_INTEL ||
+ JEDEC_MFR(info) == CFI_MFR_SST) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ }
+
+ if (!mtd->name)
+ mtd->name = dev_name(dev);
+ mtd->type = MTD_NORFLASH;
+ mtd->writesize = 1;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = info->sector_size * info->n_sectors;
+ mtd->_erase = spi_nor_erase;
+ mtd->_read = spi_nor_read;
+
+ /* nor protection support for STmicro chips */
+ if (JEDEC_MFR(info) == CFI_MFR_ST) {
+ nor->flash_lock = stm_lock;
+ nor->flash_unlock = stm_unlock;
+ }
+
+ if (nor->flash_lock && nor->flash_unlock) {
+ mtd->_lock = spi_nor_lock;
+ mtd->_unlock = spi_nor_unlock;
+ }
+
+ /* sst nor chips use AAI word program */
+ if (info->flags & SST_WRITE)
+ mtd->_write = sst_write;
+ else
+ mtd->_write = spi_nor_write;
+
+ if (info->flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
+
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ if (info->flags & SECT_4K) {
+ nor->erase_opcode = SPINOR_OP_BE_4K;
+ mtd->erasesize = 4096;
+ } else if (info->flags & SECT_4K_PMC) {
+ nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
+ mtd->erasesize = 4096;
+ } else
+#endif
+ {
+ nor->erase_opcode = SPINOR_OP_SE;
+ mtd->erasesize = info->sector_size;
+ }
+
+ if (info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+
+ mtd->dev.parent = dev;
+ nor->page_size = info->page_size;
+ mtd->writebufsize = nor->page_size;
+
+ if (np) {
+ /* If we were instantiated by DT, use it */
+ if (of_property_read_bool(np, "m25p,fast-read"))
+ nor->flash_read = SPI_NOR_FAST;
+ else
+ nor->flash_read = SPI_NOR_NORMAL;
+ } else {
+ /* If we weren't instantiated by DT, default to fast-read */
+ nor->flash_read = SPI_NOR_FAST;
+ }
+
+ /* Some devices cannot do fast-read, no matter what DT tells us */
+ if (info->flags & SPI_NOR_NO_FR)
+ nor->flash_read = SPI_NOR_NORMAL;
+
+ /* Quad/Dual-read mode takes precedence over fast/normal */
+ if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
+ ret = set_quad_mode(nor, info);
+ if (ret) {
+ dev_err(dev, "quad mode not supported\n");
+ return ret;
+ }
+ nor->flash_read = SPI_NOR_QUAD;
+ } else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
+ nor->flash_read = SPI_NOR_DUAL;
+ }
+
+ /* Default commands */
+ switch (nor->flash_read) {
+ case SPI_NOR_QUAD:
+ nor->read_opcode = SPINOR_OP_READ_1_1_4;
+ break;
+ case SPI_NOR_DUAL:
+ nor->read_opcode = SPINOR_OP_READ_1_1_2;
+ break;
+ case SPI_NOR_FAST:
+ nor->read_opcode = SPINOR_OP_READ_FAST;
+ break;
+ case SPI_NOR_NORMAL:
+ nor->read_opcode = SPINOR_OP_READ;
+ break;
+ default:
+ dev_err(dev, "No Read opcode defined\n");
+ return -EINVAL;
+ }
+
+ nor->program_opcode = SPINOR_OP_PP;
+
+ if (info->addr_width)
+ nor->addr_width = info->addr_width;
+ else if (mtd->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ if (JEDEC_MFR(info) == CFI_MFR_AMD) {
+ /* Dedicated 4-byte command set */
+ switch (nor->flash_read) {
+ case SPI_NOR_QUAD:
+ nor->read_opcode = SPINOR_OP_READ4_1_1_4;
+ break;
+ case SPI_NOR_DUAL:
+ nor->read_opcode = SPINOR_OP_READ4_1_1_2;
+ break;
+ case SPI_NOR_FAST:
+ nor->read_opcode = SPINOR_OP_READ4_FAST;
+ break;
+ case SPI_NOR_NORMAL:
+ nor->read_opcode = SPINOR_OP_READ4;
+ break;
+ }
+ nor->program_opcode = SPINOR_OP_PP_4B;
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE_4B;
+ mtd->erasesize = info->sector_size;
+ } else
+ set_4byte(nor, info, 1);
+ } else {
+ nor->addr_width = 3;
+ }
+
+ nor->read_dummy = spi_nor_read_dummy_cycles(nor);
+
+ dev_info(dev, "%s (%lld Kbytes)\n", id->name,
+ (long long)mtd->size >> 10);
+
+ dev_dbg(dev,
+ "mtd .name = %s, .size = 0x%llx (%lldMiB), "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
+ mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+
+ if (mtd->numeraseregions)
+ for (i = 0; i < mtd->numeraseregions; i++)
+ dev_dbg(dev,
+ "mtd.eraseregions[%d] = { .offset = 0x%llx, "
+ ".erasesize = 0x%.8x (%uKiB), "
+ ".numblocks = %d }\n",
+ i, (long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].erasesize / 1024,
+ mtd->eraseregions[i].numblocks);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nor_scan);
+
+static const struct spi_device_id *spi_nor_match_id(const char *name)
+{
+ const struct spi_device_id *id = spi_nor_ids;
+
+ while (id->name[0]) {
+ if (!strcmp(name, id->name))
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
new file mode 100644
index 000000000..daf82ba7a
--- /dev/null
+++ b/drivers/mtd/ssfdc.c
@@ -0,0 +1,458 @@
+/*
+ * Linux driver for SSFDC Flash Translation Layer (Read only)
+ * © 2005 Eptar srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ *
+ * Based on NTFL and MTDBLOCK_RO drivers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/blktrans.h>
+
+struct ssfdcr_record {
+ struct mtd_blktrans_dev mbd;
+ int usecount;
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+	int cis_block;			/* block no. containing CIS/IDI */
+	int erase_size;			/* phys_block_size */
+	unsigned short *logic_block_map; /* all zones (max 8192 phys blocks on
+					    the 128MiB) */
+	int map_len;			/* number of phys blocks on the card */
+};
+
+#define SSFDCR_MAJOR 257
+#define SSFDCR_PARTN_BITS 3
+
+#define SECTOR_SIZE 512
+#define SECTOR_SHIFT 9
+#define OOB_SIZE 16
+
+#define MAX_LOGIC_BLK_PER_ZONE 1000
+#define MAX_PHYS_BLK_PER_ZONE 1024
+
+#define KiB(x) ( (x) * 1024L )
+#define MiB(x) ( KiB(x) * 1024L )
+
+/** CHS Table
+ 1MiB 2MiB 4MiB 8MiB 16MiB 32MiB 64MiB 128MiB
+NCylinder 125 125 250 250 500 500 500 500
+NHead 4 4 4 4 4 8 8 16
+NSector 4 8 8 16 16 16 32 32
+SumSector 2,000 4,000 8,000 16,000 32,000 64,000 128,000 256,000
+SectorSize 512 512 512 512 512 512 512 512
+**/
+
+typedef struct {
+ unsigned long size;
+ unsigned short cyl;
+ unsigned char head;
+ unsigned char sec;
+} chs_entry_t;
+
+/* Must be ordered by size */
+static const chs_entry_t chs_table[] = {
+ { MiB( 1), 125, 4, 4 },
+ { MiB( 2), 125, 4, 8 },
+ { MiB( 4), 250, 4, 8 },
+ { MiB( 8), 250, 4, 16 },
+ { MiB( 16), 500, 4, 16 },
+ { MiB( 32), 500, 8, 16 },
+ { MiB( 64), 500, 8, 32 },
+ { MiB(128), 500, 16, 32 },
+ { 0 },
+};
+
+static int get_chs(unsigned long size, unsigned short *cyl, unsigned char *head,
+ unsigned char *sec)
+{
+ int k;
+ int found = 0;
+
+ k = 0;
+ while (chs_table[k].size > 0 && size > chs_table[k].size)
+ k++;
+
+ if (chs_table[k].size > 0) {
+ if (cyl)
+ *cyl = chs_table[k].cyl;
+ if (head)
+ *head = chs_table[k].head;
+ if (sec)
+ *sec = chs_table[k].sec;
+ found = 1;
+ }
+
+ return found;
+}
+
+/* These bytes are the signature for the CIS/IDI sector */
+static const uint8_t cis_numbers[] = {
+ 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
+};
+
+/* Read and check for a valid CIS sector */
+static int get_valid_cis_sector(struct mtd_info *mtd)
+{
+ int ret, k, cis_sector;
+ size_t retlen;
+ loff_t offset;
+ uint8_t *sect_buf;
+
+ cis_sector = -1;
+
+ sect_buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);
+ if (!sect_buf)
+ goto out;
+
+	/*
+	 * Look for the CIS/IDI sector on the first GOOD block (give up after
+	 * 4 bad blocks). If the first good block doesn't contain the CIS
+	 * signature, the flash is not SSFDC formatted.
+	 */
+ for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
+		if (!mtd_block_isbad(mtd, offset)) {
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen,
+ sect_buf);
+
+ /* CIS pattern match on the sector buffer */
+ if (ret < 0 || retlen != SECTOR_SIZE) {
+ printk(KERN_WARNING
+ "SSFDC_RO:can't read CIS/IDI sector\n");
+ } else if (!memcmp(sect_buf, cis_numbers,
+ sizeof(cis_numbers))) {
+ /* Found */
+ cis_sector = (int)(offset >> SECTOR_SHIFT);
+ } else {
+ pr_debug("SSFDC_RO: CIS/IDI sector not found"
+ " on %s (mtd%d)\n", mtd->name,
+ mtd->index);
+ }
+ break;
+ }
+ }
+
+ kfree(sect_buf);
+ out:
+ return cis_sector;
+}
+
+/* Read physical sector (wrapper to MTD_READ) */
+static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
+ int sect_no)
+{
+ int ret;
+ size_t retlen;
+ loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
+
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
+ if (ret < 0 || retlen != SECTOR_SIZE)
+ return -1;
+
+ return 0;
+}
+
+/* Read redundancy area (wrapper to MTD_READ_OOB) */
+static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ ops.mode = MTD_OPS_RAW;
+ ops.ooboffs = 0;
+ ops.ooblen = OOB_SIZE;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ ret = mtd_read_oob(mtd, offs, &ops);
+ if (ret < 0 || ops.oobretlen != OOB_SIZE)
+ return -1;
+
+ return 0;
+}
+
+/* Odd-parity calculator on a word of n bit size: returns 1 when 'number'
+ * has an even number of set bits, 0 otherwise */
+static int get_parity(int number, int size)
+{
+ int k;
+ int parity;
+
+ parity = 1;
+ for (k = 0; k < size; k++) {
+ parity += (number >> k);
+ parity &= 1;
+ }
+ return parity;
+}
+
+/* Read and validate the logical block address field stored in the OOB */
+static int get_logical_address(uint8_t *oob_buf)
+{
+ int block_address, parity;
+ int offset[2] = {6, 11}; /* offset of the 2 address fields within OOB */
+ int j;
+ int ok = 0;
+
+ /*
+ * Look for the first valid logical address
+ * Valid address has fixed pattern on most significant bits and
+ * parity check
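+ *
+ * For example, a stored field of 0x1004 passes the signature test
+ * (0x1004 & ~0x7FF == 0x1000), carries parity bit 0 and encodes logical
+ * block (0x004 >> 1) = 2; get_parity(2, 10) is also 0, so the field is
+ * accepted.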
+ */
+ for (j = 0; j < ARRAY_SIZE(offset); j++) {
+ block_address = ((int)oob_buf[offset[j]] << 8) |
+ oob_buf[offset[j]+1];
+
+ /* Check for the signature bits in the address field (MSBits) */
+ if ((block_address & ~0x7FF) == 0x1000) {
+ parity = block_address & 0x01;
+ block_address &= 0x7FF;
+ block_address >>= 1;
+
+ if (get_parity(block_address, 10) != parity) {
+ pr_debug("SSFDC_RO: logical address field%d"
+ "parity error(0x%04X)\n", j+1,
+ block_address);
+ } else {
+ ok = 1;
+ break;
+ }
+ }
+ }
+
+ if (!ok)
+ block_address = -2;
+
+ pr_debug("SSFDC_RO: get_logical_address() %d\n",
+ block_address);
+
+ return block_address;
+}
+
+/* Build the logic block map */
+static int build_logical_block_map(struct ssfdcr_record *ssfdc)
+{
+ unsigned long offset;
+ uint8_t oob_buf[OOB_SIZE];
+ int ret, block_address, phys_block;
+ struct mtd_info *mtd = ssfdc->mbd.mtd;
+
+ pr_debug("SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
+ ssfdc->map_len,
+ (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024);
+
+ /* Scan every physical block, skip CIS block */
+ for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
+ phys_block++) {
+ offset = (unsigned long)phys_block * ssfdc->erase_size;
+ if (mtd_block_isbad(mtd, offset))
+ continue; /* skip bad blocks */
+
+ ret = read_raw_oob(mtd, offset, oob_buf);
+ if (ret < 0) {
+ pr_debug("SSFDC_RO: mtd read_oob() failed at %lu\n",
+ offset);
+ return -1;
+ }
+ block_address = get_logical_address(oob_buf);
+
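+ /*
+ * Addresses stored in the OOB are per-zone; rebase them below by the
+ * zone index, where each zone maps up to MAX_LOGIC_BLK_PER_ZONE
+ * logical blocks onto MAX_PHYS_BLK_PER_ZONE physical blocks.
+ */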
+ /* Skip invalid addresses */
+ if (block_address >= 0 &&
+ block_address < MAX_LOGIC_BLK_PER_ZONE) {
+ int zone_index;
+
+ zone_index = phys_block / MAX_PHYS_BLK_PER_ZONE;
+ block_address += zone_index * MAX_LOGIC_BLK_PER_ZONE;
+ ssfdc->logic_block_map[block_address] =
+ (unsigned short)phys_block;
+
+ pr_debug("SSFDC_RO: build_block_map() phys_block=%d,"
+ "logic_block_addr=%d, zone=%d\n",
+ phys_block, block_address, zone_index);
+ }
+ }
+ return 0;
+}
+
+static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct ssfdcr_record *ssfdc;
+ int cis_sector;
+
+ /* Check for small page NAND flash */
+ if (!mtd_type_is_nand(mtd) || mtd->oobsize != OOB_SIZE ||
+ mtd->size > UINT_MAX)
+ return;
+
+ /* Check for SSFDC format by reading CIS/IDI sector */
+ cis_sector = get_valid_cis_sector(mtd);
+ if (cis_sector == -1)
+ return;
+
+ ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
+ if (!ssfdc)
+ return;
+
+ ssfdc->mbd.mtd = mtd;
+ ssfdc->mbd.devnum = -1;
+ ssfdc->mbd.tr = tr;
+ ssfdc->mbd.readonly = 1;
+
+ ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);
+ ssfdc->erase_size = mtd->erasesize;
+ ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
+
+ pr_debug("SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
+ ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
+ DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE));
+
+ /* Set geometry: fall back to H=16/S=32 when the size is not in chs_table */
+ ssfdc->heads = 16;
+ ssfdc->sectors = 32;
+ get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
+ ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
+ ((long)ssfdc->sectors * (long)ssfdc->heads));
+
+ pr_debug("SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
+ ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
+ (long)ssfdc->cylinders * (long)ssfdc->heads *
+ (long)ssfdc->sectors);
+
+ ssfdc->mbd.size = (long)ssfdc->heads * (long)ssfdc->cylinders *
+ (long)ssfdc->sectors;
+
+ /* Allocate logical block map */
+ ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) *
+ ssfdc->map_len, GFP_KERNEL);
+ if (!ssfdc->logic_block_map)
+ goto out_err;
+ memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
+ ssfdc->map_len);
+
+ /* Build logical block map */
+ if (build_logical_block_map(ssfdc) < 0)
+ goto out_err;
+
+ /* Register device + partitions */
+ if (add_mtd_blktrans_dev(&ssfdc->mbd))
+ goto out_err;
+
+ printk(KERN_INFO "SSFDC_RO: Found ssfdc%c on mtd%d (%s)\n",
+ ssfdc->mbd.devnum + 'a', mtd->index, mtd->name);
+ return;
+
+out_err:
+ kfree(ssfdc->logic_block_map);
+ kfree(ssfdc);
+}
+
+static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
+
+ pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
+
+ del_mtd_blktrans_dev(dev);
+ kfree(ssfdc->logic_block_map);
+}
+
+static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long logic_sect_no, char *buf)
+{
+ struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
+ int sectors_per_block, offset, block_address;
+
+ sectors_per_block = ssfdc->erase_size >> SECTOR_SHIFT;
+ offset = (int)(logic_sect_no % sectors_per_block);
+ block_address = (int)(logic_sect_no / sectors_per_block);
+
+ pr_debug("SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
+ " block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
+ block_address);
+
+ if (block_address >= ssfdc->map_len)
+ BUG();
+
+ block_address = ssfdc->logic_block_map[block_address];
+
+ pr_debug("SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
+ block_address);
+
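+ /*
+ * Map entries never claimed by a physical block keep the 0xffff fill
+ * value from the initial memset and read back as erased (0xff) data.
+ */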
+ if (block_address < 0xffff) {
+ unsigned long sect_no;
+
+ sect_no = (unsigned long)block_address * sectors_per_block +
+ offset;
+
+ pr_debug("SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
+ sect_no);
+
+ if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0)
+ return -EIO;
+ } else {
+ memset(buf, 0xff, SECTOR_SIZE);
+ }
+
+ return 0;
+}
+
+static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
+
+ pr_debug("SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
+ ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
+
+ geo->heads = ssfdc->heads;
+ geo->sectors = ssfdc->sectors;
+ geo->cylinders = ssfdc->cylinders;
+
+ return 0;
+}
+
+/****************************************************************************
+ *
+ * Module stuff
+ *
+ ****************************************************************************/
+
+static struct mtd_blktrans_ops ssfdcr_tr = {
+ .name = "ssfdc",
+ .major = SSFDCR_MAJOR,
+ .part_bits = SSFDCR_PARTN_BITS,
+ .blksize = SECTOR_SIZE,
+ .getgeo = ssfdcr_getgeo,
+ .readsect = ssfdcr_readsect,
+ .add_mtd = ssfdcr_add_mtd,
+ .remove_dev = ssfdcr_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_ssfdcr(void)
+{
+ printk(KERN_INFO "SSFDC read-only Flash Translation layer\n");
+
+ return register_mtd_blktrans(&ssfdcr_tr);
+}
+
+static void __exit cleanup_ssfdcr(void)
+{
+ deregister_mtd_blktrans(&ssfdcr_tr);
+}
+
+module_init(init_ssfdcr);
+module_exit(cleanup_ssfdcr);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
+MODULE_DESCRIPTION("Flash Translation Layer for read-only SSFDC SmartMedia card");
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
new file mode 100644
index 000000000..937a829bb
--- /dev/null
+++ b/drivers/mtd/tests/Makefile
@@ -0,0 +1,18 @@
+obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
+
+mtd_oobtest-objs := oobtest.o mtd_test.o
+mtd_pagetest-objs := pagetest.o mtd_test.o
+mtd_readtest-objs := readtest.o mtd_test.o
+mtd_speedtest-objs := speedtest.o mtd_test.o
+mtd_stresstest-objs := stresstest.o mtd_test.o
+mtd_subpagetest-objs := subpagetest.o mtd_test.o
+mtd_torturetest-objs := torturetest.o mtd_test.o
+mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
new file mode 100644
index 000000000..79316159e
--- /dev/null
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -0,0 +1,322 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand_ecc.h>
+
+#include "mtd_test.h"
+
+/*
+ * Test the implementation for software ECC
+ *
+ * No actual MTD device is needed, so we don't need to worry about losing
+ * important data by human error.
+ *
+ * This covers possible patterns of corruption which can be reliably corrected
+ * or detected.
+ */
+
+#if IS_ENABLED(CONFIG_MTD_NAND)
+
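+/*
+ * Each test case corrupts a known-good (data, ecc) pair in a specific way
+ * ("prepare") and then checks that __nand_correct_data() reports the
+ * expected outcome ("verify"): clean, corrected, or uncorrectable.
+ */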
+struct nand_ecc_test {
+ const char *name;
+ void (*prepare)(void *, void *, void *, void *, const size_t);
+ int (*verify)(void *, void *, void *, const size_t);
+};
+
+/*
+ * The reason for this __change_bit_le() instead of __change_bit() is to
+ * inject a bit error properly within a region whose size is not a multiple
+ * of sizeof(unsigned long) on big-endian systems
+ */
+#ifdef __LITTLE_ENDIAN
+#define __change_bit_le(nr, addr) __change_bit(nr, addr)
+#elif defined(__BIG_ENDIAN)
+#define __change_bit_le(nr, addr) \
+ __change_bit((nr) ^ ((BITS_PER_LONG - 1) & ~0x7), addr)
+#else
+#error "Unknown byte order"
+#endif
+
+static void single_bit_error_data(void *error_data, void *correct_data,
+ size_t size)
+{
+ unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
+
+ memcpy(error_data, correct_data, size);
+ __change_bit_le(offset, error_data);
+}
+
+static void double_bit_error_data(void *error_data, void *correct_data,
+ size_t size)
+{
+ unsigned int offset[2];
+
+ offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
+ do {
+ offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
+ } while (offset[0] == offset[1]);
+
+ memcpy(error_data, correct_data, size);
+
+ __change_bit_le(offset[0], error_data);
+ __change_bit_le(offset[1], error_data);
+}
+
+static unsigned int random_ecc_bit(size_t size)
+{
+ unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
+
+ if (size == 256) {
+ /*
+ * Don't inject a bit error into the insignificant bits (16th
+ * and 17th bit) in ECC code for 256 byte data block
+ */
+ while (offset == 16 || offset == 17)
+ offset = prandom_u32() % (3 * BITS_PER_BYTE);
+ }
+
+ return offset;
+}
+
+static void single_bit_error_ecc(void *error_ecc, void *correct_ecc,
+ size_t size)
+{
+ unsigned int offset = random_ecc_bit(size);
+
+ memcpy(error_ecc, correct_ecc, 3);
+ __change_bit_le(offset, error_ecc);
+}
+
+static void double_bit_error_ecc(void *error_ecc, void *correct_ecc,
+ size_t size)
+{
+ unsigned int offset[2];
+
+ offset[0] = random_ecc_bit(size);
+ do {
+ offset[1] = random_ecc_bit(size);
+ } while (offset[0] == offset[1]);
+
+ memcpy(error_ecc, correct_ecc, 3);
+ __change_bit_le(offset[0], error_ecc);
+ __change_bit_le(offset[1], error_ecc);
+}
+
+static void no_bit_error(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static int no_bit_error_verify(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ unsigned char calc_ecc[3];
+ int ret;
+
+ __nand_calculate_ecc(error_data, size, calc_ecc);
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+ if (ret == 0 && !memcmp(correct_data, error_data, size))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void single_bit_error_in_data(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ single_bit_error_data(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static void single_bit_error_in_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ single_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
+static int single_bit_error_correct(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ unsigned char calc_ecc[3];
+ int ret;
+
+ __nand_calculate_ecc(error_data, size, calc_ecc);
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+ if (ret == 1 && !memcmp(correct_data, error_data, size))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void double_bit_error_in_data(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ double_bit_error_data(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static void single_bit_error_in_data_and_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ single_bit_error_data(error_data, correct_data, size);
+ single_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
+static void double_bit_error_in_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ double_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
+static int double_bit_error_detect(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ unsigned char calc_ecc[3];
+ int ret;
+
+ __nand_calculate_ecc(error_data, size, calc_ecc);
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+
+ return (ret == -1) ? 0 : -EINVAL;
+}
+
+static const struct nand_ecc_test nand_ecc_test[] = {
+ {
+ .name = "no-bit-error",
+ .prepare = no_bit_error,
+ .verify = no_bit_error_verify,
+ },
+ {
+ .name = "single-bit-error-in-data-correct",
+ .prepare = single_bit_error_in_data,
+ .verify = single_bit_error_correct,
+ },
+ {
+ .name = "single-bit-error-in-ecc-correct",
+ .prepare = single_bit_error_in_ecc,
+ .verify = single_bit_error_correct,
+ },
+ {
+ .name = "double-bit-error-in-data-detect",
+ .prepare = double_bit_error_in_data,
+ .verify = double_bit_error_detect,
+ },
+ {
+ .name = "single-bit-error-in-data-and-ecc-detect",
+ .prepare = single_bit_error_in_data_and_ecc,
+ .verify = double_bit_error_detect,
+ },
+ {
+ .name = "double-bit-error-in-ecc-detect",
+ .prepare = double_bit_error_in_ecc,
+ .verify = double_bit_error_detect,
+ },
+};
+
+static void dump_data_ecc(void *error_data, void *error_ecc, void *correct_data,
+ void *correct_ecc, const size_t size)
+{
+ pr_info("hexdump of error data:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ error_data, size, false);
+ print_hex_dump(KERN_INFO, "hexdump of error ecc: ",
+ DUMP_PREFIX_NONE, 16, 1, error_ecc, 3, false);
+
+ pr_info("hexdump of correct data:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ correct_data, size, false);
+ print_hex_dump(KERN_INFO, "hexdump of correct ecc: ",
+ DUMP_PREFIX_NONE, 16, 1, correct_ecc, 3, false);
+}
+
+static int nand_ecc_test_run(const size_t size)
+{
+ int i;
+ int err = 0;
+ void *error_data;
+ void *error_ecc;
+ void *correct_data;
+ void *correct_ecc;
+
+ error_data = kmalloc(size, GFP_KERNEL);
+ error_ecc = kmalloc(3, GFP_KERNEL);
+ correct_data = kmalloc(size, GFP_KERNEL);
+ correct_ecc = kmalloc(3, GFP_KERNEL);
+
+ if (!error_data || !error_ecc || !correct_data || !correct_ecc) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ prandom_bytes(correct_data, size);
+ __nand_calculate_ecc(correct_data, size, correct_ecc);
+
+ for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
+ nand_ecc_test[i].prepare(error_data, error_ecc,
+ correct_data, correct_ecc, size);
+ err = nand_ecc_test[i].verify(error_data, error_ecc,
+ correct_data, size);
+
+ if (err) {
+ pr_err("not ok - %s-%zd\n",
+ nand_ecc_test[i].name, size);
+ dump_data_ecc(error_data, error_ecc,
+ correct_data, correct_ecc, size);
+ break;
+ }
+ pr_info("ok - %s-%zd\n",
+ nand_ecc_test[i].name, size);
+
+ err = mtdtest_relax();
+ if (err)
+ break;
+ }
+error:
+ kfree(error_data);
+ kfree(error_ecc);
+ kfree(correct_data);
+ kfree(correct_ecc);
+
+ return err;
+}
+
+#else
+
+static int nand_ecc_test_run(const size_t size)
+{
+ return 0;
+}
+
+#endif
+
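+/*
+ * The software ECC implementation produces a 3-byte code for either a
+ * 256- or a 512-byte data block, so run the whole battery for both sizes.
+ */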
+static int __init ecc_test_init(void)
+{
+ int err;
+
+ err = nand_ecc_test_run(256);
+ if (err)
+ return err;
+
+ return nand_ecc_test_run(512);
+}
+
+static void __exit ecc_test_exit(void)
+{
+}
+
+module_init(ecc_test_init);
+module_exit(ecc_test_exit);
+
+MODULE_DESCRIPTION("NAND ECC function test module");
+MODULE_AUTHOR("Akinobu Mita");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
new file mode 100644
index 000000000..34736bbcc
--- /dev/null
+++ b/drivers/mtd/tests/mtd_test.c
@@ -0,0 +1,113 @@
+#define pr_fmt(fmt) "mtd_test: " fmt
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+
+#include "mtd_test.h"
+
+int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
+{
+ int err;
+ struct erase_info ei;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ memset(&ei, 0, sizeof(struct erase_info));
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = mtd->erasesize;
+
+ err = mtd_erase(mtd, &ei);
+ if (err) {
+ pr_info("error %d while erasing EB %d\n", err, ebnum);
+ return err;
+ }
+
+ if (ei.state == MTD_ERASE_FAILED) {
+ pr_info("some erase error occurred at EB %d\n", ebnum);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
+{
+ int ret;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ ret = mtd_block_isbad(mtd, addr);
+ if (ret)
+ pr_info("block %d is bad\n", ebnum);
+
+ return ret;
+}
+
+int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt)
+{
+ int i, bad = 0;
+
+ if (!mtd_can_have_bb(mtd))
+ return 0;
+
+ pr_info("scanning for bad eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ bbt[i] = is_block_bad(mtd, eb + i) ? 1 : 0;
+ if (bbt[i])
+ bad += 1;
+ cond_resched();
+ }
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
+
+ return 0;
+}
+
+int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt)
+{
+ int err;
+ unsigned int i;
+
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = mtdtest_erase_eraseblock(mtd, eb + i);
+ if (err)
+ return err;
+ cond_resched();
+ }
+
+ return 0;
+}
+
+int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
+{
+ size_t read;
+ int err;
+
+ err = mtd_read(mtd, addr, size, &read, buf);
+ /* Ignore corrected ECC errors */
+ if (mtd_is_bitflip(err))
+ err = 0;
+ if (!err && read != size)
+ err = -EIO;
+ if (err)
+ pr_err("error: read failed at %#llx\n", addr);
+
+ return err;
+}
+
+int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ const void *buf)
+{
+ size_t written;
+ int err;
+
+ err = mtd_write(mtd, addr, size, &written, buf);
+ if (!err && written != size)
+ err = -EIO;
+ if (err)
+ pr_err("error: write failed at %#llx\n", addr);
+
+ return err;
+}
diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h
new file mode 100644
index 000000000..4b7bee17c
--- /dev/null
+++ b/drivers/mtd/tests/mtd_test.h
@@ -0,0 +1,23 @@
+#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+
+static inline int mtdtest_relax(void)
+{
+ cond_resched();
+ if (signal_pending(current)) {
+ pr_info("aborting test due to pending signal!\n");
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum);
+int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt);
+int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt);
+
+int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf);
+int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ const void *buf);
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
new file mode 100644
index 000000000..09a4ccac5
--- /dev/null
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright © 2012 NetCommWireless
+ * Iwo Mergler <Iwo.Mergler@netcommwireless.com.au>
+ *
+ * Test for multi-bit error recovery on a NAND page. This mostly tests the
+ * ECC controller / driver.
+ *
+ * There are two test modes:
+ *
+ * 0 - artificially inserting bit errors until the ECC fails
+ * This is the default method and fairly quick. It should
+ * be independent of the quality of the FLASH.
+ *
+ * 1 - re-writing the same pattern repeatedly until the ECC fails.
+ * This method relies on the physics of NAND FLASH to eventually
+ * generate '0' bits if '1' has been written sufficient times.
+ * Depending on the NAND, the first bit errors will appear after
+ * 1000 or more writes and then will usually snowball, reaching the
+ * limits of the ECC quickly.
+ *
+ * The test stops after 10000 cycles, should your FLASH be
+ * exceptionally good and not generate bit errors before that. Try
+ * a different page in that case.
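+ *
+ * Example invocation (the MTD device number 0 is only an illustration):
+ *
+ *   modprobe mtd_nandbiterrs dev=0 mode=0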
+ *
+ * Please note that neither of these tests will significantly 'use up' any
+ * FLASH endurance. Only a maximum of two erase operations will be performed.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mtd/mtd.h>
+#include <linux/err.h>
+#include <linux/mtd/nand.h>
+#include <linux/slab.h>
+#include "mtd_test.h"
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static unsigned page_offset;
+module_param(page_offset, uint, S_IRUGO);
+MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
+
+static unsigned seed;
+module_param(seed, uint, S_IRUGO);
+MODULE_PARM_DESC(seed, "Random seed");
+
+static int mode;
+module_param(mode, int, S_IRUGO);
+MODULE_PARM_DESC(mode, "0=incremental errors, 1=overwrite test");
+
+static unsigned max_overwrite = 10000;
+
+static loff_t offset; /* Offset of the page we're using. */
+static unsigned eraseblock; /* Eraseblock number for our page. */
+
+/* We assume that the ECC can correct up to a certain number
+ * of biterrors per subpage. */
+static unsigned subsize; /* Size of subpages */
+static unsigned subcount; /* Number of subpages per page */
+
+static struct mtd_info *mtd; /* MTD device */
+
+static uint8_t *wbuffer; /* One page write / compare buffer */
+static uint8_t *rbuffer; /* One page read buffer */
+
+/* 'random' bytes from known offsets */
+static uint8_t hash(unsigned offset)
+{
+ unsigned v = offset;
+ unsigned char c;
+ v ^= 0x7f7edfd3;
+ v = v ^ (v >> 3);
+ v = v ^ (v >> 5);
+ v = v ^ (v >> 13);
+ c = v & 0xFF;
+ /* Reverse bits of result. */
+ c = (c & 0x0F) << 4 | (c & 0xF0) >> 4;
+ c = (c & 0x33) << 2 | (c & 0xCC) >> 2;
+ c = (c & 0x55) << 1 | (c & 0xAA) >> 1;
+ return c;
+}
+
+/* Writes wbuffer to page */
+static int write_page(int log)
+{
+ if (log)
+ pr_info("write_page\n");
+
+ return mtdtest_write(mtd, offset, mtd->writesize, wbuffer);
+}
+
+/* Re-writes the data area while leaving the OOB alone. */
+static int rewrite_page(int log)
+{
+ int err = 0;
+ struct mtd_oob_ops ops;
+
+ if (log)
+ pr_info("rewrite page\n");
+
+ ops.mode = MTD_OPS_RAW; /* No ECC */
+ ops.len = mtd->writesize;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = wbuffer;
+ ops.oobbuf = NULL;
+
+ err = mtd_write_oob(mtd, offset, &ops);
+ if (err || ops.retlen != mtd->writesize) {
+ pr_err("error: write_oob failed (%d)\n", err);
+ if (!err)
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Reads page into rbuffer. Returns number of corrected bit errors (>=0)
+ * or error (<0) */
+static int read_page(int log)
+{
+ int err = 0;
+ size_t read;
+ struct mtd_ecc_stats oldstats;
+
+ if (log)
+ pr_info("read_page\n");
+
+ /* Snapshot ECC stats so the post-read delta gives the number of
+ * corrected bitflips */
+ memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
+
+ err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
+ if (err == -EUCLEAN)
+ err = mtd->ecc_stats.corrected - oldstats.corrected;
+
+ if (err < 0 || read != mtd->writesize) {
+ pr_err("error: read failed at %#llx\n", (long long)offset);
+ if (err >= 0)
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Verifies rbuffer against random sequence */
+static int verify_page(int log)
+{
+ unsigned i, errs = 0;
+
+ if (log)
+ pr_info("verify_page\n");
+
+ for (i = 0; i < mtd->writesize; i++) {
+ if (rbuffer[i] != hash(i+seed)) {
+ pr_err("Error: page offset %u, expected %02x, got %02x\n",
+ i, hash(i+seed), rbuffer[i]);
+ errs++;
+ }
+ }
+
+ if (errs)
+ return -EIO;
+ else
+ return 0;
+}
+
+#define CBIT(v, n) ((v) & (1 << (n)))
+#define BCLR(v, n) ((v) = (v) & ~(1 << (n)))
+
+/* Finds the first '1' bit in wbuffer starting at offset 'byte'
+ * and sets it to '0'. */
+static int insert_biterror(unsigned byte)
+{
+ int bit;
+
+ while (byte < mtd->writesize) {
+ for (bit = 7; bit >= 0; bit--) {
+ if (CBIT(wbuffer[byte], bit)) {
+ BCLR(wbuffer[byte], bit);
+ pr_info("Inserted biterror @ %u/%u\n", byte, bit);
+ return 0;
+ }
+ }
+ byte++;
+ }
+ pr_err("biterror: Failed to find a '1' bit\n");
+ return -EIO;
+}
+
+/* Writes 'random' data to page and then introduces deliberate bit
+ * errors into the page, while verifying each step. */
+static int incremental_errors_test(void)
+{
+ int err = 0;
+ unsigned i;
+ unsigned errs_per_subpage = 0;
+
+ pr_info("incremental biterrors test\n");
+
+ for (i = 0; i < mtd->writesize; i++)
+ wbuffer[i] = hash(i+seed);
+
+ err = write_page(1);
+ if (err)
+ goto exit;
+
+ while (1) {
+
+ err = rewrite_page(1);
+ if (err)
+ goto exit;
+
+ err = read_page(1);
+ if (err > 0)
+ pr_info("Read reported %d corrected bit errors\n", err);
+ if (err < 0) {
+ pr_err("After %d biterrors per subpage, read reported error %d\n",
+ errs_per_subpage, err);
+ err = 0;
+ goto exit;
+ }
+
+ err = verify_page(1);
+ if (err) {
+ pr_err("ECC failure, read data is incorrect despite read success\n");
+ goto exit;
+ }
+
+ pr_info("Successfully corrected %d bit errors per subpage\n",
+ errs_per_subpage);
+
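+ /* Inject one more bit error into every subpage, then loop again
+ * to see whether the ECC still copes. */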
+ for (i = 0; i < subcount; i++) {
+ err = insert_biterror(i * subsize);
+ if (err < 0)
+ goto exit;
+ }
+ errs_per_subpage++;
+ }
+
+exit:
+ return err;
+}
+
+
+/* Writes 'random' data to page and then re-writes that same data repeatedly.
+ This eventually develops bit errors (bits written as '1' will slowly become
+ '0'), which are corrected as far as the ECC is capable of. */
+static int overwrite_test(void)
+{
+ int err = 0;
+ unsigned i;
+ unsigned max_corrected = 0;
+ unsigned opno = 0;
+ /* We don't expect more than this many correctable bit errors per
+ * page. */
+ #define MAXBITS 512
+ static unsigned bitstats[MAXBITS]; /* bit error histogram. */
+
+ memset(bitstats, 0, sizeof(bitstats));
+
+ pr_info("overwrite biterrors test\n");
+
+ for (i = 0; i < mtd->writesize; i++)
+ wbuffer[i] = hash(i+seed);
+
+ err = write_page(1);
+ if (err)
+ goto exit;
+
+ while (opno < max_overwrite) {
+
+ err = rewrite_page(0);
+ if (err)
+ break;
+
+ err = read_page(0);
+ if (err >= 0) {
+ if (err >= MAXBITS) {
+ pr_info("Implausible number of bit errors corrected\n");
+ err = -EIO;
+ break;
+ }
+ bitstats[err]++;
+ if (err > max_corrected) {
+ max_corrected = err;
+ pr_info("Read reported %d corrected bit errors\n",
+ err);
+ }
+ } else { /* err < 0 */
+ pr_info("Read reported error %d\n", err);
+ err = 0;
+ break;
+ }
+
+ err = verify_page(0);
+ if (err) {
+ bitstats[max_corrected] = opno;
+ pr_info("ECC failure, read data is incorrect despite read success\n");
+ break;
+ }
+
+ err = mtdtest_relax();
+ if (err)
+ break;
+
+ opno++;
+ }
+
+ /* At this point bitstats[0] contains the number of ops with no bit
+ * errors, bitstats[1] the number of ops with 1 bit error, etc. */
+ pr_info("Bit error histogram (%d operations total):\n", opno);
+ for (i = 0; i < max_corrected; i++)
+ pr_info("Page reads with %3d corrected bit errors: %d\n",
+ i, bitstats[i]);
+
+exit:
+ return err;
+}
+
+static int __init mtd_nandbiterrs_init(void)
+{
+ int err = 0;
+
+ printk("\n");
+ printk(KERN_INFO "==================================================\n");
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ goto exit_mtddev;
+ }
+
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
+ err = -ENODEV;
+ goto exit_nand;
+ }
+
+ pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ mtd->writesize, mtd->oobsize);
+
+ subsize = mtd->writesize >> mtd->subpage_sft;
+ subcount = mtd->writesize / subsize;
+
+ pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize);
+
+ offset = (loff_t)page_offset * mtd->writesize;
+ eraseblock = mtd_div_by_eb(offset, mtd);
+
+ pr_info("Using page=%u, offset=%llu, eraseblock=%u\n",
+ page_offset, offset, eraseblock);
+
+ wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!wbuffer) {
+ err = -ENOMEM;
+ goto exit_wbuffer;
+ }
+
+ rbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!rbuffer) {
+ err = -ENOMEM;
+ goto exit_rbuffer;
+ }
+
+ err = mtdtest_erase_eraseblock(mtd, eraseblock);
+ if (err)
+ goto exit_error;
+
+ if (mode == 0)
+ err = incremental_errors_test();
+ else
+ err = overwrite_test();
+
+ if (err)
+ goto exit_error;
+
+ /* We leave the block un-erased in case of test failure. */
+ err = mtdtest_erase_eraseblock(mtd, eraseblock);
+ if (err)
+ goto exit_error;
+
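+ /* err is left at -EIO even on success, so module init fails and the
+ * one-shot test module is not kept loaded. */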
+ err = -EIO;
+ pr_info("finished successfully.\n");
+ printk(KERN_INFO "==================================================\n");
+
+exit_error:
+ kfree(rbuffer);
+exit_rbuffer:
+ kfree(wbuffer);
+exit_wbuffer:
+ /* Nothing */
+exit_nand:
+ put_mtd_device(mtd);
+exit_mtddev:
+ return err;
+}
+
+static void __exit mtd_nandbiterrs_exit(void)
+{
+ return;
+}
+
+module_init(mtd_nandbiterrs_init);
+module_exit(mtd_nandbiterrs_exit);
+
+MODULE_DESCRIPTION("NAND bit error recovery test");
+MODULE_AUTHOR("Iwo Mergler");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
new file mode 100644
index 000000000..8e8525f02
--- /dev/null
+++ b/drivers/mtd/tests/oobtest.c
@@ -0,0 +1,724 @@
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test OOB read and write on MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+static int bitflip_limit;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+module_param(bitflip_limit, int, S_IRUGO);
+MODULE_PARM_DESC(bitflip_limit, "Max. allowed bitflips per page");
+
+static struct mtd_info *mtd;
+static unsigned char *readbuf;
+static unsigned char *writebuf;
+static unsigned char *bbt;
+
+static int ebcnt;
+static int pgcnt;
+static int errcnt;
+static int use_offset;
+static int use_len;
+static int use_len_max;
+static int vary_offset;
+static struct rnd_state rnd_state;
+
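+/*
+ * Walk through every (offset, length) combination of the available OOB
+ * area: shrink the length by one per call and, once it is exhausted,
+ * advance the start offset (wrapping at the end) and restart with the
+ * maximal remaining length.
+ */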
+static void do_vary_offset(void)
+{
+ use_len -= 1;
+ if (use_len < 1) {
+ use_offset += 1;
+ if (use_offset >= use_len_max)
+ use_offset = 0;
+ use_len = use_len_max - use_offset;
+ }
+}
+
+static int write_eraseblock(int ebnum)
+{
+ int i;
+ struct mtd_oob_ops ops;
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
+ for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = use_len;
+ ops.oobretlen = 0;
+ ops.ooboffs = use_offset;
+ ops.datbuf = NULL;
+ ops.oobbuf = writebuf + (use_len_max * i) + use_offset;
+ err = mtd_write_oob(mtd, addr, &ops);
+ if (err || ops.oobretlen != use_len) {
+ pr_err("error: writeoob failed at %#llx\n",
+ (long long)addr);
+ pr_err("error: use_len %d, use_offset %d\n",
+ use_len, use_offset);
+ errcnt += 1;
+ return err ? err : -1;
+ }
+ if (vary_offset)
+ do_vary_offset();
+ }
+
+ return err;
+}
+
+static int write_whole_device(void)
+{
+ int err;
+ unsigned int i;
+
+ pr_info("writing OOBs of whole device\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock(i);
+ if (err)
+ return err;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
+ }
+ pr_info("written %u eraseblocks\n", i);
+ return 0;
+}
+
+/*
+ * Display the address, offset and data bytes at comparison failure.
+ * Return number of bitflips encountered.
+ */
+static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res;
+ size_t i = 0;
+ size_t bitflips = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
+ res = *su1 ^ *su2;
+ if (res) {
+ pr_info("error @addr[0x%lx:0x%zx] 0x%x -> 0x%x diff 0x%x\n",
+ (unsigned long)addr, i, *su1, *su2, res);
+ bitflips += hweight8(res);
+ }
+ }
+
+ return bitflips;
+}
+
+/*
+ * Compare with 0xff and show the address, offset and data bytes at
+ * comparison failure. Return number of bitflips encountered.
+ */
+static size_t memffshow(loff_t addr, loff_t offset, const void *cs,
+ size_t count)
+{
+ const unsigned char *su1;
+ int res;
+ size_t i = 0;
+ size_t bitflips = 0;
+
+ for (su1 = cs; 0 < count; ++su1, count--, i++) {
+ res = *su1 ^ 0xff;
+ if (res) {
+ pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0xff diff 0x%x\n",
+ (unsigned long)addr, (unsigned long)offset + i,
+ *su1, res);
+ bitflips += hweight8(res);
+ }
+ }
+
+ return bitflips;
+}
+
+static int verify_eraseblock(int ebnum)
+{
+ int i;
+ struct mtd_oob_ops ops;
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ size_t bitflips;
+
+ prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
+ for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = use_len;
+ ops.oobretlen = 0;
+ ops.ooboffs = use_offset;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ err = mtd_read_oob(mtd, addr, &ops);
+ if (err || ops.oobretlen != use_len) {
+ pr_err("error: readoob failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ return err ? err : -1;
+ }
+
+ bitflips = memcmpshow(addr, readbuf,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len);
+ if (bitflips > bitflip_limit) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ if (errcnt > 1000) {
+ pr_err("error: too many errors\n");
+ return -1;
+ }
+ } else if (bitflips) {
+ pr_info("ignoring error as within bitflip_limit\n");
+ }
+
+ if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
+ int k;
+
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->ecclayout->oobavail;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ err = mtd_read_oob(mtd, addr, &ops);
+ if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
+ pr_err("error: readoob failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ return err ? err : -1;
+ }
+ bitflips = memcmpshow(addr, readbuf + use_offset,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len);
+
+ /* verify pre-offset area for 0xff */
+ bitflips += memffshow(addr, 0, readbuf, use_offset);
+
+ /* verify post-(use_offset + use_len) area for 0xff */
+ k = use_offset + use_len;
+ bitflips += memffshow(addr, k, readbuf + k,
+ mtd->ecclayout->oobavail - k);
+
+ if (bitflips > bitflip_limit) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ if (errcnt > 1000) {
+ pr_err("error: too many errors\n");
+ return -1;
+ }
+ } else if (bitflips) {
+ pr_info("ignoring errors as within bitflip limit\n");
+ }
+ }
+ if (vary_offset)
+ do_vary_offset();
+ }
+ return err;
+}
+
+static int verify_eraseblock_in_one_go(int ebnum)
+{
+ struct mtd_oob_ops ops;
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ size_t len = mtd->ecclayout->oobavail * pgcnt;
+ size_t oobavail = mtd->ecclayout->oobavail;
+ size_t bitflips;
+ int i;
+
+ prandom_bytes_state(&rnd_state, writebuf, len);
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = len;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+
+ /* read entire block's OOB at one go */
+ err = mtd_read_oob(mtd, addr, &ops);
+ if (err || ops.oobretlen != len) {
+ pr_err("error: readoob failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ return err ? err : -1;
+ }
+
+ /* verify one page OOB at a time for bitflip per page limit check */
+ for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
+ bitflips = memcmpshow(addr, readbuf + (i * oobavail),
+ writebuf + (i * oobavail), oobavail);
+ if (bitflips > bitflip_limit) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ if (errcnt > 1000) {
+ pr_err("error: too many errors\n");
+ return -1;
+ }
+ } else if (bitflips) {
+ pr_info("ignoring error as within bitflip_limit\n");
+ }
+ }
+
+ return err;
+}
+
+static int verify_all_eraseblocks(void)
+{
+ int err;
+ unsigned int i;
+
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock(i);
+ if (err)
+ return err;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+ return 0;
+}
+
+static int __init mtd_oobtest_init(void)
+{
+ int err = 0;
+ unsigned int i;
+ uint64_t tmp;
+ struct mtd_oob_ops ops;
+ loff_t addr = 0, addr0;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / mtd->writesize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ mtd->writesize, ebcnt, pgcnt, mtd->oobsize);
+
+ err = -ENOMEM;
+ readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!readbuf)
+ goto out;
+ writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!writebuf)
+ goto out;
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ use_offset = 0;
+ use_len = mtd->ecclayout->oobavail;
+ use_len_max = mtd->ecclayout->oobavail;
+ vary_offset = 0;
+
+ /* First test: write all OOB, read it back and verify */
+ pr_info("test 1 of 5\n");
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ prandom_seed_state(&rnd_state, 1);
+ err = write_whole_device();
+ if (err)
+ goto out;
+
+ prandom_seed_state(&rnd_state, 1);
+ err = verify_all_eraseblocks();
+ if (err)
+ goto out;
+
+ /*
+ * Second test: write all OOB, a block at a time, read it back and
+ * verify.
+ */
+ pr_info("test 2 of 5\n");
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ prandom_seed_state(&rnd_state, 3);
+ err = write_whole_device();
+ if (err)
+ goto out;
+
+ /* Check all eraseblocks */
+ prandom_seed_state(&rnd_state, 3);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock_in_one_go(i);
+ if (err)
+ goto out;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ /*
+ * Third test: write OOB at varying offsets and lengths, read it back
+ * and verify.
+ */
+ pr_info("test 3 of 5\n");
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks */
+ use_offset = 0;
+ use_len = mtd->ecclayout->oobavail;
+ use_len_max = mtd->ecclayout->oobavail;
+ vary_offset = 1;
+ prandom_seed_state(&rnd_state, 5);
+
+ err = write_whole_device();
+ if (err)
+ goto out;
+
+ /* Check all eraseblocks */
+ use_offset = 0;
+ use_len = mtd->ecclayout->oobavail;
+ use_len_max = mtd->ecclayout->oobavail;
+ vary_offset = 1;
+ prandom_seed_state(&rnd_state, 5);
+ err = verify_all_eraseblocks();
+ if (err)
+ goto out;
+
+ use_offset = 0;
+ use_len = mtd->ecclayout->oobavail;
+ use_len_max = mtd->ecclayout->oobavail;
+ vary_offset = 0;
+
+ /* Fourth test: try to write off end of device */
+ pr_info("test 4 of 5\n");
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
+ addr0 += mtd->erasesize;
+
+ /* Attempt to write off end of OOB */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = 1;
+ ops.oobretlen = 0;
+ ops.ooboffs = mtd->ecclayout->oobavail;
+ ops.datbuf = NULL;
+ ops.oobbuf = writebuf;
+ pr_info("attempting to start write past end of OOB\n");
+ pr_info("an error is expected...\n");
+ err = mtd_write_oob(mtd, addr0, &ops);
+ if (err) {
+ pr_info("error occurred as expected\n");
+ err = 0;
+ } else {
+ pr_err("error: can write past end of OOB\n");
+ errcnt += 1;
+ }
+
+ /* Attempt to read off end of OOB */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = 1;
+ ops.oobretlen = 0;
+ ops.ooboffs = mtd->ecclayout->oobavail;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ pr_info("attempting to start read past end of OOB\n");
+ pr_info("an error is expected...\n");
+ err = mtd_read_oob(mtd, addr0, &ops);
+ if (err) {
+ pr_info("error occurred as expected\n");
+ err = 0;
+ } else {
+ pr_err("error: can read past end of OOB\n");
+ errcnt += 1;
+ }
+
+ if (bbt[ebcnt - 1])
+ pr_info("skipping end of device tests because last "
+ "block is bad\n");
+ else {
+ /* Attempt to write off end of device */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->ecclayout->oobavail + 1;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = writebuf;
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (err) {
+ pr_info("error occurred as expected\n");
+ err = 0;
+ } else {
+ pr_err("error: wrote past end of device\n");
+ errcnt += 1;
+ }
+
+ /* Attempt to read off end of device */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->ecclayout->oobavail + 1;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (err) {
+ pr_info("error occurred as expected\n");
+ err = 0;
+ } else {
+ pr_err("error: read past end of device\n");
+ errcnt += 1;
+ }
+
+ err = mtdtest_erase_eraseblock(mtd, ebcnt - 1);
+ if (err)
+ goto out;
+
+ /* Attempt to write off end of device */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->ecclayout->oobavail;
+ ops.oobretlen = 0;
+ ops.ooboffs = 1;
+ ops.datbuf = NULL;
+ ops.oobbuf = writebuf;
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (err) {
+ pr_info("error occurred as expected\n");
+ err = 0;
+ } else {
+ pr_err("error: wrote past end of device\n");
+ errcnt += 1;
+ }
+
+ /* Attempt to read off end of device */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->ecclayout->oobavail;
+ ops.oobretlen = 0;
+ ops.ooboffs = 1;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (err) {
+ pr_info("error occurred as expected\n");
+ err = 0;
+ } else {
+ pr_err("error: read past end of device\n");
+ errcnt += 1;
+ }
+ }
+
+ /* Fifth test: write / read across block boundaries */
+ pr_info("test 5 of 5\n");
+
+ /* Erase all eraseblocks */
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks */
+ prandom_seed_state(&rnd_state, 11);
+ pr_info("writing OOBs of whole device\n");
+ for (i = 0; i < ebcnt - 1; ++i) {
+ int cnt = 2;
+ int pg;
+ size_t sz = mtd->ecclayout->oobavail;
+ if (bbt[i] || bbt[i + 1])
+ continue;
+ addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
+ prandom_bytes_state(&rnd_state, writebuf, sz * cnt);
+ for (pg = 0; pg < cnt; ++pg) {
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = sz;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = writebuf + pg * sz;
+ err = mtd_write_oob(mtd, addr, &ops);
+ if (err)
+ goto out;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+
+ addr += mtd->writesize;
+ }
+ }
+ pr_info("written %u eraseblocks\n", i);
+
+ /* Check all eraseblocks */
+ prandom_seed_state(&rnd_state, 11);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt - 1; ++i) {
+ if (bbt[i] || bbt[i + 1])
+ continue;
+ prandom_bytes_state(&rnd_state, writebuf,
+ mtd->ecclayout->oobavail * 2);
+ addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->ecclayout->oobavail * 2;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ err = mtd_read_oob(mtd, addr, &ops);
+ if (err)
+ goto out;
+ if (memcmpshow(addr, readbuf, writebuf,
+ mtd->ecclayout->oobavail * 2)) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ if (errcnt > 1000) {
+ pr_err("error: too many errors\n");
+ goto out;
+ }
+ }
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ pr_info("finished with %d errors\n", errcnt);
+out:
+ kfree(bbt);
+ kfree(writebuf);
+ kfree(readbuf);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_oobtest_init);
+
+static void __exit mtd_oobtest_exit(void)
+{
+ return;
+}
+module_exit(mtd_oobtest_exit);
+
+MODULE_DESCRIPTION("Out-of-band test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c
new file mode 100644
index 000000000..ba1890d56
--- /dev/null
+++ b/drivers/mtd/tests/pagetest.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test page read and write on MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *twopages;
+static unsigned char *writebuf;
+static unsigned char *boundary;
+static unsigned char *bbt;
+
+static int pgsize;
+static int bufsize;
+static int ebcnt;
+static int pgcnt;
+static int errcnt;
+static struct rnd_state rnd_state;
+
+static int write_eraseblock(int ebnum)
+{
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
+ cond_resched();
+ return mtdtest_write(mtd, addr, mtd->erasesize, writebuf);
+}
+
+static int verify_eraseblock(int ebnum)
+{
+ uint32_t j;
+ int err = 0, i;
+ loff_t addr0, addrn;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
+ addr0 += mtd->erasesize;
+
+ addrn = mtd->size;
+ for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
+ addrn -= mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
+ for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
+ /* Do a read to set the internal dataRAMs to different data */
+ err = mtdtest_read(mtd, addr0, bufsize, twopages);
+ if (err)
+ return err;
+ err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
+ if (err)
+ return err;
+ memset(twopages, 0, bufsize);
+ err = mtdtest_read(mtd, addr, bufsize, twopages);
+ if (err)
+ break;
+ if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ }
+ }
+ /* Check boundary between eraseblocks */
+ if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
+ struct rnd_state old_state = rnd_state;
+
+ /* Do a read to set the internal dataRAMs to different data */
+ err = mtdtest_read(mtd, addr0, bufsize, twopages);
+ if (err)
+ return err;
+ err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
+ if (err)
+ return err;
+ memset(twopages, 0, bufsize);
+ err = mtdtest_read(mtd, addr, bufsize, twopages);
+ if (err)
+ return err;
+ memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
+ prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);
+ if (memcmp(twopages, boundary, bufsize)) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ }
+ rnd_state = old_state;
+ }
+ return err;
+}
+
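+/*
+ * Read the first page before and after reading several other pages; the
+ * two copies must be identical, which catches corruption of data held in
+ * the chip's internal page buffers between reads.
+ */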
+static int crosstest(void)
+{
+ int err = 0, i;
+ loff_t addr, addr0, addrn;
+ unsigned char *pp1, *pp2, *pp3, *pp4;
+
+ pr_info("crosstest\n");
+ pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
+ if (!pp1)
+ return -ENOMEM;
+ pp2 = pp1 + pgsize;
+ pp3 = pp2 + pgsize;
+ pp4 = pp3 + pgsize;
+ memset(pp1, 0, pgsize * 4);
+
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
+ addr0 += mtd->erasesize;
+
+ addrn = mtd->size;
+ for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
+ addrn -= mtd->erasesize;
+
+ /* Read 2nd-to-last page to pp1 */
+ addr = addrn - pgsize - pgsize;
+ err = mtdtest_read(mtd, addr, pgsize, pp1);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read 3rd-to-last page to pp1 */
+ addr = addrn - pgsize - pgsize - pgsize;
+ err = mtdtest_read(mtd, addr, pgsize, pp1);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read first page to pp2 */
+ addr = addr0;
+ pr_info("reading page at %#llx\n", (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp2);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read last page to pp3 */
+ addr = addrn - pgsize;
+ pr_info("reading page at %#llx\n", (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp3);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read first page again to pp4 */
+ addr = addr0;
+ pr_info("reading page at %#llx\n", (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp4);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* pp2 and pp4 should be the same */
+ pr_info("verifying pages read at %#llx match\n",
+ (long long)addr0);
+ if (memcmp(pp2, pp4, pgsize)) {
+ pr_err("verify failed!\n");
+ errcnt += 1;
+ } else if (!err)
+ pr_info("crosstest ok\n");
+ kfree(pp1);
+ return err;
+}
+
+static int erasecrosstest(void)
+{
+ int err = 0, i, ebnum, ebnum2;
+ loff_t addr0;
+ char *readbuf = twopages;
+
+ pr_info("erasecrosstest\n");
+
+ ebnum = 0;
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i) {
+ addr0 += mtd->erasesize;
+ ebnum += 1;
+ }
+
+ ebnum2 = ebcnt - 1;
+ while (ebnum2 && bbt[ebnum2])
+ ebnum2 -= 1;
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("writing 1st page of block %d\n", ebnum);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
+ strcpy(writebuf, "There is no data like this!");
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
+
+ pr_info("reading 1st page of block %d\n", ebnum);
+ memset(readbuf, 0, pgsize);
+ err = mtdtest_read(mtd, addr0, pgsize, readbuf);
+ if (err)
+ return err;
+
+ pr_info("verifying 1st page of block %d\n", ebnum);
+ if (memcmp(writebuf, readbuf, pgsize)) {
+ pr_err("verify failed!\n");
+ errcnt += 1;
+ return -1;
+ }
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("writing 1st page of block %d\n", ebnum);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
+ strcpy(writebuf, "There is no data like this!");
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
+
+ pr_info("erasing block %d\n", ebnum2);
+ err = mtdtest_erase_eraseblock(mtd, ebnum2);
+ if (err)
+ return err;
+
+ pr_info("reading 1st page of block %d\n", ebnum);
+ memset(readbuf, 0, pgsize);
+ err = mtdtest_read(mtd, addr0, pgsize, readbuf);
+ if (err)
+ return err;
+
+ pr_info("verifying 1st page of block %d\n", ebnum);
+ if (memcmp(writebuf, readbuf, pgsize)) {
+ pr_err("verify failed!\n");
+ errcnt += 1;
+ return -1;
+ }
+
+ if (!err)
+ pr_info("erasecrosstest ok\n");
+ return err;
+}
+
+static int erasetest(void)
+{
+ int err = 0, i, ebnum, ok = 1;
+ loff_t addr0;
+
+ pr_info("erasetest\n");
+
+ ebnum = 0;
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i) {
+ addr0 += mtd->erasesize;
+ ebnum += 1;
+ }
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("writing 1st page of block %d\n", ebnum);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("reading 1st page of block %d\n", ebnum);
+ err = mtdtest_read(mtd, addr0, pgsize, twopages);
+ if (err)
+ return err;
+
+ pr_info("verifying 1st page of block %d is all 0xff\n",
+ ebnum);
+ for (i = 0; i < pgsize; ++i)
+ if (twopages[i] != 0xff) {
+ pr_err("verifying all 0xff failed at %d\n",
+ i);
+ errcnt += 1;
+ ok = 0;
+ break;
+ }
+
+ if (ok && !err)
+ pr_info("erasetest ok\n");
+
+ return err;
+}
+
+static int __init mtd_pagetest_init(void)
+{
+ int err = 0;
+ uint64_t tmp;
+ uint32_t i;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
+ goto out;
+ }
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / mtd->writesize;
+ pgsize = mtd->writesize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ err = -ENOMEM;
+ bufsize = pgsize * 2;
+ writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!writebuf)
+ goto out;
+ twopages = kmalloc(bufsize, GFP_KERNEL);
+ if (!twopages)
+ goto out;
+ boundary = kmalloc(bufsize, GFP_KERNEL);
+ if (!boundary)
+ goto out;
+
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Erase all eraseblocks */
+ pr_info("erasing whole device\n");
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+ pr_info("erased %u eraseblocks\n", ebcnt);
+
+ /* Write all eraseblocks */
+ prandom_seed_state(&rnd_state, 1);
+ pr_info("writing whole device\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock(i);
+ if (err)
+ goto out;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("written %u eraseblocks\n", i);
+
+ /* Check all eraseblocks */
+ prandom_seed_state(&rnd_state, 1);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock(i);
+ if (err)
+ goto out;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ err = crosstest();
+ if (err)
+ goto out;
+
+ err = erasecrosstest();
+ if (err)
+ goto out;
+
+ err = erasetest();
+ if (err)
+ goto out;
+
+ pr_info("finished with %d errors\n", errcnt);
+out:
+
+ kfree(bbt);
+ kfree(boundary);
+ kfree(twopages);
+ kfree(writebuf);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_pagetest_init);
+
+static void __exit mtd_pagetest_exit(void)
+{
+ return;
+}
+module_exit(mtd_pagetest_exit);
+
+MODULE_DESCRIPTION("NAND page test");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/readtest.c b/drivers/mtd/tests/readtest.c
new file mode 100644
index 000000000..58df07acd
--- /dev/null
+++ b/drivers/mtd/tests/readtest.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Check MTD device read.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *iobuf;
+static unsigned char *iobuf1;
+static unsigned char *bbt;
+
+static int pgsize;
+static int ebcnt;
+static int pgcnt;
+
+static int read_eraseblock_by_page(int ebnum)
+{
+ int i, ret, err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ void *buf = iobuf;
+ void *oobbuf = iobuf1;
+
+ for (i = 0; i < pgcnt; i++) {
+		memset(buf, 0, pgsize);
+ ret = mtdtest_read(mtd, addr, pgsize, buf);
+ if (ret) {
+ if (!err)
+ err = ret;
+ }
+ if (mtd->oobsize) {
+ struct mtd_oob_ops ops;
+
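+			/* read only the OOB area of this page, no main data */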
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = oobbuf;
+ ret = mtd_read_oob(mtd, addr, &ops);
+ if ((ret && !mtd_is_bitflip(ret)) ||
+ ops.oobretlen != mtd->oobsize) {
+ pr_err("error: read oob failed at "
+ "%#llx\n", (long long)addr);
+ if (!err)
+ err = ret;
+ if (!err)
+ err = -EINVAL;
+ }
+ oobbuf += mtd->oobsize;
+ }
+ addr += pgsize;
+ buf += pgsize;
+ }
+
+ return err;
+}
+
+static void dump_eraseblock(int ebnum)
+{
+ int i, j, n;
+ char line[128];
+ int pg, oob;
+
+ pr_info("dumping eraseblock %d\n", ebnum);
+ n = mtd->erasesize;
+ for (i = 0; i < n;) {
+ char *p = line;
+
+ p += sprintf(p, "%05x: ", i);
+ for (j = 0; j < 32 && i < n; j++, i++)
+ p += sprintf(p, "%02x", (unsigned int)iobuf[i]);
+ printk(KERN_CRIT "%s\n", line);
+ cond_resched();
+ }
+ if (!mtd->oobsize)
+ return;
+ pr_info("dumping oob from eraseblock %d\n", ebnum);
+ n = mtd->oobsize;
+ for (pg = 0, i = 0; pg < pgcnt; pg++)
+ for (oob = 0; oob < n;) {
+ char *p = line;
+
+ p += sprintf(p, "%05x: ", i);
+ for (j = 0; j < 32 && oob < n; j++, oob++, i++)
+ p += sprintf(p, "%02x",
+ (unsigned int)iobuf1[i]);
+ printk(KERN_CRIT "%s\n", line);
+ cond_resched();
+ }
+}
+
+static int __init mtd_readtest_init(void)
+{
+ uint64_t tmp;
+ int err, i;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: Cannot get MTD device\n");
+ return err;
+ }
+
+ if (mtd->writesize == 1) {
+ pr_info("not NAND flash, assume page size is 512 "
+ "bytes.\n");
+ pgsize = 512;
+ } else
+ pgsize = mtd->writesize;
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / pgsize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ err = -ENOMEM;
+ iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!iobuf)
+ goto out;
+ iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!iobuf1)
+ goto out;
+
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Read all eraseblocks 1 page at a time */
+ pr_info("testing page read\n");
+ for (i = 0; i < ebcnt; ++i) {
+ int ret;
+
+ if (bbt[i])
+ continue;
+ ret = read_eraseblock_by_page(i);
+ if (ret) {
+ dump_eraseblock(i);
+ if (!err)
+ err = ret;
+ }
+
+ ret = mtdtest_relax();
+ if (ret) {
+ err = ret;
+ goto out;
+ }
+ }
+
+ if (err)
+ pr_info("finished with errors\n");
+ else
+ pr_info("finished\n");
+
+out:
+
+ kfree(iobuf);
+ kfree(iobuf1);
+ kfree(bbt);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_readtest_init);
+
+static void __exit mtd_readtest_exit(void)
+{
+ return;
+}
+module_exit(mtd_readtest_exit);
+
+MODULE_DESCRIPTION("Read test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/speedtest.c b/drivers/mtd/tests/speedtest.c
new file mode 100644
index 000000000..5a6f31af0
--- /dev/null
+++ b/drivers/mtd/tests/speedtest.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test read and write speed of a MTD device.
+ *
+ * Author: Adrian Hunter <adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static int count;
+module_param(count, int, S_IRUGO);
+MODULE_PARM_DESC(count, "Maximum number of eraseblocks to use "
+ "(0 means use all)");
+
+static struct mtd_info *mtd;
+static unsigned char *iobuf;
+static unsigned char *bbt;
+
+static int pgsize;
+static int ebcnt;
+static int pgcnt;
+static int goodebcnt;
+static struct timeval start, finish;
+
+static int multiblock_erase(int ebnum, int blocks)
+{
+ int err;
+ struct erase_info ei;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ memset(&ei, 0, sizeof(struct erase_info));
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = mtd->erasesize * blocks;
+
+ err = mtd_erase(mtd, &ei);
+ if (err) {
+ pr_err("error %d while erasing EB %d, blocks %d\n",
+ err, ebnum, blocks);
+ return err;
+ }
+
+ if (ei.state == MTD_ERASE_FAILED) {
+ pr_err("some erase error occurred at EB %d,"
+ "blocks %d\n", ebnum, blocks);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int write_eraseblock(int ebnum)
+{
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ return mtdtest_write(mtd, addr, mtd->erasesize, iobuf);
+}
+
+static int write_eraseblock_by_page(int ebnum)
+{
+ int i, err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ void *buf = iobuf;
+
+ for (i = 0; i < pgcnt; i++) {
+ err = mtdtest_write(mtd, addr, pgsize, buf);
+ if (err)
+ break;
+ addr += pgsize;
+ buf += pgsize;
+ }
+
+ return err;
+}
+
+static int write_eraseblock_by_2pages(int ebnum)
+{
+ size_t sz = pgsize * 2;
+ int i, n = pgcnt / 2, err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ void *buf = iobuf;
+
+ for (i = 0; i < n; i++) {
+ err = mtdtest_write(mtd, addr, sz, buf);
+ if (err)
+ return err;
+ addr += sz;
+ buf += sz;
+ }
+ if (pgcnt % 2)
+ err = mtdtest_write(mtd, addr, pgsize, buf);
+
+ return err;
+}
+
+static int read_eraseblock(int ebnum)
+{
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ return mtdtest_read(mtd, addr, mtd->erasesize, iobuf);
+}
+
+static int read_eraseblock_by_page(int ebnum)
+{
+ int i, err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ void *buf = iobuf;
+
+ for (i = 0; i < pgcnt; i++) {
+ err = mtdtest_read(mtd, addr, pgsize, buf);
+ if (err)
+ break;
+ addr += pgsize;
+ buf += pgsize;
+ }
+
+ return err;
+}
+
+static int read_eraseblock_by_2pages(int ebnum)
+{
+ size_t sz = pgsize * 2;
+ int i, n = pgcnt / 2, err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ void *buf = iobuf;
+
+ for (i = 0; i < n; i++) {
+ err = mtdtest_read(mtd, addr, sz, buf);
+ if (err)
+ return err;
+ addr += sz;
+ buf += sz;
+ }
+ if (pgcnt % 2)
+ err = mtdtest_read(mtd, addr, pgsize, buf);
+
+ return err;
+}
+
+static inline void start_timing(void)
+{
+ do_gettimeofday(&start);
+}
+
+static inline void stop_timing(void)
+{
+ do_gettimeofday(&finish);
+}
+
+static long calc_speed(void)
+{
+ uint64_t k;
+ long ms;
+
+ ms = (finish.tv_sec - start.tv_sec) * 1000 +
+ (finish.tv_usec - start.tv_usec) / 1000;
+ if (ms == 0)
+ return 0;
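+	/* KiB/s: good eraseblocks times KiB per eraseblock, scaled to one second */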
+ k = (uint64_t)goodebcnt * (mtd->erasesize / 1024) * 1000;
+ do_div(k, ms);
+ return k;
+}
+
+static int __init mtd_speedtest_init(void)
+{
+ int err, i, blocks, j, k;
+ long speed;
+ uint64_t tmp;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ if (count)
+ pr_info("MTD device: %d count: %d\n", dev, count);
+ else
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (mtd->writesize == 1) {
+ pr_info("not NAND flash, assume page size is 512 "
+ "bytes.\n");
+ pgsize = 512;
+ } else
+ pgsize = mtd->writesize;
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / pgsize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ if (count > 0 && count < ebcnt)
+ ebcnt = count;
+
+ err = -ENOMEM;
+ iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!iobuf)
+ goto out;
+
+ prandom_bytes(iobuf, mtd->erasesize);
+
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+ for (i = 0; i < ebcnt; i++) {
+ if (!bbt[i])
+ goodebcnt++;
+ }
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks, 1 eraseblock at a time */
+ pr_info("testing eraseblock write speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("eraseblock write speed is %ld KiB/s\n", speed);
+
+ /* Read all eraseblocks, 1 eraseblock at a time */
+ pr_info("testing eraseblock read speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = read_eraseblock(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("eraseblock read speed is %ld KiB/s\n", speed);
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks, 1 page at a time */
+ pr_info("testing page write speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock_by_page(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("page write speed is %ld KiB/s\n", speed);
+
+ /* Read all eraseblocks, 1 page at a time */
+ pr_info("testing page read speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = read_eraseblock_by_page(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("page read speed is %ld KiB/s\n", speed);
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks, 2 pages at a time */
+ pr_info("testing 2 page write speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock_by_2pages(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("2 page write speed is %ld KiB/s\n", speed);
+
+ /* Read all eraseblocks, 2 pages at a time */
+ pr_info("testing 2 page read speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = read_eraseblock_by_2pages(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("2 page read speed is %ld KiB/s\n", speed);
+
+ /* Erase all eraseblocks */
+ pr_info("Testing erase speed\n");
+ start_timing();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+ stop_timing();
+ speed = calc_speed();
+ pr_info("erase speed is %ld KiB/s\n", speed);
+
+ /* Multi-block erase all eraseblocks */
+ for (k = 1; k < 7; k++) {
+ blocks = 1 << k;
+ pr_info("Testing %dx multi-block erase speed\n",
+ blocks);
+ start_timing();
+ for (i = 0; i < ebcnt; ) {
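+			/* find how long a run of good eraseblocks (at most 'blocks') starts at i */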
+ for (j = 0; j < blocks && (i + j) < ebcnt; j++)
+ if (bbt[i + j])
+ break;
+ if (j < 1) {
+ i++;
+ continue;
+ }
+ err = multiblock_erase(i, j);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+
+ i += j;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("%dx multi-block erase speed is %ld KiB/s\n",
+ blocks, speed);
+ }
+ pr_info("finished\n");
+out:
+ kfree(iobuf);
+ kfree(bbt);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_speedtest_init);
+
+static void __exit mtd_speedtest_exit(void)
+{
+ return;
+}
+module_exit(mtd_speedtest_exit);
+
+MODULE_DESCRIPTION("Speed test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/stresstest.c b/drivers/mtd/tests/stresstest.c
new file mode 100644
index 000000000..e509f8aa9
--- /dev/null
+++ b/drivers/mtd/tests/stresstest.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test random reads, writes and erases on MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static int count = 10000;
+module_param(count, int, S_IRUGO);
+MODULE_PARM_DESC(count, "Number of operations to do (default is 10000)");
+
+static struct mtd_info *mtd;
+static unsigned char *writebuf;
+static unsigned char *readbuf;
+static unsigned char *bbt;
+static int *offsets;
+
+static int pgsize;
+static int bufsize;
+static int ebcnt;
+static int pgcnt;
+
+static int rand_eb(void)
+{
+ unsigned int eb;
+
+again:
+ eb = prandom_u32();
+	/* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */
+ eb %= (ebcnt - 1);
+ if (bbt[eb])
+ goto again;
+ return eb;
+}
+
+static int rand_offs(void)
+{
+ unsigned int offs;
+
+ offs = prandom_u32();
+ offs %= bufsize;
+ return offs;
+}
+
+static int rand_len(int offs)
+{
+ unsigned int len;
+
+ len = prandom_u32();
+ len %= (bufsize - offs);
+ return len;
+}
+
+static int do_read(void)
+{
+ int eb = rand_eb();
+ int offs = rand_offs();
+ int len = rand_len(offs);
+ loff_t addr;
+
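+	/* if the next eraseblock is bad, keep the read within this eraseblock */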
+ if (bbt[eb + 1]) {
+ if (offs >= mtd->erasesize)
+ offs -= mtd->erasesize;
+ if (offs + len > mtd->erasesize)
+ len = mtd->erasesize - offs;
+ }
+ addr = (loff_t)eb * mtd->erasesize + offs;
+ return mtdtest_read(mtd, addr, len, readbuf);
+}
+
+static int do_write(void)
+{
+ int eb = rand_eb(), offs, err, len;
+ loff_t addr;
+
+ offs = offsets[eb];
+ if (offs >= mtd->erasesize) {
+ err = mtdtest_erase_eraseblock(mtd, eb);
+ if (err)
+ return err;
+ offs = offsets[eb] = 0;
+ }
+ len = rand_len(offs);
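+	/* writes must cover whole pages, so round len up to a page multiple */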
+ len = ((len + pgsize - 1) / pgsize) * pgsize;
+ if (offs + len > mtd->erasesize) {
+ if (bbt[eb + 1])
+ len = mtd->erasesize - offs;
+ else {
+ err = mtdtest_erase_eraseblock(mtd, eb + 1);
+ if (err)
+ return err;
+ offsets[eb + 1] = 0;
+ }
+ }
+ addr = (loff_t)eb * mtd->erasesize + offs;
+ err = mtdtest_write(mtd, addr, len, writebuf);
+ if (unlikely(err))
+ return err;
+ offs += len;
+ while (offs > mtd->erasesize) {
+ offsets[eb++] = mtd->erasesize;
+ offs -= mtd->erasesize;
+ }
+ offsets[eb] = offs;
+ return 0;
+}
+
+static int do_operation(void)
+{
+ if (prandom_u32() & 1)
+ return do_read();
+ else
+ return do_write();
+}
+
+static int __init mtd_stresstest_init(void)
+{
+ int err;
+ int i, op;
+ uint64_t tmp;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (mtd->writesize == 1) {
+ pr_info("not NAND flash, assume page size is 512 "
+ "bytes.\n");
+ pgsize = 512;
+ } else
+ pgsize = mtd->writesize;
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / pgsize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ if (ebcnt < 2) {
+ pr_err("error: need at least 2 eraseblocks\n");
+ err = -ENOSPC;
+ goto out_put_mtd;
+ }
+
+	/* Read or write up to 2 eraseblocks at a time */
+ bufsize = mtd->erasesize * 2;
+
+ err = -ENOMEM;
+ readbuf = vmalloc(bufsize);
+ writebuf = vmalloc(bufsize);
+ offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
+ if (!readbuf || !writebuf || !offsets)
+ goto out;
+ for (i = 0; i < ebcnt; i++)
+ offsets[i] = mtd->erasesize;
+ prandom_bytes(writebuf, bufsize);
+
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Do operations */
+ pr_info("doing operations\n");
+ for (op = 0; op < count; op++) {
+ if ((op & 1023) == 0)
+ pr_info("%d operations done\n", op);
+ err = do_operation();
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("finished, %d operations done\n", op);
+
+out:
+ kfree(offsets);
+ kfree(bbt);
+ vfree(writebuf);
+ vfree(readbuf);
+out_put_mtd:
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_stresstest_init);
+
+static void __exit mtd_stresstest_exit(void)
+{
+ return;
+}
+module_exit(mtd_stresstest_exit);
+
+MODULE_DESCRIPTION("Stress test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c
new file mode 100644
index 000000000..aecc6ce5a
--- /dev/null
+++ b/drivers/mtd/tests/subpagetest.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test sub-page read and write on MTD device.
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *writebuf;
+static unsigned char *readbuf;
+static unsigned char *bbt;
+
+static int subpgsize;
+static int bufsize;
+static int ebcnt;
+static int pgcnt;
+static int errcnt;
+static struct rnd_state rnd_state;
+
+static inline void clear_data(unsigned char *buf, size_t len)
+{
+ memset(buf, 0, len);
+}
+
+static int write_eraseblock(int ebnum)
+{
+ size_t written;
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
+ if (unlikely(err || written != subpgsize)) {
+ pr_err("error: write failed at %#llx\n",
+ (long long)addr);
+ if (written != subpgsize) {
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
+ }
+ return err ? err : -1;
+ }
+
+ addr += subpgsize;
+
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
+ if (unlikely(err || written != subpgsize)) {
+ pr_err("error: write failed at %#llx\n",
+ (long long)addr);
+ if (written != subpgsize) {
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
+ }
+ return err ? err : -1;
+ }
+
+ return err;
+}
+
+static int write_eraseblock2(int ebnum)
+{
+ size_t written;
+ int err = 0, k;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
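+	/* write runs of k subpages, k = 1..32, until a run would cross the eraseblock end */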
+ for (k = 1; k < 33; ++k) {
+ if (addr + (subpgsize * k) > (loff_t)(ebnum + 1) * mtd->erasesize)
+ break;
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
+ err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
+ if (unlikely(err || written != subpgsize * k)) {
+ pr_err("error: write failed at %#llx\n",
+ (long long)addr);
+			if (written != subpgsize * k) {
+ pr_err(" write size: %#x\n",
+ subpgsize * k);
+ pr_err(" written: %#08zx\n",
+ written);
+ }
+ return err ? err : -1;
+ }
+ addr += subpgsize * k;
+ }
+
+ return err;
+}
+
+static void print_subpage(unsigned char *p)
+{
+ int i, j;
+
+ for (i = 0; i < subpgsize; ) {
+ for (j = 0; i < subpgsize && j < 32; ++i, ++j)
+ printk("%02x", *p++);
+ printk("\n");
+ }
+}
+
+static int verify_eraseblock(int ebnum)
+{
+ size_t read;
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
+ clear_data(readbuf, subpgsize);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
+ if (unlikely(err || read != subpgsize)) {
+ if (mtd_is_bitflip(err) && read == subpgsize) {
+ pr_info("ECC correction at %#llx\n",
+ (long long)addr);
+ err = 0;
+ } else {
+ pr_err("error: read failed at %#llx\n",
+ (long long)addr);
+ return err ? err : -1;
+ }
+ }
+ if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ pr_info("------------- written----------------\n");
+ print_subpage(writebuf);
+ pr_info("------------- read ------------------\n");
+ print_subpage(readbuf);
+ pr_info("-------------------------------------\n");
+ errcnt += 1;
+ }
+
+ addr += subpgsize;
+
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
+ clear_data(readbuf, subpgsize);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
+ if (unlikely(err || read != subpgsize)) {
+ if (mtd_is_bitflip(err) && read == subpgsize) {
+ pr_info("ECC correction at %#llx\n",
+ (long long)addr);
+ err = 0;
+ } else {
+ pr_err("error: read failed at %#llx\n",
+ (long long)addr);
+ return err ? err : -1;
+ }
+ }
+ if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
+ pr_info("error: verify failed at %#llx\n",
+ (long long)addr);
+ pr_info("------------- written----------------\n");
+ print_subpage(writebuf);
+ pr_info("------------- read ------------------\n");
+ print_subpage(readbuf);
+ pr_info("-------------------------------------\n");
+ errcnt += 1;
+ }
+
+ return err;
+}
+
+static int verify_eraseblock2(int ebnum)
+{
+ size_t read;
+ int err = 0, k;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ for (k = 1; k < 33; ++k) {
+ if (addr + (subpgsize * k) > (loff_t)(ebnum + 1) * mtd->erasesize)
+ break;
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
+ clear_data(readbuf, subpgsize * k);
+ err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
+ if (unlikely(err || read != subpgsize * k)) {
+ if (mtd_is_bitflip(err) && read == subpgsize * k) {
+ pr_info("ECC correction at %#llx\n",
+ (long long)addr);
+ err = 0;
+ } else {
+ pr_err("error: read failed at "
+ "%#llx\n", (long long)addr);
+ return err ? err : -1;
+ }
+ }
+ if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ }
+ addr += subpgsize * k;
+ }
+
+ return err;
+}
+
+static int verify_eraseblock_ff(int ebnum)
+{
+ uint32_t j;
+ size_t read;
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ memset(writebuf, 0xff, subpgsize);
+ for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
+ clear_data(readbuf, subpgsize);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
+ if (unlikely(err || read != subpgsize)) {
+ if (mtd_is_bitflip(err) && read == subpgsize) {
+ pr_info("ECC correction at %#llx\n",
+ (long long)addr);
+ err = 0;
+ } else {
+ pr_err("error: read failed at "
+ "%#llx\n", (long long)addr);
+ return err ? err : -1;
+ }
+ }
+ if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
+ pr_err("error: verify 0xff failed at "
+ "%#llx\n", (long long)addr);
+ errcnt += 1;
+ }
+ addr += subpgsize;
+ }
+
+ return err;
+}
+
+static int verify_all_eraseblocks_ff(void)
+{
+ int err;
+ unsigned int i;
+
+ pr_info("verifying all eraseblocks for 0xff\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock_ff(i);
+ if (err)
+ return err;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+ return 0;
+}
+
+static int __init mtd_subpagetest_init(void)
+{
+ int err = 0;
+ uint32_t i;
+ uint64_t tmp;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
+ goto out;
+ }
+
+ subpgsize = mtd->writesize >> mtd->subpage_sft;
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / mtd->writesize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, subpage size %u, count of eraseblocks %u, "
+ "pages per eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ mtd->writesize, subpgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ err = -ENOMEM;
+ bufsize = subpgsize * 32;
+ writebuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!writebuf)
+ goto out;
+ readbuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!readbuf)
+ goto out;
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ pr_info("writing whole device\n");
+ prandom_seed_state(&rnd_state, 1);
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock(i);
+ if (unlikely(err))
+ goto out;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("written %u eraseblocks\n", i);
+
+ prandom_seed_state(&rnd_state, 1);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock(i);
+ if (unlikely(err))
+ goto out;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ err = verify_all_eraseblocks_ff();
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks */
+ prandom_seed_state(&rnd_state, 3);
+ pr_info("writing whole device\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock2(i);
+ if (unlikely(err))
+ goto out;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("written %u eraseblocks\n", i);
+
+ /* Check all eraseblocks */
+ prandom_seed_state(&rnd_state, 3);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock2(i);
+ if (unlikely(err))
+ goto out;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ err = verify_all_eraseblocks_ff();
+ if (err)
+ goto out;
+
+ pr_info("finished with %d errors\n", errcnt);
+
+out:
+ kfree(bbt);
+ kfree(readbuf);
+ kfree(writebuf);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_subpagetest_init);
+
+static void __exit mtd_subpagetest_exit(void)
+{
+ return;
+}
+module_exit(mtd_subpagetest_exit);
+
+MODULE_DESCRIPTION("Subpage test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c
new file mode 100644
index 000000000..e5d6e6d95
--- /dev/null
+++ b/drivers/mtd/tests/torturetest.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (C) 2006-2008 Artem Bityutskiy
+ * Copyright (C) 2006-2008 Jarkko Lavinen
+ * Copyright (C) 2006-2008 Adrian Hunter
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Authors: Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter
+ *
+ * WARNING: this test program may kill your flash and your device. Do not
+ * use it unless you know what you are doing. The authors are not responsible
+ * for any damage caused by this program.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "mtd_test.h"
+
+#define RETRIES 3
+
+static int eb = 8;
+module_param(eb, int, S_IRUGO);
+MODULE_PARM_DESC(eb, "eraseblock number within the selected MTD device");
+
+static int ebcnt = 32;
+module_param(ebcnt, int, S_IRUGO);
+MODULE_PARM_DESC(ebcnt, "number of consecutive eraseblocks to torture");
+
+static int pgcnt;
+module_param(pgcnt, int, S_IRUGO);
+MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)");
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static int gran = 512;
+module_param(gran, int, S_IRUGO);
+MODULE_PARM_DESC(gran, "how often the status information should be printed");
+
+static int check = 1;
+module_param(check, int, S_IRUGO);
+MODULE_PARM_DESC(check, "if the written data should be checked");
+
+static unsigned int cycles_count;
+module_param(cycles_count, uint, S_IRUGO);
+MODULE_PARM_DESC(cycles_count, "how many erase cycles to do "
+ "(infinite by default)");
+
+static struct mtd_info *mtd;
+
+/* This buffer contains 0x555555...0xAAAAAA... pattern */
+static unsigned char *patt_5A5;
+/* This buffer contains 0xAAAAAA...0x555555... pattern */
+static unsigned char *patt_A5A;
+/* This buffer contains all 0xFF bytes */
+static unsigned char *patt_FF;
+/* This is a temporary buffer used when checking data */
+static unsigned char *check_buf;
+/* How many erase cycles were done */
+static unsigned int erase_cycles;
+
+static int pgsize;
+static struct timeval start, finish;
+
+static void report_corrupt(unsigned char *read, unsigned char *written);
+
+static inline void start_timing(void)
+{
+ do_gettimeofday(&start);
+}
+
+static inline void stop_timing(void)
+{
+ do_gettimeofday(&finish);
+}
+
+/*
+ * Check that the contents of eraseblock number @ebnum are equivalent to the
+ * @buf buffer.
+ */
+static inline int check_eraseblock(int ebnum, unsigned char *buf)
+{
+ int err, retries = 0;
+ size_t read;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ size_t len = mtd->erasesize;
+
+ if (pgcnt) {
+ addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
+ len = pgcnt * pgsize;
+ }
+
+retry:
+ err = mtd_read(mtd, addr, len, &read, check_buf);
+ if (mtd_is_bitflip(err))
+ pr_err("single bit flip occurred at EB %d "
+ "MTD reported that it was fixed.\n", ebnum);
+ else if (err) {
+ pr_err("error %d while reading EB %d, "
+ "read %zd\n", err, ebnum, read);
+ return err;
+ }
+
+ if (read != len) {
+ pr_err("failed to read %zd bytes from EB %d, "
+ "read only %zd, but no error reported\n",
+ len, ebnum, read);
+ return -EIO;
+ }
+
+ if (memcmp(buf, check_buf, len)) {
+ pr_err("read wrong data from EB %d\n", ebnum);
+ report_corrupt(check_buf, buf);
+
+ if (retries++ < RETRIES) {
+ /* Try read again */
+ yield();
+ pr_info("re-try reading data from EB %d\n",
+ ebnum);
+ goto retry;
+ } else {
+ pr_info("retried %d times, still errors, "
+ "give-up\n", RETRIES);
+ return -EINVAL;
+ }
+ }
+
+ if (retries != 0)
+ pr_info("only attempt number %d was OK (!!!)\n",
+ retries);
+
+ return 0;
+}
+
+static inline int write_pattern(int ebnum, void *buf)
+{
+ int err;
+ size_t written;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ size_t len = mtd->erasesize;
+
+ if (pgcnt) {
+ addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
+ len = pgcnt * pgsize;
+ }
+ err = mtd_write(mtd, addr, len, &written, buf);
+ if (err) {
+ pr_err("error %d while writing EB %d, written %zd"
+ " bytes\n", err, ebnum, written);
+ return err;
+ }
+ if (written != len) {
+ pr_info("written only %zd bytes of %zd, but no error"
+ " reported\n", written, len);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int __init tort_init(void)
+{
+ int err = 0, i, infinite = !cycles_count;
+ unsigned char *bad_ebs;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+ pr_info("Warning: this program is trying to wear out your "
+ "flash, stop it if this is not wanted.\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+ pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n",
+ ebcnt, eb, eb + ebcnt - 1, dev);
+ if (pgcnt)
+ pr_info("torturing just %d pages per eraseblock\n",
+ pgcnt);
+ pr_info("write verify %s\n", check ? "enabled" : "disabled");
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (mtd->writesize == 1) {
+ pr_info("not NAND flash, assume page size is 512 "
+ "bytes.\n");
+ pgsize = 512;
+ } else
+ pgsize = mtd->writesize;
+
+ if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
+ pr_err("error: invalid pgcnt value %d\n", pgcnt);
+ goto out_mtd;
+ }
+
+ err = -ENOMEM;
+ patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!patt_5A5)
+ goto out_mtd;
+
+ patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!patt_A5A)
+ goto out_patt_5A5;
+
+ patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!patt_FF)
+ goto out_patt_A5A;
+
+ check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!check_buf)
+ goto out_patt_FF;
+
+ bad_ebs = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bad_ebs)
+ goto out_check_buf;
+
+ err = 0;
+
+ /* Initialize patterns */
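+	/* patt_5A5 is 0x55 on even pages and 0xAA on odd pages; patt_A5A is the inverse */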
+ memset(patt_FF, 0xFF, mtd->erasesize);
+ for (i = 0; i < mtd->erasesize / pgsize; i++) {
+ if (!(i & 1)) {
+ memset(patt_5A5 + i * pgsize, 0x55, pgsize);
+ memset(patt_A5A + i * pgsize, 0xAA, pgsize);
+ } else {
+ memset(patt_5A5 + i * pgsize, 0xAA, pgsize);
+ memset(patt_A5A + i * pgsize, 0x55, pgsize);
+ }
+ }
+
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+ if (err)
+ goto out;
+
+ start_timing();
+ while (1) {
+ int i;
+ void *patt;
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+ if (err)
+ goto out;
+
+ /* Check if the eraseblocks contain only 0xFF bytes */
+ if (check) {
+ for (i = eb; i < eb + ebcnt; i++) {
+ if (bad_ebs[i - eb])
+ continue;
+ err = check_eraseblock(i, patt_FF);
+ if (err) {
+ pr_info("verify failed"
+ " for 0xFF... pattern\n");
+ goto out;
+ }
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ }
+
+ /* Write the pattern */
+ for (i = eb; i < eb + ebcnt; i++) {
+ if (bad_ebs[i - eb])
+ continue;
+ if ((eb + erase_cycles) & 1)
+ patt = patt_5A5;
+ else
+ patt = patt_A5A;
+ err = write_pattern(i, patt);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+
+ /* Verify what we wrote */
+ if (check) {
+ for (i = eb; i < eb + ebcnt; i++) {
+ if (bad_ebs[i - eb])
+ continue;
+ if ((eb + erase_cycles) & 1)
+ patt = patt_5A5;
+ else
+ patt = patt_A5A;
+ err = check_eraseblock(i, patt);
+ if (err) {
+ pr_info("verify failed for %s"
+ " pattern\n",
+ ((eb + erase_cycles) & 1) ?
+ "0x55AA55..." : "0xAA55AA...");
+ goto out;
+ }
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ }
+
+ erase_cycles += 1;
+
+ if (erase_cycles % gran == 0) {
+ long ms;
+
+ stop_timing();
+ ms = (finish.tv_sec - start.tv_sec) * 1000 +
+ (finish.tv_usec - start.tv_usec) / 1000;
+ pr_info("%08u erase cycles done, took %lu "
+ "milliseconds (%lu seconds)\n",
+ erase_cycles, ms, ms / 1000);
+ start_timing();
+ }
+
+ if (!infinite && --cycles_count == 0)
+ break;
+ }
+out:
+
+ pr_info("finished after %u erase cycles\n",
+ erase_cycles);
+ kfree(bad_ebs);
+out_check_buf:
+ kfree(check_buf);
+out_patt_FF:
+ kfree(patt_FF);
+out_patt_A5A:
+ kfree(patt_A5A);
+out_patt_5A5:
+ kfree(patt_5A5);
+out_mtd:
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred during torturing\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(tort_init);
+
+static void __exit tort_exit(void)
+{
+ return;
+}
+module_exit(tort_exit);
+
+static int countdiffs(unsigned char *buf, unsigned char *check_buf,
+ unsigned offset, unsigned len, unsigned *bytesp,
+ unsigned *bitsp);
+static void print_bufs(unsigned char *read, unsigned char *written, int start,
+ int len);
+
+/*
+ * Report the detailed information about how the read EB differs from what was
+ * written.
+ */
+static void report_corrupt(unsigned char *read, unsigned char *written)
+{
+ int i;
+ int bytes, bits, pages, first;
+ int offset, len;
+ size_t check_len = mtd->erasesize;
+
+ if (pgcnt)
+ check_len = pgcnt * pgsize;
+
+ bytes = bits = pages = 0;
+ for (i = 0; i < check_len; i += pgsize)
+ if (countdiffs(written, read, i, pgsize, &bytes,
+ &bits) >= 0)
+ pages++;
+
+ pr_info("verify fails on %d pages, %d bytes/%d bits\n",
+ pages, bytes, bits);
+ pr_info("The following is a list of all differences between"
+ " what was read from flash and what was expected\n");
+
+ for (i = 0; i < check_len; i += pgsize) {
+ cond_resched();
+ bytes = bits = 0;
+ first = countdiffs(written, read, i, pgsize, &bytes,
+ &bits);
+ if (first < 0)
+ continue;
+
+ printk("-------------------------------------------------------"
+ "----------------------------------\n");
+
+ pr_info("Page %zd has %d bytes/%d bits failing verify,"
+ " starting at offset 0x%x\n",
+ (mtd->erasesize - check_len + i) / pgsize,
+ bytes, bits, first);
+
+ offset = first & ~0x7;
+ len = ((first + bytes) | 0x7) + 1 - offset;
+
+ print_bufs(read, written, offset, len);
+ }
+}
+
+static void print_bufs(unsigned char *read, unsigned char *written, int start,
+ int len)
+{
+ int i = 0, j1, j2;
+ char *diff;
+
+ printk("Offset Read Written\n");
+ while (i < len) {
+ printk("0x%08x: ", start + i);
+ diff = " ";
+ for (j1 = 0; j1 < 8 && i + j1 < len; j1++) {
+ printk(" %02x", read[start + i + j1]);
+ if (read[start + i + j1] != written[start + i + j1])
+ diff = "***";
+ }
+
+ while (j1 < 8) {
+ printk(" ");
+ j1 += 1;
+ }
+
+ printk(" %s ", diff);
+
+ for (j2 = 0; j2 < 8 && i + j2 < len; j2++)
+ printk(" %02x", written[start + i + j2]);
+ printk("\n");
+ i += 8;
+ }
+}
+
+/*
+ * Count the number of differing bytes and bits and return the first differing
+ * offset.
+ */
+static int countdiffs(unsigned char *buf, unsigned char *check_buf,
+ unsigned offset, unsigned len, unsigned *bytesp,
+ unsigned *bitsp)
+{
+ unsigned i, bit;
+ int first = -1;
+
+ for (i = offset; i < offset + len; i++)
+ if (buf[i] != check_buf[i]) {
+ first = i;
+ break;
+ }
+
+ while (i < offset + len) {
+ if (buf[i] != check_buf[i]) {
+ (*bytesp)++;
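+			/* count the differing bits within this byte */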
+ bit = 1;
+ while (bit < 256) {
+ if ((buf[i] & bit) != (check_buf[i] & bit))
+ (*bitsp)++;
+ bit <<= 1;
+ }
+ }
+ i++;
+ }
+
+ return first;
+}
+
+MODULE_DESCRIPTION("Eraseblock torturing module");
+MODULE_AUTHOR("Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
new file mode 100644
index 000000000..f0855ce08
--- /dev/null
+++ b/drivers/mtd/ubi/Kconfig
@@ -0,0 +1,106 @@
+menuconfig MTD_UBI
+ tristate "Enable UBI - Unsorted block images"
+ select CRC32
+ help
+	  UBI is a software layer above the MTD layer which provides LVM-like
+	  logical volumes on top of MTD devices, hides some complexities of
+	  flash chips like wear and bad blocks, and provides some other useful
+	  capabilities. Please consult the MTD web site for more details
+	  (www.linux-mtd.infradead.org).
+
+if MTD_UBI
+
+config MTD_UBI_WL_THRESHOLD
+ int "UBI wear-leveling threshold"
+ default 4096
+ range 2 65536
+ help
+ This parameter defines the maximum difference between the highest
+ erase counter value and the lowest erase counter value of eraseblocks
+ of UBI devices. When this threshold is exceeded, UBI starts performing
+	  wear leveling by moving data from eraseblocks with low erase
+	  counters to eraseblocks with high erase counters.
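+
+	  For example, with the default threshold of 4096, UBI starts moving
+	  data as soon as some eraseblock's erase counter exceeds the lowest
+	  erase counter by more than 4096.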
+
+	  The default value should be OK for SLC NAND flashes, NOR flashes and
+	  other flashes which have an eraseblock life-cycle of 100000 or more.
+	  However, in case of MLC NAND flashes, which typically have an
+	  eraseblock life-cycle of less than 10000, the threshold should be
+	  lowered (e.g., to 128 or 256, although it does not have to be a
+	  power of 2).
+
+config MTD_UBI_BEB_LIMIT
+ int "Maximum expected bad eraseblock count per 1024 eraseblocks"
+ default 20
+ range 0 768
+ help
+	  This option specifies the maximum number of bad physical eraseblocks
+	  UBI expects on the MTD device (per 1024 eraseblocks). If the
+	  underlying flash does not develop bad eraseblocks (e.g., NOR flash),
+	  this value is ignored.
+
+	  NAND datasheets often specify the minimum and maximum NVB (Number of
+	  Valid Blocks) for the flashes' endurance lifetime. The maximum
+	  expected bad eraseblocks per 1024 eraseblocks can then be calculated
+	  as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs
+	  (MaxNVB is basically the total count of eraseblocks on the chip).
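+
+	  For example, for a hypothetical chip with MaxNVB = 4096 and
+	  MinNVB = 4016, this works out to 1024 * (1 - 4016/4096) =
+	  1024 * 80/4096 = 20.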
+
+	  To put it differently, if this value is 20, UBI will try to reserve
+	  about 1.9% of physical eraseblocks for bad block handling, and that
+	  will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
+	  partition UBI attaches. This means that if you have, say, a NAND
+	  flash chip that admits a maximum of 40 bad eraseblocks, and it is
+	  split into two MTD partitions of the same size, UBI will reserve 40
+	  eraseblocks when attaching either partition.
+
+ This option can be overridden by the "mtd=" UBI module parameter or
+ by the "attach" ioctl.
+
+ Leave the default value if unsure.
+
+config MTD_UBI_FASTMAP
+ bool "UBI Fastmap (Experimental feature)"
+ default n
+ help
+	  Important: this feature is experimental so far and the on-flash
+	  format for fastmap may change in future kernel versions.
+
+	  Fastmap is a mechanism which allows attaching a UBI device
+	  in nearly constant time. Instead of scanning the whole MTD device it
+	  only has to locate a checkpoint (called fastmap) on the device.
+	  The on-flash fastmap contains all information needed to attach
+	  the device. Using fastmap only makes sense on large devices, where
+	  attaching by scanning takes a long time. UBI will not automatically
+	  install a fastmap on old images, but you can set the UBI module
+	  parameter fm_autoconvert to 1 if you want it to. Please note that
+	  fastmap-enabled images are still usable with UBI implementations
+	  without fastmap support. On typical flash devices the whole fastmap
+	  fits into one PEB. UBI will reserve PEBs to hold two fastmaps.
+
+ If in doubt, say "N".
+
+config MTD_UBI_GLUEBI
+ tristate "MTD devices emulation driver (gluebi)"
+ help
+	  This option enables gluebi - an additional driver which emulates MTD
+	  devices on top of UBI volumes: for each UBI volume an MTD device is
+	  created, and all I/O to this MTD device is redirected to the UBI
+	  volume. This is handy for making MTD-oriented software (like JFFS2)
+	  work on top of UBI. Do not enable this unless you use legacy
+	  software.
+
+config MTD_UBI_BLOCK
+ bool "Read-only block devices on top of UBI volumes"
+ default n
+ depends on BLOCK
+ help
+	  This option enables support for read-only UBI block devices. UBI block
+ devices will be layered on top of UBI volumes, which means that the
+ UBI driver will transparently handle things like bad eraseblocks and
+ bit-flips. You can put any block-oriented file system on top of UBI
+ volumes in read-only mode (e.g., ext4), but it is probably most
+ practical for read-only file systems, like squashfs.
+
+	  When selected, this feature will be built into the UBI driver.
+
+ If in doubt, say "N".
+
+endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
new file mode 100644
index 000000000..4e3c3d70d
--- /dev/null
+++ b/drivers/mtd/ubi/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_MTD_UBI) += ubi.o
+
+ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
+ubi-y += misc.o debug.o
+ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
+ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
+
+obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
new file mode 100644
index 000000000..68eea5bef
--- /dev/null
+++ b/drivers/mtd/ubi/attach.c
@@ -0,0 +1,1768 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI attaching sub-system.
+ *
+ * This sub-system is responsible for attaching MTD devices and it also
+ * implements flash media scanning.
+ *
+ * The attaching information is represented by a &struct ubi_attach_info
+ * object. Information about volumes is represented by &struct ubi_ainf_volume
+ * objects which are kept in volume RB-tree with root at the @volumes field.
+ * The RB-tree is indexed by the volume ID.
+ *
+ * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
+ * objects are kept in per-volume RB-trees with the root at the corresponding
+ * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
+ * per-volume objects and each of these objects is the root of RB-tree of
+ * per-LEB objects.
+ *
+ * Corrupted physical eraseblocks are put to the @corr list, free physical
+ * eraseblocks are put to the @free list, and the physical eraseblocks to be
+ * erased are put to the @erase list.
+ *
+ * About corruptions
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
+ * whether the headers are corrupted or not. Sometimes UBI also protects the
+ * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
+ * when it moves the contents of a PEB for wear-leveling purposes.
+ *
+ * UBI tries to distinguish between 2 types of corruptions.
+ *
+ * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
+ * tries to handle them gracefully, without printing too many warnings and
+ * error messages. The idea is that we do not lose important data in these
+ * cases - we may lose only the data that was being written to the media just
+ * before the power cut happened, and the upper layers (e.g., UBIFS) are
+ * supposed to handle such data losses (e.g., by using the FS journal).
+ *
+ * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
+ * the reason is a power cut, UBI puts this PEB to the @erase list, and all
+ * PEBs in the @erase list are scheduled for erasure later.
+ *
+ * 2. Unexpected corruptions which are not caused by power cuts. During
+ * attaching, such PEBs are put to the @corr list and UBI preserves them.
+ * Obviously, this reduces the number of available PEBs, and if at some point
+ * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
+ * about such PEBs every time the MTD device is attached.
+ *
+ * However, it is difficult to reliably distinguish between these types of
+ * corruption, and UBI's strategy is as follows (in case of attaching by
+ * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
+ * the data area does not contain all 0xFFs, and there were no bit-flips or
+ * integrity errors (e.g., ECC errors in case of NAND) while reading the data
+ * area. Otherwise UBI assumes corruption type 1. So the decision criteria
+ * are as follows.
+ * o If the data area contains only 0xFFs, there is no data, and it is safe
+ * to just erase this PEB - this is corruption type 1.
+ * o If the data area has bit-flips or data integrity errors (ECC errors on
+ *   NAND), it is probably a PEB which was being erased when the power cut
+ * happened, so this is corruption type 1. However, this is just a guess,
+ * which might be wrong.
+ * o Otherwise this is corruption type 2.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/math64.h>
+#include <linux/random.h>
+#include "ubi.h"
+
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
+
+/* Temporary variables used during scanning */
+static struct ubi_ec_hdr *ech;
+static struct ubi_vid_hdr *vidh;
+
+/**
+ * add_to_list - add physical eraseblock to a list.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @vol_id: the last used volume id for the PEB
+ * @lnum: the last used LEB number for the PEB
+ * @ec: erase counter of the physical eraseblock
+ * @to_head: if not zero, add to the head of the list
+ * @list: the list to add to
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for physical
+ * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
+ * It stores the @lnum and @vol_id alongside, which can both be
+ * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
+ * If @to_head is not zero, the PEB will be added to the head of the list,
+ * which basically means it will be the first to be processed later on. E.g.,
+ * we add corrupted PEBs (corrupted due to power cuts) to the head of the
+ * erase list to make sure we erase them first and get rid of the corruption
+ * ASAP. This function
+ * returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
+ int lnum, int ec, int to_head, struct list_head *list)
+{
+ struct ubi_ainf_peb *aeb;
+
+ if (list == &ai->free) {
+ dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
+ } else if (list == &ai->erase) {
+ dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
+ } else if (list == &ai->alien) {
+ dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
+ ai->alien_peb_count += 1;
+ } else
+ BUG();
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->ec = ec;
+ if (to_head)
+ list_add(&aeb->u.list, list);
+ else
+ list_add_tail(&aeb->u.list, list);
+ return 0;
+}
+
+/**
+ * add_corrupted - add a corrupted physical eraseblock.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @ec: erase counter of the physical eraseblock
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
+ * physical eraseblock @pnum and adds it to the 'corr' list. The corruption
+ * was presumably not caused by a power cut. Returns zero in case of success
+ * and a negative error code in case of failure.
+ */
+static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ ai->corr_peb_count += 1;
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ list_add(&aeb->u.list, &ai->corr);
+ return 0;
+}
+
+/**
+ * validate_vid_hdr - check volume identifier header.
+ * @ubi: UBI device description object
+ * @vid_hdr: the volume identifier header to check
+ * @av: information about the volume this logical eraseblock belongs to
+ * @pnum: physical eraseblock number the VID header came from
+ *
+ * This function checks that data stored in @vid_hdr is consistent. Returns
+ * non-zero if an inconsistency was found and zero if not.
+ *
+ * Note, UBI sanity-checks everything it reads from the flash media. Most of
+ * the checks are done in the I/O sub-system. Here we check that the
+ * information in the VID header is consistent with the information in other
+ * VID headers of the same volume.
+ */
+static int validate_vid_hdr(const struct ubi_device *ubi,
+ const struct ubi_vid_hdr *vid_hdr,
+ const struct ubi_ainf_volume *av, int pnum)
+{
+ int vol_type = vid_hdr->vol_type;
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+
+ if (av->leb_count != 0) {
+ int av_vol_type;
+
+ /*
+ * This is not the first logical eraseblock belonging to this
+ * volume. Ensure that the data in its VID header is consistent
+		 * with the data in previous logical eraseblock headers.
+ */
+
+ if (vol_id != av->vol_id) {
+ ubi_err(ubi, "inconsistent vol_id");
+ goto bad;
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av_vol_type = UBI_VID_STATIC;
+ else
+ av_vol_type = UBI_VID_DYNAMIC;
+
+ if (vol_type != av_vol_type) {
+ ubi_err(ubi, "inconsistent vol_type");
+ goto bad;
+ }
+
+ if (used_ebs != av->used_ebs) {
+ ubi_err(ubi, "inconsistent used_ebs");
+ goto bad;
+ }
+
+ if (data_pad != av->data_pad) {
+ ubi_err(ubi, "inconsistent data_pad");
+ goto bad;
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ ubi_dump_av(av);
+ return -EINVAL;
+}
+
+/**
+ * add_volume - add volume to the attaching information.
+ * @ai: attaching information
+ * @vol_id: ID of the volume to add
+ * @pnum: physical eraseblock number
+ * @vid_hdr: volume identifier header
+ *
+ * If the volume corresponding to the @vid_hdr logical eraseblock is already
+ * present in the attaching information, this function does nothing. Otherwise
+ * it adds the corresponding volume to the attaching information. Returns a pointer
+ * to the allocated "av" object in case of success and a negative error code in
+ * case of failure.
+ */
+static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
+ int vol_id, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
+
+ /* Walk the volume RB-tree to look if this volume is already present */
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id == av->vol_id)
+ return av;
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ /* The volume is absent - add it */
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
+ return ERR_PTR(-ENOMEM);
+
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->root = RB_ROOT;
+ av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ av->data_pad = be32_to_cpu(vid_hdr->data_pad);
+ av->compat = vid_hdr->compat;
+ av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+ : UBI_STATIC_VOLUME;
+ if (vol_id > ai->highest_vol_id)
+ ai->highest_vol_id = vol_id;
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+ ai->vols_found += 1;
+ dbg_bld("added volume %d", vol_id);
+ return av;
+}
+
+/**
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
+ * @ubi: UBI device description object
+ * @aeb: first logical eraseblock to compare
+ * @pnum: physical eraseblock number of the second logical eraseblock to
+ * compare
+ * @vid_hdr: volume identifier header of the second logical eraseblock
+ *
+ * This function compares 2 copies of a LEB and reports which one is newer. In
+ * case of success this function returns a positive value; in case of failure, a
+ * negative error code is returned. The success return codes use the following
+ * bits:
+ * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
+ * second PEB (described by @pnum and @vid_hdr);
+ * o bit 0 is set: the second PEB is newer;
+ * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
+ * o bit 1 is set: bit-flips were detected in the newer LEB;
+ * o bit 2 is cleared: the older LEB is not corrupted;
+ * o bit 2 is set: the older LEB is corrupted.
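+ *
+ * For example, a return value of 3 (bits 0 and 1 set) means that the second
+ * PEB holds the newer copy of the LEB and that bit-flips were detected in it.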
+ */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr)
+{
+ int len, err, second_is_newer, bitflips = 0, corrupted = 0;
+ uint32_t data_crc, crc;
+ struct ubi_vid_hdr *vh = NULL;
+ unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
+
+ if (sqnum2 == aeb->sqnum) {
+ /*
+		 * This must be a really ancient UBI image which was
+		 * created before sequence number support was added. At
+		 * that time we used 32-bit LEB versions stored in logical
+		 * eraseblocks. That was before UBI got into mainline. We do
+		 * not support these images anymore. Well, those images still
+		 * work, but only if no unclean reboots happened.
+ */
+ ubi_err(ubi, "unsupported on-flash UBI format");
+ return -EINVAL;
+ }
+
+ /* Obviously the LEB with lower sequence counter is older */
+ second_is_newer = (sqnum2 > aeb->sqnum);
+
+ /*
+	 * Now we know which copy is newer. If the copy flag of the PEB with
+	 * the newer version is not set, then we just return; otherwise we
+	 * have to check the data CRC. For the second PEB we already have the
+	 * VID header; for the first one we'll need to re-read it from flash.
+ *
+ * Note: this may be optimized so that we wouldn't read twice.
+ */
+
+ if (second_is_newer) {
+ if (!vid_hdr->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("second PEB %d is newer, copy_flag is unset",
+ pnum);
+ return 1;
+ }
+ } else {
+ if (!aeb->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("first PEB %d is newer, copy_flag is unset",
+ pnum);
+ return bitflips << 1;
+ }
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh)
+ return -ENOMEM;
+
+ pnum = aeb->pnum;
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err) {
+ if (err == UBI_IO_BITFLIPS)
+ bitflips = 1;
+ else {
+ ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
+ pnum, err);
+ if (err > 0)
+ err = -EIO;
+
+ goto out_free_vidh;
+ }
+ }
+
+ vid_hdr = vh;
+ }
+
+ /* Read the data of the copy and check the CRC */
+
+ len = be32_to_cpu(vid_hdr->data_size);
+
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto out_unlock;
+
+ data_crc = be32_to_cpu(vid_hdr->data_crc);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
+ if (crc != data_crc) {
+ dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
+ pnum, crc, data_crc);
+ corrupted = 1;
+ bitflips = 0;
+ second_is_newer = !second_is_newer;
+ } else {
+ dbg_bld("PEB %d CRC is OK", pnum);
+ bitflips |= !!err;
+ }
+ mutex_unlock(&ubi->buf_mutex);
+
+ ubi_free_vid_hdr(ubi, vh);
+
+ if (second_is_newer)
+ dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
+ else
+ dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
+
+ return second_is_newer | (bitflips << 1) | (corrupted << 2);
+
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
+out_free_vidh:
+ ubi_free_vid_hdr(ubi, vh);
+ return err;
+}
+
+/**
+ * ubi_add_to_av - add used physical eraseblock to the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @ec: erase counter
+ * @vid_hdr: the volume identifier header
+ * @bitflips: if bit-flips were detected when this physical eraseblock was read
+ *
+ * This function adds information about a used physical eraseblock to the
+ * 'used' tree of the corresponding volume. The function is rather complex
+ * because it has to handle cases when this is not the first physical
+ * eraseblock belonging to the same logical eraseblock, and the newer one has
+ * to be picked, while the older one has to be dropped. This function returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
+{
+ int err, vol_id, lnum;
+ unsigned long long sqnum;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node **p, *parent = NULL;
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+ sqnum = be64_to_cpu(vid_hdr->sqnum);
+
+ dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
+ pnum, vol_id, lnum, ec, sqnum, bitflips);
+
+ av = add_volume(ai, vol_id, pnum, vid_hdr);
+ if (IS_ERR(av))
+ return PTR_ERR(av);
+
+ if (ai->max_sqnum < sqnum)
+ ai->max_sqnum = sqnum;
+
+ /*
+ * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
+ * if this is the first instance of this logical eraseblock or not.
+ */
+ p = &av->root.rb_node;
+ while (*p) {
+ int cmp_res;
+
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (lnum != aeb->lnum) {
+ if (lnum < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ continue;
+ }
+
+ /*
+ * There is already a physical eraseblock describing the same
+ * logical eraseblock present.
+ */
+
+ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
+ aeb->pnum, aeb->sqnum, aeb->ec);
+
+ /*
+ * Make sure that the logical eraseblocks have different
+ * sequence numbers. Otherwise the image is bad.
+ *
+ * However, if the sequence number is zero, we assume it must
+ * be an ancient UBI image from the era when UBI did not have
+		 * sequence numbers. We can still attach these images, unless
+ * there is a need to distinguish between old and new
+ * eraseblocks, in which case we'll refuse the image in
+ * 'ubi_compare_lebs()'. In other words, we attach old clean
+		 * images, but refuse to attach old images with duplicated
+ * logical eraseblocks because there was an unclean reboot.
+ */
+ if (aeb->sqnum == sqnum && sqnum != 0) {
+ ubi_err(ubi, "two LEBs with same sequence number %llu",
+ sqnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_vid_hdr(vid_hdr);
+ return -EINVAL;
+ }
+
+ /*
+ * Now we have to drop the older one and preserve the newer
+ * one.
+ */
+ cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ if (cmp_res & 1) {
+ /*
+ * This logical eraseblock is newer than the one
+ * found earlier.
+ */
+ err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
+ if (err)
+ return err;
+
+ err = add_to_list(ai, aeb->pnum, aeb->vol_id,
+ aeb->lnum, aeb->ec, cmp_res & 4,
+ &ai->erase);
+ if (err)
+ return err;
+
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = ((cmp_res & 2) || bitflips);
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum == lnum)
+ av->last_data_size =
+ be32_to_cpu(vid_hdr->data_size);
+
+ return 0;
+ } else {
+ /*
+ * This logical eraseblock is older than the one found
+ * previously.
+ */
+ return add_to_list(ai, pnum, vol_id, lnum, ec,
+ cmp_res & 4, &ai->erase);
+ }
+ }
+
+ /*
+ * We've met this logical eraseblock for the first time, add it to the
+ * attaching information.
+ */
+
+ err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
+ if (err)
+ return err;
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = bitflips;
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum <= lnum) {
+ av->highest_lnum = lnum;
+ av->last_data_size = be32_to_cpu(vid_hdr->data_size);
+ }
+
+ av->leb_count += 1;
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+ return 0;
+}
+
+/**
+ * ubi_find_av - find volume in the attaching information.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ *
+ * This function returns a pointer to the volume description or %NULL if there
+ * are no data about this volume in the attaching information.
+ */
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node *p = ai->volumes.rb_node;
+
+ while (p) {
+ av = rb_entry(p, struct ubi_ainf_volume, rb);
+
+ if (vol_id == av->vol_id)
+ return av;
+
+ if (vol_id > av->vol_id)
+ p = p->rb_left;
+ else
+ p = p->rb_right;
+ }
+
+ return NULL;
+}
+
+/**
+ * ubi_remove_av - delete attaching information about a volume.
+ * @ai: attaching information
+ * @av: the volume attaching information to delete
+ */
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ struct rb_node *rb;
+ struct ubi_ainf_peb *aeb;
+
+ dbg_bld("remove attaching information about volume %d", av->vol_id);
+
+ while ((rb = rb_first(&av->root))) {
+ aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, &ai->erase);
+ }
+
+ rb_erase(&av->rb, &ai->volumes);
+ kfree(av);
+ ai->vols_found -= 1;
+}
+
+/**
+ * early_erase_peb - erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to erase
+ * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
+ *
+ * This function erases physical eraseblock 'pnum', and writes the erase
+ * counter header to it. This function should only be used on UBI device
+ * initialization stages, when the EBA sub-system had not been yet initialized.
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int early_erase_peb(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai, int pnum, int ec)
+{
+ int err;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. Upgrade UBI and use 64-bit
+ * erase counters internally.
+ */
+ ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
+ pnum, ec);
+ return -EINVAL;
+ }
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ec_hdr->ec = cpu_to_be64(ec);
+
+ err = ubi_io_sync_erase(ubi, pnum, 0);
+ if (err < 0)
+ goto out_free;
+
+ err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * ubi_early_get_peb - get a free physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns a free physical eraseblock. It is supposed to be
+ * called during the UBI initialization stages, when the wear-leveling
+ * sub-system is not initialized yet. This function picks a physical
+ * eraseblock from one of the lists, writes the EC header if it is needed,
+ * and removes it from the list.
+ *
+ * This function returns a pointer to the "aeb" of the found free PEB in case
+ * of success and an error code in case of failure.
+ */
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ int err = 0;
+ struct ubi_ainf_peb *aeb, *tmp_aeb;
+
+ if (!list_empty(&ai->free)) {
+ aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+ list_del(&aeb->u.list);
+ dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
+ }
+
+ /*
+ * We try to erase the first physical eraseblock from the erase list
+ * and pick it if we succeed, or try to erase the next one if not. And
+	 * so forth. We don't need to worry about bad eraseblocks here -
+ * they'll be handled later.
+ */
+ list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
+ if (err)
+ continue;
+
+ aeb->ec += 1;
+ list_del(&aeb->u.list);
+ dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
+ }
+
+ ubi_err(ubi, "no free eraseblocks");
+ return ERR_PTR(-ENOSPC);
+}
+
+/**
+ * check_corruption - check the data area of PEB.
+ * @ubi: UBI device description object
+ * @vid_hdr: the (corrupted) VID header of this PEB
+ * @pnum: the physical eraseblock number to check
+ *
+ * This is a helper function which is used to distinguish between VID header
+ * corruptions caused by power cuts and other reasons. If the PEB contains only
+ * 0xFF bytes in the data area, the VID header is most probably corrupted
+ * because of a power cut (%0 is returned in this case). Otherwise, it was
+ * probably corrupted for some other reasons (%1 is returned in this case). A
+ * negative error code is returned if a read error occurred.
+ *
+ * If the corruption reason was a power cut, UBI can safely erase this PEB.
+ * Otherwise, it should preserve it to avoid possibly destroying important
+ * information.
+ */
+static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
+ int pnum)
+{
+ int err;
+
+ mutex_lock(&ubi->buf_mutex);
+ memset(ubi->peb_buf, 0x00, ubi->leb_size);
+
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
+ ubi->leb_size);
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
+ /*
+ * Bit-flips or integrity errors while reading the data area.
+		 * It is difficult to say for sure what type of corruption
+		 * this is, but presumably a power cut happened while this PEB
+		 * was being erased, so it became unstable and corrupted, and
+		 * should be erased.
+ */
+ err = 0;
+ goto out_unlock;
+ }
+
+ if (err)
+ goto out_unlock;
+
+ if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
+ goto out_unlock;
+
+ ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+ pnum);
+ ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+ ubi_dump_vid_hdr(vid_hdr);
+ pr_err("hexdump of PEB %d offset %d, length %d",
+ pnum, ubi->leb_start, ubi->leb_size);
+ ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ ubi->peb_buf, ubi->leb_size, 1);
+ err = 1;
+
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
+ return err;
+}
+
+/**
+ * scan_peb - scan and process UBI headers of a PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @vid: The volume ID of the found volume will be stored in this pointer
+ * @sqnum: The sqnum of the found volume will be stored in this pointer
+ *
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
+ */
+static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int pnum, int *vid, unsigned long long *sqnum)
+{
+ long long uninitialized_var(ec);
+ int err, bitflips = 0, vol_id = -1, ec_err = 0;
+
+ dbg_bld("scan PEB %d", pnum);
+
+ /* Skip bad physical eraseblocks */
+ err = ubi_io_is_bad(ubi, pnum);
+ if (err < 0)
+ return err;
+ else if (err) {
+ ai->bad_peb_count += 1;
+ return 0;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err < 0)
+ return err;
+ switch (err) {
+ case 0:
+ break;
+ case UBI_IO_BITFLIPS:
+ bitflips = 1;
+ break;
+ case UBI_IO_FF:
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 0, &ai->erase);
+ case UBI_IO_FF_BITFLIPS:
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 1, &ai->erase);
+ case UBI_IO_BAD_HDR_EBADMSG:
+ case UBI_IO_BAD_HDR:
+ /*
+		 * We also have to look at the VID header - possibly it is not
+		 * corrupted. Set the %bitflips flag so that this PEB will be
+		 * moved and its EC header re-created.
+ */
+ ec_err = err;
+ ec = UBI_UNKNOWN;
+ bitflips = 1;
+ break;
+ default:
+ ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
+ err);
+ return -EINVAL;
+ }
+
+ if (!ec_err) {
+ int image_seq;
+
+ /* Make sure UBI version is OK */
+ if (ech->version != UBI_VERSION) {
+ ubi_err(ubi, "this UBI version is %d, image version is %d",
+ UBI_VERSION, (int)ech->version);
+ return -EINVAL;
+ }
+
+ ec = be64_to_cpu(ech->ec);
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. The EC headers have 64 bits
+			 * reserved, but we only make use of 31-bit values, as
+			 * this seems to be enough for any existing
+ * flash. Upgrade UBI and use 64-bit erase counters
+ * internally.
+ */
+ ubi_err(ubi, "erase counter overflow, max is %d",
+ UBI_MAX_ERASECOUNTER);
+ ubi_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure that all PEBs have the same image sequence number.
+ * This allows us to detect situations when users flash UBI
+ * images incorrectly, so that the flash has the new UBI image
+ * and leftovers from the old one. This feature was added
+ * relatively recently, and the sequence number was always
+ * zero, because old UBI implementations always set it to zero.
+		 * For this reason, we do not panic if some PEBs have a zero
+		 * sequence number while other PEBs have a non-zero sequence
+		 * number.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!ubi->image_seq)
+ ubi->image_seq = image_seq;
+ if (image_seq && ubi->image_seq != image_seq) {
+ ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
+ image_seq, pnum, ubi->image_seq);
+ ubi_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+ }
+
+	/* OK, we're done with the EC header, let's look at the VID header */
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
+ if (err < 0)
+ return err;
+ switch (err) {
+ case 0:
+ break;
+ case UBI_IO_BITFLIPS:
+ bitflips = 1;
+ break;
+ case UBI_IO_BAD_HDR_EBADMSG:
+ if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
+ /*
+ * Both EC and VID headers are corrupted and were read
+			 * with a data integrity error; probably this is a bad
+			 * PEB, but it is not marked as bad yet. This may also
+			 * be a result of a power cut during erasure.
+ */
+ ai->maybe_bad_peb_count += 1;
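+		/* fall through */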
+ case UBI_IO_BAD_HDR:
+ if (ec_err)
+ /*
+ * Both headers are corrupted. There is a possibility
+			 * that this is a valid UBI PEB with a corresponding
+ * LEB, but the headers are corrupted. However, it is
+ * impossible to distinguish it from a PEB which just
+ * contains garbage because of a power cut during erase
+ * operation. So we just schedule this PEB for erasure.
+ *
+ * Besides, in case of NOR flash, we deliberately
+ * corrupt both headers because NOR flash erasure is
+ * slow and can start from the end.
+ */
+ err = 0;
+ else
+ /*
+ * The EC was OK, but the VID header is corrupted. We
+ * have to check what is in the data area.
+ */
+ err = check_corruption(ubi, vidh, pnum);
+
+ if (err < 0)
+ return err;
+ else if (!err)
+ /* This corruption is caused by a power cut */
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
+ else
+ /* This is an unexpected corruption */
+ err = add_corrupted(ai, pnum, ec);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF_BITFLIPS:
+ err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ ec, 1, &ai->erase);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF:
+ if (ec_err || bitflips)
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
+ else
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 0, &ai->free);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ default:
+ ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
+ err);
+ return -EINVAL;
+ }
+
+ vol_id = be32_to_cpu(vidh->vol_id);
+ if (vid)
+ *vid = vol_id;
+ if (sqnum)
+ *sqnum = be64_to_cpu(vidh->sqnum);
+ if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
+ int lnum = be32_to_cpu(vidh->lnum);
+
+ /* Unsupported internal volume */
+ switch (vidh->compat) {
+ case UBI_COMPAT_DELETE:
+ if (vol_id != UBI_FM_SB_VOLUME_ID
+ && vol_id != UBI_FM_DATA_VOLUME_ID) {
+ ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
+ }
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 1, &ai->erase);
+ if (err)
+ return err;
+ return 0;
+
+ case UBI_COMPAT_RO:
+ ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
+ vol_id, lnum);
+ ubi->ro_mode = 1;
+ break;
+
+ case UBI_COMPAT_PRESERVE:
+ ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
+ vol_id, lnum);
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 0, &ai->alien);
+ if (err)
+ return err;
+ return 0;
+
+ case UBI_COMPAT_REJECT:
+ ubi_err(ubi, "incompatible internal volume %d:%d found",
+ vol_id, lnum);
+ return -EINVAL;
+ }
+ }
+
+ if (ec_err)
+ ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
+ pnum);
+ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+ if (err)
+ return err;
+
+adjust_mean_ec:
+ if (!ec_err) {
+ ai->ec_sum += ec;
+ ai->ec_count += 1;
+ if (ec > ai->max_ec)
+ ai->max_ec = ec;
+ if (ec < ai->min_ec)
+ ai->min_ec = ec;
+ }
+
+ return 0;
+}
+
+/**
+ * late_analysis - analyze the overall situation with PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This is a helper function which looks at what PEBs we have after we
+ * gather information about all of them ("ai" is complete). It decides whether
+ * the flash is empty and should be formatted, or whether there are too many
+ * corrupted PEBs and we should not attach this MTD device. Returns zero if we
+ * should proceed with attaching the MTD device, and %-EINVAL if we should not.
+ */
+static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ int max_corr, peb_count;
+
+ peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
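+	/*
+	 * Tolerate up to ~5% corrupted PEBs, with a floor of 8 - the GCC
+	 * '?:' extension falls back to 8 when peb_count / 20 is zero.
+	 */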
+ max_corr = peb_count / 20 ?: 8;
+
+ /*
+	 * A few corrupted PEBs are not a problem and may be just a result of
+	 * unclean reboots. However, many of them may indicate problems
+	 * with the flash HW or the driver.
+ */
+ if (ai->corr_peb_count) {
+ ubi_err(ubi, "%d PEBs are corrupted and preserved",
+ ai->corr_peb_count);
+ pr_err("Corrupted PEBs are:");
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ pr_cont(" %d", aeb->pnum);
+ pr_cont("\n");
+
+ /*
+ * If too many PEBs are corrupted, we refuse attaching,
+ * otherwise, only print a warning.
+ */
+ if (ai->corr_peb_count >= max_corr) {
+ ubi_err(ubi, "too many corrupted PEBs, refusing");
+ return -EINVAL;
+ }
+ }
+
+ if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
+ /*
+		 * All PEBs are empty, or almost all - a couple of PEBs look
+		 * like they may be bad PEBs which have not been marked as bad
+		 * yet.
+ *
+ * This piece of code basically tries to distinguish between
+ * the following situations:
+ *
+		 * 1. Flash is empty, but there are a few bad PEBs, which are
+		 * not marked as bad so far, and which were read with errors.
+		 * We want to go ahead and format this flash. While formatting,
+ * the faulty PEBs will probably be marked as bad.
+ *
+ * 2. Flash contains non-UBI data and we do not want to format
+ * it and destroy possibly important information.
+ */
+ if (ai->maybe_bad_peb_count <= 2) {
+ ai->is_empty = 1;
+ ubi_msg(ubi, "empty MTD device detected");
+ get_random_bytes(&ubi->image_seq,
+ sizeof(ubi->image_seq));
+ } else {
+ ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
+ return -EINVAL;
+ }
+
+ }
+
+ return 0;
+}
+
+/**
+ * destroy_av - free volume attaching information.
+ * @av: volume attaching information
+ * @ai: attaching information
+ *
+ * This function destroys the volume attaching information.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *this = av->root.rb_node;
+
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
+ this = rb_parent(this);
+ if (this) {
+ if (this->rb_left == &aeb->u.rb)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ }
+ kfree(av);
+}
+
+/**
+ * destroy_ai - destroy attaching information.
+ * @ai: attaching information
+ */
+static void destroy_ai(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb, *aeb_tmp;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb;
+
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+
+ /* Destroy the volume RB-tree */
+ rb = ai->volumes.rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ av = rb_entry(rb, struct ubi_ainf_volume, rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &av->rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ destroy_av(ai, av);
+ }
+ }
+
+ if (ai->aeb_slab_cache)
+ kmem_cache_destroy(ai->aeb_slab_cache);
+
+ kfree(ai);
+}
+
+/**
+ * scan_all - scan entire MTD device.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ * @start: start scanning at this PEB
+ *
+ * This function does a full scan of the MTD device and fills @ai with
+ * complete information about it. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int start)
+{
+ int err, pnum;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
+
+ err = -ENOMEM;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ return err;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ for (pnum = start; pnum < ubi->peb_count; pnum++) {
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, ai, pnum, NULL, NULL);
+ if (err < 0)
+ goto out_vidh;
+ }
+
+ ubi_msg(ubi, "scanning is finished");
+
+ /* Calculate mean erase counter */
+ if (ai->ec_count)
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+
+ err = late_analysis(ubi, ai);
+ if (err)
+ goto out_vidh;
+
+ /*
+	 * In case of an unknown erase counter we use the mean erase counter
+ * value.
+ */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+ }
+
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+ }
+
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ err = self_check_ai(ubi, ai);
+ if (err)
+ goto out_vidh;
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ return 0;
+
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+ return err;
+}
+
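+/**
+ * alloc_ai - allocate and initialize the attaching information object.
+ *
+ * Returns a pointer to the newly allocated object in case of success and
+ * %NULL in case of failure.
+ */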
+static struct ubi_attach_info *alloc_ai(void)
+{
+ struct ubi_attach_info *ai;
+
+ ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+ if (!ai)
+ return ai;
+
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ kfree(ai);
+ ai = NULL;
+ }
+
+ return ai;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
+/**
+ * scan_fast - try to find a fastmap and attach from it.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ *
+ * Returns 0 on success, while negative return values indicate an internal
+ * error. %UBI_NO_FASTMAP denotes that no fastmap was found, and
+ * %UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
+ */
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
+{
+ int err, pnum, fm_anchor = -1;
+ unsigned long long max_sqnum = 0;
+
+ err = -ENOMEM;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ goto out;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+ int vol_id = -1;
+ unsigned long long sqnum = -1;
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
+ if (err < 0)
+ goto out_vidh;
+
+ if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
+ max_sqnum = sqnum;
+ fm_anchor = pnum;
+ }
+ }
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ if (fm_anchor < 0)
+ return UBI_NO_FASTMAP;
+
+ destroy_ai(*ai);
+ *ai = alloc_ai();
+ if (!*ai)
+ return -ENOMEM;
+
+ return ubi_scan_fastmap(ubi, *ai, fm_anchor);
+
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+out:
+ return err;
+}
+
+#endif
+
+/**
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
+ * @force_scan: if set to non-zero attach by scanning
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_attach(struct ubi_device *ubi, int force_scan)
+{
+ int err;
+ struct ubi_attach_info *ai;
+
+ ai = alloc_ai();
+ if (!ai)
+ return -ENOMEM;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* On small flash devices we disable fastmap in any case. */
+ if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
+ ubi->fm_disabled = 1;
+ force_scan = 1;
+ }
+
+ if (force_scan)
+ err = scan_all(ubi, ai, 0);
+ else {
+ err = scan_fast(ubi, &ai);
+ if (err > 0 || mtd_is_eccerr(err)) {
+ if (err != UBI_NO_FASTMAP) {
+ destroy_ai(ai);
+ ai = alloc_ai();
+ if (!ai)
+ return -ENOMEM;
+
+ err = scan_all(ubi, ai, 0);
+ } else {
+ err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ }
+ }
+ }
+#else
+ err = scan_all(ubi, ai, 0);
+#endif
+ if (err)
+ goto out_ai;
+
+ ubi->bad_peb_count = ai->bad_peb_count;
+ ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+ ubi->corr_peb_count = ai->corr_peb_count;
+ ubi->max_ec = ai->max_ec;
+ ubi->mean_ec = ai->mean_ec;
+ dbg_gen("max. sequence number: %llu", ai->max_sqnum);
+
+ err = ubi_read_volume_table(ubi, ai);
+ if (err)
+ goto out_ai;
+
+ err = ubi_wl_init(ubi, ai);
+ if (err)
+ goto out_vtbl;
+
+ err = ubi_eba_init(ubi, ai);
+ if (err)
+ goto out_wl;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
+ struct ubi_attach_info *scan_ai;
+
+ scan_ai = alloc_ai();
+ if (!scan_ai) {
+ err = -ENOMEM;
+ goto out_wl;
+ }
+
+ err = scan_all(ubi, scan_ai, 0);
+ if (err) {
+ destroy_ai(scan_ai);
+ goto out_wl;
+ }
+
+ err = self_check_eba(ubi, ai, scan_ai);
+ destroy_ai(scan_ai);
+
+ if (err)
+ goto out_wl;
+ }
+#endif
+
+ destroy_ai(ai);
+ return 0;
+
+out_wl:
+ ubi_wl_close(ubi);
+out_vtbl:
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+out_ai:
+ destroy_ai(ai);
+ return err;
+}
+
+/**
+ * self_check_ai - check the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero if the attaching information is all right, and a
+ * negative error code if not or if an error occurred.
+ */
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int pnum, err, vols_found = 0;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *last_aeb;
+ uint8_t *buf;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ /*
+	 * First, check that the attaching information is OK.
+ */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ int leb_count = 0;
+
+ cond_resched();
+
+ vols_found += 1;
+
+ if (ai->is_empty) {
+ ubi_err(ubi, "bad is_empty flag");
+ goto bad_av;
+ }
+
+ if (av->vol_id < 0 || av->highest_lnum < 0 ||
+ av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
+ av->data_pad < 0 || av->last_data_size < 0) {
+ ubi_err(ubi, "negative values");
+ goto bad_av;
+ }
+
+ if (av->vol_id >= UBI_MAX_VOLUMES &&
+ av->vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err(ubi, "bad vol_id");
+ goto bad_av;
+ }
+
+ if (av->vol_id > ai->highest_vol_id) {
+ ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
+ ai->highest_vol_id, av->vol_id);
+ goto out;
+ }
+
+ if (av->vol_type != UBI_DYNAMIC_VOLUME &&
+ av->vol_type != UBI_STATIC_VOLUME) {
+ ubi_err(ubi, "bad vol_type");
+ goto bad_av;
+ }
+
+ if (av->data_pad > ubi->leb_size / 2) {
+ ubi_err(ubi, "bad data_pad");
+ goto bad_av;
+ }
+
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ cond_resched();
+
+ last_aeb = aeb;
+ leb_count += 1;
+
+ if (aeb->pnum < 0 || aeb->ec < 0) {
+ ubi_err(ubi, "negative values");
+ goto bad_aeb;
+ }
+
+ if (aeb->ec < ai->min_ec) {
+ ubi_err(ubi, "bad ai->min_ec (%d), %d found",
+ ai->min_ec, aeb->ec);
+ goto bad_aeb;
+ }
+
+ if (aeb->ec > ai->max_ec) {
+ ubi_err(ubi, "bad ai->max_ec (%d), %d found",
+ ai->max_ec, aeb->ec);
+ goto bad_aeb;
+ }
+
+ if (aeb->pnum >= ubi->peb_count) {
+ ubi_err(ubi, "too high PEB number %d, total PEBs %d",
+ aeb->pnum, ubi->peb_count);
+ goto bad_aeb;
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME) {
+ if (aeb->lnum >= av->used_ebs) {
+ ubi_err(ubi, "bad lnum or used_ebs");
+ goto bad_aeb;
+ }
+ } else {
+ if (av->used_ebs != 0) {
+ ubi_err(ubi, "non-zero used_ebs");
+ goto bad_aeb;
+ }
+ }
+
+ if (aeb->lnum > av->highest_lnum) {
+ ubi_err(ubi, "incorrect highest_lnum or lnum");
+ goto bad_aeb;
+ }
+ }
+
+ if (av->leb_count != leb_count) {
+ ubi_err(ubi, "bad leb_count, %d objects in the tree",
+ leb_count);
+ goto bad_av;
+ }
+
+ if (!last_aeb)
+ continue;
+
+ aeb = last_aeb;
+
+ if (aeb->lnum != av->highest_lnum) {
+ ubi_err(ubi, "bad highest_lnum");
+ goto bad_aeb;
+ }
+ }
+
+ if (vols_found != ai->vols_found) {
+ ubi_err(ubi, "bad ai->vols_found %d, should be %d",
+ ai->vols_found, vols_found);
+ goto out;
+ }
+
+ /* Check that attaching information is correct */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ int vol_type;
+
+ cond_resched();
+
+ last_aeb = aeb;
+
+ err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "VID header is not OK (%d)",
+ err);
+ if (err > 0)
+ err = -EIO;
+ return err;
+ }
+
+ vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
+ UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+ if (av->vol_type != vol_type) {
+ ubi_err(ubi, "bad vol_type");
+ goto bad_vid_hdr;
+ }
+
+ if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
+ ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
+ ubi_err(ubi, "bad vol_id %d", av->vol_id);
+ goto bad_vid_hdr;
+ }
+
+ if (av->compat != vidh->compat) {
+ ubi_err(ubi, "bad compat %d", vidh->compat);
+ goto bad_vid_hdr;
+ }
+
+ if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err(ubi, "bad lnum %d", aeb->lnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+ ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
+ goto bad_vid_hdr;
+ }
+
+ if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
+ ubi_err(ubi, "bad data_pad %d", av->data_pad);
+ goto bad_vid_hdr;
+ }
+ }
+
+ if (!last_aeb)
+ continue;
+
+ if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
+ ubi_err(ubi, "bad last_data_size %d",
+ av->last_data_size);
+ goto bad_vid_hdr;
+ }
+ }
+
+ /*
+ * Make sure that all the physical eraseblocks are in one of the lists
+ * or trees.
+ */
+ buf = kzalloc(ubi->peb_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ err = ubi_io_is_bad(ubi, pnum);
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ } else if (err)
+ buf[pnum] = 1;
+ }
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->alien, u.list)
+ buf[aeb->pnum] = 1;
+
+ err = 0;
+ for (pnum = 0; pnum < ubi->peb_count; pnum++)
+ if (!buf[pnum]) {
+ ubi_err(ubi, "PEB %d is not referred", pnum);
+ err = 1;
+ }
+
+ kfree(buf);
+ if (err)
+ goto out;
+ return 0;
+
+bad_aeb:
+ ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_av(av);
+ goto out;
+
+bad_av:
+ ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ goto out;
+
+bad_vid_hdr:
+ ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ ubi_dump_vid_hdr(vidh);
+
+out:
+ dump_stack();
+ return -EINVAL;
+}
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
new file mode 100644
index 000000000..c9eb78f10
--- /dev/null
+++ b/drivers/mtd/ubi/block.c
@@ -0,0 +1,669 @@
+/*
+ * Copyright (c) 2014 Ezequiel Garcia
+ * Copyright (c) 2011 Free Electrons
+ *
+ * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2007
+ * Authors: Artem Bityutskiy, Frank Haverkamp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ */
+
+/*
+ * Read-only block devices on top of UBI volumes
+ *
+ * A simple implementation to allow a block device to be layered on top of a
+ * UBI volume. The implementation is provided by creating a static 1-to-1
+ * mapping between the block device and the UBI volume.
+ *
+ * The addressed byte is obtained from the addressed block sector, which is
+ * mapped linearly into the corresponding LEB:
+ *
+ * LEB number = addressed byte / LEB size
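+ *
+ * For example, with a (hypothetical) 128 KiB LEB size, byte 200000 of the
+ * block device maps to LEB 1 at offset 68928 (200000 - 131072).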
+ *
+ * This feature is compiled into the UBI core, and adds a 'block' parameter
+ * to allow early creation of block devices on top of UBI volumes. Runtime
+ * block creation/removal for UBI volumes is provided through two UBI ioctls:
+ * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/mtd/ubi.h>
+#include <linux/workqueue.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/hdreg.h>
+#include <linux/scatterlist.h>
+#include <asm/div64.h>
+
+#include "ubi-media.h"
+#include "ubi.h"
+
+/* Maximum number of supported devices */
+#define UBIBLOCK_MAX_DEVICES 32
+
+/* Maximum length of the 'block=' parameter */
+#define UBIBLOCK_PARAM_LEN 63
+
+/* Maximum number of comma-separated items in the 'block=' parameter */
+#define UBIBLOCK_PARAM_COUNT 2
+
+struct ubiblock_param {
+ int ubi_num;
+ int vol_id;
+ char name[UBIBLOCK_PARAM_LEN+1];
+};
+
+struct ubiblock_pdu {
+ struct work_struct work;
+ struct ubi_sgl usgl;
+};
+
+/* Number of elements set in the @ubiblock_param array */
+static int ubiblock_devs __initdata;
+
+/* UBI block device specification parameters */
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+
+struct ubiblock {
+ struct ubi_volume_desc *desc;
+ int ubi_num;
+ int vol_id;
+ int refcnt;
+ int leb_size;
+
+ struct gendisk *gd;
+ struct request_queue *rq;
+
+ struct workqueue_struct *wq;
+
+ struct mutex dev_mutex;
+ struct list_head list;
+ struct blk_mq_tag_set tag_set;
+};
+
+/* Linked list of all ubiblock instances */
+static LIST_HEAD(ubiblock_devices);
+static DEFINE_MUTEX(devices_mutex);
+static int ubiblock_major;
+
+static int __init ubiblock_set_param(const char *val,
+ const struct kernel_param *kp)
+{
+ int i, ret;
+ size_t len;
+ struct ubiblock_param *param;
+ char buf[UBIBLOCK_PARAM_LEN];
+ char *pbuf = &buf[0];
+ char *tokens[UBIBLOCK_PARAM_COUNT];
+
+ if (!val)
+ return -EINVAL;
+
+ len = strnlen(val, UBIBLOCK_PARAM_LEN);
+ if (len == 0) {
+ pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
+ return 0;
+ }
+
+ if (len == UBIBLOCK_PARAM_LEN) {
+ pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
+ val, UBIBLOCK_PARAM_LEN);
+ return -EINVAL;
+ }
+
+ strcpy(buf, val);
+
+ /* Get rid of the final newline */
+ if (buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
+ tokens[i] = strsep(&pbuf, ",");
+
+	/* Do not overflow the ubiblock_param array */
+	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
+		pr_err("UBI: block: too many parameters, max. is %d\n",
+		       UBIBLOCK_MAX_DEVICES);
+		return -EINVAL;
+	}
+
+	param = &ubiblock_param[ubiblock_devs];
+ if (tokens[1]) {
+ /* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
+ ret = kstrtoint(tokens[0], 10, &param->ubi_num);
+ if (ret < 0)
+ return -EINVAL;
+
+ /* Second param can be a number or a name */
+ ret = kstrtoint(tokens[1], 10, &param->vol_id);
+ if (ret < 0) {
+ param->vol_id = -1;
+ strcpy(param->name, tokens[1]);
+ }
+
+ } else {
+ /* One parameter: must be device path */
+ strcpy(param->name, tokens[0]);
+ param->ubi_num = -1;
+ param->vol_id = -1;
+ }
+
+ ubiblock_devs++;
+
+ return 0;
+}
+
+static struct kernel_param_ops ubiblock_param_ops = {
+ .set = ubiblock_set_param,
+};
+module_param_cb(block, &ubiblock_param_ops, NULL, 0);
+MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
+ "Multiple \"block\" parameters may be specified.\n"
+ "UBI volumes may be specified by their number, name, or path to the device node.\n"
+ "Examples\n"
+ "Using the UBI volume path:\n"
+ "ubi.block=/dev/ubi0_0\n"
+ "Using the UBI device, and the volume name:\n"
+ "ubi.block=0,rootfs\n"
+ "Using both UBI device number and UBI volume number:\n"
+ "ubi.block=0,0\n");
+
+static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
+{
+ struct ubiblock *dev;
+
+ list_for_each_entry(dev, &ubiblock_devices, list)
+ if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
+ return dev;
+ return NULL;
+}
+
+static int ubiblock_read(struct ubiblock_pdu *pdu)
+{
+ int ret, leb, offset, bytes_left, to_read;
+ u64 pos;
+ struct request *req = blk_mq_rq_from_pdu(pdu);
+ struct ubiblock *dev = req->q->queuedata;
+
+ to_read = blk_rq_bytes(req);
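+	/* blk_rq_pos() is expressed in 512-byte sectors, hence the shift by 9 */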
+ pos = blk_rq_pos(req) << 9;
+
+ /* Get LEB:offset address to read from */
+ offset = do_div(pos, dev->leb_size);
+ leb = pos;
+ bytes_left = to_read;
+
+ while (bytes_left) {
+ /*
+ * We can only read one LEB at a time. Therefore if the read
+		 * crosses a LEB boundary, we split the operation.
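+		 * E.g., with 128 KiB LEBs, a 200 KiB read starting at the
+		 * beginning of a LEB is split into a 128 KiB read from that
+		 * LEB and a 72 KiB read from the next one.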
+ */
+ if (offset + to_read > dev->leb_size)
+ to_read = dev->leb_size - offset;
+
+ ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
+ if (ret < 0)
+ return ret;
+
+ bytes_left -= to_read;
+ to_read = bytes_left;
+ leb += 1;
+ offset = 0;
+ }
+ return 0;
+}
+
+static int ubiblock_open(struct block_device *bdev, fmode_t mode)
+{
+ struct ubiblock *dev = bdev->bd_disk->private_data;
+ int ret;
+
+ mutex_lock(&dev->dev_mutex);
+ if (dev->refcnt > 0) {
+ /*
+ * The volume is already open, just increase the reference
+ * counter.
+ */
+ goto out_done;
+ }
+
+ /*
+ * We want users to be aware they should only mount us as read-only.
+ * It's just a paranoid check, as write requests will get rejected
+ * in any case.
+ */
+ if (mode & FMODE_WRITE) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
+ if (IS_ERR(dev->desc)) {
+ dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
+ dev->ubi_num, dev->vol_id);
+ ret = PTR_ERR(dev->desc);
+ dev->desc = NULL;
+ goto out_unlock;
+ }
+
+out_done:
+ dev->refcnt++;
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+
+out_unlock:
+ mutex_unlock(&dev->dev_mutex);
+ return ret;
+}
+
+static void ubiblock_release(struct gendisk *gd, fmode_t mode)
+{
+ struct ubiblock *dev = gd->private_data;
+
+ mutex_lock(&dev->dev_mutex);
+ dev->refcnt--;
+ if (dev->refcnt == 0) {
+ ubi_close_volume(dev->desc);
+ dev->desc = NULL;
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ /* Some tools might require this information */
+ geo->heads = 1;
+ geo->cylinders = 1;
+ geo->sectors = get_capacity(bdev->bd_disk);
+ geo->start = 0;
+ return 0;
+}
+
+static const struct block_device_operations ubiblock_ops = {
+ .owner = THIS_MODULE,
+ .open = ubiblock_open,
+ .release = ubiblock_release,
+ .getgeo = ubiblock_getgeo,
+};
+
+static void ubiblock_do_work(struct work_struct *work)
+{
+ int ret;
+ struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
+ struct request *req = blk_mq_rq_from_pdu(pdu);
+
+ blk_mq_start_request(req);
+
+ /*
+ * It is safe to ignore the return value of blk_rq_map_sg() because
+ * the number of sg entries is limited to UBI_MAX_SG_COUNT
+ * and ubi_read_sg() will check that limit.
+ */
+ blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+
+ ret = ubiblock_read(pdu);
+ rq_flush_dcache_pages(req);
+
+ blk_mq_end_request(req, ret);
+}
+
+static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *req = bd->rq;
+ struct ubiblock *dev = hctx->queue->queuedata;
+ struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
+
+ if (req->cmd_type != REQ_TYPE_FS)
+ return BLK_MQ_RQ_QUEUE_ERROR;
+
+ if (rq_data_dir(req) != READ)
+ return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */
+
+ ubi_sgl_init(&pdu->usgl);
+ queue_work(dev->wq, &pdu->work);
+
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static int ubiblock_init_request(void *data, struct request *req,
+ unsigned int hctx_idx,
+ unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
+
+ sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
+ INIT_WORK(&pdu->work, ubiblock_do_work);
+
+ return 0;
+}
+
+static struct blk_mq_ops ubiblock_mq_ops = {
+ .queue_rq = ubiblock_queue_rq,
+ .init_request = ubiblock_init_request,
+ .map_queue = blk_mq_map_queue,
+};
+
+int ubiblock_create(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+ struct gendisk *gd;
+ u64 disk_capacity = vi->used_bytes >> 9;
+ int ret;
+
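+	/*
+	 * sector_t may be narrower than u64 (e.g., on 32-bit systems), so
+	 * refuse volumes whose capacity in 512-byte sectors would not fit.
+	 */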
+ if ((sector_t)disk_capacity != disk_capacity)
+ return -EFBIG;
+ /* Check that the volume isn't already handled */
+ mutex_lock(&devices_mutex);
+ if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
+ mutex_unlock(&devices_mutex);
+ return -EEXIST;
+ }
+ mutex_unlock(&devices_mutex);
+
+ dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->dev_mutex);
+
+ dev->ubi_num = vi->ubi_num;
+ dev->vol_id = vi->vol_id;
+ dev->leb_size = vi->usable_leb_size;
+
+ /* Initialize the gendisk of this ubiblock device */
+ gd = alloc_disk(1);
+ if (!gd) {
+ pr_err("UBI: block: alloc_disk failed");
+ ret = -ENODEV;
+ goto out_free_dev;
+ }
+
+ gd->fops = &ubiblock_ops;
+ gd->major = ubiblock_major;
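+	/* Derive a unique minor number from the (UBI device, volume) pair */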
+ gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
+ gd->private_data = dev;
+ sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
+ set_capacity(gd, disk_capacity);
+ dev->gd = gd;
+
+ dev->tag_set.ops = &ubiblock_mq_ops;
+ dev->tag_set.queue_depth = 64;
+ dev->tag_set.numa_node = NUMA_NO_NODE;
+ dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
+ dev->tag_set.driver_data = dev;
+ dev->tag_set.nr_hw_queues = 1;
+
+ ret = blk_mq_alloc_tag_set(&dev->tag_set);
+ if (ret) {
+ dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
+ goto out_put_disk;
+ }
+
+ dev->rq = blk_mq_init_queue(&dev->tag_set);
+ if (IS_ERR(dev->rq)) {
+ dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
+ ret = PTR_ERR(dev->rq);
+ goto out_free_tags;
+ }
+ blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
+
+ dev->rq->queuedata = dev;
+ dev->gd->queue = dev->rq;
+
+ /*
+ * Create one workqueue per volume (per registered block device).
+ * Remember, workqueues are cheap: they are not threads.
+ */
+ dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
+ if (!dev->wq) {
+ ret = -ENOMEM;
+ goto out_free_queue;
+ }
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&dev->list, &ubiblock_devices);
+ mutex_unlock(&devices_mutex);
+
+ /* Must be the last step: anyone can call file ops from now on */
+ add_disk(dev->gd);
+ dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
+ dev->ubi_num, dev->vol_id, vi->name);
+ return 0;
+
+out_free_queue:
+ blk_cleanup_queue(dev->rq);
+out_free_tags:
+ blk_mq_free_tag_set(&dev->tag_set);
+out_put_disk:
+ put_disk(dev->gd);
+out_free_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+static void ubiblock_cleanup(struct ubiblock *dev)
+{
+ /* Stop new requests from arriving */
+ del_gendisk(dev->gd);
+ /* Flush pending work */
+ destroy_workqueue(dev->wq);
+ /* Finally destroy the blk queue */
+ blk_cleanup_queue(dev->rq);
+ blk_mq_free_tag_set(&dev->tag_set);
+ dev_info(disk_to_dev(dev->gd), "released");
+ put_disk(dev->gd);
+}
+
+int ubiblock_remove(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+
+ mutex_lock(&devices_mutex);
+ dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
+ if (!dev) {
+ mutex_unlock(&devices_mutex);
+ return -ENODEV;
+ }
+
+ /* Found a device, let's lock it so we can check if it's busy */
+ mutex_lock(&dev->dev_mutex);
+ if (dev->refcnt > 0) {
+ mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+ return -EBUSY;
+ }
+
+ /* Remove from device list */
+ list_del(&dev->list);
+ mutex_unlock(&devices_mutex);
+
+ ubiblock_cleanup(dev);
+ mutex_unlock(&dev->dev_mutex);
+ kfree(dev);
+ return 0;
+}
+
+static int ubiblock_resize(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+ u64 disk_capacity = vi->used_bytes >> 9;
+
+ /*
+ * Need to lock the device list until we stop using the device,
+ * otherwise the device struct might get released in
+ * 'ubiblock_remove()'.
+ */
+ mutex_lock(&devices_mutex);
+ dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
+ if (!dev) {
+ mutex_unlock(&devices_mutex);
+ return -ENODEV;
+ }
+ if ((sector_t)disk_capacity != disk_capacity) {
+ mutex_unlock(&devices_mutex);
+ dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize",
+ vi->size);
+ return -EFBIG;
+ }
+
+ mutex_lock(&dev->dev_mutex);
+
+ if (get_capacity(dev->gd) != disk_capacity) {
+ set_capacity(dev->gd, disk_capacity);
+ dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
+ vi->used_bytes);
+ }
+ mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+static int ubiblock_notify(struct notifier_block *nb,
+ unsigned long notification_type, void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (notification_type) {
+ case UBI_VOLUME_ADDED:
+ /*
+ * We want to enforce explicit block device creation for
+ * volumes, so when a volume is added we do nothing.
+ */
+ break;
+ case UBI_VOLUME_REMOVED:
+ ubiblock_remove(&nt->vi);
+ break;
+ case UBI_VOLUME_RESIZED:
+ ubiblock_resize(&nt->vi);
+ break;
+ case UBI_VOLUME_UPDATED:
+ /*
+ * If the volume is static, a content update might mean the
+ * size (i.e. used_bytes) was also changed.
+ */
+ if (nt->vi.vol_type == UBI_STATIC_VOLUME)
+ ubiblock_resize(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ubiblock_notifier = {
+ .notifier_call = ubiblock_notify,
+};
+
+static struct ubi_volume_desc * __init
+open_volume_desc(const char *name, int ubi_num, int vol_id)
+{
+ if (ubi_num == -1)
+ /* No ubi num, name must be a vol device path */
+ return ubi_open_volume_path(name, UBI_READONLY);
+ else if (vol_id == -1)
+ /* No vol_id, must be vol_name */
+ return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
+ else
+ return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
+}
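+
+/*
+ * The three branches above correspond to the forms the 'block=' module
+ * parameter may take: a volume character device path (e.g.
+ * "block=/dev/ubi0_0"), an UBI device number plus a volume name (e.g.
+ * "block=0,rootfs"), or an UBI device number plus a volume ID (e.g.
+ * "block=0,0").
+ */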
+
+static void __init ubiblock_create_from_param(void)
+{
+ int i, ret = 0;
+ struct ubiblock_param *p;
+ struct ubi_volume_desc *desc;
+ struct ubi_volume_info vi;
+
+ /*
+ * If there is an error creating one of the ubiblocks, continue on to
+ * create the following ubiblocks. This helps in a circumstance where
+ * the kernel command-line specifies multiple block devices and some
+ * may be broken, but we still want the working ones to come up.
+ */
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
+ if (IS_ERR(desc)) {
+ pr_err(
+ "UBI: block: can't open volume on ubi%d_%d, err=%ld",
+ p->ubi_num, p->vol_id, PTR_ERR(desc));
+ continue;
+ }
+
+ ubi_get_volume_info(desc, &vi);
+ ubi_close_volume(desc);
+
+ ret = ubiblock_create(&vi);
+ if (ret) {
+ pr_err(
+ "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d",
+ vi.name, p->ubi_num, p->vol_id, ret);
+ continue;
+ }
+ }
+}
+
+static void ubiblock_remove_all(void)
+{
+ struct ubiblock *next;
+ struct ubiblock *dev;
+
+ list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
+ /* The module is being forcefully removed */
+ WARN_ON(dev->desc);
+ /* Remove from device list */
+ list_del(&dev->list);
+ ubiblock_cleanup(dev);
+ kfree(dev);
+ }
+}
+
+int __init ubiblock_init(void)
+{
+ int ret;
+
+ ubiblock_major = register_blkdev(0, "ubiblock");
+ if (ubiblock_major < 0)
+ return ubiblock_major;
+
+ /*
+ * Attach block devices from 'block=' module param.
+ * Even if one block device in the param list fails to come up,
+ * still allow the module to load and leave any others up.
+ */
+ ubiblock_create_from_param();
+
+ /*
+ * Block devices are only created upon user requests, so we ignore
+ * existing volumes.
+ */
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ if (ret)
+ goto err_unreg;
+ return 0;
+
+err_unreg:
+ unregister_blkdev(ubiblock_major, "ubiblock");
+ ubiblock_remove_all();
+ return ret;
+}
+
+void __exit ubiblock_exit(void)
+{
+ ubi_unregister_volume_notifier(&ubiblock_notifier);
+ ubiblock_remove_all();
+ unregister_blkdev(ubiblock_major, "ubiblock");
+}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
new file mode 100644
index 000000000..b7f824d5e
--- /dev/null
+++ b/drivers/mtd/ubi/build.c
@@ -0,0 +1,1515 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём),
+ * Frank Haverkamp
+ */
+
+/*
+ * This file includes UBI initialization and building of UBI devices.
+ *
+ * When UBI is initialized, it attaches all the MTD devices specified as the
+ * module load parameters or the kernel boot parameters. If no MTD devices
+ * were specified, UBI does not attach any MTD device, but it is possible to
+ * do so later using the "UBI control device".
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/stringify.h>
+#include <linux/namei.h>
+#include <linux/stat.h>
+#include <linux/miscdevice.h>
+#include <linux/mtd/partitions.h>
+#include <linux/log2.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#include "ubi.h"
+
+/* Maximum length of the 'mtd=' parameter */
+#define MTD_PARAM_LEN_MAX 64
+
+/* Maximum number of comma-separated items in the 'mtd=' parameter */
+#define MTD_PARAM_MAX_COUNT 4
+
+/* Maximum value for the number of bad PEBs per 1024 PEBs */
+#define MAX_MTD_UBI_BEB_LIMIT 768
+
+#ifdef CONFIG_MTD_UBI_MODULE
+#define ubi_is_module() 1
+#else
+#define ubi_is_module() 0
+#endif
+
+/**
+ * struct mtd_dev_param - MTD device parameter description data structure.
+ * @name: MTD character device node path, MTD device name, or MTD device number
+ * string
+ * @vid_hdr_offs: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
+ */
+struct mtd_dev_param {
+ char name[MTD_PARAM_LEN_MAX];
+ int ubi_num;
+ int vid_hdr_offs;
+ int max_beb_per1024;
+};
+
+/* Number of elements set in the @mtd_dev_param array */
+static int __initdata mtd_devs;
+
+/* MTD devices specification parameters */
+static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/* UBI module parameter to enable fastmap automatically on non-fastmap images */
+static bool fm_autoconvert;
+static bool fm_debug;
+#endif
+/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
+struct class *ubi_class;
+
+/* Slab cache for wear-leveling entries */
+struct kmem_cache *ubi_wl_entry_slab;
+
+/* UBI control character device */
+static struct miscdevice ubi_ctrl_cdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ubi_ctrl",
+ .fops = &ubi_ctrl_cdev_operations,
+};
+
+/* All UBI devices in system */
+static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
+
+/* Serializes UBI devices creations and removals */
+DEFINE_MUTEX(ubi_devices_mutex);
+
+/* Protects @ubi_devices and @ubi->ref_count */
+static DEFINE_SPINLOCK(ubi_devices_lock);
+
+/* "Show" method for files in '/<sysfs>/class/ubi/' */
+static ssize_t ubi_version_show(struct class *class,
+ struct class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", UBI_VERSION);
+}
+
+/* UBI version attribute ('/<sysfs>/class/ubi/version') */
+static struct class_attribute ubi_version =
+ __ATTR(version, S_IRUGO, ubi_version_show, NULL);
+
+static ssize_t dev_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
+static struct device_attribute dev_eraseblock_size =
+ __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_avail_eraseblocks =
+ __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_total_eraseblocks =
+ __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_volumes_count =
+ __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_max_ec =
+ __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_reserved_for_bad =
+ __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_bad_peb_count =
+ __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_max_vol_count =
+ __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_min_io_size =
+ __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_bgt_enabled =
+ __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_num =
+ __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+
+/**
+ * ubi_volume_notify - send a volume change notification.
+ * @ubi: UBI device description object
+ * @vol: volume description object of the changed volume
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ *
+ * This is a helper function which notifies all subscribers about a volume
+ * change event (creation, removal, re-sizing, re-naming, updating). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
+{
+ int ret;
+ struct ubi_notification nt;
+
+ ubi_do_get_device_info(ubi, &nt.di);
+ ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
+ switch (ntype) {
+ case UBI_VOLUME_ADDED:
+ case UBI_VOLUME_REMOVED:
+ case UBI_VOLUME_RESIZED:
+ case UBI_VOLUME_RENAMED:
+ ret = ubi_update_fastmap(ubi);
+ if (ret)
+ ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
+ }
+
+ return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
+}
+
+/**
+ * ubi_notify_all - send a notification to all volumes.
+ * @ubi: UBI device description object
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ * @nb: the notifier to call
+ *
+ * This function walks all volumes of UBI device @ubi and sends the @ntype
+ * notification for each volume. If @nb is %NULL, then all registered notifiers
+ * are called, otherwise only the @nb notifier is called. Returns the number of
+ * sent notifications.
+ */
+int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
+{
+ struct ubi_notification nt;
+ int i, count = 0;
+
+ ubi_do_get_device_info(ubi, &nt.di);
+
+ mutex_lock(&ubi->device_mutex);
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ /*
+ * Since the @ubi->device is locked, and we are not going to
+ * change @ubi->volumes, we do not have to lock
+ * @ubi->volumes_lock.
+ */
+ if (!ubi->volumes[i])
+ continue;
+
+ ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
+ if (nb)
+ nb->notifier_call(nb, ntype, &nt);
+ else
+ blocking_notifier_call_chain(&ubi_notifiers, ntype,
+ &nt);
+ count += 1;
+ }
+ mutex_unlock(&ubi->device_mutex);
+
+ return count;
+}
+
+/**
+ * ubi_enumerate_volumes - send "add" notification for all existing volumes.
+ * @nb: the notifier to call
+ *
+ * This function walks all UBI devices and volumes and sends the
+ * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
+ * registered notifiers are called, otherwise only the @nb notifier is called.
+ * Returns the number of sent notifications.
+ */
+int ubi_enumerate_volumes(struct notifier_block *nb)
+{
+ int i, count = 0;
+
+ /*
+ * Since the @ubi_devices_mutex is locked, and we are not going to
+ * change @ubi_devices, we do not have to lock @ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (!ubi)
+ continue;
+ count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
+ }
+
+ return count;
+}
+
+/**
+ * ubi_get_device - get UBI device.
+ * @ubi_num: UBI device number
+ *
+ * This function returns UBI device description object for UBI device number
+ * @ubi_num, or %NULL if the device does not exist. This function increases the
+ * device reference count to prevent removal of the device. In other words, the
+ * device cannot be removed if its reference count is not zero.
+ */
+struct ubi_device *ubi_get_device(int ubi_num)
+{
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ ubi = ubi_devices[ubi_num];
+ if (ubi) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi;
+}
+
+/**
+ * ubi_put_device - drop an UBI device reference.
+ * @ubi: UBI device description object
+ */
+void ubi_put_device(struct ubi_device *ubi)
+{
+ spin_lock(&ubi_devices_lock);
+ ubi->ref_count -= 1;
+ put_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+}
+
+/**
+ * ubi_get_by_major - get UBI device by character device major number.
+ * @major: major number
+ *
+ * This function is similar to 'ubi_get_device()', but it searches the device
+ * by its major number.
+ */
+struct ubi_device *ubi_get_by_major(int major)
+{
+ int i;
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+ return ubi;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return NULL;
+}
+
+/**
+ * ubi_major2num - get UBI device number by character device major number.
+ * @major: major number
+ *
+ * This function looks up the UBI device number by the major number of its
+ * character device. If the UBI device is not found, this function returns
+ * -ENODEV, otherwise the UBI device number is returned.
+ */
+int ubi_major2num(int major)
+{
+ int i, ubi_num = -ENODEV;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_num = ubi->ubi_num;
+ break;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi_num;
+}
+
+/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
+static ssize_t dev_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct ubi_device *ubi;
+
+ /*
+ * The below code looks weird, but it actually makes sense. We get the
+ * UBI device reference from the contained 'struct ubi_device'. But it
+ * is unclear whether the device has already been removed. Indeed, if
+ * the device was removed before we increased its reference count,
+ * 'ubi_get_device()' will return %NULL and we fail with -ENODEV.
+ *
+ * Remember, 'struct ubi_device' is freed in the release function, so
+ * we still can use 'ubi->ubi_num'.
+ */
+ ubi = container_of(dev, struct ubi_device, dev);
+ ubi = ubi_get_device(ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ if (attr == &dev_eraseblock_size)
+ ret = sprintf(buf, "%d\n", ubi->leb_size);
+ else if (attr == &dev_avail_eraseblocks)
+ ret = sprintf(buf, "%d\n", ubi->avail_pebs);
+ else if (attr == &dev_total_eraseblocks)
+ ret = sprintf(buf, "%d\n", ubi->good_peb_count);
+ else if (attr == &dev_volumes_count)
+ ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
+ else if (attr == &dev_max_ec)
+ ret = sprintf(buf, "%d\n", ubi->max_ec);
+ else if (attr == &dev_reserved_for_bad)
+ ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
+ else if (attr == &dev_bad_peb_count)
+ ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
+ else if (attr == &dev_max_vol_count)
+ ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
+ else if (attr == &dev_min_io_size)
+ ret = sprintf(buf, "%d\n", ubi->min_io_size);
+ else if (attr == &dev_bgt_enabled)
+ ret = sprintf(buf, "%d\n", ubi->thread_enabled);
+ else if (attr == &dev_mtd_num)
+ ret = sprintf(buf, "%d\n", ubi->mtd->index);
+ else
+ ret = -EINVAL;
+
+ ubi_put_device(ubi);
+ return ret;
+}
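+
+/*
+ * Each attribute above is read as a plain sysfs file, e.g.
+ * "cat /sys/class/ubi/ubi0/min_io_size".
+ */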
+
+static void dev_release(struct device *dev)
+{
+ struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
+
+ kfree(ubi);
+}
+
+/**
+ * ubi_sysfs_init - initialize sysfs for an UBI device.
+ * @ubi: UBI device description object
+ * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
+ * taken
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
+{
+ int err;
+
+ ubi->dev.release = dev_release;
+ ubi->dev.devt = ubi->cdev.dev;
+ ubi->dev.class = ubi_class;
+ dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
+ err = device_register(&ubi->dev);
+ if (err)
+ return err;
+
+ *ref = 1;
+ err = device_create_file(&ubi->dev, &dev_eraseblock_size);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_volumes_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_max_ec);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_bad_peb_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_max_vol_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_min_io_size);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_bgt_enabled);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_mtd_num);
+ return err;
+}
+
+/**
+ * ubi_sysfs_close - close sysfs for an UBI device.
+ * @ubi: UBI device description object
+ */
+static void ubi_sysfs_close(struct ubi_device *ubi)
+{
+ device_remove_file(&ubi->dev, &dev_mtd_num);
+ device_remove_file(&ubi->dev, &dev_bgt_enabled);
+ device_remove_file(&ubi->dev, &dev_min_io_size);
+ device_remove_file(&ubi->dev, &dev_max_vol_count);
+ device_remove_file(&ubi->dev, &dev_bad_peb_count);
+ device_remove_file(&ubi->dev, &dev_reserved_for_bad);
+ device_remove_file(&ubi->dev, &dev_max_ec);
+ device_remove_file(&ubi->dev, &dev_volumes_count);
+ device_remove_file(&ubi->dev, &dev_total_eraseblocks);
+ device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
+ device_remove_file(&ubi->dev, &dev_eraseblock_size);
+ device_unregister(&ubi->dev);
+}
+
+/**
+ * kill_volumes - destroy all user volumes.
+ * @ubi: UBI device description object
+ */
+static void kill_volumes(struct ubi_device *ubi)
+{
+ int i;
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i])
+ ubi_free_volume(ubi, ubi->volumes[i]);
+}
+
+/**
+ * uif_init - initialize user interfaces for an UBI device.
+ * @ubi: UBI device description object
+ * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
+ * taken, otherwise set to %0
+ *
+ * This function initializes various user interfaces for an UBI device. If the
+ * initialization fails at an early stage, this function frees all the
+ * resources it allocated, returns an error, and @ref is set to %0. However,
+ * if the initialization fails after the UBI device was registered in the
+ * driver core subsystem, this function takes a reference to @ubi->dev, because
+ * otherwise the release function ('dev_release()') would free the whole @ubi
+ * object. The @ref argument is set to %1 in this case. The caller has to put
+ * this reference.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int uif_init(struct ubi_device *ubi, int *ref)
+{
+ int i, err;
+ dev_t dev;
+
+ *ref = 0;
+ sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
+
+ /*
+ * Major numbers for the UBI character devices are allocated
+ * dynamically. Major numbers of volume character devices are equal to
+ * those of the corresponding UBI character device. Minor
+ * numbers of UBI character devices are 0, while minor numbers of
+ * volume character devices start from 1. Thus, we allocate one major
+ * number and ubi->vtbl_slots + 1 minor numbers.
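+ *
+ * For example, an UBI device with 128 volume table slots gets one major
+ * number and 129 minors: minor 0 for the ubiX device node itself, and
+ * minors 1..128 for the ubiX_0..ubiX_127 volume nodes.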
+ */
+ err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
+ if (err) {
+ ubi_err(ubi, "cannot register UBI character devices");
+ return err;
+ }
+
+ ubi_assert(MINOR(dev) == 0);
+ cdev_init(&ubi->cdev, &ubi_cdev_operations);
+ dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
+ ubi->cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&ubi->cdev, dev, 1);
+ if (err) {
+ ubi_err(ubi, "cannot add character device");
+ goto out_unreg;
+ }
+
+ err = ubi_sysfs_init(ubi, ref);
+ if (err)
+ goto out_sysfs;
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i]) {
+ err = ubi_add_volume(ubi, ubi->volumes[i]);
+ if (err) {
+ ubi_err(ubi, "cannot add volume %d", i);
+ goto out_volumes;
+ }
+ }
+
+ return 0;
+
+out_volumes:
+ kill_volumes(ubi);
+out_sysfs:
+ if (*ref)
+ get_device(&ubi->dev);
+ ubi_sysfs_close(ubi);
+ cdev_del(&ubi->cdev);
+out_unreg:
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+ ubi_err(ubi, "cannot initialize UBI %s, error %d",
+ ubi->ubi_name, err);
+ return err;
+}
+
+/**
+ * uif_close - close user interfaces for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * Note, since this function un-registers UBI volume device objects (@vol->dev),
+ * the memory allocated for the volumes is freed as well (in the release
+ * function).
+ */
+static void uif_close(struct ubi_device *ubi)
+{
+ kill_volumes(ubi);
+ ubi_sysfs_close(ubi);
+ cdev_del(&ubi->cdev);
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+}
+
+/**
+ * ubi_free_internal_volumes - free internal volumes.
+ * @ubi: UBI device description object
+ */
+void ubi_free_internal_volumes(struct ubi_device *ubi)
+{
+ int i;
+
+ for (i = ubi->vtbl_slots;
+ i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ kfree(ubi->volumes[i]->eba_tbl);
+ kfree(ubi->volumes[i]);
+ }
+}
+
+static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
+{
+ int limit, device_pebs;
+ uint64_t device_size;
+
+ if (!max_beb_per1024)
+ return 0;
+
+ /*
+ * Here we are using size of the entire flash chip and
+ * not just the MTD partition size because the maximum
+ * number of bad eraseblocks is a percentage of the
+ * whole device and bad eraseblocks are not fairly
+ * distributed over the flash chip. So the worst case
+ * is that all the bad eraseblocks of the chip are in
+ * the MTD partition we are attaching (ubi->mtd).
+ */
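+ /*
+ * For example, assuming a chip of 4096 PEBs and max_beb_per1024 = 20,
+ * the limit is 4096 * 20 / 1024 = 80 PEBs; the rounding below only
+ * bumps the result when the division above truncated.
+ */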
+ device_size = mtd_get_device_size(ubi->mtd);
+ device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
+ limit = mult_frac(device_pebs, max_beb_per1024, 1024);
+
+ /* Round it up */
+ if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
+ limit += 1;
+
+ return limit;
+}
+
+/**
+ * io_init - initialize I/O sub-system for a given UBI device.
+ * @ubi: UBI device description object
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
+ *
+ * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
+ * assumed:
+ * o EC header is always at offset zero - this cannot be changed;
+ * o VID header starts just after the EC header at the closest address
+ * aligned to @io->hdrs_min_io_size;
+ * o data starts just after the VID header at the closest address aligned to
+ * @io->min_io_size
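+ *
+ * For example, on a NAND flash with 2048-byte pages and no sub-pages
+ * (min_io_size = hdrs_min_io_size = 2048), the EC header sits at offset 0,
+ * the VID header at offset 2048, and data (@ubi->leb_start) at offset 4096.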
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int io_init(struct ubi_device *ubi, int max_beb_per1024)
+{
+ dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+ dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
+ if (ubi->mtd->numeraseregions != 0) {
+ /*
+ * Some flashes have several erase regions. Different regions
+ * may have different eraseblock size and other
+ * characteristics. It looks like mostly multi-region flashes
+ * have one "main" region and one or more small regions to
+ * store boot loader code or boot parameters or whatever. I
+ * guess we should just pick the largest region. But this is
+ * not implemented.
+ */
+ ubi_err(ubi, "multiple regions, not implemented");
+ return -EINVAL;
+ }
+
+ if (ubi->vid_hdr_offset < 0)
+ return -EINVAL;
+
+ /*
+ * Note, in this implementation we support MTD devices with 0x7FFFFFFF
+ * physical eraseblocks maximum.
+ */
+
+ ubi->peb_size = ubi->mtd->erasesize;
+ ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
+ ubi->flash_size = ubi->mtd->size;
+
+ if (mtd_can_have_bb(ubi->mtd)) {
+ ubi->bad_allowed = 1;
+ ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
+ }
+
+ if (ubi->mtd->type == MTD_NORFLASH) {
+ ubi_assert(ubi->mtd->writesize == 1);
+ ubi->nor_flash = 1;
+ }
+
+ ubi->min_io_size = ubi->mtd->writesize;
+ ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
+
+ /*
+ * Make sure minimal I/O unit is power of 2. Note, there is no
+ * fundamental reason for this assumption. It is just an optimization
+ * which allows us to avoid costly division operations.
+ */
+ if (!is_power_of_2(ubi->min_io_size)) {
+ ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
+ ubi->min_io_size);
+ return -EINVAL;
+ }
+
+ ubi_assert(ubi->hdrs_min_io_size > 0);
+ ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
+ ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
+
+ ubi->max_write_size = ubi->mtd->writebufsize;
+ /*
+ * The maximum write size has to be greater than or equal to the min. I/O
+ * size, and it must be a multiple of the min. I/O size.
+ */
+ if (ubi->max_write_size < ubi->min_io_size ||
+ ubi->max_write_size % ubi->min_io_size ||
+ !is_power_of_2(ubi->max_write_size)) {
+ ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
+ ubi->max_write_size, ubi->min_io_size);
+ return -EINVAL;
+ }
+
+ /* Calculate default aligned sizes of EC and VID headers */
+ ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
+ ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
+
+ dbg_gen("min_io_size %d", ubi->min_io_size);
+ dbg_gen("max_write_size %d", ubi->max_write_size);
+ dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+ dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
+ dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
+
+ if (ubi->vid_hdr_offset == 0)
+ /* Default offset */
+ ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
+ ubi->ec_hdr_alsize;
+ else {
+ ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
+ ~(ubi->hdrs_min_io_size - 1);
+ ubi->vid_hdr_shift = ubi->vid_hdr_offset -
+ ubi->vid_hdr_aloffset;
+ }
+
+ /* Similar for the data offset */
+ ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
+ ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
+
+ dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
+ dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+ dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
+ dbg_gen("leb_start %d", ubi->leb_start);
+
+ /* The shift must be aligned to 32-bit boundary */
+ if (ubi->vid_hdr_shift % 4) {
+ ubi_err(ubi, "unaligned VID header shift %d",
+ ubi->vid_hdr_shift);
+ return -EINVAL;
+ }
+
+ /* Check sanity */
+ if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
+ ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
+ ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
+ ubi->leb_start & (ubi->min_io_size - 1)) {
+ ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
+ ubi->vid_hdr_offset, ubi->leb_start);
+ return -EINVAL;
+ }
+
+ /*
+ * Set the maximum number of erroneous physical eraseblocks to 10% of the
+ * total. Erroneous PEBs are those which have read errors.
+ */
+ ubi->max_erroneous = ubi->peb_count / 10;
+ if (ubi->max_erroneous < 16)
+ ubi->max_erroneous = 16;
+ dbg_gen("max_erroneous %d", ubi->max_erroneous);
+
+ /*
+ * It may happen that EC and VID headers are situated in one minimal
+ * I/O unit. In this case we can only accept this UBI image in
+ * read-only mode.
+ */
+ if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
+ ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
+ ubi->ro_mode = 1;
+ }
+
+ ubi->leb_size = ubi->peb_size - ubi->leb_start;
+
+ if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
+ ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
+ ubi->mtd->index);
+ ubi->ro_mode = 1;
+ }
+
+ /*
+ * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
+ * unfortunately, MTD does not provide this information. We should loop
+ * over all physical eraseblocks and invoke mtd->block_is_bad() for
+ * each physical eraseblock. So, we leave @ubi->bad_peb_count
+ * uninitialized so far.
+ */
+
+ return 0;
+}
+
+/**
+ * autoresize - re-size the volume which has the "auto-resize" flag set.
+ * @ubi: UBI device description object
+ * @vol_id: ID of the volume to re-size
+ *
+ * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
+ * the volume table to the largest possible size. See comments in ubi-header.h
+ * for more description of the flag. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+static int autoresize(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_volume_desc desc;
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err, old_reserved_pebs = vol->reserved_pebs;
+
+ if (ubi->ro_mode) {
+ ubi_warn(ubi, "skip auto-resize because of R/O mode");
+ return 0;
+ }
+
+ /*
+ * Clear the auto-resize flag in the volume in-memory copy of the
+ * volume table, and 'ubi_resize_volume()' will propagate this change
+ * to the flash.
+ */
+ ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
+
+ if (ubi->avail_pebs == 0) {
+ struct ubi_vtbl_record vtbl_rec;
+
+ /*
+ * No available PEBs to re-size the volume, clear the flag on
+ * flash and exit.
+ */
+ vtbl_rec = ubi->vtbl[vol_id];
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
+ vol_id);
+ } else {
+ desc.vol = vol;
+ err = ubi_resize_volume(&desc,
+ old_reserved_pebs + ubi->avail_pebs);
+ if (err)
+ ubi_err(ubi, "cannot auto-resize volume %d",
+ vol_id);
+ }
+
+ if (err)
+ return err;
+
+ ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
+ vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
+ return 0;
+}
+
+/**
+ * ubi_attach_mtd_dev - attach an MTD device.
+ * @mtd: MTD device description object
+ * @ubi_num: number to assign to the new UBI device
+ * @vid_hdr_offset: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
+ *
+ * This function attaches MTD device @mtd to UBI and assigns number @ubi_num
+ * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
+ * which case this function finds a vacant device number and assigns it
+ * automatically. Returns the new UBI device number in case of success and a
+ * negative error code in case of failure.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
+ */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024)
+{
+ struct ubi_device *ubi;
+ int i, err, ref = 0;
+
+ if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
+ return -EINVAL;
+
+ if (!max_beb_per1024)
+ max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
+
+ /*
+ * Check if we already have the same MTD device attached.
+ *
+ * Note, this function assumes that UBI devices creations and deletions
+ * are serialized, so it does not take the &ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && mtd->index == ubi->mtd->index) {
+ ubi_err(ubi, "mtd%d is already attached to ubi%d",
+ mtd->index, i);
+ return -EEXIST;
+ }
+ }
+
+ /*
+ * Make sure this MTD device is not emulated on top of an UBI volume
+ * already. Generally this recursion works fine, but it causes problems:
+ * for example, the UBI module takes a reference to itself by attaching
+ * (and thus, opening) the emulated MTD device, which makes it impossible
+ * to unload the module. And in general it makes
+ * no sense to attach emulated MTD devices, so we prohibit this.
+ */
+ if (mtd->type == MTD_UBIVOLUME) {
+ ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
+ mtd->index);
+ return -EINVAL;
+ }
+
+ if (ubi_num == UBI_DEV_NUM_AUTO) {
+ /* Search for an empty slot in the @ubi_devices array */
+ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+ if (!ubi_devices[ubi_num])
+ break;
+ if (ubi_num == UBI_MAX_DEVICES) {
+ ubi_err(ubi, "only %d UBI devices may be created",
+ UBI_MAX_DEVICES);
+ return -ENFILE;
+ }
+ } else {
+ if (ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ /* Make sure ubi_num is not busy */
+ if (ubi_devices[ubi_num]) {
+ ubi_err(ubi, "already exists");
+ return -EEXIST;
+ }
+ }
+
+ ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
+ if (!ubi)
+ return -ENOMEM;
+
+ ubi->mtd = mtd;
+ ubi->ubi_num = ubi_num;
+ ubi->vid_hdr_offset = vid_hdr_offset;
+ ubi->autoresize_vol_id = -1;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_pool.used = ubi->fm_pool.size = 0;
+ ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
+
+ /*
+ * fm_pool.max_size is 5% of the total number of PEBs but it's also
+ * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
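+ * For example, a 1024-PEB device yields (1024 / 100) * 5 = 50 PEBs,
+ * which is then clamped into that interval.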
+ */
+ ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
+ ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
+ if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
+ ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
+
+ ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
+ ubi->fm_disabled = !fm_autoconvert;
+ if (fm_debug)
+ ubi_enable_dbg_chk_fastmap(ubi);
+
+ if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
+ <= UBI_FM_MAX_START) {
+ ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
+ UBI_FM_MAX_START);
+ ubi->fm_disabled = 1;
+ }
+
+ ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg(ubi, "default fastmap WL pool size: %d",
+ ubi->fm_wl_pool.max_size);
+#else
+ ubi->fm_disabled = 1;
+#endif
+ mutex_init(&ubi->buf_mutex);
+ mutex_init(&ubi->ckvol_mutex);
+ mutex_init(&ubi->device_mutex);
+ spin_lock_init(&ubi->volumes_lock);
+ init_rwsem(&ubi->fm_protect);
+ init_rwsem(&ubi->fm_eba_sem);
+
+ ubi_msg(ubi, "attaching mtd%d", mtd->index);
+
+ err = io_init(ubi, max_beb_per1024);
+ if (err)
+ goto out_free;
+
+ err = -ENOMEM;
+ ubi->peb_buf = vmalloc(ubi->peb_size);
+ if (!ubi->peb_buf)
+ goto out_free;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_size = ubi_calc_fm_size(ubi);
+ ubi->fm_buf = vzalloc(ubi->fm_size);
+ if (!ubi->fm_buf)
+ goto out_free;
+#endif
+ err = ubi_attach(ubi, 0);
+ if (err) {
+ ubi_err(ubi, "failed to attach mtd%d, error %d",
+ mtd->index, err);
+ goto out_free;
+ }
+
+ if (ubi->autoresize_vol_id != -1) {
+ err = autoresize(ubi, ubi->autoresize_vol_id);
+ if (err)
+ goto out_detach;
+ }
+
+ err = uif_init(ubi, &ref);
+ if (err)
+ goto out_detach;
+
+ err = ubi_debugfs_init_dev(ubi);
+ if (err)
+ goto out_uif;
+
+ ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
+ if (IS_ERR(ubi->bgt_thread)) {
+ err = PTR_ERR(ubi->bgt_thread);
+ ubi_err(ubi, "cannot spawn \"%s\", error %d",
+ ubi->bgt_name, err);
+ goto out_debugfs;
+ }
+
+ ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
+ mtd->index, mtd->name, ubi->flash_size >> 20);
+ ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+ ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+ ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
+ ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+ ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+ ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+ ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+ ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
+ ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+ ubi->vtbl_slots);
+ ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+ ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+ ubi->image_seq);
+ ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+ ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
+
+ /*
+ * The below lock makes sure we do not race with 'ubi_thread()' which
+ * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
+ */
+ spin_lock(&ubi->wl_lock);
+ ubi->thread_enabled = 1;
+ wake_up_process(ubi->bgt_thread);
+ spin_unlock(&ubi->wl_lock);
+
+ ubi_devices[ubi_num] = ubi;
+ ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
+ return ubi_num;
+
+out_debugfs:
+ ubi_debugfs_exit_dev(ubi);
+out_uif:
+ get_device(&ubi->dev);
+ ubi_assert(ref);
+ uif_close(ubi);
+out_detach:
+ ubi_wl_close(ubi);
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+out_free:
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
+ if (ref)
+ put_device(&ubi->dev);
+ else
+ kfree(ubi);
+ return err;
+}
+
+/**
+ * ubi_detach_mtd_dev - detach an MTD device.
+ * @ubi_num: UBI device number to detach from
+ * @anyway: detach MTD even if device reference count is not zero
+ *
+ * This function destroys an UBI device number @ubi_num and detaches the
+ * underlying MTD device. Returns zero in case of success and %-EBUSY if the
+ * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
+ * exist.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
+ */
+int ubi_detach_mtd_dev(int ubi_num, int anyway)
+{
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -EINVAL;
+
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
+ ubi->ref_count -= 1;
+ if (ubi->ref_count) {
+ if (!anyway) {
+ spin_unlock(&ubi_devices_lock);
+ return -EBUSY;
+ }
+ /* This may only happen if there is a bug */
+ ubi_err(ubi, "%s reference count %d, destroy anyway",
+ ubi->ubi_name, ubi->ref_count);
+ }
+ ubi_devices[ubi_num] = NULL;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_assert(ubi_num == ubi->ubi_num);
+ ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
+ ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* If we don't write a new fastmap at detach time we lose all
+ * EC updates that have been made since the last written fastmap.
+ * In case of fastmap debugging we omit the update to simulate an
+ * unclean shutdown. */
+ if (!ubi_dbg_chk_fastmap(ubi))
+ ubi_update_fastmap(ubi);
+#endif
+ /*
+ * Before freeing anything, we have to stop the background thread to
+ * prevent it from doing anything on this device while we are freeing.
+ */
+ if (ubi->bgt_thread)
+ kthread_stop(ubi->bgt_thread);
+
+ /*
+ * Get a reference to the device in order to prevent 'dev_release()'
+ * from freeing the @ubi object.
+ */
+ get_device(&ubi->dev);
+
+ ubi_debugfs_exit_dev(ubi);
+ uif_close(ubi);
+
+ ubi_wl_close(ubi);
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+ put_mtd_device(ubi->mtd);
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
+ ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
+ put_device(&ubi->dev);
+ return 0;
+}
+
+/**
+ * open_mtd_by_chdev - open an MTD device by its character device node path.
+ * @mtd_dev: MTD character device node path
+ *
+ * This helper function opens an MTD device by its character node device path.
+ * Returns MTD device description object in case of success and a negative
+ * error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
+{
+ int err, major, minor, mode;
+ struct path path;
+
+ /* Probably this is an MTD character device node path */
+ err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+ if (err)
+ return ERR_PTR(err);
+
+ /* MTD device number is defined by the major / minor numbers */
+ major = imajor(d_backing_inode(path.dentry));
+ minor = iminor(d_backing_inode(path.dentry));
+ mode = d_backing_inode(path.dentry)->i_mode;
+ path_put(&path);
+ if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
+ return ERR_PTR(-EINVAL);
+
+ if (minor & 1)
+ /*
+ * We do not think that support for the read-only "/dev/mtdrX"
+ * devices is needed, so we do not support them to avoid extra work.
+ */
+ return ERR_PTR(-EINVAL);
+
+ return get_mtd_device(NULL, minor / 2);
+}
+
+/**
+ * open_mtd_device - open MTD device by name, character device path, or number.
+ * @mtd_dev: name, character device node path, or MTD device device number
+ *
+ * This function tries to open an MTD device described by the @mtd_dev string,
+ * which is first treated as an ASCII MTD device number; if that fails, it is
+ * treated as an MTD device name, and if that also fails, it is treated as an
+ * MTD character device node path. Returns MTD device description object in
+ * case of success and a negative error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
+{
+ struct mtd_info *mtd;
+ int mtd_num;
+ char *endp;
+
+ mtd_num = simple_strtoul(mtd_dev, &endp, 0);
+ if (*endp != '\0' || mtd_dev == endp) {
+ /*
+ * This does not look like an ASCII integer, so probably this is
+ * an MTD device name.
+ */
+ mtd = get_mtd_device_nm(mtd_dev);
+ if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
+ /* Probably this is an MTD character device node path */
+ mtd = open_mtd_by_chdev(mtd_dev);
+ } else
+ mtd = get_mtd_device(NULL, mtd_num);
+
+ return mtd;
+}
+
+static int __init ubi_init(void)
+{
+ int err, i, k;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many MTD devices, maximum is %d",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
+ if (IS_ERR(ubi_class)) {
+ err = PTR_ERR(ubi_class);
+ pr_err("UBI error: cannot create UBI class");
+ goto out;
+ }
+
+ err = class_create_file(ubi_class, &ubi_version);
+ if (err) {
+ pr_err("UBI error: cannot create sysfs file");
+ goto out_class;
+ }
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ pr_err("UBI error: cannot register device");
+ goto out_version;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
+ goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
+ /* Attach MTD devices */
+ for (i = 0; i < mtd_devs; i++) {
+ struct mtd_dev_param *p = &mtd_dev_param[i];
+ struct mtd_info *mtd;
+
+ cond_resched();
+
+ mtd = open_mtd_device(p->name);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("UBI error: cannot open mtd %s, error %d",
+ p->name, err);
+ /* See the comment below regarding ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ continue;
+ }
+
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_attach_mtd_dev(mtd, p->ubi_num,
+ p->vid_hdr_offs, p->max_beb_per1024);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0) {
+ pr_err("UBI error: cannot attach mtd%d",
+ mtd->index);
+ put_mtd_device(mtd);
+
+ /*
+ * Originally UBI stopped initializing on any error.
+ * However, later on it was found out that this
+ * behavior is not very good when UBI is compiled into
+ * the kernel and the MTD devices to attach are passed
+ * through the command line. Indeed, UBI failure
+ * stopped whole boot sequence.
+ *
+ * To fix this, we changed the behavior for the
+ * non-module case, but preserved the old behavior for
+ * the module case, just for compatibility. This is a
+ * little inconsistent, though.
+ */
+ if (ubi_is_module())
+ goto out_detach;
+ }
+ }
+
+ err = ubiblock_init();
+ if (err) {
+ pr_err("UBI error: block: cannot initialize, error %d", err);
+
+ /* See the comment above regarding ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ }
+
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ ubi_debugfs_exit();
+out_slab:
+ kmem_cache_destroy(ubi_wl_entry_slab);
+out_dev_unreg:
+ misc_deregister(&ubi_ctrl_cdev);
+out_version:
+ class_remove_file(ubi_class, &ubi_version);
+out_class:
+ class_destroy(ubi_class);
+out:
+ pr_err("UBI error: cannot initialize UBI, error %d", err);
+ return err;
+}
+late_initcall(ubi_init);
+
+static void __exit ubi_exit(void)
+{
+ int i;
+
+ ubiblock_exit();
+
+ for (i = 0; i < UBI_MAX_DEVICES; i++)
+ if (ubi_devices[i]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ ubi_debugfs_exit();
+ kmem_cache_destroy(ubi_wl_entry_slab);
+ misc_deregister(&ubi_ctrl_cdev);
+ class_remove_file(ubi_class, &ubi_version);
+ class_destroy(ubi_class);
+}
+module_exit(ubi_exit);
+
+/**
+ * bytes_str_to_int - convert a number of bytes string into an integer.
+ * @str: the string to convert
+ *
+ * This function returns the resulting positive integer in case of success and a
+ * negative error code in case of failure.
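+ *
+ * The string may carry a 'K', 'M', or 'G' multiplier suffix, optionally
+ * spelled as "KiB"/"MiB"/"GiB"; e.g. "8192", "8K", and "8KiB" all yield
+ * 8192.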
+ */
+static int __init bytes_str_to_int(const char *str)
+{
+ char *endp;
+ unsigned long result;
+
+ result = simple_strtoul(str, &endp, 0);
+ if (str == endp || result >= INT_MAX) {
+ pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
+ return -EINVAL;
+ }
+
+ switch (*endp) {
+ case 'G':
+ result *= 1024;
+ /* fall through */
+ case 'M':
+ result *= 1024;
+ /* fall through */
+ case 'K':
+ result *= 1024;
+ if (endp[1] == 'i' && endp[2] == 'B')
+ endp += 2;
+ /* fall through */
+ case '\0':
+ break;
+ default:
+ pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
+ return -EINVAL;
+ }
+
+ return result;
+}
+
+/**
+ * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
+ * @val: the parameter value to parse
+ * @kp: not used
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of error.
+ */
+static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
+{
+ int i, len;
+ struct mtd_dev_param *p;
+ char buf[MTD_PARAM_LEN_MAX];
+ char *pbuf = &buf[0];
+ char *tokens[MTD_PARAM_MAX_COUNT], *token;
+
+ if (!val)
+ return -EINVAL;
+
+ if (mtd_devs == UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many parameters, max. is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ len = strnlen(val, MTD_PARAM_LEN_MAX);
+ if (len == MTD_PARAM_LEN_MAX) {
+ pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
+ val, MTD_PARAM_LEN_MAX);
+ return -EINVAL;
+ }
+
+ if (len == 0) {
+ pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
+ return 0;
+ }
+
+ strcpy(buf, val);
+
+ /* Get rid of the final newline */
+ if (buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
+ tokens[i] = strsep(&pbuf, ",");
+
+ if (pbuf) {
+ pr_err("UBI error: too many arguments at \"%s\"\n", val);
+ return -EINVAL;
+ }
+
+ p = &mtd_dev_param[mtd_devs];
+ strcpy(&p->name[0], tokens[0]);
+
+ token = tokens[1];
+ if (token) {
+ p->vid_hdr_offs = bytes_str_to_int(token);
+
+ if (p->vid_hdr_offs < 0)
+ return p->vid_hdr_offs;
+ }
+
+ token = tokens[2];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->max_beb_per1024);
+
+ if (err) {
+ pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+ token);
+ return -EINVAL;
+ }
+ }
+
+ token = tokens[3];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->ubi_num);
+
+ if (err) {
+ pr_err("UBI error: bad value for ubi_num parameter: %s",
+ token);
+ return -EINVAL;
+ }
+ } else
+ p->ubi_num = UBI_DEV_NUM_AUTO;
+
+ mtd_devs += 1;
+ return 0;
+}
+
+module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
+MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
+ "Multiple \"mtd\" parameters may be specified.\n"
+ "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
+ "Optional \"vid_hdr_offs\" parameter specifies the UBI VID header position to be used by UBI (the default offset is used if 0).\n"
+ "Optional \"max_beb_per1024\" parameter specifies the maximum expected number of bad eraseblocks per 1024 eraseblocks (the default value ("
+ __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") is used if 0).\n"
+ "Optional \"ubi_num\" parameter specifies the UBI device number to assign to the newly created UBI device (assigned automatically by default).\n"
+ "\n"
+ "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
+ "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
+ "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling\n"
+ "\t(e.g. if the NAND *chipset* has 4096 PEB, 25 per 1024 means 100 will be reserved for this UBI device).\n"
+ "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5, using default values for the other fields.");
+#ifdef CONFIG_MTD_UBI_FASTMAP
+module_param(fm_autoconvert, bool, 0644);
+MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+module_param(fm_debug, bool, 0);
+MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
+#endif
+MODULE_VERSION(__stringify(UBI_VERSION));
+MODULE_DESCRIPTION("UBI - Unsorted Block Images");
+MODULE_AUTHOR("Artem Bityutskiy");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
new file mode 100644
index 000000000..d16fccf79
--- /dev/null
+++ b/drivers/mtd/ubi/cdev.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file includes implementation of UBI character device operations.
+ *
+ * There are two kinds of character devices in UBI: UBI character devices and
+ * UBI volume character devices. UBI character devices allow users to
+ * manipulate whole volumes: create, remove, and re-size them. Volume character
+ * devices provide volume I/O capabilities.
+ *
+ * Major and minor numbers are assigned dynamically to both UBI and volume
+ * character devices.
+ *
+ * Well, there is a third kind of character device - the UBI control
+ * character device, which allows one to manipulate UBI devices - create and
+ * delete them. In other words, it is used for attaching and detaching MTD
+ * devices.
+ */
+
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <linux/capability.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#include <linux/math64.h>
+#include <mtd/ubi-user.h>
+#include "ubi.h"
+
+/**
+ * get_exclusive - get exclusive access to an UBI volume.
+ * @desc: volume descriptor
+ *
+ * This function changes UBI volume open mode to "exclusive". Returns previous
+ * mode value (positive integer) in case of success and a negative error code
+ * in case of failure.
+ */
+static int get_exclusive(struct ubi_volume_desc *desc)
+{
+ int users, err;
+ struct ubi_volume *vol = desc->vol;
+
+ spin_lock(&vol->ubi->volumes_lock);
+ users = vol->readers + vol->writers + vol->exclusive + vol->metaonly;
+ ubi_assert(users > 0);
+ if (users > 1) {
+ ubi_err(vol->ubi, "%d users for volume %d", users, vol->vol_id);
+ err = -EBUSY;
+ } else {
+ vol->readers = vol->writers = vol->metaonly = 0;
+ vol->exclusive = 1;
+ err = desc->mode;
+ desc->mode = UBI_EXCLUSIVE;
+ }
+ spin_unlock(&vol->ubi->volumes_lock);
+
+ return err;
+}
+
+/**
+ * revoke_exclusive - revoke exclusive mode.
+ * @desc: volume descriptor
+ * @mode: new mode to switch to
+ */
+static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
+{
+ struct ubi_volume *vol = desc->vol;
+
+ spin_lock(&vol->ubi->volumes_lock);
+ ubi_assert(vol->readers == 0 && vol->writers == 0 && vol->metaonly == 0);
+ ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
+ vol->exclusive = 0;
+ if (mode == UBI_READONLY)
+ vol->readers = 1;
+ else if (mode == UBI_READWRITE)
+ vol->writers = 1;
+ else if (mode == UBI_METAONLY)
+ vol->metaonly = 1;
+ else
+ vol->exclusive = 1;
+ spin_unlock(&vol->ubi->volumes_lock);
+
+ desc->mode = mode;
+}
+
+static int vol_cdev_open(struct inode *inode, struct file *file)
+{
+ struct ubi_volume_desc *desc;
+ int vol_id = iminor(inode) - 1, mode, ubi_num;
+
+ ubi_num = ubi_major2num(imajor(inode));
+ if (ubi_num < 0)
+ return ubi_num;
+
+ if (file->f_mode & FMODE_WRITE)
+ mode = UBI_READWRITE;
+ else
+ mode = UBI_READONLY;
+
+ dbg_gen("open device %d, volume %d, mode %d",
+ ubi_num, vol_id, mode);
+
+ desc = ubi_open_volume(ubi_num, vol_id, mode);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ file->private_data = desc;
+ return 0;
+}
+
+static int vol_cdev_release(struct inode *inode, struct file *file)
+{
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+
+ dbg_gen("release device %d, volume %d, mode %d",
+ vol->ubi->ubi_num, vol->vol_id, desc->mode);
+
+ if (vol->updating) {
+ ubi_warn(vol->ubi, "update of volume %d not finished, volume is damaged",
+ vol->vol_id);
+ ubi_assert(!vol->changing_leb);
+ vol->updating = 0;
+ vfree(vol->upd_buf);
+ } else if (vol->changing_leb) {
+ dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
+ vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
+ vol->vol_id);
+ vol->changing_leb = 0;
+ vfree(vol->upd_buf);
+ }
+
+ ubi_close_volume(desc);
+ return 0;
+}
+
+static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+
+ if (vol->updating) {
+ /* Update is in progress, seeking is prohibited */
+ ubi_err(vol->ubi, "updating");
+ return -EBUSY;
+ }
+
+ return fixed_size_llseek(file, offset, origin, vol->used_bytes);
+}
+
+static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
+{
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_device *ubi = desc->vol->ubi;
+ struct inode *inode = file_inode(file);
+ int err;
+
+ mutex_lock(&inode->i_mutex);
+ err = ubi_sync(ubi->ubi_num);
+ mutex_unlock(&inode->i_mutex);
+ return err;
+}
+
+static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
+ loff_t *offp)
+{
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, lnum, off, len, tbuf_size;
+ size_t count_save = count;
+ void *tbuf;
+
+ dbg_gen("read %zd bytes from offset %lld of volume %d",
+ count, *offp, vol->vol_id);
+
+ if (vol->updating) {
+ ubi_err(vol->ubi, "updating");
+ return -EBUSY;
+ }
+ if (vol->upd_marker) {
+ ubi_err(vol->ubi, "damaged volume, update marker is set");
+ return -EBADF;
+ }
+ if (*offp == vol->used_bytes || count == 0)
+ return 0;
+
+ if (vol->corrupted)
+ dbg_gen("read from corrupted volume %d", vol->vol_id);
+
+ if (*offp + count > vol->used_bytes)
+ count_save = count = vol->used_bytes - *offp;
+
+ tbuf_size = vol->usable_leb_size;
+ if (count < tbuf_size)
+ tbuf_size = ALIGN(count, ubi->min_io_size);
+ tbuf = vmalloc(tbuf_size);
+ if (!tbuf)
+ return -ENOMEM;
+
+ len = count > tbuf_size ? tbuf_size : count;
+ lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
+
+ do {
+ cond_resched();
+
+ if (off + len >= vol->usable_leb_size)
+ len = vol->usable_leb_size - off;
+
+ err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
+ if (err)
+ break;
+
+ off += len;
+ if (off == vol->usable_leb_size) {
+ lnum += 1;
+ off -= vol->usable_leb_size;
+ }
+
+ count -= len;
+ *offp += len;
+
+ err = copy_to_user(buf, tbuf, len);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ buf += len;
+ len = count > tbuf_size ? tbuf_size : count;
+ } while (count);
+
+ vfree(tbuf);
+ return err ? err : count_save - count;
+}
+
+/*
+ * This function allows users to write directly to dynamic UBI volumes,
+ * without issuing the volume update operation.
+ */
+static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int lnum, off, len, tbuf_size, err = 0;
+ size_t count_save = count;
+ char *tbuf;
+
+ if (!vol->direct_writes)
+ return -EPERM;
+
+ dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
+ count, *offp, vol->vol_id);
+
+ if (vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
+ if (off & (ubi->min_io_size - 1)) {
+ ubi_err(ubi, "unaligned position");
+ return -EINVAL;
+ }
+
+ if (*offp + count > vol->used_bytes)
+ count_save = count = vol->used_bytes - *offp;
+
+ /* We can write only in fractions of the minimum I/O unit */
+ if (count & (ubi->min_io_size - 1)) {
+ ubi_err(ubi, "unaligned write length");
+ return -EINVAL;
+ }
+
+ tbuf_size = vol->usable_leb_size;
+ if (count < tbuf_size)
+ tbuf_size = ALIGN(count, ubi->min_io_size);
+ tbuf = vmalloc(tbuf_size);
+ if (!tbuf)
+ return -ENOMEM;
+
+ len = count > tbuf_size ? tbuf_size : count;
+
+ while (count) {
+ cond_resched();
+
+ if (off + len >= vol->usable_leb_size)
+ len = vol->usable_leb_size - off;
+
+ err = copy_from_user(tbuf, buf, len);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
+ if (err)
+ break;
+
+ off += len;
+ if (off == vol->usable_leb_size) {
+ lnum += 1;
+ off -= vol->usable_leb_size;
+ }
+
+ count -= len;
+ *offp += len;
+ buf += len;
+ len = count > tbuf_size ? tbuf_size : count;
+ }
+
+ vfree(tbuf);
+ return err ? err : count_save - count;
+}
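+
+/*
+ * Illustrative user-space sketch (not part of the driver; assumes
+ * /dev/ubi0_0 naming, <sys/ioctl.h> and the exported <mtd/ubi-user.h>
+ * header): direct writes must first be enabled via UBI_IOCSETVOLPROP,
+ * and both the file position and the write length must be multiples of
+ * the minimum I/O unit of the underlying MTD device:
+ *
+ *   struct ubi_set_vol_prop_req prop = {
+ *       .property = UBI_VOL_PROP_DIRECT_WRITE,
+ *       .value = 1,
+ *   };
+ *   int fd = open("/dev/ubi0_0", O_RDWR);
+ *
+ *   ioctl(fd, UBI_IOCSETVOLPROP, &prop);
+ *   pwrite(fd, buf, min_io_size, 0);  -- aligned length and offset
+ */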
+
+static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ int err = 0;
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ if (!vol->updating && !vol->changing_leb)
+ return vol_cdev_direct_write(file, buf, count, offp);
+
+ if (vol->updating)
+ err = ubi_more_update_data(ubi, vol, buf, count);
+ else
+ err = ubi_more_leb_change_data(ubi, vol, buf, count);
+
+ if (err < 0) {
+ ubi_err(ubi, "cannot accept more %zd bytes of data, error %d",
+ count, err);
+ return err;
+ }
+
+ if (err) {
+ /*
+ * The operation is finished, @err contains number of actually
+ * written bytes.
+ */
+ count = err;
+
+ if (vol->changing_leb) {
+ revoke_exclusive(desc, UBI_READWRITE);
+ return count;
+ }
+
+ err = ubi_check_volume(ubi, vol->vol_id);
+ if (err < 0)
+ return err;
+
+ if (err) {
+ ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
+ vol->vol_id, ubi->ubi_num);
+ vol->corrupted = 1;
+ }
+ vol->checked = 1;
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
+ revoke_exclusive(desc, UBI_READWRITE);
+ }
+
+ return count;
+}
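+
+/*
+ * Illustrative user-space sketch of the update protocol (not part of
+ * the driver; roughly what a tool like ubiupdatevol does, assuming
+ * <sys/ioctl.h> and <mtd/ubi-user.h>): start the update with
+ * UBI_IOCVOLUP, passing the image size as a 64-bit value, then write
+ * exactly that many image bytes:
+ *
+ *   int64_t bytes = image_size;
+ *
+ *   ioctl(fd, UBI_IOCVOLUP, &bytes);
+ *   write(fd, image, image_size);
+ */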
+
+static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int err = 0;
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ /* Volume update command */
+ case UBI_IOCVOLUP:
+ {
+ int64_t bytes, rsvd_bytes;
+
+ if (!capable(CAP_SYS_RESOURCE)) {
+ err = -EPERM;
+ break;
+ }
+
+ err = copy_from_user(&bytes, argp, sizeof(int64_t));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (desc->mode == UBI_READONLY) {
+ err = -EROFS;
+ break;
+ }
+
+ rsvd_bytes = (long long)vol->reserved_pebs *
+ ubi->leb_size - vol->data_pad;
+ if (bytes < 0 || bytes > rsvd_bytes) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = get_exclusive(desc);
+ if (err < 0)
+ break;
+
+ err = ubi_start_update(ubi, vol, bytes);
+ if (bytes == 0) {
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
+ revoke_exclusive(desc, UBI_READWRITE);
+ }
+ break;
+ }
+
+ /* Atomic logical eraseblock change command */
+ case UBI_IOCEBCH:
+ {
+ struct ubi_leb_change_req req;
+
+ err = copy_from_user(&req, argp,
+ sizeof(struct ubi_leb_change_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (desc->mode == UBI_READONLY ||
+ vol->vol_type == UBI_STATIC_VOLUME) {
+ err = -EROFS;
+ break;
+ }
+
+ /* Validate the request */
+ err = -EINVAL;
+ if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+ req.bytes < 0 || req.bytes > vol->usable_leb_size)
+ break;
+
+ err = get_exclusive(desc);
+ if (err < 0)
+ break;
+
+ err = ubi_start_leb_change(ubi, vol, &req);
+ if (req.bytes == 0)
+ revoke_exclusive(desc, UBI_READWRITE);
+ break;
+ }
+
+ /* Logical eraseblock erasure command */
+ case UBI_IOCEBER:
+ {
+ int32_t lnum;
+
+ err = get_user(lnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (desc->mode == UBI_READONLY ||
+ vol->vol_type == UBI_STATIC_VOLUME) {
+ err = -EROFS;
+ break;
+ }
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs) {
+ err = -EINVAL;
+ break;
+ }
+
+ dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ break;
+
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ break;
+ }
+
+ /* Logical eraseblock map command */
+ case UBI_IOCEBMAP:
+ {
+ struct ubi_map_req req;
+
+ err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_leb_map(desc, req.lnum);
+ break;
+ }
+
+ /* Logical eraseblock un-map command */
+ case UBI_IOCEBUNMAP:
+ {
+ int32_t lnum;
+
+ err = get_user(lnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_leb_unmap(desc, lnum);
+ break;
+ }
+
+ /* Check if logical eraseblock is mapped command */
+ case UBI_IOCEBISMAP:
+ {
+ int32_t lnum;
+
+ err = get_user(lnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_is_mapped(desc, lnum);
+ break;
+ }
+
+ /* Set volume property command */
+ case UBI_IOCSETVOLPROP:
+ {
+ struct ubi_set_vol_prop_req req;
+
+ err = copy_from_user(&req, argp,
+ sizeof(struct ubi_set_vol_prop_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ switch (req.property) {
+ case UBI_VOL_PROP_DIRECT_WRITE:
+ mutex_lock(&ubi->device_mutex);
+ desc->vol->direct_writes = !!req.value;
+ mutex_unlock(&ubi->device_mutex);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ break;
+ }
+
+ /* Create a R/O block device on top of the UBI volume */
+ case UBI_IOCVOLCRBLK:
+ {
+ struct ubi_volume_info vi;
+
+ ubi_get_volume_info(desc, &vi);
+ err = ubiblock_create(&vi);
+ break;
+ }
+
+ /* Remove the R/O block device */
+ case UBI_IOCVOLRMBLK:
+ {
+ struct ubi_volume_info vi;
+
+ ubi_get_volume_info(desc, &vi);
+ err = ubiblock_remove(&vi);
+ break;
+ }
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+ return err;
+}
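+
+/*
+ * Illustrative user-space sketch of the per-LEB ioctls (not part of the
+ * driver; assumes <sys/ioctl.h> and <mtd/ubi-user.h>): UBI_IOCEBER,
+ * UBI_IOCEBUNMAP and UBI_IOCEBISMAP each take a pointer to a 32-bit LEB
+ * number, and UBI_IOCEBISMAP returns whether the LEB is mapped:
+ *
+ *   int32_t lnum = 0;
+ *
+ *   ioctl(fd, UBI_IOCEBER, &lnum);     -- erase LEB 0
+ *   ioctl(fd, UBI_IOCEBISMAP, &lnum);  -- 1 if LEB 0 is mapped, else 0
+ */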
+
+/**
+ * verify_mkvol_req - verify volume creation request.
+ * @ubi: UBI device description object
+ * @req: the request to check
+ *
+ * This function returns zero if the request is correct, and a negative
+ * error code (%-EINVAL or %-ENAMETOOLONG) if not.
+ */
+static int verify_mkvol_req(const struct ubi_device *ubi,
+ const struct ubi_mkvol_req *req)
+{
+ int n, err = -EINVAL;
+
+ if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
+ req->name_len < 0)
+ goto bad;
+
+ if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
+ req->vol_id != UBI_VOL_NUM_AUTO)
+ goto bad;
+
+ if (req->alignment == 0)
+ goto bad;
+
+ if (req->bytes == 0)
+ goto bad;
+
+ if (req->vol_type != UBI_DYNAMIC_VOLUME &&
+ req->vol_type != UBI_STATIC_VOLUME)
+ goto bad;
+
+ if (req->alignment > ubi->leb_size)
+ goto bad;
+
+ n = req->alignment & (ubi->min_io_size - 1);
+ if (req->alignment != 1 && n)
+ goto bad;
+
+ if (!req->name[0] || !req->name_len)
+ goto bad;
+
+ if (req->name_len > UBI_VOL_NAME_MAX) {
+ err = -ENAMETOOLONG;
+ goto bad;
+ }
+
+ n = strnlen(req->name, req->name_len + 1);
+ if (n != req->name_len)
+ goto bad;
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "bad volume creation request");
+ ubi_dump_mkvol_req(req);
+ return err;
+}
+
+/**
+ * verify_rsvol_req - verify volume re-size request.
+ * @ubi: UBI device description object
+ * @req: the request to check
+ *
+ * This function returns zero if the request is correct, and %-EINVAL if not.
+ */
+static int verify_rsvol_req(const struct ubi_device *ubi,
+ const struct ubi_rsvol_req *req)
+{
+ if (req->bytes <= 0)
+ return -EINVAL;
+
+ if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ return 0;
+}
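+
+/*
+ * Illustrative user-space sketch of a re-size request (not part of the
+ * driver; assumes <sys/ioctl.h>, <mtd/ubi-user.h> and an fd open on
+ * /dev/ubi0): @bytes is the requested new size, which ubi_cdev_ioctl()
+ * below rounds up to a whole number of LEBs:
+ *
+ *   struct ubi_rsvol_req req = {
+ *       .vol_id = 0,
+ *       .bytes = 2 * 1024 * 1024,
+ *   };
+ *
+ *   ioctl(fd, UBI_IOCRSVOL, &req);
+ */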
+
+/**
+ * rename_volumes - rename UBI volumes.
+ * @ubi: UBI device description object
+ * @req: volumes re-name request
+ *
+ * This is a helper function for the volume re-name IOCTL which validates
+ * the request, opens the volumes and calls the corresponding volume
+ * management function. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+static int rename_volumes(struct ubi_device *ubi,
+ struct ubi_rnvol_req *req)
+{
+ int i, n, err;
+ struct list_head rename_list;
+ struct ubi_rename_entry *re, *re1;
+
+ if (req->count < 0 || req->count > UBI_MAX_RNVOL)
+ return -EINVAL;
+
+ if (req->count == 0)
+ return 0;
+
+ /* Validate volume IDs and names in the request */
+ for (i = 0; i < req->count; i++) {
+ if (req->ents[i].vol_id < 0 ||
+ req->ents[i].vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+ if (req->ents[i].name_len < 0)
+ return -EINVAL;
+ if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
+ return -ENAMETOOLONG;
+ req->ents[i].name[req->ents[i].name_len] = '\0';
+ n = strlen(req->ents[i].name);
+ if (n != req->ents[i].name_len)
+ return -EINVAL;
+ }
+
+ /* Make sure volume IDs and names are unique */
+ for (i = 0; i < req->count - 1; i++) {
+ for (n = i + 1; n < req->count; n++) {
+ if (req->ents[i].vol_id == req->ents[n].vol_id) {
+ ubi_err(ubi, "duplicated volume id %d",
+ req->ents[i].vol_id);
+ return -EINVAL;
+ }
+ if (!strcmp(req->ents[i].name, req->ents[n].name)) {
+ ubi_err(ubi, "duplicated volume name \"%s\"",
+ req->ents[i].name);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Create the re-name list */
+ INIT_LIST_HEAD(&rename_list);
+ for (i = 0; i < req->count; i++) {
+ int vol_id = req->ents[i].vol_id;
+ int name_len = req->ents[i].name_len;
+ const char *name = req->ents[i].name;
+
+ re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
+ if (!re) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY);
+ if (IS_ERR(re->desc)) {
+ err = PTR_ERR(re->desc);
+ ubi_err(ubi, "cannot open volume %d, error %d",
+ vol_id, err);
+ kfree(re);
+ goto out_free;
+ }
+
+ /* Skip this re-naming if the name does not really change */
+ if (re->desc->vol->name_len == name_len &&
+ !memcmp(re->desc->vol->name, name, name_len)) {
+ ubi_close_volume(re->desc);
+ kfree(re);
+ continue;
+ }
+
+ re->new_name_len = name_len;
+ memcpy(re->new_name, name, name_len);
+ list_add_tail(&re->list, &rename_list);
+ dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
+ vol_id, re->desc->vol->name, name);
+ }
+
+ if (list_empty(&rename_list))
+ return 0;
+
+ /* Find out the volumes which have to be removed */
+ list_for_each_entry(re, &rename_list, list) {
+ struct ubi_volume_desc *desc;
+ int no_remove_needed = 0;
+
+ /*
+ * Volume @re->vol_id is going to be re-named to
+ * @re->new_name, while its current name is @name. If a volume
+ * with name @re->new_name currently exists, it has to be
+ * removed, unless it is also re-named in the request (@req).
+ */
+ list_for_each_entry(re1, &rename_list, list) {
+ if (re->new_name_len == re1->desc->vol->name_len &&
+ !memcmp(re->new_name, re1->desc->vol->name,
+ re1->desc->vol->name_len)) {
+ no_remove_needed = 1;
+ break;
+ }
+ }
+
+ if (no_remove_needed)
+ continue;
+
+ /*
+ * It seems we need to remove volume with name @re->new_name,
+ * if it exists.
+ */
+ desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
+ UBI_EXCLUSIVE);
+ if (IS_ERR(desc)) {
+ err = PTR_ERR(desc);
+ if (err == -ENODEV)
+ /* Re-naming into a non-existing volume name */
+ continue;
+
+ /* The volume exists but busy, or an error occurred */
+ ubi_err(ubi, "cannot open volume \"%s\", error %d",
+ re->new_name, err);
+ goto out_free;
+ }
+
+ re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
+ if (!re1) {
+ err = -ENOMEM;
+ ubi_close_volume(desc);
+ goto out_free;
+ }
+
+ re1->remove = 1;
+ re1->desc = desc;
+ list_add(&re1->list, &rename_list);
+ dbg_gen("will remove volume %d, name \"%s\"",
+ re1->desc->vol->vol_id, re1->desc->vol->name);
+ }
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_rename_volumes(ubi, &rename_list);
+ mutex_unlock(&ubi->device_mutex);
+
+out_free:
+ list_for_each_entry_safe(re, re1, &rename_list, list) {
+ ubi_close_volume(re->desc);
+ list_del(&re->list);
+ kfree(re);
+ }
+ return err;
+}
+
+static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int err = 0;
+ struct ubi_device *ubi;
+ struct ubi_volume_desc *desc;
+ void __user *argp = (void __user *)arg;
+
+ if (!capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ ubi = ubi_get_by_major(imajor(file->f_mapping->host));
+ if (!ubi)
+ return -ENODEV;
+
+ switch (cmd) {
+ /* Create volume command */
+ case UBI_IOCMKVOL:
+ {
+ struct ubi_mkvol_req req;
+
+ dbg_gen("create volume");
+ err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = verify_mkvol_req(ubi, &req);
+ if (err)
+ break;
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_create_volume(ubi, &req);
+ mutex_unlock(&ubi->device_mutex);
+ if (err)
+ break;
+
+ err = put_user(req.vol_id, (__user int32_t *)argp);
+ if (err)
+ err = -EFAULT;
+
+ break;
+ }
+
+ /* Remove volume command */
+ case UBI_IOCRMVOL:
+ {
+ int vol_id;
+
+ dbg_gen("remove volume");
+ err = get_user(vol_id, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
+ if (IS_ERR(desc)) {
+ err = PTR_ERR(desc);
+ break;
+ }
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_remove_volume(desc, 0);
+ mutex_unlock(&ubi->device_mutex);
+
+ /*
+ * The volume is deleted (unless an error occurred), and the
+ * 'struct ubi_volume' object will be freed when
+ * 'ubi_close_volume()' will call 'put_device()'.
+ */
+ ubi_close_volume(desc);
+ break;
+ }
+
+ /* Re-size volume command */
+ case UBI_IOCRSVOL:
+ {
+ int pebs;
+ struct ubi_rsvol_req req;
+
+ dbg_gen("re-size volume");
+ err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = verify_rsvol_req(ubi, &req);
+ if (err)
+ break;
+
+ desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
+ if (IS_ERR(desc)) {
+ err = PTR_ERR(desc);
+ break;
+ }
+
+ pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
+ desc->vol->usable_leb_size);
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_resize_volume(desc, pebs);
+ mutex_unlock(&ubi->device_mutex);
+ ubi_close_volume(desc);
+ break;
+ }
+
+ /* Re-name volumes command */
+ case UBI_IOCRNVOL:
+ {
+ struct ubi_rnvol_req *req;
+
+ dbg_gen("re-name volumes");
+ req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
+ if (!req) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
+ if (err) {
+ err = -EFAULT;
+ kfree(req);
+ break;
+ }
+
+ err = rename_volumes(ubi, req);
+ kfree(req);
+ break;
+ }
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ ubi_put_device(ubi);
+ return err;
+}
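+
+/*
+ * Illustrative user-space sketch of volume creation (not part of the
+ * driver; assumes <sys/ioctl.h>, <mtd/ubi-user.h> and an fd open on
+ * /dev/ubi0): the fields below mirror what verify_mkvol_req() checks,
+ * and with %UBI_VOL_NUM_AUTO the chosen volume ID is copied back into
+ * the beginning of the request:
+ *
+ *   struct ubi_mkvol_req req = {
+ *       .vol_id = UBI_VOL_NUM_AUTO,
+ *       .alignment = 1,
+ *       .bytes = 1024 * 1024,
+ *       .vol_type = UBI_DYNAMIC_VOLUME,
+ *       .name = "my-volume",
+ *       .name_len = 9,
+ *   };
+ *
+ *   ioctl(fd, UBI_IOCMKVOL, &req);
+ */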
+
+static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int err = 0;
+ void __user *argp = (void __user *)arg;
+
+ if (!capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ switch (cmd) {
+ /* Attach an MTD device command */
+ case UBI_IOCATT:
+ {
+ struct ubi_attach_req req;
+ struct mtd_info *mtd;
+
+ dbg_gen("attach MTD device");
+ err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (req.mtd_num < 0 ||
+ (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
+ err = -EINVAL;
+ break;
+ }
+
+ mtd = get_mtd_device(NULL, req.mtd_num);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ break;
+ }
+
+ /*
+ * Note, further request verification is done by
+ * 'ubi_attach_mtd_dev()'.
+ */
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
+ req.max_beb_per1024);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0)
+ put_mtd_device(mtd);
+ else
+ /* @err contains UBI device number */
+ err = put_user(err, (__user int32_t *)argp);
+
+ break;
+ }
+
+ /* Detach an MTD device command */
+ case UBI_IOCDET:
+ {
+ int ubi_num;
+
+ dbg_gen("detach MTD device");
+ err = get_user(ubi_num, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_detach_mtd_dev(ubi_num, 0);
+ mutex_unlock(&ubi_devices_mutex);
+ break;
+ }
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ return err;
+}
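+
+/*
+ * Illustrative user-space sketch of attaching an MTD device (not part
+ * of the driver; assumes <sys/ioctl.h>, <mtd/ubi-user.h> and that udev
+ * created /dev/ubi_ctrl): with %UBI_DEV_NUM_AUTO, UBI picks a free
+ * device number and the ioctl copies it back through the request:
+ *
+ *   struct ubi_attach_req req = {
+ *       .ubi_num = UBI_DEV_NUM_AUTO,
+ *       .mtd_num = 0,
+ *       .vid_hdr_offset = 0,  -- 0 selects the default offset
+ *   };
+ *   int fd = open("/dev/ubi_ctrl", O_RDWR);
+ *
+ *   ioctl(fd, UBI_IOCATT, &req);
+ */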
+
+#ifdef CONFIG_COMPAT
+static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned long translated_arg = (unsigned long)compat_ptr(arg);
+
+ return vol_cdev_ioctl(file, cmd, translated_arg);
+}
+
+static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned long translated_arg = (unsigned long)compat_ptr(arg);
+
+ return ubi_cdev_ioctl(file, cmd, translated_arg);
+}
+
+static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned long translated_arg = (unsigned long)compat_ptr(arg);
+
+ return ctrl_cdev_ioctl(file, cmd, translated_arg);
+}
+#else
+#define vol_cdev_compat_ioctl NULL
+#define ubi_cdev_compat_ioctl NULL
+#define ctrl_cdev_compat_ioctl NULL
+#endif
+
+/* UBI volume character device operations */
+const struct file_operations ubi_vol_cdev_operations = {
+ .owner = THIS_MODULE,
+ .open = vol_cdev_open,
+ .release = vol_cdev_release,
+ .llseek = vol_cdev_llseek,
+ .read = vol_cdev_read,
+ .write = vol_cdev_write,
+ .fsync = vol_cdev_fsync,
+ .unlocked_ioctl = vol_cdev_ioctl,
+ .compat_ioctl = vol_cdev_compat_ioctl,
+};
+
+/* UBI character device operations */
+const struct file_operations ubi_cdev_operations = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = ubi_cdev_ioctl,
+ .compat_ioctl = ubi_cdev_compat_ioctl,
+};
+
+/* UBI control character device operations */
+const struct file_operations ubi_ctrl_cdev_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ctrl_cdev_ioctl,
+ .compat_ioctl = ctrl_cdev_compat_ioctl,
+ .llseek = no_llseek,
+};
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
new file mode 100644
index 000000000..b077e43b5
--- /dev/null
+++ b/drivers/mtd/ubi/debug.c
@@ -0,0 +1,545 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#include "ubi.h"
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+
+/**
+ * ubi_dump_flash - dump a region of flash.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to dump
+ * @offset: the starting offset within the physical eraseblock to dump
+ * @len: the length of the region to dump
+ */
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ int err;
+ size_t read;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ buf = vmalloc(len);
+ if (!buf)
+ return;
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && err != -EUCLEAN) {
+ ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
+ goto out;
+ }
+
+ ubi_msg(ubi, "dumping %d bytes of data from PEB %d, offset %d",
+ len, pnum, offset);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+out:
+ vfree(buf);
+ return;
+}
+
+/**
+ * ubi_dump_ec_hdr - dump an erase counter header.
+ * @ec_hdr: the erase counter header to dump
+ */
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+{
+ pr_err("Erase counter header dump:\n");
+ pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic));
+ pr_err("\tversion %d\n", (int)ec_hdr->version);
+ pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+ pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset));
+ pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq));
+ pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+ pr_err("erase counter header hexdump:\n");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ ec_hdr, UBI_EC_HDR_SIZE, 1);
+}
+
+/**
+ * ubi_dump_vid_hdr - dump a volume identifier header.
+ * @vid_hdr: the volume identifier header to dump
+ */
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+{
+ pr_err("Volume identifier header dump:\n");
+ pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
+ pr_err("\tversion %d\n", (int)vid_hdr->version);
+ pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type);
+ pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
+ pr_err("\tcompat %d\n", (int)vid_hdr->compat);
+ pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
+ pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
+ pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
+ pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
+ pr_err("\tsqnum %llu\n",
+ (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
+ pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+ pr_err("Volume identifier header hexdump:\n");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ vid_hdr, UBI_VID_HDR_SIZE, 1);
+}
+
+/**
+ * ubi_dump_vol_info - dump volume information.
+ * @vol: UBI volume description object
+ */
+void ubi_dump_vol_info(const struct ubi_volume *vol)
+{
+ pr_err("Volume information dump:\n");
+ pr_err("\tvol_id %d\n", vol->vol_id);
+ pr_err("\treserved_pebs %d\n", vol->reserved_pebs);
+ pr_err("\talignment %d\n", vol->alignment);
+ pr_err("\tdata_pad %d\n", vol->data_pad);
+ pr_err("\tvol_type %d\n", vol->vol_type);
+ pr_err("\tname_len %d\n", vol->name_len);
+ pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
+ pr_err("\tused_ebs %d\n", vol->used_ebs);
+ pr_err("\tused_bytes %lld\n", vol->used_bytes);
+ pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
+ pr_err("\tcorrupted %d\n", vol->corrupted);
+ pr_err("\tupd_marker %d\n", vol->upd_marker);
+
+ if (vol->name_len <= UBI_VOL_NAME_MAX &&
+ strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
+ pr_err("\tname %s\n", vol->name);
+ } else {
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
+ vol->name[0], vol->name[1], vol->name[2],
+ vol->name[3], vol->name[4]);
+ }
+}
+
+/**
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * @r: the object to dump
+ * @idx: volume table index
+ */
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+{
+ int name_len = be16_to_cpu(r->name_len);
+
+ pr_err("Volume table record %d dump:\n", idx);
+ pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs));
+ pr_err("\talignment %d\n", be32_to_cpu(r->alignment));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad));
+ pr_err("\tvol_type %d\n", (int)r->vol_type);
+ pr_err("\tupd_marker %d\n", (int)r->upd_marker);
+ pr_err("\tname_len %d\n", name_len);
+
+ if (r->name[0] == '\0') {
+ pr_err("\tname NULL\n");
+ return;
+ }
+
+ if (name_len <= UBI_VOL_NAME_MAX &&
+ strnlen(&r->name[0], name_len + 1) == name_len) {
+ pr_err("\tname %s\n", &r->name[0]);
+ } else {
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
+ r->name[0], r->name[1], r->name[2], r->name[3],
+ r->name[4]);
+ }
+ pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc));
+}
+
+/**
+ * ubi_dump_av - dump a &struct ubi_ainf_volume object.
+ * @av: the object to dump
+ */
+void ubi_dump_av(const struct ubi_ainf_volume *av)
+{
+ pr_err("Volume attaching information dump:\n");
+ pr_err("\tvol_id %d\n", av->vol_id);
+ pr_err("\thighest_lnum %d\n", av->highest_lnum);
+ pr_err("\tleb_count %d\n", av->leb_count);
+ pr_err("\tcompat %d\n", av->compat);
+ pr_err("\tvol_type %d\n", av->vol_type);
+ pr_err("\tused_ebs %d\n", av->used_ebs);
+ pr_err("\tlast_data_size %d\n", av->last_data_size);
+ pr_err("\tdata_pad %d\n", av->data_pad);
+}
+
+/**
+ * ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
+ * @aeb: the object to dump
+ * @type: object type: 0 - not corrupted, 1 - corrupted
+ */
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
+{
+ pr_err("eraseblock attaching information dump:\n");
+ pr_err("\tec %d\n", aeb->ec);
+ pr_err("\tpnum %d\n", aeb->pnum);
+ if (type == 0) {
+ pr_err("\tlnum %d\n", aeb->lnum);
+ pr_err("\tscrub %d\n", aeb->scrub);
+ pr_err("\tsqnum %llu\n", aeb->sqnum);
+ }
+}
+
+/**
+ * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * @req: the object to dump
+ */
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
+{
+ char nm[17];
+
+ pr_err("Volume creation request dump:\n");
+ pr_err("\tvol_id %d\n", req->vol_id);
+ pr_err("\talignment %d\n", req->alignment);
+ pr_err("\tbytes %lld\n", (long long)req->bytes);
+ pr_err("\tvol_type %d\n", req->vol_type);
+ pr_err("\tname_len %d\n", req->name_len);
+
+ memcpy(nm, req->name, 16);
+ nm[16] = 0;
+ pr_err("\t1st 16 characters of name: %s\n", nm);
+}
+
+/*
+ * Root directory for UBI stuff in debugfs. Contains sub-directories which
+ * contain the stuff specific to particular UBI devices.
+ */
+static struct dentry *dfs_rootdir;
+
+/**
+ * ubi_debugfs_init - create UBI debugfs directory.
+ *
+ * Create UBI debugfs directory. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_debugfs_init(void)
+{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ dfs_rootdir = debugfs_create_dir("ubi", NULL);
+ if (IS_ERR_OR_NULL(dfs_rootdir)) {
+ int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
+
+ pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_debugfs_exit - remove UBI debugfs directory.
+ */
+void ubi_debugfs_exit(void)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove(dfs_rootdir);
+}
+
+/* Read an UBI debugfs file */
+static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ char buf[8];
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = &ubi->dbg;
+
+ if (dent == d->dfs_chk_gen)
+ val = d->chk_gen;
+ else if (dent == d->dfs_chk_io)
+ val = d->chk_io;
+ else if (dent == d->dfs_chk_fastmap)
+ val = d->chk_fastmap;
+ else if (dent == d->dfs_disable_bgt)
+ val = d->disable_bgt;
+ else if (dent == d->dfs_emulate_bitflips)
+ val = d->emulate_bitflips;
+ else if (dent == d->dfs_emulate_io_failures)
+ val = d->emulate_io_failures;
+ else if (dent == d->dfs_emulate_power_cut) {
+ snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_power_cut_min) {
+ snprintf(buf, sizeof(buf), "%u\n", d->power_cut_min);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_power_cut_max) {
+ snprintf(buf, sizeof(buf), "%u\n", d->power_cut_max);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (val)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ count = simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+/* Write an UBI debugfs file */
+static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ size_t buf_size;
+ char buf[8] = {0};
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = &ubi->dbg;
+
+ buf_size = min_t(size_t, count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size)) {
+ count = -EFAULT;
+ goto out;
+ }
+
+ if (dent == d->dfs_power_cut_min) {
+ if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_power_cut_max) {
+ if (kstrtouint(buf, 0, &d->power_cut_max) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_emulate_power_cut) {
+ /* Do not use @val if parsing failed - it would be uninitialized */
+ if (kstrtoint(buf, 0, &val) != 0) {
+ count = -EINVAL;
+ goto out;
+ }
+ d->emulate_power_cut = val;
+ goto out;
+ }
+
+ if (buf[0] == '1')
+ val = 1;
+ else if (buf[0] == '0')
+ val = 0;
+ else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (dent == d->dfs_chk_gen)
+ d->chk_gen = val;
+ else if (dent == d->dfs_chk_io)
+ d->chk_io = val;
+ else if (dent == d->dfs_chk_fastmap)
+ d->chk_fastmap = val;
+ else if (dent == d->dfs_disable_bgt)
+ d->disable_bgt = val;
+ else if (dent == d->dfs_emulate_bitflips)
+ d->emulate_bitflips = val;
+ else if (dent == d->dfs_emulate_io_failures)
+ d->emulate_io_failures = val;
+ else
+ count = -EINVAL;
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+/* File operations for all UBI debugfs files */
+static const struct file_operations dfs_fops = {
+ .read = dfs_file_read,
+ .write = dfs_file_write,
+ .open = simple_open,
+ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+};
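+
+/*
+ * Illustrative user-space sketch (not part of the driver): assuming
+ * debugfs is mounted at /sys/kernel/debug and UBI device 0 is attached,
+ * the boolean debugfs files served by these operations are toggled by
+ * writing '0' or '1':
+ *
+ *   #include <fcntl.h>
+ *   #include <unistd.h>
+ *
+ *   int fd = open("/sys/kernel/debug/ubi/ubi0/chk_gen", O_WRONLY);
+ *   write(fd, "1", 1);  -- enable extra general self-checks
+ *   close(fd);
+ */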
+
+/**
+ * ubi_debugfs_init_dev - initialize debugfs for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function creates all debugfs files for UBI device @ubi. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+int ubi_debugfs_init_dev(struct ubi_device *ubi)
+{
+ int err, n;
+ unsigned long ubi_num = ubi->ubi_num;
+ const char *fname;
+ struct dentry *dent;
+ struct ubi_debug_info *d = &ubi->dbg;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
+ ubi->ubi_num);
+ if (n == UBI_DFS_DIR_LEN) {
+ /* The array size is too small */
+ fname = UBI_DFS_DIR_NAME;
+ dent = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ fname = d->dfs_dir_name;
+ dent = debugfs_create_dir(fname, dfs_rootdir);
+ if (IS_ERR_OR_NULL(dent))
+ goto out;
+ d->dfs_dir = dent;
+
+ fname = "chk_gen";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_gen = dent;
+
+ fname = "chk_io";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_io = dent;
+
+ fname = "chk_fastmap";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_fastmap = dent;
+
+ fname = "tst_disable_bgt";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_disable_bgt = dent;
+
+ fname = "tst_emulate_bitflips";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_bitflips = dent;
+
+ fname = "tst_emulate_io_failures";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_io_failures = dent;
+
+ fname = "tst_emulate_power_cut";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_power_cut = dent;
+
+ fname = "tst_emulate_power_cut_min";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_power_cut_min = dent;
+
+ fname = "tst_emulate_power_cut_max";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_power_cut_max = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(d->dfs_dir);
+out:
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
+ fname, err);
+ return err;
+}
+
+/**
+ * ubi_debugfs_exit_dev - free all debugfs files corresponding to device @ubi
+ * @ubi: UBI device description object
+ */
+void ubi_debugfs_exit_dev(struct ubi_device *ubi)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ubi->dbg.dfs_dir);
+}
+
+/**
+ * ubi_dbg_power_cut - emulate a power cut if it is time to do so
+ * @ubi: UBI device description object
+ * @caller: Flags set to indicate from where the function is being called
+ *
+ * Returns non-zero if a power cut was emulated, zero if not.
+ */
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
+{
+ unsigned int range;
+
+ if ((ubi->dbg.emulate_power_cut & caller) == 0)
+ return 0;
+
+ if (ubi->dbg.power_cut_counter == 0) {
+ ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;
+
+ if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
+ range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
+ ubi->dbg.power_cut_counter += prandom_u32() % range;
+ }
+ return 0;
+ }
+
+ ubi->dbg.power_cut_counter--;
+ if (ubi->dbg.power_cut_counter)
+ return 0;
+
+ ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
+ ubi_ro_mode(ubi);
+ return 1;
+}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
new file mode 100644
index 000000000..eb8985e5c
--- /dev/null
+++ b/drivers/mtd/ubi/debug.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __UBI_DEBUG_H__
+#define __UBI_DEBUG_H__
+
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
+
+#include <linux/random.h>
+
+#define ubi_assert(expr) do { \
+ if (unlikely(!(expr))) { \
+ pr_crit("UBI assert failed in %s at %u (pid %d)\n", \
+ __func__, __LINE__, current->pid); \
+ dump_stack(); \
+ } \
+} while (0)
+
+#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
+ print_hex_dump(l, ps, pt, r, g, b, len, a)
+
+#define ubi_dbg_msg(type, fmt, ...) \
+ pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid, \
+ ##__VA_ARGS__)
+
+/* General debugging messages */
+#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
+/* Messages from the eraseblock association sub-system */
+#define dbg_eba(fmt, ...) ubi_dbg_msg("eba", fmt, ##__VA_ARGS__)
+/* Messages from the wear-leveling sub-system */
+#define dbg_wl(fmt, ...) ubi_dbg_msg("wl", fmt, ##__VA_ARGS__)
+/* Messages from the input/output sub-system */
+#define dbg_io(fmt, ...) ubi_dbg_msg("io", fmt, ##__VA_ARGS__)
+/* Initialization and build messages */
+#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
+
+void ubi_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dump_av(const struct ubi_ainf_volume *av);
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+ int len);
+int ubi_debugfs_init(void);
+void ubi_debugfs_exit(void);
+int ubi_debugfs_init_dev(struct ubi_device *ubi);
+void ubi_debugfs_exit_dev(struct ubi_device *ubi);
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+{
+ return ubi->dbg.disable_bgt;
+}
+
+/**
+ * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
+ */
+static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_bitflips)
+ return !(prandom_u32() % 200);
+ return 0;
+}
+
+/**
+ * ubi_dbg_is_write_failure - if it is time to emulate a write failure.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if a write failure should be emulated, otherwise returns
+ * zero.
+ */
+static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !(prandom_u32() % 500);
+ return 0;
+}
+
+/**
+ * ubi_dbg_is_erase_failure - if it is time to emulate an erase failure.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if an erase failure should be emulated, otherwise returns
+ * zero.
+ */
+static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !(prandom_u32() % 400);
+ return 0;
+}
+
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_io;
+}
+
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_gen;
+}
+
+static inline int ubi_dbg_chk_fastmap(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_fastmap;
+}
+
+static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
+{
+ ubi->dbg.chk_fastmap = 1;
+}
+
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
+#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
new file mode 100644
index 000000000..51bca035c
--- /dev/null
+++ b/drivers/mtd/ubi/eba.c
@@ -0,0 +1,1477 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * The UBI Eraseblock Association (EBA) sub-system.
+ *
+ * This sub-system is responsible for I/O to/from logical eraseblocks.
+ *
+ * Although in this implementation the EBA table is fully kept and managed in
+ * RAM, which implies poor scalability, it might be (partially) maintained on
+ * flash in future implementations.
+ *
+ * The EBA sub-system implements per-logical eraseblock locking. Before
+ * accessing a logical eraseblock it is locked for reading or writing. The
+ * per-logical eraseblock locking is implemented by means of the lock tree. The
+ * lock tree is an RB-tree which refers all the currently locked logical
+ * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
+ * They are indexed by (@vol_id, @lnum) pairs.
+ *
+ * EBA also maintains the global sequence counter which is incremented each
+ * time a logical eraseblock is mapped to a physical eraseblock and it is
+ * stored in the volume identifier header. This means that each VID header has
+ * a unique sequence number. The sequence number is only ever increased, and we
+ * assume 64 bits is enough for it to never overflow.
+ */
+
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include "ubi.h"
+
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
+/**
+ * next_sqnum - get next sequence number.
+ * @ubi: UBI device description object
+ *
+ * This function returns the next sequence number to use, which is just the
+ * current global sequence counter value. It also increases the global
+ * sequence counter.
+ */
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
+{
+ unsigned long long sqnum;
+
+ spin_lock(&ubi->ltree_lock);
+ sqnum = ubi->global_sqnum++;
+ spin_unlock(&ubi->ltree_lock);
+
+ return sqnum;
+}
+
+/**
+ * ubi_get_compat - get compatibility flags of a volume.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ *
+ * This function returns compatibility flags for an internal volume. User
+ * volumes have no compatibility flags, so %0 is returned.
+ */
+static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
+{
+ if (vol_id == UBI_LAYOUT_VOLUME_ID)
+ return UBI_LAYOUT_VOLUME_COMPAT;
+ return 0;
+}
+
+/**
+ * ltree_lookup - look up the lock tree.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
+ * object if the logical eraseblock is locked and %NULL if it is not.
+ * @ubi->ltree_lock has to be locked.
+ */
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+ int lnum)
+{
+ struct rb_node *p;
+
+ p = ubi->ltree.rb_node;
+ while (p) {
+ struct ubi_ltree_entry *le;
+
+ le = rb_entry(p, struct ubi_ltree_entry, rb);
+
+ if (vol_id < le->vol_id)
+ p = p->rb_left;
+ else if (vol_id > le->vol_id)
+ p = p->rb_right;
+ else {
+ if (lnum < le->lnum)
+ p = p->rb_left;
+ else if (lnum > le->lnum)
+ p = p->rb_right;
+ else
+ return le;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * ltree_add_entry - add new entry to the lock tree.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to
+ * the lock tree. If such an entry is already there, its usage counter is
+ * increased. Returns a pointer to the lock tree entry or %-ENOMEM if memory
+ * allocation failed.
+ */
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+ int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le, *le1, *le_free;
+
+ le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
+ if (!le)
+ return ERR_PTR(-ENOMEM);
+
+ le->users = 0;
+ init_rwsem(&le->mutex);
+ le->vol_id = vol_id;
+ le->lnum = lnum;
+
+ spin_lock(&ubi->ltree_lock);
+ le1 = ltree_lookup(ubi, vol_id, lnum);
+
+ if (le1) {
+ /*
+ * This logical eraseblock is already locked. The newly
+ * allocated lock entry is not needed.
+ */
+ le_free = le;
+ le = le1;
+ } else {
+ struct rb_node **p, *parent = NULL;
+
+ /*
+ * No lock entry, add the newly allocated one to the
+ * @ubi->ltree RB-tree.
+ */
+ le_free = NULL;
+
+ p = &ubi->ltree.rb_node;
+ while (*p) {
+ parent = *p;
+ le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
+
+ if (vol_id < le1->vol_id)
+ p = &(*p)->rb_left;
+ else if (vol_id > le1->vol_id)
+ p = &(*p)->rb_right;
+ else {
+ ubi_assert(lnum != le1->lnum);
+ if (lnum < le1->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ }
+
+ rb_link_node(&le->rb, parent, p);
+ rb_insert_color(&le->rb, &ubi->ltree);
+ }
+ le->users += 1;
+ spin_unlock(&ubi->ltree_lock);
+
+ kfree(le_free);
+ return le;
+}
+
+/**
+ * leb_read_lock - lock logical eraseblock for reading.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for reading. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ down_read(&le->mutex);
+ return 0;
+}
+
+/**
+ * leb_read_unlock - unlock logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ */
+static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ spin_lock(&ubi->ltree_lock);
+ le = ltree_lookup(ubi, vol_id, lnum);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ up_read(&le->mutex);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ kfree(le);
+ }
+ spin_unlock(&ubi->ltree_lock);
+}
+
+/**
+ * leb_write_lock - lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ down_write(&le->mutex);
+ return 0;
+}
+
+/**
+ * leb_write_trylock - try to lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing if there is no
+ * contention and does nothing if there is contention. Returns %0 in case of
+ * success, %1 in case of contention, and a negative error code in case of
+ * failure.
+ */
+static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ if (down_write_trylock(&le->mutex))
+ return 0;
+
+ /* Contention, cancel */
+ spin_lock(&ubi->ltree_lock);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ kfree(le);
+ }
+ spin_unlock(&ubi->ltree_lock);
+
+ return 1;
+}
+
+/**
+ * leb_write_unlock - unlock logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ */
+static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ spin_lock(&ubi->ltree_lock);
+ le = ltree_lookup(ubi, vol_id, lnum);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ up_write(&le->mutex);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ kfree(le);
+ }
+ spin_unlock(&ubi->ltree_lock);
+}
+
+/**
+ * ubi_eba_unmap_leb - un-map logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and schedules corresponding
+ * physical eraseblock for erasure. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum)
+{
+ int err, pnum, vol_id = vol->vol_id;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum < 0)
+ /* This logical eraseblock is already unmapped */
+ goto out_unlock;
+
+ dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
+
+ down_read(&ubi->fm_eba_sem);
+ vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
+ up_read(&ubi->fm_eba_sem);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
+
+out_unlock:
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+/**
+ * ubi_eba_read_leb - read data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: buffer to store the read data
+ * @offset: offset from where to read
+ * @len: how many bytes to read
+ * @check: data CRC check flag
+ *
+ * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
+ * bytes. The @check flag only makes sense for static volumes and forces
+ * eraseblock data CRC checking.
+ *
+ * In case of success this function returns zero. In case of a static volume,
+ * %-EBADMSG is returned if the data CRC does not match. %-EBADMSG may also be
+ * returned for any volume type if an ECC error was detected by the MTD device
+ * driver. Other negative error codes may be returned in case of other errors.
+ */
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check)
+{
+ int err, pnum, scrub = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t uninitialized_var(crc);
+
+ err = leb_read_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum < 0) {
+ /*
+ * The logical eraseblock is not mapped, fill the whole buffer
+ * with 0xFF bytes. The exception is static volumes for which
+ * it is an error to read unmapped logical eraseblocks.
+ */
+ dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
+ len, offset, vol_id, lnum);
+ leb_read_unlock(ubi, vol_id, lnum);
+ ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
+ memset(buf, 0xFF, len);
+ return 0;
+ }
+
+ dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ check = 0;
+
+retry:
+ if (check) {
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err > 0) {
+ /*
+ * The header is either absent or corrupted.
+ * The former case means there is a bug -
+ * switch to read-only mode just in case.
+ * The latter case means a real corruption - we
+ * may try to recover data. FIXME: but this is
+ * not implemented.
+ */
+ if (err == UBI_IO_BAD_HDR_EBADMSG ||
+ err == UBI_IO_BAD_HDR) {
+ ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
+ pnum, vol_id, lnum);
+ err = -EBADMSG;
+ } else {
+ err = -EINVAL;
+ ubi_ro_mode(ubi);
+ }
+ }
+ goto out_free;
+ } else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
+ ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
+
+ crc = be32_to_cpu(vid_hdr->data_crc);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ }
+
+ err = ubi_io_read_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+ else if (mtd_is_eccerr(err)) {
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ goto out_unlock;
+ scrub = 1;
+ if (!check) {
+ ubi_msg(ubi, "force data checking");
+ check = 1;
+ goto retry;
+ }
+ } else
+ goto out_unlock;
+ }
+
+ if (check) {
+ uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
+ if (crc1 != crc) {
+ ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
+ crc1, crc);
+ err = -EBADMSG;
+ goto out_unlock;
+ }
+ }
+
+ if (scrub)
+ err = ubi_wl_scrub_peb(ubi, pnum);
+
+ leb_read_unlock(ubi, vol_id, lnum);
+ return err;
+
+out_free:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+out_unlock:
+ leb_read_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+/**
+ * ubi_eba_read_leb_sg - read data into a scatter gather list.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @sgl: UBI scatter gather list to store the read data
+ * @offset: offset from where to read
+ * @len: how many bytes to read
+ * @check: data CRC check flag
+ *
+ * This function works exactly like ubi_eba_read_leb(), but instead of
+ * storing the read data in a buffer it writes it into an UBI scatter gather
+ * list.
+ */
+int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_sgl *sgl, int lnum, int offset, int len,
+ int check)
+{
+ int to_read;
+ int ret;
+ struct scatterlist *sg;
+
+ for (;;) {
+ ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
+ sg = &sgl->sg[sgl->list_pos];
+ if (len < sg->length - sgl->page_pos)
+ to_read = len;
+ else
+ to_read = sg->length - sgl->page_pos;
+
+ ret = ubi_eba_read_leb(ubi, vol, lnum,
+ sg_virt(sg) + sgl->page_pos, offset,
+ to_read, check);
+ if (ret < 0)
+ return ret;
+
+ offset += to_read;
+ len -= to_read;
+ if (!len) {
+ sgl->page_pos += to_read;
+ if (sgl->page_pos == sg->length) {
+ sgl->list_pos++;
+ sgl->page_pos = 0;
+ }
+
+ break;
+ }
+
+ sgl->list_pos++;
+ sgl->page_pos = 0;
+ }
+
+ return ret;
+}
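+
+/*
+ * Illustrative in-kernel usage sketch (not part of this file; a caller
+ * such as the ubiblock layer is assumed, and struct ubi_sgl is declared
+ * in ubi.h): the caller prepares the scatter gather list and the
+ * positions, then reads into it:
+ *
+ *   struct ubi_sgl sgl;
+ *
+ *   sgl.list_pos = 0;
+ *   sgl.page_pos = 0;
+ *   sg_init_table(sgl.sg, UBI_MAX_SG_COUNT);
+ *   ... fill sgl.sg[] with destination buffers ...
+ *   err = ubi_eba_read_leb_sg(ubi, vol, &sgl, lnum, 0, len, 0);
+ */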
+
+/**
+ * recover_peb - recover from write failure.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to recover
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ * @buf: data which was not written because of the write failure
+ * @offset: offset of the failed write
+ * @len: how many bytes should have been written
+ *
+ * This function is called in case of a write failure and moves all good data
+ * from the potentially bad physical eraseblock to a good physical eraseblock.
+ * This function also writes the data which was not written due to the failure.
+ * Returns new physical eraseblock number in case of success, and a negative
+ * error code in case of failure.
+ */
+static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
+ const void *buf, int offset, int len)
+{
+ int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
+ struct ubi_volume *vol = ubi->volumes[idx];
+ struct ubi_vid_hdr *vid_hdr;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+retry:
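+	/* ubi_wl_get_peb() returns with @ubi->fm_eba_sem held for reading. */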
+ new_pnum = ubi_wl_get_peb(ubi);
+ if (new_pnum < 0) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ up_read(&ubi->fm_eba_sem);
+ return new_pnum;
+ }
+
+ ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
+ pnum, new_pnum);
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err > 0)
+ err = -EIO;
+ up_read(&ubi->fm_eba_sem);
+ goto out_put;
+ }
+
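+	/* Give the copy a fresh sequence number so it supersedes the old PEB. */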
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+ if (err) {
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ data_size = offset + len;
+ mutex_lock(&ubi->buf_mutex);
+ memset(ubi->peb_buf + offset, 0xFF, len);
+
+ /* Read everything before the area where the write failure happened */
+ if (offset > 0) {
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
+ if (err && err != UBI_IO_BITFLIPS) {
+ up_read(&ubi->fm_eba_sem);
+ goto out_unlock;
+ }
+ }
+
+ memcpy(ubi->peb_buf + offset, buf, len);
+
+ err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
+ if (err) {
+ mutex_unlock(&ubi->buf_mutex);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ mutex_unlock(&ubi->buf_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+
+ vol->eba_tbl[lnum] = new_pnum;
+ up_read(&ubi->fm_eba_sem);
+ ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+
+ ubi_msg(ubi, "data was successfully recovered");
+ return 0;
+
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
+out_put:
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+write_error:
+ /*
+ * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
+ * get another one.
+ */
+ ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
+ if (++tries > UBI_IO_RETRIES) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+ ubi_msg(ubi, "try again");
+ goto retry;
+}
+
+/**
+ * ubi_eba_write_leb - write data to dynamic volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: the data to write
+ * @offset: offset within the logical eraseblock where to write
+ * @len: how many bytes to write
+ *
+ * This function writes data to logical eraseblock @lnum of a dynamic volume
+ * @vol. Returns zero in case of success and a negative error code in case
+ * of failure. In case of error, it is possible that something was still
+ * written to the flash media, but it may be garbage.
+ */
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ const void *buf, int offset, int len)
+{
+ int err, pnum, tries = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum >= 0) {
+ dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn(ubi, "failed to write data to PEB %d", pnum);
+ if (err == -EIO && ubi->bad_allowed)
+ err = recover_peb(ubi, pnum, vol_id, lnum, buf,
+ offset, len);
+ if (err)
+ ubi_ro_mode(ubi);
+ }
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+ }
+
+ /*
+ * The logical eraseblock is not mapped. We have to get a free physical
+ * eraseblock and write the volume identifier header there first.
+ */
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr) {
+ leb_write_unlock(ubi, vol_id, lnum);
+ return -ENOMEM;
+ }
+
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+retry:
+ pnum = ubi_wl_get_peb(ubi);
+ if (pnum < 0) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ leb_write_unlock(ubi, vol_id, lnum);
+ up_read(&ubi->fm_eba_sem);
+ return pnum;
+ }
+
+ dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ if (len) {
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+ }
+
+ vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_eba_sem);
+
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ /*
+ * Fortunately, this is the first write operation to this physical
+ * eraseblock, so just put it and request a new one. We assume that if
+ * this physical eraseblock went bad, the erase code will handle that.
+ */
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
+ goto retry;
+}
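+
+/*
+ * Illustrative call path (not part of this file): the ubi_leb_write()
+ * helper in kapi.c validates its arguments and then ends up here:
+ *
+ *	err = ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
+ */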
+
+/**
+ * ubi_eba_write_leb_st - write data to static volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: how many bytes to write
+ * @used_ebs: how many logical eraseblocks will this volume contain
+ *
+ * This function writes data to logical eraseblock @lnum of static volume
+ * @vol. The @used_ebs argument should contain the total number of logical
+ * eraseblocks in this static volume.
+ *
+ * When writing to the last logical eraseblock, the @len argument doesn't have
+ * to be aligned to the minimal I/O unit size. Instead, it has to be equal to
+ * the real data size, although the @buf buffer has to be padded out to the
+ * aligned length. In all other cases, @len has to be aligned.
+ *
+ * It is prohibited to write more than once to logical eraseblocks of static
+ * volumes. This function returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int used_ebs)
+{
+ int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (lnum == used_ebs - 1)
+ /* If this is the last LEB @len may be unaligned */
+ len = ALIGN(data_size, ubi->min_io_size);
+ else
+ ubi_assert(!(len & (ubi->min_io_size - 1)));
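+	/*
+	 * Example: with a 2048 byte min I/O unit and a 1000 byte tail in the
+	 * last LEB, @data_size stays 1000 while @len is padded to 2048.
+	 */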
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+ crc = crc32(UBI_CRC32_INIT, buf, data_size);
+ vid_hdr->vol_type = UBI_VID_STATIC;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->used_ebs = cpu_to_be32(used_ebs);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+
+retry:
+ pnum = ubi_wl_get_peb(ubi);
+ if (pnum < 0) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ leb_write_unlock(ubi, vol_id, lnum);
+ up_read(&ubi->fm_eba_sem);
+ return pnum;
+ }
+
+ dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
+ len, vol_id, lnum, pnum, used_ebs);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ err = ubi_io_write_data(ubi, buf, pnum, 0, len);
+ if (err) {
+ ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
+ len, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ ubi_assert(vol->eba_tbl[lnum] < 0);
+ vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_eba_sem);
+
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ /*
+ * This flash device does not admit of bad eraseblocks or
+ * something nasty and unexpected happened. Switch to read-only
+ * mode just in case.
+ */
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
+ goto retry;
+}
+
+/**
+ * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: how many bytes to write
+ *
+ * This function changes the contents of a logical eraseblock atomically. @buf
+ * has to contain new logical eraseblock data, and @len - the length of the
+ * data, which has to be aligned. This function guarantees that in case of an
+ * unclean reboot the old contents are preserved. Returns zero in case of
+ * success and a negative error code in case of failure.
+ *
+ * UBI reserves one LEB for the "atomic LEB change" operation, so only one
+ * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
+ */
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len)
+{
+ int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (len == 0) {
+ /*
+ * Special case when data length is zero. In this case the LEB
+ * has to be unmapped and mapped somewhere else.
+ */
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ return err;
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
+ }
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ mutex_lock(&ubi->alc_mutex);
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ goto out_mutex;
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+ crc = crc32(UBI_CRC32_INIT, buf, len);
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->data_size = cpu_to_be32(len);
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_crc = cpu_to_be32(crc);
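+	/*
+	 * The @copy_flag and @data_crc fields let the attach code pick the
+	 * valid copy if a power cut interrupts this change.
+	 */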
+
+retry:
+ pnum = ubi_wl_get_peb(ubi);
+ if (pnum < 0) {
+ err = pnum;
+ up_read(&ubi->fm_eba_sem);
+ goto out_leb_unlock;
+ }
+
+ dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
+ vol_id, lnum, vol->eba_tbl[lnum], pnum);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ err = ubi_io_write_data(ubi, buf, pnum, 0, len);
+ if (err) {
+ ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
+ len, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ old_pnum = vol->eba_tbl[lnum];
+ vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_eba_sem);
+
+ if (old_pnum >= 0) {
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
+ if (err)
+ goto out_leb_unlock;
+ }
+
+out_leb_unlock:
+ leb_write_unlock(ubi, vol_id, lnum);
+out_mutex:
+ mutex_unlock(&ubi->alc_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ /*
+ * This flash device does not admit of bad eraseblocks or
+ * something nasty and unexpected happened. Switch to read-only
+ * mode just in case.
+ */
+ ubi_ro_mode(ubi);
+ goto out_leb_unlock;
+ }
+
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ goto out_leb_unlock;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
+ goto retry;
+}
+
+/**
+ * is_error_sane - check whether a read error is sane.
+ * @err: code of the error happened during reading
+ *
+ * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
+ * cannot read data from the target PEB (an error @err happened). If the error
+ * code is sane, then we treat this error as non-fatal. Otherwise the error is
+ * fatal and UBI will be switched to R/O mode later.
+ *
+ * The idea is that we try not to switch to R/O mode if the read error is
+ * something which suggests there was a real read problem. E.g., %-EIO. Or a
+ * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
+ * mode, simply because we do not know what happened at the MTD level, and we
+ * cannot handle this. E.g., the underlying driver may have become crazy, and
+ * it is safer to switch to R/O mode to preserve the data.
+ *
+ * And bear in mind, this is about reading from the target PEB, i.e. the PEB
+ * which we have just written.
+ */
+static int is_error_sane(int err)
+{
+ if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
+ err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
+		return 1;
+	return 0;
+}
+
+/**
+ * ubi_eba_copy_leb - copy logical eraseblock.
+ * @ubi: UBI device description object
+ * @from: physical eraseblock number from where to copy
+ * @to: physical eraseblock number where to copy
+ * @vid_hdr: VID header of the @from physical eraseblock
+ *
+ * This function copies logical eraseblock from physical eraseblock @from to
+ * physical eraseblock @to. The @vid_hdr buffer may be changed by this
+ * function. Returns:
+ * o %0 in case of success;
+ * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
+ * o a negative error code in case of failure.
+ */
+int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ struct ubi_vid_hdr *vid_hdr)
+{
+ int err, vol_id, lnum, data_size, aldata_size, idx;
+ struct ubi_volume *vol;
+ uint32_t crc;
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
+ dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
+
+ if (vid_hdr->vol_type == UBI_VID_STATIC) {
+ data_size = be32_to_cpu(vid_hdr->data_size);
+ aldata_size = ALIGN(data_size, ubi->min_io_size);
+ } else
+ data_size = aldata_size =
+ ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
+
+ idx = vol_id2idx(ubi, vol_id);
+ spin_lock(&ubi->volumes_lock);
+ /*
+ * Note, we may race with volume deletion, which means that the volume
+ * this logical eraseblock belongs to might be being deleted. Since the
+ * volume deletion un-maps all the volume's logical eraseblocks, it will
+ * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
+ */
+ vol = ubi->volumes[idx];
+ spin_unlock(&ubi->volumes_lock);
+ if (!vol) {
+ /* No need to do further work, cancel */
+ dbg_wl("volume %d is being removed, cancel", vol_id);
+ return MOVE_CANCEL_RACE;
+ }
+
+ /*
+ * We do not want anybody to write to this logical eraseblock while we
+ * are moving it, so lock it.
+ *
+ * Note, we are using non-waiting locking here, because we cannot sleep
+ * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
+ * unmapping the LEB which is mapped to the PEB we are going to move
+ * (@from). This task locks the LEB and goes sleep in the
+ * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+ * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
+ * LEB is already locked, we just do not move it and return
+ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+ * we do not know the reason for the contention - it may be just
+ * normal I/O on this LEB, so we want to re-try.
+ */
+ err = leb_write_trylock(ubi, vol_id, lnum);
+ if (err) {
+ dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
+ return MOVE_RETRY;
+ }
+
+ /*
+ * The LEB might have been put meanwhile, and the task which put it is
+ * probably waiting on @ubi->move_mutex. No need to continue the work,
+ * cancel it.
+ */
+ if (vol->eba_tbl[lnum] != from) {
+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+ vol_id, lnum, from, vol->eba_tbl[lnum]);
+ err = MOVE_CANCEL_RACE;
+ goto out_unlock_leb;
+ }
+
+ /*
+ * OK, now the LEB is locked and we can safely start moving it. Since
+ * this function utilizes the @ubi->peb_buf buffer which is shared
+ * with some other functions - we lock the buffer by taking the
+ * @ubi->buf_mutex.
+ */
+ mutex_lock(&ubi->buf_mutex);
+ dbg_wl("read %d bytes of data", aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_warn(ubi, "error %d while reading data from PEB %d",
+ err, from);
+ err = MOVE_SOURCE_RD_ERR;
+ goto out_unlock_buf;
+ }
+
+ /*
+ * Now we have got to calculate how much data we have to copy. In
+ * case of a static volume it is fairly easy - the VID header contains
+ * the data size. In case of a dynamic volume it is more difficult - we
+ * have to read the contents, cut 0xFF bytes from the end and copy only
+ * the first part. We must do this to avoid writing 0xFF bytes as it
+ * may have some side-effects. And not only this. It is important not
+	 * to include those 0xFFs in the CRC because they may later be
+	 * filled with data.
+ */
+ if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
+ aldata_size = data_size =
+ ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
+
+ cond_resched();
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+ cond_resched();
+
+ /*
+ * It may turn out to be that the whole @from physical eraseblock
+ * contains only 0xFF bytes. Then we have to only write the VID header
+ * and do not write any data. This also means we should not set
+ * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
+ */
+ if (data_size > 0) {
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+ }
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+
+ err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
+ if (err) {
+ if (err == -EIO)
+ err = MOVE_TARGET_WR_ERR;
+ goto out_unlock_buf;
+ }
+
+ cond_resched();
+
+ /* Read the VID header back and check if it was written correctly */
+ err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
+ if (err) {
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
+ err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_TARGET_BITFLIPS;
+ goto out_unlock_buf;
+ }
+
+ if (data_size > 0) {
+ err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
+ if (err) {
+ if (err == -EIO)
+ err = MOVE_TARGET_WR_ERR;
+ goto out_unlock_buf;
+ }
+
+ cond_resched();
+
+ /*
+ * We've written the data and are going to read it back to make
+ * sure it was written correctly.
+ */
+ memset(ubi->peb_buf, 0xFF, aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
+ if (err) {
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn(ubi, "error %d while reading data back from PEB %d",
+ err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_TARGET_BITFLIPS;
+ goto out_unlock_buf;
+ }
+
+ cond_resched();
+
+ if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
+ ubi_warn(ubi, "read data back from PEB %d and it is different",
+ to);
+ err = -EINVAL;
+ goto out_unlock_buf;
+ }
+ }
+
+ ubi_assert(vol->eba_tbl[lnum] == from);
+ down_read(&ubi->fm_eba_sem);
+ vol->eba_tbl[lnum] = to;
+ up_read(&ubi->fm_eba_sem);
+
+out_unlock_buf:
+ mutex_unlock(&ubi->buf_mutex);
+out_unlock_leb:
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+/**
+ * print_rsvd_warning - warn about not having enough reserved PEBs.
+ * @ubi: UBI device description object
+ *
+ * This is a helper function for 'ubi_eba_init()' which is called when UBI
+ * cannot reserve enough PEBs for bad block handling. This function makes a
+ * decision whether we have to print a warning or not. The algorithm is as
+ * follows:
+ * o if this is a new UBI image, then just print the warning
+ * o if this is an UBI image which has already been used for some time, print
+ *   a warning only if we can reserve less than 10% of the expected number
+ *   of reserved PEBs.
+ *
+ * The idea is that when UBI is used, PEBs become bad, and the reserved pool
+ * of PEBs becomes smaller, which is normal and we do not want to scare users
+ * with a warning every time they attach the MTD device. This was an issue
+ * reported by real users.
+ */
+static void print_rsvd_warning(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ /*
+	 * The 1 << 18 (262144) threshold is picked arbitrarily - it is just
+	 * a reasonably large sequence number that distinguishes newly
+	 * flashed images from used ones.
+ */
+ if (ai->max_sqnum > (1 << 18)) {
+ int min = ubi->beb_rsvd_level / 10;
+
+ if (!min)
+ min = 1;
+ if (ubi->beb_rsvd_pebs > min)
+ return;
+ }
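+	/*
+	 * Example: with beb_rsvd_level == 40 the check above sets @min to 4,
+	 * so a used image only warns when at most 4 PEBs could be reserved.
+	 */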
+
+ ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+ ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+ if (ubi->corr_peb_count)
+ ubi_warn(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+}
+
+/**
+ * self_check_eba - run a self check on the EBA table constructed by fastmap.
+ * @ubi: UBI device description object
+ * @ai_fastmap: UBI attach info object created by fastmap
+ * @ai_scan: UBI attach info object created by scanning
+ *
+ * Returns < 0 in case of an internal error, 0 otherwise.
+ * If a bad EBA table entry was found it will be printed out and
+ * ubi_assert() triggers.
+ */
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan)
+{
+ int i, j, num_volumes, ret = 0;
+ int **scan_eba, **fm_eba;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *rb;
+
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
+ if (!scan_eba)
+ return -ENOMEM;
+
+ fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
+ if (!fm_eba) {
+ kfree(scan_eba);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_volumes; i++) {
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
+ GFP_KERNEL);
+ if (!scan_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
+ GFP_KERNEL);
+ if (!fm_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
+
+ av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ scan_eba[i][aeb->lnum] = aeb->pnum;
+
+ av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ fm_eba[i][aeb->lnum] = aeb->pnum;
+
+ for (j = 0; j < vol->reserved_pebs; j++) {
+ if (scan_eba[i][j] != fm_eba[i][j]) {
+ if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
+ fm_eba[i][j] == UBI_LEB_UNMAPPED)
+ continue;
+
+ ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
+					vol->vol_id, j, fm_eba[i][j],
+ scan_eba[i][j]);
+ ubi_assert(0);
+ }
+ }
+ }
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+
+ kfree(scan_eba[i]);
+ kfree(fm_eba[i]);
+ }
+
+ kfree(scan_eba);
+ kfree(fm_eba);
+ return ret;
+}
+
+/**
+ * ubi_eba_init - initialize the EBA sub-system using attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int i, j, err, num_volumes;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *rb;
+
+ dbg_eba("initialize EBA sub-system");
+
+ spin_lock_init(&ubi->ltree_lock);
+ mutex_init(&ubi->alc_mutex);
+ ubi->ltree = RB_ROOT;
+
+ ubi->global_sqnum = ai->max_sqnum + 1;
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ for (i = 0; i < num_volumes; i++) {
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ cond_resched();
+
+ vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
+ GFP_KERNEL);
+ if (!vol->eba_tbl) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
+
+ av = ubi_find_av(ai, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ if (aeb->lnum >= vol->reserved_pebs)
+ /*
+ * This may happen in case of an unclean reboot
+ * during re-size.
+ */
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+ else
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ }
+ }
+
+ if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, EBA_RESERVED_PEBS);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= EBA_RESERVED_PEBS;
+ ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
+ if (ubi->bad_allowed) {
+ ubi_calculate_reserved(ubi);
+
+ if (ubi->avail_pebs < ubi->beb_rsvd_level) {
+			/* Not enough free physical eraseblocks */
+ ubi->beb_rsvd_pebs = ubi->avail_pebs;
+ print_rsvd_warning(ubi, ai);
+ } else
+ ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
+
+ ubi->avail_pebs -= ubi->beb_rsvd_pebs;
+ ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
+ }
+
+ dbg_eba("EBA sub-system is initialized");
+ return 0;
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+ kfree(ubi->volumes[i]->eba_tbl);
+ ubi->volumes[i]->eba_tbl = NULL;
+ }
+ return err;
+}
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
new file mode 100644
index 000000000..b2a665398
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+/**
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
+ */
+static void update_fastmap_work_fn(struct work_struct *wrk)
+{
+ struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+
+ ubi_update_fastmap(ubi);
+ spin_lock(&ubi->wl_lock);
+ ubi->fm_work_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
+ * @root: the RB-tree to look in
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *victim = NULL;
+ int max_ec = UBI_MAX_ERASECOUNTER;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb) {
+ if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+ victim = e;
+ max_ec = e->ec;
+ }
+ }
+
+ return victim;
+}
+
+/**
+ * return_unused_pool_pebs - returns unused PEBs to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+ struct ubi_fm_pool *pool)
+{
+ int i;
+ struct ubi_wl_entry *e;
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+}
+
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb)
+ if (e->pnum < UBI_FM_MAX_START)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_fm_peb - get a free physical erase block for fastmap.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a free physical erase block - the one with the
+ * lowest erase counter below %UBI_FM_MAX_START if @anchor is set - and
+ * removes it from the WL sub-system.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+ struct ubi_wl_entry *e = NULL;
+
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ goto out;
+
+ if (anchor)
+ e = find_anchor_wl_entry(&ubi->free);
+ else
+ e = find_mean_wl_entry(ubi, &ubi->free);
+
+ if (!e)
+ goto out;
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+	/*
+	 * Remove it from the free list; the WL sub-system no longer knows
+	 * this erase block.
+	 */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+out:
+ return e;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_wl_entry *e;
+ int enough;
+
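+	/*
+	 * @fm_pool serves ubi_wl_get_peb() callers, while @fm_wl_pool feeds
+	 * the WL sub-system through get_peb_for_wl().
+	 */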
+ spin_lock(&ubi->wl_lock);
+
+ return_unused_pool_pebs(ubi, wl_pool);
+ return_unused_pool_pebs(ubi, pool);
+
+ wl_pool->size = 0;
+ pool->size = 0;
+
+ for (;;) {
+ enough = 0;
+ if (pool->size < pool->max_size) {
+ if (!ubi->free.rb_node)
+ break;
+
+ e = wl_get_wle(ubi);
+ if (!e)
+ break;
+
+ pool->pebs[pool->size] = e->pnum;
+ pool->size++;
+ } else
+ enough++;
+
+ if (wl_pool->size < wl_pool->max_size) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ break;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+
+ wl_pool->pebs[wl_pool->size] = e->pnum;
+ wl_pool->size++;
+ } else
+ enough++;
+
+ if (enough == 2)
+ break;
+ }
+
+ wl_pool->used = 0;
+ pool->used = 0;
+
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int ret, retried = 0;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+again:
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+
+	/* We check here also for the WL pool because at this point we can
+	 * refill the WL pool synchronously. */
+ if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_eba_sem);
+ ret = ubi_update_fastmap(ubi);
+ if (ret) {
+ ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
+ down_read(&ubi->fm_eba_sem);
+ return -ENOSPC;
+ }
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+ }
+
+ if (pool->used == pool->size) {
+ spin_unlock(&ubi->wl_lock);
+ if (retried) {
+ ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+ ret = -ENOSPC;
+ goto out;
+ }
+ retried = 1;
+ up_read(&ubi->fm_eba_sem);
+ goto again;
+ }
+
+ ubi_assert(pool->used < pool->size);
+ ret = pool->pebs[pool->used++];
+ prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ spin_unlock(&ubi->wl_lock);
+out:
+ return ret;
+}
+
+/**
+ * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size) {
+ /* We cannot update the fastmap here because this
+ * function is called in atomic context.
+ * Let's fail here and refill/update it as soon as possible. */
+ if (!ubi->fm_work_scheduled) {
+ ubi->fm_work_scheduled = 1;
+ schedule_work(&ubi->fm_work);
+ }
+ return NULL;
+ }
+
+ pnum = pool->pebs[pool->used++];
+ return ubi->lookuptbl[pnum];
+}
+
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ return -ENOMEM;
+ }
+
+ wrk->anchor = 1;
+ wrk->func = &wear_leveling_worker;
+ schedule_ubi_work(ubi, wrk);
+ return 0;
+}
+
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+ int lnum, int torture)
+{
+ struct ubi_wl_entry *e;
+ int vol_id, pnum = fm_e->pnum;
+
+ dbg_wl("PEB %d", pnum);
+
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+
+ /* This can happen if we recovered from a fastmap the very
+	 * first time and are now writing a new one. In this case the WL
+	 * system has never seen any PEB used by the original fastmap.
+ */
+ if (!e) {
+ e = fm_e;
+ ubi_assert(e->ec >= 0);
+ ubi->lookuptbl[pnum] = e;
+ }
+
+ spin_unlock(&ubi->wl_lock);
+
+ vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+ return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+ return wrk->func == erase_worker;
+}
+
+static void ubi_fastmap_close(struct ubi_device *ubi)
+{
+ int i;
+
+ flush_work(&ubi->fm_work);
+ return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ kfree(ubi->fm->e[i]);
+ }
+ kfree(ubi->fm);
+}
+
+/**
+ * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
+ * See find_mean_wl_entry()
+ *
+ * @ubi: UBI device description object
+ * @e: physical eraseblock to return
+ * @root: RB tree to test against.
+ */
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+					       struct rb_root *root)
+{
+ if (e && !ubi->fm_disabled && !ubi->fm &&
+ e->pnum < UBI_FM_MAX_START)
+ e = rb_entry(rb_next(root->rb_node),
+ struct ubi_wl_entry, u.rb);
+
+ return e;
+}
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 000000000..02a6de2f5
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1631 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+#include <linux/crc32.h>
+#include "ubi.h"
+
+/**
+ * init_seen - allocate the PEB "seen" array used for debugging.
+ * @ubi: UBI device description object
+ */
+static inline int *init_seen(struct ubi_device *ubi)
+{
+ int *ret;
+
+ if (!ubi_dbg_chk_fastmap(ubi))
+ return NULL;
+
+ ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ return ret;
+}
+
+/**
+ * free_seen - free the seen logic integer array.
+ * @seen: integer array of @ubi->peb_count size
+ */
+static inline void free_seen(int *seen)
+{
+ kfree(seen);
+}
+
+/**
+ * set_seen - mark a PEB as seen.
+ * @ubi: UBI device description object
+ * @pnum: The PEB to be marked as seen
+ * @seen: integer array of @ubi->peb_count size
+ */
+static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
+{
+ if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+ return;
+
+ seen[pnum] = 1;
+}
+
+/**
+ * self_check_seen - check whether all PEBs have been seen by fastmap.
+ * @ubi: UBI device description object
+ * @seen: integer array of @ubi->peb_count size
+ */
+static int self_check_seen(struct ubi_device *ubi, int *seen)
+{
+ int pnum, ret = 0;
+
+ if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+ return 0;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ if (!seen[pnum] && ubi->lookuptbl[pnum]) {
+ ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+size_t ubi_calc_fm_size(struct ubi_device *ubi)
+{
+ size_t size;
+
+	size = sizeof(struct ubi_fm_sb) +
+	       sizeof(struct ubi_fm_hdr) +
+	       sizeof(struct ubi_fm_scan_pool) +
+	       sizeof(struct ubi_fm_scan_pool) +
+	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+	       (sizeof(struct ubi_fm_eba) +
+	       (ubi->peb_count * sizeof(__be32))) +
+	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ return roundup(size, ubi->leb_size);
+}
+
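+/*
+ * Worked example (illustrative figures): for a device with 1024 PEBs the
+ * dominant terms are 1024 EC entries plus 1024 EBA slots; together with
+ * the fixed headers and up to UBI_MAX_VOLUMES volume headers this is far
+ * below a typical 128KiB LEB, so the size rounds up to a single LEB.
+ */
+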
+/**
+ * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ *
+ * Returns a new struct ubi_vid_hdr on success.
+ * NULL indicates out of memory.
+ */
+static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_vid_hdr *new;
+
+ new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!new)
+ goto out;
+
+ new->vol_type = UBI_VID_DYNAMIC;
+ new->vol_id = cpu_to_be32(vol_id);
+
+ /* UBI implementations without fastmap support have to delete the
+ * fastmap.
+ */
+ new->compat = UBI_COMPAT_DELETE;
+
+out:
+ return new;
+}
+
+/**
+ * add_aeb - create and add an attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erase counter of the new PEB
+ * @scrub: scrub this PEB after attaching
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+ int pnum, int ec, int scrub)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ aeb->lnum = -1;
+ aeb->scrub = scrub;
+ aeb->copy_flag = aeb->sqnum = 0;
+
+ ai->ec_sum += aeb->ec;
+ ai->ec_count++;
+
+ if (ai->max_ec < aeb->ec)
+ ai->max_ec = aeb->ec;
+
+ if (ai->min_ec > aeb->ec)
+ ai->min_ec = aeb->ec;
+
+ list_add_tail(&aeb->u.list, list);
+
+ return 0;
+}
+
+/**
+ * add_vol - create and add a new volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBs
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ *
+ * Returns the new struct ubi_ainf_volume on success.
+ * NULL indicates an error.
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+ int used_ebs, int data_pad, u8 vol_type,
+ int last_eb_bytes)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
+ goto out;
+
+ av->highest_lnum = av->leb_count = av->used_ebs = 0;
+ av->vol_id = vol_id;
+ av->data_pad = data_pad;
+ av->last_data_size = last_eb_bytes;
+ av->compat = 0;
+ av->vol_type = vol_type;
+ av->root = RB_ROOT;
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = used_ebs;
+
+ dbg_bld("found volume (ID %i)", vol_id);
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+
+out:
+ return av;
+}
+
+/**
+ * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
+ * from its original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the AEB to be assigned
+ * @av: target scan volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+ struct ubi_ainf_peb *aeb,
+ struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *tmp_aeb;
+	struct rb_node **p = &av->root.rb_node, *parent = NULL;
+
+ while (*p) {
+ parent = *p;
+
+ tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (aeb->lnum != tmp_aeb->lnum) {
+ if (aeb->lnum < tmp_aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ } else
+ break;
+ }
+
+ list_del(&aeb->u.list);
+ av->leb_count++;
+
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+}
+
+/**
+ * update_vol - inserts or updates a LEB which was found in a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
+ struct ubi_ainf_peb *aeb, *victim;
+ int cmp_res;
+
+ while (*p) {
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+ if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+ if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ }
+
+ /* This case can happen if the fastmap gets written
+ * because of a volume change (creation, deletion, ..).
+ * Then a PEB can be within the persistent EBA and the pool.
+ */
+ if (aeb->pnum == new_aeb->pnum) {
+ ubi_assert(aeb->lnum == new_aeb->lnum);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ return 0;
+ }
+
+ cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ /* new_aeb is newer */
+ if (cmp_res & 1) {
+ victim = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!victim)
+ return -ENOMEM;
+
+ victim->ec = aeb->ec;
+ victim->pnum = aeb->pnum;
+ list_add_tail(&victim->u.list, &ai->erase);
+
+ if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+				av->last_data_size =
+					be32_to_cpu(new_vh->data_size);
+
+ dbg_bld("vol %i: AEB %i's PEB %i is the newer",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+
+ aeb->ec = new_aeb->ec;
+ aeb->pnum = new_aeb->pnum;
+ aeb->copy_flag = new_vh->copy_flag;
+ aeb->scrub = new_aeb->scrub;
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ /* new_aeb is older */
+ } else {
+ dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+ list_add_tail(&new_aeb->u.list, &ai->erase);
+ }
+
+ return 0;
+ }
+ /* This LEB is new, let's add it to the volume */
+
+ if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+ av->highest_lnum = be32_to_cpu(new_vh->lnum);
+ av->last_data_size = be32_to_cpu(new_vh->data_size);
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+ av->leb_count++;
+
+ rb_link_node(&new_aeb->u.rb, parent, p);
+ rb_insert_color(&new_aeb->u.rb, &av->root);
+
+ return 0;
+}
+
+/**
+ * process_pool_aeb - handle a non-empty PEB found in a pool.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct ubi_ainf_volume *av, *tmp_av = NULL;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ int found = 0;
+
+ if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
+ be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ return 0;
+ }
+
+	/* Find the volume this AEB belongs to */
+ while (*p) {
+ parent = *p;
+ tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
+ p = &(*p)->rb_left;
+ else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
+ p = &(*p)->rb_right;
+ else {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ av = tmp_av;
+ else {
+ ubi_err(ubi, "orphaned volume in fastmap pool!");
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ return UBI_BAD_FASTMAP;
+ }
+
+ ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+
+ return update_vol(ubi, ai, av, new_vh, new_aeb);
+}
+
+/**
+ * unmap_peb - unmap a PEB.
+ * @ai: UBI attach info object
+ * @pnum: The PEB to be unmapped
+ *
+ * If fastmap detects a free PEB in the pool it has to check whether
+ * this PEB has been unmapped after writing the fastmap.
+ */
+static void unmap_peb(struct ubi_attach_info *ai, int pnum)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node *node, *node2;
+ struct ubi_ainf_peb *aeb;
+
+ for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
+ av = rb_entry(node, struct ubi_ainf_volume, rb);
+
+ for (node2 = rb_first(&av->root); node2;
+ node2 = rb_next(node2)) {
+ aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
+ if (aeb->pnum == pnum) {
+ rb_erase(&aeb->u.rb, &av->root);
+ av->leb_count--;
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * scan_pool - scans a pool for changed (no longer empty) PEBs.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @pebs: an array of all PEB numbers in the pool to be scanned
+ * @pool_size: size of the pool (number of entries in @pebs)
+ * @max_sqnum: pointer to the maximal sequence number
+ * @free: list of PEBs which are most likely free (and go into @ai->free)
+ *
+ * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
+ * < 0 indicates an internal error.
+ */
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int *pebs, int pool_size, unsigned long long *max_sqnum,
+ struct list_head *free)
+{
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_ainf_peb *new_aeb;
+ int i, pnum, err, ret = 0;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ return -ENOMEM;
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ kfree(ech);
+ return -ENOMEM;
+ }
+
+ dbg_bld("scanning fastmap pool: size = %i", pool_size);
+
+ /*
+ * Now scan all PEBs in the pool to find changes which have been made
+ * after the creation of the fastmap
+ */
+ for (i = 0; i < pool_size; i++) {
+ int scrub = 0;
+ int image_seq;
+
+ pnum = be32_to_cpu(pebs[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ubi_err(ubi, "bad PEB in fastmap pool!");
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
+ pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ } else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
+ unsigned long long ec = be64_to_cpu(ech->ec);
+ unmap_peb(ai, pnum);
+ dbg_bld("Adding PEB to free: %i", pnum);
+ if (err == UBI_IO_FF_BITFLIPS)
+ add_aeb(ai, free, pnum, ec, 1);
+ else
+ add_aeb(ai, free, pnum, ec, 0);
+ continue;
+ } else if (err == 0 || err == UBI_IO_BITFLIPS) {
+ dbg_bld("Found non empty PEB:%i in pool", pnum);
+
+ if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!new_aeb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ new_aeb->ec = be64_to_cpu(ech->ec);
+ new_aeb->pnum = pnum;
+ new_aeb->lnum = be32_to_cpu(vh->lnum);
+ new_aeb->sqnum = be64_to_cpu(vh->sqnum);
+ new_aeb->copy_flag = vh->copy_flag;
+ new_aeb->scrub = scrub;
+
+ if (*max_sqnum < new_aeb->sqnum)
+ *max_sqnum = new_aeb->sqnum;
+
+ err = process_pool_aeb(ubi, ai, vh, new_aeb);
+ if (err) {
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+ } else {
+ /* We are paranoid and fall back to scanning mode */
+			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+	}
+
+out:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+ return ret;
+}
+
+/**
+ * count_fastmap_pebs - Counts the PEBs found by fastmap.
+ * @ai: The UBI attach info object
+ */
+static int count_fastmap_pebs(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb1, *rb2;
+ int n = 0;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ n++;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ n++;
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ n++;
+
+ return n;
+}
+
+/**
+ * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info object
+ * @fm: the fastmap to be attached
+ *
+ * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
+ * < 0 indicates an internal error.
+ */
+static int ubi_attach_fastmap(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_fastmap_layout *fm)
+{
+ struct list_head used, free;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmhdr;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fmec;
+ struct ubi_fm_volhdr *fmvhdr;
+ struct ubi_fm_eba *fm_eba;
+ int ret, i, j, pool_size, wl_pool_size;
+ size_t fm_pos = 0, fm_size = ubi->fm_size;
+ unsigned long long max_sqnum = 0;
+ void *fm_raw = ubi->fm_buf;
+
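+	/*
+	 * The fastmap image is parsed front to back: super block, fastmap
+	 * header, user pool, WL pool, EC entries for the free, used, scrub
+	 * and erase lists, then one volume header plus EBA table per volume.
+	 */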
+ INIT_LIST_HEAD(&used);
+ INIT_LIST_HEAD(&free);
+ ai->min_ec = UBI_MAX_ERASECOUNTER;
+
+ fmsb = (struct ubi_fm_sb *)(fm_raw);
+ ai->max_sqnum = fmsb->sqnum;
+ fm_pos += sizeof(struct ubi_fm_sb);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
+ ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ pool_size = be16_to_cpu(fmpl1->size);
+ wl_pool_size = be16_to_cpu(fmpl2->size);
+ fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
+ fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
+
+ if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
+ ubi_err(ubi, "bad pool size: %i", pool_size);
+ goto fail_bad;
+ }
+
+ if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
+ ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_pool_size < 0) {
+ ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_wl_pool_size < 0) {
+ ubi_err(ubi, "bad maximal WL pool size: %i",
+ fm->max_wl_pool_size);
+ goto fail_bad;
+ }
+
+ /* read EC values from free list */
+ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from used list */
+ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from scrub list */
+ for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ /* read EC values from erase list */
+ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+ ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
+
+ /* Iterate over all volumes and read their EBA table */
+ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
+ fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmvhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
+ ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
+ goto fail_bad;
+ }
+
+ av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
+ be32_to_cpu(fmvhdr->used_ebs),
+ be32_to_cpu(fmvhdr->data_pad),
+ fmvhdr->vol_type,
+ be32_to_cpu(fmvhdr->last_eb_bytes));
+
+ if (!av)
+ goto fail_bad;
+
+ ai->vols_found++;
+ if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
+ ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
+
+ fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fm_eba);
+ fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
+ ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
+ goto fail_bad;
+ }
+
+ for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
+ int pnum = be32_to_cpu(fm_eba->pnum[j]);
+
+ if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
+ continue;
+
+ aeb = NULL;
+ list_for_each_entry(tmp_aeb, &used, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ aeb = tmp_aeb;
+ break;
+ }
+ }
+
+ if (!aeb) {
+ ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
+ goto fail_bad;
+ }
+
+ aeb->lnum = j;
+
+ if (av->highest_lnum <= aeb->lnum)
+ av->highest_lnum = aeb->lnum;
+
+ assign_aeb_to_av(ai, aeb, av);
+
+ dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
+ aeb->pnum, aeb->lnum, av->vol_id);
+ }
+ }
+
+ ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free);
+ if (ret)
+ goto fail;
+
+ ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free);
+ if (ret)
+ goto fail;
+
+ if (max_sqnum > ai->max_sqnum)
+ ai->max_sqnum = max_sqnum;
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->free);
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->erase);
+
+ ubi_assert(list_empty(&free));
+
+ /*
+ * If fastmap is leaking PEBs (must not happen), raise a
+ * fat warning and fall back to scanning mode.
+ * We do this here because in ubi_wl_init() it's too late
+ * and we cannot fall back to scanning.
+ */
+ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
+ ai->bad_peb_count - fm->used_blocks))
+ goto fail_bad;
+
+ return 0;
+
+fail_bad:
+ ret = UBI_BAD_FASTMAP;
+fail:
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+ list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+ list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ }
+
+ return ret;
+}
+
+/**
+ * ubi_scan_fastmap - scan the fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info to be filled
+ * @fm_anchor: The fastmap starts at this PEB
+ *
+ * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
+ * UBI_BAD_FASTMAP if one was found but is not usable.
+ * < 0 indicates an internal error.
+ */
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor)
+{
+ struct ubi_fm_sb *fmsb, *fmsb2;
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fastmap_layout *fm;
+ int i, used_blocks, pnum, ret = 0;
+ size_t fm_size;
+ __be32 crc, tmp_crc;
+ unsigned long long sqnum = 0;
+
+ down_write(&ubi->fm_protect);
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
+ if (!fmsb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm) {
+ ret = -ENOMEM;
+ kfree(fmsb);
+ goto out;
+ }
+
+ ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+ if (ret && ret != UBI_IO_BITFLIPS)
+ goto free_fm_sb;
+ else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[0] = 1;
+
+ if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
+ ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ if (fmsb->version != UBI_FM_FMT_VERSION) {
+ ubi_err(ubi, "bad fastmap version: %i, expected: %i",
+ fmsb->version, UBI_FM_FMT_VERSION);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ used_blocks = be32_to_cpu(fmsb->used_blocks);
+ if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
+ ubi_err(ubi, "number of fastmap blocks is invalid: %i",
+ used_blocks);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ fm_size = ubi->leb_size * used_blocks;
+ if (fm_size != ubi->fm_size) {
+ ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
+ fm_size, ubi->fm_size);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto free_fm_sb;
+ }
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ int image_seq;
+
+ pnum = be32_to_cpu(fmsb->block_loc[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
+ i, pnum);
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ } else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[i] = 1;
+
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!ubi->image_seq)
+ ubi->image_seq = image_seq;
+
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err(ubi, "wrong image seq:%d instead of %d",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
+ i, pnum);
+ goto free_hdr;
+ }
+
+ if (i == 0) {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+ ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_SB_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ } else {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+ ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_DATA_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ }
+
+ if (sqnum < be64_to_cpu(vh->sqnum))
+ sqnum = be64_to_cpu(vh->sqnum);
+
+ ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
+ ubi->leb_start, ubi->leb_size);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
+ "err: %i)", i, pnum, ret);
+ goto free_hdr;
+ }
+ }
+
+ kfree(fmsb);
+ fmsb = NULL;
+
+ fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
+ tmp_crc = be32_to_cpu(fmsb2->data_crc);
+ fmsb2->data_crc = 0;
+ crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
+ if (crc != tmp_crc) {
+ ubi_err(ubi, "fastmap data CRC is invalid");
+ ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
+ tmp_crc, crc);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ fmsb2->sqnum = sqnum;
+
+ fm->used_blocks = used_blocks;
+
+ ret = ubi_attach_fastmap(ubi, ai, fm);
+ if (ret) {
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ struct ubi_wl_entry *e;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e) {
+ while (i--)
+ kfree(fm->e[i]);
+
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
+ e->ec = be32_to_cpu(fmsb2->block_ec[i]);
+ fm->e[i] = e;
+ }
+
+ ubi->fm = fm;
+ ubi->fm_pool.max_size = ubi->fm->max_pool_size;
+ ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
+ ubi_msg(ubi, "attached by fastmap");
+ ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg(ubi, "fastmap WL pool size: %d",
+ ubi->fm_wl_pool.max_size);
+ ubi->fm_disabled = 0;
+
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+out:
+ up_write(&ubi->fm_protect);
+ if (ret == UBI_BAD_FASTMAP)
+ ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
+ return ret;
+
+free_hdr:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+free_fm_sb:
+ kfree(fmsb);
+ kfree(fm);
+ goto out;
+}
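+
+/*
+ * Sketched caller pattern for the return codes above (illustrative
+ * only, not the actual attach code):
+ *
+ *	err = ubi_scan_fastmap(ubi, ai, fm_anchor);
+ *	if (err == UBI_NO_FASTMAP || err == UBI_BAD_FASTMAP)
+ *		... fall back to a full scan ...
+ *	else if (err < 0)
+ *		... propagate the internal error ...
+ */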
+
+/**
+ * ubi_write_fastmap - writes a fastmap.
+ * @ubi: UBI device object
+ * @new_fm: the fastmap to be written
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int ubi_write_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *new_fm)
+{
+ size_t fm_pos = 0;
+ void *fm_raw;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmh;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fec;
+ struct ubi_fm_volhdr *fvh;
+ struct ubi_fm_eba *feba;
+ struct ubi_wl_entry *wl_e;
+ struct ubi_volume *vol;
+ struct ubi_vid_hdr *avhdr, *dvhdr;
+ struct ubi_work *ubi_wrk;
+ struct rb_node *tmp_rb;
+ int ret, i, j, free_peb_count, used_peb_count, vol_count;
+ int scrub_peb_count, erase_peb_count;
+ int *seen_pebs = NULL;
+
+ fm_raw = ubi->fm_buf;
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!avhdr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
+ if (!dvhdr) {
+ ret = -ENOMEM;
+ goto out_kfree;
+ }
+
+ seen_pebs = init_seen(ubi);
+ if (IS_ERR(seen_pebs)) {
+ ret = PTR_ERR(seen_pebs);
+ goto out_kfree;
+ }
+
+ spin_lock(&ubi->volumes_lock);
+ spin_lock(&ubi->wl_lock);
+
+ fmsb = (struct ubi_fm_sb *)fm_raw;
+ fm_pos += sizeof(*fmsb);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
+ fmsb->version = UBI_FM_FMT_VERSION;
+ fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
+ /* the max sqnum will be filled in while *reading* the fastmap */
+ fmsb->sqnum = 0;
+
+ fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
+ free_peb_count = 0;
+ used_peb_count = 0;
+ scrub_peb_count = 0;
+ erase_peb_count = 0;
+ vol_count = 0;
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
+ fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+
+ for (i = 0; i < ubi->fm_pool.size; i++) {
+ fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+ set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
+ }
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
+ fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+
+ for (i = 0; i < ubi->fm_wl_pool.size; i++) {
+ fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+ set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
+ }
+
+ ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ free_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->free_peb_count = cpu_to_be32(free_peb_count);
+
+ ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+
+ ubi_for_each_protected_peb(ubi, i, wl_e) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->used_peb_count = cpu_to_be32(used_peb_count);
+
+ ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ scrub_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
+
+ list_for_each_entry(ubi_wrk, &ubi->works, list) {
+ if (ubi_is_erase_work(ubi_wrk)) {
+ wl_e = ubi_wrk->e;
+ ubi_assert(wl_e);
+
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ erase_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ }
+ fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
+
+ for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
+ vol = ubi->volumes[i];
+
+ if (!vol)
+ continue;
+
+ vol_count++;
+
+ fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fvh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
+ fvh->vol_id = cpu_to_be32(vol->vol_id);
+ fvh->vol_type = vol->vol_type;
+ fvh->used_ebs = cpu_to_be32(vol->used_ebs);
+ fvh->data_pad = cpu_to_be32(vol->data_pad);
+ fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
+
+ ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
+ vol->vol_type == UBI_STATIC_VOLUME);
+
+ feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+
+ feba->reserved_pebs = cpu_to_be32(j);
+ feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
+ }
+ fmh->vol_count = cpu_to_be32(vol_count);
+ fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
+
+ avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ avhdr->lnum = 0;
+
+ spin_unlock(&ubi->wl_lock);
+ spin_unlock(&ubi->volumes_lock);
+
+ dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
+ if (ret) {
+ ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
+ goto out_kfree;
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+ set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
+ fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
+ }
+
+ fmsb->data_crc = 0;
+ fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
+ ubi->fm_size));
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ dvhdr->lnum = cpu_to_be32(i);
+ dbg_bld("writing fastmap data to PEB %i sqnum %llu",
+ new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
+ if (ret) {
+ ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
+ new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
+ if (ret) {
+ ubi_err(ubi, "unable to write fastmap to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ ubi_assert(new_fm);
+ ubi->fm = new_fm;
+
+ ret = self_check_seen(ubi, seen_pebs);
+ dbg_bld("fastmap written!");
+
+out_kfree:
+ ubi_free_vid_hdr(ubi, avhdr);
+ ubi_free_vid_hdr(ubi, dvhdr);
+ free_seen(seen_pebs);
+out:
+ return ret;
+}
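+
+/*
+ * For reference, the on-flash layout serialized above, in order (a
+ * summary of this function, not a separate format definition):
+ *
+ *	ubi_fm_sb | ubi_fm_hdr | pool | WL pool |
+ *	free/used/scrub/erase EC entries |
+ *	per volume: ubi_fm_volhdr followed by its ubi_fm_eba table
+ *
+ * Note that "used" also covers the protected PEBs, and that the data
+ * CRC in the superblock is computed over the whole buffer with
+ * data_crc zeroed out first.
+ */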
+
+/**
+ * erase_block - Manually erase a PEB.
+ * @ubi: UBI device object
+ * @pnum: PEB to be erased
+ *
+ * Returns the new EC value on success, < 0 indicates an internal error.
+ */
+static int erase_block(struct ubi_device *ubi, int pnum)
+{
+ int ret;
+ struct ubi_ec_hdr *ec_hdr;
+ long long ec;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (ret < 0)
+ goto out;
+ else if (ret && ret != UBI_IO_BITFLIPS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ubi_io_sync_erase(ubi, pnum, 0);
+ if (ret < 0)
+ goto out;
+
+ ec = be64_to_cpu(ec_hdr->ec);
+ ec += ret;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ec_hdr->ec = cpu_to_be64(ec);
+ ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+ if (ret < 0)
+ goto out;
+
+ ret = ec;
+out:
+ kfree(ec_hdr);
+ return ret;
+}
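+
+/*
+ * Example of the EC arithmetic above: a PEB whose header carried EC 41
+ * and which ubi_io_sync_erase() erased once comes back with EC 42,
+ * which is both written back to the EC header and returned.
+ */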
+
+/**
+ * invalidate_fastmap - destroys a fastmap.
+ * @ubi: UBI device object
+ *
+ * This function ensures that upon next UBI attach a full scan
+ * is issued. We need this if UBI is about to write a new fastmap
+ * but is unable to do so. In this case we have two options:
+ * a) Make sure that the current fastmap will not be used at the
+ * next attach and continue, or b) fall back to RO mode to keep the
+ * current fastmap in a valid state.
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int invalidate_fastmap(struct ubi_device *ubi)
+{
+ int ret;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_wl_entry *e;
+ struct ubi_vid_hdr *vh = NULL;
+
+ if (!ubi->fm)
+ return 0;
+
+ ubi->fm = NULL;
+
+ ret = -ENOMEM;
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm)
+ goto out;
+
+ vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!vh)
+ goto out_free_fm;
+
+ ret = -ENOSPC;
+ e = ubi_wl_get_fm_peb(ubi, 1);
+ if (!e)
+ goto out_free_fm;
+
+ /*
+ * Create fake fastmap such that UBI will fall back
+ * to scanning mode.
+ */
+ vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
+ if (ret < 0) {
+ ubi_wl_put_fm_peb(ubi, e, 0, 0);
+ goto out_free_fm;
+ }
+
+ fm->used_blocks = 1;
+ fm->e[0] = e;
+
+ ubi->fm = fm;
+
+out:
+ ubi_free_vid_hdr(ubi, vh);
+ return ret;
+
+out_free_fm:
+ kfree(fm);
+ goto out;
+}
+
+/**
+ * return_fm_pebs - returns all PEBs used by a fastmap back to the
+ * WL sub-system.
+ * @ubi: UBI device object
+ * @fm: fastmap layout object
+ */
+static void return_fm_pebs(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *fm)
+{
+ int i;
+
+ if (!fm)
+ return;
+
+ for (i = 0; i < fm->used_blocks; i++) {
+ if (fm->e[i]) {
+ ubi_wl_put_fm_peb(ubi, fm->e[i], i,
+ fm->to_be_tortured[i]);
+ fm->e[i] = NULL;
+ }
+ }
+}
+
+/**
+ * ubi_update_fastmap - will be called by UBI if a volume changes or
+ * a fastmap pool becomes full.
+ * @ubi: UBI device object
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+int ubi_update_fastmap(struct ubi_device *ubi)
+{
+ int ret, i, j;
+ struct ubi_fastmap_layout *new_fm, *old_fm;
+ struct ubi_wl_entry *tmp_e;
+
+ down_write(&ubi->fm_protect);
+
+ ubi_refill_pools(ubi);
+
+ if (ubi->ro_mode || ubi->fm_disabled) {
+ up_write(&ubi->fm_protect);
+ return 0;
+ }
+
+ ret = ubi_ensure_anchor_pebs(ubi);
+ if (ret) {
+ up_write(&ubi->fm_protect);
+ return ret;
+ }
+
+ new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ if (!new_fm) {
+ up_write(&ubi->fm_protect);
+ return -ENOMEM;
+ }
+
+ new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
+ old_fm = ubi->fm;
+ ubi->fm = NULL;
+
+ if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
+ ubi_err(ubi, "fastmap too large");
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 0);
+ spin_unlock(&ubi->wl_lock);
+
+ if (!tmp_e) {
+ if (old_fm && old_fm->e[i]) {
+ ret = erase_block(ubi, old_fm->e[i]->pnum);
+ if (ret < 0) {
+ ubi_err(ubi, "could not erase old fastmap PEB");
+
+ for (j = 1; j < i; j++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+ j, 0);
+ new_fm->e[j] = NULL;
+ }
+ goto err;
+ }
+ new_fm->e[i] = old_fm->e[i];
+ old_fm->e[i] = NULL;
+ } else {
+ ubi_err(ubi, "could not get any free erase block");
+
+ for (j = 1; j < i; j++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+ new_fm->e[j] = NULL;
+ }
+
+ ret = -ENOSPC;
+ goto err;
+ }
+ } else {
+ new_fm->e[i] = tmp_e;
+
+ if (old_fm && old_fm->e[i]) {
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ old_fm->e[i] = NULL;
+ }
+ }
+ }
+
+ /* Old fastmap is larger than the new one */
+ if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
+ for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ old_fm->e[i] = NULL;
+ }
+ }
+
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+ spin_unlock(&ubi->wl_lock);
+
+ if (old_fm) {
+ /* no fresh anchor PEB was found, reuse the old one */
+ if (!tmp_e) {
+ ret = erase_block(ubi, old_fm->e[0]->pnum);
+ if (ret < 0) {
+ ubi_err(ubi, "could not erase old anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i],
+ i, 0);
+ new_fm->e[i] = NULL;
+ }
+ goto err;
+ }
+ new_fm->e[0] = old_fm->e[0];
+ new_fm->e[0]->ec = ret;
+ old_fm->e[0] = NULL;
+ } else {
+ /* we've got a new anchor PEB, return the old one */
+ ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
+ old_fm->to_be_tortured[0]);
+ new_fm->e[0] = tmp_e;
+ old_fm->e[0] = NULL;
+ }
+ } else {
+ if (!tmp_e) {
+ ubi_err(ubi, "could not find any anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+ new_fm->e[i] = NULL;
+ }
+
+ ret = -ENOSPC;
+ goto err;
+ }
+ new_fm->e[0] = tmp_e;
+ }
+
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_eba_sem);
+ ret = ubi_write_fastmap(ubi, new_fm);
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
+
+ if (ret)
+ goto err;
+
+out_unlock:
+ up_write(&ubi->fm_protect);
+ kfree(old_fm);
+ return ret;
+
+err:
+ ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
+
+ ret = invalidate_fastmap(ubi);
+ if (ret < 0) {
+		ubi_err(ubi, "Unable to invalidate current fastmap!");
+ ubi_ro_mode(ubi);
+ } else {
+ return_fm_pebs(ubi, old_fm);
+ return_fm_pebs(ubi, new_fm);
+ ret = 0;
+ }
+
+ kfree(new_fm);
+ goto out_unlock;
+}
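+
+/*
+ * Failure handling above, in short: if the new fastmap cannot be
+ * written, the old one is invalidated so that the next attach does a
+ * full scan; only if even that fails does UBI drop to read-only mode.
+ */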
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
new file mode 100644
index 000000000..b93807b4c
--- /dev/null
+++ b/drivers/mtd/ubi/gluebi.c
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём), Joern Engel
+ */
+
+/*
+ * This is a small driver which implements fake MTD devices on top of UBI
+ * volumes. This sounds strange, but it is in fact quite useful to make
+ * MTD-oriented software (including all the legacy software) work on top of
+ * UBI.
+ *
+ * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit
+ * size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The
+ * eraseblock size is equivalent to the logical eraseblock size of the volume.
+ */
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/ubi.h>
+#include <linux/mtd/mtd.h>
+#include "ubi-media.h"
+
+#define err_msg(fmt, ...) \
+ pr_err("gluebi (pid %d): %s: " fmt "\n", \
+ current->pid, __func__, ##__VA_ARGS__)
+
+/**
+ * struct gluebi_device - a gluebi device description data structure.
+ * @mtd: emulated MTD device description object
+ * @refcnt: gluebi device reference count
+ * @desc: UBI volume descriptor
+ * @ubi_num: UBI device number this gluebi device works on
+ * @vol_id: ID of UBI volume this gluebi device works on
+ * @list: link in a list of gluebi devices
+ */
+struct gluebi_device {
+ struct mtd_info mtd;
+ int refcnt;
+ struct ubi_volume_desc *desc;
+ int ubi_num;
+ int vol_id;
+ struct list_head list;
+};
+
+/* List of all gluebi devices */
+static LIST_HEAD(gluebi_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+/**
+ * find_gluebi_nolock - find a gluebi device.
+ * @ubi_num: UBI device number
+ * @vol_id: volume ID
+ *
+ * This function searches for a gluebi device corresponding to UBI device
+ * @ubi_num and UBI volume @vol_id. Returns the gluebi device description
+ * object in case of success and %NULL in case of failure. The caller has to
+ * have the &devices_mutex locked.
+ */
+static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id)
+{
+ struct gluebi_device *gluebi;
+
+ list_for_each_entry(gluebi, &gluebi_devices, list)
+ if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id)
+ return gluebi;
+ return NULL;
+}
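+
+/*
+ * Sketched usage -- the lookup takes no locks itself, so callers
+ * serialize via &devices_mutex (as the notifier handlers below do):
+ *
+ *	mutex_lock(&devices_mutex);
+ *	gluebi = find_gluebi_nolock(ubi_num, vol_id);
+ *	... use or unlink gluebi ...
+ *	mutex_unlock(&devices_mutex);
+ */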
+
+/**
+ * gluebi_get_device - get MTD device reference.
+ * @mtd: the MTD device description object
+ *
+ * This function is called every time the MTD device is being opened and
+ * implements the MTD get_device() operation. Returns zero in case of success
+ * and a negative error code in case of failure.
+ */
+static int gluebi_get_device(struct mtd_info *mtd)
+{
+ struct gluebi_device *gluebi;
+ int ubi_mode = UBI_READONLY;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ if (mtd->flags & MTD_WRITEABLE)
+ ubi_mode = UBI_READWRITE;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ mutex_lock(&devices_mutex);
+ if (gluebi->refcnt > 0) {
+ /*
+ * The MTD device is already referenced and this is just one
+ * more reference. MTD allows many users to open the same
+ * volume simultaneously and do not distinguish between
+ * readers/writers/exclusive openers as UBI does. So we do not
+ * open the UBI volume again - just increase the reference
+ * counter and return.
+ */
+ gluebi->refcnt += 1;
+ mutex_unlock(&devices_mutex);
+ return 0;
+ }
+
+ /*
+ * This is the first reference to this UBI volume via the MTD device
+	 * interface. Open the corresponding volume in the mode derived
+	 * above from the MTD flags.
+ */
+ gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id,
+ ubi_mode);
+ if (IS_ERR(gluebi->desc)) {
+ mutex_unlock(&devices_mutex);
+ module_put(THIS_MODULE);
+ return PTR_ERR(gluebi->desc);
+ }
+ gluebi->refcnt += 1;
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_put_device - put MTD device reference.
+ * @mtd: the MTD device description object
+ *
+ * This function is called every time the MTD device is being put. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static void gluebi_put_device(struct mtd_info *mtd)
+{
+ struct gluebi_device *gluebi;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ mutex_lock(&devices_mutex);
+ gluebi->refcnt -= 1;
+ if (gluebi->refcnt == 0)
+ ubi_close_volume(gluebi->desc);
+ module_put(THIS_MODULE);
+ mutex_unlock(&devices_mutex);
+}
+
+/**
+ * gluebi_read - read operation of emulated MTD devices.
+ * @mtd: MTD device description object
+ * @from: absolute offset from where to read
+ * @len: how many bytes to read
+ * @retlen: count of read bytes is returned here
+ * @buf: buffer to store the read data
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ int err = 0, lnum, offs, bytes_left;
+ struct gluebi_device *gluebi;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ lnum = div_u64_rem(from, mtd->erasesize, &offs);
+ bytes_left = len;
+ while (bytes_left) {
+ size_t to_read = mtd->erasesize - offs;
+
+ if (to_read > bytes_left)
+ to_read = bytes_left;
+
+ err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_read;
+ buf += to_read;
+ }
+
+ *retlen = len - bytes_left;
+ return err;
+}
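+
+/*
+ * Example of the LEB/offset split above: with a 128 KiB erasesize, a
+ * read starting at absolute offset 300000 begins in LEB 2 at offset
+ * 37856 (300000 - 2 * 131072) and then continues LEB by LEB.
+ */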
+
+/**
+ * gluebi_write - write operation of emulated MTD devices.
+ * @mtd: MTD device description object
+ * @to: absolute offset where to write
+ * @len: how many bytes to write
+ * @retlen: count of written bytes is returned here
+ * @buf: buffer with data to write
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ int err = 0, lnum, offs, bytes_left;
+ struct gluebi_device *gluebi;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ lnum = div_u64_rem(to, mtd->erasesize, &offs);
+
+ if (len % mtd->writesize || offs % mtd->writesize)
+ return -EINVAL;
+
+ bytes_left = len;
+ while (bytes_left) {
+ size_t to_write = mtd->erasesize - offs;
+
+ if (to_write > bytes_left)
+ to_write = bytes_left;
+
+ err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_write;
+ buf += to_write;
+ }
+
+ *retlen = len - bytes_left;
+ return err;
+}
+
+/**
+ * gluebi_erase - erase operation of emulated MTD devices.
+ * @mtd: the MTD device description object
+ * @instr: the erase operation description
+ *
+ * This function calls the erase callback when it finishes. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int err, i, lnum, count;
+ struct gluebi_device *gluebi;
+
+ if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
+ return -EINVAL;
+
+ lnum = mtd_div_by_eb(instr->addr, mtd);
+ count = mtd_div_by_eb(instr->len, mtd);
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+
+ for (i = 0; i < count - 1; i++) {
+ err = ubi_leb_unmap(gluebi->desc, lnum + i);
+ if (err)
+ goto out_err;
+ }
+ /*
+ * MTD erase operations are synchronous, so we have to make sure the
+ * physical eraseblock is wiped out.
+ *
+ * Thus, perform leb_erase instead of leb_unmap operation - leb_erase
+ * will wait for the end of operations
+ */
+ err = ubi_leb_erase(gluebi->desc, lnum + i);
+ if (err)
+ goto out_err;
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+
+out_err:
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = (long long)lnum * mtd->erasesize;
+ return err;
+}
+
+/**
+ * gluebi_create - create a gluebi device for an UBI volume.
+ * @di: UBI device description object
+ * @vi: UBI volume description object
+ *
+ * This function is called when a new UBI volume is created in order to create
+ * the corresponding fake MTD device. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+static int gluebi_create(struct ubi_device_info *di,
+ struct ubi_volume_info *vi)
+{
+ struct gluebi_device *gluebi, *g;
+ struct mtd_info *mtd;
+
+ gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL);
+ if (!gluebi)
+ return -ENOMEM;
+
+ mtd = &gluebi->mtd;
+ mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL);
+ if (!mtd->name) {
+ kfree(gluebi);
+ return -ENOMEM;
+ }
+
+ gluebi->vol_id = vi->vol_id;
+ gluebi->ubi_num = vi->ubi_num;
+ mtd->type = MTD_UBIVOLUME;
+ if (!di->ro_mode)
+ mtd->flags = MTD_WRITEABLE;
+ mtd->owner = THIS_MODULE;
+ mtd->writesize = di->min_io_size;
+ mtd->erasesize = vi->usable_leb_size;
+ mtd->_read = gluebi_read;
+ mtd->_write = gluebi_write;
+ mtd->_erase = gluebi_erase;
+ mtd->_get_device = gluebi_get_device;
+ mtd->_put_device = gluebi_put_device;
+
+ /*
+	 * In case of a dynamic volume, MTD device size is just volume size. In
+ * case of a static volume the size is equivalent to the amount of data
+ * bytes.
+ */
+ if (vi->vol_type == UBI_DYNAMIC_VOLUME)
+ mtd->size = (unsigned long long)vi->usable_leb_size * vi->size;
+ else
+ mtd->size = vi->used_bytes;
+
+ /* Just a sanity check - make sure this gluebi device does not exist */
+ mutex_lock(&devices_mutex);
+ g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (g)
+		err_msg("gluebi MTD device %d from UBI device %d volume %d already exists",
+ g->mtd.index, vi->ubi_num, vi->vol_id);
+ mutex_unlock(&devices_mutex);
+
+ if (mtd_device_register(mtd, NULL, 0)) {
+ err_msg("cannot add MTD device");
+ kfree(mtd->name);
+ kfree(gluebi);
+ return -ENFILE;
+ }
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&gluebi->list, &gluebi_devices);
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_remove - remove a gluebi device.
+ * @vi: UBI volume description object
+ *
+ * This function is called when an UBI volume is removed and it removes
+ * the corresponding fake MTD device. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+static int gluebi_remove(struct ubi_volume_info *vi)
+{
+ int err = 0;
+ struct mtd_info *mtd;
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ err_msg("got remove notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
+ err = -ENOENT;
+ } else if (gluebi->refcnt)
+ err = -EBUSY;
+ else
+ list_del(&gluebi->list);
+ mutex_unlock(&devices_mutex);
+ if (err)
+ return err;
+
+ mtd = &gluebi->mtd;
+ err = mtd_device_unregister(mtd);
+ if (err) {
+ err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
+ mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
+ mutex_lock(&devices_mutex);
+ list_add_tail(&gluebi->list, &gluebi_devices);
+ mutex_unlock(&devices_mutex);
+ return err;
+ }
+
+ kfree(mtd->name);
+ kfree(gluebi);
+ return 0;
+}
+
+/**
+ * gluebi_updated - UBI volume was updated notifier.
+ * @vi: volume info structure
+ *
+ * This function is called every time an UBI volume is updated. It does nothing
+ * if the volume @vi is dynamic, and changes the MTD device size if the
+ * volume is static. This is needed because static volumes cannot be read past
+ * data they contain. This function returns zero in case of success and a
+ * negative error code in case of error.
+ */
+static int gluebi_updated(struct ubi_volume_info *vi)
+{
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ mutex_unlock(&devices_mutex);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
+ return -ENOENT;
+ }
+
+ if (vi->vol_type == UBI_STATIC_VOLUME)
+ gluebi->mtd.size = vi->used_bytes;
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_resized - UBI volume was re-sized notifier.
+ * @vi: volume info structure
+ *
+ * This function is called every time an UBI volume is re-sized. It changes the
+ * corresponding fake MTD device size. This function returns zero in case of
+ * success and a negative error code in case of error.
+ */
+static int gluebi_resized(struct ubi_volume_info *vi)
+{
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ mutex_unlock(&devices_mutex);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
+ return -ENOENT;
+ }
+ gluebi->mtd.size = vi->used_bytes;
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ns_ptr: pointer to the &struct ubi_notification object
+ */
+static int gluebi_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_ADDED:
+ gluebi_create(&nt->di, &nt->vi);
+ break;
+ case UBI_VOLUME_REMOVED:
+ gluebi_remove(&nt->vi);
+ break;
+ case UBI_VOLUME_RESIZED:
+ gluebi_resized(&nt->vi);
+ break;
+ case UBI_VOLUME_UPDATED:
+ gluebi_updated(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gluebi_notifier = {
+ .notifier_call = gluebi_notify,
+};
+
+static int __init ubi_gluebi_init(void)
+{
+ return ubi_register_volume_notifier(&gluebi_notifier, 0);
+}
+
+static void __exit ubi_gluebi_exit(void)
+{
+ struct gluebi_device *gluebi, *g;
+
+ list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) {
+ int err;
+ struct mtd_info *mtd = &gluebi->mtd;
+
+ err = mtd_device_unregister(mtd);
+ if (err)
+ err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring",
+ err, mtd->index, gluebi->ubi_num,
+ gluebi->vol_id);
+ kfree(mtd->name);
+ kfree(gluebi);
+ }
+ ubi_unregister_volume_notifier(&gluebi_notifier);
+}
+
+module_init(ubi_gluebi_init);
+module_exit(ubi_gluebi_exit);
+MODULE_DESCRIPTION("MTD emulation layer over UBI volumes");
+MODULE_AUTHOR("Artem Bityutskiy, Joern Engel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
new file mode 100644
index 000000000..5bbd1f094
--- /dev/null
+++ b/drivers/mtd/ubi/io.c
@@ -0,0 +1,1435 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI input/output sub-system.
+ *
+ * This sub-system provides a uniform way to work with all kinds of the
+ * underlying MTD devices. It also implements handy functions for reading and
+ * writing UBI headers.
+ *
+ * We are trying to have a paranoid mindset and not to trust what we read
+ * from the flash media in order to be more secure and robust. So this
+ * sub-system validates every single header it reads from the flash media.
+ *
+ * Some words about how the eraseblock headers are stored.
+ *
+ * The erase counter header is always stored at offset zero. By default, the
+ * VID header is stored after the EC header at the closest aligned offset
+ * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID
+ * header at the closest aligned offset. But this default layout may be
+ * changed. For example, for different reasons (e.g., optimization) UBI may be
+ * asked to put the VID header at further offset, and even at an unaligned
+ * offset. Of course, if the offset of the VID header is unaligned, UBI adds
+ * proper padding in front of it. Data offset may also be changed but it has to
+ * be aligned.
+ *
+ * About minimal I/O units. In general, UBI assumes a flash device model where
+ * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1,
+ * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the
+ * @ubi->mtd->writesize field. But as an exception, UBI admits of using another
+ * (smaller) minimal I/O unit size for EC and VID headers to make it possible
+ * to do different optimizations.
+ *
+ * This is extremely useful in case of NAND flashes which admit of several
+ * write operations to one NAND page. In this case UBI can fit EC and VID
+ * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal
+ * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still
+ * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI
+ * users.
+ *
+ * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so
+ * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID
+ * headers.
+ *
+ * Q: why not just treat the sub-page as the minimal I/O unit of this flash
+ * device, e.g., make @ubi->min_io_size = 512 in the example above?
+ *
+ * A: because when writing a sub-page, MTD still writes a full 2K page but the
+ * bytes which are not relevant to the sub-page are 0xFF. So, basically,
+ * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
+ * Thus, we prefer to use sub-pages only for EC and VID headers.
+ *
+ * As it was noted above, the VID header may start at a non-aligned offset.
+ * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
+ * the VID header may reside at offset 1984 which is the last 64 bytes of the
+ * last sub-page (EC header is always at offset zero). This causes some
+ * difficulties when reading and writing VID headers.
+ *
+ * Suppose we have a 64-byte buffer and we read a VID header into it. We change
+ * the data and want to write this VID header out. As we can only write in
+ * 512-byte chunks, we have to allocate one more buffer and copy our VID header
+ * to offset 448 of this buffer.
+ *
+ * The I/O sub-system does the following trick in order to avoid this extra
+ * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID
+ * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer.
+ * When the VID header is being written out, it shifts the VID header pointer
+ * back and writes the whole sub-page.
+ */
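+
+/*
+ * A minimal sketch of the trick described above (simplified, error
+ * handling omitted):
+ *
+ *	void *p = kzalloc(ubi->vid_hdr_alsize, GFP_KERNEL);
+ *	struct ubi_vid_hdr *vid_hdr = p + ubi->vid_hdr_shift;
+ *
+ *	... fill *vid_hdr ...
+ *	... write ubi->vid_hdr_alsize bytes starting at p, i.e. the
+ *	    whole sub-page, so no extra copy is needed ...
+ */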
+
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include "ubi.h"
+
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr);
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr);
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len);
+
+/**
+ * ubi_io_read - read data from a physical eraseblock.
+ * @ubi: UBI device description object
+ * @buf: buffer where to store the read data
+ * @pnum: physical eraseblock number to read from
+ * @offset: offset within the physical eraseblock from where to read
+ * @len: how many bytes to read
+ *
+ * This function reads data from offset @offset of physical eraseblock @pnum
+ * and stores the read data in the @buf buffer. The following return codes are
+ * possible:
+ *
+ * o %0 if all the requested data were successfully read;
+ * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
+ * correctable bit-flips were detected; this is harmless but may indicate
+ *   that this eraseblock may become bad soon (but does not have to);
+ * o %-EBADMSG if the MTD subsystem reported data integrity problems, for
+ * example it can be an ECC error in case of NAND; this most probably means
+ * that the data is corrupted;
+ * o %-EIO if some I/O error occurred;
+ * o other negative error codes in case of other errors.
+ */
+int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
+ int len)
+{
+ int err, retries = 0;
+ size_t read;
+ loff_t addr;
+
+ dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
+ ubi_assert(len > 0);
+
+ err = self_check_not_bad(ubi, pnum);
+ if (err)
+ return err;
+
+ /*
+ * Deliberately corrupt the buffer to improve robustness. Indeed, if we
+ * do not do this, the following may happen:
+ * 1. The buffer contains data from previous operation, e.g., read from
+ * another PEB previously. The data looks like expected, e.g., if we
+ * just do not read anything and return - the caller would not
+ * notice this. E.g., if we are reading a VID header, the buffer may
+ * contain a valid VID header from another PEB.
+ * 2. The driver is buggy and returns us success or -EBADMSG or
+ * -EUCLEAN, but it does not actually put any data to the buffer.
+ *
+ * This may confuse UBI or upper layers - they may think the buffer
+ * contains valid data while in fact it is just old data. This is
+ * especially possible because UBI (and UBIFS) relies on CRC, and
+ * treats data as correct even in case of ECC errors if the CRC is
+ * correct.
+ *
+ * Try to prevent this situation by changing the first byte of the
+ * buffer.
+ */
+ *((uint8_t *)buf) ^= 0xFF;
+
+ addr = (loff_t)pnum * ubi->peb_size + offset;
+retry:
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err) {
+ const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
+
+ if (mtd_is_bitflip(err)) {
+ /*
+ * -EUCLEAN is reported if there was a bit-flip which
+ * was corrected, so this is harmless.
+ *
+ * We do not report about it here unless debugging is
+ * enabled. A corresponding message will be printed
+			 * later, when it has been scrubbed.
+ */
+ ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
+ pnum);
+ ubi_assert(len == read);
+ return UBI_IO_BITFLIPS;
+ }
+
+ if (retries++ < UBI_IO_RETRIES) {
+ ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+ err, errstr, len, pnum, offset, read);
+ yield();
+ goto retry;
+ }
+
+ ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, errstr, len, pnum, offset, read);
+ dump_stack();
+
+ /*
+ * The driver should never return -EBADMSG if it failed to read
+ * all the requested data. But some buggy drivers might do
+ * this, so we change it to -EIO.
+ */
+ if (read != len && mtd_is_eccerr(err)) {
+ ubi_assert(0);
+ err = -EIO;
+ }
+ } else {
+ ubi_assert(len == read);
+
+ if (ubi_dbg_is_bitflip(ubi)) {
+ dbg_gen("bit-flip (emulated)");
+ err = UBI_IO_BITFLIPS;
+ }
+ }
+
+ return err;
+}
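+
+/*
+ * Sketched caller pattern for the return codes above (this is the
+ * pattern the attach and fastmap code use):
+ *
+ *	err = ubi_io_read(ubi, buf, pnum, offset, len);
+ *	if (err && err != UBI_IO_BITFLIPS)
+ *		return err;
+ *	else if (err == UBI_IO_BITFLIPS)
+ *		... schedule the PEB for scrubbing/torturing ...
+ */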
+
+/**
+ * ubi_io_write - write data to a physical eraseblock.
+ * @ubi: UBI device description object
+ * @buf: buffer with the data to write
+ * @pnum: physical eraseblock number to write to
+ * @offset: offset within the physical eraseblock where to write
+ * @len: how many bytes to write
+ *
+ * This function writes @len bytes of data from buffer @buf to offset @offset
+ * of physical eraseblock @pnum. If all the data were successfully written,
+ * zero is returned. If an error occurred, this function returns a negative
+ * error code. If %-EIO is returned, the physical eraseblock most probably went
+ * bad.
+ *
+ * Note, in case of an error, it is possible that something was still written
+ * to the flash media, but may be some garbage.
+ */
+int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
+ int len)
+{
+ int err;
+ size_t written;
+ loff_t addr;
+
+ dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
+ ubi_assert(offset % ubi->hdrs_min_io_size == 0);
+ ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+ err = self_check_not_bad(ubi, pnum);
+ if (err)
+ return err;
+
+ /* The area we are writing to has to contain all 0xFF bytes */
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
+ if (err)
+ return err;
+
+ if (offset >= ubi->leb_start) {
+ /*
+ * We write to the data area of the physical eraseblock. Make
+ * sure it has valid EC and VID headers.
+ */
+ err = self_check_peb_ec_hdr(ubi, pnum);
+ if (err)
+ return err;
+ err = self_check_peb_vid_hdr(ubi, pnum);
+ if (err)
+ return err;
+ }
+
+ if (ubi_dbg_is_write_failure(ubi)) {
+ ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
+ len, pnum, offset);
+ dump_stack();
+ return -EIO;
+ }
+
+ addr = (loff_t)pnum * ubi->peb_size + offset;
+ err = mtd_write(ubi->mtd, addr, len, &written, buf);
+ if (err) {
+ ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+ err, len, pnum, offset, written);
+ dump_stack();
+ ubi_dump_flash(ubi, pnum, offset, len);
+ } else
+ ubi_assert(written == len);
+
+ if (!err) {
+ err = self_check_write(ubi, buf, pnum, offset, len);
+ if (err)
+ return err;
+
+ /*
+ * Since we always write sequentially, the rest of the PEB has
+ * to contain only 0xFF bytes.
+ */
+ offset += len;
+ len = ubi->peb_size - offset;
+ if (len)
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
+ }
+
+ return err;
+}
+
+/**
+ * erase_callback - MTD erasure call-back.
+ * @ei: MTD erase information object.
+ *
+ * Note, even though the MTD erase interface is asynchronous, all the current
+ * implementations are synchronous anyway.
+ */
+static void erase_callback(struct erase_info *ei)
+{
+ wake_up_interruptible((wait_queue_head_t *)ei->priv);
+}
+
+/**
+ * do_sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to erase
+ *
+ * This function synchronously erases physical eraseblock @pnum and returns
+ * zero in case of success and a negative error code in case of failure. If
+ * %-EIO is returned, the physical eraseblock most probably went bad.
+ */
+static int do_sync_erase(struct ubi_device *ubi, int pnum)
+{
+ int err, retries = 0;
+ struct erase_info ei;
+ wait_queue_head_t wq;
+
+ dbg_io("erase PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+retry:
+ init_waitqueue_head(&wq);
+ memset(&ei, 0, sizeof(struct erase_info));
+
+ ei.mtd = ubi->mtd;
+ ei.addr = (loff_t)pnum * ubi->peb_size;
+ ei.len = ubi->peb_size;
+ ei.callback = erase_callback;
+ ei.priv = (unsigned long)&wq;
+
+ err = mtd_erase(ubi->mtd, &ei);
+ if (err) {
+ if (retries++ < UBI_IO_RETRIES) {
+ ubi_warn(ubi, "error %d while erasing PEB %d, retry",
+ err, pnum);
+ yield();
+ goto retry;
+ }
+ ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
+ dump_stack();
+ return err;
+ }
+
+ err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
+ ei.state == MTD_ERASE_FAILED);
+ if (err) {
+ ubi_err(ubi, "interrupted PEB %d erasure", pnum);
+ return -EINTR;
+ }
+
+ if (ei.state == MTD_ERASE_FAILED) {
+ if (retries++ < UBI_IO_RETRIES) {
+ ubi_warn(ubi, "error while erasing PEB %d, retry",
+ pnum);
+ yield();
+ goto retry;
+ }
+ ubi_err(ubi, "cannot erase PEB %d", pnum);
+ dump_stack();
+ return -EIO;
+ }
+
+ err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ if (err)
+ return err;
+
+ if (ubi_dbg_is_erase_failure(ubi)) {
+ ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Patterns to write to a physical eraseblock when torturing it */
+static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
+
+/**
+ * torture_peb - test a supposedly bad physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to test
+ *
+ * This function returns %-EIO if the physical eraseblock did not pass the
+ * test, a positive number of erase operations done if the test was
+ * successfully passed, and other negative error codes in case of other errors.
+ */
+static int torture_peb(struct ubi_device *ubi, int pnum)
+{
+ int err, i, patt_count;
+
+ ubi_msg(ubi, "run torture test for PEB %d", pnum);
+ patt_count = ARRAY_SIZE(patterns);
+ ubi_assert(patt_count > 0);
+
+ mutex_lock(&ubi->buf_mutex);
+ for (i = 0; i < patt_count; i++) {
+ err = do_sync_erase(ubi, pnum);
+ if (err)
+ goto out;
+
+ /* Make sure the PEB contains only 0xFF bytes */
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
+ if (err == 0) {
+ ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
+ pnum);
+ err = -EIO;
+ goto out;
+ }
+
+ /* Write a pattern and check it */
+ memset(ubi->peb_buf, patterns[i], ubi->peb_size);
+ err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ err = ubi_check_pattern(ubi->peb_buf, patterns[i],
+ ubi->peb_size);
+ if (err == 0) {
+ ubi_err(ubi, "pattern %x checking failed for PEB %d",
+ patterns[i], pnum);
+ err = -EIO;
+ goto out;
+ }
+ }
+
+ err = patt_count;
+ ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);
+
+out:
+ mutex_unlock(&ubi->buf_mutex);
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
+ /*
+ * If a bit-flip or data integrity error was detected, the test
+ * has not passed because it happened on a freshly erased
+ * physical eraseblock which means something is wrong with it.
+ */
+ ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
+ pnum);
+ err = -EIO;
+ }
+ return err;
+}
+
+/**
+ * nor_erase_prepare - prepare a NOR flash PEB for erasure.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to prepare
+ *
+ * NOR flashes, or at least some of them, have a peculiar embedded PEB
+ * erasure algorithm: the PEB is first filled with zeroes, then it is
+ * erased. And filling with zeroes starts from the end of the PEB. This
+ * was observed with Spansion S29GL512N NOR flash.
+ *
+ * This means that in case of a power cut we may end up with intact data at the
+ * beginning of the PEB, and all zeroes at the end of PEB. In other words, the
+ * EC and VID headers are OK, but a large chunk of data at the end of PEB is
+ * zeroed. This makes UBI mistakenly treat this PEB as used and associate it
+ * with an LEB, which leads to subsequent failures (e.g., UBIFS fails).
+ *
+ * This function is called before erasing NOR PEBs and it zeroes out EC and VID
+ * magic numbers in order to invalidate them and prevent the failures. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
+{
+ int err;
+ size_t written;
+ loff_t addr;
+ uint32_t data = 0;
+ struct ubi_ec_hdr ec_hdr;
+
+ /*
+ * Note, we cannot generally define VID header buffers on stack,
+ * because of the way we deal with these buffers (see the header
+ * comment in this file). But we know this is a NOR-specific piece of
+ * code, so we can do this. But yes, this is error-prone and we should
+ * (pre-)allocate VID header buffer instead.
+ */
+ struct ubi_vid_hdr vid_hdr;
+
+ /*
+ * If VID or EC is valid, we have to corrupt them before erasing.
+ * It is important to first invalidate the EC header, and then the VID
+ * header. Otherwise a power cut may lead to valid EC header and
+ * invalid VID header, in which case UBI will treat this PEB as
+ * corrupted and will try to preserve it, and print scary warnings.
+ */
+ addr = (loff_t)pnum * ubi->peb_size;
+ err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+	    err != UBI_IO_FF) {
+		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+		if (err)
+ goto error;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+	    err != UBI_IO_FF) {
+ addr += ubi->vid_hdr_aloffset;
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err)
+ goto error;
+ }
+ return 0;
+
+error:
+ /*
+ * The PEB contains a valid VID or EC header, but we cannot invalidate
+ * it. Supposedly the flash media or the driver is screwed up, so
+ * return an error.
+ */
+ ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
+ ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
+ return -EIO;
+}
+
+/**
+ * ubi_io_sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to erase
+ * @torture: if this physical eraseblock has to be tortured
+ *
+ * This function synchronously erases physical eraseblock @pnum. If @torture
+ * flag is not zero, the physical eraseblock is checked by means of writing
+ * different patterns to it and reading them back. If the torturing is enabled,
+ * the physical eraseblock is erased more than once.
+ *
+ * This function returns the number of erasures made in case of success, %-EIO
+ * if the erasure failed or the torturing test failed, and other negative error
+ * codes in case of other errors. Note, %-EIO means that the physical
+ * eraseblock is bad.
+ */
+int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
+{
+ int err, ret = 0;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ err = self_check_not_bad(ubi, pnum);
+ if (err != 0)
+ return err;
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+ if (ubi->nor_flash) {
+ err = nor_erase_prepare(ubi, pnum);
+ if (err)
+ return err;
+ }
+
+ if (torture) {
+ ret = torture_peb(ubi, pnum);
+ if (ret < 0)
+ return ret;
+ }
+
+ err = do_sync_erase(ubi, pnum);
+ if (err)
+ return err;
+
+ return ret + 1;
+}
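+
+/*
+ * Example of the return value: a plain erase returns 1; with @torture
+ * set, the three-pattern torture test above adds three more erasures,
+ * so a fully successful call returns 4.
+ */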
+
+/**
+ * ubi_io_is_bad - check if a physical eraseblock is bad.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns a positive number if the physical eraseblock is bad,
+ * zero if not, and a negative error code if an error occurred.
+ */
+int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
+{
+ struct mtd_info *mtd = ubi->mtd;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->bad_allowed) {
+ int ret;
+
+ ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
+ if (ret < 0)
+ ubi_err(ubi, "error %d while checking if PEB %d is bad",
+ ret, pnum);
+ else if (ret)
+ dbg_io("PEB %d is bad", pnum);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_io_mark_bad - mark a physical eraseblock as bad.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to mark
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ struct mtd_info *mtd = ubi->mtd;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+ if (!ubi->bad_allowed)
+ return 0;
+
+ err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
+ if (err)
+ ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
+ return err;
+}
+
+/**
+ * validate_ec_hdr - validate an erase counter header.
+ * @ubi: UBI device description object
+ * @ec_hdr: the erase counter header to check
+ *
+ * This function returns zero if the erase counter header is OK, and %1 if
+ * not.
+ */
+static int validate_ec_hdr(const struct ubi_device *ubi,
+ const struct ubi_ec_hdr *ec_hdr)
+{
+ long long ec;
+ int vid_hdr_offset, leb_start;
+
+ ec = be64_to_cpu(ec_hdr->ec);
+ vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
+ leb_start = be32_to_cpu(ec_hdr->data_offset);
+
+ if (ec_hdr->version != UBI_VERSION) {
+ ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
+ UBI_VERSION, (int)ec_hdr->version);
+ goto bad;
+ }
+
+ if (vid_hdr_offset != ubi->vid_hdr_offset) {
+ ubi_err(ubi, "bad VID header offset %d, expected %d",
+ vid_hdr_offset, ubi->vid_hdr_offset);
+ goto bad;
+ }
+
+ if (leb_start != ubi->leb_start) {
+ ubi_err(ubi, "bad data offset %d, expected %d",
+ leb_start, ubi->leb_start);
+ goto bad;
+ }
+
+ if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
+ ubi_err(ubi, "bad erase counter %lld", ec);
+ goto bad;
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "bad EC header");
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ return 1;
+}
+
+/**
+ * ubi_io_read_ec_hdr - read and check an erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to read from
+ * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
+ * header
+ * @verbose: be verbose if the header is corrupted or was not found
+ *
+ * This function reads erase counter header from physical eraseblock @pnum and
+ * stores it in @ec_hdr. This function also checks CRC checksum of the read
+ * erase counter header. The following codes may be returned:
+ *
+ * o %0 if the CRC checksum is correct and the header was successfully read;
+ * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
+ * and corrected by the flash driver; this is harmless but may indicate that
+ * this eraseblock may become bad soon (though not necessarily);
+ * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error);
+ * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was
+ * a data integrity error (uncorrectable ECC error in case of NAND);
+ * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty);
+ * o a negative error code in case of failure.
+ *
+ * A caller's-eye dispatch sketch of these codes follows the function body.
+ */
+int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr, int verbose)
+{
+ int err, read_err;
+ uint32_t crc, magic, hdr_crc;
+
+ dbg_io("read EC header from PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
+ if (read_err) {
+ if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
+ return read_err;
+
+ /*
+ * We read all the data, but either a correctable bit-flip
+ * occurred, or MTD reported a data integrity error
+ * (uncorrectable ECC error in case of NAND). The former is
+ * harmless, the latter may mean that the read data is
+ * corrupted. But we have a CRC check-sum and we will detect
+ * this. If the EC header is still OK, we just report it as if
+ * there was a bit-flip, to force scrubbing.
+ */
+ }
+
+ magic = be32_to_cpu(ec_hdr->magic);
+ if (magic != UBI_EC_HDR_MAGIC) {
+ if (mtd_is_eccerr(read_err))
+ return UBI_IO_BAD_HDR_EBADMSG;
+
+ /*
+ * The magic field is wrong. Let's check if we have read all
+ * 0xFF. If yes, this physical eraseblock is assumed to be
+ * empty.
+ */
+ if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
+ /* The physical eraseblock is supposedly empty */
+ if (verbose)
+ ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ if (!read_err)
+ return UBI_IO_FF;
+ else
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ /*
+ * This is not a valid erase counter header, and these are not
+ * 0xFF bytes. Report that the header is corrupted.
+ */
+ if (verbose) {
+ ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ ubi_dump_ec_hdr(ec_hdr);
+ }
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ return UBI_IO_BAD_HDR;
+ }
+
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
+
+ if (hdr_crc != crc) {
+ if (verbose) {
+ ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_ec_hdr(ec_hdr);
+ }
+ dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+
+ if (!read_err)
+ return UBI_IO_BAD_HDR;
+ else
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ /* And of course validate what has just been read from the media */
+ err = validate_ec_hdr(ubi, ec_hdr);
+ if (err) {
+ ubi_err(ubi, "validation failed for PEB %d", pnum);
+ return -EINVAL;
+ }
+
+ /*
+ * If there was %-EBADMSG, but the header CRC is still OK, report about
+ * a bit-flip to force scrubbing on this PEB.
+ */
+ return read_err ? UBI_IO_BITFLIPS : 0;
+}
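+
+/*
+ * Caller's-eye dispatch sketch (hypothetical, pseudo-code) for the return
+ * codes listed above, e.g. as used during attach:
+ *
+ *  err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ *  switch (err) {
+ *  case 0:                      use be64_to_cpu(ech->ec);
+ *  case UBI_IO_BITFLIPS:        use the EC, but schedule scrubbing;
+ *  case UBI_IO_FF:
+ *  case UBI_IO_FF_BITFLIPS:     treat the PEB as empty/free;
+ *  case UBI_IO_BAD_HDR:
+ *  case UBI_IO_BAD_HDR_EBADMSG: treat the PEB as corrupted;
+ *  default:                     err < 0, propagate the error;
+ *  }
+ */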
+
+/**
+ * ubi_io_write_ec_hdr - write an erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to write to
+ * @ec_hdr: the erase counter header to write
+ *
+ * This function writes erase counter header described by @ec_hdr to physical
+ * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so
+ * the caller does not have to fill them. The caller must only fill the
+ * @ec_hdr->ec field.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If %-EIO is returned, the physical eraseblock most probably
+ * went bad.
+ */
+int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr)
+{
+ int err;
+ uint32_t crc;
+
+ dbg_io("write EC header to PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
+ ec_hdr->version = UBI_VERSION;
+ ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
+ ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
+ ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ ec_hdr->hdr_crc = cpu_to_be32(crc);
+
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
+ if (err)
+ return err;
+
+ if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
+ return -EROFS;
+
+ err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
+ return err;
+}
+
+/**
+ * validate_vid_hdr - validate a volume identifier header.
+ * @ubi: UBI device description object
+ * @vid_hdr: the volume identifier header to check
+ *
+ * This function checks the data stored in the volume identifier header
+ * @vid_hdr. Returns zero if the VID header is OK and %1 if not.
+ */
+static int validate_vid_hdr(const struct ubi_device *ubi,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ int vol_type = vid_hdr->vol_type;
+ int copy_flag = vid_hdr->copy_flag;
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int lnum = be32_to_cpu(vid_hdr->lnum);
+ int compat = vid_hdr->compat;
+ int data_size = be32_to_cpu(vid_hdr->data_size);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+ int data_crc = be32_to_cpu(vid_hdr->data_crc);
+ int usable_leb_size = ubi->leb_size - data_pad;
+
+ if (copy_flag != 0 && copy_flag != 1) {
+ ubi_err(ubi, "bad copy_flag");
+ goto bad;
+ }
+
+ if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
+ data_pad < 0) {
+ ubi_err(ubi, "negative values");
+ goto bad;
+ }
+
+ if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err(ubi, "bad vol_id");
+ goto bad;
+ }
+
+ if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
+ ubi_err(ubi, "bad compat");
+ goto bad;
+ }
+
+ if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
+ compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
+ compat != UBI_COMPAT_REJECT) {
+ ubi_err(ubi, "bad compat");
+ goto bad;
+ }
+
+ if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
+ ubi_err(ubi, "bad vol_type");
+ goto bad;
+ }
+
+ if (data_pad >= ubi->leb_size / 2) {
+ ubi_err(ubi, "bad data_pad");
+ goto bad;
+ }
+
+ if (vol_type == UBI_VID_STATIC) {
+ /*
+ * Although from a high-level point of view static volumes may
+ * contain zero bytes of data, no VID header can contain zero in
+ * these fields, because empty volumes do not have mapped
+ * logical eraseblocks.
+ */
+ if (used_ebs == 0) {
+ ubi_err(ubi, "zero used_ebs");
+ goto bad;
+ }
+ if (data_size == 0) {
+ ubi_err(ubi, "zero data_size");
+ goto bad;
+ }
+ if (lnum < used_ebs - 1) {
+ if (data_size != usable_leb_size) {
+ ubi_err(ubi, "bad data_size");
+ goto bad;
+ }
+ } else if (lnum == used_ebs - 1) {
+ if (data_size == 0) {
+ ubi_err(ubi, "bad data_size at last LEB");
+ goto bad;
+ }
+ } else {
+ ubi_err(ubi, "too high lnum");
+ goto bad;
+ }
+ } else {
+ if (copy_flag == 0) {
+ if (data_crc != 0) {
+ ubi_err(ubi, "non-zero data CRC");
+ goto bad;
+ }
+ if (data_size != 0) {
+ ubi_err(ubi, "non-zero data_size");
+ goto bad;
+ }
+ } else {
+ if (data_size == 0) {
+ ubi_err(ubi, "zero data_size of copy");
+ goto bad;
+ }
+ }
+ if (used_ebs != 0) {
+ ubi_err(ubi, "bad used_ebs");
+ goto bad;
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "bad VID header");
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ return 1;
+}
+
+/**
+ * ubi_io_read_vid_hdr - read and check a volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to read from
+ * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume
+ * identifier header
+ * @verbose: be verbose if the header is corrupted or wasn't found
+ *
+ * This function reads the volume identifier header from physical eraseblock
+ * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
+ * volume identifier header. The error codes are the same as in
+ * 'ubi_io_read_ec_hdr()'.
+ *
+ * Note, the implementation of this function is also very similar to
+ * 'ubi_io_read_ec_hdr()', so refer to the commentaries in 'ubi_io_read_ec_hdr()'.
+ */
+int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr, int verbose)
+{
+ int err, read_err;
+ uint32_t crc, magic, hdr_crc;
+ void *p;
+
+ dbg_io("read VID header from PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ p = (char *)vid_hdr - ubi->vid_hdr_shift;
+ read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
+ return read_err;
+
+ magic = be32_to_cpu(vid_hdr->magic);
+ if (magic != UBI_VID_HDR_MAGIC) {
+ if (mtd_is_eccerr(read_err))
+ return UBI_IO_BAD_HDR_EBADMSG;
+
+ if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
+ if (verbose)
+ ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ if (!read_err)
+ return UBI_IO_FF;
+ else
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ if (verbose) {
+ ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ ubi_dump_vid_hdr(vid_hdr);
+ }
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ return UBI_IO_BAD_HDR;
+ }
+
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
+
+ if (hdr_crc != crc) {
+ if (verbose) {
+ ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_vid_hdr(vid_hdr);
+ }
+ dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ if (!read_err)
+ return UBI_IO_BAD_HDR;
+ else
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ err = validate_vid_hdr(ubi, vid_hdr);
+ if (err) {
+ ubi_err(ubi, "validation failed for PEB %d", pnum);
+ return -EINVAL;
+ }
+
+ return read_err ? UBI_IO_BITFLIPS : 0;
+}
+
+/**
+ * ubi_io_write_vid_hdr - write a volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to write to
+ * @vid_hdr: the volume identifier header to write
+ *
+ * This function writes the volume identifier header described by @vid_hdr to
+ * physical eraseblock @pnum. This function automatically fills the
+ * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates
+ * header CRC checksum and stores it at vid_hdr->hdr_crc.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If %-EIO is returned, the physical eraseblock probably went
+ * bad.
+ */
+int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr)
+{
+ int err;
+ uint32_t crc;
+ void *p;
+
+ dbg_io("write VID header to PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ err = self_check_peb_ec_hdr(ubi, pnum);
+ if (err)
+ return err;
+
+ vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
+ vid_hdr->version = UBI_VERSION;
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ vid_hdr->hdr_crc = cpu_to_be32(crc);
+
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
+ if (err)
+ return err;
+
+ if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
+ return -EROFS;
+
+ p = (char *)vid_hdr - ubi->vid_hdr_shift;
+ err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ return err;
+}
+
+/**
+ * self_check_not_bad - ensure that a physical eraseblock is not bad.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to check
+ *
+ * This function returns zero if the physical eraseblock is good, %-EINVAL if
+ * it is bad and a negative error code if an error occurred.
+ */
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ err = ubi_io_is_bad(ubi, pnum);
+ if (!err)
+ return err;
+
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ dump_stack();
+ return err > 0 ? -EINVAL : err;
+}
+
+/**
+ * self_check_ec_hdr - check if an erase counter header is all right.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number the erase counter header belongs to
+ * @ec_hdr: the erase counter header to check
+ *
+ * This function returns zero if the erase counter header contains valid
+ * values, and %-EINVAL if not.
+ */
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr)
+{
+ int err;
+ uint32_t magic;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ magic = be32_to_cpu(ec_hdr->magic);
+ if (magic != UBI_EC_HDR_MAGIC) {
+ ubi_err(ubi, "bad magic %#08x, must be %#08x",
+ magic, UBI_EC_HDR_MAGIC);
+ goto fail;
+ }
+
+ err = validate_ec_hdr(ubi, ec_hdr);
+ if (err) {
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ return -EINVAL;
+}
+
+/**
+ * self_check_peb_ec_hdr - check erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns zero if the erase counter header is all right and
+ * a negative error code if not or if an error occurred.
+ */
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ uint32_t crc, hdr_crc;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto exit;
+
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
+ if (hdr_crc != crc) {
+ ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
+ crc, hdr_crc);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ err = -EINVAL;
+ goto exit;
+ }
+
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
+
+exit:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * self_check_vid_hdr - check that a volume identifier header is all right.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number the volume identifier header belongs to
+ * @vid_hdr: the volume identifier header to check
+ *
+ * This function returns zero if the volume identifier header is all right, and
+ * %-EINVAL if not.
+ */
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ int err;
+ uint32_t magic;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ magic = be32_to_cpu(vid_hdr->magic);
+ if (magic != UBI_VID_HDR_MAGIC) {
+ ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
+ magic, pnum, UBI_VID_HDR_MAGIC);
+ goto fail;
+ }
+
+ err = validate_vid_hdr(ubi, vid_hdr);
+ if (err) {
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ goto fail;
+ }
+
+ return err;
+
+fail:
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ return -EINVAL;
+}
+
+/**
+ * self_check_peb_vid_hdr - check volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns zero if the volume identifier header is all right,
+ * and a negative error code if not or if an error occurred.
+ */
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ uint32_t crc, hdr_crc;
+ struct ubi_vid_hdr *vid_hdr;
+ void *p;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ p = (char *)vid_hdr - ubi->vid_hdr_shift;
+ err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto exit;
+
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
+ if (hdr_crc != crc) {
+ ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ err = -EINVAL;
+ goto exit;
+ }
+
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
+
+exit:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+}
+
+/**
+ * self_check_write - make sure write succeeded.
+ * @ubi: UBI device description object
+ * @buf: buffer with data which were written
+ * @pnum: physical eraseblock number the data were written to
+ * @offset: offset within the physical eraseblock the data were written to
+ * @len: how many bytes were written
+ *
+ * This function reads back data which were recently written and compares them
+ * with the original data buffer - the data have to match. Returns zero if the
+ * data match and a negative error code if not or in case of failure.
+ */
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len)
+{
+ int err, i;
+ size_t read;
+ void *buf1;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+ if (!buf1) {
+ ubi_err(ubi, "cannot allocate memory to check writes");
+ return 0;
+ }
+
+ err = mtd_read(ubi->mtd, addr, len, &read, buf1);
+ if (err && !mtd_is_bitflip(err))
+ goto out_free;
+
+ for (i = 0; i < len; i++) {
+ uint8_t c = ((uint8_t *)buf)[i];
+ uint8_t c1 = ((uint8_t *)buf1)[i];
+ int dump_len;
+
+ if (c == c1)
+ continue;
+
+ ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
+ pnum, offset, len);
+ ubi_msg(ubi, "data differ at position %d", i);
+ dump_len = min_t(int, 128, len - i);
+ ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
+ i, i + dump_len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ buf + i, dump_len, 1);
+ ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
+ i, i + dump_len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ buf1 + i, dump_len, 1);
+ dump_stack();
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ vfree(buf1);
+ return 0;
+
+out_free:
+ vfree(buf1);
+ return err;
+}
+
+/**
+ * ubi_self_check_all_ff - check that a region of flash is empty.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ * @offset: the starting offset within the physical eraseblock to check
+ * @len: the length of the region to check
+ *
+ * This function returns zero if only 0xFF bytes are present at offset
+ * @offset of the physical eraseblock @pnum, and a negative error code if not
+ * or if an error occurred.
+ */
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ size_t read;
+ int err;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+ if (!buf) {
+ ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
+ return 0;
+ }
+
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && !mtd_is_bitflip(err)) {
+ ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
+ goto error;
+ }
+
+ err = ubi_check_pattern(buf, 0xFF, len);
+ if (err == 0) {
+ ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+ pnum, offset, len);
+ goto fail;
+ }
+
+ vfree(buf);
+ return 0;
+
+fail:
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+ err = -EINVAL;
+error:
+ dump_stack();
+ vfree(buf);
+ return err;
+}
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
new file mode 100644
index 000000000..e84488773
--- /dev/null
+++ b/drivers/mtd/ubi/kapi.c
@@ -0,0 +1,864 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/* This file mostly implements UBI kernel API functions */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
+#include <asm/div64.h>
+#include "ubi.h"
+
+/**
+ * ubi_do_get_device_info - get information about UBI device.
+ * @ubi: UBI device description object
+ * @di: the information is stored here
+ *
+ * This function is the same as 'ubi_get_device_info()', but it assumes the UBI
+ * device is locked and cannot disappear.
+ */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
+{
+ di->ubi_num = ubi->ubi_num;
+ di->leb_size = ubi->leb_size;
+ di->leb_start = ubi->leb_start;
+ di->min_io_size = ubi->min_io_size;
+ di->max_write_size = ubi->max_write_size;
+ di->ro_mode = ubi->ro_mode;
+ di->cdev = ubi->cdev.dev;
+}
+EXPORT_SYMBOL_GPL(ubi_do_get_device_info);
+
+/**
+ * ubi_get_device_info - get information about UBI device.
+ * @ubi_num: UBI device number
+ * @di: the information is stored here
+ *
+ * This function returns %0 in case of success, %-EINVAL if the UBI device
+ * number is invalid, and %-ENODEV if there is no such UBI device.
+ */
+int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
+{
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ ubi_do_get_device_info(ubi, di);
+ ubi_put_device(ubi);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ubi_get_device_info);
+
+/**
+ * ubi_do_get_volume_info - get information about UBI volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @vi: the information is stored here
+ */
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi)
+{
+ vi->vol_id = vol->vol_id;
+ vi->ubi_num = ubi->ubi_num;
+ vi->size = vol->reserved_pebs;
+ vi->used_bytes = vol->used_bytes;
+ vi->vol_type = vol->vol_type;
+ vi->corrupted = vol->corrupted;
+ vi->upd_marker = vol->upd_marker;
+ vi->alignment = vol->alignment;
+ vi->usable_leb_size = vol->usable_leb_size;
+ vi->name_len = vol->name_len;
+ vi->name = vol->name;
+ vi->cdev = vol->cdev.dev;
+}
+
+/**
+ * ubi_get_volume_info - get information about UBI volume.
+ * @desc: volume descriptor
+ * @vi: the information is stored here
+ */
+void ubi_get_volume_info(struct ubi_volume_desc *desc,
+ struct ubi_volume_info *vi)
+{
+ ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi);
+}
+EXPORT_SYMBOL_GPL(ubi_get_volume_info);
+
+/**
+ * ubi_open_volume - open UBI volume.
+ * @ubi_num: UBI device number
+ * @vol_id: volume ID
+ * @mode: open mode
+ *
+ * The @mode parameter specifies if the volume should be opened in read-only
+ * mode, read-write mode, or exclusive mode. The exclusive mode guarantees that
+ * nobody else will be able to open this volume. UBI allows a volume to have
+ * many readers but only one writer at a time.
+ *
+ * If a static volume is being opened for the first time since boot, it will be
+ * checked by this function, which means it will be fully read and the CRC
+ * checksum of each logical eraseblock will be checked.
+ *
+ * This function returns volume descriptor in case of success and a negative
+ * error code in case of failure.
+ */
+struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
+{
+ int err;
+ struct ubi_volume_desc *desc;
+ struct ubi_device *ubi;
+ struct ubi_volume *vol;
+
+ dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ if (mode != UBI_READONLY && mode != UBI_READWRITE &&
+ mode != UBI_EXCLUSIVE && mode != UBI_METAONLY)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * First of all, we have to get the UBI device to prevent its removal.
+ */
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
+ err = -EINVAL;
+ goto out_put_ubi;
+ }
+
+ desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_put_ubi;
+ }
+
+ err = -ENODEV;
+ if (!try_module_get(THIS_MODULE))
+ goto out_free;
+
+ spin_lock(&ubi->volumes_lock);
+ vol = ubi->volumes[vol_id];
+ if (!vol)
+ goto out_unlock;
+
+ err = -EBUSY;
+ switch (mode) {
+ case UBI_READONLY:
+ if (vol->exclusive)
+ goto out_unlock;
+ vol->readers += 1;
+ break;
+
+ case UBI_READWRITE:
+ if (vol->exclusive || vol->writers > 0)
+ goto out_unlock;
+ vol->writers += 1;
+ break;
+
+ case UBI_EXCLUSIVE:
+ if (vol->exclusive || vol->writers || vol->readers ||
+ vol->metaonly)
+ goto out_unlock;
+ vol->exclusive = 1;
+ break;
+
+ case UBI_METAONLY:
+ if (vol->metaonly || vol->exclusive)
+ goto out_unlock;
+ vol->metaonly = 1;
+ break;
+ }
+ get_device(&vol->dev);
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ desc->vol = vol;
+ desc->mode = mode;
+
+ mutex_lock(&ubi->ckvol_mutex);
+ if (!vol->checked) {
+ /* This is the first open - check the volume */
+ err = ubi_check_volume(ubi, vol_id);
+ if (err < 0) {
+ mutex_unlock(&ubi->ckvol_mutex);
+ ubi_close_volume(desc);
+ return ERR_PTR(err);
+ }
+ if (err == 1) {
+ ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
+ vol_id, ubi->ubi_num);
+ vol->corrupted = 1;
+ }
+ vol->checked = 1;
+ }
+ mutex_unlock(&ubi->ckvol_mutex);
+
+ return desc;
+
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ module_put(THIS_MODULE);
+out_free:
+ kfree(desc);
+out_put_ubi:
+ ubi_put_device(ubi);
+ ubi_err(ubi, "cannot open device %d, volume %d, error %d",
+ ubi_num, vol_id, err);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume);
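+
+/*
+ * Usage sketch (hypothetical caller, e.g. a file system sitting on top of
+ * UBI; the device number and volume ID are made-up values for illustration):
+ *
+ *  struct ubi_volume_desc *desc;
+ *
+ *  desc = ubi_open_volume(0, 3, UBI_READONLY);
+ *  if (IS_ERR(desc))
+ *      return PTR_ERR(desc);
+ *  ... read from the volume ...
+ *  ubi_close_volume(desc);
+ */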
+
+/**
+ * ubi_open_volume_nm - open UBI volume by name.
+ * @ubi_num: UBI device number
+ * @name: volume name
+ * @mode: open mode
+ *
+ * This function is similar to 'ubi_open_volume()', but opens a volume by name.
+ */
+struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
+ int mode)
+{
+ int i, vol_id = -1, len;
+ struct ubi_device *ubi;
+ struct ubi_volume_desc *ret;
+
+ dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode);
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len > UBI_VOL_NAME_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
+
+ spin_lock(&ubi->volumes_lock);
+ /* Walk all volumes of this UBI device */
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ struct ubi_volume *vol = ubi->volumes[i];
+
+ if (vol && len == vol->name_len && !strcmp(name, vol->name)) {
+ vol_id = i;
+ break;
+ }
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ if (vol_id >= 0)
+ ret = ubi_open_volume(ubi_num, vol_id, mode);
+ else
+ ret = ERR_PTR(-ENODEV);
+
+ /*
+ * We should put the UBI device even in case of success, because
+ * 'ubi_open_volume()' took a reference as well.
+ */
+ ubi_put_device(ubi);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
+
+/**
+ * ubi_open_volume_path - open UBI volume by its character device node path.
+ * @pathname: volume character device node path
+ * @mode: open mode
+ *
+ * This function is similar to 'ubi_open_volume()', but opens a volume by the
+ * path to its character device node.
+ */
+struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
+{
+ int error, ubi_num, vol_id, mod;
+ struct inode *inode;
+ struct path path;
+
+ dbg_gen("open volume %s, mode %d", pathname, mode);
+
+ if (!pathname || !*pathname)
+ return ERR_PTR(-EINVAL);
+
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return ERR_PTR(error);
+
+ inode = d_backing_inode(path.dentry);
+ mod = inode->i_mode;
+ ubi_num = ubi_major2num(imajor(inode));
+ vol_id = iminor(inode) - 1;
+ path_put(&path);
+
+ if (!S_ISCHR(mod))
+ return ERR_PTR(-EINVAL);
+ if (vol_id >= 0 && ubi_num >= 0)
+ return ubi_open_volume(ubi_num, vol_id, mode);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume_path);
+
+/**
+ * ubi_close_volume - close UBI volume.
+ * @desc: volume descriptor
+ */
+void ubi_close_volume(struct ubi_volume_desc *desc)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_gen("close device %d, volume %d, mode %d",
+ ubi->ubi_num, vol->vol_id, desc->mode);
+
+ spin_lock(&ubi->volumes_lock);
+ switch (desc->mode) {
+ case UBI_READONLY:
+ vol->readers -= 1;
+ break;
+ case UBI_READWRITE:
+ vol->writers -= 1;
+ break;
+ case UBI_EXCLUSIVE:
+ vol->exclusive = 0;
+ break;
+ case UBI_METAONLY:
+ vol->metaonly = 0;
+ break;
+ }
+ vol->ref_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ kfree(desc);
+ put_device(&vol->dev);
+ ubi_put_device(ubi);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(ubi_close_volume);
+
+/**
+ * leb_read_sanity_check - does sanity checks on read requests.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ *
+ * This function is used by ubi_leb_read() and ubi_leb_read_sg()
+ * to perform sanity checks.
+ */
+static int leb_read_sanity_check(struct ubi_volume_desc *desc, int lnum,
+ int offset, int len)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
+ lnum >= vol->used_ebs || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size)
+ return -EINVAL;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ if (vol->used_ebs == 0)
+ /* Empty static UBI volume */
+ return 0;
+ if (lnum == vol->used_ebs - 1 &&
+ offset + len > vol->last_eb_bytes)
+ return -EINVAL;
+ }
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return 0;
+}
+
+/**
+ * ubi_leb_read - read data.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @buf: buffer where to store the read data
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ * @check: whether UBI has to check the read data's CRC or not.
+ *
+ * This function reads data from offset @offset of logical eraseblock @lnum and
+ * stores the data at @buf. When reading from static volumes, @check specifies
+ * whether the data has to be checked or not. If yes, the whole logical
+ * eraseblock will be read and its CRC checksum will be checked (i.e., the CRC
+ * checksum is per-eraseblock). So checking may substantially slow down the
+ * read speed. The @check argument is ignored for dynamic volumes.
+ *
+ * In case of success, this function returns zero. In case of failure, this
+ * function returns a negative error code.
+ *
+ * %-EBADMSG error code is returned:
+ * o for both static and dynamic volumes if MTD driver has detected a data
+ * integrity problem (unrecoverable ECC checksum mismatch in case of NAND);
+ * o for static volumes in case of data CRC mismatch.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF error code.
+ */
+int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
+ int len, int check)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, vol_id = vol->vol_id;
+
+ dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ err = leb_read_sanity_check(desc, lnum, offset, len);
+ if (err < 0)
+ return err;
+
+ if (len == 0)
+ return 0;
+
+ err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
+ ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
+ vol->corrupted = 1;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_leb_read);
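+
+/*
+ * Usage sketch (hypothetical caller, pseudo-code): read one whole LEB of a
+ * dynamic volume (for which the @check argument is ignored):
+ *
+ *  struct ubi_volume_info vi;
+ *  char *buf;
+ *
+ *  ubi_get_volume_info(desc, &vi);
+ *  buf = vmalloc(vi.usable_leb_size);
+ *  if (!buf)
+ *      return -ENOMEM;
+ *  err = ubi_leb_read(desc, lnum, buf, 0, vi.usable_leb_size, 0);
+ *  if (err == -EBADMSG)
+ *      the data are corrupted - e.g. fall back to a redundant copy;
+ */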
+
+
+/**
+ * ubi_leb_read_sg - read data into a scatter gather list.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @sgl: UBI scatter gather list where to store the read data
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ * @check: whether UBI has to check the read data's CRC or not.
+ *
+ * This function works exactly like ubi_leb_read(), but instead of
+ * storing the read data in a buffer it stores them in an UBI scatter gather
+ * list.
+ */
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+ int offset, int len, int check)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, vol_id = vol->vol_id;
+
+ dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ err = leb_read_sanity_check(desc, lnum, offset, len);
+ if (err < 0)
+ return err;
+
+ if (len == 0)
+ return 0;
+
+ err = ubi_eba_read_leb_sg(ubi, vol, sgl, lnum, offset, len, check);
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
+ ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
+ vol->corrupted = 1;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_leb_read_sg);
+
+/**
+ * ubi_leb_write - write data.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to write to
+ * @buf: data to write
+ * @offset: offset within the logical eraseblock where to write
+ * @len: how many bytes to write
+ *
+ * This function writes @len bytes of data from @buf to offset @offset of
+ * logical eraseblock @lnum.
+ *
+ * This function takes care of physical eraseblock write failures. If write to
+ * the physical eraseblock write operation fails, the logical eraseblock is
+ * re-mapped to another physical eraseblock, the data is recovered, and the
+ * write finishes. UBI has a pool of reserved physical eraseblocks for this.
+ *
+ * If all the data were successfully written, zero is returned. If an error
+ * occurred and UBI has not been able to recover from it, this function returns
+ * a negative error code. Note, in case of an error, it is possible that
+ * something was still written to the flash media, but that may be some
+ * garbage.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF code.
+ */
+int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int offset, int len)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size ||
+ offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (len == 0)
+ return 0;
+
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_write);
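+
+/*
+ * Usage sketch (hypothetical caller, pseudo-code): @offset and @len must be
+ * multiples of the minimum I/O unit, so a writer typically pads its payload
+ * with 0xFF bytes (which UBI treats as "no data") up to the next I/O unit
+ * boundary; @buf is assumed to have room for the padding:
+ *
+ *  struct ubi_device_info di;
+ *  int aligned;
+ *
+ *  ubi_get_device_info(ubi_num, &di);
+ *  aligned = ALIGN(data_len, di.min_io_size);
+ *  memset(buf + data_len, 0xFF, aligned - data_len);
+ *  err = ubi_leb_write(desc, lnum, buf, 0, aligned);
+ */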
+
+/**
+ * ubi_leb_change - change logical eraseblock atomically.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to change
+ * @buf: data to write
+ * @len: how many bytes to write
+ *
+ * This function changes the contents of a logical eraseblock atomically. @buf
+ * has to contain new logical eraseblock data, and @len - the length of the
+ * data, which has to be aligned. The length may be shorter than the logical
+ * eraseblock size, and the logical eraseblock may be appended to more times
+ * later on. This function guarantees that in case of an unclean reboot the old
+ * contents are preserved. Returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int len)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ dbg_gen("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 ||
+ len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (len == 0)
+ return 0;
+
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_change);
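+
+/*
+ * Usage sketch (hypothetical caller, pseudo-code, 'di' as obtained from
+ * ubi_get_device_info()): atomically replace a superblock-like structure in
+ * LEB 0 that must never be observed half-written; if power is cut, the next
+ * attach sees either the complete old or the complete new contents, never a
+ * mix:
+ *
+ *  len = ALIGN(sb_len, di.min_io_size);
+ *  memset(buf + sb_len, 0xFF, len - sb_len);
+ *  err = ubi_leb_change(desc, 0, buf, len);
+ */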
+
+/**
+ * ubi_leb_erase - erase logical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and synchronously erases the
+ * correspondent physical eraseblock. Returns zero in case of success and a
+ * negative error code in case of failure.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF code.
+ */
+int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err;
+
+ dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ return err;
+
+ return ubi_wl_flush(ubi, vol->vol_id, lnum);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_erase);
+
+/**
+ * ubi_leb_unmap - un-map logical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and schedules the
+ * corresponding physical eraseblock for erasure, so that it will eventually be
+ * physically erased in background. This operation is much faster than the
+ * erase operation.
+ *
+ * Unlike erase, the un-map operation does not guarantee that the logical
+ * eraseblock will contain all 0xFF bytes when UBI is initialized again. For
+ * example, if several logical eraseblocks are un-mapped, and an unclean reboot
+ * happens after this, the logical eraseblocks will not necessarily be
+ * un-mapped again when this MTD device is attached. They may actually be
+ * mapped to the same physical eraseblocks again. So, this function has to be
+ * used with care.
+ *
+ * In other words, when un-mapping a logical eraseblock, UBI does not store
+ * any information about this on the flash media, it just marks the logical
+ * eraseblock as "un-mapped" in RAM. If UBI is detached before the physical
+ * eraseblock is physically erased, it will be mapped again to the same logical
+ * eraseblock when the MTD device is attached again.
+ *
+ * The main and obvious use-case of this function is when the contents of a
+ * logical eraseblock has to be re-written. Then it is much more efficient to
+ * first un-map it, then write new data, rather than first erase it, then write
+ * new data. Note, once new data has been written to the logical eraseblock,
+ * UBI guarantees that the old contents are gone forever. In other words, if an
+ * unclean reboot happens after the logical eraseblock has been un-mapped and
+ * then written to, it will contain the last written data.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If the volume is damaged because of an interrupted update
+ * this function just returns immediately with %-EBADF code.
+ */
+int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return ubi_eba_unmap_leb(ubi, vol, lnum);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_unmap);
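+
+/*
+ * Usage sketch (hypothetical caller): the re-write pattern described above -
+ * un-map instead of erasing, then write the new contents; the old physical
+ * eraseblock is erased in background:
+ *
+ *  err = ubi_leb_unmap(desc, lnum);
+ *  if (err)
+ *      return err;
+ *  return ubi_leb_write(desc, lnum, buf, 0, len);
+ */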
+
+/**
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function maps an un-mapped logical eraseblock @lnum to a physical
+ * eraseblock. This means, that after a successful invocation of this
+ * function the logical eraseblock @lnum will be empty (contain only %0xFF
+ * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
+ * happens.
+ *
+ * This function returns zero in case of success, %-EBADF if the volume is
+ * damaged because of an interrupted update, %-EBADMSG if the logical
+ * eraseblock is already mapped, and other negative error codes in case of
+ * other failures.
+ */
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (vol->eba_tbl[lnum] >= 0)
+ return -EBADMSG;
+
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_map);
+
+/**
+ * ubi_is_mapped - check if logical eraseblock is mapped.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function checks if logical eraseblock @lnum is mapped to a physical
+ * eraseblock. If a logical eraseblock is un-mapped, this does not necessarily
+ * mean it will still be un-mapped after the UBI device is re-attached. The
+ * logical eraseblock may become mapped to the physical eraseblock it was last
+ * mapped to.
+ *
+ * This function returns %1 if the LEB is mapped, %0 if not, and a negative
+ * error code in case of failure. If the volume is damaged because of an
+ * interrupted update this function just returns immediately with %-EBADF error
+ * code.
+ */
+int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+
+ dbg_gen("test LEB %d:%d", vol->vol_id, lnum);
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return vol->eba_tbl[lnum] >= 0;
+}
+EXPORT_SYMBOL_GPL(ubi_is_mapped);
+
+/**
+ * ubi_sync - synchronize UBI device buffers.
+ * @ubi_num: UBI device to synchronize
+ *
+ * The underlying MTD device may cache data in hardware or in software. This
+ * function ensures the caches are flushed. Returns zero in case of success and
+ * a negative error code in case of failure.
+ */
+int ubi_sync(int ubi_num)
+{
+ struct ubi_device *ubi;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ mtd_sync(ubi->mtd);
+ ubi_put_device(ubi);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ubi_sync);
+
+/**
+ * ubi_flush - flush UBI work queue.
+ * @ubi_num: UBI device to flush work queue
+ * @vol_id: volume id to flush for
+ * @lnum: logical eraseblock number to flush for
+ *
+ * This function executes all pending work for a particular volume id / logical
+ * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
+ * a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_flush(int ubi_num, int vol_id, int lnum)
+{
+ struct ubi_device *ubi;
+ int err = 0;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ err = ubi_wl_flush(ubi, vol_id, lnum);
+ ubi_put_device(ubi);
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_flush);
+
+BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
+
+/**
+ * ubi_register_volume_notifier - register a volume notifier.
+ * @nb: the notifier description object
+ * @ignore_existing: if non-zero, do not send "added" notification for all
+ * already existing volumes
+ *
+ * This function registers a volume notifier, which means that
+ * 'nb->notifier_call()' will be invoked when an UBI volume is created,
+ * removed, re-sized, re-named, or updated. The first argument of the function
+ * is the notification type. The second argument is pointer to a
+ * &struct ubi_notification object which describes the notification event.
+ * Using UBI API from the volume notifier is prohibited.
+ *
+ * This function returns zero in case of success and a negative error code
+ * in case of failure.
+ */
+int ubi_register_volume_notifier(struct notifier_block *nb,
+ int ignore_existing)
+{
+ int err;
+
+ err = blocking_notifier_chain_register(&ubi_notifiers, nb);
+ if (err != 0)
+ return err;
+ if (ignore_existing)
+ return 0;
+
+ /*
+ * We are going to walk all UBI devices and all volumes, and
+ * notify the user about existing volumes by the %UBI_VOLUME_ADDED
+ * event. We have to lock the @ubi_devices_mutex to make sure UBI
+ * devices do not disappear.
+ */
+ mutex_lock(&ubi_devices_mutex);
+ ubi_enumerate_volumes(nb);
+ mutex_unlock(&ubi_devices_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_register_volume_notifier);
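+
+/*
+ * Registration sketch (hypothetical client; names are made up): a standard
+ * kernel notifier block is all that is needed, and %UBI_VOLUME_ADDED is one
+ * of the notification types passed as the second argument:
+ *
+ *  static int my_ubi_notify(struct notifier_block *nb, unsigned long type,
+ *                           void *ns_ptr)
+ *  {
+ *      struct ubi_notification *nt = ns_ptr;
+ *
+ *      if (type == UBI_VOLUME_ADDED)
+ *          pr_info("UBI volume %d added\n", nt->vi.vol_id);
+ *      return NOTIFY_OK;
+ *  }
+ *
+ *  static struct notifier_block my_nb = { .notifier_call = my_ubi_notify };
+ *
+ *  ubi_register_volume_notifier(&my_nb, 0);
+ */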
+
+/**
+ * ubi_unregister_volume_notifier - unregister the volume notifier.
+ * @nb: the notifier description object
+ *
+ * This function unregisters volume notifier @nb and returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_unregister_volume_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&ubi_notifiers, nb);
+}
+EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier);
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
new file mode 100644
index 000000000..2a45ac210
--- /dev/null
+++ b/drivers/mtd/ubi/misc.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/* Here we keep miscellaneous functions which are used all over the UBI code */
+
+#include "ubi.h"
+
+/**
+ * calc_data_len - calculate how much real data is stored in a buffer.
+ * @ubi: UBI device description object
+ * @buf: a buffer with the contents of the physical eraseblock
+ * @length: the buffer length
+ *
+ * This function calculates how much "real data" is stored in @buf and returns
+ * the length. Trailing 0xFF bytes at the end of the buffer are not
+ * considered "real data".
+ */
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
+ int length)
+{
+ int i;
+
+ ubi_assert(!(length & (ubi->min_io_size - 1)));
+
+ for (i = length - 1; i >= 0; i--)
+ if (((const uint8_t *)buf)[i] != 0xFF)
+ break;
+
+ /* The resulting length must be aligned to the minimum flash I/O size */
+ length = ALIGN(i + 1, ubi->min_io_size);
+ return length;
+}
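+
+/*
+ * Worked example (illustrative numbers): with a minimum I/O unit of 512
+ * bytes and a 2048-byte buffer whose last non-0xFF byte sits at offset 700,
+ * the real data length is 701 bytes, which is aligned up to 1024.
+ */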
+
+/**
+ * ubi_check_volume - check the contents of a static volume.
+ * @ubi: UBI device description object
+ * @vol_id: ID of the volume to check
+ *
+ * This function checks if static volume @vol_id is corrupted by fully reading
+ * it and checking data CRC. This function returns %0 if the volume is not
+ * corrupted, %1 if it is corrupted and a negative error code in case of
+ * failure. Dynamic volumes are not checked and zero is returned immediately.
+ */
+int ubi_check_volume(struct ubi_device *ubi, int vol_id)
+{
+ void *buf;
+ int err = 0, i;
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+
+ if (vol->vol_type != UBI_STATIC_VOLUME)
+ return 0;
+
+ buf = vmalloc(vol->usable_leb_size);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < vol->used_ebs; i++) {
+ int size;
+
+ cond_resched();
+
+ if (i == vol->used_ebs - 1)
+ size = vol->last_eb_bytes;
+ else
+ size = vol->usable_leb_size;
+
+ err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
+ if (err) {
+ if (mtd_is_eccerr(err))
+ err = 1;
+ break;
+ }
+ }
+
+ vfree(buf);
+ return err;
+}
+
+/**
+ * ubi_update_reserved - update bad eraseblock handling accounting data.
+ * @ubi: UBI device description object
+ *
+ * This function calculates the gap between current number of PEBs reserved for
+ * bad eraseblock handling and the required level of PEBs that must be
+ * reserved, and if necessary, reserves more PEBs to fill that gap, according
+ * to availability. Should be called with ubi->volumes_lock held.
+ */
+void ubi_update_reserved(struct ubi_device *ubi)
+{
+ int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+
+ if (need <= 0 || ubi->avail_pebs == 0)
+ return;
+
+ need = min_t(int, need, ubi->avail_pebs);
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ ubi_msg(ubi, "reserved more %d PEBs for bad PEB handling", need);
+}
+
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
+ * eraseblock handling.
+ * @ubi: UBI device description object
+ */
+void ubi_calculate_reserved(struct ubi_device *ubi)
+{
+ /*
+ * Calculate the actual number of PEBs currently needed to be reserved
+ * for future bad eraseblock handling.
+ */
+ ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
+ if (ubi->beb_rsvd_level < 0) {
+ ubi->beb_rsvd_level = 0;
+ ubi_warn(ubi, "number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+ ubi->bad_peb_count, ubi->bad_peb_limit);
+ }
+}
+
+/**
+ * ubi_check_pattern - check if buffer contains only a certain byte pattern.
+ * @buf: buffer to check
+ * @patt: the pattern to check
+ * @size: buffer size in bytes
+ *
+ * This function returns %1 if there are only @patt bytes in @buf, and %0 if
+ * something else was also found.
+ */
+int ubi_check_pattern(const void *buf, uint8_t patt, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (((const uint8_t *)buf)[i] != patt)
+ return 0;
+ return 1;
+}
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
new file mode 100644
index 000000000..d0d072e7c
--- /dev/null
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -0,0 +1,513 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Thomas Gleixner
+ * Frank Haverkamp
+ * Oliver Lohmann
+ * Andreas Arnez
+ */
+
+/*
+ * This file defines the layout of UBI headers and all the other UBI on-flash
+ * data structures.
+ */
+
+#ifndef __UBI_MEDIA_H__
+#define __UBI_MEDIA_H__
+
+#include <asm/byteorder.h>
+
+/* The version of UBI images supported by this implementation */
+#define UBI_VERSION 1
+
+/* The highest erase counter value supported by this implementation */
+#define UBI_MAX_ERASECOUNTER 0x7FFFFFFF
+
+/* The initial CRC32 value used when calculating CRC checksums */
+#define UBI_CRC32_INIT 0xFFFFFFFFU
+
+/* Erase counter header magic number (ASCII "UBI#") */
+#define UBI_EC_HDR_MAGIC 0x55424923
+/* Volume identifier header magic number (ASCII "UBI!") */
+#define UBI_VID_HDR_MAGIC 0x55424921
+
+/*
+ * Volume type constants used in the volume identifier header.
+ *
+ * @UBI_VID_DYNAMIC: dynamic volume
+ * @UBI_VID_STATIC: static volume
+ */
+enum {
+ UBI_VID_DYNAMIC = 1,
+ UBI_VID_STATIC = 2
+};
+
+/*
+ * Volume flags used in the volume table record.
+ *
+ * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
+ *
+ * %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume
+ * table. UBI automatically re-sizes the volume which has this flag and makes
+ * it as large as possible. This means that if after the
+ * initialization UBI finds out that there are available physical eraseblocks
+ * present on the device, it automatically appends all of them to the volume
+ * (the physical eraseblocks reserved for bad eraseblocks handling and other
+ * reserved physical eraseblocks are not taken). So, if there is a volume with
+ * the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical
+ * eraseblocks will be zero after UBI is loaded, because all of them will be
+ * reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared
+ * after the volume has been initialized.
+ *
+ * The auto-resize feature is useful for device production purposes. For
+ * example, different NAND flash chips may have different amounts of initial bad
+ * eraseblocks, depending on the particular chip instance. Manufacturers of NAND
+ * chips usually guarantee that the amount of initial bad eraseblocks does not
+ * exceed a certain percentage, e.g. 2%. When one creates an UBI image which
+ * will be flashed to the end devices in production, one does not know the exact
+ * amount of good physical eraseblocks the NAND chip on the device will have,
+ * but this number is required to calculate the volume sizes and put them into
+ * the volume table of the UBI image. In this case, one of the volumes (e.g., the one
+ * which will store the root file system) is marked as "auto-resizable", and
+ * UBI will adjust its size on the first boot if needed.
+ *
+ * Note, first UBI reserves some amount of physical eraseblocks for bad
+ * eraseblock handling, and then re-sizes the volume, not vice-versa. This
+ * means that the pool of reserved physical eraseblocks will always be present.
+ */
+enum {
+ UBI_VTBL_AUTORESIZE_FLG = 0x01,
+};
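+
+/*
+ * Illustrative sketch only, not part of the on-flash format: roughly how the
+ * attach code could detect the auto-resize volume while walking the volume
+ * table (the real handling is in the volume table code, see
+ * ubi_read_volume_table()):
+ *
+ *        for (i = 0; i < vtbl_slots; i++)
+ *                if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG)
+ *                        autoresize_vol_id = i;
+ */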
+
+/*
+ * Compatibility constants used by internal volumes.
+ *
+ * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
+ * to the flash
+ * @UBI_COMPAT_RO: attach this device in read-only mode
+ * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its
+ * physical eraseblocks, don't allow the wear-leveling
+ * sub-system to move them
+ * @UBI_COMPAT_REJECT: reject this UBI image
+ */
+enum {
+ UBI_COMPAT_DELETE = 1,
+ UBI_COMPAT_RO = 2,
+ UBI_COMPAT_PRESERVE = 4,
+ UBI_COMPAT_REJECT = 5
+};
+
+/* Sizes of UBI headers */
+#define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr)
+#define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr)
+
+/* Sizes of UBI headers without the ending CRC */
+#define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(__be32))
+#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32))
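+
+/*
+ * Illustrative sketch only (assumes <linux/crc32.h>): header checksums are
+ * computed over everything except the trailing CRC field, seeded with
+ * %UBI_CRC32_INIT. E.g., verifying an erase counter header could look like:
+ *
+ *        uint32_t crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ *
+ *        if (crc != be32_to_cpu(ec_hdr->hdr_crc))
+ *                return -EINVAL;
+ */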
+
+/**
+ * struct ubi_ec_hdr - UBI erase counter header.
+ * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC)
+ * @version: version of UBI implementation which is supposed to accept this
+ * UBI image
+ * @padding1: reserved for future, zeroes
+ * @ec: the erase counter
+ * @vid_hdr_offset: where the VID header starts
+ * @data_offset: where the user data start
+ * @image_seq: image sequence number
+ * @padding2: reserved for future, zeroes
+ * @hdr_crc: erase counter header CRC checksum
+ *
+ * The erase counter header takes 64 bytes and has plenty of unused space for
+ * future usage. The unused fields are zeroed. The @version field is used to
+ * indicate the version of UBI implementation which is supposed to be able to
+ * work with this UBI image. If @version is greater than the current UBI
+ * version, the image is rejected. This may be useful in future if something
+ * is changed radically. This field is duplicated in the volume identifier
+ * header.
+ *
+ * The @vid_hdr_offset and @data_offset fields contain the offset of the
+ * volume identifier header and user data, relative to the beginning of the
+ * physical eraseblock. These values have to be the same for all physical
+ * eraseblocks.
+ *
+ * The @image_seq field is used to validate a UBI image that has been prepared
+ * for a UBI device. The @image_seq value can be any value, but it must be the
+ * same on all eraseblocks. UBI will ensure that all new erase counter headers
+ * also contain this value, and will check the value when attaching the flash.
+ * One way to make use of @image_seq is to increase its value by one every time
+ * an image is flashed over an existing image; then, if the flashing does not
+ * complete, UBI will detect the error when attaching the media.
+ */
+struct ubi_ec_hdr {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be64 ec; /* Warning: the current limit is 31-bit anyway! */
+ __be32 vid_hdr_offset;
+ __be32 data_offset;
+ __be32 image_seq;
+ __u8 padding2[32];
+ __be32 hdr_crc;
+} __packed;
+
+/**
+ * struct ubi_vid_hdr - on-flash UBI volume identifier header.
+ * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC)
+ * @version: UBI implementation version which is supposed to accept this UBI
+ * image (%UBI_VERSION)
+ * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC)
+ * @copy_flag: if this logical eraseblock was copied from another physical
+ * eraseblock (for wear-leveling reasons)
+ * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE,
+ * %UBI_COMPAT_RO, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT)
+ * @vol_id: ID of this volume
+ * @lnum: logical eraseblock number
+ * @padding1: reserved for future, zeroes
+ * @data_size: how many bytes of data this logical eraseblock contains
+ * @used_ebs: total number of used logical eraseblocks in this volume
+ * @data_pad: how many bytes at the end of this physical eraseblock are not
+ * used
+ * @data_crc: CRC checksum of the data stored in this logical eraseblock
+ * @padding2: reserved for future, zeroes
+ * @sqnum: sequence number
+ * @padding3: reserved for future, zeroes
+ * @hdr_crc: volume identifier header CRC checksum
+ *
+ * The @sqnum is the value of the global sequence counter at the time when this
+ * VID header was created. The global sequence counter is incremented each time
+ * UBI writes a new VID header to the flash, i.e. when it maps a logical
+ * eraseblock to a new physical eraseblock. The global sequence counter is an
+ * unsigned 64-bit integer and we assume it never overflows. The @sqnum
+ * (sequence number) is used to distinguish between older and newer versions of
+ * logical eraseblocks.
+ *
+ * There are 2 situations when there may be more than one physical eraseblock
+ * corresponding to the same logical eraseblock, i.e., having the same @vol_id
+ * and @lnum values in the volume identifier header. Suppose we have a logical
+ * eraseblock L and it is mapped to the physical eraseblock P.
+ *
+ * 1. Because UBI may erase physical eraseblocks asynchronously, the following
+ * situation is possible: L is asynchronously erased, so P is scheduled for
+ * erasure, then L is written to, i.e., mapped to another physical eraseblock
+ * P1, so P1 is written to, then an unclean reboot happens. Result - there are
+ * 2 physical eraseblocks P and P1 corresponding to the same logical eraseblock
+ * L. But P1 has greater sequence number, so UBI picks P1 when it attaches the
+ * flash.
+ *
+ * 2. From time to time UBI moves logical eraseblocks to other physical
+ * eraseblocks for wear-leveling reasons. If, for example, UBI moves L from P
+ * to P1, and an unclean reboot happens before P is physically erased, there
+ * are two physical eraseblocks P and P1 corresponding to L and UBI has to
+ * select one of them when the flash is attached. The @sqnum field says which
+ * PEB is the original (obviously P will have the lower @sqnum) and which is
+ * the copy. But it is not enough to select the physical eraseblock with the
+ * higher sequence number, because the unclean reboot could have happened in
+ * the middle of the copying process, so the data in P1 may be corrupted. It is
+ * also not enough to just select the physical eraseblock with the lower
+ * sequence number, because the data there may be old (consider the case when
+ * more data was added to P1 after the copying). Moreover, the unclean reboot
+ * may happen when the erasure of P was just started, which results in an
+ * unstable P - "mostly" OK, but still containing unstable bits.
+ *
+ * UBI uses the @copy_flag field to indicate that this logical eraseblock is a
+ * copy. UBI also calculates data CRC when the data is moved and stores it at
+ * the @data_crc field of the copy (P1). So when UBI needs to pick one physical
+ * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is
+ * examined. If it is cleared, the situation is simple and the newer one is
+ * picked. If it is set, the data CRC of the copy (P1) is examined. If the CRC
+ * checksum is correct, this physical eraseblock is selected (P1). Otherwise
+ * the older one (P) is selected.
+ *
+ * There are 2 sorts of volumes in UBI: user volumes and internal volumes.
+ * Internal volumes are not seen from outside and are used for various internal
+ * UBI purposes. In this implementation there is only one internal volume - the
+ * layout volume. Internal volumes are the main mechanism of UBI extensions.
+ * For example, in the future one may introduce a journal internal volume.
+ * Internal volumes have their own reserved range of IDs.
+ *
+ * The @compat field is only used for internal volumes and contains the "degree
+ * of their compatibility". It is always zero for user volumes. This field
+ * provides a mechanism to introduce UBI extensions and to still be compatible
+ * with older UBI binaries. For example, if someone introduced a journal in the
+ * future, they would probably use %UBI_COMPAT_DELETE compatibility for the
+ * journal volume. And in this case, older UBI binaries, which know nothing
+ * about the journal volume, would just delete this volume and work perfectly
+ * fine. This is similar to what Ext2fs does when it is fed an Ext3fs image -
+ * it just ignores the Ext3fs journal.
+ *
+ * The @data_crc field contains the CRC checksum of the contents of the logical
+ * eraseblock if this is a static volume. In case of dynamic volumes, it does
+ * not contain the CRC checksum as a rule. The only exception is when the
+ * data of the physical eraseblock was moved by the wear-leveling sub-system,
+ * then the wear-leveling sub-system calculates the data CRC and stores it in
+ * the @data_crc field. And of course, the @copy_flag is %1 in this case.
+ *
+ * The @data_size field is used only for static volumes because UBI has to know
+ * how many bytes of data are stored in this eraseblock. For dynamic volumes,
+ * this field usually contains zero. The only exception is when the data of the
+ * physical eraseblock was moved to another physical eraseblock for
+ * wear-leveling reasons. In this case, UBI calculates the CRC checksum of the
+ * contents and uses both the @data_crc and @data_size fields, with @data_size
+ * holding the size of the moved data.
+ *
+ * The @used_ebs field is used only for static volumes and indicates how many
+ * eraseblocks the data of the volume takes. For dynamic volumes this field is
+ * not used and always contains zero.
+ *
+ * The @data_pad is calculated when volumes are created using the alignment
+ * parameter. So, effectively, the @data_pad field reduces the size of logical
+ * eraseblocks of this volume. This is very handy when one uses block-oriented
+ * software (say, cramfs) on top of the UBI volume.
+ */
+struct ubi_vid_hdr {
+ __be32 magic;
+ __u8 version;
+ __u8 vol_type;
+ __u8 copy_flag;
+ __u8 compat;
+ __be32 vol_id;
+ __be32 lnum;
+ __u8 padding1[4];
+ __be32 data_size;
+ __be32 used_ebs;
+ __be32 data_pad;
+ __be32 data_crc;
+ __u8 padding2[4];
+ __be64 sqnum;
+ __u8 padding3[12];
+ __be32 hdr_crc;
+} __packed;
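+
+/*
+ * Illustrative sketch of the P vs. P1 selection described above; the real
+ * implementation is ubi_compare_lebs(), and the names below are hypothetical
+ * pseudocode, not actual UBI helpers:
+ *
+ *        if (sqnum(P1) > sqnum(P)) {
+ *                if (!copy_flag(P1))
+ *                        pick P1 (plain overwrite - trust the newer PEB);
+ *                else if (data CRC of P1 is valid)
+ *                        pick P1 (the copy completed successfully);
+ *                else
+ *                        pick P (the copy was interrupted);
+ *        }
+ */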
+
+/* Internal UBI volumes count */
+#define UBI_INT_VOL_COUNT 1
+
+/*
+ * Starting ID of internal volumes: 0x7FFFEFFF.
+ * There is reserved room for 4096 internal volumes.
+ */
+#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
+
+/* The layout volume contains the volume table */
+
+#define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START
+#define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC
+#define UBI_LAYOUT_VOLUME_ALIGN 1
+#define UBI_LAYOUT_VOLUME_EBS 2
+#define UBI_LAYOUT_VOLUME_NAME "layout volume"
+#define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT
+
+/* The maximum number of volumes per one UBI device */
+#define UBI_MAX_VOLUMES 128
+
+/* The maximum volume name length */
+#define UBI_VOL_NAME_MAX 127
+
+/* Size of the volume table record */
+#define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record)
+
+/* Size of the volume table record without the ending CRC */
+#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32))
+
+/**
+ * struct ubi_vtbl_record - a record in the volume table.
+ * @reserved_pebs: how many physical eraseblocks are reserved for this volume
+ * @alignment: volume alignment
+ * @data_pad: how many bytes are unused at the end of each physical
+ * eraseblock to satisfy the requested alignment
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @upd_marker: if volume update was started but not finished
+ * @name_len: volume name length
+ * @name: the volume name
+ * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG)
+ * @padding: reserved, zeroes
+ * @crc: a CRC32 checksum of the record
+ *
+ * The volume table records are stored in the volume table, which is stored in
+ * the layout volume. The layout volume consists of 2 logical eraseblocks, each
+ * of which contains a copy of the volume table (i.e., the volume table is
+ * duplicated). The volume table is an array of &struct ubi_vtbl_record
+ * objects indexed by the volume ID.
+ *
+ * If the size of the logical eraseblock is large enough to fit
+ * %UBI_MAX_VOLUMES records, the volume table contains %UBI_MAX_VOLUMES
+ * records. Otherwise, it contains as many records as it can fit (i.e., size of
+ * logical eraseblock divided by sizeof(struct ubi_vtbl_record)).
+ *
+ * The @upd_marker flag is used to implement volume update. It is set to %1
+ * before update and set to %0 after the update. So if the update operation was
+ * interrupted, UBI knows that the volume is corrupted.
+ *
+ * The @alignment field is specified when the volume is created and cannot be
+ * later changed. It may be useful, for example, when a block-oriented file
+ * system works on top of UBI. The @data_pad field is calculated using the
+ * logical eraseblock size and @alignment. The alignment must be a multiple of
+ * the minimal flash I/O unit. If @alignment is 1, all the available space of
+ * the physical eraseblocks is used.
+ *
+ * Empty records contain all zeroes and the CRC checksum of those zeroes.
+ */
+struct ubi_vtbl_record {
+ __be32 reserved_pebs;
+ __be32 alignment;
+ __be32 data_pad;
+ __u8 vol_type;
+ __u8 upd_marker;
+ __be16 name_len;
+ __u8 name[UBI_VOL_NAME_MAX+1];
+ __u8 flags;
+ __u8 padding[23];
+ __be32 crc;
+} __packed;
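+
+/*
+ * Illustrative sketch only (assumes <linux/crc32.h>): each record is
+ * checksummed over everything except its trailing @crc field, so checking
+ * record @i of an in-memory volume table copy could look like:
+ *
+ *        crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
+ *        if (crc != be32_to_cpu(vtbl[i].crc))
+ *                return -EINVAL;
+ *
+ * An empty record is thus all zeroes plus the CRC32 of those zeroes.
+ */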
+
+/* UBI fastmap on-flash data structures */
+
+#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
+#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
+
+/* fastmap on-flash data structure format version */
+#define UBI_FM_FMT_VERSION 1
+
+#define UBI_FM_SB_MAGIC 0x7B11D69F
+#define UBI_FM_HDR_MAGIC 0xD4B82EF7
+#define UBI_FM_VHDR_MAGIC 0xFA370ED1
+#define UBI_FM_POOL_MAGIC 0x67AF4D08
+#define UBI_FM_EBA_MAGIC 0xf0c040a8
+
+/* A fastmap super block can be located between PEB 0 and
+ * UBI_FM_MAX_START */
+#define UBI_FM_MAX_START 64
+
+/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
+#define UBI_FM_MAX_BLOCKS 32
+
+/* 5% of the total number of PEBs has to be scanned while attaching
+ * from a fastmap.
+ * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
+ * UBI_FM_MAX_POOL_SIZE */
+#define UBI_FM_MIN_POOL_SIZE 8
+#define UBI_FM_MAX_POOL_SIZE 256
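+
+/*
+ * Illustrative sketch only: the attach-time pool size would be derived from
+ * the total PEB count and clamped to the limits above, e.g. (assuming
+ * clamp() from <linux/kernel.h>):
+ *
+ *        pool_size = clamp(peb_count / 20, UBI_FM_MIN_POOL_SIZE,
+ *                          UBI_FM_MAX_POOL_SIZE);
+ */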
+
+/**
+ * struct ubi_fm_sb - UBI fastmap super block
+ * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+ * @version: format version of this fastmap
+ * @data_crc: CRC over the fastmap data
+ * @used_blocks: number of PEBs used by this fastmap
+ * @block_loc: an array containing the location of all PEBs of the fastmap
+ * @block_ec: the erase counter of each used PEB
+ * @sqnum: highest sequence number value at the time the fastmap was taken
+ */
+struct ubi_fm_sb {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be32 data_crc;
+ __be32 used_blocks;
+ __be32 block_loc[UBI_FM_MAX_BLOCKS];
+ __be32 block_ec[UBI_FM_MAX_BLOCKS];
+ __be64 sqnum;
+ __u8 padding2[32];
+} __packed;
+
+/**
+ * struct ubi_fm_hdr - header of the fastmap data set
+ * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
+ * @free_peb_count: number of free PEBs known by this fastmap
+ * @used_peb_count: number of used PEBs known by this fastmap
+ * @scrub_peb_count: number of PEBs known by this fastmap that need scrubbing
+ * @bad_peb_count: number of bad PEBs known by this fastmap
+ * @erase_peb_count: number of PEBs which have to be erased
+ * @vol_count: number of UBI volumes known by this fastmap
+ */
+struct ubi_fm_hdr {
+ __be32 magic;
+ __be32 free_peb_count;
+ __be32 used_peb_count;
+ __be32 scrub_peb_count;
+ __be32 bad_peb_count;
+ __be32 erase_peb_count;
+ __be32 vol_count;
+ __u8 padding[4];
+} __packed;
+
+/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
+
+/**
+ * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
+ * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
+ * @size: current pool size
+ * @max_size: maximal pool size
+ * @pebs: an array containing the location of all PEBs in this pool
+ */
+struct ubi_fm_scan_pool {
+ __be32 magic;
+ __be16 size;
+ __be16 max_size;
+ __be32 pebs[UBI_FM_MAX_POOL_SIZE];
+ __be32 padding[4];
+} __packed;
+
+/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
+
+/**
+ * struct ubi_fm_ec - stores the erase counter of a PEB
+ * @pnum: PEB number
+ * @ec: ec of this PEB
+ */
+struct ubi_fm_ec {
+ __be32 pnum;
+ __be32 ec;
+} __packed;
+
+/**
+ * struct ubi_fm_volhdr - Fastmap volume header, identifies the start of an
+ * EBA table
+ * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
+ * @vol_id: volume id of the fastmapped volume
+ * @vol_type: type of the fastmapped volume
+ * @data_pad: data_pad value of the fastmapped volume
+ * @used_ebs: number of used LEBs within this volume
+ * @last_eb_bytes: number of bytes used in the last LEB
+ */
+struct ubi_fm_volhdr {
+ __be32 magic;
+ __be32 vol_id;
+ __u8 vol_type;
+ __u8 padding1[3];
+ __be32 data_pad;
+ __be32 used_ebs;
+ __be32 last_eb_bytes;
+ __u8 padding2[8];
+} __packed;
+
+/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
+
+/**
+ * struct ubi_fm_eba - denotes an association between a PEB and LEB
+ * @magic: EBA table magic number
+ * @reserved_pebs: number of table entries
+ * @pnum: PEB number of LEB (LEB is the index)
+ */
+struct ubi_fm_eba {
+ __be32 magic;
+ __be32 reserved_pebs;
+ __be32 pnum[0];
+} __packed;
+#endif /* !__UBI_MEDIA_H__ */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
new file mode 100644
index 000000000..c998212fc
--- /dev/null
+++ b/drivers/mtd/ubi/ubi.h
@@ -0,0 +1,1102 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __UBI_UBI_H__
+#define __UBI_UBI_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/notifier.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/ubi.h>
+#include <asm/pgtable.h>
+
+#include "ubi-media.h"
+
+/* Maximum number of supported UBI devices */
+#define UBI_MAX_DEVICES 32
+
+/* UBI name used for character devices, sysfs, etc */
+#define UBI_NAME_STR "ubi"
+
+/* Normal UBI messages */
+#define ubi_msg(ubi, fmt, ...) pr_notice(UBI_NAME_STR "%d: " fmt "\n", \
+ ubi->ubi_num, ##__VA_ARGS__)
+/* UBI warning messages */
+#define ubi_warn(ubi, fmt, ...) pr_warn(UBI_NAME_STR "%d warning: %s: " fmt "\n", \
+ ubi->ubi_num, __func__, ##__VA_ARGS__)
+/* UBI error messages */
+#define ubi_err(ubi, fmt, ...) pr_err(UBI_NAME_STR "%d error: %s: " fmt "\n", \
+ ubi->ubi_num, __func__, ##__VA_ARGS__)
+
+/* Background thread name pattern */
+#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
+
+/*
+ * This marker in the EBA table means that the LEB is un-mapped.
+ * NOTE! It has to have the same value as %UBI_ALL.
+ */
+#define UBI_LEB_UNMAPPED -1
+
+/*
+ * In case of errors, UBI tries to repeat the operation several times before
+ * returning an error. The below constant defines how many times UBI re-tries.
+ */
+#define UBI_IO_RETRIES 3
+
+/*
+ * Length of the protection queue. The length is effectively equivalent to the
+ * number of (global) erase cycles PEBs are protected from the wear-leveling
+ * worker.
+ */
+#define UBI_PROT_QUEUE_LEN 10
+
+/* The volume ID/LEB number/erase counter is unknown */
+#define UBI_UNKNOWN -1
+
+/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number plus 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
+/*
+ * Error codes returned by the I/O sub-system.
+ *
+ * UBI_IO_FF: the read region of flash contains only 0xFFs
+ * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data
+ * integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
+ * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a
+ * data integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BITFLIPS: bit-flips were detected and corrected
+ *
+ * Note, it is probably better to have bit-flip and ebadmsg as flags which can
+ * be or'ed with other error codes. But this is a big change because there are
+ * many callers, so it is not worth the risk of introducing a bug.
+ */
+enum {
+ UBI_IO_FF = 1,
+ UBI_IO_FF_BITFLIPS,
+ UBI_IO_BAD_HDR,
+ UBI_IO_BAD_HDR_EBADMSG,
+ UBI_IO_BITFLIPS,
+};
+
+/*
+ * Return codes of the 'ubi_eba_copy_leb()' function.
+ *
+ * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
+ * PEB was put meanwhile, or there is I/O on the source PEB
+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
+ * PEB
+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
+ * PEB
+ * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
+ * PEB
+ * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the
+ * target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
+ */
+enum {
+ MOVE_CANCEL_RACE = 1,
+ MOVE_SOURCE_RD_ERR,
+ MOVE_TARGET_RD_ERR,
+ MOVE_TARGET_WR_ERR,
+ MOVE_TARGET_BITFLIPS,
+ MOVE_RETRY,
+};
+
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
+ */
+enum {
+ UBI_NO_FASTMAP = 1,
+ UBI_BAD_FASTMAP,
+};
+
+/*
+ * Flags for emulate_power_cut in ubi_debug_info
+ *
+ * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
+ * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
+ */
+enum {
+ POWER_CUT_EC_WRITE = 0x01,
+ POWER_CUT_VID_WRITE = 0x02,
+};
+
+/**
+ * struct ubi_wl_entry - wear-leveling entry.
+ * @u.rb: link in the corresponding (free/used) RB-tree
+ * @u.list: link in the protection queue
+ * @ec: erase counter
+ * @pnum: physical eraseblock number
+ *
+ * This data structure is used in the WL sub-system. Each physical eraseblock
+ * has a corresponding &struct wl_entry object which may be kept in different
+ * RB-trees. See WL sub-system for details.
+ */
+struct ubi_wl_entry {
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+ int ec;
+ int pnum;
+};
+
+/**
+ * struct ubi_ltree_entry - an entry in the lock tree.
+ * @rb: links RB-tree nodes
+ * @vol_id: volume ID of the locked logical eraseblock
+ * @lnum: locked logical eraseblock number
+ * @users: how many tasks are using this logical eraseblock or wait for it
+ * @mutex: read/write mutex to implement read/write access serialization to
+ * the (@vol_id, @lnum) logical eraseblock
+ *
+ * This data structure is used in the EBA sub-system to implement per-LEB
+ * locking. When a logical eraseblock is being locked, a corresponding
+ * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree).
+ * See EBA sub-system for details.
+ */
+struct ubi_ltree_entry {
+ struct rb_node rb;
+ int vol_id;
+ int lnum;
+ int users;
+ struct rw_semaphore mutex;
+};
+
+/**
+ * struct ubi_rename_entry - volume re-name description data structure.
+ * @new_name_len: new volume name length
+ * @new_name: new volume name
+ * @remove: if not zero, this volume should be removed, not re-named
+ * @desc: descriptor of the volume
+ * @list: links re-name entries into a list
+ *
+ * This data structure is utilized in the multiple volume re-name code. Namely,
+ * UBI first creates a list of &struct ubi_rename_entry objects from the
+ * &struct ubi_rnvol_req request object, and then uses this list to do all
+ * the work.
+ */
+struct ubi_rename_entry {
+ int new_name_len;
+ char new_name[UBI_VOL_NAME_MAX + 1];
+ int remove;
+ struct ubi_volume_desc *desc;
+ struct list_head list;
+};
+
+struct ubi_volume_desc;
+
+/**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap
+ * @to_be_tortured: if non-zero, this PEB has to be tortured
+ * @used_blocks: number of used PEBs
+ * @max_pool_size: maximal size of the user pool
+ * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
+ */
+struct ubi_fastmap_layout {
+ struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
+ int to_be_tortured[UBI_FM_MAX_BLOCKS];
+ int used_blocks;
+ int max_pool_size;
+ int max_wl_pool_size;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool gets filled with up to @max_size PEBs. If all PEBs within the pool
+ * are used, a new fastmap will be written to the flash and the pool gets
+ * refilled with empty PEBs.
+ */
+struct ubi_fm_pool {
+ int pebs[UBI_FM_MAX_POOL_SIZE];
+ int used;
+ int size;
+ int max_size;
+};
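+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of UBI): taking
+ * the next PEB from a pool and detecting when the pool is exhausted; when
+ * that happens, a new fastmap is written and the pools are refilled (see
+ * ubi_refill_pools()):
+ *
+ *        static inline int fm_pool_next(struct ubi_fm_pool *pool)
+ *        {
+ *                if (pool->used == pool->size)
+ *                        return -ENOSPC;
+ *                return pool->pebs[pool->used++];
+ *        }
+ */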
+
+/**
+ * struct ubi_volume - UBI volume description data structure.
+ * @dev: device object to make use of the Linux device model
+ * @cdev: character device object to create character device
+ * @ubi: reference to the UBI device description object
+ * @vol_id: volume ID
+ * @ref_count: volume reference count
+ * @readers: number of users holding this volume in read-only mode
+ * @writers: number of users holding this volume in read-write mode
+ * @exclusive: whether somebody holds this volume in exclusive mode
+ * @metaonly: whether somebody is altering only meta data of this volume
+ *
+ * @reserved_pebs: how many physical eraseblocks are reserved for this volume
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @usable_leb_size: logical eraseblock size without padding
+ * @used_ebs: how many logical eraseblocks in this volume contain data
+ * @last_eb_bytes: how many bytes are stored in the last logical eraseblock
+ * @used_bytes: how many bytes of data this volume contains
+ * @alignment: volume alignment
+ * @data_pad: how many bytes are not used at the end of physical eraseblocks to
+ * satisfy the requested alignment
+ * @name_len: volume name length
+ * @name: volume name
+ *
+ * @upd_ebs: how many eraseblocks are expected to be updated
+ * @ch_lnum: LEB number which is being changed by the atomic LEB change
+ * operation
+ * @upd_bytes: how many bytes are expected to be received for volume update or
+ * atomic LEB change
+ * @upd_received: how many bytes were already received for volume update or
+ * atomic LEB change
+ * @upd_buf: update buffer which is used to collect update data or data for
+ * atomic LEB change
+ *
+ * @eba_tbl: EBA table of this volume (LEB->PEB mapping)
+ * @checked: %1 if this static volume was checked
+ * @corrupted: %1 if the volume is corrupted (static volumes only)
+ * @upd_marker: %1 if the update marker is set for this volume
+ * @updating: %1 if the volume is being updated
+ * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
+ * @direct_writes: %1 if direct writes are enabled for this volume
+ *
+ * The @corrupted field indicates that the volume's contents are corrupted.
+ * Since UBI protects only static volumes, this field is not relevant to
+ * dynamic volumes - it is the user's responsibility to ensure their data
+ * integrity.
+ *
+ * The @upd_marker flag indicates that this volume is either being updated at
+ * the moment or is damaged because of an unclean reboot.
+ */
+struct ubi_volume {
+ struct device dev;
+ struct cdev cdev;
+ struct ubi_device *ubi;
+ int vol_id;
+ int ref_count;
+ int readers;
+ int writers;
+ int exclusive;
+ int metaonly;
+
+ int reserved_pebs;
+ int vol_type;
+ int usable_leb_size;
+ int used_ebs;
+ int last_eb_bytes;
+ long long used_bytes;
+ int alignment;
+ int data_pad;
+ int name_len;
+ char name[UBI_VOL_NAME_MAX + 1];
+
+ int upd_ebs;
+ int ch_lnum;
+ long long upd_bytes;
+ long long upd_received;
+ void *upd_buf;
+
+ int *eba_tbl;
+ unsigned int checked:1;
+ unsigned int corrupted:1;
+ unsigned int upd_marker:1;
+ unsigned int updating:1;
+ unsigned int changing_leb:1;
+ unsigned int direct_writes:1;
+};
+
+/**
+ * struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
+ * @vol: reference to the corresponding volume description object
+ * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, %UBI_EXCLUSIVE
+ * or %UBI_METAONLY)
+ */
+struct ubi_volume_desc {
+ struct ubi_volume *vol;
+ int mode;
+};
+
+struct ubi_wl_entry;
+
+/**
+ * struct ubi_debug_info - debugging information for a UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @chk_fastmap: if UBI fastmap extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @emulate_power_cut: emulate power cut for testing purposes
+ * @power_cut_counter: count down for writes left until emulated power cut
+ * @power_cut_min: minimum number of writes before emulating a power cut
+ * @power_cut_max: maximum number of writes until emulating a power cut
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_chk_fastmap: debugfs knob to enable UBI fastmap extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ * @dfs_emulate_power_cut: debugfs knob to emulate power cuts
+ * @dfs_power_cut_min: debugfs knob for minimum writes before power cut
+ * @dfs_power_cut_max: debugfs knob for maximum writes until power cut
+ */
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int chk_fastmap:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ unsigned int emulate_power_cut:2;
+ unsigned int power_cut_counter;
+ unsigned int power_cut_min;
+ unsigned int power_cut_max;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_chk_fastmap;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
+ struct dentry *dfs_emulate_power_cut;
+ struct dentry *dfs_power_cut_min;
+ struct dentry *dfs_power_cut_max;
+};
+
+/**
+ * struct ubi_device - UBI device description structure
+ * @dev: UBI device object to make use of the Linux device model
+ * @cdev: character device object to create character device
+ * @ubi_num: UBI device number
+ * @ubi_name: UBI device name
+ * @vol_count: number of volumes in this UBI device
+ * @volumes: volumes of this UBI device
+ * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, @beb_rsvd_pebs,
+ * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
+ * @vol->readers, @vol->writers, @vol->exclusive,
+ * @vol->metaonly, @vol->ref_count, @vol->mapping and
+ * @vol->eba_tbl.
+ * @ref_count: count of references on the UBI device
+ * @image_seq: image sequence number recorded on EC headers
+ *
+ * @rsvd_pebs: count of reserved physical eraseblocks
+ * @avail_pebs: count of available physical eraseblocks
+ * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
+ * handling
+ * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
+ *
+ * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
+ * of UBI initialization
+ * @vtbl_slots: how many slots are available in the volume table
+ * @vtbl_size: size of the volume table in bytes
+ * @vtbl: in-RAM volume table copy
+ * @device_mutex: protects on-flash volume table and serializes volume
+ * creation, deletion, update, re-size, re-name and set
+ * property
+ *
+ * @max_ec: current highest erase counter value
+ * @mean_ec: current mean erase counter value
+ *
+ * @global_sqnum: global sequence number
+ * @ltree_lock: protects the lock tree and @global_sqnum
+ * @ltree: the lock tree
+ * @alc_mutex: serializes "atomic LEB change" operations
+ *
+ * @fm_disabled: non-zero if fastmap is disabled (default)
+ * @fm: in-memory data structure of the currently used fastmap
+ * @fm_pool: in-memory data structure of the fastmap pool
+ * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
+ * sub-system
+ * @fm_protect: serializes ubi_update_fastmap(), protects @fm_buf and makes sure
+ * that critical sections cannot be interrupted by ubi_update_fastmap()
+ * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
+ * @fm_size: fastmap size in bytes
+ * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work was scheduled
+ *
+ * @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
+ * @free: RB-tree of free physical eraseblocks
+ * @free_count: number of elements in @free
+ * @scrub: RB-tree of physical eraseblocks which need scrubbing
+ * @pq: protection queue (contain physical eraseblocks which are temporarily
+ * protected from the wear-leveling worker)
+ * @pq_head: protection queue head
+ * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
+ * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
+ * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
+ * and @fm_wl_pool fields
+ * @move_mutex: serializes eraseblock moves
+ * @work_sem: used to wait for all the scheduled works to finish and prevent
+ * new works from being submitted
+ * @wl_scheduled: non-zero if the wear-leveling was scheduled
+ * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
+ * physical eraseblock
+ * @move_from: physical eraseblock from where the data is being moved
+ * @move_to: physical eraseblock where the data is being moved to
+ * @move_to_put: if the "to" PEB was put
+ * @works: list of pending works
+ * @works_count: count of pending works
+ * @bgt_thread: background thread description object
+ * @thread_enabled: if the background thread is enabled
+ * @bgt_name: background thread name
+ *
+ * @flash_size: underlying MTD device size (in bytes)
+ * @peb_count: count of physical eraseblocks on the MTD device
+ * @peb_size: physical eraseblock size
+ * @bad_peb_limit: top limit of expected bad physical eraseblocks
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @good_peb_count: count of good physical eraseblocks
+ * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
+ * used by UBI)
+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
+ * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
+ * @min_io_size: minimal input/output unit size of the underlying MTD device
+ * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
+ * @ro_mode: if the UBI device is in read-only mode
+ * @leb_size: logical eraseblock size
+ * @leb_start: starting offset of logical eraseblocks within physical
+ * eraseblocks
+ * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size
+ * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size
+ * @vid_hdr_offset: starting offset of the volume identifier header (might be
+ * unaligned)
+ * @vid_hdr_aloffset: starting offset of the VID header aligned to
+ * @hdrs_min_io_size
+ * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
+ * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
+ * not
+ * @nor_flash: non-zero if working on top of NOR flash
+ * @max_write_size: maximum amount of bytes the underlying flash can write at a
+ * time (MTD write buffer size)
+ * @mtd: MTD device descriptor
+ *
+ * @peb_buf: a buffer of PEB size used for different purposes
+ * @buf_mutex: protects @peb_buf
+ * @ckvol_mutex: serializes static volume checking when opening
+ *
+ * @dbg: debugging information for this UBI device
+ */
+struct ubi_device {
+ struct cdev cdev;
+ struct device dev;
+ int ubi_num;
+ char ubi_name[sizeof(UBI_NAME_STR)+5];
+ int vol_count;
+ struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
+ spinlock_t volumes_lock;
+ int ref_count;
+ int image_seq;
+
+ int rsvd_pebs;
+ int avail_pebs;
+ int beb_rsvd_pebs;
+ int beb_rsvd_level;
+ int bad_peb_limit;
+
+ int autoresize_vol_id;
+ int vtbl_slots;
+ int vtbl_size;
+ struct ubi_vtbl_record *vtbl;
+ struct mutex device_mutex;
+
+ int max_ec;
+ /* Note, mean_ec is not updated run-time - should be fixed */
+ int mean_ec;
+
+ /* EBA sub-system's stuff */
+ unsigned long long global_sqnum;
+ spinlock_t ltree_lock;
+ struct rb_root ltree;
+ struct mutex alc_mutex;
+
+ /* Fastmap stuff */
+ int fm_disabled;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_fm_pool fm_pool;
+ struct ubi_fm_pool fm_wl_pool;
+ struct rw_semaphore fm_eba_sem;
+ struct rw_semaphore fm_protect;
+ void *fm_buf;
+ size_t fm_size;
+ struct work_struct fm_work;
+ int fm_work_scheduled;
+
+ /* Wear-leveling sub-system's stuff */
+ struct rb_root used;
+ struct rb_root erroneous;
+ struct rb_root free;
+ int free_count;
+ struct rb_root scrub;
+ struct list_head pq[UBI_PROT_QUEUE_LEN];
+ int pq_head;
+ spinlock_t wl_lock;
+ struct mutex move_mutex;
+ struct rw_semaphore work_sem;
+ int wl_scheduled;
+ struct ubi_wl_entry **lookuptbl;
+ struct ubi_wl_entry *move_from;
+ struct ubi_wl_entry *move_to;
+ int move_to_put;
+ struct list_head works;
+ int works_count;
+ struct task_struct *bgt_thread;
+ int thread_enabled;
+ char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
+
+ /* I/O sub-system's stuff */
+ long long flash_size;
+ int peb_count;
+ int peb_size;
+ int bad_peb_count;
+ int good_peb_count;
+ int corr_peb_count;
+ int erroneous_peb_count;
+ int max_erroneous;
+ int min_io_size;
+ int hdrs_min_io_size;
+ int ro_mode;
+ int leb_size;
+ int leb_start;
+ int ec_hdr_alsize;
+ int vid_hdr_alsize;
+ int vid_hdr_offset;
+ int vid_hdr_aloffset;
+ int vid_hdr_shift;
+ unsigned int bad_allowed:1;
+ unsigned int nor_flash:1;
+ int max_write_size;
+ struct mtd_info *mtd;
+
+ void *peb_buf;
+ struct mutex buf_mutex;
+ struct mutex ckvol_mutex;
+
+ struct ubi_debug_info dbg;
+};
+
+/**
+ * struct ubi_ainf_peb - attach information about a physical eraseblock.
+ * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
+ * @pnum: physical eraseblock number
+ * @vol_id: ID of the volume this LEB belongs to
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
+ * @sqnum: sequence number
+ * @u: unions RB-tree or @list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
+ * @u.list: link in one of the eraseblock lists
+ *
+ * One object of this type is allocated for each physical eraseblock when
+ * attaching an MTD device. Note, if this PEB does not belong to any LEB /
+ * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
+ */
+struct ubi_ainf_peb {
+ int ec;
+ int pnum;
+ int vol_id;
+ int lnum;
+ unsigned int scrub:1;
+ unsigned int copy_flag:1;
+ unsigned long long sqnum;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+};
+
+/**
+ * struct ubi_ainf_volume - attaching information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ * static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ * volume (always equivalent to the usable logical eraseblock
+ * size in case of dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ * are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblocks belonging to this
+ * volume (&struct ubi_ainf_peb objects)
+ *
+ * One object of this type is allocated for each volume when attaching an MTD
+ * device.
+ */
+struct ubi_ainf_volume {
+ int vol_id;
+ int highest_lnum;
+ int leb_count;
+ int vol_type;
+ int used_ebs;
+ int last_data_size;
+ int data_pad;
+ int compat;
+ struct rb_node rb;
+ struct rb_root root;
+};
+
+/**
+ * struct ubi_attach_info - MTD device attaching information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ * 0xFF bytes)
+ * @alien_peb_count: count of PEBs in the @alien list
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
+ * as bad yet, but which look bad
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ *
+ * This data structure contains the result of attaching an MTD device and may
+ * be used by other UBI sub-systems to build final UBI data structures, for
+ * further error-recovery, and so on.
+ */
+struct ubi_attach_info {
+ struct rb_root volumes;
+ struct list_head corr;
+ struct list_head free;
+ struct list_head erase;
+ struct list_head alien;
+ int corr_peb_count;
+ int empty_peb_count;
+ int alien_peb_count;
+ int bad_peb_count;
+ int maybe_bad_peb_count;
+ int vols_found;
+ int highest_vol_id;
+ int is_empty;
+ int min_ec;
+ int max_ec;
+ unsigned long long max_sqnum;
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
+ struct kmem_cache *aeb_slab_cache;
+};
+
+/**
+ * struct ubi_work - UBI work description data structure.
+ * @list: a link in the list of pending works
+ * @func: worker function
+ * @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
+ * @torture: if the physical eraseblock has to be tortured
+ * @anchor: produce an anchor PEB to be used by fastmap
+ *
+ * The @func pointer points to the worker function. If the @shutdown argument is
+ * not zero, the worker has to free the resources and exit immediately as the
+ * WL sub-system is shutting down.
+ * The worker has to return zero in case of success and a negative error code in
+ * case of failure.
+ */
+struct ubi_work {
+ struct list_head list;
+ int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
+ /* The below fields are only relevant to erasure works */
+ struct ubi_wl_entry *e;
+ int vol_id;
+ int lnum;
+ int torture;
+ int anchor;
+};
+
+#include "debug.h"
+
+extern struct kmem_cache *ubi_wl_entry_slab;
+extern const struct file_operations ubi_ctrl_cdev_operations;
+extern const struct file_operations ubi_cdev_operations;
+extern const struct file_operations ubi_vol_cdev_operations;
+extern struct class *ubi_class;
+extern struct mutex ubi_devices_mutex;
+extern struct blocking_notifier_head ubi_notifiers;
+
+/* attach.c */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id);
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai);
+int ubi_attach(struct ubi_device *ubi, int force_scan);
+void ubi_destroy_ai(struct ubi_attach_info *ai);
+
+/* vtbl.c */
+int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
+ struct ubi_vtbl_record *vtbl_rec);
+int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
+ struct list_head *rename_list);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
+
+/* vmt.c */
+int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
+int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl);
+int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
+int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list);
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+
+/* upd.c */
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes);
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count);
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ const struct ubi_leb_change_req *req);
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count);
+
+/* misc.c */
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
+ int length);
+int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_update_reserved(struct ubi_device *ubi);
+void ubi_calculate_reserved(struct ubi_device *ubi);
+int ubi_check_pattern(const void *buf, uint8_t patt, int size);
+
+/* eba.c */
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum);
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check);
+int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_sgl *sgl, int lnum, int offset, int len,
+ int check);
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ const void *buf, int offset, int len);
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int used_ebs);
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len);
+int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ struct ubi_vid_hdr *vid_hdr);
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan);
+
+/* wl.c */
+int ubi_wl_get_peb(struct ubi_device *ubi);
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
+int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+void ubi_wl_close(struct ubi_device *ubi);
+int ubi_thread(void *u);
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
+ int lnum, int torture);
+int ubi_is_erase_work(struct ubi_work *wrk);
+void ubi_refill_pools(struct ubi_device *ubi);
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
+
+/* io.c */
+int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
+ int len);
+int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
+ int len);
+int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture);
+int ubi_io_is_bad(const struct ubi_device *ubi, int pnum);
+int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum);
+int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr, int verbose);
+int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr);
+int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr, int verbose);
+int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr);
+
+/* build.c */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024);
+int ubi_detach_mtd_dev(int ubi_num, int anyway);
+struct ubi_device *ubi_get_device(int ubi_num);
+void ubi_put_device(struct ubi_device *ubi);
+struct ubi_device *ubi_get_by_major(int major);
+int ubi_major2num(int major);
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
+ int ntype);
+int ubi_notify_all(struct ubi_device *ubi, int ntype,
+ struct notifier_block *nb);
+int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_internal_volumes(struct ubi_device *ubi);
+
+/* kapi.c */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi);
+/* scan.c */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr);
+
+/* fastmap.c */
+#ifdef CONFIG_MTD_UBI_FASTMAP
+size_t ubi_calc_fm_size(struct ubi_device *ubi);
+int ubi_update_fastmap(struct ubi_device *ubi);
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor);
+#else
+static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
+#endif
+
+/* block.c */
+#ifdef CONFIG_MTD_UBI_BLOCK
+int ubiblock_init(void);
+void ubiblock_exit(void);
+int ubiblock_create(struct ubi_volume_info *vi);
+int ubiblock_remove(struct ubi_volume_info *vi);
+#else
+static inline int ubiblock_init(void) { return 0; }
+static inline void ubiblock_exit(void) {}
+static inline int ubiblock_create(struct ubi_volume_info *vi)
+{
+ return -ENOSYS;
+}
+static inline int ubiblock_remove(struct ubi_volume_info *vi)
+{
+ return -ENOSYS;
+}
+#endif
+
+/*
+ * ubi_for_each_free_peb - walk the UBI free RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as a cursor
+ * @tmp_rb: a pointer to a struct rb_node to use as a loop counter
+ */
+#define ubi_for_each_free_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
+
+/*
+ * ubi_for_each_used_peb - walk the UBI used RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as a cursor
+ * @tmp_rb: a pointer to a struct rb_node to use as a loop counter
+ */
+#define ubi_for_each_used_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
+
+/*
+ * ubi_for_each_scrub_peb - walk the UBI scrub RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as a cursor
+ * @tmp_rb: a pointer to a struct rb_node to use as a loop counter
+ */
+#define ubi_for_each_scrub_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
+
+/*
+ * ubi_for_each_protected_peb - walk the UBI protection queue.
+ * @ubi: UBI device description object
+ * @i: an integer used as a counter
+ * @e: a pointer to a ubi_wl_entry to use as a cursor
+ */
+#define ubi_for_each_protected_peb(ubi, i, e) \
+ for ((i) = 0; (i) < UBI_PROT_QUEUE_LEN; (i)++) \
+ list_for_each_entry((e), &(ubi->pq[(i)]), u.list)
+
+/*
+ * ubi_rb_for_each_entry - walk an RB-tree.
+ * @rb: a pointer to type 'struct rb_node' to use as a loop counter
+ * @pos: a pointer to RB-tree entry type to use as a loop counter
+ * @root: RB-tree's root
+ * @member: the name of the 'struct rb_node' within the RB-tree entry
+ */
+#define ubi_rb_for_each_entry(rb, pos, root, member) \
+ for (rb = rb_first(root), \
+ pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \
+ rb; \
+ rb = rb_next(rb), \
+ pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
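+
+/*
+ * Example (hypothetical debugging helper, not part of UBI): counting the
+ * free PEBs by walking the free RB-tree with the iterators above:
+ *
+ *        struct ubi_wl_entry *e;
+ *        struct rb_node *tmp_rb;
+ *        int count = 0;
+ *
+ *        ubi_for_each_free_peb(ubi, e, tmp_rb)
+ *                count++;
+ */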
+
+/*
+ * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
+ *
+ * @av: volume attaching information
+ * @aeb: attaching eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
+ struct ubi_ainf_peb *aeb,
+ struct list_head *list)
+{
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, list);
+}
+
+/**
+ * ubi_zalloc_vid_hdr - allocate a volume identifier header object.
+ * @ubi: UBI device description object
+ * @gfp_flags: GFP flags to allocate with
+ *
+ * This function returns a pointer to the newly allocated and zero-filled
+ * volume identifier header object in case of success and %NULL in case of
+ * failure.
+ */
+static inline struct ubi_vid_hdr *
+ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags)
+{
+ void *vid_hdr;
+
+ vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags);
+ if (!vid_hdr)
+ return NULL;
+
+ /*
+ * VID headers may be stored at un-aligned flash offsets, so we shift
+ * the pointer.
+ */
+ return vid_hdr + ubi->vid_hdr_shift;
+}
+
+/**
+ * ubi_free_vid_hdr - free a volume identifier header object.
+ * @ubi: UBI device description object
+ * @vid_hdr: the object to free
+ */
+static inline void ubi_free_vid_hdr(const struct ubi_device *ubi,
+ struct ubi_vid_hdr *vid_hdr)
+{
+ void *p = vid_hdr;
+
+ if (!p)
+ return;
+
+ kfree(p - ubi->vid_hdr_shift);
+}
+
+/*
+ * This function is equivalent to 'ubi_io_read()', but @offset is relative to
+ * the beginning of the logical eraseblock, not to the beginning of the
+ * physical eraseblock.
+ */
+static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf,
+ int pnum, int offset, int len)
+{
+ ubi_assert(offset >= 0);
+ return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len);
+}
+
+/*
+ * This function is equivalent to 'ubi_io_write()', but @offset is relative to
+ * the beginning of the logical eraseblock, not to the beginning of the
+ * physical eraseblock.
+ */
+static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
+ int pnum, int offset, int len)
+{
+ ubi_assert(offset >= 0);
+ return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
+}
+
+/**
+ * ubi_ro_mode - switch to read-only mode.
+ * @ubi: UBI device description object
+ */
+static inline void ubi_ro_mode(struct ubi_device *ubi)
+{
+ if (!ubi->ro_mode) {
+ ubi->ro_mode = 1;
+ ubi_warn(ubi, "switch to read-only mode");
+ dump_stack();
+ }
+}
+
+/**
+ * vol_id2idx - get table index by volume ID.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ */
+static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id)
+{
+ if (vol_id >= UBI_INTERNAL_VOL_START)
+ return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots;
+ else
+ return vol_id;
+}
+
+/**
+ * idx2vol_id - get volume ID by table index.
+ * @ubi: UBI device description object
+ * @idx: table index
+ */
+static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
+{
+ if (idx >= ubi->vtbl_slots)
+ return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START;
+ else
+ return idx;
+}
+
+#endif /* !__UBI_UBI_H__ */
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
new file mode 100644
index 000000000..2a1b6e037
--- /dev/null
+++ b/drivers/mtd/ubi/upd.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ *
+ * Jan 2007: Alexander Schmidt, hacked per-volume update.
+ */
+
+/*
+ * This file contains the implementation of the volume update and atomic LEB
+ * change functionality.
+ *
+ * The update operation is based on the per-volume update marker which is
+ * stored in the volume table. The update marker is set before the update
+ * starts, and removed after the update has been finished. So if the update was
+ * interrupted by an unclean reboot or for some other reason, the update
+ * marker stays on the flash media and UBI finds it when it attaches the MTD
+ * device next time. If the update marker is set for a volume, the volume is
+ * treated as damaged and most I/O operations are prohibited. Only a new update
+ * operation is allowed.
+ *
+ * Note, in general it is possible to implement the update operation as a
+ * transaction with a roll-back capability.
+ */
+
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/math64.h>
+#include "ubi.h"
+
+/**
+ * set_update_marker - set update marker.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function sets the update marker flag for volume @vol. Returns zero
+ * in case of success and a negative error code in case of failure.
+ */
+static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err;
+ struct ubi_vtbl_record vtbl_rec;
+
+ dbg_gen("set update marker for volume %d", vol->vol_id);
+
+ if (vol->upd_marker) {
+ ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
+ dbg_gen("already set");
+ return 0;
+ }
+
+ vtbl_rec = ubi->vtbl[vol->vol_id];
+ vtbl_rec.upd_marker = 1;
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+ vol->upd_marker = 1;
+ mutex_unlock(&ubi->device_mutex);
+ return err;
+}
+
+/**
+ * clear_update_marker - clear update marker.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @bytes: new data size in bytes
+ *
+ * This function clears the update marker for volume @vol, sets new volume
+ * data size and clears the "corrupted" flag (static volumes only). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes)
+{
+ int err;
+ struct ubi_vtbl_record vtbl_rec;
+
+ dbg_gen("clear update marker for volume %d", vol->vol_id);
+
+ vtbl_rec = ubi->vtbl[vol->vol_id];
+ ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
+ vtbl_rec.upd_marker = 0;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ vol->corrupted = 0;
+ vol->used_bytes = bytes;
+ vol->used_ebs = div_u64_rem(bytes, vol->usable_leb_size,
+ &vol->last_eb_bytes);
+ if (vol->last_eb_bytes)
+ vol->used_ebs += 1;
+ else
+ vol->last_eb_bytes = vol->usable_leb_size;
+ }
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+ vol->upd_marker = 0;
+ mutex_unlock(&ubi->device_mutex);
+ return err;
+}
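+
+/*
+ * A worked example of the size computation in clear_update_marker() above
+ * (illustrative numbers): with @bytes = 5000 and usable_leb_size = 2048,
+ * div_u64_rem() yields used_ebs = 2 and last_eb_bytes = 904; the remainder
+ * is non-zero, so used_ebs becomes 3. Had @bytes been exactly 4096, the
+ * remainder would be 0 and last_eb_bytes would be set to the full 2048.
+ */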
+
+/**
+ * ubi_start_update - start volume update.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @bytes: update bytes
+ *
+ * This function starts the volume update operation. If @bytes is zero, the
+ * volume is just wiped out. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes)
+{
+ int i, err;
+
+ dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes);
+ ubi_assert(!vol->updating && !vol->changing_leb);
+ vol->updating = 1;
+
+ vol->upd_buf = vmalloc(ubi->leb_size);
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
+ err = set_update_marker(ubi, vol);
+ if (err)
+ return err;
+
+ /* Before updating - wipe out the volume */
+ for (i = 0; i < vol->reserved_pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, i);
+ if (err)
+ return err;
+ }
+
+ if (bytes == 0) {
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+
+ err = clear_update_marker(ubi, vol, 0);
+ if (err)
+ return err;
+
+ vfree(vol->upd_buf);
+ vol->updating = 0;
+ return 0;
+ }
+
+ vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
+ vol->upd_bytes = bytes;
+ vol->upd_received = 0;
+ return 0;
+}
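+
+/*
+ * A minimal user-space sketch of driving this update path, assuming the
+ * standard UBI_IOCVOLUP ioctl from <mtd/ubi-user.h>; the device node and
+ * image are hypothetical and error handling is trimmed:
+ *
+ *	int fd = open("/dev/ubi0_0", O_RDWR);
+ *	int64_t bytes = image_size;		// total update length
+ *	ioctl(fd, UBI_IOCVOLUP, &bytes);	// ends up in ubi_start_update()
+ *	write(fd, image, image_size);		// fed to ubi_more_update_data()
+ *	close(fd);
+ */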
+
+/**
+ * ubi_start_leb_change - start atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @req: operation request
+ *
+ * This function starts the atomic LEB change operation. Returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ const struct ubi_leb_change_req *req)
+{
+ ubi_assert(!vol->updating && !vol->changing_leb);
+
+ dbg_gen("start changing LEB %d:%d, %u bytes",
+ vol->vol_id, req->lnum, req->bytes);
+ if (req->bytes == 0)
+ return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
+
+ vol->upd_bytes = req->bytes;
+ vol->upd_received = 0;
+ vol->changing_leb = 1;
+ vol->ch_lnum = req->lnum;
+
+ vol->upd_buf = vmalloc(req->bytes);
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
+ return 0;
+}
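+
+/*
+ * A minimal user-space sketch of the atomic LEB change path, assuming the
+ * standard UBI_IOCEBCH ioctl and struct ubi_leb_change_req from
+ * <mtd/ubi-user.h>; the LEB number and buffer are hypothetical and error
+ * handling is trimmed:
+ *
+ *	struct ubi_leb_change_req req = {
+ *		.lnum  = 3,			// LEB to replace atomically
+ *		.bytes = data_len,		// how much data will follow
+ *	};
+ *	ioctl(fd, UBI_IOCEBCH, &req);		// ends up in ubi_start_leb_change()
+ *	write(fd, data, data_len);		// fed to ubi_more_leb_change_data()
+ */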
+
+/**
+ * write_leb - write update data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: data size
+ * @used_ebs: how many logical eraseblocks will this volume contain (static
+ * volumes only)
+ *
+ * This function writes update data to the corresponding logical eraseblock.
+ * In case of a dynamic volume, this function checks if the data has 0xFF bytes
+ * at the end. If yes, the 0xFF bytes are cut and not written. So if the whole
+ * buffer contains only 0xFF bytes, the LEB is left unmapped.
+ *
+ * The reason why we skip the trailing 0xFF bytes in case of dynamic volume is
+ * that we want to make sure that more data may be appended to the logical
+ * eraseblock in future. Indeed, writing 0xFF bytes may have side effects and
+ * this PEB won't be writable anymore. So if one writes the file-system image
+ * to the UBI volume where 0xFFs mean free space - UBI makes sure this free
+ * space is writable after the update.
+ *
+ * We do not do this for static volumes because they are read-only. And it
+ * could not be done anyway, because for static volumes we have to store the
+ * per-LEB CRC and the exact data length.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int len, int used_ebs)
+{
+ int err;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ int l = ALIGN(len, ubi->min_io_size);
+
+ memset(buf + len, 0xFF, l - len);
+ len = ubi_calc_data_len(ubi, buf, l);
+ if (len == 0) {
+ dbg_gen("all %d bytes contain 0xFF - skip", len);
+ return 0;
+ }
+
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
+ } else {
+ /*
+ * When writing static volume, and this is the last logical
+ * eraseblock, the length (@len) does not have to be aligned to
+ * the minimal flash I/O unit. The 'ubi_eba_write_leb_st()'
+ * function accepts exact (unaligned) length and stores it in
+ * the VID header. And it takes care of proper alignment by
+ * padding the buffer. Here we just make sure the padding will
+ * contain zeros, not random trash.
+ */
+ memset(buf + len, 0, vol->usable_leb_size - len);
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
+ }
+
+ return err;
+}
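+
+/*
+ * A worked example of the trimming above (illustrative numbers): with
+ * min_io_size = 512 and len = 600, the buffer is padded with 0xFF up to
+ * l = 1024. If only the first 10 bytes differ from 0xFF,
+ * ubi_calc_data_len() drops the 0xFF tail and rounds up, so only 512 bytes
+ * are written; if the whole buffer is 0xFF, it returns 0 and the LEB is
+ * left unmapped.
+ */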
+
+/**
+ * ubi_more_update_data - write more update data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @buf: write data (user-space memory buffer)
+ * @count: how many bytes to write
+ *
+ * This function writes more data to the volume which is being updated. It may
+ * be called an arbitrary number of times until all the update data arrives.
+ * This function returns %0 in case of success, the number of bytes written
+ * during the
+ * last call if the whole volume update has been successfully finished, and a
+ * negative error code in case of failure.
+ */
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count)
+{
+ int lnum, offs, err = 0, len, to_write = count;
+
+ dbg_gen("write %d of %lld bytes, %lld already passed",
+ count, vol->upd_bytes, vol->upd_received);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ lnum = div_u64_rem(vol->upd_received, vol->usable_leb_size, &offs);
+ if (vol->upd_received + count > vol->upd_bytes)
+ to_write = count = vol->upd_bytes - vol->upd_received;
+
+ /*
+	 * When updating volumes, we accumulate a whole logical eraseblock of
+	 * data and write it at once.
+ */
+ if (offs != 0) {
+ /*
+ * This is a write to the middle of the logical eraseblock. We
+ * copy the data to our update buffer and wait for more data or
+ * flush it if the whole eraseblock is written or the update
+ * is finished.
+ */
+
+ len = vol->usable_leb_size - offs;
+ if (len > count)
+ len = count;
+
+ err = copy_from_user(vol->upd_buf + offs, buf, len);
+ if (err)
+ return -EFAULT;
+
+ if (offs + len == vol->usable_leb_size ||
+ vol->upd_received + len == vol->upd_bytes) {
+ int flush_len = offs + len;
+
+ /*
+ * OK, we gathered either the whole eraseblock or this
+ * is the last chunk, it's time to flush the buffer.
+ */
+ ubi_assert(flush_len <= vol->usable_leb_size);
+ err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
+ vol->upd_ebs);
+ if (err)
+ return err;
+ }
+
+ vol->upd_received += len;
+ count -= len;
+ buf += len;
+ lnum += 1;
+ }
+
+ /*
+ * If we've got more to write, let's continue. At this point we know we
+ * are starting from the beginning of an eraseblock.
+ */
+ while (count) {
+ if (count > vol->usable_leb_size)
+ len = vol->usable_leb_size;
+ else
+ len = count;
+
+ err = copy_from_user(vol->upd_buf, buf, len);
+ if (err)
+ return -EFAULT;
+
+ if (len == vol->usable_leb_size ||
+ vol->upd_received + len == vol->upd_bytes) {
+ err = write_leb(ubi, vol, lnum, vol->upd_buf,
+ len, vol->upd_ebs);
+ if (err)
+ break;
+ }
+
+ vol->upd_received += len;
+ count -= len;
+ lnum += 1;
+ buf += len;
+ }
+
+ ubi_assert(vol->upd_received <= vol->upd_bytes);
+ if (vol->upd_received == vol->upd_bytes) {
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ /* The update is finished, clear the update marker */
+ err = clear_update_marker(ubi, vol, vol->upd_bytes);
+ if (err)
+ return err;
+ vol->updating = 0;
+ err = to_write;
+ vfree(vol->upd_buf);
+ }
+
+ return err;
+}
+
+/**
+ * ubi_more_leb_change_data - accept more data for atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @buf: write data (user-space memory buffer)
+ * @count: how many bytes to write
+ *
+ * This function accepts more data for the volume which is undergoing the
+ * "atomic LEB change" operation. It may be called an arbitrary number of
+ * times until all data arrives. This function returns %0 in case of success,
+ * the number
+ * of bytes written during the last call if the whole "atomic LEB change"
+ * operation has been successfully finished, and a negative error code in case
+ * of failure.
+ */
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count)
+{
+ int err;
+
+ dbg_gen("write %d of %lld bytes, %lld already passed",
+ count, vol->upd_bytes, vol->upd_received);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (vol->upd_received + count > vol->upd_bytes)
+ count = vol->upd_bytes - vol->upd_received;
+
+ err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
+ if (err)
+ return -EFAULT;
+
+ vol->upd_received += count;
+
+ if (vol->upd_received == vol->upd_bytes) {
+ int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
+
+ memset(vol->upd_buf + vol->upd_bytes, 0xFF,
+ len - vol->upd_bytes);
+ len = ubi_calc_data_len(ubi, vol->upd_buf, len);
+ err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
+ vol->upd_buf, len);
+ if (err)
+ return err;
+ }
+
+ ubi_assert(vol->upd_received <= vol->upd_bytes);
+ if (vol->upd_received == vol->upd_bytes) {
+ vol->changing_leb = 0;
+ err = count;
+ vfree(vol->upd_buf);
+ }
+
+ return err;
+}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
new file mode 100644
index 000000000..ff4d97848
--- /dev/null
+++ b/drivers/mtd/ubi/vmt.c
@@ -0,0 +1,863 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file contains the implementation of volume creation, deletion,
+ * updating and resizing.
+ */
+
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "ubi.h"
+
+static int self_check_volumes(struct ubi_device *ubi);
+
+static ssize_t vol_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+/* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */
+static struct device_attribute attr_vol_reserved_ebs =
+ __ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_type =
+ __ATTR(type, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_name =
+ __ATTR(name, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_corrupted =
+ __ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_alignment =
+ __ATTR(alignment, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_usable_eb_size =
+ __ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_data_bytes =
+ __ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_upd_marker =
+ __ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL);
+
+/*
+ * "Show" method for files in '/<sysfs>/class/ubi/ubiX_Y/'.
+ *
+ * Consider a situation:
+ * A. process 1 opens a sysfs file related to volume Y, say
+ * /<sysfs>/class/ubi/ubiX_Y/reserved_ebs;
+ * B. process 2 removes volume Y;
+ * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
+ *
+ * In this situation, this function will return %-ENODEV because it will find
+ * out that the volume was removed from the @ubi->volumes array.
+ */
+static ssize_t vol_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+ struct ubi_device *ubi;
+
+ ubi = ubi_get_device(vol->ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ spin_lock(&ubi->volumes_lock);
+ if (!ubi->volumes[vol->vol_id]) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_put_device(ubi);
+ return -ENODEV;
+ }
+ /* Take a reference to prevent volume removal */
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ if (attr == &attr_vol_reserved_ebs)
+ ret = sprintf(buf, "%d\n", vol->reserved_pebs);
+ else if (attr == &attr_vol_type) {
+ const char *tp;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ tp = "dynamic";
+ else
+ tp = "static";
+ ret = sprintf(buf, "%s\n", tp);
+ } else if (attr == &attr_vol_name)
+ ret = sprintf(buf, "%s\n", vol->name);
+ else if (attr == &attr_vol_corrupted)
+ ret = sprintf(buf, "%d\n", vol->corrupted);
+ else if (attr == &attr_vol_alignment)
+ ret = sprintf(buf, "%d\n", vol->alignment);
+ else if (attr == &attr_vol_usable_eb_size)
+ ret = sprintf(buf, "%d\n", vol->usable_leb_size);
+ else if (attr == &attr_vol_data_bytes)
+ ret = sprintf(buf, "%lld\n", vol->used_bytes);
+ else if (attr == &attr_vol_upd_marker)
+ ret = sprintf(buf, "%d\n", vol->upd_marker);
+ else
+ /* This must be a bug */
+ ret = -EINVAL;
+
+ /* We've done the operation, drop volume and UBI device references */
+ spin_lock(&ubi->volumes_lock);
+ vol->ref_count -= 1;
+ ubi_assert(vol->ref_count >= 0);
+ spin_unlock(&ubi->volumes_lock);
+ ubi_put_device(ubi);
+ return ret;
+}
+
+/* Release method for volume devices */
+static void vol_release(struct device *dev)
+{
+ struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+
+ kfree(vol->eba_tbl);
+ kfree(vol);
+}
+
+/**
+ * volume_sysfs_init - initialize sysfs for new volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ *
+ * Note, this function does not free allocated resources in case of failure -
+ * the caller does it. This is because this would cause release() here and the
+ * caller would oops.
+ */
+static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err;
+
+ err = device_create_file(&vol->dev, &attr_vol_reserved_ebs);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_type);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_name);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_corrupted);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_alignment);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_usable_eb_size);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_data_bytes);
+ if (err)
+ return err;
+ err = device_create_file(&vol->dev, &attr_vol_upd_marker);
+ return err;
+}
+
+/**
+ * volume_sysfs_close - close sysfs for a volume.
+ * @vol: volume description object
+ */
+static void volume_sysfs_close(struct ubi_volume *vol)
+{
+ device_remove_file(&vol->dev, &attr_vol_upd_marker);
+ device_remove_file(&vol->dev, &attr_vol_data_bytes);
+ device_remove_file(&vol->dev, &attr_vol_usable_eb_size);
+ device_remove_file(&vol->dev, &attr_vol_alignment);
+ device_remove_file(&vol->dev, &attr_vol_corrupted);
+ device_remove_file(&vol->dev, &attr_vol_name);
+ device_remove_file(&vol->dev, &attr_vol_type);
+ device_remove_file(&vol->dev, &attr_vol_reserved_ebs);
+ device_unregister(&vol->dev);
+}
+
+/**
+ * ubi_create_volume - create volume.
+ * @ubi: UBI device description object
+ * @req: volume creation request
+ *
+ * This function creates the volume described by @req. If @req->vol_id is
+ * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new
+ * volume
+ * and saves it in @req->vol_id. Returns zero in case of success and a negative
+ * error code in case of failure. Note, the caller has to have the
+ * @ubi->device_mutex locked.
+ */
+int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
+{
+ int i, err, vol_id = req->vol_id, do_free = 1;
+ struct ubi_volume *vol;
+ struct ubi_vtbl_record vtbl_rec;
+ dev_t dev;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol_id == UBI_VOL_NUM_AUTO) {
+ /* Find unused volume ID */
+ dbg_gen("search for vacant volume ID");
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (!ubi->volumes[i]) {
+ vol_id = i;
+ break;
+ }
+
+ if (vol_id == UBI_VOL_NUM_AUTO) {
+ ubi_err(ubi, "out of volume IDs");
+ err = -ENFILE;
+ goto out_unlock;
+ }
+ req->vol_id = vol_id;
+ }
+
+ dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s",
+ ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
+ (int)req->vol_type, req->name);
+
+ /* Ensure that this volume does not exist */
+ err = -EEXIST;
+ if (ubi->volumes[vol_id]) {
+ ubi_err(ubi, "volume %d already exists", vol_id);
+ goto out_unlock;
+ }
+
+ /* Ensure that the name is unique */
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i] &&
+ ubi->volumes[i]->name_len == req->name_len &&
+ !strcmp(ubi->volumes[i]->name, req->name)) {
+ ubi_err(ubi, "volume \"%s\" exists (ID %d)",
+ req->name, i);
+ goto out_unlock;
+ }
+
+ /* Calculate how many eraseblocks are requested */
+ vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
+ vol->reserved_pebs += div_u64(req->bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
+
+ /* Reserve physical eraseblocks */
+ if (vol->reserved_pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "not enough PEBs, only %d available",
+ ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ err = -ENOSPC;
+ goto out_unlock;
+ }
+ ubi->avail_pebs -= vol->reserved_pebs;
+ ubi->rsvd_pebs += vol->reserved_pebs;
+ spin_unlock(&ubi->volumes_lock);
+
+ vol->vol_id = vol_id;
+ vol->alignment = req->alignment;
+ vol->data_pad = ubi->leb_size % vol->alignment;
+ vol->vol_type = req->vol_type;
+ vol->name_len = req->name_len;
+ memcpy(vol->name, req->name, vol->name_len);
+ vol->ubi = ubi;
+
+ /*
+ * Finish all pending erases because there may be some LEBs belonging
+ * to the same volume ID.
+ */
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
+ if (err)
+ goto out_acc;
+
+ vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL);
+ if (!vol->eba_tbl) {
+ err = -ENOMEM;
+ goto out_acc;
+ }
+
+ for (i = 0; i < vol->reserved_pebs; i++)
+ vol->eba_tbl[i] = UBI_LEB_UNMAPPED;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ } else {
+ vol->used_ebs = div_u64_rem(vol->used_bytes,
+ vol->usable_leb_size,
+ &vol->last_eb_bytes);
+ if (vol->last_eb_bytes != 0)
+ vol->used_ebs += 1;
+ else
+ vol->last_eb_bytes = vol->usable_leb_size;
+ }
+
+ /* Register character device for the volume */
+ cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
+ vol->cdev.owner = THIS_MODULE;
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
+ if (err) {
+ ubi_err(ubi, "cannot add character device");
+ goto out_mapping;
+ }
+
+ vol->dev.release = vol_release;
+ vol->dev.parent = &ubi->dev;
+ vol->dev.devt = dev;
+ vol->dev.class = ubi_class;
+
+ dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ err = device_register(&vol->dev);
+ if (err) {
+ ubi_err(ubi, "cannot register device");
+ goto out_cdev;
+ }
+
+ err = volume_sysfs_init(ubi, vol);
+ if (err)
+ goto out_sysfs;
+
+ /* Fill volume table record */
+ memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
+ vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
+ vtbl_rec.alignment = cpu_to_be32(vol->alignment);
+ vtbl_rec.data_pad = cpu_to_be32(vol->data_pad);
+ vtbl_rec.name_len = cpu_to_be16(vol->name_len);
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ vtbl_rec.vol_type = UBI_VID_DYNAMIC;
+ else
+ vtbl_rec.vol_type = UBI_VID_STATIC;
+ memcpy(vtbl_rec.name, vol->name, vol->name_len);
+
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_sysfs;
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
+ self_check_volumes(ubi);
+ return err;
+
+out_sysfs:
+ /*
+ * We have registered our device, we should not free the volume
+ * description object in this function in case of an error - it is
+ * freed by the release function.
+ *
+ * Get device reference to prevent the release function from being
+ * called just after sysfs has been closed.
+ */
+ do_free = 0;
+ get_device(&vol->dev);
+ volume_sysfs_close(vol);
+out_cdev:
+ cdev_del(&vol->cdev);
+out_mapping:
+ if (do_free)
+ kfree(vol->eba_tbl);
+out_acc:
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= vol->reserved_pebs;
+ ubi->avail_pebs += vol->reserved_pebs;
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ if (do_free)
+ kfree(vol);
+ else
+ put_device(&vol->dev);
+ ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err);
+ return err;
+}
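+
+/*
+ * A minimal user-space sketch of the creation path, assuming the standard
+ * UBI_IOCMKVOL ioctl on the UBI device node; "/dev/ubi0", the size and the
+ * name are hypothetical and error handling is trimmed:
+ *
+ *	struct ubi_mkvol_req req = {
+ *		.vol_id    = UBI_VOL_NUM_AUTO,	// let UBI pick a free ID
+ *		.alignment = 1,
+ *		.bytes     = 1024 * 1024,
+ *		.vol_type  = UBI_DYNAMIC_VOLUME,
+ *		.name_len  = 4,
+ *	};
+ *	strcpy(req.name, "test");
+ *	int fd = open("/dev/ubi0", O_RDWR);
+ *	ioctl(fd, UBI_IOCMKVOL, &req);		// ends up in ubi_create_volume()
+ */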
+
+/**
+ * ubi_remove_volume - remove volume.
+ * @desc: volume descriptor
+ * @no_vtbl: do not change the volume table if non-zero
+ *
+ * This function removes volume described by @desc. The volume has to be opened
+ * in "exclusive" mode. Returns zero in case of success and a negative error
+ * code in case of failure. The caller has to have the @ubi->device_mutex
+ * locked.
+ */
+int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
+
+ dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
+ ubi_assert(desc->mode == UBI_EXCLUSIVE);
+ ubi_assert(vol == ubi->volumes[vol_id]);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ /*
+ * The volume is busy, probably someone is reading one of its
+ * sysfs files.
+ */
+ err = -EBUSY;
+ goto out_unlock;
+ }
+ ubi->volumes[vol_id] = NULL;
+ spin_unlock(&ubi->volumes_lock);
+
+ if (!no_vtbl) {
+ err = ubi_change_vtbl_record(ubi, vol_id, NULL);
+ if (err)
+ goto out_err;
+ }
+
+ for (i = 0; i < vol->reserved_pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, i);
+ if (err)
+ goto out_err;
+ }
+
+ cdev_del(&vol->cdev);
+ volume_sysfs_close(vol);
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= reserved_pebs;
+ ubi->avail_pebs += reserved_pebs;
+ ubi_update_reserved(ubi);
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
+ if (!no_vtbl)
+ self_check_volumes(ubi);
+
+ return err;
+
+out_err:
+ ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ return err;
+}
+
+/**
+ * ubi_resize_volume - re-size volume.
+ * @desc: volume descriptor
+ * @reserved_pebs: new size in physical eraseblocks
+ *
+ * This function re-sizes the volume and returns zero in case of success, and a
+ * negative error code in case of failure. The caller has to have the
+ * @ubi->device_mutex locked.
+ */
+int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+{
+ int i, err, pebs, *new_mapping;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ struct ubi_vtbl_record vtbl_rec;
+ int vol_id = vol->vol_id;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ dbg_gen("re-size device %d, volume %d to from %d to %d PEBs",
+ ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
+
+ if (vol->vol_type == UBI_STATIC_VOLUME &&
+ reserved_pebs < vol->used_ebs) {
+ ubi_err(ubi, "too small size %d, %d LEBs contain data",
+ reserved_pebs, vol->used_ebs);
+ return -EINVAL;
+ }
+
+ /* If the size is the same, we have nothing to do */
+ if (reserved_pebs == vol->reserved_pebs)
+ return 0;
+
+ new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL);
+ if (!new_mapping)
+ return -ENOMEM;
+
+ for (i = 0; i < reserved_pebs; i++)
+ new_mapping[i] = UBI_LEB_UNMAPPED;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ spin_unlock(&ubi->volumes_lock);
+ err = -EBUSY;
+ goto out_free;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ /* Reserve physical eraseblocks */
+ pebs = reserved_pebs - vol->reserved_pebs;
+ if (pebs > 0) {
+ spin_lock(&ubi->volumes_lock);
+ if (pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "not enough PEBs: requested %d, available %d",
+ pebs, ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ spin_unlock(&ubi->volumes_lock);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= pebs;
+ ubi->rsvd_pebs += pebs;
+ for (i = 0; i < vol->reserved_pebs; i++)
+ new_mapping[i] = vol->eba_tbl[i];
+ kfree(vol->eba_tbl);
+ vol->eba_tbl = new_mapping;
+ spin_unlock(&ubi->volumes_lock);
+ }
+
+ /* Change volume table record */
+ vtbl_rec = ubi->vtbl[vol_id];
+ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_acc;
+
+ if (pebs < 0) {
+ for (i = 0; i < -pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
+ if (err)
+ goto out_acc;
+ }
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs += pebs;
+ ubi->avail_pebs -= pebs;
+ ubi_update_reserved(ubi);
+ for (i = 0; i < reserved_pebs; i++)
+ new_mapping[i] = vol->eba_tbl[i];
+ kfree(vol->eba_tbl);
+ vol->eba_tbl = new_mapping;
+ spin_unlock(&ubi->volumes_lock);
+ }
+
+ vol->reserved_pebs = reserved_pebs;
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ }
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
+ self_check_volumes(ubi);
+ return err;
+
+out_acc:
+ if (pebs > 0) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= pebs;
+ ubi->avail_pebs += pebs;
+ spin_unlock(&ubi->volumes_lock);
+ }
+out_free:
+ kfree(new_mapping);
+ return err;
+}
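+
+/*
+ * A minimal user-space sketch of the resize path, assuming the standard
+ * UBI_IOCRSVOL ioctl and struct ubi_rsvol_req from <mtd/ubi-user.h>; the
+ * volume ID and size are hypothetical and error handling is trimmed:
+ *
+ *	struct ubi_rsvol_req req = {
+ *		.vol_id = 0,
+ *		.bytes  = 2 * 1024 * 1024,	// new size in bytes
+ *	};
+ *	ioctl(fd, UBI_IOCRSVOL, &req);		// ends up in ubi_resize_volume()
+ */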
+
+/**
+ * ubi_rename_volumes - re-name UBI volumes.
+ * @ubi: UBI device description object
+ * @rename_list: list of &struct ubi_rename_entry objects
+ *
+ * This function re-names or removes volumes specified in the re-name list.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
+{
+ int err;
+ struct ubi_rename_entry *re;
+
+ err = ubi_vtbl_rename_volumes(ubi, rename_list);
+ if (err)
+ return err;
+
+ list_for_each_entry(re, rename_list, list) {
+ if (re->remove) {
+ err = ubi_remove_volume(re->desc, 1);
+ if (err)
+ break;
+ } else {
+ struct ubi_volume *vol = re->desc->vol;
+
+ spin_lock(&ubi->volumes_lock);
+ vol->name_len = re->new_name_len;
+ memcpy(vol->name, re->new_name, re->new_name_len + 1);
+ spin_unlock(&ubi->volumes_lock);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
+ }
+ }
+
+ if (!err)
+ self_check_volumes(ubi);
+ return err;
+}
+
+/**
+ * ubi_add_volume - add volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function adds an existing volume and initializes all its data
+ * structures. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err, vol_id = vol->vol_id;
+ dev_t dev;
+
+ dbg_gen("add volume %d", vol_id);
+
+ /* Register character device for the volume */
+ cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
+ vol->cdev.owner = THIS_MODULE;
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
+ if (err) {
+ ubi_err(ubi, "cannot add character device for volume %d, error %d",
+ vol_id, err);
+ return err;
+ }
+
+ vol->dev.release = vol_release;
+ vol->dev.parent = &ubi->dev;
+ vol->dev.devt = dev;
+ vol->dev.class = ubi_class;
+ dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ err = device_register(&vol->dev);
+ if (err)
+ goto out_cdev;
+
+ err = volume_sysfs_init(ubi, vol);
+ if (err) {
+ cdev_del(&vol->cdev);
+ volume_sysfs_close(vol);
+ return err;
+ }
+
+ self_check_volumes(ubi);
+ return err;
+
+out_cdev:
+ cdev_del(&vol->cdev);
+ return err;
+}
+
+/**
+ * ubi_free_volume - free volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function frees all resources for volume @vol but does not remove it.
+ * Used only when the UBI device is detached.
+ */
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ dbg_gen("free volume %d", vol->vol_id);
+
+ ubi->volumes[vol->vol_id] = NULL;
+ cdev_del(&vol->cdev);
+ volume_sysfs_close(vol);
+}
+
+/**
+ * self_check_volume - check volume information.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ *
+ * Returns zero if the volume is all right and a negative error code if not.
+ */
+static int self_check_volume(struct ubi_device *ubi, int vol_id)
+{
+ int idx = vol_id2idx(ubi, vol_id);
+ int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
+ const struct ubi_volume *vol;
+ long long n;
+ const char *name;
+
+ spin_lock(&ubi->volumes_lock);
+ reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
+ vol = ubi->volumes[idx];
+
+ if (!vol) {
+ if (reserved_pebs) {
+ ubi_err(ubi, "no volume info, but volume exists");
+ goto fail;
+ }
+ spin_unlock(&ubi->volumes_lock);
+ return 0;
+ }
+
+ if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
+ vol->name_len < 0) {
+ ubi_err(ubi, "negative values");
+ goto fail;
+ }
+ if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
+ ubi_err(ubi, "bad alignment");
+ goto fail;
+ }
+
+ n = vol->alignment & (ubi->min_io_size - 1);
+ if (vol->alignment != 1 && n) {
+ ubi_err(ubi, "alignment is not multiple of min I/O unit");
+ goto fail;
+ }
+
+ n = ubi->leb_size % vol->alignment;
+ if (vol->data_pad != n) {
+ ubi_err(ubi, "bad data_pad, has to be %lld", n);
+ goto fail;
+ }
+
+ if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
+ vol->vol_type != UBI_STATIC_VOLUME) {
+ ubi_err(ubi, "bad vol_type");
+ goto fail;
+ }
+
+ if (vol->upd_marker && vol->corrupted) {
+ ubi_err(ubi, "update marker and corrupted simultaneously");
+ goto fail;
+ }
+
+ if (vol->reserved_pebs > ubi->good_peb_count) {
+ ubi_err(ubi, "too large reserved_pebs");
+ goto fail;
+ }
+
+ n = ubi->leb_size - vol->data_pad;
+ if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
+ ubi_err(ubi, "bad usable_leb_size, has to be %lld", n);
+ goto fail;
+ }
+
+ if (vol->name_len > UBI_VOL_NAME_MAX) {
+ ubi_err(ubi, "too long volume name, max is %d",
+ UBI_VOL_NAME_MAX);
+ goto fail;
+ }
+
+ n = strnlen(vol->name, vol->name_len + 1);
+ if (n != vol->name_len) {
+ ubi_err(ubi, "bad name_len %lld", n);
+ goto fail;
+ }
+
+ n = (long long)vol->used_ebs * vol->usable_leb_size;
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ if (vol->corrupted) {
+ ubi_err(ubi, "corrupted dynamic volume");
+ goto fail;
+ }
+ if (vol->used_ebs != vol->reserved_pebs) {
+ ubi_err(ubi, "bad used_ebs");
+ goto fail;
+ }
+ if (vol->last_eb_bytes != vol->usable_leb_size) {
+ ubi_err(ubi, "bad last_eb_bytes");
+ goto fail;
+ }
+ if (vol->used_bytes != n) {
+ ubi_err(ubi, "bad used_bytes");
+ goto fail;
+ }
+ } else {
+ if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
+ ubi_err(ubi, "bad used_ebs");
+ goto fail;
+ }
+ if (vol->last_eb_bytes < 0 ||
+ vol->last_eb_bytes > vol->usable_leb_size) {
+ ubi_err(ubi, "bad last_eb_bytes");
+ goto fail;
+ }
+ if (vol->used_bytes < 0 || vol->used_bytes > n ||
+ vol->used_bytes < n - vol->usable_leb_size) {
+ ubi_err(ubi, "bad used_bytes");
+ goto fail;
+ }
+ }
+
+ alignment = be32_to_cpu(ubi->vtbl[vol_id].alignment);
+ data_pad = be32_to_cpu(ubi->vtbl[vol_id].data_pad);
+ name_len = be16_to_cpu(ubi->vtbl[vol_id].name_len);
+ upd_marker = ubi->vtbl[vol_id].upd_marker;
+ name = &ubi->vtbl[vol_id].name[0];
+ if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
+ vol_type = UBI_DYNAMIC_VOLUME;
+ else
+ vol_type = UBI_STATIC_VOLUME;
+
+ if (alignment != vol->alignment || data_pad != vol->data_pad ||
+ upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
+ name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
+ ubi_err(ubi, "volume info is different");
+ goto fail;
+ }
+
+ spin_unlock(&ubi->volumes_lock);
+ return 0;
+
+fail:
+ ubi_err(ubi, "self-check failed for volume %d", vol_id);
+ if (vol)
+ ubi_dump_vol_info(vol);
+ ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ dump_stack();
+ spin_unlock(&ubi->volumes_lock);
+ return -EINVAL;
+}
+
+/**
+ * self_check_volumes - check information about all volumes.
+ * @ubi: UBI device description object
+ *
+ * Returns zero if all volumes are all right and a negative error code if not.
+ */
+static int self_check_volumes(struct ubi_device *ubi)
+{
+ int i, err = 0;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ err = self_check_volume(ubi, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
new file mode 100644
index 000000000..68c9c5ea6
--- /dev/null
+++ b/drivers/mtd/ubi/vtbl.c
@@ -0,0 +1,864 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file includes volume table manipulation code. The volume table is an
+ * on-flash table containing volume meta-data like name, number of reserved
+ * physical eraseblocks, type, etc. The volume table is stored in the so-called
+ * "layout volume".
+ *
+ * The layout volume is an internal volume which is organized as follows. It
+ * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical
+ * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
+ * other. This redundancy guarantees robustness to unclean reboots. The volume
+ * table is basically an array of volume table records. Each record contains
+ * full information about the volume and is protected by a CRC checksum. Note,
+ * nowadays we use the atomic LEB change operation when updating the volume
+ * table, so we do not really need 2 LEBs anymore, but we preserve the older
+ * design for backward compatibility.
+ *
+ * When the volume table is changed, it is first changed in RAM. Then LEB 0 is
+ * erased, and the updated volume table is written back to LEB 0. The same is
+ * then done for LEB 1. This scheme guarantees recoverability from unclean
+ * reboots.
+ *
+ * In this UBI implementation the on-flash volume table does not contain any
+ * information about how much data static volumes contain.
+ *
+ * But it would still be beneficial to store this information in the volume
+ * table. For example, suppose we have a static volume X, and all its physical
+ * eraseblocks became bad for some reason. Suppose we are attaching the
+ * corresponding MTD device and, for some reason, we find no logical
+ * eraseblocks corresponding to volume X. According to the volume table,
+ * volume X exists. So we do not know whether it is just empty or all its
+ * physical eraseblocks went bad, and we cannot alert the user properly.
+ *
+ * The volume table also stores so-called "update marker", which is used for
+ * volume updates. Before updating the volume, the update marker is set, and
+ * after the update operation is finished, the update marker is cleared. So if
+ * the update operation was interrupted (e.g. by an unclean reboot) - the
+ * update marker is still there and we know that the volume's contents are
+ * damaged.
+ */
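+
+/*
+ * Schematic on-flash layout of the layout volume (the number of records per
+ * copy depends on the eraseblock size; N is just an illustration):
+ *
+ *   LEB 0: [record 0][record 1] ... [record N-1]   <- volume table copy #1
+ *   LEB 1: identical contents                      <- volume table copy #2
+ */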
+
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <asm/div64.h>
+#include "ubi.h"
+
+static void self_vtbl_check(const struct ubi_device *ubi);
+
+/* Empty volume table record */
+static struct ubi_vtbl_record empty_vtbl_record;
+
+/**
+ * ubi_change_vtbl_record - change volume table record.
+ * @ubi: UBI device description object
+ * @idx: table index to change
+ * @vtbl_rec: new volume table record
+ *
+ * This function changes volume table record @idx. If @vtbl_rec is %NULL, an
+ * empty volume table record is written. The caller does not have to calculate
+ * the CRC of the record as it is done by this function. Returns zero in case
+ * of success
+ * and a negative error code in case of failure.
+ */
+int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
+ struct ubi_vtbl_record *vtbl_rec)
+{
+ int i, err;
+ uint32_t crc;
+ struct ubi_volume *layout_vol;
+
+ ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
+ layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
+
+ if (!vtbl_rec)
+ vtbl_rec = &empty_vtbl_record;
+ else {
+ crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
+ vtbl_rec->crc = cpu_to_be32(crc);
+ }
+
+ memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
+ ubi->vtbl_size);
+ if (err)
+ return err;
+ }
+
+ self_vtbl_check(ubi);
+ return 0;
+}
+
+/**
+ * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table.
+ * @ubi: UBI device description object
+ * @rename_list: list of &struct ubi_rename_entry objects
+ *
+ * This function re-names multiple volumes specified in @rename_list in the
+ * volume
+ * table. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
+ struct list_head *rename_list)
+{
+ int i, err;
+ struct ubi_rename_entry *re;
+ struct ubi_volume *layout_vol;
+
+ list_for_each_entry(re, rename_list, list) {
+ uint32_t crc;
+ struct ubi_volume *vol = re->desc->vol;
+ struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id];
+
+ if (re->remove) {
+ memcpy(vtbl_rec, &empty_vtbl_record,
+ sizeof(struct ubi_vtbl_record));
+ continue;
+ }
+
+ vtbl_rec->name_len = cpu_to_be16(re->new_name_len);
+ memcpy(vtbl_rec->name, re->new_name, re->new_name_len);
+ memset(vtbl_rec->name + re->new_name_len, 0,
+ UBI_VOL_NAME_MAX + 1 - re->new_name_len);
+ crc = crc32(UBI_CRC32_INIT, vtbl_rec,
+ UBI_VTBL_RECORD_SIZE_CRC);
+ vtbl_rec->crc = cpu_to_be32(crc);
+ }
+
+ layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
+ ubi->vtbl_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * vtbl_check - check that the volume table is not corrupted and contains
+ * sensible data.
+ * @ubi: UBI device description object
+ * @vtbl: volume table
+ *
+ * This function returns zero if @vtbl is all right, %1 if CRC is incorrect,
+ * and %-EINVAL if it contains inconsistent data.
+ */
+static int vtbl_check(const struct ubi_device *ubi,
+ const struct ubi_vtbl_record *vtbl)
+{
+ int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len;
+ int upd_marker, err;
+ uint32_t crc;
+ const char *name;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ cond_resched();
+
+ reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ alignment = be32_to_cpu(vtbl[i].alignment);
+ data_pad = be32_to_cpu(vtbl[i].data_pad);
+ upd_marker = vtbl[i].upd_marker;
+ vol_type = vtbl[i].vol_type;
+ name_len = be16_to_cpu(vtbl[i].name_len);
+ name = &vtbl[i].name[0];
+
+ crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
+ if (be32_to_cpu(vtbl[i].crc) != crc) {
+ ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x",
+ i, crc, be32_to_cpu(vtbl[i].crc));
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ return 1;
+ }
+
+ if (reserved_pebs == 0) {
+ if (memcmp(&vtbl[i], &empty_vtbl_record,
+ UBI_VTBL_RECORD_SIZE)) {
+ err = 2;
+ goto bad;
+ }
+ continue;
+ }
+
+ if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 ||
+ name_len < 0) {
+ err = 3;
+ goto bad;
+ }
+
+ if (alignment > ubi->leb_size || alignment == 0) {
+ err = 4;
+ goto bad;
+ }
+
+ n = alignment & (ubi->min_io_size - 1);
+ if (alignment != 1 && n) {
+ err = 5;
+ goto bad;
+ }
+
+ n = ubi->leb_size % alignment;
+ if (data_pad != n) {
+ ubi_err(ubi, "bad data_pad, has to be %d", n);
+ err = 6;
+ goto bad;
+ }
+
+ if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
+ err = 7;
+ goto bad;
+ }
+
+ if (upd_marker != 0 && upd_marker != 1) {
+ err = 8;
+ goto bad;
+ }
+
+ if (reserved_pebs > ubi->good_peb_count) {
+ ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d",
+ reserved_pebs, ubi->good_peb_count);
+ err = 9;
+ goto bad;
+ }
+
+ if (name_len > UBI_VOL_NAME_MAX) {
+ err = 10;
+ goto bad;
+ }
+
+ if (name[0] == '\0') {
+ err = 11;
+ goto bad;
+ }
+
+ if (name_len != strnlen(name, name_len + 1)) {
+ err = 12;
+ goto bad;
+ }
+ }
+
+ /* Checks that all names are unique */
+ for (i = 0; i < ubi->vtbl_slots - 1; i++) {
+ for (n = i + 1; n < ubi->vtbl_slots; n++) {
+ int len1 = be16_to_cpu(vtbl[i].name_len);
+ int len2 = be16_to_cpu(vtbl[n].name_len);
+
+ if (len1 > 0 && len1 == len2 &&
+ !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
+ ubi_err(ubi, "volumes %d and %d have the same name \"%s\"",
+ i, n, vtbl[i].name);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[n], n);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "volume table check failed: record %d, error %d", i, err);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ return -EINVAL;
+}
+
+/**
+ * create_vtbl - create a copy of the volume table.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @copy: number of the volume table copy
+ * @vtbl: contents of the volume table
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int copy, void *vtbl)
+{
+ int err, tries = 0;
+ struct ubi_vid_hdr *vid_hdr;
+ struct ubi_ainf_peb *new_aeb;
+
+ dbg_gen("create volume table (copy #%d)", copy + 1);
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+retry:
+ new_aeb = ubi_early_get_peb(ubi, ai);
+ if (IS_ERR(new_aeb)) {
+ err = PTR_ERR(new_aeb);
+ goto out_free;
+ }
+
+ vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
+ vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
+ vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
+ vid_hdr->data_size = vid_hdr->used_ebs =
+ vid_hdr->data_pad = cpu_to_be32(0);
+ vid_hdr->lnum = cpu_to_be32(copy);
+ vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
+
+ /* The EC header is already there, write the VID header */
+ err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
+ if (err)
+ goto write_error;
+
+ /* Write the layout volume contents */
+ err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
+ if (err)
+ goto write_error;
+
+ /*
+	 * And add it to the attaching information. There is no need to delete
+	 * the old version of this LEB here - it will be deleted and freed in
+	 * 'ubi_add_to_av()'.
+ */
+ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+write_error:
+ if (err == -EIO && ++tries <= 5) {
+ /*
+ * Probably this physical eraseblock went bad, try to pick
+ * another one.
+ */
+ list_add(&new_aeb->u.list, &ai->erase);
+ goto retry;
+ }
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+out_free:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+}
+
+/**
+ * process_lvol - process the layout volume.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @av: layout volume attaching information
+ *
+ * This function is responsible for reading the layout volume, ensuring it is
+ * not corrupted, and recovering from corruptions if needed. Returns the volume
+ * table in case of success and a negative error code in case of failure.
+ */
+static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av)
+{
+ int err;
+ struct rb_node *rb;
+ struct ubi_ainf_peb *aeb;
+ struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
+ int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
+
+ /*
+ * UBI goes through the following steps when it changes the layout
+ * volume:
+ * a. erase LEB 0;
+ * b. write new data to LEB 0;
+ * c. erase LEB 1;
+ * d. write new data to LEB 1.
+ *
+ * Before the change, both LEBs contain the same data.
+ *
+	 * Due to unclean reboots, the contents of LEB 0 may be lost, but LEB 1
+	 * should still be intact. So it is OK if LEB 0 is corrupted while LEB 1
+	 * is not. Similarly, LEB 1 may be lost, but LEB 0 should still be there. And
+ * finally, unclean reboots may result in a situation when neither LEB
+ * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB
+ * 0 contains more recent information.
+ *
+ * So the plan is to first check LEB 0. Then
+	 * a. if LEB 0 is OK, it must contain the most recent data; then we
+	 *    compare it with LEB 1, and if they are different, we copy LEB 0
+	 *    to LEB 1;
+	 * b. if LEB 0 is corrupted, LEB 1 has to be OK; we then copy LEB 1 to
+	 *    LEB 0.
+ */
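+
+	/*
+	 * The same plan, as a decision table:
+	 *
+	 *   LEB 0 OK,  LEB 1 identical       -> use LEB 0
+	 *   LEB 0 OK,  LEB 1 differs/missing -> restore copy #2 from LEB 0
+	 *   LEB 0 bad, LEB 1 OK              -> restore copy #1 from LEB 1
+	 *   LEB 0 bad, LEB 1 bad             -> fail, nothing recoverable
+	 */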
+
+ dbg_gen("check layout volume");
+
+ /* Read both LEB 0 and LEB 1 into memory */
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
+ if (!leb[aeb->lnum]) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
+ ubi->vtbl_size);
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
+ /*
+ * Scrub the PEB later. Note, -EBADMSG indicates an
+ * uncorrectable ECC error, but we have our own CRC and
+ * the data will be checked later. If the data is OK,
+ * the PEB will be scrubbed (because we set
+ * aeb->scrub). If the data is not OK, the contents of
+ * the PEB will be recovered from the second copy, and
+ * aeb->scrub will be cleared in
+ * 'ubi_add_to_av()'.
+ */
+ aeb->scrub = 1;
+ else if (err)
+ goto out_free;
+ }
+
+ err = -EINVAL;
+ if (leb[0]) {
+ leb_corrupted[0] = vtbl_check(ubi, leb[0]);
+ if (leb_corrupted[0] < 0)
+ goto out_free;
+ }
+
+ if (!leb_corrupted[0]) {
+ /* LEB 0 is OK */
+ if (leb[1])
+ leb_corrupted[1] = memcmp(leb[0], leb[1],
+ ubi->vtbl_size);
+ if (leb_corrupted[1]) {
+ ubi_warn(ubi, "volume table copy #2 is corrupted");
+ err = create_vtbl(ubi, ai, 1, leb[0]);
+ if (err)
+ goto out_free;
+ ubi_msg(ubi, "volume table was restored");
+ }
+
+		/* Both LEB 0 and LEB 1 are OK and consistent */
+ vfree(leb[1]);
+ return leb[0];
+ } else {
+ /* LEB 0 is corrupted or does not exist */
+ if (leb[1]) {
+ leb_corrupted[1] = vtbl_check(ubi, leb[1]);
+ if (leb_corrupted[1] < 0)
+ goto out_free;
+ }
+ if (leb_corrupted[1]) {
+ /* Both LEB 0 and LEB 1 are corrupted */
+ ubi_err(ubi, "both volume tables are corrupted");
+ goto out_free;
+ }
+
+ ubi_warn(ubi, "volume table copy #1 is corrupted");
+ err = create_vtbl(ubi, ai, 0, leb[1]);
+ if (err)
+ goto out_free;
+ ubi_msg(ubi, "volume table was restored");
+
+ vfree(leb[0]);
+ return leb[1];
+ }
+
+out_free:
+ vfree(leb[0]);
+ vfree(leb[1]);
+ return ERR_PTR(err);
+}
+
+/**
+ * create_empty_lvol - create empty layout volume.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns the volume table contents in case of success and a
+ * negative error code in case of failure.
+ */
+static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ int i;
+ struct ubi_vtbl_record *vtbl;
+
+ vtbl = vzalloc(ubi->vtbl_size);
+ if (!vtbl)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
+
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ int err;
+
+ err = create_vtbl(ubi, ai, i, vtbl);
+ if (err) {
+ vfree(vtbl);
+ return ERR_PTR(err);
+ }
+ }
+
+ return vtbl;
+}
+
+/**
+ * init_volumes - initialize volume information for existing volumes.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @vtbl: volume table
+ *
+ * This function allocates volume description objects for existing volumes.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int init_volumes(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai,
+ const struct ubi_vtbl_record *vtbl)
+{
+ int i, reserved_pebs = 0;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ cond_resched();
+
+ if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
+ continue; /* Empty record */
+
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ vol->alignment = be32_to_cpu(vtbl[i].alignment);
+ vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
+ vol->upd_marker = vtbl[i].upd_marker;
+ vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
+ UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+ vol->name_len = be16_to_cpu(vtbl[i].name_len);
+ vol->usable_leb_size = ubi->leb_size - vol->data_pad;
+ memcpy(vol->name, vtbl[i].name, vol->name_len);
+ vol->name[vol->name_len] = '\0';
+ vol->vol_id = i;
+
+ if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
+ /* Auto re-size flag may be set only for one volume */
+ if (ubi->autoresize_vol_id != -1) {
+ ubi_err(ubi, "more than one auto-resize volume (%d and %d)",
+ ubi->autoresize_vol_id, i);
+ kfree(vol);
+ return -EINVAL;
+ }
+
+ ubi->autoresize_vol_id = i;
+ }
+
+ ubi_assert(!ubi->volumes[i]);
+ ubi->volumes[i] = vol;
+ ubi->vol_count += 1;
+ vol->ubi = ubi;
+ reserved_pebs += vol->reserved_pebs;
+
+ /*
+		 * In case of a dynamic volume, UBI knows nothing about how much
+		 * data is stored there. So assume the whole volume is used.
+ */
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ continue;
+ }
+
+ /* Static volumes only */
+ av = ubi_find_av(ai, i);
+ if (!av || !av->leb_count) {
+ /*
+ * No eraseblocks belonging to this volume found. We
+ * don't actually know whether this static volume is
+ * completely corrupted or just contains no data. And
+			 * we cannot know this as long as the data size is not
+ * stored on flash. So we just assume the volume is
+ * empty. FIXME: this should be handled.
+ */
+ continue;
+ }
+
+ if (av->leb_count != av->used_ebs) {
+ /*
+ * We found a static volume which misses several
+ * eraseblocks. Treat it as corrupted.
+ */
+ ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted",
+ av->vol_id, av->used_ebs - av->leb_count);
+ vol->corrupted = 1;
+ continue;
+ }
+
+ vol->used_ebs = av->used_ebs;
+ vol->used_bytes =
+ (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
+ vol->used_bytes += av->last_data_size;
+ vol->last_eb_bytes = av->last_data_size;
+ }
+
+ /* And add the layout volume */
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
+ vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
+ vol->vol_type = UBI_DYNAMIC_VOLUME;
+ vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
+ memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
+ vol->usable_leb_size = ubi->leb_size;
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->reserved_pebs;
+ vol->used_bytes =
+ (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
+ vol->vol_id = UBI_LAYOUT_VOLUME_ID;
+ vol->ref_count = 1;
+
+ ubi_assert(!ubi->volumes[i]);
+ ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
+ reserved_pebs += vol->reserved_pebs;
+ ubi->vol_count += 1;
+ vol->ubi = ubi;
+
+ if (reserved_pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "not enough PEBs, required %d, available %d",
+ reserved_pebs, ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ }
+ ubi->rsvd_pebs += reserved_pebs;
+ ubi->avail_pebs -= reserved_pebs;
+
+ return 0;
+}
+
+/**
+ * check_av - check volume attaching information.
+ * @vol: UBI volume description object
+ * @av: volume attaching information
+ *
+ * This function returns zero if the volume attaching information is
+ * consistent with the data read from the volume table, and %-EINVAL if not.
+ */
+static int check_av(const struct ubi_volume *vol,
+ const struct ubi_ainf_volume *av)
+{
+ int err;
+
+ if (av->highest_lnum >= vol->reserved_pebs) {
+ err = 1;
+ goto bad;
+ }
+ if (av->leb_count > vol->reserved_pebs) {
+ err = 2;
+ goto bad;
+ }
+ if (av->vol_type != vol->vol_type) {
+ err = 3;
+ goto bad;
+ }
+ if (av->used_ebs > vol->reserved_pebs) {
+ err = 4;
+ goto bad;
+ }
+ if (av->data_pad != vol->data_pad) {
+ err = 5;
+ goto bad;
+ }
+ return 0;
+
+bad:
+ ubi_err(vol->ubi, "bad attaching information, error %d", err);
+ ubi_dump_av(av);
+ ubi_dump_vol_info(vol);
+ return -EINVAL;
+}
+
+/**
+ * check_attaching_info - check the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * Even though we protect on-flash data by CRC checksums, we still don't trust
+ * the media. This function ensures that the attaching information is consistent
+ * with the information read from the volume table. Returns zero if the attaching
+ * information is OK and %-EINVAL if it is not.
+ */
+static int check_attaching_info(const struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ int err, i;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+
+ if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+ ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d",
+ ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+ return -EINVAL;
+ }
+
+ if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+ ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err(ubi, "too large volume ID %d found",
+ ai->highest_vol_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ cond_resched();
+
+ av = ubi_find_av(ai, i);
+ vol = ubi->volumes[i];
+ if (!vol) {
+ if (av)
+ ubi_remove_av(ai, av);
+ continue;
+ }
+
+ if (vol->reserved_pebs == 0) {
+ ubi_assert(i < ubi->vtbl_slots);
+
+ if (!av)
+ continue;
+
+ /*
+ * During attaching we found a volume which does not
+ * exist according to the information in the volume
+ * table. This must have happened due to an unclean
+ * reboot while the volume was being removed. Discard
+ * these eraseblocks.
+ */
+ ubi_msg(ubi, "finish volume %d removal", av->vol_id);
+ ubi_remove_av(ai, av);
+ } else if (av) {
+ err = check_av(vol, av);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_read_volume_table - read the volume table.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function reads the volume table, checks it, recovers from errors if needed,
+ * or creates it if needed. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int i, err;
+ struct ubi_ainf_volume *av;
+
+ empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
+
+ /*
+ * The number of supported volumes is limited by the eraseblock size
+ * and by the UBI_MAX_VOLUMES constant.
+ */
+ ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
+ if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
+ ubi->vtbl_slots = UBI_MAX_VOLUMES;
+
+ ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
+ ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
+
+ av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
+ if (!av) {
+ /*
+ * No logical eraseblocks belonging to the layout volume were
+ * found. This could mean that the flash is just empty. In
+		 * this case we create an empty layout volume.
+ *
+		 * But if the flash is not empty, this must be corruption or the
+ * MTD device just contains garbage.
+ */
+ if (ai->is_empty) {
+ ubi->vtbl = create_empty_lvol(ubi, ai);
+ if (IS_ERR(ubi->vtbl))
+ return PTR_ERR(ubi->vtbl);
+ } else {
+ ubi_err(ubi, "the layout volume was not found");
+ return -EINVAL;
+ }
+ } else {
+ if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+ /* This must not happen with proper UBI images */
+ ubi_err(ubi, "too many LEBs (%d) in layout volume",
+ av->leb_count);
+ return -EINVAL;
+ }
+
+ ubi->vtbl = process_lvol(ubi, ai, av);
+ if (IS_ERR(ubi->vtbl))
+ return PTR_ERR(ubi->vtbl);
+ }
+
+ ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count;
+
+ /*
+ * The layout volume is OK, initialize the corresponding in-RAM data
+ * structures.
+ */
+ err = init_volumes(ubi, ai, ubi->vtbl);
+ if (err)
+ goto out_free;
+
+ /*
+ * Make sure that the attaching information is consistent to the
+ * information stored in the volume table.
+ */
+ err = check_attaching_info(ubi, ai);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ vfree(ubi->vtbl);
+ for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ kfree(ubi->volumes[i]);
+ ubi->volumes[i] = NULL;
+ }
+ return err;
+}
+
+/**
+ * self_vtbl_check - check volume table.
+ * @ubi: UBI device description object
+ */
+static void self_vtbl_check(const struct ubi_device *ubi)
+{
+ if (!ubi_dbg_chk_gen(ubi))
+ return;
+
+ if (vtbl_check(ubi, ubi->vtbl)) {
+ ubi_err(ubi, "self-check failed");
+ BUG();
+ }
+}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
new file mode 100644
index 000000000..16214d3d5
--- /dev/null
+++ b/drivers/mtd/ubi/wl.c
@@ -0,0 +1,1844 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
+ */
+
+/*
+ * UBI wear-leveling sub-system.
+ *
+ * This sub-system is responsible for wear-leveling. It works in terms of
+ * physical eraseblocks and erase counters and knows nothing about logical
+ * eraseblocks, volumes, etc. From this sub-system's perspective all physical
+ * eraseblocks are of two types - used and free. Used physical eraseblocks are
+ * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
+ * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
+ *
+ * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
+ * counter header. The rest of the physical eraseblock contains only %0xFF bytes.
+ *
+ * When physical eraseblocks are returned to the WL sub-system by means of the
+ * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
+ * done asynchronously in the context of the per-UBI device background thread,
+ * which is also managed by the WL sub-system.
+ *
+ * The wear-leveling is ensured by means of moving the contents of used
+ * physical eraseblocks with low erase counter to free physical eraseblocks
+ * with high erase counter.
+ *
+ * If the WL sub-system fails to erase a physical eraseblock, it marks it as
+ * bad.
+ *
+ * This sub-system is also responsible for scrubbing. If a bit-flip is detected
+ * in a physical eraseblock, it has to be moved. Technically this is the same
+ * as moving it for wear-leveling reasons.
+ *
+ * As it was said, for the UBI sub-system all physical eraseblocks are either
+ * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
+ * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
+ * RB-trees, as well as (temporarily) in the @wl->pq queue.
+ *
+ * When the WL sub-system returns a physical eraseblock, the physical
+ * eraseblock is protected from being moved for some "time". For this reason,
+ * the physical eraseblock is not directly moved from the @wl->free tree to the
+ * @wl->used tree. There is a protection queue in between where this
+ * physical eraseblock is temporarily stored (@wl->pq).
+ *
+ * All this protection stuff is needed because:
+ * o we don't want to move physical eraseblocks just after we have given them
+ * to the user; instead, we first want to let users fill them up with data;
+ *
+ * o there is a chance that the user will put the physical eraseblock very
+ * soon, so it makes sense not to move it for some time, but wait.
+ *
+ * Physical eraseblocks stay protected only for a limited time. But the "time" is
+ * measured in erase cycles in this case. This is implemented with the help of the
+ * protection queue. Eraseblocks are put to the tail of this queue when they
+ * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
+ * head of the queue on each erase operation (for any eraseblock). So the
+ * length of the queue defines how many (global) erase cycles PEBs are protected.
+ *
+ * To put it differently, each physical eraseblock has 2 main states: free and
+ * used. The former state corresponds to the @wl->free tree. The latter state
+ * is split up into several sub-states:
+ * o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
+ * erroneous - e.g., there was a read error;
+ * o the WL movement is temporarily prohibited (@wl->pq queue);
+ * o scrubbing is needed (@wl->scrub tree).
+ *
+ * Depending on the sub-state, wear-leveling entries of the used physical
+ * eraseblocks may be kept in one of those structures.
+ *
+ * Note, in this implementation, we keep a small in-RAM object for each physical
+ * eraseblock. This is surely not a scalable solution. But it appears to be good
+ * enough for moderately large flashes and it is simple. In future, one may
+ * re-work this sub-system and make it more scalable.
+ *
+ * At the moment this sub-system does not utilize the sequence number, which
+ * was introduced relatively recently. But it would be wise to do this because
+ * the sequence number of a logical eraseblock characterizes how old it is. For
+ * example, when we move a PEB with a low erase counter, and we need to pick the
+ * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
+ * pick a target PEB with an average EC if our PEB is not very "old". This
+ * leaves room for future re-work of the WL sub-system.
+ */
+
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include "ubi.h"
+#include "wl.h"
+
+/* Number of physical eraseblocks reserved for wear-leveling purposes */
+#define WL_RESERVED_PEBS 1
+
+/*
+ * Maximum difference between two erase counters. If this threshold is
+ * exceeded, the WL sub-system starts moving data from used physical
+ * eraseblocks with low erase counter to free physical eraseblocks with high
+ * erase counter.
+ */
+#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
+
+/*
+ * When a physical eraseblock is moved, the WL sub-system has to pick the target
+ * physical eraseblock to move to. The simplest way would be just to pick the
+ * one with the highest erase counter. But in certain workloads this could lead
+ * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
+ * situation when the picked physical eraseblock is constantly erased after the
+ * data is written to it. So, we have a constant which limits the highest erase
+ * counter of the free physical eraseblock to pick. Namely, the WL sub-system
+ * does not pick eraseblocks with erase counter greater than the lowest erase
+ * counter plus %WL_FREE_MAX_DIFF.
+ */
+#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
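+
+/*
+ * Illustrative example (assuming the Kconfig default UBI_WL_THRESHOLD of
+ * 4096): WL_FREE_MAX_DIFF is then 8192, so if the least worn free PEB has
+ * erase counter 100, no free PEB with erase counter >= 8292 is picked as
+ * a wear-leveling target.
+ */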
+
+/*
+ * Maximum number of consecutive background thread failures which is enough to
+ * switch to read-only mode.
+ */
+#define WL_MAX_FAILURES 32
+
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root);
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e);
+
+/**
+ * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
+ * @e: the wear-leveling entry to add
+ * @root: the root of the tree
+ *
+ * Note, we use (erase counter, physical eraseblock number) pairs as keys in
+ * the @ubi->used and @ubi->free RB-trees.
+ */
+static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
+{
+ struct rb_node **p, *parent = NULL;
+
+ p = &root->rb_node;
+ while (*p) {
+ struct ubi_wl_entry *e1;
+
+ parent = *p;
+ e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
+
+ if (e->ec < e1->ec)
+ p = &(*p)->rb_left;
+ else if (e->ec > e1->ec)
+ p = &(*p)->rb_right;
+ else {
+ ubi_assert(e->pnum != e1->pnum);
+ if (e->pnum < e1->pnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ }
+
+ rb_link_node(&e->u.rb, parent, p);
+ rb_insert_color(&e->u.rb, root);
+}
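+
+/*
+ * Example (illustrative): with (erase counter, PEB number) keys, the
+ * entries (10, 3), (10, 7) and (12, 1) are ordered
+ * (10, 3) < (10, 7) < (12, 1) - the erase counter is compared first and
+ * the PEB number only breaks ties between equal counters.
+ */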
+
+/**
+ * wl_entry_destroy - destroy a wear-leveling entry.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to destroy
+ *
+ * This function destroys a wear leveling entry and removes
+ * the reference from the lookup table.
+ */
+static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ ubi->lookuptbl[e->pnum] = NULL;
+ kmem_cache_free(ubi_wl_entry_slab, e);
+}
+
+/**
+ * do_work - do one pending work.
+ * @ubi: UBI device description object
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int do_work(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_work *wrk;
+
+ cond_resched();
+
+ /*
+ * @ubi->work_sem is used to synchronize with the workers. Workers take
+	 * it in read mode, so several of them may be doing work at a time. But
+	 * the queue flush code has to be sure the whole queue of works is
+	 * done, and it takes @ubi->work_sem in write mode.
+ */
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ if (list_empty(&ubi->works)) {
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
+ return 0;
+ }
+
+ wrk = list_entry(ubi->works.next, struct ubi_work, list);
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Call the worker function. Do not touch the work structure
+ * after this call as it will have been freed or reused by that
+ * time by the worker function.
+ */
+ err = wrk->func(ubi, wrk, 0);
+ if (err)
+ ubi_err(ubi, "work failed with error code %d", err);
+ up_read(&ubi->work_sem);
+
+ return err;
+}
+
+/**
+ * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
+ * @e: the wear-leveling entry to check
+ * @root: the root of the tree
+ *
+ * This function returns non-zero if @e is in the @root RB-tree and zero if it
+ * is not.
+ */
+static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
+{
+ struct rb_node *p;
+
+ p = root->rb_node;
+ while (p) {
+ struct ubi_wl_entry *e1;
+
+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
+
+ if (e->pnum == e1->pnum) {
+ ubi_assert(e == e1);
+ return 1;
+ }
+
+ if (e->ec < e1->ec)
+ p = p->rb_left;
+ else if (e->ec > e1->ec)
+ p = p->rb_right;
+ else {
+ ubi_assert(e->pnum != e1->pnum);
+ if (e->pnum < e1->pnum)
+ p = p->rb_left;
+ else
+ p = p->rb_right;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * prot_queue_add - add physical eraseblock to the protection queue.
+ * @ubi: UBI device description object
+ * @e: the physical eraseblock to add
+ *
+ * This function adds @e to the tail of the protection queue @ubi->pq, where
+ * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
+ * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
+ * be locked.
+ */
+static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ int pq_tail = ubi->pq_head - 1;
+
+ if (pq_tail < 0)
+ pq_tail = UBI_PROT_QUEUE_LEN - 1;
+ ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
+ list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
+ dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
+}
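+
+/*
+ * Worked example (illustrative, assuming the default UBI_PROT_QUEUE_LEN of
+ * 10): if @pq_head is 0, @pq_tail wraps around to 9, i.e. the PEB lands in
+ * the list that serve_prot_queue() will reach last, so it stays protected
+ * for roughly 10 global erase operations.
+ */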
+
+/**
+ * find_wl_entry - find wear-leveling entry closest to certain erase counter.
+ * @ubi: UBI device description object
+ * @root: the RB-tree where to look for
+ * @diff: maximum possible difference from the smallest erase counter
+ *
+ * This function looks for a wear leveling entry with erase counter closest to
+ * min + @diff, where min is the smallest erase counter.
+ */
+static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root, int diff)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *prev_e = NULL;
+ int max;
+
+ e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ max = e->ec + diff;
+
+ p = root->rb_node;
+ while (p) {
+ struct ubi_wl_entry *e1;
+
+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
+ if (e1->ec >= max)
+ p = p->rb_left;
+ else {
+ p = p->rb_right;
+ prev_e = e;
+ e = e1;
+ }
+ }
+
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best WL entry
+ * such that fastmap can use the anchor PEB later. */
+ if (prev_e && !ubi->fm_disabled &&
+ !ubi->fm && e->pnum < UBI_FM_MAX_START)
+ return prev_e;
+
+ return e;
+}
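+
+/*
+ * Example (illustrative, assuming UBI_FM_MAX_START == 64): if the best
+ * candidate found above is PEB 12 and no fastmap has been written yet, the
+ * second best entry is returned instead, keeping the low-numbered PEB
+ * available as a future fastmap anchor.
+ */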
+
+/**
+ * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
+ * @ubi: UBI device description object
+ * @root: the RB-tree where to look for
+ *
+ * This function looks for a wear-leveling entry with a medium erase counter,
+ * but not greater than or equal to the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF/2.
+ */
+static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root)
+{
+ struct ubi_wl_entry *e, *first, *last;
+
+ first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
+
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
+ e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
+
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best
+ * WL entry such that fastmap can use the anchor PEB later. */
+ e = may_reserve_for_fm(ubi, e, root);
+ } else
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+
+ return e;
+}
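+
+/*
+ * Note (illustrative): when the whole free tree spans fewer than
+ * WL_FREE_MAX_DIFF erase cycles, picking root->rb_node above is a cheap way
+ * to get an entry with a roughly median erase counter, because the tree is
+ * keyed by EC.
+ */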
+
+/**
+ * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
+ * refill_wl_user_pool().
+ * @ubi: UBI device description object
+ *
+ * This function returns a wear-leveling entry in case of success and
+ * NULL in case of failure.
+ */
+static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_mean_wl_entry(ubi, &ubi->free);
+ if (!e) {
+ ubi_err(ubi, "no free eraseblocks");
+ return NULL;
+ }
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /*
+ * Move the physical eraseblock to the protection queue where it will
+ * be protected from being moved for some time.
+ */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+ dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+
+ return e;
+}
+
+/**
+ * prot_queue_del - remove a physical eraseblock from the protection queue.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to remove
+ *
+ * This function deletes PEB @pnum from the protection queue and returns zero
+ * in case of success and %-ENODEV if the PEB was not found.
+ */
+static int prot_queue_del(struct ubi_device *ubi, int pnum)
+{
+ struct ubi_wl_entry *e;
+
+ e = ubi->lookuptbl[pnum];
+ if (!e)
+ return -ENODEV;
+
+ if (self_check_in_pq(ubi, e))
+ return -ENODEV;
+
+ list_del(&e->u.list);
+ dbg_wl("deleted PEB %d from the protection queue", e->pnum);
+ return 0;
+}
+
+/**
+ * sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @e: the physical eraseblock to erase
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int torture)
+{
+ int err;
+ struct ubi_ec_hdr *ec_hdr;
+ unsigned long long ec = e->ec;
+
+ dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
+
+ err = self_check_ec(ubi, e->pnum, e->ec);
+ if (err)
+ return -EINVAL;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_sync_erase(ubi, e->pnum, torture);
+ if (err < 0)
+ goto out_free;
+
+ ec += err;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. Upgrade UBI and use 64-bit
+ * erase counters internally.
+ */
+ ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
+ e->pnum, ec);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
+
+ ec_hdr->ec = cpu_to_be64(ec);
+
+ err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
+ if (err)
+ goto out_free;
+
+ e->ec = ec;
+ spin_lock(&ubi->wl_lock);
+ if (e->ec > ubi->max_ec)
+ ubi->max_ec = e->ec;
+ spin_unlock(&ubi->wl_lock);
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * serve_prot_queue - check if it is time to stop protecting PEBs.
+ * @ubi: UBI device description object
+ *
+ * This function is called after each erase operation and removes PEBs from the
+ * tail of the protection queue. These PEBs have been protected for long enough
+ * and should be moved to the used tree.
+ */
+static void serve_prot_queue(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e, *tmp;
+ int count;
+
+ /*
+	 * There may be several protected physical eraseblocks to remove;
+ * process them all.
+ */
+repeat:
+ count = 0;
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
+ dbg_wl("PEB %d EC %d protection over, move to used tree",
+ e->pnum, e->ec);
+
+ list_del(&e->u.list);
+ wl_tree_add(e, &ubi->used);
+ if (count++ > 32) {
+ /*
+ * Let's be nice and avoid holding the spinlock for
+ * too long.
+ */
+ spin_unlock(&ubi->wl_lock);
+ cond_resched();
+ goto repeat;
+ }
+ }
+
+ ubi->pq_head += 1;
+ if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
+ ubi->pq_head = 0;
+ ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * __schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list. Can only be used if ubi->work_sem is already held in read mode!
+ */
+static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ spin_lock(&ubi->wl_lock);
+ list_add_tail(&wrk->list, &ubi->works);
+ ubi_assert(ubi->works_count >= 0);
+ ubi->works_count += 1;
+ if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
+ wake_up_process(ubi->bgt_thread);
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
+ */
+static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ down_read(&ubi->work_sem);
+ __schedule_ubi_work(ubi, wrk);
+ up_read(&ubi->work_sem);
+}
+
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ int shutdown);
+
+/**
+ * schedule_erase - schedule an erase work.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * This function returns zero in case of success and %-ENOMEM in case of
+ * failure.
+ */
+static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture)
+{
+ struct ubi_work *wl_wrk;
+
+ ubi_assert(e);
+
+ dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
+ e->pnum, e->ec, torture);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->func = &erase_worker;
+ wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+ schedule_ubi_work(ubi, wl_wrk);
+ return 0;
+}
+
+/**
+ * do_sync_erase - run the erase worker synchronously.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture)
+{
+ struct ubi_work *wl_wrk;
+
+ dbg_wl("sync erase of PEB %i", e->pnum);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+ return erase_worker(ubi, wl_wrk, 0);
+}
+
+/**
+ * wear_leveling_worker - wear-leveling worker function.
+ * @ubi: UBI device description object
+ * @wrk: the work object
+ * @shutdown: non-zero if the worker has to free memory and exit
+ *		because the WL sub-system is shutting down
+ *
+ * This function copies a more worn out physical eraseblock to a less worn out
+ * one. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ int shutdown)
+{
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+ int vol_id = -1, lnum = -1;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+#endif
+ struct ubi_wl_entry *e1, *e2;
+ struct ubi_vid_hdr *vid_hdr;
+
+ kfree(wrk);
+ if (shutdown)
+ return 0;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ mutex_lock(&ubi->move_mutex);
+ spin_lock(&ubi->wl_lock);
+ ubi_assert(!ubi->move_from && !ubi->move_to);
+ ubi_assert(!ubi->move_to_put);
+
+ if (!ubi->free.rb_node ||
+ (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
+ /*
+ * No free physical eraseblocks? Well, they must be waiting in
+ * the queue to be erased. Cancel movement - it will be
+ * triggered again when a free physical eraseblock appears.
+ *
+ * No used physical eraseblocks? They must be temporarily
+ * protected from being moved. They will be moved to the
+ * @ubi->used tree later and the wear-leveling will be
+ * triggered again.
+ */
+ dbg_wl("cancel WL, a list is empty: free %d, used %d",
+ !ubi->free.rb_node, !ubi->used.rb_node);
+ goto out_cancel;
+ }
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Check whether we need to produce an anchor PEB */
+ if (!anchor)
+ anchor = !anchor_pebs_avalible(&ubi->free);
+
+ if (anchor) {
+ e1 = find_anchor_wl_entry(&ubi->used);
+ if (!e1)
+ goto out_cancel;
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ } else if (!ubi->scrub.rb_node) {
+#else
+ if (!ubi->scrub.rb_node) {
+#endif
+ /*
+ * Now pick the least worn-out used physical eraseblock and a
+ * highly worn-out free physical eraseblock. If the erase
+			 * counters differ enough, start wear-leveling.
+ */
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ dbg_wl("no WL needed: min used EC %d, max free EC %d",
+ e1->ec, e2->ec);
+
+ /* Give the unused PEB back */
+ wl_tree_add(e2, &ubi->free);
+ ubi->free_count++;
+ goto out_cancel;
+ }
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("move PEB %d EC %d to PEB %d EC %d",
+ e1->pnum, e1->ec, e2->pnum, e2->ec);
+ } else {
+ /* Perform scrubbing */
+ scrubbing = 1;
+ e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->scrub);
+ rb_erase(&e1->u.rb, &ubi->scrub);
+ dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
+ }
+
+ ubi->move_from = e1;
+ ubi->move_to = e2;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
+ * We so far do not know which logical eraseblock our physical
+ * eraseblock (@e1) belongs to. We have to read the volume identifier
+ * header first.
+ *
+ * Note, we are protected from this PEB being unmapped and erased. The
+ * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
+ * which is being moved was unmapped.
+ */
+
+ err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err == UBI_IO_FF) {
+ /*
+				 * We are trying to move a PEB without a VID
+				 * header. UBI always writes VID headers shortly
+				 * after the PEB was given, so we have a situation
+				 * when it has not yet had a chance to write it,
+				 * because it was preempted. So add this PEB to
+				 * the protection queue for now,
+ * because presumably more data will be written there
+ * (including the missing VID header), and then we'll
+ * move it.
+ */
+ dbg_wl("PEB %d has no VID header", e1->pnum);
+ protect = 1;
+ goto out_not_moved;
+ } else if (err == UBI_IO_FF_BITFLIPS) {
+ /*
+ * The same situation as %UBI_IO_FF, but bit-flips were
+ * detected. It is better to schedule this PEB for
+ * scrubbing.
+ */
+ dbg_wl("PEB %d has no VID header but has bit-flips",
+ e1->pnum);
+ scrubbing = 1;
+ goto out_not_moved;
+ }
+
+ ubi_err(ubi, "error %d while reading VID header from PEB %d",
+ err, e1->pnum);
+ goto out_error;
+ }
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
+ err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
+ if (err) {
+ if (err == MOVE_CANCEL_RACE) {
+ /*
+ * The LEB has not been moved because the volume is
+ * being deleted or the PEB has been put meanwhile. We
+ * should prevent this PEB from being selected for
+ * wear-leveling movement again, so put it to the
+ * protection queue.
+ */
+ protect = 1;
+ goto out_not_moved;
+ }
+ if (err == MOVE_RETRY) {
+ scrubbing = 1;
+ goto out_not_moved;
+ }
+ if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ err == MOVE_TARGET_RD_ERR) {
+ /*
+ * Target PEB had bit-flips or write error - torture it.
+ */
+ torture = 1;
+ goto out_not_moved;
+ }
+
+ if (err == MOVE_SOURCE_RD_ERR) {
+ /*
+ * An error happened while reading the source PEB. Do
+ * not switch to R/O mode in this case, and give the
+ * upper layers a possibility to recover from this,
+ * e.g. by unmapping corresponding LEB. Instead, just
+ * put this PEB to the @ubi->erroneous list to prevent
+ * UBI from trying to move it over and over again.
+ */
+ if (ubi->erroneous_peb_count > ubi->max_erroneous) {
+ ubi_err(ubi, "too many erroneous eraseblocks (%d)",
+ ubi->erroneous_peb_count);
+ goto out_error;
+ }
+ erroneous = 1;
+ goto out_not_moved;
+ }
+
+ if (err < 0)
+ goto out_error;
+
+ ubi_assert(0);
+ }
+
+ /* The PEB has been successfully moved */
+ if (scrubbing)
+ ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+ e1->pnum, vol_id, lnum, e2->pnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+
+ spin_lock(&ubi->wl_lock);
+ if (!ubi->move_to_put) {
+ wl_tree_add(e2, &ubi->used);
+ e2 = NULL;
+ }
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
+ if (err) {
+ if (e2)
+ wl_entry_destroy(ubi, e2);
+ goto out_ro;
+ }
+
+ if (e2) {
+ /*
+ * Well, the target PEB was put meanwhile, schedule it for
+ * erasure.
+ */
+ dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
+ e2->pnum, vol_id, lnum);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
+ if (err)
+ goto out_ro;
+ }
+
+ dbg_wl("done");
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
+
+ /*
+ * For some reason the LEB was not moved; it might be an error, it might be
+ * something else. @e1 was not changed, so put it back. @e2 might
+ * have been changed, schedule it for erasure.
+ */
+out_not_moved:
+ if (vol_id != -1)
+ dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
+ e1->pnum, vol_id, lnum, e2->pnum, err);
+ else
+ dbg_wl("cancel moving PEB %d to PEB %d (%d)",
+ e1->pnum, e2->pnum, err);
+ spin_lock(&ubi->wl_lock);
+ if (protect)
+ prot_queue_add(ubi, e1);
+ else if (erroneous) {
+ wl_tree_add(e1, &ubi->erroneous);
+ ubi->erroneous_peb_count += 1;
+ } else if (scrubbing)
+ wl_tree_add(e1, &ubi->scrub);
+ else
+ wl_tree_add(e1, &ubi->used);
+ ubi_assert(!ubi->move_to_put);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
+ if (err)
+ goto out_ro;
+
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
+
+out_error:
+ if (vol_id != -1)
+ ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
+ err, e1->pnum, e2->pnum);
+ else
+ ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
+ err, e1->pnum, vol_id, lnum, e2->pnum);
+ spin_lock(&ubi->wl_lock);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ wl_entry_destroy(ubi, e1);
+ wl_entry_destroy(ubi, e2);
+
+out_ro:
+ ubi_ro_mode(ubi);
+ mutex_unlock(&ubi->move_mutex);
+ ubi_assert(err != 0);
+ return err < 0 ? err : -EIO;
+
+out_cancel:
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ mutex_unlock(&ubi->move_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+}
+
+/**
+ * ensure_wear_leveling - schedule wear-leveling if it is needed.
+ * @ubi: UBI device description object
+ * @nested: set to non-zero if this function is called from UBI worker
+ *
+ * This function checks if it is time to start wear-leveling and schedules it
+ * if yes. This function returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
+{
+ int err = 0;
+ struct ubi_wl_entry *e1;
+ struct ubi_wl_entry *e2;
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled)
+ /* Wear-leveling is already in the work queue */
+ goto out_unlock;
+
+ /*
+ * If the ubi->scrub tree is not empty, scrubbing is needed, and the
+	 * WL worker has to be scheduled anyway.
+ */
+ if (!ubi->scrub.rb_node) {
+ if (!ubi->used.rb_node || !ubi->free.rb_node)
+ /* No physical eraseblocks - no deal */
+ goto out_unlock;
+
+ /*
+ * We schedule wear-leveling only if the difference between the
+ * lowest erase counter of used physical eraseblocks and a high
+ * erase counter of free physical eraseblocks is greater than
+ * %UBI_WL_THRESHOLD.
+ */
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+
+ if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
+ goto out_unlock;
+ dbg_wl("schedule wear-leveling");
+ } else
+ dbg_wl("schedule scrubbing");
+
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ err = -ENOMEM;
+ goto out_cancel;
+ }
+
+ wrk->anchor = 0;
+ wrk->func = &wear_leveling_worker;
+ if (nested)
+ __schedule_ubi_work(ubi, wrk);
+ else
+ schedule_ubi_work(ubi, wrk);
+ return err;
+
+out_cancel:
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+out_unlock:
+ spin_unlock(&ubi->wl_lock);
+ return err;
+}
+
+/**
+ * erase_worker - physical eraseblock erase worker function.
+ * @ubi: UBI device description object
+ * @wl_wrk: the work object
+ * @shutdown: non-zero if the worker has to free memory and exit
+ * because the WL sub-system is shutting down
+ *
+ * This function erases a physical eraseblock and performs torture testing if
+ * needed. It also takes care of marking the physical eraseblock bad if
+ * needed. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ int shutdown)
+{
+ struct ubi_wl_entry *e = wl_wrk->e;
+ int pnum = e->pnum;
+ int vol_id = wl_wrk->vol_id;
+ int lnum = wl_wrk->lnum;
+ int err, available_consumed = 0;
+
+ if (shutdown) {
+ dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
+ kfree(wl_wrk);
+ wl_entry_destroy(ubi, e);
+ return 0;
+ }
+
+ dbg_wl("erase PEB %d EC %d LEB %d:%d",
+ pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
+
+ err = sync_erase(ubi, e, wl_wrk->torture);
+ if (!err) {
+ /* Fine, we've erased it successfully */
+ kfree(wl_wrk);
+
+ spin_lock(&ubi->wl_lock);
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+		 * One more erase operation has happened, take care of
+ * protected physical eraseblocks.
+ */
+ serve_prot_queue(ubi);
+
+		/* And take care of wear-leveling */
+ err = ensure_wear_leveling(ubi, 1);
+ return err;
+ }
+
+ ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
+ kfree(wl_wrk);
+
+ if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+ err == -EBUSY) {
+ int err1;
+
+ /* Re-schedule the LEB for erasure */
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
+ if (err1) {
+ err = err1;
+ goto out_ro;
+ }
+ return err;
+ }
+
+ wl_entry_destroy(ubi, e);
+ if (err != -EIO)
+ /*
+ * If this is not %-EIO, we have no idea what to do. Scheduling
+ * this physical eraseblock for erasure again would cause
+		 * errors again and again. Well, let's switch to R/O mode.
+ */
+ goto out_ro;
+
+ /* It is %-EIO, the PEB went bad */
+
+ if (!ubi->bad_allowed) {
+ ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
+ goto out_ro;
+ }
+
+ spin_lock(&ubi->volumes_lock);
+ if (ubi->beb_rsvd_pebs == 0) {
+ if (ubi->avail_pebs == 0) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_err(ubi, "no reserved/available physical eraseblocks");
+ goto out_ro;
+ }
+ ubi->avail_pebs -= 1;
+ available_consumed = 1;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_msg(ubi, "mark PEB %d as bad", pnum);
+ err = ubi_io_mark_bad(ubi, pnum);
+ if (err)
+ goto out_ro;
+
+ spin_lock(&ubi->volumes_lock);
+ if (ubi->beb_rsvd_pebs > 0) {
+ if (available_consumed) {
+ /*
+ * The amount of reserved PEBs increased since we last
+ * checked.
+ */
+ ubi->avail_pebs += 1;
+ available_consumed = 0;
+ }
+ ubi->beb_rsvd_pebs -= 1;
+ }
+ ubi->bad_peb_count += 1;
+ ubi->good_peb_count -= 1;
+ ubi_calculate_reserved(ubi);
+ if (available_consumed)
+ ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
+ else if (ubi->beb_rsvd_pebs)
+ ubi_msg(ubi, "%d PEBs left in the reserve",
+ ubi->beb_rsvd_pebs);
+ else
+ ubi_warn(ubi, "last PEB from the reserve was used");
+ spin_unlock(&ubi->volumes_lock);
+
+ return err;
+
+out_ro:
+ if (available_consumed) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->avail_pebs += 1;
+ spin_unlock(&ubi->volumes_lock);
+ }
+ ubi_ro_mode(ubi);
+ return err;
+}
+
+/**
+ * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
+ * @ubi: UBI device description object
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @pnum: physical eraseblock to return
+ * @torture: if this physical eraseblock has to be tortured
+ *
+ * This function is called to return physical eraseblock @pnum to the pool of
+ * free physical eraseblocks. The @torture flag has to be set if an I/O error
+ * occurred on this PEB and it has to be tested. This function returns zero
+ * in case of success, and a negative error code in case of failure.
+ */
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture)
+{
+ int err;
+ struct ubi_wl_entry *e;
+
+ dbg_wl("PEB %d", pnum);
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ down_read(&ubi->fm_protect);
+
+retry:
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (e == ubi->move_from) {
+ /*
+ * User is putting the physical eraseblock which was selected to
+ * be moved. It will be scheduled for erasure in the
+ * wear-leveling worker.
+ */
+ dbg_wl("PEB %d is being moved, wait", pnum);
+ spin_unlock(&ubi->wl_lock);
+
+ /* Wait for the WL worker by taking the @ubi->move_mutex */
+ mutex_lock(&ubi->move_mutex);
+ mutex_unlock(&ubi->move_mutex);
+ goto retry;
+ } else if (e == ubi->move_to) {
+ /*
+ * User is putting the physical eraseblock which was selected
+ * as the target the data is moved to. It may happen if the EBA
+ * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
+ * but the WL sub-system has not put the PEB to the "used" tree
+ * yet, but it is about to do this. So we just set a flag which
+ * will tell the WL worker that the PEB is not needed anymore
+ * and should be scheduled for erasure.
+ */
+ dbg_wl("PEB %d is the target of data moving", pnum);
+ ubi_assert(!ubi->move_to_put);
+ ubi->move_to_put = 1;
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return 0;
+ } else {
+ if (in_wl_tree(e, &ubi->used)) {
+ self_check_in_wl_tree(ubi, e, &ubi->used);
+ rb_erase(&e->u.rb, &ubi->used);
+ } else if (in_wl_tree(e, &ubi->scrub)) {
+ self_check_in_wl_tree(ubi, e, &ubi->scrub);
+ rb_erase(&e->u.rb, &ubi->scrub);
+ } else if (in_wl_tree(e, &ubi->erroneous)) {
+ self_check_in_wl_tree(ubi, e, &ubi->erroneous);
+ rb_erase(&e->u.rb, &ubi->erroneous);
+ ubi->erroneous_peb_count -= 1;
+ ubi_assert(ubi->erroneous_peb_count >= 0);
+ /* Erroneous PEBs should be tortured */
+ torture = 1;
+ } else {
+ err = prot_queue_del(ubi, e->pnum);
+ if (err) {
+ ubi_err(ubi, "PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return err;
+ }
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ err = schedule_erase(ubi, e, vol_id, lnum, torture);
+ if (err) {
+ spin_lock(&ubi->wl_lock);
+ wl_tree_add(e, &ubi->used);
+ spin_unlock(&ubi->wl_lock);
+ }
+
+ up_read(&ubi->fm_protect);
+ return err;
+}
+
+/**
+ * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ *
+ * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
+ * needs scrubbing. This function schedules a physical eraseblock for
+ * scrubbing which is done in background. This function returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
+{
+ struct ubi_wl_entry *e;
+
+ ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
+
+retry:
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
+ in_wl_tree(e, &ubi->erroneous)) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ if (e == ubi->move_to) {
+ /*
+ * This physical eraseblock was used to move data to. The data
+		 * was moved but the PEB was not yet inserted into the proper
+ * tree. We should just wait a little and let the WL worker
+ * proceed.
+ */
+ spin_unlock(&ubi->wl_lock);
+ dbg_wl("the PEB %d is not in proper tree, retry", pnum);
+ yield();
+ goto retry;
+ }
+
+ if (in_wl_tree(e, &ubi->used)) {
+ self_check_in_wl_tree(ubi, e, &ubi->used);
+ rb_erase(&e->u.rb, &ubi->used);
+ } else {
+ int err;
+
+ err = prot_queue_del(ubi, e->pnum);
+ if (err) {
+ ubi_err(ubi, "PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
+
+ wl_tree_add(e, &ubi->scrub);
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Technically scrubbing is the same as wear-leveling, so it is done
+ * by the WL worker.
+ */
+ return ensure_wear_leveling(ubi, 0);
+}
+
+/**
+ * ubi_wl_flush - flush all pending works.
+ * @ubi: UBI device description object
+ * @vol_id: the volume id to flush for
+ * @lnum: the logical eraseblock number to flush for
+ *
+ * This function executes all pending works for a particular volume id /
+ * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
+ * acts as a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ int err = 0;
+ int found = 1;
+
+ /*
+ * Erase while the pending works queue is not empty, but not more than
+ * the number of currently pending works.
+ */
+ dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
+ vol_id, lnum, ubi->works_count);
+
+ while (found) {
+ struct ubi_work *wrk, *tmp;
+ found = 0;
+
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
+ if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
+ (lnum == UBI_ALL || wrk->lnum == lnum)) {
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ err = wrk->func(ubi, wrk, 0);
+ if (err) {
+ up_read(&ubi->work_sem);
+ return err;
+ }
+
+ spin_lock(&ubi->wl_lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
+ }
+
+ /*
+ * Make sure all the works which have been done in parallel are
+ * finished.
+ */
+ down_write(&ubi->work_sem);
+ up_write(&ubi->work_sem);
+
+ return err;
+}
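+
+/*
+ * Usage example (illustrative): ubi_wl_flush(ubi, UBI_ALL, UBI_ALL) runs
+ * every pending work, while ubi_wl_flush(ubi, 3, UBI_ALL) only runs works
+ * queued on behalf of volume 3.
+ */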
+
+/**
+ * tree_destroy - destroy an RB-tree.
+ * @ubi: UBI device description object
+ * @root: the root of the tree to destroy
+ */
+static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
+{
+ struct rb_node *rb;
+ struct ubi_wl_entry *e;
+
+ rb = root->rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ e = rb_entry(rb, struct ubi_wl_entry, u.rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &e->u.rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ wl_entry_destroy(ubi, e);
+ }
+ }
+}
+
+/**
+ * ubi_thread - UBI background thread.
+ * @u: the UBI device description object pointer
+ */
+int ubi_thread(void *u)
+{
+ int failures = 0;
+ struct ubi_device *ubi = u;
+
+ ubi_msg(ubi, "background thread \"%s\" started, PID %d",
+ ubi->bgt_name, task_pid_nr(current));
+
+ set_freezable();
+ for (;;) {
+ int err;
+
+ if (kthread_should_stop())
+ break;
+
+ if (try_to_freeze())
+ continue;
+
+ spin_lock(&ubi->wl_lock);
+ if (list_empty(&ubi->works) || ubi->ro_mode ||
+ !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&ubi->wl_lock);
+ schedule();
+ continue;
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ err = do_work(ubi);
+ if (err) {
+ ubi_err(ubi, "%s: work failed with error code %d",
+ ubi->bgt_name, err);
+ if (failures++ > WL_MAX_FAILURES) {
+ /*
+ * Too many failures, disable the thread and
+ * switch to read-only mode.
+ */
+ ubi_msg(ubi, "%s: %d consecutive failures",
+ ubi->bgt_name, WL_MAX_FAILURES);
+ ubi_ro_mode(ubi);
+ ubi->thread_enabled = 0;
+ continue;
+ }
+ } else
+ failures = 0;
+
+ cond_resched();
+ }
+
+ dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
+ return 0;
+}
+
+/**
+ * shutdown_work - shutdown all pending works.
+ * @ubi: UBI device description object
+ */
+static void shutdown_work(struct ubi_device *ubi)
+{
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ flush_work(&ubi->fm_work);
+#endif
+ while (!list_empty(&ubi->works)) {
+ struct ubi_work *wrk;
+
+ wrk = list_entry(ubi->works.next, struct ubi_work, list);
+ list_del(&wrk->list);
+ wrk->func(ubi, wrk, 1);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ }
+}
+
+/**
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero in case of success, and a negative error code in
+ * case of failure.
+ */
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int err, i, reserved_pebs, found_pebs = 0;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp;
+ struct ubi_wl_entry *e;
+
+ ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
+ spin_lock_init(&ubi->wl_lock);
+ mutex_init(&ubi->move_mutex);
+ init_rwsem(&ubi->work_sem);
+ ubi->max_ec = ai->max_ec;
+ INIT_LIST_HEAD(&ubi->works);
+
+ sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
+
+ err = -ENOMEM;
+ ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
+ if (!ubi->lookuptbl)
+ return err;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
+ INIT_LIST_HEAD(&ubi->pq[i]);
+ ubi->pq_head = 0;
+
+ list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+ wl_entry_destroy(ubi, e);
+ goto out_free;
+ }
+
+ found_pebs++;
+ }
+
+ ubi->free_count = 0;
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi_assert(e->ec >= 0);
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+
+ ubi->lookuptbl[e->pnum] = e;
+
+ found_pebs++;
+ }
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+
+ if (!aeb->scrub) {
+ dbg_wl("add PEB %d EC %d to the used tree",
+ e->pnum, e->ec);
+ wl_tree_add(e, &ubi->used);
+ } else {
+ dbg_wl("add PEB %d EC %d to the scrub tree",
+ e->pnum, e->ec);
+ wl_tree_add(e, &ubi->scrub);
+ }
+
+ found_pebs++;
+ }
+ }
+
+ dbg_wl("found %i PEBs", found_pebs);
+
+ if (ubi->fm) {
+		ubi_assert(ubi->good_peb_count ==
+ found_pebs + ubi->fm->used_blocks);
+
+ for (i = 0; i < ubi->fm->used_blocks; i++) {
+ e = ubi->fm->e[i];
+ ubi->lookuptbl[e->pnum] = e;
+ }
+	} else {
+		ubi_assert(ubi->good_peb_count == found_pebs);
+	}
+
+ reserved_pebs = WL_RESERVED_PEBS;
+ ubi_fastmap_init(ubi, &reserved_pebs);
+
+ if (ubi->avail_pebs < reserved_pebs) {
+ ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, reserved_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ goto out_free;
+ }
+ ubi->avail_pebs -= reserved_pebs;
+ ubi->rsvd_pebs += reserved_pebs;
+
+ /* Schedule wear-leveling if needed */
+ err = ensure_wear_leveling(ubi, 0);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ shutdown_work(ubi);
+ tree_destroy(ubi, &ubi->used);
+ tree_destroy(ubi, &ubi->free);
+ tree_destroy(ubi, &ubi->scrub);
+ kfree(ubi->lookuptbl);
+ return err;
+}
+
+/**
+ * protection_queue_destroy - destroy the protection queue.
+ * @ubi: UBI device description object
+ */
+static void protection_queue_destroy(struct ubi_device *ubi)
+{
+ int i;
+ struct ubi_wl_entry *e, *tmp;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+ list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+ list_del(&e->u.list);
+ wl_entry_destroy(ubi, e);
+ }
+ }
+}
+
+/**
+ * ubi_wl_close - close the wear-leveling sub-system.
+ * @ubi: UBI device description object
+ */
+void ubi_wl_close(struct ubi_device *ubi)
+{
+ dbg_wl("close the WL sub-system");
+ ubi_fastmap_close(ubi);
+ shutdown_work(ubi);
+ protection_queue_destroy(ubi);
+ tree_destroy(ubi, &ubi->used);
+ tree_destroy(ubi, &ubi->erroneous);
+ tree_destroy(ubi, &ubi->free);
+ tree_destroy(ubi, &ubi->scrub);
+ kfree(ubi->lookuptbl);
+}
+
+/**
+ * self_check_ec - make sure that the erase counter of a PEB is correct.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ * @ec: the erase counter to check
+ *
+ * This function returns zero if the erase counter of physical eraseblock @pnum
+ * is equal to @ec, and a non-zero value if not or if an error
+ * occurred.
+ */
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
+{
+ int err;
+ long long read_ec;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ /* The header does not have to exist */
+ err = 0;
+ goto out_free;
+ }
+
+ read_ec = be64_to_cpu(ec_hdr->ec);
+ if (ec != read_ec && read_ec - ec > 1) {
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
+ dump_stack();
+ err = 1;
+ } else
+ err = 0;
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ * @root: the root of the tree
+ *
+ * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
+ * is not.
+ */
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root)
+{
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ if (in_wl_tree(e, root))
+ return 0;
+
+ ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
+ e->pnum, e->ec, root);
+ dump_stack();
+ return -EINVAL;
+}
+
+/**
+ * self_check_in_pq - check if wear-leveling entry is in the protection
+ * queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
+ */
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
+{
+ struct ubi_wl_entry *p;
+ int i;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+ list_for_each_entry(p, &ubi->pq[i], u.list)
+ if (p == e)
+ return 0;
+
+ ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
+ e->pnum, e->ec);
+ dump_stack();
+ return -EINVAL;
+}
+#ifndef CONFIG_MTD_UBI_FASTMAP
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ ubi->free_count--;
+ ubi_assert(ubi->free_count >= 0);
+ rb_erase(&e->u.rb, &ubi->free);
+
+ return e;
+}
+
+/**
+ * produce_free_peb - produce a free physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function tries to make a free PEB by means of synchronous execution of
+ * pending works. This may be needed if, for example, the background thread is
+ * disabled. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int produce_free_peb(struct ubi_device *ubi)
+{
+ int err;
+
+ while (!ubi->free.rb_node && ubi->works_count) {
+ spin_unlock(&ubi->wl_lock);
+
+ dbg_wl("do one work synchronously");
+ err = do_work(ubi);
+
+ spin_lock(&ubi->wl_lock);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_wl_entry *e;
+
+retry:
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+ if (!ubi->free.rb_node) {
+ if (ubi->works_count == 0) {
+ ubi_err(ubi, "no free eraseblocks");
+ ubi_assert(list_empty(&ubi->works));
+ spin_unlock(&ubi->wl_lock);
+ return -ENOSPC;
+ }
+
+ err = produce_free_peb(ubi);
+ if (err < 0) {
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_eba_sem);
+ goto retry;
+
+ }
+ e = wl_get_wle(ubi);
+ prot_queue_add(ubi, e);
+ spin_unlock(&ubi->wl_lock);
+
+ err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
+ if (err) {
+ ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
+ return err;
+ }
+
+ return e->pnum;
+}
+#else
+#include "fastmap-wl.c"
+#endif
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
new file mode 100644
index 000000000..bd1f07e5c
--- /dev/null
+++ b/drivers/mtd/ubi/wl.h
@@ -0,0 +1,28 @@
+#ifndef UBI_WL_H
+#define UBI_WL_H
+#ifdef CONFIG_MTD_UBI_FASTMAP
+static int anchor_pebs_avalible(struct rb_root *root);
+static void update_fastmap_work_fn(struct work_struct *wrk);
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static void ubi_fastmap_close(struct ubi_device *ubi);
+static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
+{
+ /* Reserve enough LEBs to store two fastmaps. */
+ *count += (ubi->fm_size / ubi->leb_size) * 2;
+ INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
+}
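+/*
+ * Example (illustrative, hypothetical sizes): with fm_size == 256 KiB and
+ * leb_size == 128 KiB, the initializer above reserves (256 / 128) * 2 == 4
+ * extra PEBs for the two fastmap copies.
+ */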
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+ struct rb_root *root);
+#else /* !CONFIG_MTD_UBI_FASTMAP */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static inline void ubi_fastmap_close(struct ubi_device *ubi) { }
+static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) { }
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+					       struct ubi_wl_entry *e,
+					       struct rb_root *root)
+{
+	return e;
+}
+#endif /* CONFIG_MTD_UBI_FASTMAP */
+#endif /* UBI_WL_H */